#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
from pymongo import MongoClient
from validate_email import validate_email
from views.base import base
import config
import hashlib
class rols():
@property
def db(self):
if config.debug == True:
client = MongoClient('localhost', 27017)
else:
# getattr avoids Python's name mangling of the double-underscore config
# attributes inside this class body (config.__user would otherwise be
# compiled as config._rols__user and fail to resolve).
client = MongoClient('mongodb://' + getattr(config, '__user') + ':' + getattr(config, '__psw') + '@' + getattr(config, '__host'), getattr(config, '__port'))
return client[config.database]
def form(self):
form = {'config' : {
'method': 'POST',
'action' : '/admin/rols',
'class' : 'form-horizontal',
'error-class' : ''},
'fields': [{
'required':True,
'widget': 'text',
'attributes': {
'data-hint' : 'Escriba el nombre del rol',
'class': 'form-control floating-label',
'placeholder': 'Nombre del Rol',
'name': 'rol_name'
},
'form-group-class': 'col-md-12'
}]
}
permissions = self.db.permissions.find()
for permission in permissions:
name = permission['name']
field = {
'widget': 'checkbox',
'attributes' : {
'name': name,
'class': 'checkbox'
},
'form-group-class': 'col-md-4'
}
form['fields'].append(field)
submit = {
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Crear nuevo rol'
},
'form-group-class': 'col-md-6'
}
form['fields'].append(submit)
return form
def form_edit(self, id):
data = self.db.rols.find_one({'_id': ObjectId(id)})
print data
form = {'config' : {
'method': 'POST',
'action' : '/admin/rols/edit/'+id,
'class' : 'form-horizontal',
'error-class' : ''},
'fields': [{
'required':True,
'widget': 'text',
'attributes': {
'data-hint' : 'Escriba el nombre del rol',
'class': 'form-control floating-label',
'placeholder': 'Nombre del Rol',
'name': 'rol_name',
'value': data['name']
},
'form-group-class': 'col-md-12'
},
{
'widget':'hidden',
'attributes':{
'value': id,
'name':'id'
}
}
]
}
permissions = self.db.permissions.find()
for permission in permissions:
name = permission['name']
field = {
'widget': 'checkbox',
'attributes' : {
'name': name,
'class': 'checkbox'
},
'form-group-class': 'col-md-4'
}
if name in data['permissions']:
field['attributes']['checked'] = 'True'
form['fields'].append(field)
submit = {
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Guardar'
},
'form-group-class': 'col-md-6'
}
form['fields'].append(submit)
return form
def validation(self,data,edit=False):
validation = {'status':True, 'errors': list() }
if 'rol_name' in data:
if len(data['rol_name']) < 3:
validation['status'] = False
validation['errors'].append('El Nombre del rol debe poseer al menos 3 caracteres')
else:
rol = self.db.rols.find_one({'name':data['rol_name']})
# On create there is no 'id' in the payload, so use .get() to avoid a KeyError.
if rol != None and data.get('id') != str(rol['_id']):
validation['status'] = False
validation['errors'].append('El nombre de rol ya esta siendo utilizado')
else:
validation['status'] = False
validation['errors'].append('El campo nombre del rol es Obligatorio.')
if edit == True:
_q = self.db.rols.find_one({'_id':ObjectId(data['id'])})
if _q == None:
validation['status'] = False
validation['errors'].append('El id de rol a editar no existe.')
if validation['status'] == True:
if edit == False:
self.insert(data)
return 'Nuevo rol '+data['rol_name']+' Creado'
else:
return self.edit(data)
else:
return validation
def insert(self,data):
_INSERT ={
'name': data['rol_name'],
'permissions': list()
}
for permission in self.db.permissions.find().sort('name',1):
# Unchecked checkboxes are absent from the POST data, so use .get().
if data.get(permission['name']) == 'on':
_INSERT['permissions'].append(permission['name'])
return self.db.rols.insert(_INSERT)
def edit(self,data):
old_data = self.db.rols.find_one({'_id':ObjectId(data['id'])})
if 'block' in old_data:
return {'status':False,'errors':['Este rol esta bloqueado no puede ser editado.']}
else:
new_data = {
'name' : data['rol_name'],
'_id' : ObjectId(data['id']),
'permissions': list()
}
for permission in self.db.permissions.find().sort('name',1):
if data.get(permission['name']) == 'on':
new_data['permissions'].append(permission['name'])
self.db.rols.update(old_data,new_data)
return 'Rol '+old_data['name']+' editado correctamente.'
def delete(self, id):
data = self.db.rols.find_one({'_id':ObjectId(id)})
if data != None:
if 'block' in data and data['block'] == True:
return False
else:
self.db.rols.remove(data)
users = self.db.users.find()
for user in users:
tmp = user
if data['name'] == user['rol']:
tmp['rol'] = 'User'
self.db.users.update({'_id':user['_id']},tmp)
return 'Eliminado rol '+ data['name']
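# Usage sketch (hypothetical, not part of the original module): how a request
# handler might drive this class. The field names mirror the dict built by
# form(); permission checkboxes would arrive as extra keys with the value 'on'.
if __name__ == '__main__':
    handler = rols()
    posted = {'rol_name': 'Editor', 'submit': 'Crear nuevo rol'}  # hypothetical POST payload
    result = handler.validation(posted)
    # validation() returns a confirmation string on success, or a dict with
    # 'status': False and an 'errors' list when the input is invalid.
    print result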
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb.common import enum as s_enum
if TYPE_CHECKING:
T = TypeVar("T", covariant=True)
class ParameterKind(s_enum.StrEnum):
VariadicParam = 'VariadicParam'
NamedOnlyParam = 'NamedOnlyParam'
PositionalParam = 'PositionalParam'
def to_edgeql(self) -> str:
if self is ParameterKind.VariadicParam:
return 'VARIADIC'
elif self is ParameterKind.NamedOnlyParam:
return 'NAMED ONLY'
else:
return ''
class TypeModifier(s_enum.StrEnum):
SetOfType = 'SetOfType'
OptionalType = 'OptionalType'
SingletonType = 'SingletonType'
def to_edgeql(self) -> str:
if self is TypeModifier.SetOfType:
return 'SET OF'
elif self is TypeModifier.OptionalType:
return 'OPTIONAL'
else:
return ''
class OperatorKind(s_enum.StrEnum):
Infix = 'Infix'
Postfix = 'Postfix'
Prefix = 'Prefix'
Ternary = 'Ternary'
class TransactionIsolationLevel(s_enum.StrEnum):
REPEATABLE_READ = 'REPEATABLE READ'
SERIALIZABLE = 'SERIALIZABLE'
class TransactionAccessMode(s_enum.StrEnum):
READ_WRITE = 'READ WRITE'
READ_ONLY = 'READ ONLY'
class TransactionDeferMode(s_enum.StrEnum):
DEFERRABLE = 'DEFERRABLE'
NOT_DEFERRABLE = 'NOT DEFERRABLE'
class SchemaCardinality(s_enum.StrEnum):
'''This enum is used to store cardinality in the schema.'''
One = 'One'
Many = 'Many'
Unknown = 'Unknown'
def is_multi(self) -> bool:
if self is SchemaCardinality.One:
return False
elif self is SchemaCardinality.Many:
return True
else:
raise ValueError('cardinality is unknown')
def is_single(self) -> bool:
return not self.is_multi()
def is_known(self) -> bool:
return self is not SchemaCardinality.Unknown
def as_ptr_qual(self) -> str:
if self is SchemaCardinality.One:
return 'single'
elif self is SchemaCardinality.Many:
return 'multi'
else:
raise ValueError('cardinality is unknown')
def to_edgeql(self) -> str:
return self.as_ptr_qual().upper()
class Cardinality(s_enum.StrEnum):
'''This enum is used in cardinality inference internally.'''
# [0, 1]
AT_MOST_ONE = 'AT_MOST_ONE'
# [1, 1]
ONE = 'ONE'
# [0, inf)
MANY = 'MANY'
# [1, inf)
AT_LEAST_ONE = 'AT_LEAST_ONE'
# Sentinel
UNKNOWN = 'UNKNOWN'
def is_single(self) -> bool:
return self in {Cardinality.AT_MOST_ONE, Cardinality.ONE}
def is_multi(self) -> bool:
return not self.is_single()
def can_be_zero(self) -> bool:
return self not in {Cardinality.ONE, Cardinality.AT_LEAST_ONE}
def to_schema_value(self) -> Tuple[bool, SchemaCardinality]:
return _CARD_TO_TUPLE[self]
@classmethod
def from_schema_value(
cls,
required: bool,
card: SchemaCardinality
) -> Cardinality:
return _TUPLE_TO_CARD[(required, card)]
_CARD_TO_TUPLE = {
Cardinality.AT_MOST_ONE: (False, SchemaCardinality.One),
Cardinality.ONE: (True, SchemaCardinality.One),
Cardinality.MANY: (False, SchemaCardinality.Many),
Cardinality.AT_LEAST_ONE: (True, SchemaCardinality.Many),
}
_TUPLE_TO_CARD = {
(False, SchemaCardinality.One): Cardinality.AT_MOST_ONE,
(True, SchemaCardinality.One): Cardinality.ONE,
(False, SchemaCardinality.Many): Cardinality.MANY,
(True, SchemaCardinality.Many): Cardinality.AT_LEAST_ONE,
}
class Volatility(s_enum.StrEnum):
Immutable = 'Immutable'
Stable = 'Stable'
Volatile = 'Volatile'
def is_volatile(self) -> bool:
return self is Volatility.Volatile
@classmethod
def _missing_(cls, name):
# We want both `volatility := 'immutable'` in SDL and
# `SET volatility := 'IMMUTABLE'` in DDL to work.
return cls(name.title())
class Multiplicity(s_enum.StrEnum):
ZERO = 'ZERO' # This is valid for empty sets
ONE = 'ONE'
MANY = 'MANY'
UNKNOWN = 'UNKNOWN'
def is_one(self) -> bool:
return self is Multiplicity.ONE
def is_many(self) -> bool:
return self is Multiplicity.MANY
def is_zero(self) -> bool:
return self is Multiplicity.ZERO
class DescribeLanguage(s_enum.StrEnum):
DDL = 'DDL'
SDL = 'SDL'
TEXT = 'TEXT'
JSON = 'JSON'
class SchemaObjectClass(s_enum.StrEnum):
ALIAS = 'ALIAS'
ANNOTATION = 'ANNOTATION'
ARRAY_TYPE = 'ARRAY TYPE'
CAST = 'CAST'
CONSTRAINT = 'CONSTRAINT'
DATABASE = 'DATABASE'
EXTENSION = 'EXTENSION'
EXTENSION_PACKAGE = 'EXTENSION PACKAGE'
FUNCTION = 'FUNCTION'
INDEX = 'INDEX'
LINK = 'LINK'
MIGRATION = 'MIGRATION'
MODULE = 'MODULE'
OPERATOR = 'OPERATOR'
PARAMETER = 'PARAMETER'
PROPERTY = 'PROPERTY'
PSEUDO_TYPE = 'PSEUDO TYPE'
ROLE = 'ROLE'
SCALAR_TYPE = 'SCALAR TYPE'
TUPLE_TYPE = 'TUPLE TYPE'
TYPE = 'TYPE'
class LinkTargetDeleteAction(s_enum.StrEnum):
Restrict = 'Restrict'
DeleteSource = 'DeleteSource'
Allow = 'Allow'
DeferredRestrict = 'DeferredRestrict'
def to_edgeql(self) -> str:
if self is LinkTargetDeleteAction.DeleteSource:
return 'DELETE SOURCE'
elif self is LinkTargetDeleteAction.DeferredRestrict:
return 'DEFERRED RESTRICT'
elif self is LinkTargetDeleteAction.Restrict:
return 'RESTRICT'
elif self is LinkTargetDeleteAction.Allow:
return 'ALLOW'
else:
raise ValueError(f'unsupported enum value {self!r}')
class ConfigScope(s_enum.StrEnum):
INSTANCE = 'INSTANCE'
DATABASE = 'DATABASE'
SESSION = 'SESSION'
def to_edgeql(self) -> str:
if self is ConfigScope.DATABASE:
return 'CURRENT DATABASE'
else:
return str(self)
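# Quick sanity sketch (not part of the upstream module): exercises the
# Cardinality <-> (required, SchemaCardinality) mapping and a couple of the
# to_edgeql() helpers defined above.
if __name__ == '__main__':
    assert Cardinality.AT_LEAST_ONE.to_schema_value() == (True, SchemaCardinality.Many)
    assert Cardinality.from_schema_value(False, SchemaCardinality.One) is Cardinality.AT_MOST_ONE
    assert SchemaCardinality.Many.to_edgeql() == 'MULTI'
    assert ParameterKind.NamedOnlyParam.to_edgeql() == 'NAMED ONLY'
    assert ConfigScope.DATABASE.to_edgeql() == 'CURRENT DATABASE'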
"""Tasks graph in the GUI."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from qtools import inthread, inprocess
from qtools import QtGui, QtCore
from kwiklib.dataio import get_array, pandaize
from klustaviewa.stats.correlations import normalize
from klustaviewa.stats.correlograms import get_baselines, get_excerpts
from kwiklib.utils import logger as log
from klustaviewa import USERPREF
from klustaviewa import SETTINGS
from kwiklib.utils.colors import random_color
from klustaviewa.gui.threads import ThreadedTasks
import klustaviewa.views.viewdata as vd
# -----------------------------------------------------------------------------
# Abstract task graph
# -----------------------------------------------------------------------------
class AbstractTaskGraph(QtCore.QObject):
"""Graph of successive tasks."""
def __init__(self):#, **kwargs):
# for name, value in kwargs.iteritems():
# setattr(self, name, value)
pass
def run_single(self, action):
"""Take an action in input, execute it, and return the next action(s).
"""
if isinstance(action, basestring):
method = action
args, kwargs = (), {}
elif isinstance(action, tuple):
if len(action) == 1:
method, = action
args, kwargs = (), {}
elif len(action) == 2:
method, args = action
kwargs = {}
elif len(action) == 3:
method, args, kwargs = action
else:
method = None
# print method
if method is not None:
return getattr(self, method)(*args, **kwargs)
else:
return action
def run(self, action_first):
# Breadth-first search in the task dependency graph.
queue = [action_first]
marks = []
while queue:
action = queue.pop(0)
# Execute the first action.
outputs = self.run_single(action)
if not isinstance(outputs, list):
outputs = [outputs]
for output in outputs:
if output not in marks:
marks.append(output)
queue.append(output)
return outputs
def __getattr__(self, name):
if not hasattr(self, '_' + name):
raise ValueError('_' + name)
return lambda *args, **kwargs: self.run(('_' + name, args, kwargs))
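# Illustrative note (not in the original source): actions handled by
# run_single() take one of these shapes:
#   '_method'                   -> self._method()
#   ('_method', args)           -> self._method(*args)
#   ('_method', args, kwargs)   -> self._method(*args, **kwargs)
# __getattr__ above exposes each private _method as a public attribute, so
# graph.select(clusters) becomes run(('_select', (clusters,), {})), and every
# action a _method returns is queued by the breadth-first loop in run().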
# -----------------------------------------------------------------------------
# Specific task graph
# -----------------------------------------------------------------------------
class TaskGraph(AbstractTaskGraph):
def __init__(self, mainwindow):
# Shortcuts for the main window.
self.set(mainwindow)
# Create external threads/processes for long-lasting tasks.
self.create_threads()
def set(self, mainwindow):
# Shortcuts for the main window.
self.mainwindow = mainwindow
self.get_view = self.mainwindow.get_view
self.get_views = self.mainwindow.get_views
self.loader = self.mainwindow.loader
self.experiment = self.loader.experiment
self.wizard = self.mainwindow.wizard
self.controller = self.mainwindow.controller
self.statscache = self.mainwindow.statscache
def create_threads(self):
# Create the external threads.
self.tasks = ThreadedTasks()
self.tasks.selection_task.set_loader(self.loader)
self.tasks.selection_task.selectionDone.connect(
self.selection_done_callback)
self.tasks.recluster_task.reclusterDone.connect(
self.recluster_done_callback)
self.tasks.correlograms_task.correlogramsComputed.connect(
self.correlograms_computed_callback)
self.tasks.similarity_matrix_task.correlationMatrixComputed.connect(
self.similarity_matrix_computed_callback)
def join(self):
self.tasks.join()
# Selection.
# ----------
def _select(self, clusters, wizard=False,):
self.tasks.selection_task.select(clusters, wizard,)
def _select_done(self, clusters, wizard=False,):
if wizard:
target = (self.wizard.current_target(),)
else:
target = ()
# self.loader.select(clusters=clusters)
log.debug("Selected clusters {0:s}.".format(str(clusters)))
return [
('_update_feature_view', target, dict()),
('_update_waveform_view', (), dict(wizard=wizard,)),
('_show_selection_in_matrix', (clusters,),),
('_compute_correlograms', (clusters,), dict(wizard=wizard,)),
]
def _select_in_cluster_view(self, clusters, groups=[], wizard=False):
self.get_view('ClusterView').select(clusters, groups=groups,
wizard=wizard)
# Callbacks.
# ----------
def selection_done_callback(self, clusters, wizard,):
self.select_done(clusters, wizard=wizard,)
def recluster_done_callback(self, channel_group, clusters, spikes, clu, wizard):
self.recluster_done(channel_group=channel_group,
clusters=clusters,
spikes=spikes, clu=clu, wizard=wizard)
def correlograms_computed_callback(self, clusters, correlograms, ncorrbins,
corrbin, wizard):
# Execute the callback function under the control of the task manager
# (which handles the graph dependency).
self.correlograms_computed(clusters, correlograms, ncorrbins, corrbin, wizard)
def similarity_matrix_computed_callback(self, clusters_selected, matrix,
clusters, cluster_groups, target_next=None):
# Execute the callback function under the control of the task manager
# (which handles the graph dependency).
self.similarity_matrix_computed(clusters_selected, matrix, clusters,
cluster_groups, target_next=target_next)
# Computations.
# -------------
def _compute_correlograms(self, clusters_selected, wizard=None):
# Get the correlograms parameters.
spiketimes = get_array(self.loader.get_spiketimes('all'))
# print spiketimes.dtype
# Make a copy of the array so that it does not change before the
# computation of the correlograms begins.
clusters = np.array(get_array(self.loader.get_clusters('all')))
# Get excerpts
nexcerpts = USERPREF.get('correlograms_nexcerpts', 100)
excerpt_size = USERPREF.get('correlograms_excerpt_size', 20000)
spiketimes_excerpts = get_excerpts(spiketimes,
nexcerpts=nexcerpts, excerpt_size=excerpt_size)
clusters_excerpts = get_excerpts(clusters,
nexcerpts=nexcerpts, excerpt_size=excerpt_size)
# corrbin = self.loader.corrbin
# ncorrbins = self.loader.ncorrbins
corrbin = SETTINGS.get('correlograms.corrbin', .001)
ncorrbins = SETTINGS.get('correlograms.ncorrbins', 100)
# Get cluster indices that need to be updated.
clusters_to_update = (self.statscache.correlograms.
not_in_key_indices(clusters_selected))
# If there are pairs that need to be updated, launch the task.
if len(clusters_to_update) > 0:
# Set wait cursor.
self.mainwindow.set_busy(computing_correlograms=True)
# Launch the task.
self.tasks.correlograms_task.compute(
spiketimes_excerpts,
clusters_excerpts,
clusters_to_update=clusters_to_update,
clusters_selected=clusters_selected,
ncorrbins=ncorrbins, corrbin=corrbin,
wizard=wizard)
# Otherwise, directly update the correlograms view without launching
# the task in the external process.
else:
# self.update_correlograms_view()
return ('_update_correlograms_view', (wizard,), {})
def _recluster(self):
exp = self.loader.experiment
channel_group = self.loader.shank
clusters_selected = self.loader.get_clusters_selected()
self.tasks.recluster_task.recluster(exp, channel_group=channel_group,
clusters=clusters_selected)
def _recluster_done(self, channel_group=0, clusters=None,
spikes=None, clu=None, wizard=False):
return [('_split2', (spikes, clu, wizard))]
def _compute_similarity_matrix(self, target_next=None):
# TODO: get_similarity_matrix_data in viewdata
# return
similarity_measure = self.loader.similarity_measure
# features = self.loader.background_features
# masks = self.loader.background_masks
# clusters = get_array(self.loader.get_clusters(
# spikes=self.loader.background_spikes))
# cluster_groups = get_array(self.loader.get_cluster_groups('all'))
# clusters_all = self.loader.get_clusters_unique()
exp = self.experiment
channel_group = self.loader.shank
clustering = 'main' # TODO
fetdim = exp.application_data.spikedetekt.nfeatures_per_channel
clusters_data = getattr(exp.channel_groups[channel_group].clusters, clustering)
spikes_data = exp.channel_groups[channel_group].spikes
cluster_groups_data = getattr(exp.channel_groups[channel_group].cluster_groups, clustering)
clusters_all = sorted(clusters_data.keys())
cluster_groups = pd.Series([clusters_data[cl].cluster_group or 0
for cl in clusters_all], index=clusters_all)
spikes_selected, fm = spikes_data.load_features_masks(fraction=.1)
clusters = getattr(spikes_data.clusters, clustering)[:][spikes_selected]
fm = np.atleast_3d(fm)
features = fm[:, :, 0]
if features.shape[1] <= 1:
return []
# masks = fm[:, ::fetdim, 1]
if fm.shape[2] > 1:
masks = fm[:, :, 1]
else:
masks = None
# features = pandaize(features, spikes_selected)
# masks = pandaize(masks, spikes_selected)
# Get cluster indices that need to be updated.
# if clusters_to_update is None:
# NOTE: not specifying clusters_to_update explicitly ensures that
# all clusters that need to be updated are updated.
# This fixes a bug where the matrix was not updated correctly when
# this function was called several times in quick succession.
clusters_to_update = (self.statscache.similarity_matrix.
not_in_key_indices(clusters_all))
log.debug("Clusters to update: {0:s}".format(str(clusters_to_update)))
# If there are pairs that need to be updated, launch the task.
if len(clusters_to_update) > 0:
self.mainwindow.set_busy(computing_matrix=True)
# Launch the task.
self.tasks.similarity_matrix_task.compute(features,
clusters, cluster_groups, masks, clusters_to_update,
target_next=target_next, similarity_measure=similarity_measure)
# Otherwise, directly update the similarity matrix view without
# launching the task in the external process.
else:
return [('_wizard_update', (target_next,)),
('_update_similarity_matrix_view',),
]
def _correlograms_computed(self, clusters, correlograms, ncorrbins, corrbin, wizard):
clusters_selected = self.loader.get_clusters_selected()
# Abort if the selection has changed during the computation of the
# correlograms.
# Reset the cursor.
self.mainwindow.set_busy(computing_correlograms=False)
if not np.array_equal(clusters, clusters_selected):
log.debug("Skip update correlograms with clusters selected={0:s}"
" and clusters updated={1:s}.".format(clusters_selected, clusters))
return
if self.statscache.ncorrbins != ncorrbins:
log.debug(("Skip updating correlograms because ncorrbins has "
"changed (from {0:d} to {1:d})".format(
ncorrbins, self.statscache.ncorrbins)))
return
# Put the computed correlograms in the cache.
self.statscache.correlograms.update(clusters, correlograms)
# Update the view.
# self.update_correlograms_view()
return ('_update_correlograms_view', (), dict(wizard=wizard))
def _similarity_matrix_computed(self, clusters_selected, matrix, clusters,
cluster_groups, target_next=None):
self.mainwindow.set_busy(computing_matrix=False)
# spikes_slice = _get_similarity_matrix_slice(
# self.loader.nspikes,
# len(self.loader.get_clusters_unique()))
# clusters_now = self.loader.get_clusters(
# spikes=self.loader.background_spikes)
# if not np.array_equal(clusters, clusters_now):
# return False
if len(matrix) == 0:
return []
self.statscache.similarity_matrix.update(clusters_selected, matrix)
self.statscache.similarity_matrix_normalized = normalize(
self.statscache.similarity_matrix.to_array(copy=True))
# Update the cluster view with cluster quality.
quality = np.diag(self.statscache.similarity_matrix_normalized).copy()
self.statscache.cluster_quality = pd.Series(
quality,
index=self.statscache.similarity_matrix.indices,
)
self.get_view('ClusterView').set_quality(
self.statscache.cluster_quality)
return [('_wizard_update', (target_next,)),
('_update_similarity_matrix_view',),
]
def _invalidate(self, clusters):
self.statscache.invalidate(clusters)
# View updates.
# -------------
def _update_correlograms_view(self, wizard=None):
clu = self.loader.get_clusters_selected()
# HACK: work around a bug with some GPU drivers and empty selections
if len(clu)==0:
return
data = vd.get_correlogramsview_data(self.experiment,
self.statscache.correlograms,
clusters=clu,
channel_group=self.loader.shank,
wizard=wizard,
)
[view.set_data(**data) for view in self.get_views('CorrelogramsView')]
def _update_similarity_matrix_view(self):
data = vd.get_similaritymatrixview_data(self.experiment,
self.statscache.similarity_matrix_normalized,
channel_group=self.loader.shank,)
[view.set_data(**data)
for view in self.get_views('SimilarityMatrixView')]
# Show selected clusters when the matrix has been updated.
clusters = self.loader.get_clusters_selected()
return ('_show_selection_in_matrix', (clusters,))
def _update_feature_view(self, autozoom=None):
clu = self.loader.clusters_selected
# HACK: work around a bug with some GPU drivers and empty selections
if len(clu)==0:
return
data = vd.get_featureview_data(self.experiment,
clusters=clu,
autozoom=autozoom,
channel_group=self.loader.shank)
[view.set_data(**data) for view in self.get_views('FeatureView')]
def _update_waveform_view(self, autozoom=None, wizard=None):
clu = self.loader.clusters_selected
# HACK: work around a bug with some GPU drivers and empty selections
if len(clu)==0:
return
data = vd.get_waveformview_data(self.experiment,
clusters=clu,
autozoom=autozoom,
wizard=wizard,
channel_group=self.loader.shank
)
[view.set_data(**data) for view in self.get_views('WaveformView')]
def _update_trace_view(self):
data = vd.get_traceview_data(self.experiment,
channel_group=self.loader.shank)
[view.set_data(**data) for view in self.get_views('TraceView')]
def _update_cluster_view(self, clusters=None):
"""Update the cluster view using the data stored in the loader
object."""
data = vd.get_clusterview_data(self.experiment, self.statscache,
channel_group=self.loader.shank)
self.get_view('ClusterView').set_data(**data)
if clusters is not None:
return
def _show_selection_in_matrix(self, clusters):
if clusters is not None and 1 <= len(clusters) <= 2:
[view.show_selection(clusters[0], clusters[-1])
for view in self.get_views('SimilarityMatrixView')]
# Override colors.
# ----------------
def _override_color(self, override_color):
self.loader.set_override_color(override_color)
return ['_update_feature_view', '_update_waveform_view', '_update_correlograms_view']
# Change correlograms parameter.
# ------------------------------
def _change_correlograms_parameters(self, ncorrbins=None, corrbin=None):
# Update the correlograms parameters.
if ncorrbins is not None:
SETTINGS['correlograms.ncorrbins'] = ncorrbins
if corrbin is not None:
SETTINGS['correlograms.corrbin'] = corrbin
# Reset the cache.
self.statscache.reset(ncorrbins)
# Update the correlograms.
clusters = self.loader.get_clusters_selected()
return ('_compute_correlograms', (clusters,))
# Merge/split actions.
# --------------------
def _merge(self, clusters, wizard=False):
if len(clusters) >= 2:
action, output = self.controller.merge_clusters(clusters)
# Tell the next nodes whether the merge occurred after a wizard
# selection or not, so that the merged cluster background is
# highlighted or not.
output['wizard'] = wizard
return after_merge(output)
def _split(self, clusters, spikes_selected, wizard=False):
if len(spikes_selected) >= 1:
action, output = self.controller.split_clusters(clusters,
spikes_selected)
output['wizard'] = wizard
return after_split(output)
def _split2(self, spikes, clusters, wizard=False):
if len(spikes) >= 1:
action, output = self.controller.split2_clusters(spikes, clusters)
output['wizard'] = wizard
return after_split(output)
def _undo(self, wizard=False):
undo = self.controller.undo()
if undo is None:
return
action, output = undo
output['wizard'] = wizard
if action == 'merge_clusters_undo':
return after_merge_undo(output)
elif action == 'split_clusters_undo':
return after_split_undo(output)
elif action == 'split2_clusters_undo':
return after_split_undo(output)
elif action == 'change_cluster_color_undo':
return after_cluster_color_changed_undo(output)
elif action == 'change_group_color_undo':
return after_group_color_changed(output)
elif action == 'move_clusters_undo':
return after_clusters_moved_undo(output)
elif action == 'add_group_undo':
return after_group_added(output)
elif action == 'rename_group_undo':
return after_group_renamed(output)
elif action == 'remove_group_undo':
return after_group_removed(output)
def _redo(self, wizard=False):
redo = self.controller.redo()
if redo is None:
return
action, output = redo
output['wizard'] = wizard
if action == 'merge_clusters':
return after_merge(output)
elif action == 'split_clusters':
return after_split(output)
elif action == 'split2_clusters':
return after_split(output)
elif action == 'change_cluster_color':
return after_cluster_color_changed(output)
elif action == 'change_group_color':
return after_group_color_changed(output)
elif action == 'move_clusters':
return after_clusters_moved(output)
elif action == 'add_group':
return after_group_added(output)
elif action == 'rename_group':
return after_group_renamed(output)
elif action == 'remove_group':
return after_group_removed(output)
# Other actions.
# --------------
def _cluster_color_changed(self, cluster, color, wizard=True):
action, output = self.controller.change_cluster_color(cluster, color)
# if cluster == self.wizard.current_target():
output['wizard'] = wizard
return after_cluster_color_changed(output)
def _group_color_changed(self, group, color):
action, output = self.controller.change_group_color(group, color)
return after_group_color_changed(output)
def _group_renamed(self, group, name):
action, output = self.controller.rename_group(group, name)
return after_group_renamed(output)
def _clusters_moved(self, clusters, group, wizard=False,):
action, output = self.controller.move_clusters(clusters, group)
output['wizard'] = wizard
return after_clusters_moved(output)
def _group_removed(self, group):
action, output = self.controller.remove_group(group)
return after_group_removed(output)
def _group_added(self, group, name, color):
action, output = self.controller.add_group(group, name, color)
return after_group_added(output)
# Wizard.
# -------
def _wizard_update(self, target=None, update_matrix=True):
if update_matrix:
self.wizard.set_data(
cluster_groups=self.loader.get_cluster_groups('all'),
similarity_matrix=self.statscache.similarity_matrix_normalized,
)
else:
self.wizard.set_data(
cluster_groups=self.loader.get_cluster_groups('all'),
)
self.wizard.update_candidates(target)
def _wizard_change_color(self, clusters):
if clusters is not None:
# Set the background color in the cluster view for the wizard
# target and candidate.
self.get_view('ClusterView').set_background(
{cluster: {0: 'target', 1: 'candidate'}.get(i, None)
for i, cluster in enumerate(clusters[:2])})
def _wizard_change_candidate_color(self):
candidate = self.wizard.current_candidate()
target = self.wizard.current_target()
# color = self.loader.get_cluster_color(candidate)
return ('_cluster_color_changed', (candidate, random_color(),))
def _wizard_show_pair(self, target=None, candidate=None):
if target is None:
target = (self.wizard.current_target(),
get_array(self.loader.get_cluster_color(self.wizard.current_target()))[0])
if candidate is None:
try:
candidate = (self.wizard.current_candidate(),
get_array(self.loader.get_cluster_color(self.wizard.current_candidate()))[0])
# HACK: this can fail because when merging clusters, the merged
# cluster (candidate) is deleted, and its color does not exist
# anymore.
except:
candidate = (self.wizard.current_candidate(),
0)
[view.set_wizard_pair(target, candidate)
for view in self.get_views('FeatureView')]
# Navigation.
def _wizard_reset(self):
clusters = self.wizard.reset()
return ['_wizard_update', '_wizard_current_candidate']
def _wizard_previous_candidate(self):
clusters = self.wizard.previous_pair()
return after_wizard_selection(clusters)
def _wizard_current_candidate(self):
clusters = self.wizard.current_pair()
return after_wizard_selection(clusters)
def _wizard_next_candidate(self):
clusters = self.wizard.next_pair()
return after_wizard_selection(clusters)
def _wizard_skip_target(self):
# Skip the current target and go to the next target.
self.wizard.skip_target()
return [('_wizard_update', ()),
('_wizard_next_candidate',),]
def _wizard_reset_skipped(self):
self.wizard.reset_skipped()
# Control.
def _wizard_move_and_next(self, what, group):
"""Move target, candidate, or both, to a given group, and go to
the next proposition."""
# Current proposition.
clusters = self.wizard.current_pair()
if clusters is None:
return
target, candidate = clusters
# Select appropriate clusters to move.
if what == 'candidate':
clusters = [candidate]
# Keep the current target.
target_next = target
reset_skipped = False
elif what == 'target':
clusters = [target]
# Go to the next best target cluster.
target_next = None
reset_skipped = True
elif what == 'both':
clusters = [candidate, target]
# Go to the next best target cluster.
target_next = None
reset_skipped = True
# Move clusters, and select next proposition.
r = [('_clusters_moved', (clusters, group, True)),
]
if reset_skipped:
r += [('_wizard_reset_skipped',),]
r += [('_wizard_update', (target_next,)),
('_wizard_next_candidate',),
]
return r
# -----------------------------------------------------------------------------
# Tasks after actions
# -----------------------------------------------------------------------------
def union(*clusters_list):
return sorted(set([item for sublist in clusters_list for item in sublist]))
# Merge/split actions.
def after_merge(output):
if output.get('wizard', False):
r = [('_invalidate', (output['clusters_to_merge'],)),
# We specify here that the target in the wizard must be the
# merged cluster.
('_compute_similarity_matrix', (output['cluster_merged'],)),
('_update_cluster_view'),
('_select_in_cluster_view', (output['cluster_merged'], [], True)),
('_wizard_change_color', ([output['cluster_merged']],)),
('_wizard_show_pair', ((output['cluster_merged'],
output['cluster_merged_colors'][0]),)),
]
else:
r = [('_invalidate', (output['clusters_to_merge'],)),
('_compute_similarity_matrix',),
('_update_cluster_view'),
('_select_in_cluster_view', (output['cluster_merged'],)),
]
return r
def after_merge_undo(output):
clusters_to_invalidate = union(output['clusters_to_merge'], [output['cluster_merged']])
if output.get('wizard', False):
r = [('_invalidate', (clusters_to_invalidate,)),
('_compute_similarity_matrix', ()),
# Update the wizard, but not the similarity matrix yet which
# is being computed in an external process.
# ('_wizard_update', (None, False)),
('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters_to_merge'], [], True)),
('_wizard_change_color', (output['clusters_to_merge'],)),
('_wizard_show_pair', ((output['clusters_to_merge'][0],
output['cluster_to_merge_colors'][0]),
(output['clusters_to_merge'][1],
output['cluster_to_merge_colors'][1])),
),
]
else:
r = [('_invalidate', (clusters_to_invalidate,)),
('_compute_similarity_matrix', ()),
('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters_to_merge'],)),
]
return r
def after_split(output):
clusters_to_update = sorted(set(output['clusters_to_split']).union(set(
output['clusters_split'])) - set(output['clusters_empty']))
if output.get('wizard', False):
r = [('_invalidate', (output['clusters_to_split'],)),
('_compute_similarity_matrix', (True,)),
# Update the wizard, but not the similarity matrix yet which
# is being computed in an external process.
# ('_wizard_update', (True, False)),
('_update_cluster_view'),
('_select_in_cluster_view', (clusters_to_update, [], True)),
('_wizard_change_color', (output['clusters_to_split'],)),
]
else:
r = [ ('_invalidate', (output['clusters_to_split'],)),
('_compute_similarity_matrix', (True,)),
('_update_cluster_view'),
('_select_in_cluster_view', (clusters_to_update,)),
]
return r
def after_split_undo(output):
clusters_to_invalidate = union(output['clusters_to_split'], output['clusters_split'])
if output.get('wizard', False):
r = [('_invalidate', (clusters_to_invalidate,)),
('_compute_similarity_matrix', (True,)),
# Update the wizard, but not the similarity matrix yet which
# is being computed in an external process.
# ('_wizard_update', (True, False)),
('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters_to_split'], [], True)),
('_wizard_change_color', (output['clusters_to_split'],)),
]
else:
r = [('_invalidate', (clusters_to_invalidate,)),
('_compute_similarity_matrix', (True,)),
('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters_to_split'],)),
]
return r
# Other actions.
def after_cluster_color_changed(output):
if output.get('wizard', False):
return [('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters'], [], True)),
('_wizard_change_color', (output['clusters'],)),
('_wizard_show_pair',),# (output['cluster'],
# output['color_new'])),
]
else:
return [('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters'],)),
]
def after_cluster_color_changed_undo(output):
if output.get('wizard', False):
return [('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters'], [], True)),
('_wizard_change_color', (output['clusters'],)),
('_wizard_show_pair',),# (output['cluster'],
# output['color_old'])),
]
else:
return [('_update_cluster_view'),
('_select_in_cluster_view', (output['clusters'],)),
]
def after_group_color_changed(output):
return [('_update_cluster_view'),
('_select_in_cluster_view', ([],), dict(groups=output['groups']),),]
def after_clusters_moved(output):
r = [ ('_update_cluster_view'),
('_update_similarity_matrix_view'),
]
# If the wizard is active, it will be updated later so do not update it
# now.
if not output.get('wizard', False):
r += [('_wizard_update',),]
if 'next_cluster' in output:
clusters = [output['next_cluster']]
else:
clusters = output['clusters']
# When deleting clusters, select the next one in the same group.
r += [('_select_in_cluster_view', (clusters,)),]
return r
def after_clusters_moved_undo(output):
if 'next_cluster' in output:
clusters = [output['next_cluster']]
else:
clusters = output['clusters']
r = [('_update_cluster_view'),
('_update_similarity_matrix_view'),
('_wizard_update',),
('_select_in_cluster_view', (clusters,)),]
return r
def after_group_added(output):
return [('_update_cluster_view')]
def after_group_renamed(output):
return [('_update_cluster_view')]
def after_group_removed(output):
return [('_update_cluster_view')]
# Wizard.
def after_wizard_selection(clusters):
if clusters is None:
return None
else:
return [
('_select_in_cluster_view', (clusters, (), True)),
('_wizard_change_color', (clusters,)),
('_wizard_show_pair',),
]
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Command-line interface to the Neutron APIs
"""
from __future__ import print_function
import argparse
import getpass
import inspect
import itertools
import logging
import os
import sys
from keystoneclient.auth.identity import v2 as v2_auth
from keystoneclient.auth.identity import v3 as v3_auth
from keystoneclient import discover
from keystoneclient.openstack.common.apiclient import exceptions as ks_exc
from keystoneclient import session
from oslo_utils import encodeutils
import six.moves.urllib.parse as urlparse
from cliff import app
from cliff import commandmanager
from neutronclient.common import clientmanager
from neutronclient.common import command as openstack_command
from neutronclient.common import exceptions as exc
from neutronclient.common import extension as client_extension
from neutronclient.common import utils
from neutronclient.i18n import _
from neutronclient.neutron.v2_0 import agent
from neutronclient.neutron.v2_0 import agentscheduler
from neutronclient.neutron.v2_0 import credential
from neutronclient.neutron.v2_0 import extension
from neutronclient.neutron.v2_0 import floatingip
from neutronclient.neutron.v2_0.fw import firewall
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient.neutron.v2_0.fw import firewallrule
from neutronclient.neutron.v2_0.lb import healthmonitor as lb_healthmonitor
from neutronclient.neutron.v2_0.lb import member as lb_member
from neutronclient.neutron.v2_0.lb import pool as lb_pool
from neutronclient.neutron.v2_0.lb.v2 import healthmonitor as lbaas_healthmon
from neutronclient.neutron.v2_0.lb.v2 import listener as lbaas_listener
from neutronclient.neutron.v2_0.lb.v2 import loadbalancer as lbaas_loadbalancer
from neutronclient.neutron.v2_0.lb.v2 import member as lbaas_member
from neutronclient.neutron.v2_0.lb.v2 import pool as lbaas_pool
from neutronclient.neutron.v2_0.lb import vip as lb_vip
from neutronclient.neutron.v2_0 import metering
from neutronclient.neutron.v2_0.nec import packetfilter
from neutronclient.neutron.v2_0 import netpartition
from neutronclient.neutron.v2_0 import network
from neutronclient.neutron.v2_0 import networkprofile
from neutronclient.neutron.v2_0.nsx import networkgateway
from neutronclient.neutron.v2_0.nsx import qos_queue
from neutronclient.neutron.v2_0 import policyprofile
from neutronclient.neutron.v2_0 import port
from neutronclient.neutron.v2_0 import quota
from neutronclient.neutron.v2_0 import router
from neutronclient.neutron.v2_0 import securitygroup
from neutronclient.neutron.v2_0 import servicetype
from neutronclient.neutron.v2_0 import subnet
from neutronclient.neutron.v2_0 import subnetpool
from neutronclient.neutron.v2_0.vpn import ikepolicy
from neutronclient.neutron.v2_0.vpn import ipsec_site_connection
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.neutron.v2_0.vpn import vpnservice
from neutronclient.version import __version__
VERSION = '2.0'
NEUTRON_API_VERSION = '2.0'
def run_command(cmd, cmd_parser, sub_argv):
_argv = sub_argv
index = -1
values_specs = []
if '--' in sub_argv:
index = sub_argv.index('--')
_argv = sub_argv[:index]
values_specs = sub_argv[index:]
known_args, _values_specs = cmd_parser.parse_known_args(_argv)
if(isinstance(cmd, subnet.CreateSubnet) and not known_args.cidr):
cidr = get_first_valid_cidr(_values_specs)
if cidr:
known_args.cidr = cidr
_values_specs.remove(cidr)
cmd.values_specs = (index == -1 and _values_specs or values_specs)
return cmd.run(known_args)
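# Illustrative note (not in the original source): for an invocation such as
# ``neutron net-create mynet -- --key value`` everything from '--' onward is
# handed to the command untouched via cmd.values_specs, while the portion
# before '--' is parsed by cmd_parser into known_args.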
def get_first_valid_cidr(value_specs):
# Bug 1442771, argparse does not allow optional positional parameter
# to be separated from previous positional parameter.
# When cidr was separated from network, the value will not be able
# to be parsed into known_args, but saved to _values_specs instead.
for value in value_specs:
if utils.is_valid_cidr(value):
return value
def env(*_vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
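# Illustrative note: env('OS_TENANT_NAME', 'OS_PROJECT_NAME', default='demo')
# returns the first of those variables that is set and non-empty, or 'demo'
# when neither is defined.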
def check_non_negative_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError(_("invalid int value: %r") % value)
if value < 0:
raise argparse.ArgumentTypeError(_("input value %d is negative") %
value)
return value
class BashCompletionCommand(openstack_command.OpenStackCommand):
"""Prints all of the commands and options for bash-completion."""
resource = "bash_completion"
COMMAND_V2 = {
'bash-completion': BashCompletionCommand,
'net-list': network.ListNetwork,
'net-external-list': network.ListExternalNetwork,
'net-show': network.ShowNetwork,
'net-create': network.CreateNetwork,
'net-delete': network.DeleteNetwork,
'net-update': network.UpdateNetwork,
'subnet-list': subnet.ListSubnet,
'subnet-show': subnet.ShowSubnet,
'subnet-create': subnet.CreateSubnet,
'subnet-delete': subnet.DeleteSubnet,
'subnet-update': subnet.UpdateSubnet,
'subnetpool-list': subnetpool.ListSubnetPool,
'subnetpool-show': subnetpool.ShowSubnetPool,
'subnetpool-create': subnetpool.CreateSubnetPool,
'subnetpool-delete': subnetpool.DeleteSubnetPool,
'subnetpool-update': subnetpool.UpdateSubnetPool,
'port-list': port.ListPort,
'port-show': port.ShowPort,
'port-create': port.CreatePort,
'port-delete': port.DeletePort,
'port-update': port.UpdatePort,
'quota-list': quota.ListQuota,
'quota-show': quota.ShowQuota,
'quota-delete': quota.DeleteQuota,
'quota-update': quota.UpdateQuota,
'ext-list': extension.ListExt,
'ext-show': extension.ShowExt,
'router-list': router.ListRouter,
'router-port-list': port.ListRouterPort,
'router-show': router.ShowRouter,
'router-create': router.CreateRouter,
'router-delete': router.DeleteRouter,
'router-update': router.UpdateRouter,
'router-interface-add': router.AddInterfaceRouter,
'router-interface-delete': router.RemoveInterfaceRouter,
'router-gateway-set': router.SetGatewayRouter,
'router-gateway-clear': router.RemoveGatewayRouter,
'floatingip-list': floatingip.ListFloatingIP,
'floatingip-show': floatingip.ShowFloatingIP,
'floatingip-create': floatingip.CreateFloatingIP,
'floatingip-delete': floatingip.DeleteFloatingIP,
'floatingip-associate': floatingip.AssociateFloatingIP,
'floatingip-disassociate': floatingip.DisassociateFloatingIP,
'security-group-list': securitygroup.ListSecurityGroup,
'security-group-show': securitygroup.ShowSecurityGroup,
'security-group-create': securitygroup.CreateSecurityGroup,
'security-group-delete': securitygroup.DeleteSecurityGroup,
'security-group-update': securitygroup.UpdateSecurityGroup,
'security-group-rule-list': securitygroup.ListSecurityGroupRule,
'security-group-rule-show': securitygroup.ShowSecurityGroupRule,
'security-group-rule-create': securitygroup.CreateSecurityGroupRule,
'security-group-rule-delete': securitygroup.DeleteSecurityGroupRule,
'lbaas-loadbalancer-list': lbaas_loadbalancer.ListLoadBalancer,
'lbaas-loadbalancer-show': lbaas_loadbalancer.ShowLoadBalancer,
'lbaas-loadbalancer-create': lbaas_loadbalancer.CreateLoadBalancer,
'lbaas-loadbalancer-update': lbaas_loadbalancer.UpdateLoadBalancer,
'lbaas-loadbalancer-delete': lbaas_loadbalancer.DeleteLoadBalancer,
'lbaas-listener-list': lbaas_listener.ListListener,
'lbaas-listener-show': lbaas_listener.ShowListener,
'lbaas-listener-create': lbaas_listener.CreateListener,
'lbaas-listener-update': lbaas_listener.UpdateListener,
'lbaas-listener-delete': lbaas_listener.DeleteListener,
'lbaas-pool-list': lbaas_pool.ListPool,
'lbaas-pool-show': lbaas_pool.ShowPool,
'lbaas-pool-create': lbaas_pool.CreatePool,
'lbaas-pool-update': lbaas_pool.UpdatePool,
'lbaas-pool-delete': lbaas_pool.DeletePool,
'lbaas-healthmonitor-list': lbaas_healthmon.ListHealthMonitor,
'lbaas-healthmonitor-show': lbaas_healthmon.ShowHealthMonitor,
'lbaas-healthmonitor-create': lbaas_healthmon.CreateHealthMonitor,
'lbaas-healthmonitor-update': lbaas_healthmon.UpdateHealthMonitor,
'lbaas-healthmonitor-delete': lbaas_healthmon.DeleteHealthMonitor,
'lbaas-member-list': lbaas_member.ListMember,
'lbaas-member-show': lbaas_member.ShowMember,
'lbaas-member-create': lbaas_member.CreateMember,
'lbaas-member-update': lbaas_member.UpdateMember,
'lbaas-member-delete': lbaas_member.DeleteMember,
'lb-vip-list': lb_vip.ListVip,
'lb-vip-show': lb_vip.ShowVip,
'lb-vip-create': lb_vip.CreateVip,
'lb-vip-update': lb_vip.UpdateVip,
'lb-vip-delete': lb_vip.DeleteVip,
'lb-pool-list': lb_pool.ListPool,
'lb-pool-show': lb_pool.ShowPool,
'lb-pool-create': lb_pool.CreatePool,
'lb-pool-update': lb_pool.UpdatePool,
'lb-pool-delete': lb_pool.DeletePool,
'lb-pool-stats': lb_pool.RetrievePoolStats,
'lb-member-list': lb_member.ListMember,
'lb-member-show': lb_member.ShowMember,
'lb-member-create': lb_member.CreateMember,
'lb-member-update': lb_member.UpdateMember,
'lb-member-delete': lb_member.DeleteMember,
'lb-healthmonitor-list': lb_healthmonitor.ListHealthMonitor,
'lb-healthmonitor-show': lb_healthmonitor.ShowHealthMonitor,
'lb-healthmonitor-create': lb_healthmonitor.CreateHealthMonitor,
'lb-healthmonitor-update': lb_healthmonitor.UpdateHealthMonitor,
'lb-healthmonitor-delete': lb_healthmonitor.DeleteHealthMonitor,
'lb-healthmonitor-associate': lb_healthmonitor.AssociateHealthMonitor,
'lb-healthmonitor-disassociate': (
lb_healthmonitor.DisassociateHealthMonitor
),
'queue-create': qos_queue.CreateQoSQueue,
'queue-delete': qos_queue.DeleteQoSQueue,
'queue-show': qos_queue.ShowQoSQueue,
'queue-list': qos_queue.ListQoSQueue,
'agent-list': agent.ListAgent,
'agent-show': agent.ShowAgent,
'agent-delete': agent.DeleteAgent,
'agent-update': agent.UpdateAgent,
'net-gateway-create': networkgateway.CreateNetworkGateway,
'net-gateway-update': networkgateway.UpdateNetworkGateway,
'net-gateway-delete': networkgateway.DeleteNetworkGateway,
'net-gateway-show': networkgateway.ShowNetworkGateway,
'net-gateway-list': networkgateway.ListNetworkGateway,
'net-gateway-connect': networkgateway.ConnectNetworkGateway,
'net-gateway-disconnect': networkgateway.DisconnectNetworkGateway,
'gateway-device-create': networkgateway.CreateGatewayDevice,
'gateway-device-update': networkgateway.UpdateGatewayDevice,
'gateway-device-delete': networkgateway.DeleteGatewayDevice,
'gateway-device-show': networkgateway.ShowGatewayDevice,
'gateway-device-list': networkgateway.ListGatewayDevice,
'dhcp-agent-network-add': agentscheduler.AddNetworkToDhcpAgent,
'dhcp-agent-network-remove': agentscheduler.RemoveNetworkFromDhcpAgent,
'net-list-on-dhcp-agent': agentscheduler.ListNetworksOnDhcpAgent,
'dhcp-agent-list-hosting-net': agentscheduler.ListDhcpAgentsHostingNetwork,
'l3-agent-router-add': agentscheduler.AddRouterToL3Agent,
'l3-agent-router-remove': agentscheduler.RemoveRouterFromL3Agent,
'router-list-on-l3-agent': agentscheduler.ListRoutersOnL3Agent,
'l3-agent-list-hosting-router': agentscheduler.ListL3AgentsHostingRouter,
'lb-pool-list-on-agent': agentscheduler.ListPoolsOnLbaasAgent,
'lb-agent-hosting-pool': agentscheduler.GetLbaasAgentHostingPool,
'lbaas-loadbalancer-list-on-agent':
agentscheduler.ListLoadBalancersOnLbaasAgent,
'lbaas-agent-hosting-loadbalancer':
agentscheduler.GetLbaasAgentHostingLoadBalancer,
'service-provider-list': servicetype.ListServiceProvider,
'firewall-rule-list': firewallrule.ListFirewallRule,
'firewall-rule-show': firewallrule.ShowFirewallRule,
'firewall-rule-create': firewallrule.CreateFirewallRule,
'firewall-rule-update': firewallrule.UpdateFirewallRule,
'firewall-rule-delete': firewallrule.DeleteFirewallRule,
'firewall-policy-list': firewallpolicy.ListFirewallPolicy,
'firewall-policy-show': firewallpolicy.ShowFirewallPolicy,
'firewall-policy-create': firewallpolicy.CreateFirewallPolicy,
'firewall-policy-update': firewallpolicy.UpdateFirewallPolicy,
'firewall-policy-delete': firewallpolicy.DeleteFirewallPolicy,
'firewall-policy-insert-rule': firewallpolicy.FirewallPolicyInsertRule,
'firewall-policy-remove-rule': firewallpolicy.FirewallPolicyRemoveRule,
'firewall-list': firewall.ListFirewall,
'firewall-show': firewall.ShowFirewall,
'firewall-create': firewall.CreateFirewall,
'firewall-update': firewall.UpdateFirewall,
'firewall-delete': firewall.DeleteFirewall,
'cisco-credential-list': credential.ListCredential,
'cisco-credential-show': credential.ShowCredential,
'cisco-credential-create': credential.CreateCredential,
'cisco-credential-delete': credential.DeleteCredential,
'cisco-network-profile-list': networkprofile.ListNetworkProfile,
'cisco-network-profile-show': networkprofile.ShowNetworkProfile,
'cisco-network-profile-create': networkprofile.CreateNetworkProfile,
'cisco-network-profile-delete': networkprofile.DeleteNetworkProfile,
'cisco-network-profile-update': networkprofile.UpdateNetworkProfile,
'cisco-policy-profile-list': policyprofile.ListPolicyProfile,
'cisco-policy-profile-show': policyprofile.ShowPolicyProfile,
'cisco-policy-profile-update': policyprofile.UpdatePolicyProfile,
'ipsec-site-connection-list': (
ipsec_site_connection.ListIPsecSiteConnection
),
'ipsec-site-connection-show': (
ipsec_site_connection.ShowIPsecSiteConnection
),
'ipsec-site-connection-create': (
ipsec_site_connection.CreateIPsecSiteConnection
),
'ipsec-site-connection-update': (
ipsec_site_connection.UpdateIPsecSiteConnection
),
'ipsec-site-connection-delete': (
ipsec_site_connection.DeleteIPsecSiteConnection
),
'vpn-service-list': vpnservice.ListVPNService,
'vpn-service-show': vpnservice.ShowVPNService,
'vpn-service-create': vpnservice.CreateVPNService,
'vpn-service-update': vpnservice.UpdateVPNService,
'vpn-service-delete': vpnservice.DeleteVPNService,
'vpn-ipsecpolicy-list': ipsecpolicy.ListIPsecPolicy,
'vpn-ipsecpolicy-show': ipsecpolicy.ShowIPsecPolicy,
'vpn-ipsecpolicy-create': ipsecpolicy.CreateIPsecPolicy,
'vpn-ipsecpolicy-update': ipsecpolicy.UpdateIPsecPolicy,
'vpn-ipsecpolicy-delete': ipsecpolicy.DeleteIPsecPolicy,
'vpn-ikepolicy-list': ikepolicy.ListIKEPolicy,
'vpn-ikepolicy-show': ikepolicy.ShowIKEPolicy,
'vpn-ikepolicy-create': ikepolicy.CreateIKEPolicy,
'vpn-ikepolicy-update': ikepolicy.UpdateIKEPolicy,
'vpn-ikepolicy-delete': ikepolicy.DeleteIKEPolicy,
'meter-label-create': metering.CreateMeteringLabel,
'meter-label-list': metering.ListMeteringLabel,
'meter-label-show': metering.ShowMeteringLabel,
'meter-label-delete': metering.DeleteMeteringLabel,
'meter-label-rule-create': metering.CreateMeteringLabelRule,
'meter-label-rule-list': metering.ListMeteringLabelRule,
'meter-label-rule-show': metering.ShowMeteringLabelRule,
'meter-label-rule-delete': metering.DeleteMeteringLabelRule,
'nuage-netpartition-list': netpartition.ListNetPartition,
'nuage-netpartition-show': netpartition.ShowNetPartition,
'nuage-netpartition-create': netpartition.CreateNetPartition,
'nuage-netpartition-delete': netpartition.DeleteNetPartition,
'nec-packet-filter-list': packetfilter.ListPacketFilter,
'nec-packet-filter-show': packetfilter.ShowPacketFilter,
'nec-packet-filter-create': packetfilter.CreatePacketFilter,
'nec-packet-filter-update': packetfilter.UpdatePacketFilter,
'nec-packet-filter-delete': packetfilter.DeletePacketFilter,
}
COMMANDS = {'2.0': COMMAND_V2}
class HelpAction(argparse.Action):
"""Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
outputs = []
max_len = 0
app = self.default
parser.print_help(app.stdout)
app.stdout.write(_('\nCommands for API v%s:\n') % app.api_version)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
factory = ep.load()
cmd = factory(self, None)
one_liner = cmd.get_description().split('\n')[0]
outputs.append((name, one_liner))
max_len = max(len(name), max_len)
for (name, one_liner) in outputs:
app.stdout.write(' %s %s\n' % (name.ljust(max_len), one_liner))
sys.exit(0)
class NeutronShell(app.App):
# verbose logging levels
WARNING_LEVEL = 0
INFO_LEVEL = 1
DEBUG_LEVEL = 2
CONSOLE_MESSAGE_FORMAT = '%(message)s'
DEBUG_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
def __init__(self, apiversion):
super(NeutronShell, self).__init__(
description=__doc__.strip(),
version=VERSION,
command_manager=commandmanager.CommandManager('neutron.cli'), )
self.commands = COMMANDS
for k, v in self.commands[apiversion].items():
self.command_manager.add_command(k, v)
self._register_extensions(VERSION)
# Pop the 'complete' command so that the output of 'neutron help' is correct.
self.command_manager.commands.pop('complete')
# This is instantiated in initialize_app() only when using
# password flow auth
self.auth_client = None
self.api_version = apiversion
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = argparse.ArgumentParser(
description=description,
add_help=False, )
parser.add_argument(
'--version',
action='version',
version=__version__, )
parser.add_argument(
'-v', '--verbose', '--debug',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help=_('Increase verbosity of output and show tracebacks on'
' errors. You can repeat this option.'))
parser.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help=_('Suppress output except warnings and errors.'))
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help=_("Show this help message and exit."))
parser.add_argument(
'-r', '--retries',
metavar="NUM",
type=check_non_negative_int,
default=0,
help=_("How many times the request to the Neutron server should "
"be retried if it fails."))
# FIXME(bklei): this method should come from python-keystoneclient
self._append_global_identity_args(parser)
return parser
def _append_global_identity_args(self, parser):
# FIXME(bklei): these are global identity (Keystone) arguments which
# should be consistent and shared by all service clients. Therefore,
# they should be provided by python-keystoneclient. We will need to
# refactor this code once this functionality is available in
# python-keystoneclient.
#
# Note: At that time we'll need to decide if we can just abandon
# the deprecated args (--service-type and --endpoint-type).
parser.add_argument(
'--os-service-type', metavar='<os-service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('Defaults to env[OS_NETWORK_SERVICE_TYPE] or network.'))
parser.add_argument(
'--os-endpoint-type', metavar='<os-endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='publicURL'),
help=_('Defaults to env[OS_ENDPOINT_TYPE] or publicURL.'))
# FIXME(bklei): --service-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--service-type', metavar='<service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('DEPRECATED! Use --os-service-type.'))
# FIXME(bklei): --endpoint-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--endpoint-type', metavar='<endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='publicURL'),
help=_('DEPRECATED! Use --os-endpoint-type.'))
parser.add_argument(
'--os-auth-strategy', metavar='<auth-strategy>',
default=env('OS_AUTH_STRATEGY', default='keystone'),
help=_('DEPRECATED! Only keystone is supported.'))
parser.add_argument(
'--os_auth_strategy',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-auth-url', metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help=_('Authentication URL, defaults to env[OS_AUTH_URL].'))
parser.add_argument(
'--os_auth_url',
help=argparse.SUPPRESS)
project_name_group = parser.add_mutually_exclusive_group()
project_name_group.add_argument(
'--os-tenant-name', metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help=_('Authentication tenant name, defaults to '
'env[OS_TENANT_NAME].'))
project_name_group.add_argument(
'--os-project-name',
metavar='<auth-project-name>',
default=utils.env('OS_PROJECT_NAME'),
help='Another way to specify tenant name. '
'This option is mutually exclusive with '
' --os-tenant-name. '
'Defaults to env[OS_PROJECT_NAME].')
parser.add_argument(
'--os_tenant_name',
help=argparse.SUPPRESS)
project_id_group = parser.add_mutually_exclusive_group()
project_id_group.add_argument(
'--os-tenant-id', metavar='<auth-tenant-id>',
default=env('OS_TENANT_ID'),
help=_('Authentication tenant ID, defaults to '
'env[OS_TENANT_ID].'))
project_id_group.add_argument(
'--os-project-id',
metavar='<auth-project-id>',
default=utils.env('OS_PROJECT_ID'),
help='Another way to specify tenant ID. '
'This option is mutually exclusive with '
' --os-tenant-id. '
'Defaults to env[OS_PROJECT_ID].')
parser.add_argument(
'--os-username', metavar='<auth-username>',
default=utils.env('OS_USERNAME'),
help=_('Authentication username, defaults to env[OS_USERNAME].'))
parser.add_argument(
'--os_username',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-id', metavar='<auth-user-id>',
default=env('OS_USER_ID'),
help=_('Authentication user ID (Env: OS_USER_ID)'))
parser.add_argument(
'--os_user_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-id',
metavar='<auth-user-domain-id>',
default=utils.env('OS_USER_DOMAIN_ID'),
help='OpenStack user domain ID. '
'Defaults to env[OS_USER_DOMAIN_ID].')
parser.add_argument(
'--os_user_domain_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-name',
metavar='<auth-user-domain-name>',
default=utils.env('OS_USER_DOMAIN_NAME'),
help='OpenStack user domain name. '
'Defaults to env[OS_USER_DOMAIN_NAME].')
parser.add_argument(
'--os_user_domain_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-project-domain-id',
metavar='<auth-project-domain-id>',
default=utils.env('OS_PROJECT_DOMAIN_ID'),
help='Defaults to env[OS_PROJECT_DOMAIN_ID].')
parser.add_argument(
'--os-project-domain-name',
metavar='<auth-project-domain-name>',
default=utils.env('OS_PROJECT_DOMAIN_NAME'),
help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')
parser.add_argument(
'--os-cert',
metavar='<certificate>',
default=utils.env('OS_CERT'),
help=_("Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key. Defaults "
"to env[OS_CERT]."))
parser.add_argument(
'--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help=_("Specify a CA bundle file to use in "
"verifying a TLS (https) server certificate. "
"Defaults to env[OS_CACERT]."))
parser.add_argument(
'--os-key',
metavar='<key>',
default=utils.env('OS_KEY'),
help=_("Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your certificate "
"file. Defaults to env[OS_KEY]."))
parser.add_argument(
'--os-password', metavar='<auth-password>',
default=utils.env('OS_PASSWORD'),
help=_('Authentication password, defaults to env[OS_PASSWORD].'))
parser.add_argument(
'--os_password',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-region-name', metavar='<auth-region-name>',
default=env('OS_REGION_NAME'),
help=_('Authentication region name, defaults to '
'env[OS_REGION_NAME].'))
parser.add_argument(
'--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-token', metavar='<token>',
default=env('OS_TOKEN'),
help=_('Authentication token, defaults to env[OS_TOKEN].'))
parser.add_argument(
'--os_token',
help=argparse.SUPPRESS)
parser.add_argument(
'--http-timeout', metavar='<seconds>',
default=env('OS_NETWORK_TIMEOUT', default=None), type=float,
help=_('Timeout in seconds to wait for an HTTP response. Defaults '
'to env[OS_NETWORK_TIMEOUT] or None if not specified.'))
parser.add_argument(
'--os-url', metavar='<url>',
default=env('OS_URL'),
help=_('Defaults to env[OS_URL].'))
parser.add_argument(
'--os_url',
help=argparse.SUPPRESS)
parser.add_argument(
'--insecure',
action='store_true',
default=env('NEUTRONCLIENT_INSECURE', default=False),
help=_("Explicitly allow neutronclient to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution."))
def _bash_completion(self):
"""Prints all of the commands and options for bash-completion."""
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for command_name, command in self.command_manager:
commands.add(command_name)
cmd_factory = command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser('')
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
print(' '.join(commands | options))
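    # Note: this method backs the hidden "neutron bash-completion" pseudo-command
    # handled in run() below; the space-separated word list it prints is
    # presumably what a bash completion script feeds to compgen.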
def _register_extensions(self, version):
for name, module in itertools.chain(
client_extension._discover_via_entry_points()):
self._extend_shell_commands(module, version)
def _extend_shell_commands(self, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if (issubclass(cls, client_extension.NeutronClientExtension) and
hasattr(cls, 'shell_command')):
cmd = cls.shell_command
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
try:
self.command_manager.add_command(cmd, cls)
self.commands[version][cmd] = cls
except TypeError:
pass
def run(self, argv):
"""Equivalent to the main program for the application.
:param argv: input arguments and options
:paramtype argv: list of str
"""
try:
index = 0
command_pos = -1
help_pos = -1
help_command_pos = -1
for arg in argv:
if arg == 'bash-completion' and help_command_pos == -1:
self._bash_completion()
return 0
if arg in self.commands[self.api_version]:
if command_pos == -1:
command_pos = index
elif arg in ('-h', '--help'):
if help_pos == -1:
help_pos = index
elif arg == 'help':
if help_command_pos == -1:
help_command_pos = index
index = index + 1
if command_pos > -1 and help_pos > command_pos:
argv = ['help', argv[command_pos]]
if help_command_pos > -1 and command_pos == -1:
argv[help_command_pos] = '--help'
self.options, remainder = self.parser.parse_known_args(argv)
self.configure_logging()
self.interactive_mode = not remainder
self.initialize_app(remainder)
except Exception as err:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception(err)
raise
else:
self.log.error(err)
return 1
if self.interactive_mode:
_argv = [sys.argv[0]]
sys.argv = _argv
return self.interact()
return self.run_subcommand(remainder)
def run_subcommand(self, argv):
subcommand = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = subcommand
cmd = cmd_factory(self, self.options)
try:
self.prepare_to_run_command(cmd)
full_name = (cmd_name
if self.interactive_mode
else ' '.join([self.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
return run_command(cmd, cmd_parser, sub_argv)
except Exception as e:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception("%s", e)
raise
self.log.error("%s", e)
return 1
def authenticate_user(self):
"""Make sure the user has provided all of the authentication
info we need.
"""
if self.options.os_auth_strategy == 'keystone':
if self.options.os_token or self.options.os_url:
# Token flow auth takes priority
if not self.options.os_token:
raise exc.CommandError(
_("You must provide a token via"
" either --os-token or env[OS_TOKEN]"
" when providing a service URL"))
if not self.options.os_url:
raise exc.CommandError(
_("You must provide a service URL via"
" either --os-url or env[OS_URL]"
" when providing a token"))
else:
# Validate password flow auth
project_info = (self.options.os_tenant_name or
self.options.os_tenant_id or
(self.options.os_project_name and
(self.options.os_project_domain_name or
self.options.os_project_domain_id)) or
self.options.os_project_id)
if (not self.options.os_username
and not self.options.os_user_id):
raise exc.CommandError(
_("You must provide a username or user ID via"
" --os-username, env[OS_USERNAME] or"
" --os-user-id, env[OS_USER_ID]"))
if not self.options.os_password:
                    # No password; if we've got a tty, try prompting for it
if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
                        # Check for Ctrl-D
try:
self.options.os_password = getpass.getpass(
'OS Password: ')
except EOFError:
pass
                    # No password because we didn't have a tty or the
                    # user pressed Ctrl-D when prompted.
if not self.options.os_password:
raise exc.CommandError(
_("You must provide a password via"
" either --os-password or env[OS_PASSWORD]"))
if (not project_info):
                    # tenant is deprecated in Keystone v3. Use the latest
# terminology instead.
raise exc.CommandError(
_("You must provide a project_id or project_name ("
"with project_domain_name or project_domain_id) "
"via "
" --os-project-id (env[OS_PROJECT_ID])"
" --os-project-name (env[OS_PROJECT_NAME]),"
" --os-project-domain-id "
"(env[OS_PROJECT_DOMAIN_ID])"
" --os-project-domain-name "
"(env[OS_PROJECT_DOMAIN_NAME])"))
if not self.options.os_auth_url:
raise exc.CommandError(
_("You must provide an auth url via"
" either --os-auth-url or via env[OS_AUTH_URL]"))
auth_session = self._get_keystone_session()
auth = auth_session.auth
else: # not keystone
if not self.options.os_url:
raise exc.CommandError(
_("You must provide a service URL via"
" either --os-url or env[OS_URL]"))
auth_session = None
auth = None
self.client_manager = clientmanager.ClientManager(
token=self.options.os_token,
url=self.options.os_url,
auth_url=self.options.os_auth_url,
tenant_name=self.options.os_tenant_name,
tenant_id=self.options.os_tenant_id,
username=self.options.os_username,
user_id=self.options.os_user_id,
password=self.options.os_password,
region_name=self.options.os_region_name,
api_version=self.api_version,
auth_strategy=self.options.os_auth_strategy,
# FIXME (bklei) honor deprecated service_type and
# endpoint type until they are removed
service_type=self.options.os_service_type or
self.options.service_type,
            endpoint_type=self.options.os_endpoint_type or
            self.options.endpoint_type,
insecure=self.options.insecure,
ca_cert=self.options.os_cacert,
timeout=self.options.http_timeout,
retries=self.options.retries,
raise_errors=False,
session=auth_session,
auth=auth,
log_credentials=True)
return
def initialize_app(self, argv):
"""Global app init bits:
* set up API versions
* validate authentication info
"""
super(NeutronShell, self).initialize_app(argv)
self.api_version = {'network': self.api_version}
# If the user is not asking for help, make sure they
# have given us auth.
cmd_name = None
if argv:
cmd_info = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = cmd_info
if self.interactive_mode or cmd_name != 'help':
self.authenticate_user()
def configure_logging(self):
"""Create logging handlers for any log output."""
root_logger = logging.getLogger('')
# Set up logging to a file
root_logger.setLevel(logging.DEBUG)
# Send higher-level messages to the console via stderr
console = logging.StreamHandler(self.stderr)
console_level = {self.WARNING_LEVEL: logging.WARNING,
self.INFO_LEVEL: logging.INFO,
self.DEBUG_LEVEL: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
        # At the default verbosity the computed console level is INFO; in that
        # case set the console handler to WARNING to suppress routine messages.
        # This is equivalent to using "--quiet".
if console_level == logging.INFO:
console.setLevel(logging.WARNING)
else:
console.setLevel(console_level)
if logging.DEBUG == console_level:
formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
else:
formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
def get_v2_auth(self, v2_auth_url):
return v2_auth.Password(
v2_auth_url,
username=self.options.os_username,
password=self.options.os_password,
tenant_id=self.options.os_tenant_id,
tenant_name=self.options.os_tenant_name)
def get_v3_auth(self, v3_auth_url):
project_id = self.options.os_project_id or self.options.os_tenant_id
project_name = (self.options.os_project_name or
self.options.os_tenant_name)
return v3_auth.Password(
v3_auth_url,
username=self.options.os_username,
password=self.options.os_password,
user_id=self.options.os_user_id,
user_domain_name=self.options.os_user_domain_name,
user_domain_id=self.options.os_user_domain_id,
project_id=project_id,
project_name=project_name,
project_domain_name=self.options.os_project_domain_name,
project_domain_id=self.options.os_project_domain_id
)
def _discover_auth_versions(self, session, auth_url):
        # discover the API versions the server supports based on the
        # given URL
try:
ks_discover = discover.Discover(session=session, auth_url=auth_url)
return (ks_discover.url_for('2.0'), ks_discover.url_for('3.0'))
except ks_exc.ClientException:
            # The Identity service may not support API version discovery.
            # Let's try to figure out the API version from the original URL.
url_parts = urlparse.urlparse(auth_url)
(scheme, netloc, path, params, query, fragment) = url_parts
path = path.lower()
if path.startswith('/v3'):
return (None, auth_url)
elif path.startswith('/v2'):
return (auth_url, None)
else:
# not enough information to determine the auth version
msg = _('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url. Identity service may not support API '
'version discovery. Please provide a versioned '
'auth_url instead.')
raise exc.CommandError(msg)
def _get_keystone_session(self):
# first create a Keystone session
cacert = self.options.os_cacert or None
cert = self.options.os_cert or None
key = self.options.os_key or None
insecure = self.options.insecure or False
ks_session = session.Session.construct(dict(cacert=cacert,
cert=cert,
key=key,
insecure=insecure))
# discover the supported keystone versions using the given url
(v2_auth_url, v3_auth_url) = self._discover_auth_versions(
session=ks_session,
auth_url=self.options.os_auth_url)
# Determine which authentication plugin to use. First inspect the
# auth_url to see the supported version. If both v3 and v2 are
# supported, then use the highest version if possible.
user_domain_name = self.options.os_user_domain_name or None
user_domain_id = self.options.os_user_domain_id or None
project_domain_name = self.options.os_project_domain_name or None
project_domain_id = self.options.os_project_domain_id or None
domain_info = (user_domain_name or user_domain_id or
project_domain_name or project_domain_id)
if (v2_auth_url and not domain_info) or not v3_auth_url:
ks_session.auth = self.get_v2_auth(v2_auth_url)
else:
ks_session.auth = self.get_v3_auth(v3_auth_url)
return ks_session
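    # Reading aid (no new behavior): the selection above falls back to the v2
    # Password plugin only when the server advertises a v2 endpoint and no
    # domain information was supplied; in every other case the v3 plugin from
    # get_v3_auth() is used, since domain scoping requires the v3 API.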
def main(argv=sys.argv[1:]):
try:
return NeutronShell(NEUTRON_API_VERSION).run(
list(map(encodeutils.safe_decode, argv)))
except KeyboardInterrupt:
print("... terminating neutron client", file=sys.stderr)
return 130
except exc.NeutronClientException:
return 1
except Exception as e:
print(e)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import timedelta
from operator import attrgetter
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.attachments import AttachedItemsMixin
from indico.core.db.sqlalchemy.colors import ColorMixin, ColorTuple
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.core.db.sqlalchemy.locations import LocationMixin
from indico.core.db.sqlalchemy.notes import AttachedNotesMixin
from indico.core.db.sqlalchemy.protection import ProtectionManagersMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.db.sqlalchemy.util.queries import increment_and_get
from indico.modules.events.management.util import get_non_inheriting_objects
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.caching import memoize_request
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
def _get_next_friendly_id(context):
"""Get the next friendly id for a session."""
from indico.modules.events import Event
event_id = context.current_parameters['event_id']
assert event_id is not None
return increment_and_get(Event._last_friendly_session_id, Event.id == event_id)
class Session(DescriptionMixin, ColorMixin, ProtectionManagersMixin, LocationMixin, AttachedItemsMixin,
AttachedNotesMixin, db.Model):
__tablename__ = 'sessions'
__auto_table_args = (db.Index(None, 'friendly_id', 'event_id', unique=True),
{'schema': 'events'})
location_backref_name = 'sessions'
disallowed_protection_modes = frozenset()
inheriting_have_acl = True
default_colors = ColorTuple('#202020', '#e3f2d3')
allow_relationship_preloading = True
PRELOAD_EVENT_ATTACHED_ITEMS = True
PRELOAD_EVENT_NOTES = True
ATTACHMENT_FOLDER_ID_COLUMN = 'session_id'
possible_render_modes = {RenderMode.markdown}
default_render_mode = RenderMode.markdown
@declared_attr
def __table_args__(cls):
return auto_table_args(cls)
id = db.Column(
db.Integer,
primary_key=True
)
#: The human-friendly ID for the session
friendly_id = db.Column(
db.Integer,
nullable=False,
default=_get_next_friendly_id
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
type_id = db.Column(
db.Integer,
db.ForeignKey('events.session_types.id'),
index=True,
nullable=True
)
title = db.Column(
db.String,
nullable=False
)
code = db.Column(
db.String,
nullable=False,
default=''
)
default_contribution_duration = db.Column(
db.Interval,
nullable=False,
default=timedelta(minutes=20)
)
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'sessions',
primaryjoin='(Session.event_id == Event.id) & ~Session.is_deleted',
cascade='all, delete-orphan',
lazy=True
)
)
acl_entries = db.relationship(
'SessionPrincipal',
lazy=True,
cascade='all, delete-orphan',
collection_class=set,
backref='session'
)
blocks = db.relationship(
'SessionBlock',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'session',
lazy=False
)
)
type = db.relationship(
'SessionType',
lazy=True,
backref=db.backref(
'sessions',
lazy=True
)
)
# relationship backrefs:
# - attachment_folders (AttachmentFolder.session)
# - contributions (Contribution.session)
# - default_for_tracks (Track.default_session)
# - legacy_mapping (LegacySessionMapping.session)
# - note (EventNote.session)
def __init__(self, **kwargs):
# explicitly initialize this relationship with None to avoid
# an extra query to check whether there is an object associated
# when assigning a new one (e.g. during cloning)
kwargs.setdefault('note', None)
super(Session, self).__init__(**kwargs)
@classmethod
def preload_acl_entries(cls, event):
cls.preload_relationships(cls.query.with_parent(event), 'acl_entries')
@property
def location_parent(self):
return self.event
@property
def protection_parent(self):
return self.event
@property
def session(self):
"""Convenience property so all event entities have it"""
return self
@property
@memoize_request
def start_dt(self):
from indico.modules.events.sessions.models.blocks import SessionBlock
start_dt = (self.event.timetable_entries
.with_entities(TimetableEntry.start_dt)
.join('session_block')
.filter(TimetableEntry.type == TimetableEntryType.SESSION_BLOCK,
SessionBlock.session == self)
.order_by(TimetableEntry.start_dt)
.first())
return start_dt[0] if start_dt else None
@property
@memoize_request
def end_dt(self):
sorted_blocks = sorted(self.blocks, key=attrgetter('timetable_entry.end_dt'), reverse=True)
return sorted_blocks[0].timetable_entry.end_dt if sorted_blocks else None
@property
@memoize_request
def conveners(self):
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.persons import SessionBlockPersonLink
return (SessionBlockPersonLink.query
.join(SessionBlock)
.filter(SessionBlock.session_id == self.id)
.distinct(SessionBlockPersonLink.person_id)
.all())
@property
def is_poster(self):
return self.type.is_poster if self.type else False
@locator_property
def locator(self):
return dict(self.event.locator, session_id=self.id)
def get_non_inheriting_objects(self):
"""Get a set of child objects that do not inherit protection"""
return get_non_inheriting_objects(self)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', is_deleted=False, _text=self.title)
def can_manage_contributions(self, user, allow_admin=True):
"""Check whether a user can manage contributions within the session."""
from indico.modules.events.sessions.util import session_coordinator_priv_enabled
if user is None:
return False
elif self.session.can_manage(user, allow_admin=allow_admin):
return True
elif (self.session.can_manage(user, 'coordinate') and
session_coordinator_priv_enabled(self.event, 'manage-contributions')):
return True
else:
return False
def can_manage_blocks(self, user, allow_admin=True):
"""Check whether a user can manage session blocks.
This only applies to the blocks themselves, not to contributions inside them.
"""
from indico.modules.events.sessions.util import session_coordinator_priv_enabled
if user is None:
return False
        # A full session manager can always manage blocks. This also includes event managers and higher.
elif self.session.can_manage(user, allow_admin=allow_admin):
return True
        # A session coordinator can manage blocks if block management is allowed for coordinators.
elif (self.session.can_manage(user, 'coordinate') and
session_coordinator_priv_enabled(self.event, 'manage-blocks')):
return True
else:
return False
Session.register_location_events()
Session.register_protection_events()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import uuid
import ldap
import ldap.modlist
from oslo_config import cfg
from six.moves import range
from keystone import exception
from keystone.identity.backends import ldap as identity_ldap
from keystone.tests import unit as tests
from keystone.tests.unit import test_backend_ldap
CONF = cfg.CONF
def create_object(dn, attrs):
conn = ldap.initialize(CONF.ldap.url)
conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
ldif = ldap.modlist.addModlist(attrs)
conn.add_s(dn, ldif)
conn.unbind_s()
class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
def setUp(self):
self._ldap_skip_live()
super(LiveLDAPIdentity, self).setUp()
def _ldap_skip_live(self):
self.skip_if_env_not_set('ENABLE_LDAP_LIVE_TEST')
def clear_database(self):
devnull = open('/dev/null', 'w')
subprocess.call(['ldapdelete',
'-x',
'-D', CONF.ldap.user,
'-H', CONF.ldap.url,
'-w', CONF.ldap.password,
'-r', CONF.ldap.suffix],
stderr=devnull)
if CONF.ldap.suffix.startswith('ou='):
tree_dn_attrs = {'objectclass': 'organizationalUnit',
'ou': 'openstack'}
else:
tree_dn_attrs = {'objectclass': ['dcObject', 'organizationalUnit'],
'dc': 'openstack',
'ou': 'openstack'}
create_object(CONF.ldap.suffix, tree_dn_attrs)
create_object(CONF.ldap.user_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Users'})
create_object(CONF.ldap.role_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Roles'})
create_object(CONF.ldap.project_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Projects'})
create_object(CONF.ldap.group_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'UserGroups'})
def config_files(self):
config_files = super(LiveLDAPIdentity, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_liveldap.conf'))
return config_files
def test_build_tree(self):
"""Regression test for building the tree names
"""
# logic is different from the fake backend.
user_api = identity_ldap.UserApi(CONF)
self.assertTrue(user_api)
self.assertEqual(user_api.tree_dn, CONF.ldap.user_tree_dn)
def tearDown(self):
tests.TestCase.tearDown(self)
def test_ldap_dereferencing(self):
alt_users_ldif = {'objectclass': ['top', 'organizationalUnit'],
'ou': 'alt_users'}
alt_fake_user_ldif = {'objectclass': ['person', 'inetOrgPerson'],
'cn': 'alt_fake1',
'sn': 'alt_fake1'}
aliased_users_ldif = {'objectclass': ['alias', 'extensibleObject'],
'aliasedobjectname': "ou=alt_users,%s" %
CONF.ldap.suffix}
create_object("ou=alt_users,%s" % CONF.ldap.suffix, alt_users_ldif)
create_object("%s=alt_fake1,ou=alt_users,%s" %
(CONF.ldap.user_id_attribute, CONF.ldap.suffix),
alt_fake_user_ldif)
create_object("ou=alt_users,%s" % CONF.ldap.user_tree_dn,
aliased_users_ldif)
self.config_fixture.config(group='ldap',
query_scope='sub',
alias_dereferencing='never')
self.identity_api = identity_ldap.Identity()
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
'alt_fake1')
self.config_fixture.config(group='ldap',
alias_dereferencing='searching')
self.identity_api = identity_ldap.Identity()
user_ref = self.identity_api.get_user('alt_fake1')
self.assertEqual('alt_fake1', user_ref['id'])
self.config_fixture.config(group='ldap', alias_dereferencing='always')
self.identity_api = identity_ldap.Identity()
user_ref = self.identity_api.get_user('alt_fake1')
self.assertEqual('alt_fake1', user_ref['id'])
# FakeLDAP does not correctly process filters, so this test can only be
# run against a live LDAP server
def test_list_groups_for_user_filtered(self):
domain = self._get_domain_fixture()
test_groups = []
test_users = []
GROUP_COUNT = 3
USER_COUNT = 2
for x in range(0, USER_COUNT):
new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': domain['id']}
new_user = self.identity_api.create_user(new_user)
test_users.append(new_user)
positive_user = test_users[0]
negative_user = test_users[1]
for x in range(0, USER_COUNT):
group_refs = self.identity_api.list_groups_for_user(
test_users[x]['id'])
self.assertEqual(0, len(group_refs))
for x in range(0, GROUP_COUNT):
new_group = {'domain_id': domain['id'],
'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
test_groups.append(new_group)
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(x, len(group_refs))
self.identity_api.add_user_to_group(
positive_user['id'],
new_group['id'])
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(x + 1, len(group_refs))
group_refs = self.identity_api.list_groups_for_user(
negative_user['id'])
self.assertEqual(0, len(group_refs))
self.config_fixture.config(group='ldap', group_filter='(dn=xx)')
self.reload_backends(CONF.identity.default_domain_id)
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(0, len(group_refs))
group_refs = self.identity_api.list_groups_for_user(
negative_user['id'])
self.assertEqual(0, len(group_refs))
self.config_fixture.config(group='ldap',
group_filter='(objectclass=*)')
self.reload_backends(CONF.identity.default_domain_id)
group_refs = self.identity_api.list_groups_for_user(
positive_user['id'])
self.assertEqual(GROUP_COUNT, len(group_refs))
group_refs = self.identity_api.list_groups_for_user(
negative_user['id'])
self.assertEqual(0, len(group_refs))
def test_user_enable_attribute_mask(self):
self.config_fixture.config(
group='ldap',
user_enabled_emulation=False,
user_enabled_attribute='employeeType')
super(LiveLDAPIdentity, self).test_user_enable_attribute_mask()
def test_create_project_case_sensitivity(self):
# The attribute used for the live LDAP tests is case insensitive.
def call_super():
(super(LiveLDAPIdentity, self).
test_create_project_case_sensitivity())
self.assertRaises(exception.Conflict, call_super)
def test_create_user_case_sensitivity(self):
# The attribute used for the live LDAP tests is case insensitive.
def call_super():
super(LiveLDAPIdentity, self).test_create_user_case_sensitivity()
self.assertRaises(exception.Conflict, call_super)
def test_project_update_missing_attrs_with_a_falsey_value(self):
# The description attribute doesn't allow an empty value.
def call_super():
(super(LiveLDAPIdentity, self).
test_project_update_missing_attrs_with_a_falsey_value())
self.assertRaises(ldap.INVALID_SYNTAX, call_super)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script helps modifying exif tags in photos.
Copyright 2018 C Bhushan; Licensed under the Apache License v2.0.
https://github.com/cbhushan/script-collection
@author: C Bhushan
"""
import os
import sys
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
from datetime import datetime
from datetime import timedelta
import traceback
import argparse
import piexif
import math
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import print_metadata
def get_exif_dict(img_file):
try:
exif_dict = piexif.load(img_file)
return exif_dict
    except Exception:
        # piexif could not read EXIF data from this file
        return None
def offset_datetime(exif_dict, t_sec):
    '''Offset the EXIF date-time tags by t_sec seconds, based on DateTimeOriginal.
    All other time stamps are over-written. t_sec can be positive or negative.
    Tags updated:
    piexif.ImageIFD.DateTime
    piexif.ExifIFD.DateTimeDigitized
    piexif.ExifIFD.DateTimeOriginal
    '''
    curr_dt = exif_dict['Exif'][piexif.ExifIFD.DateTimeOriginal]
    if isinstance(curr_dt, bytes):
        # piexif returns ASCII tag values as bytes on Python 3
        curr_dt = curr_dt.decode('ascii')
    dt_obj = datetime.strptime(curr_dt, '%Y:%m:%d %H:%M:%S')
new_dt_obj = dt_obj + timedelta(seconds=t_sec)
new_dt = new_dt_obj.strftime('%Y:%m:%d %H:%M:%S')
exif_dict['Exif'][piexif.ExifIFD.DateTimeOriginal] = new_dt
exif_dict['Exif'][piexif.ExifIFD.DateTimeDigitized] = new_dt
exif_dict['0th'][piexif.ImageIFD.DateTime] = new_dt
new_dt_str = new_dt_obj.strftime('%Y%m%d_%H%M%S')
return (exif_dict, new_dt_str)
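# Illustrative sketch (not executed by this script; the file paths are
# hypothetical): shifting a photo's timestamps forward by one hour.
#
#   exif_dict = piexif.load('/path/to/photo.jpg')
#   exif_dict, new_dt_str = offset_datetime(exif_dict, 3600)
#   piexif.insert(piexif.dump(exif_dict), '/path/to/photo.jpg',
#                 '/tmp/' + new_dt_str + '_photo.jpg')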
def latlon_to_GPSdict(lat, lon):
def deg2dmstuple(degrees, second_decimal_digits=3):
degrees = float(degrees)
degs = math.floor(degrees)
minsFloat = (degrees - degs) * 60.0
mins = math.floor(minsFloat)
secsFloat = (minsFloat - mins) * 60.0
sec_fmt = 10 ** second_decimal_digits
secs = round(secsFloat * sec_fmt)
return ((int(degs), 1), (int(mins), 1), (int(secs), sec_fmt))
GPS_dict = {}
if lat < 0:
GPS_dict[piexif.GPSIFD.GPSLatitudeRef] = 'S'
else:
GPS_dict[piexif.GPSIFD.GPSLatitudeRef] = 'N'
if lon < 0:
GPS_dict[piexif.GPSIFD.GPSLongitudeRef] = 'W'
else:
GPS_dict[piexif.GPSIFD.GPSLongitudeRef] = 'E'
GPS_dict[piexif.GPSIFD.GPSLatitude] = deg2dmstuple(abs(lat))
GPS_dict[piexif.GPSIFD.GPSLongitude] = deg2dmstuple(abs(lon))
return GPS_dict
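# Worked example (approximate, for illustration): the Hawaii coordinates noted
# in the comments at the bottom of this file, lat=19.8968, lon=-155.5828, map to
#   GPSLatitudeRef  'N', GPSLatitude  ((19, 1), (53, 1), (48480, 1000))
#   GPSLongitudeRef 'W', GPSLongitude ((155, 1), (34, 1), (58080, 1000))
# i.e. degrees and minutes as integer rationals, seconds scaled by 10**3.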
def update_location(exif_dict, lat, lon, overwrite=True):
''' Lat, lon must be specified as floating point degree values.
"When adding GPS information to an image, it is important to set all of
the following tags: GPSLatitude, GPSLatitudeRef, GPSLongitude,
GPSLongitudeRef, and GPSAltitude and GPSAltitudeRef if the altitude
is known. ExifTool will write the required GPSVersionID tag automatically if
    a new GPS IFD is added to an image."
-- http://owl.phy.queensu.ca/~phil/exiftool/TagNames/GPS.html
'''
if not overwrite and piexif.GPSIFD.GPSLatitude in exif_dict['GPS']:
return exif_dict
GPS_dict = latlon_to_GPSdict(lat, lon)
GPS_dict[piexif.GPSIFD.GPSAltitudeRef] = 0 # measure from sea level
GPS_dict[piexif.GPSIFD.GPSAltitude] = (2300, 100) # 23 meters
for k, v in GPS_dict.items():
exif_dict['GPS'][k] = v
return exif_dict
def print_exif(img_file):
image = Image.open(img_file) # load an image through PIL's Image object
exif_data = print_metadata.get_exif_data(image)
exif_str, exif_dict = print_metadata.get_exif_str_dict(exif_data)
print(exif_str)
def save_with_updated_metadata(img_file, geo_tag, offset_time, out_file,
prefix_filename_with_date):
if geo_tag is None and offset_time is None:
return
exif_dict = piexif.load(img_file)
if offset_time is not None:
exif_dict, new_dt_str = offset_datetime(exif_dict, offset_time)
if prefix_filename_with_date:
hd, tl = os.path.split(out_file)
out_file = os.path.join(hd, new_dt_str + '_' + tl)
if geo_tag is not None:
exif_dict = update_location(exif_dict, geo_tag[0], geo_tag[1])
exif_bytes = piexif.dump(exif_dict)
piexif.insert(exif_bytes, img_file, out_file)
return out_file
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Modify exif metadata')
parser.add_argument('--geo-tag', metavar='xx.xx', nargs=2, type=float, required=False,
                        help='Add geo location. Inputs are latitude and longitude, in degrees')
parser.add_argument('--offset-time', metavar='xx.xx', type=float, required=False,
help='Offset date-time tags by N seconds')
parser.add_argument('-i', '--input', metavar='/path/to/file-or-folder', required=True,
help='File or folder name')
parser.add_argument('-o', '--output-dir', metavar='/path/to/folder', required=True,
                        help='Output folder name (must be an existing directory)')
parser.add_argument('--prefix-filename-with-date', default=False, action='store_true', required=False,
                        help='When used, the output filename is prefixed with the date-time. Only used with --offset-time.')
args = parser.parse_args()
if not os.path.isdir(args.output_dir):
raise ValueError('--output-dir must be an existing directory!')
output_dir = os.path.abspath(args.output_dir)
if os.path.isfile(args.input):
img_file = os.path.abspath(args.input)
hd, fn = os.path.split(img_file)
input_dir = os.path.abspath(hd)
if input_dir == output_dir:
raise ValueError('--output-dir must be a different directory from source-image directory!')
print('Original exif data: ')
print_exif(img_file)
out_file = os.path.join(output_dir, fn)
saved_file = save_with_updated_metadata(img_file, args.geo_tag, args.offset_time, out_file,
args.prefix_filename_with_date)
print('Updated exif data: ')
print_exif(saved_file)
elif os.path.isdir(args.input):
input_dir = os.path.abspath(args.input)
        files = next(os.walk(input_dir))[2]
files.sort()
if input_dir == output_dir:
raise ValueError('--output-dir must be a different directory from source-image directory!')
failed_processing = []
for fn in files:
rt, ext = os.path.splitext(fn)
ext = ext.lower()
if ext in ['.jpg', '.jpeg']:
print('%s...' % fn)
img_file = os.path.join(input_dir, fn)
out_file = os.path.join(output_dir, fn)
try:
saved_file = save_with_updated_metadata(img_file, args.geo_tag, args.offset_time,
out_file, args.prefix_filename_with_date)
                except Exception:
failed_processing.append(fn)
print('Error while processing:')
print(traceback.format_exc())
print('\n')
if len(failed_processing) > 0:
print('Following files failed processing: ')
for fn in failed_processing:
print(fn)
else:
        raise ValueError('--input must point to an existing file or folder!')
# dirname = '/mnt/data/tmp_dupe/test_folder'
# img_file = os.path.join(dirname, '5C2A4560.JPG')
# exif_dict = get_exif_dict(img_file)
#
# exif_dict, new_dt_str = offset_datetime(exif_dict, 2400*60)
# out_file = os.path.join(dirname, new_dt_str+'_5C2A4560.JPG')
#
#
#
# image = Image.open(img_file) # load an image through PIL's Image object
# e_data = print_metadata.get_exif_data(image)
# e_str, e_dict = print_metadata.get_exif_str_dict(e_data)
#
#
# im = Image.open('IMG_20180310_072313.jpg')
#
## save with no metadata
# data = list(im.getdata())
# image_clean = Image.new(im.mode, im.size)
# image_clean.putdata(data)
# image_clean.save("no-metadata_PIL.jpg") # this re-encodes the output image. Not good!
#
## This is fast, but saves "empty" metadata in it. Leaves behind XMP metadata if any
# piexif.remove('IMG_20180310_072313.jpg', 'no-metadata_piexif_remove.jpg')
#
## exiftool removes all metadata & reduces size a little as well
## GExiv2 solution: https://stackoverflow.com/a/19787239
#
#
## Modify exif,
## Hawaii coordinates: 19.8968 deg N, 155.5828 deg W
## NYC coordinates: 40.7128 deg N, 74.0060 deg W
# exif_dict = piexif.load(im.info["exif"])
# print(exif_dict.keys())
#
## list all GPS related keys:
# GPS_tags = vars(piexif.GPSIFD)
# for key, val in GPS_tags.items():
# if 'GPS' in key:
# store_val = None
# if val in exif_dict['GPS']:
# store_val = exif_dict['GPS'][val]
# print('%s : %s'%(key, str(store_val)))
#
# exif_data = get_exif_data(im) # PIL
#
#
# exif_bytes = piexif.dump(exif_dict)
# image_clean.save("only-exif.jpeg", exif=exif_bytes)
#
#
## modify exif info
# w, h = im.size
# exif_dict["0th"][piexif.ImageIFD.XResolution] = (w, 1)
# exif_dict["0th"][piexif.ImageIFD.YResolution] = (h, 1)
# exif_bytes = piexif.dump(exif_dict)
# im.save(new_file, "jpeg", exif=exif_bytes)
#
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import extra
from .. import scpi
import time
AmplitudeUnitsMapping = {'dBm' : 'dbm',
'watt' : 'w'}
DetectorType = set(['auto_peak', 'average', 'maximum_peak', 'minimum_peak', 'sample', 'rms'])
TraceType = set(['clear_write', 'maximum_hold', 'minimum_hold', 'video_average', 'view', 'store'])
VerticalScale = set(['linear', 'logarithmic'])
AcquisitionStatus = set(['complete', 'in_progress', 'unknown'])
ScreenshotImageFormatMapping = {
'pcl': 'pcl',
'cgm': 'cgm',
'gif': 'gif'}
class agilent86140B(ivi.Driver, extra.common.Screenshot, scpi.common.Memory):
"Agilent 86140B Series Optical Spectrum Analyzer Driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '86140B')
super(agilent86140B, self).__init__(*args, **kwargs)
self._memory_size = 10
self._trace_count = 1
self._level_amplitude_units = 'dBm'
self._acquisition_detector_type = 'sample'
self._acquisition_detector_type_auto = False
self._wavelength_start = 600.0e-9
self._wavelength_stop = 1700.0e-9
self._wavelength_offset = 0.0
self._acquisition_number_of_sweeps = 1
self._level_reference = 0.0
self._level_reference_offset = 0.0
self._sweep_coupling_resolution_bandwidth = 11e-9
self._sweep_coupling_resolution_bandwidth_auto = False
self._acquisition_sweep_mode_continuous = True
self._sweep_coupling_sweep_time = 1e-1
self._sweep_coupling_sweep_time_auto = False
self._trace_name = list()
self._trace_type = list()
self._acquisition_vertical_scale = 'logarithmic'
self._sweep_coupling_video_bandwidth = 1e2
self._sweep_coupling_video_bandwidth_auto = False
self._identity_description = "Agilent 86140B Series Optical Spectrum Analyzer Driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['86140B', '86141B', '86142B', '86143B',
'86144B', '86145B', '86146B']
self._add_property('level.amplitude_units',
self._get_level_amplitude_units,
self._set_level_amplitude_units,
None,
"""
Specifies the amplitude units for input, output and display amplitude.
""")
self._add_property('acquisition.detector_type',
self._get_acquisition_detector_type,
self._set_acquisition_detector_type,
None,
"""
Specifies the detection method used to capture and process the signal.
This governs the data acquisition for a particular sweep, but does not
have any control over how multiple sweeps are processed.
""")
self._add_property('acquisition.detector_type_auto',
self._get_acquisition_detector_type_auto,
self._set_acquisition_detector_type_auto,
None,
"""
If set to True, the detector type is automatically selected. The
relationship between Trace Type and Detector Type is not defined by the
specification when the Detector Type Auto is set to True. If set to False,
the detector type is manually selected.
""")
self._add_property('wavelength.start',
self._get_wavelength_start,
self._set_wavelength_start,
None,
"""
Specifies the left edge of the wavelength domain in meters. This is used in
conjunction with the Wavelength Stop attribute to define the wavelength
domain. If the Wavelength Start attribute value is equal to the Wavelength
Stop attribute value then the spectrum analyzer's horizontal attributes
are in time-domain.
""")
self._add_property('wavelength.stop',
self._get_wavelength_stop,
self._set_wavelength_stop,
None,
"""
Specifies the right edge of the wavelength domain in meters. This is used in
conjunction with the Wavelength Start attribute to define the wavelength
domain. If the Wavelength Start attribute value is equal to the Wavelength
Stop attribute value then the spectrum analyzer's horizontal attributes are
in time-domain.
""")
self._add_property('wavelength.offset',
self._get_wavelength_offset,
self._set_wavelength_offset,
None,
"""
Specifies an offset value, in meters, that is added to the wavelength
readout. This changes the driver's Wavelength Start and Wavelength Stop
attributes.
The equations relating the affected values are:
Wavelength Start = Actual Start Wavelength + Wavelength Offset
Wavelength Stop = Actual Stop Wavelength + Wavelength Offset
Marker Position = Actual Marker Wavelength + Wavelength Offset
""")
self._add_property('acquisition.number_of_sweeps',
self._get_acquisition_number_of_sweeps,
self._set_acquisition_number_of_sweeps,
None,
"""
This attribute defines the number of sweeps. This attribute value has no
effect if the Trace Type attribute is set to the value Clear Write.
""")
self._add_property('level.reference',
self._get_level_reference,
self._set_level_reference,
None,
"""
The calibrated vertical position of the captured data used as a reference
for amplitude measurements. This is typically set to a value slightly
higher than the highest expected signal level. The units are determined by
the Amplitude Units attribute.
""")
self._add_property('level.reference_offset',
self._get_level_reference_offset,
self._set_level_reference_offset,
None,
"""
Specifies an offset for the Reference Level attribute. This value is used
to adjust the reference level for external signal gain or loss. A
positive value corresponds to a gain while a negative number corresponds
to a loss. The value is in dB.
""")
self._add_property('sweep_coupling.resolution_bandwidth',
self._get_sweep_coupling_resolution_bandwidth,
self._set_sweep_coupling_resolution_bandwidth,
None,
"""
Specifies the width of the IF filter in Hertz. For more information see
Section 4.1.1, Sweep Coupling Overview.
""")
self._add_property('sweep_coupling.resolution_bandwidth_auto',
self._get_sweep_coupling_resolution_bandwidth_auto,
self._set_sweep_coupling_resolution_bandwidth_auto,
None,
"""
If set to True, the resolution bandwidth is automatically selected. If set
to False, the resolution bandwidth is manually selected.
""")
self._add_property('acquisition.sweep_mode_continuous',
self._get_acquisition_sweep_mode_continuous,
self._set_acquisition_sweep_mode_continuous,
None,
"""
                        If set to True, the sweep mode is continuous. If set to False, the sweep
mode is not continuous.
""")
self._add_property('sweep_coupling.sweep_time',
self._get_sweep_coupling_sweep_time,
self._set_sweep_coupling_sweep_time,
None,
"""
Specifies the length of time to sweep from the left edge to the right edge
of the current domain. The units are seconds.
""")
self._add_property('sweep_coupling.sweep_time_auto',
self._get_sweep_coupling_sweep_time_auto,
self._set_sweep_coupling_sweep_time_auto,
None,
"""
                        If set to True, the sweep time is automatically selected. If set to False,
the sweep time is manually selected.
""")
self._add_property('traces[].name',
self._get_trace_name,
None,
None,
"""
Returns the physical repeated capability identifier defined by the
specific driver for the trace that corresponds to the index that the user
specifies. If the driver defines a qualified trace name, this property
returns the qualified name.
""")
self._add_property('traces[].type',
self._get_trace_type,
self._set_trace_type,
None,
"""
Specifies the representation of the acquired data.
""")
self._add_property('acquisition.vertical_scale',
self._get_acquisition_vertical_scale,
self._set_acquisition_vertical_scale,
None,
"""
Specifies the vertical scale of the measurement hardware (use of log
amplifiers versus linear amplifiers).
""")
self._add_property('sweep_coupling.video_bandwidth',
self._get_sweep_coupling_video_bandwidth,
self._set_sweep_coupling_video_bandwidth,
None,
"""
Specifies the video bandwidth of the post-detection filter in Hertz.
""")
self._add_property('sweep_coupling.video_bandwidth_auto',
self._get_sweep_coupling_video_bandwidth_auto,
self._set_sweep_coupling_video_bandwidth_auto,
None,
"""
If set to True, the video bandwidth is automatically selected. If set to
False, the video bandwidth is manually selected.
""")
self._add_method('acquisition.abort',
self._acquisition_abort,
"""
This function aborts a previously initiated measurement and returns the
spectrum analyzer to the idle state. This function does not check
instrument status.
""")
self._add_method('acquisition.status',
self._acquisition_status,
"""
This function determines and returns the status of an acquisition.
""")
self._add_method('acquisition.configure',
self._acquisition_configure,
"""
This function configures the acquisition attributes of the spectrum
analyzer.
""")
self._add_method('wavelength.configure_center_span',
self._wavelength_configure_center_span,
"""
This function configures the wavelength range defining the center wavelength
and the wavelength span. If the span corresponds to zero meters, then the
spectrum analyzer operates in time-domain mode. Otherwise, the spectrum
analyzer operates in wavelength-domain mode.
This function modifies the Wavelength Start and Wavelength Stop attributes as
follows:
Wavelength Start = CenterWavelength - Span / 2
Wavelength Stop = CenterWavelength + Span / 2
""")
self._add_method('wavelength.configure_start_stop',
self._wavelength_configure_start_stop,
"""
This function configures the wavelength range defining its start wavelength
and its stop wavelength. If the start wavelength is equal to the stop
wavelength, then the spectrum analyzer operates in time-domain mode.
Otherwise, the spectrum analyzer operates in wavelength-domain mode.
""")
self._add_method('level.configure',
self._level_configure,
"""
This function configures the vertical attributes of the spectrum analyzer.
This corresponds to the Amplitude Units, Input Attenuation, Input
Impedance, Reference Level, and Reference Level Offset attributes.
""")
self._add_method('sweep_coupling.configure',
self._sweep_coupling_configure,
"""
This function configures the coupling and sweeping attributes. For
additional sweep coupling information refer to Section 4.1.1, Sweep
Coupling Overview.
""")
self._add_method('traces[].fetch_y',
self._trace_fetch_y,
"""
This function returns the trace the spectrum analyzer acquires. The trace
is from a previously initiated acquisition. The user calls the Initiate
function to start an acquisition. The user calls the Acquisition Status
function to determine when the acquisition is complete.
The user may call the Read Y Trace function instead of the Initiate
function. This function starts an acquisition, waits for the acquisition
to complete, and returns the trace in one function call.
The Amplitude array returns data that represents the amplitude of the
signals obtained by sweeping from the start wavelength to the stop wavelength
(in wavelength domain, in time domain the amplitude array is ordered from
beginning of sweep to end). The Amplitude Units attribute determines the
units of the points in the Amplitude array.
This function does not check the instrument status. The user calls the
Error Query function at the conclusion of the sequence to check the
instrument status.
""")
self._add_method('acquisition.initiate',
self._acquisition_initiate,
"""
This function initiates an acquisition. After calling this function, the
spectrum analyzer leaves the idle state.
This function does not check the instrument status. The user calls the
Acquisition Status function to determine when the acquisition is complete.
""")
self._add_method('traces[].read_y',
self._trace_read_y,
"""
This function initiates a signal acquisition based on the present
instrument configuration. It then waits for the acquisition to complete,
and returns the trace as an array of amplitude values. The amplitude array
returns data that represent the amplitude of the signals obtained by
sweeping from the start wavelength to the stop wavelength (in wavelength
domain, in time domain the amplitude array is ordered from beginning of
sweep to end). The Amplitude Units attribute determines the units of the
points in the amplitude array. This function resets the sweep count.
If the spectrum analyzer did not complete the acquisition within the time
period the user specified with the MaxTime parameter, the function returns
the Max Time Exceeded error.
""")
self._init_traces()
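    # Rough usage sketch only (the VISA resource address is hypothetical and the
    # polling assumes acquisition.status() eventually reports 'complete'):
    #
    #     osa = agilent86140B("TCPIP0::192.168.1.10::inst0::INSTR")
    #     osa.wavelength.configure_start_stop(1520e-9, 1570e-9)
    #     osa.acquisition.initiate()
    #     while osa.acquisition.status() != 'complete':
    #         time.sleep(0.5)
    #     amplitudes = osa.traces[0].fetch_y()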
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilent86140B, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
if self._driver_operation_simulate:
self._identity_instrument_manufacturer = "Not available while simulating"
self._identity_instrument_model = "Not available while simulating"
self._identity_instrument_firmware_revision = "Not available while simulating"
else:
lst = self._ask("*IDN?").split(",")
self._identity_instrument_manufacturer = lst[0]
self._identity_instrument_model = lst[1]
self._identity_instrument_firmware_revision = lst[3]
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
if not self._driver_operation_simulate:
error_code, error_message = self._ask(":system:error:next?").split(',')
error_code = int(error_code)
error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
code = int(self._ask("*TST?"))
if code != 0:
message = "Self test failed"
return (code, message)
def _utility_unlock_object(self):
pass
def _init_traces(self):
try:
super(agilent86140B, self)._init_traces()
except AttributeError:
pass
self._trace_name = list()
self._trace_type = list()
for i in range(self._trace_count):
self._trace_name.append("tr%c" % chr(i+ord('a')))
self._trace_type.append('')
self.traces._set_list(self._trace_name)
def _display_fetch_screenshot(self, format='gif'):
if self._driver_operation_simulate:
return b''
if format not in ScreenshotImageFormatMapping:
raise ivi.ValueNotSupportedException()
format = ScreenshotImageFormatMapping[format]
self._write("hcopy:device:language \"%s\"" % format)
self._write("hcopy:data?")
time.sleep(25)
return self._read_ieee_block()
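    # Illustrative sketch only (assumes the Screenshot extra exposes this as
    # display.fetch_screenshot and an "osa" instance as in the sketch above):
    #
    #     with open('osa_screen.gif', 'wb') as f:
    #         f.write(osa.display.fetch_screenshot('gif'))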
def _get_level_amplitude_units(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask("unit:pow?").lower()
self._level_amplitude_units = [k for k,v in AmplitudeUnitsMapping.items() if v==value][0]
self._set_cache_valid()
return self._level_amplitude_units
def _set_level_amplitude_units(self, value):
if value not in AmplitudeUnitsMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write("unit:pow %s" % AmplitudeUnitsMapping[value])
self._level_amplitude_units = value
self._set_cache_valid()
def _get_acquisition_detector_type(self):
return self._acquisition_detector_type
def _set_acquisition_detector_type(self, value):
if value not in DetectorType:
raise ivi.ValueNotSupportedException()
self._acquisition_detector_type = value
def _get_acquisition_detector_type_auto(self):
return self._acquisition_detector_type_auto
def _set_acquisition_detector_type_auto(self, value):
value = bool(value)
self._acquisition_detector_type_auto = value
def _get_wavelength_start(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._wavelength_start = float(self._ask("sense:wavelength:start?"))
self._set_cache_valid()
return self._wavelength_start
def _set_wavelength_start(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("sense:wavelength:start %e" % value)
self._wavelength_start = value
self._set_cache_valid()
self._set_cache_valid(False, 'sweep_coupling_resolution_bandwidth')
def _get_wavelength_stop(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._wavelength_stop = float(self._ask("sense:wavelength:stop?"))
self._set_cache_valid()
return self._wavelength_stop
def _set_wavelength_stop(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("sense:wavelength:stop %e" % value)
self._wavelength_stop = value
self._set_cache_valid()
self._set_cache_valid(False, 'sweep_coupling_resolution_bandwidth')
def _get_wavelength_offset(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._wavelength_offset = float(self._ask("sense:wavelength:offset?"))
self._set_cache_valid()
return self._wavelength_offset
def _set_wavelength_offset(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("sense:wavelength:offset %e" % value)
self._wavelength_offset = value
self._set_cache_valid()
def _get_acquisition_number_of_sweeps(self):
return self._acquisition_number_of_sweeps
def _set_acquisition_number_of_sweeps(self, value):
value = int(value)
self._acquisition_number_of_sweeps = value
def _get_level_reference(self):
return self._level_reference
def _set_level_reference(self, value):
value = float(value)
self._level_reference = value
def _get_level_reference_offset(self):
return self._level_reference_offset
def _set_level_reference_offset(self, value):
value = float(value)
self._level_reference_offset = value
def _get_sweep_coupling_resolution_bandwidth(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._sweep_coupling_resolution_bandwidth = float(self._ask("sense:bandwidth:resolution?"))
self._set_cache_valid()
return self._sweep_coupling_resolution_bandwidth
def _set_sweep_coupling_resolution_bandwidth(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("sense:bandwidth:resolution %e" % value)
self._sweep_coupling_resolution_bandwidth = value
self._set_cache_valid()
def _get_sweep_coupling_resolution_bandwidth_auto(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._sweep_coupling_resolution_bandwidth_auto = bool(int(self._ask("sense:bandwidth:resolution:auto?")))
self._set_cache_valid()
return self._sweep_coupling_resolution_bandwidth_auto
def _set_sweep_coupling_resolution_bandwidth_auto(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("sense:bandwidth:resolution:auto %d" % int(value))
self._sweep_coupling_resolution_bandwidth_auto = value
self._set_cache_valid()
def _get_acquisition_sweep_mode_continuous(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._acquisition_sweep_mode_continuous = bool(int(self._ask("initiate:continuous?")))
self._set_cache_valid()
return self._acquisition_sweep_mode_continuous
def _set_acquisition_sweep_mode_continuous(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("initiate:continuous %d" % int(value))
self._acquisition_sweep_mode_continuous = value
self._set_cache_valid()
def _get_sweep_coupling_sweep_time(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._sweep_coupling_sweep_time = float(self._ask("sense:sweep:time?"))
self._set_cache_valid()
return self._sweep_coupling_sweep_time
def _set_sweep_coupling_sweep_time(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("sense:sweep:time %e" % value)
self._sweep_coupling_sweep_time = value
self._set_cache_valid()
def _get_sweep_coupling_sweep_time_auto(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._sweep_coupling_sweep_time_auto = bool(int(self._ask("sense:sweep:time:auto?")))
self._set_cache_valid()
return self._sweep_coupling_sweep_time_auto
def _set_sweep_coupling_sweep_time_auto(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("sense:sweep:time:auto %d" % int(value))
self._sweep_coupling_sweep_time_auto = value
self._set_cache_valid()
def _get_trace_name(self, index):
index = ivi.get_index(self._trace_name, index)
return self._trace_name[index]
def _get_trace_type(self, index):
index = ivi.get_index(self._trace_name, index)
return self._trace_type[index]
def _set_trace_type(self, index, value):
index = ivi.get_index(self._trace_name, index)
if value not in TraceType:
raise ivi.ValueNotSupportedException()
self._trace_type[index] = value
def _get_acquisition_vertical_scale(self):
return self._acquisition_vertical_scale
def _set_acquisition_vertical_scale(self, value):
value = float(value)
self._acquisition_vertical_scale = value
def _get_sweep_coupling_video_bandwidth(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._sweep_coupling_video_bandwidth = float(self._ask("sense:bandwidth:video?"))
self._set_cache_valid()
return self._sweep_coupling_video_bandwidth
def _set_sweep_coupling_video_bandwidth(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write("sense:bandwidth:video %e" % value)
self._sweep_coupling_video_bandwidth = value
self._set_cache_valid()
def _get_sweep_coupling_video_bandwidth_auto(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._sweep_coupling_video_bandwidth_auto = bool(int(self._ask("sense:bandwidth:video:auto?")))
self._set_cache_valid()
return self._sweep_coupling_video_bandwidth_auto
def _set_sweep_coupling_video_bandwidth_auto(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("sense:bandwidth:video:auto %d" % int(value))
self._sweep_coupling_video_bandwidth_auto = value
self._set_cache_valid()
def _acquisition_abort(self):
pass
def _acquisition_status(self):
return 'unknown'
def _acquisition_configure(self, sweep_mode_continuous, number_of_sweeps, detector_type, vertical_scale):
self._set_acquisition_sweep_mode_continuous(sweep_mode_continuous)
self._set_acquisition_number_of_sweeps(number_of_sweeps)
if detector_type == 'auto' or not detector_type:
self._set_acquisition_detector_type_auto(True)
else:
self._set_acquisition_detector_type_auto(False)
self._set_acquisition_detector_type(detector_type)
self._set_acquisition_vertical_scale(vertical_scale)
def _wavelength_configure_center_span(self, center, span):
self._set_wavelength_start(center - span/2)
self._set_wavelength_stop(center + span/2)
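# worked example of the center/span conversion above (values are illustrative only):
# center=1550e-9, span=100e-9  ->  start = 1550e-9 - 50e-9 = 1500e-9, stop = 1600e-9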
def _wavelength_configure_start_stop(self, start, stop):
self._set_wavelength_start(start)
self._set_wavelength_stop(stop)
def _level_configure(self, amplitude_units, reference, reference_offset):
self._set_level_amplitude_units(amplitude_units)
self._set_level_reference(reference)
self._set_level_reference_offset(reference_offset)
def _sweep_coupling_configure(self, resolution_bandwidth, video_bandwidth, sweep_time):
if resolution_bandwidth == 'auto':
self._set_sweep_coupling_resolution_bandwidth_auto(True)
else:
self._set_sweep_coupling_resolution_bandwidth_auto(False)
self._set_sweep_coupling_resolution_bandwidth(resolution_bandwidth)
if video_bandwidth == 'auto':
self._set_sweep_coupling_video_bandwidth_auto(True)
else:
self._set_sweep_coupling_video_bandwidth_auto(False)
self._set_sweep_coupling_video_bandwidth(video_bandwidth)
if sweep_time == 'auto':
self._set_sweep_coupling_sweep_time_auto(True)
else:
self._set_sweep_coupling_sweep_time_auto(False)
self._set_sweep_coupling_sweep_time(sweep_time)
def _trace_fetch_y(self, index):
index = ivi.get_index(self._trace_name, index)
name = self._trace_name[index]
if self._driver_operation_simulate:
return list()
self._write('format:data ascii')
l = self._ask('trace:data:y? %s' % name)
data = list()
for p in l.split(','):
data.append(float(p))
return data
def _acquisition_initiate(self):
if not self._driver_operation_simulate:
self._write("initiate:immediate")
def _trace_read_y(self, index):
return self._trace_fetch_y(index)
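# A minimal usage sketch of the methods above (hedged: the concrete driver class name,
# its constructor resource string, and any public wrappers around these private methods
# are assumptions, not part of this file):
#
#   osa = SomeOsaDriver("TCPIP::192.168.1.10::INSTR")        # hypothetical class/resource
#   osa._wavelength_configure_start_stop(1.25e-6, 1.65e-6)   # sweep 1250 nm .. 1650 nm
#   osa._sweep_coupling_configure('auto', 'auto', 'auto')    # RBW, VBW, sweep time all auto
#   osa._acquisition_initiate()
#   amplitudes = osa._trace_fetch_y(0)                       # samples of the first trace ("tra")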
|
|
#!/usr/bin/python
"""nrvr.remote.ssh - Remote commands over ssh
Classes provided by this module include
* SshCommandException
* SshParameters
* SshCommand
The main class provided by this module is SshCommand.
On the downside, for now it
* reports back stdout and stderr indistinguishably, merged into one output stream.
Works only if module pty is available (e.g. in Python 2.6 on Linux, but not on Windows).
As implemented it works on Linux and requires the ssh command.
Nevertheless essential. To be improved as needed.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import os.path
import re
import signal
import sys
import time
from nrvr.process.commandcapture import CommandCapture
from nrvr.util.classproperty import classproperty
from nrvr.util.ipaddress import IPAddress
_gotPty = False
try:
import pty
_gotPty = True
except ImportError:
pass
class SshCommandException(Exception):
def __init__(self, message):
self._message = message
def __str__(self):
return unicode(self._message)
@property
def message(self):
return self._message
class SshParameters(object):
"""Parameters needed to connect to an ssh host.
Implemented to avoid verbosity and complexity of passing same information
many times across several uses each time in separate arguments."""
def __init__(self, ipaddress, user, pwd):
"""Create new SshParameters instance.
Example use::
exampleSshParameters = SshParameters("10.123.45.67", "joe", "redwood")
ipaddress
IP address or domain name.
user
a string.
pwd
a string or None."""
self.ipaddress = IPAddress.asString(ipaddress)
self.user = user
self.pwd = pwd
class SshCommand(object):
"""Send a command over ssh."""
_pwdPromptRegex = re.compile(r"(?i)password:")
_removeLeadingSpaceAndFirstNewlineRegex = re.compile(r"^\s*?\n(.*)$")
_acceptPromptRegex = re.compile(r"(?i)\(yes/no\)\?")
_acceptAnswer="yes\no"
_permissionDeniedRegex = re.compile(r"(?i)Permission\s+denied")
@classmethod
def commandsUsedInImplementation(cls):
"""Return a list to be passed to SystemRequirements.commandsRequired().
This class can be passed to SystemRequirements.commandsRequiredByImplementations()."""
return ["ssh", "ssh-keygen"]
def __init__(self, sshParameters, argv,
exceptionIfNotZero=True,
connectTimeoutSeconds=None,
maxConnectionRetries=10,
tickerForRetry=True,
checkForPermissionDenied=False):
"""Create new SshCommand instance.
Will wait until completed.
Captures returncode, and output.
Output may contain extraneous leading or trailing newlines and whitespace.
Example use::
example = SshCommand(exampleSshParameters, ["ls", "-al"])
print "returncode=" + str(example.returncode)
print "output=" + example.output
sshParameters
an SshParameters instance.
argv
list of command and arguments passed to ssh.
If given a string instead of a list, it is turned into a list with argv.split().
That may only work as expected for some commands on some platforms;
it should work for a command without arguments.
Hence, if you don't want the string split, pass it in wrapped as the sole item of a list."""
if not _gotPty:
# cannot use ssh if no pty
raise Exception("must have module pty available to use ssh command"
", which is known to be available in Python 2.6 on Linux, but not on Windows")
#
if isinstance(argv, basestring):
argv = argv.split()
maxConnectionRetries = int(maxConnectionRetries)
#
self._ipaddress = sshParameters.ipaddress
self._argv = argv
self._user = sshParameters.user
self._pwd = sshParameters.pwd
self._exceptionIfNotZero = exceptionIfNotZero
self._connectTimeoutSeconds = connectTimeoutSeconds
self._connectionRetriesRemaining = maxConnectionRetries if maxConnectionRetries else -1
self._output = ""
self._returncode = None
#
ticked = False
while self._connectionRetriesRemaining:
self._connectionRetriesRemaining -= 1
# fork and connect child to a pseudo-terminal
self._pid, self._fd = pty.fork()
if self._pid == 0:
# in child process
sshOptions = ["-l", self._user]
if connectTimeoutSeconds:
sshOptions.extend(["-o", "ConnectTimeout=" + str(connectTimeoutSeconds)])
sshOptions.append(self._ipaddress)
os.execvp("ssh", ["ssh"] + sshOptions + self._argv)
else:
# in parent process
if self._pwd:
# if given a password then apply
promptedForPassword = False
outputTillPrompt = ""
# look for password prompt
while not promptedForPassword:
try:
newOutput = os.read(self._fd, 1024)
if not len(newOutput):
# end has been reached
if not self._connectionRetriesRemaining:
# was raise Exception("unexpected end of output from ssh")
raise Exception("failing to connect via ssh\n" +
outputTillPrompt)
if tickerForRetry:
if not ticked:
# first time only printing
sys.stdout.write("retrying to connect via ssh [")
sys.stdout.write(".")
sys.stdout.flush()
ticked = True
break # break out of while not promptedForPassword:
# ssh has been observed returning "\r\n" for newline, but we want "\n"
newOutput = SshCommand._crLfRegex.sub("\n", newOutput)
outputTillPrompt += newOutput
if SshCommand._acceptPromptRegex.search(outputTillPrompt):
# e.g. "Are you sure you want to continue connecting (yes/no)? "
raise Exception("cannot proceed unless having accepted host key\n" +
outputTillPrompt +
'\nE.g. invoke SshCommand.acceptKnownHostKey(SshParameters("{0}",user,pwd)).'.format(self._ipaddress))
if SshCommand._pwdPromptRegex.search(outputTillPrompt):
# e.g. "10.123.45.67's password: "
promptedForPassword = True
except EnvironmentError:
# e.g. "@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @" and closing
raise Exception("failing to connect via ssh\n" +
outputTillPrompt)
if not promptedForPassword: # i.e. if got here from breaking out of while not promptedForPassword:
continue # continue at while self._connectionRetriesRemaining:
else: # promptedForPassword is normal
# if connecting then no more retries,
# maxConnectionRetries is meant for retrying connecting only
self._connectionRetriesRemaining = 0
os.write(self._fd, self._pwd + "\n")
# look for output
endOfOutput = False
outputSincePrompt = ""
try:
while not endOfOutput:
try:
newOutput = os.read(self._fd, 1024)
if len(newOutput):
outputSincePrompt += newOutput
else:
# end has been reached
endOfOutput = True
if checkForPermissionDenied:
# seen stderr "Permission denied, please try again."
# and a repeat of stdout "10.123.45.67's password: "
if len(outputSincePrompt) <= 128: # limit to early in output
if SshCommand._permissionDeniedRegex.search(outputSincePrompt) and SshCommand._pwdPromptRegex.search(outputSincePrompt):
os.kill(self._pid, signal.SIGKILL)
except EnvironmentError as e:
# some ideas maybe at http://bugs.python.org/issue5380
if e.errno == 5: # errno.EIO:
# seen when pty closes OSError: [Errno 5] Input/output error
endOfOutput = True
else:
# we accept what we got so far, for now
endOfOutput = True
finally:
# remove any leading space (maybe there after "password:" prompt) and
# remove first newline (is there after entering password and "\n")
self._output = re.sub(SshCommand._removeLeadingSpaceAndFirstNewlineRegex, r"\1", outputSincePrompt)
#
# get returncode
signalled = False
try:
ignorePidAgain, waitEncodedStatusIndication = os.waitpid(self._pid, 0)
if os.WIFEXITED(waitEncodedStatusIndication):
# normal exit(status) call
self._returncode = os.WEXITSTATUS(waitEncodedStatusIndication)
else:
# e.g. os.WIFSIGNALED or os.WIFSTOPPED
# less common case
signalled = True
self._returncode = -1
# raise an exception if asked to and there is a reason
exceptionMessage = ""
if signalled:
# less common case
exceptionMessage += "ssh did not exit normally"
elif self._exceptionIfNotZero and self._returncode:
exceptionMessage += "returncode: " + str(self._returncode)
if exceptionMessage:
commandDescription = "ipaddress: " + self._ipaddress
commandDescription += "\ncommand:\n\t" + self._argv[0]
if len(self._argv) > 1:
commandDescription += "\narguments:\n\t" + "\n\t".join(self._argv[1:])
else:
commandDescription += "\nno arguments"
commandDescription += "\nuser: " + self._user
exceptionMessage = commandDescription + "\n" + exceptionMessage
exceptionMessage += "\noutput:\n" + self._output
raise SshCommandException(exceptionMessage)
except OSError:
# supposedly can occur
self._returncode = -1
raise SshCommandException("ssh did not exit normally")
if ticked:
# final printing
sys.stdout.write("]\n")
sys.stdout.flush()
@property
def output(self):
"""Collected output string of command.
May contain extraneous leading or trailing newlines and whitespace."""
return self._output
@property
def returncode(self):
"""Returncode of command or 255 if an ssh error occurred.
Could be None."""
return self._returncode
# auxiliary
_crLfRegex = re.compile(r"\r\n")
_regexType = type(_crLfRegex)
@classproperty
def _knownHostFilePath(cls):
"""Path of the known_host file."""
return os.path.expanduser("~/.ssh/known_hosts")
@classmethod
def removeKnownHostKey(cls, ipaddress):
"""Remove line from ~/.ssh/known_hosts file."""
knownHostsFile = SshCommand._knownHostFilePath
ipaddress = IPAddress.asString(ipaddress)
if not os.path.exists(knownHostsFile):
# maybe file hasn't been created yet, nothing to do
return
with open (knownHostsFile, "r") as inputFile:
knownHostLines = inputFile.readlines()
ipaddressRegex = re.compile(r"^[ \t]*" + re.escape(ipaddress) + r"\s")
anyMatch = False
newKnownHostLines = []
for knownHostLine in knownHostLines:
if ipaddressRegex.search(knownHostLine):
# a match, don't copy it over
anyMatch = True
else:
# all others copy over
newKnownHostLines.append(knownHostLine)
if anyMatch:
with open (knownHostsFile, "w") as outputFile:
outputFile.writelines(newKnownHostLines)
if not anyMatch: # possibly not found as plain text because hashed
# within this case, an even more special case has been observed with stderr containing
# "invalid key:" and "not a valid known_hosts file" and returncode not zero,
# hence exceptionIfNotZero=True
sshKeygen = CommandCapture(["ssh-keygen",
"-f", knownHostsFile,
"-R", ipaddress],
copyToStdio=False,
exceptionIfNotZero=True, exceptionIfAnyStderr=False)
@classmethod
def acceptKnownHostKey(cls, sshParameters, connectTimeoutSeconds=None):
"""Accept host's key.
Will wait until completed.
sshParameters
an SshParameters instance; its user and pwd may be None, in which case
harmless dummy values are substituted."""
if not _gotPty:
# cannot use ssh if no pty
raise Exception("must have module pty available to use ssh command"
", which is known to be available in Python 2.6 on Linux, but not on Windows")
#
ipaddress = sshParameters.ipaddress
user = sshParameters.user
pwd = sshParameters.pwd
if user is None:
user = "dummy" # user "dummy" doesn't give away information about this script's user
pwd = None # don't give away information
if pwd is None:
pwd = "bye" # a dummy too
#
# remove any pre-existing key, if any
SshCommand.removeKnownHostKey(ipaddress)
#
# fork and connect child to a pseudo-terminal
pid, fd = pty.fork()
if pid == 0:
# in child process;
# user if given, real or dummy, doesn't give away information about this script's user;
sshOptions = ["-l", user]
if connectTimeoutSeconds:
sshOptions.extend(["-o", "ConnectTimeout=" + str(connectTimeoutSeconds)])
sshOptions.append(ipaddress)
# commands "sleep 1 ; exit" if it executes should be harmless
os.execvp("ssh", ["ssh"] + sshOptions + ['"sleep 1 ; exit"'])
else:
# in parent process
promptedForAccept = False # common case
promptedForPassword = False # less common case
outputTillPrompt = ""
# look for accept prompt
while not promptedForAccept and not promptedForPassword:
try:
newOutput = os.read(fd, 1024)
if not len(newOutput):
# end has been reached
# was raise Exception("unexpected end of output from ssh")
raise Exception("failing to connect via ssh\n" +
outputTillPrompt)
# ssh has been observed returning "\r\n" for newline, but we want "\n"
newOutput = SshCommand._crLfRegex.sub("\n", newOutput)
outputTillPrompt += newOutput
if SshCommand._acceptPromptRegex.search(outputTillPrompt):
# e.g. "Are you sure you want to continue connecting (yes/no)? "
# common case
promptedForAccept = True
if SshCommand._pwdPromptRegex.search(outputTillPrompt):
# e.g. "10.123.45.67's password: "
# which has been observed when apparently an alternative way of storing and accepting host keys was in effect,
# if it gets here it works and hence let it pass,
# less common case
promptedForPassword = True
except EnvironmentError:
# e.g. "@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @" and closing
raise Exception("failing to connect via ssh\n" +
outputTillPrompt)
if promptedForAccept:
# do a special dance here to avoid being quicker to next invocation than
# this invocation takes to get around to writing known_hosts file,
# which would cause only one of the ssh invocations to write known_hosts file,
# which has been observed as a problem in bulk processing
startTime = time.time()
knownHostsFile = SshCommand._knownHostFilePath
if os.path.exists(knownHostsFile):
# normal case
originalModificationTime = os.path.getctime(knownHostsFile)
else:
# maybe file hasn't been created yet
originalModificationTime = startTime
if originalModificationTime > startTime:
# fix impossible future time
os.utime(knownHostsFile, (startTime, startTime))
while originalModificationTime == startTime:
# wait to make sure modification will be after originalModificationTime
time.sleep(0.1)
startTime = time.time()
# actually accept, one line in the middle of the special dance
os.write(fd, SshCommand._acceptAnswer)
# continue special dance
looksDone = False
while not looksDone:
if os.path.exists(knownHostsFile):
# normal case
currentModificationTime = os.path.getctime(knownHostsFile)
else:
# maybe file hasn't been created yet
currentModificationTime = originalModificationTime
if currentModificationTime != originalModificationTime:
# has been modified
looksDone = True
break
currentTime = time.time()
if currentTime - startTime > 3.0:
# don't want to block forever, done or not
looksDone = True
break
# sleep
time.sleep(0.1)
# NOT os.close(fd) because has been observed to prevent ssh writing known_hosts file,
# instead enter a password, real or dummy, to accelerate closing of ssh port
os.write(fd, pwd + "\n")
@classmethod
def isAvailable(cls, sshParameters,
probingCommand="hostname"):
"""Return whether probingCommand succeeds.
Will wait until completed."""
try:
sshCommand = SshCommand(sshParameters,
argv=probingCommand,
maxConnectionRetries=1)
return True
except Exception as e:
return False
@classmethod
def sleepUntilIsAvailable(cls, sshParameters,
checkIntervalSeconds=5.0, ticker=False,
probingCommand="hostname"):
"""If available return, else loop sleeping for checkIntervalSeconds."""
printed = False
ticked = False
# check the essential condition, initially and then repeatedly
while not SshCommand.isAvailable(sshParameters,
probingCommand=probingCommand):
if not printed:
# first time only printing
print "waiting for ssh to be available to connect to " + IPAddress.asString(sshParameters.ipaddress)
sys.stdout.flush()
printed = True
if ticker:
if not ticked:
# first time only printing
sys.stdout.write("[")
sys.stdout.write(".")
sys.stdout.flush()
ticked = True
time.sleep(checkIntervalSeconds)
if ticked:
# final printing
sys.stdout.write("]\n")
sys.stdout.flush()
@classmethod
def hasAcceptedKnownHostKey(cls, sshParameters):
"""Return whether an attempt to acceptKnownHostKey() succeeds.
Will wait until completed with success or failure.
sshParameters
an SshParameters instance to use in the attempt.
return
whether success."""
try:
SshCommand.acceptKnownHostKey(sshParameters=sshParameters)
return True
except Exception as e:
return False
@classmethod
def sleepUntilHasAcceptedKnownHostKey(cls, sshParameters,
checkIntervalSeconds=3.0, ticker=False,
extraSleepSeconds=5.0):
"""If available return, else loop sleeping for checkIntervalSeconds.
sshParameters
an SshParameters instance to use in the attempts."""
printed = False
ticked = False
# check the essential condition, initially and then repeatedly
while not SshCommand.hasAcceptedKnownHostKey(sshParameters=sshParameters):
if not printed:
# first time only printing
print "waiting for ssh to be available to get host key from " + IPAddress.asString(sshParameters.ipaddress)
sys.stdout.flush()
printed = True
if ticker:
if not ticked:
# first time only printing
sys.stdout.write("[")
sys.stdout.write(".")
sys.stdout.flush()
ticked = True
time.sleep(checkIntervalSeconds)
if ticked:
# final printing
sys.stdout.write("]\n")
sys.stdout.flush()
if extraSleepSeconds:
time.sleep(extraSleepSeconds)
if __name__ == "__main__":
from nrvr.util.requirements import SystemRequirements
SystemRequirements.commandsRequiredByImplementations([SshCommand], verbose=True)
#
SshCommand.removeKnownHostKey("localhost")
SshCommand.acceptKnownHostKey(SshParameters("localhost", "i", "madeitup"))
# fictional address
_exampleSshParameters = SshParameters("10.123.45.67", "root", "redwood")
# _sshExample1 = SshCommand(_exampleSshParameters, "hostname")
# print "returncode=" + str(_sshExample1.returncode)
# print "output=" + _sshExample1.output
# _sshExample2 = SshCommand(_exampleSshParameters, ["ls"])
# print "returncode=" + str(_sshExample2.returncode)
# print "output=" + _sshExample2.output
# _sshExample3 = SshCommand(_exampleSshParameters, ["ls", "-al"])
# print "returncode=" + str(_sshExample3.returncode)
# print "output=" + _sshExample3.output
# _sshExample4 = SshCommand(_exampleSshParameters, ["ls", "doesntexist"], exceptionIfNotZero=False)
# print "returncode=" + str(_sshExample4.returncode)
# print "output=" + _sshExample4.output
# _sshExample5 = SshCommand(_exampleSshParameters, ["ls", "doesntexist"])
# print "returncode=" + str(_sshExample5.returncode)
# print "output=" + _sshExample5.output
class ScpCommandException(SshCommandException):
def __init__(self, message):
SshCommandException.__init__(self, message)
class ScpCommand(object):
"""Copy a file or files via scp."""
@classmethod
def commandsUsedInImplementation(cls):
"""Return a list to be passed to SystemRequirements.commandsRequired().
This class can be passed to SystemRequirements.commandsRequiredByImplementations()."""
return ["scp"]
_pwdPromptRegex = re.compile(re.escape(r"password:"))
_acceptPromptRegex = re.compile(re.escape(r"(yes/no)?"))
def __init__(self,
fromPath, toPath,
fromSshParameters=None, toSshParameters=None,
recurseDirectories=False,
preserveTimes=True):
"""Create new ScpCommand instance.
Will wait until completed.
Captures returncode, and output.
Either fromPath or toPath is expected to be local, i.e. without user and without IP address.
Correspondingly either fromSshParameters or toSshParameters must NOT be assigned an SshParameters
instance and remain default None.
fromPath
one path or a list of paths.
Absolute paths strongly recommended.
toPath
one path.
Absolute path strongly recommended.
Must be directory if more than one fromPath.
fromSshParameters
an SshParameters instance.
toSshParameters
an SshParameters instance.
recurseDirectories
a hint for when copying from remote (fromSshParameters);
when copying to remote, it is enabled automatically if any fromPath is a directory."""
if not _gotPty:
# cannot use scp if no pty
raise Exception("must have module pty available to use scp command"
", which is known to be available in Python 2.6 on Linux, but not on Windows")
#
if fromSshParameters and toSshParameters:
raise Exception("cannot copy if both fromSshParameters and toSshParameters, only one or other")
if not fromSshParameters and not toSshParameters:
raise Exception("cannot copy if neither fromSshParameters nor toSshParameters, requires one or other")
#
if not isinstance(fromPath, (list, tuple)): # should be one string for one path to copy from
fromPaths = [fromPath]
else: # should be a list of strings for multiple paths to copy from
fromPaths = fromPath
if len(fromPaths) == 0:
raise Exception("cannot copy zero files, requires at least one")
if fromSshParameters: # get files from remote
if len(fromPaths) > 1 or recurseDirectories:
if not os.path.isdir(toPath):
raise Exception("cannot copy multiple files into a file, must copy into a directory, not into %s" % toPath)
self._fromSpecification = \
[fromSshParameters.user + "@" + IPAddress.asString(fromSshParameters.ipaddress) + ":" + " ".join(fromPaths)]
self._toSpecification = toPath
self._ipaddress = fromSshParameters.ipaddress
self._pwd = fromSshParameters.pwd
else: # put files to remote
anyFromDirectory = False
for path in fromPaths:
if os.path.isdir(path):
anyFromDirectory = True
break
if anyFromDirectory:
recurseDirectories = True # mandatory in this case
self._fromSpecification = fromPaths
self._toSpecification = \
toSshParameters.user + "@" + IPAddress.asString(toSshParameters.ipaddress) + ":" + toPath
self._ipaddress = toSshParameters.ipaddress
self._pwd = toSshParameters.pwd
self._args = ["scp"]
if preserveTimes:
self._args.append("-p")
if recurseDirectories:
self._args.append("-r")
self._args.extend(self._fromSpecification) # a list because possibly more than one
self._args.append(self._toSpecification)
#
self._output = ""
self._returncode = None
#
# fork and connect child to a pseudo-terminal
self._pid, self._fd = pty.fork()
if self._pid == 0:
# in child process
os.execvp("scp", self._args)
else:
# in parent process
if self._pwd:
# if given a password then apply
promptedForPassword = False
outputTillPrompt = ""
# look for password prompt
while not promptedForPassword:
try:
newOutput = os.read(self._fd, 1024)
if not len(newOutput):
# end has been reached
# was raise Exception("unexpected end of output from scp")
raise Exception("failing to connect for scp\n" +
outputTillPrompt)
# ssh has been observed returning "\r\n" for newline, but we want "\n"
newOutput = SshCommand._crLfRegex.sub("\n", newOutput)
outputTillPrompt += newOutput
if SshCommand._acceptPromptRegex.search(outputTillPrompt):
# e.g. "Are you sure you want to continue connecting (yes/no)? "
raise Exception("cannot proceed unless having accepted host key\n" +
outputTillPrompt +
'\nE.g. invoke SshCommand.acceptKnownHostKey(SshParameters("{0}",user,pwd)).'.format(self._ipaddress))
if SshCommand._pwdPromptRegex.search(outputTillPrompt):
# e.g. "10.123.45.67's password: "
promptedForPassword = True
except EnvironmentError:
# e.g. "@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @" and closing
raise Exception("failing to connect for scp\n" +
outputTillPrompt)
os.write(self._fd, self._pwd + "\n")
# look for output
endOfOutput = False
outputSincePrompt = ""
try:
while not endOfOutput:
try:
newOutput = os.read(self._fd, 1024)
if len(newOutput):
outputSincePrompt += newOutput
else:
# end has been reached
endOfOutput = True
except EnvironmentError as e:
# some ideas maybe at http://bugs.python.org/issue5380
if e.errno == 5: # errno.EIO:
# seen when pty closes OSError: [Errno 5] Input/output error
endOfOutput = True
else:
# we accept what we got so far, for now
endOfOutput = True
finally:
# remove any leading space (maybe there after "password:" prompt) and
# remove first newline (is there after entering password and "\n")
self._output = re.sub(r"^\s*?\n(.*)$", r"\1", outputSincePrompt)
#
# get returncode
try:
ignorePidAgain, waitEncodedStatusIndication = os.waitpid(self._pid, 0)
if os.WIFEXITED(waitEncodedStatusIndication):
# normal exit(status) call
self._returncode = os.WEXITSTATUS(waitEncodedStatusIndication)
# raise an exception if there is a reason
exceptionMessage = ""
if self._returncode:
exceptionMessage += "returncode: " + str(self._returncode)
if exceptionMessage:
commandDescription = "scp from:\n\t" + str(self._fromSpecification)
commandDescription += "\nto:\n\t" + self._toSpecification
commandDescription += "\nargs:\n\t" + str(self._args)
exceptionMessage = commandDescription + "\n" + exceptionMessage
exceptionMessage += "\noutput:\n" + self._output
raise ScpCommandException(exceptionMessage)
else:
# e.g. os.WIFSIGNALED or os.WIFSTOPPED
self._returncode = -1
raise ScpCommandException("scp did not exit normally")
except OSError:
# supposedly can occur
self._returncode = -1
raise ScpCommandException("scp did not exit normally")
@property
def output(self):
"""Collected output string of scp command.
May contain extraneous leading or trailing newlines and whitespace."""
return self._output
@property
def returncode(self):
"""Returncode of command or 255 if an scp error occurred.
Could be None."""
return self._returncode
@classmethod
def put(cls,
fromLocalPath, toSshParameters, toRemotePath,
preserveTimes=True):
"""Return an ScpCommand instance.
Will wait until completed.
fromLocalPath
one path or a list of paths.
Absolute paths strongly recommended.
toSshParameters
an SshParameters instance for remote."""
scpCommand = ScpCommand(fromPath=fromLocalPath, toPath=toRemotePath, toSshParameters=toSshParameters,
preserveTimes=preserveTimes)
return scpCommand
@classmethod
def get(cls,
fromSshParameters, fromRemotePath, toLocalPath,
recurseDirectories=False, preserveTimes=True):
"""Return an ScpCommand instance.
Will wait until completed.
fromSshParameters
an SshParameters instance for remote.
fromRemotePath
one path or a list of paths.
Absolute paths strongly recommended."""
scpCommand = ScpCommand(fromPath=fromRemotePath, toPath=toLocalPath, fromSshParameters=fromSshParameters,
recurseDirectories=recurseDirectories, preserveTimes=preserveTimes)
return scpCommand
if __name__ == "__main__":
SystemRequirements.commandsRequiredByImplementations([ScpCommand], verbose=True)
#
import shutil
import tempfile
from nrvr.util.times import Timestamp
_testDir = os.path.join(tempfile.gettempdir(), Timestamp.microsecondTimestamp())
os.mkdir(_testDir, 0755)
try:
_sendDir = os.path.join(_testDir, "send")
os.mkdir(_sendDir, 0755)
_exampleFile1 = os.path.join(_sendDir, "example1.txt")
with open(_exampleFile1, "w") as outputFile:
outputFile.write("this is an example\n" * 1000000)
# fictional 10.123.45.67
_exampleSshParameters = SshParameters("10.123.45.67", "root", "redwood")
# _scpExample1 = ScpCommand(fromPath=_exampleFile1,
# toSshParameters=_exampleSshParameters,
# toPath="~/example1.txt")
# print "returncode=" + str(_scpExample1.returncode)
# print "output=" + _scpExample1.output
# _scpExample2 = ScpCommand(fromSshParameters=_exampleSshParameters,
# fromPath="/etc/hosts",
# toPath=_exampleFile1)
# print "returncode=" + str(_scpExample2.returncode)
# print "output=" + _scpExample2.output
# with open(_exampleFile1, "r") as inputFile:
# _exampleFile1Content = inputFile.read()
# print "content=\n" + _exampleFile1Content
finally:
shutil.rmtree(_testDir)
|
|
# CUDA_VISIBLE_DEVICES=gpu-number python deepSimDEF_for_gene_expression.py arguments
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import sys
import logging
import random
import numpy as np
import pprint
import argparse
import tensorflow.keras
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from networks import deepSimDEF_network
from datasets import gene_expression_dataset, generic_production_dataset
from dataloaders import generic_dataloader, generic_production_dataloader
from scipy.stats.stats import pearsonr, spearmanr
import datetime
from pytz import timezone
import collections
from utils import *
tz = timezone('US/Eastern') # To monitor training time (showing start & end points of a fixed timezone when the code runs on a remote server)
pp = pprint.PrettyPrinter(indent=4)
#checkpoint = '[base_dir]/2020.03.04-23h40m37s_server_name/model_checkpoints/epoch_58'
checkpoint = None
parser = argparse.ArgumentParser(description='Calculate gene-product pairs similarity.')
# experiment arguments
parser.add_argument('--deepsimdef_mode', default='training', type=str, help='mode of the model can be either "training", "evaluation", or "production"')
parser.add_argument('--nb_fold', default=10, type=int, help='number of folds of training and evaluation in n-fold cross-validation (default: 10)')
parser.add_argument('--iea', default=True, type=str2bool, help='whether to consider "inferred from electronic annotations" or not')
parser.add_argument('--sub_ontology', default='all', type=str, help='considering annotations of what subontologies, "bp", "cc", "mf", or "all" (default: "all")')
parser.add_argument('--inpute_file', default='default', type=str, help='input file of the gene product pairs and their score(s); if not provided, the default file is used')
parser.add_argument('--production_input_file', default='', type=str, help='test file of the gene product pairs used in production mode (provide the full path)')
parser.add_argument('--production_output_file', default='', type=str, help='result file of the gene product pairs used in production mode (provide the full path)')
parser.add_argument('--experiment_mode', default=2, type=int, help='1: any pairs of unseen genes; 2: only pairs in which both genes are unseen')
parser.add_argument('--partial_shuffle_percent', default=0.0, type=float, help='Should be more than 0.0 for "Negative Control" experiments (default: 0.0)')
parser.add_argument('--species', default='yeast', type=str, help='the species of interest for evaluation (human, yeast, etc)')
# network arguments
parser.add_argument('--dropout', default=0.3, type=float, help='dropout applied to dense layers of the network (default: 0.3)')
parser.add_argument('--embedding_dropout', default=0.15, type=float, help='dropout applied to embedding layers of the network; i.e., percentage of features dropped out completely (default: 0.15)')
parser.add_argument('--annotation_dropout', default=0.0, type=float, help='dropout applied to annotations of a gene at training time; i.e., percentage of annotations ignored (default: 0.0)')
parser.add_argument('--pretrained_embedding', default=True, type=str2bool, help='whether the GO term embeddings loaded should be computed in advance from a pretrained unsupervised model (default: True)')
parser.add_argument('--updatable_embedding', default=True, type=str2bool, help='whether the GO term embeddings should be updatable during the training (default: True)')
parser.add_argument('--activation_hidden', default='relu', type=str, help='activation function of hidden layers (default: "relu")')
parser.add_argument('--activation_highway', default='relu', type=str, help='activation function of highway layer (default: "relu")')
parser.add_argument('--activation_output', default='linear', type=str, help='activation function of last, i.e. output, layer (default: "linear")')
parser.add_argument('--embedding_dim', default=100, type=int, help='dimensionality of GO term embeddings, i.e. number of latent features (default: 100)')
parser.add_argument('--highway_layer', default=True, type=str2bool, help='True to use a highway layer instead of cosine similarity (default: True)')
parser.add_argument('--cosine_similarity', default=False, type=str2bool, help='True to use cosine similarity instead of a highway layer (default: False)')
# training arguments
parser.add_argument('--nb_epoch', default=400, type=int, help='number of epochs for training')
parser.add_argument('--batch_size', default=256, type=int, help='batch size (default: 256)')
parser.add_argument('--loss', default='mean_squared_error', type=str, help='loss type of the objective function that gets optimized ("binary_crossentropy" or "mean_squared_error")')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer algorithm, can be: "adam", "rmsprop", etc. (default: "adam")')
parser.add_argument('--learning_rate', default=0.001, type=float, help='starting learning rate for optimization')
parser.add_argument('--adaptive_lr', default=True, type=str2bool, help='whether to use an adaptive learning rate or not')
parser.add_argument('--adaptive_lr_rate', default=10, type=int, help='after how many epochs the learning rate is decayed')
# checkpointting arguments
parser.add_argument('--checkpoint', default=checkpoint, help='starting from scratch or using model checkpoints')
parser.add_argument('--save_model', default=False, type=str2bool, help='model checkpointing, whether to save the models during training')
parser.add_argument('--save_embeddings', default=False, type=str2bool, help='storing weights of the embedding layers, whether to save updated embeddings')
parser.add_argument('--save_interval', default=5, type=int, help='-1 to checkpoint only when the result improves; otherwise checkpoint after each interval of this many epochs (default: 5)')
parser.add_argument('--log_dir', default='logs/', type=str, help='base log folder (will be created if it does not exist)')
parser.add_argument('--log_name', default='GE_test', type=str, help='prefix name to use when logging this model')
# misc arguments
parser.add_argument('--verbose', default=False, type=str2bool, help='whether to print extra information during model training')
parser.add_argument('--reproducible', default=True, type=str2bool, help='whether we want to have a reproducible result (mostly helpful with training on a CPU at the cost of training speed)')
parser.add_argument('--seed', default=2021, type=int, help='seed used for Random Number Generation if "reproducible=True"')
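# Hedged invocation sketches (GPU index and file paths are illustrative, not part of the repo):
#   CUDA_VISIBLE_DEVICES=0 python deepSimDEF_for_gene_expression.py \
#       --species yeast --sub_ontology all --nb_fold 10 --nb_epoch 400
#   # 'evaluation' and 'production' modes additionally require 'checkpoint' (defined above)
#   # to point at a saved model directory:
#   CUDA_VISIBLE_DEVICES=0 python deepSimDEF_for_gene_expression.py --deepsimdef_mode production \
#       --production_input_file data/my_pairs.tsv --production_output_file results/my_pairs_scores.tsv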
def fit_gene_expression(models, args):
best_epoch_pearson, best_epoch_spearman = 0, 0
final_pearson, final_spearman = [], []
start_time = datetime.datetime.now(tz)
former_iteration_endpoint = start_time
print("~~~~~~~~~~~~~ TIME ~~~~~~~~~~~~~~")
print("Time started: {}".format(start_time.strftime("%Y-%m-%d %H:%M:%S")))
"""Training loop"""
for e in range(checkpoint_baseline, args.nb_epoch):
print("~~~~~~~~ {} ({}) ~~~~~~~~ EPOCH {}/{} (Embedding dimention: {}) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n".format(
'/'.join(sub_ontology_interested), args.species, e+1, args.nb_epoch, args.embedding_dim))
if args.adaptive_lr:
learning_rate = exp_decay(epoch=e//args.adaptive_lr_rate, initial_lrate=args.learning_rate) # calculating the desired learning rate using the exponential decay formula
else:
learning_rate = args.learning_rate
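# (sketch) exp_decay comes from utils and is not shown here; a typical exponential decay is
# lrate = initial_lrate * exp(-k * epoch), and the integer division e//args.adaptive_lr_rate
# above means the rate only steps down once every adaptive_lr_rate epochs.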
epoch_pearsons, epoch_spearmans = [], []
for model_index in range(len(models)): # Going through each model one by one
# Preparing the data
train_pair, X_train, y_train, test_pair, X_test, y_test, train_gene, test_gene = generic_dataloader(
model_index, nb_test_genes, gene_shuffled_indx, gene_1, gene_2, fully_annotated_genes,
gene_1_annotation, gene_2_annotation, prediction_value, sub_ontology_interested,
args.experiment_mode)
if e==0: save_gene_pairs(logdir=logdir, model_id=model_index, train_pair=train_pair,
test_pair=test_pair, train_gene=train_gene, test_gene=test_gene)
if args.nb_fold==1: X_train, y_train = X_test, y_test # If single model, we need to redefine the training data due to absence of folds
# Training and Prediction
model = models[model_index]
model.optimizer.lr = learning_rate # apply the learning rate already computed by the exponential decay formula above
history = model.fit(X_train, y_train, batch_size=args.batch_size, epochs=1, shuffle=True)
if args.nb_fold!=1: # Only evaluation and report in n-fold cross-validation set up
predictions = model.predict(X_test)
"""Pearson"""
#finding the best pearson
pr = np.round(pearsonr(y_test.reshape(y_test.shape[0]), predictions.reshape(predictions.shape[0]))[0], 5)
epoch_pearsons.append(pr)
# Keeping track of improving or receding pearson
best_pearson = best_pearsons[model_index]
if best_pearson < pr:
best_pearsons[model_index] = pr
st = "(+){}".format(best_pearsons[model_index])
else:
st = "(-){}".format(best_pearsons[model_index])
print(">>> Pearson ({}): {} Best({}): {} ({})".format(
model_index+1, pr, model_index+1, st, np.round(pr-best_pearson, 5)))
"""Spearman"""
#finding the best spearman
sp = np.round(spearmanr(y_test.reshape(y_test.shape[0]), predictions.reshape(predictions.shape[0]))[0], 5)
epoch_spearmans.append(sp)
# Keeping track of improving or receding spearman
best_spearman = best_spearmans[model_index]
if best_spearman < sp:
best_spearmans[model_index] = sp
st = "(+){}".format(best_spearmans[model_index])
else:
st = "(-){}".format(best_spearmans[model_index])
print(">>> Spearman ({}): {} Best({}): {} ({})\n".format(
model_index+1, sp, model_index+1, st, np.round(sp-best_spearman, 5)))
# logging the model results
log_model_result_for_gene_expression(e+1, model_index+1, learning_rate,
best_pearsons[model_index], pr-best_pearson, pr,
best_spearmans[model_index], sp-best_spearman, sp, logdir)
if args.nb_fold!=1: # Stats on all folds in this epoch
"""Pearson"""
pr_res = np.mean(epoch_pearsons) # mean Pearson across this epoch's folds
final_pearson.append(pr_res)
# Stats on what we have done so far from the beginning of training
if e==checkpoint_baseline:
best_epoch_pearson = e+1
best_cv_pearson = final_pearson[0]
else:
for epoch, final_result in enumerate(final_pearson):
if best_cv_pearson < final_pearson[epoch]:
best_epoch_pearson = checkpoint_baseline+epoch+1
best_cv_pearson = final_pearson[epoch]
print(" Pearson for this epoch: {:.2f}% -- Best Pearson::==> {:.2f}% (for epoch #{} of {})".format(
pr_res*100, best_cv_pearson*100, best_epoch_pearson, args.nb_epoch))
"""Spearman"""
sp_res = np.mean(epoch_spearmans) # mean Spearman across this epoch's folds
final_spearman.append(sp_res)
# Stats on what we have done so far from the beginning of training
if e==checkpoint_baseline:
best_epoch_spearman = e+1
best_cv_spearman = final_spearman[0]
else:
for epoch, final_result in enumerate(final_spearman):
if best_cv_spearman < final_spearman[epoch]:
best_epoch_spearman = checkpoint_baseline+epoch+1
best_cv_spearman = final_spearman[epoch]
print(" Spearman for this epoch: {:.2f}% -- Best Spearman::==> {:.2f}% (for epoch #{} of {})\n".format(
sp_res*100, best_cv_spearman*100, best_epoch_spearman, args.nb_epoch))
# save models and embeddings
if args.save_interval==-1 and best_epoch_spearman==e+1: # save if improved the result
if args.save_model: # save models
save_model(path=logdir, models=models, epoch=e+1, verbose=args.verbose)
if args.save_embeddings: # save (updated) GO term embeddings
save_embeddings(path=logdir,
models=models,
go_term_indeces=go_term_indeces,
sub_ontology_interested=sub_ontology_interested,
embedding_save=go_term_embedding_save_in,
epoch=e+1,
verbose=args.verbose)
elif args.save_interval!=-1 and (e+1)%args.save_interval==0: # save after each interval
if args.save_model: # save models
save_model(path=logdir, models=models, epoch=e+1, verbose=args.verbose)
if args.save_embeddings: # save (updated) GO term embeddings
save_embeddings(path=logdir,
models=models,
go_term_indeces=go_term_indeces,
sub_ontology_interested=sub_ontology_interested,
embedding_save=go_term_embedding_save_in,
epoch=e+1,
verbose=args.verbose)
# Calculating 'Computation Time' for this round of iteration
former_iteration_endpoint, current_iteration_elapsed = cal_iter_time(former_iteration_endpoint, e, args, tz)
# logging the epoch results
if args.nb_fold!=1: # Stats on all folds in this epoch
log_epoch_result_for_gene_expression(e+1, args.nb_epoch, current_iteration_elapsed,
best_cv_pearson, pr_res, best_epoch_pearson,
best_cv_spearman, sp_res, best_epoch_spearman, logdir)
def eval_gene_expression(models, args):
"""if we need the cutoff point for accuracy"""
epoch_pearsons, epoch_spearmans = [], []
for model_index in range(len(models)): # Going through each model one by one
# Preparing the data
train_pair, X_train, y_train, test_pair, X_test, y_test, train_gene, test_gene = generic_dataloader(
model_index, nb_test_genes, gene_shuffled_indx, gene_1, gene_2, fully_annotated_genes,
gene_1_annotation, gene_2_annotation, prediction_value, sub_ontology_interested,
args.experiment_mode)
# Training and Prediction
model = models[model_index]
predictions = model.predict(X_test)
"""Pearson"""
pr = np.round(pearsonr(y_test.reshape(y_test.shape[0]), predictions.reshape(predictions.shape[0]))[0], 5)
epoch_pearsons.append(pr)
"""Spearman"""
sp = np.round(spearmanr(y_test.reshape(y_test.shape[0]), predictions.reshape(predictions.shape[0]))[0], 5)
epoch_spearmans.append(sp)
print(f"Fold {(model_index+1):2d} >>> Pearson: {pr:.4f}, Spearman: {sp:.4f}")
print(f"Final averaged results are >>> Pearson: {np.mean(epoch_pearsons):.4f}, Spearman: {np.mean(epoch_spearmans):.4f}")
if __name__ == "__main__":
args = parser.parse_args()
# some assertions before proceeding
assert not ((args.deepsimdef_mode == 'evaluation') and (checkpoint is None)), "'checkpoint' cannot be None in 'evaluation' mode of '--deepsimdef_mode' as the model needs a trained deepSimDEF network"
assert not ((args.deepsimdef_mode == 'production') and (checkpoint is None)), "'checkpoint' cannot be None in 'production' mode of '--deepsimdef_mode' as the model needs a trained deepSimDEF network"
assert not ((args.deepsimdef_mode == 'production') and (not os.path.isfile(args.production_input_file))), "the provided 'args.production_input_file' does not exist, provide a proper path"
assert not ((args.nb_fold == 1) and (args.save_interval == -1)), "'--save_interval' cannot be -1 when '--nb_fold' is 1; define a 'positive integer' interval"
assert not (args.highway_layer and args.cosine_similarity), "Either '--highway_layer' can be True or '--cosine_similarity', not both."
if args.deepsimdef_mode=='training' or args.deepsimdef_mode=='evaluation':
# printing out the argument of the model
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Arguments are:")
pp.pprint(vars(args))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
# directory to the files needed for traning and testing
if args.inpute_file=='default':
data_file_name = f'{args.species}_gene_expression.tsv'
else:
data_file_name = args.inpute_file
gene_expression_data_dir = './data/species/{}/gene_expression/processed'.format(args.species) # directory of the gene expression datasets; pay attention to the file names and their content format
embedding_dir = f'./data/gene_ontology/definition_embedding/{args.embedding_dim}_dimensional' # directory of the GO term embeddings; pay attention to the file names and their content format
gene_annotations_dir = './data/species/{}/association_file/processed'.format(args.species) # directory of the gene annotations; pay attention to the file names and their content format
# set RNG
if args.reproducible: make_reproducible(args.seed)
# some variables needed to work with sub-ontologies
if args.sub_ontology=='all': sub_ontology_interested = ['BP', 'CC', 'MF']
elif args.sub_ontology=='bp': sub_ontology_interested = ['BP']
elif args.sub_ontology=='cc': sub_ontology_interested = ['CC']
elif args.sub_ontology=='mf': sub_ontology_interested = ['MF']
sub_ontology_all = ['BP', 'CC', 'MF'] # for experiments, to make sure all genes have annotations from all three ontologies
# do we use a checkpointed model, if not we can set it
if args.checkpoint is None:
args.log_name = f"{args.log_name}_{args.species}/pretrained_emb_{args.pretrained_embedding}_iea_{args.iea}_ontology_{args.sub_ontology}"
logdir, checkpoint_baseline = log(args)
else:
logdir = args.checkpoint.rsplit("/", 2)[0]
checkpoint_baseline = int(args.checkpoint.rsplit("_")[-1])
args.log_name = checkpoint.rsplit("/", 3)[0]
print(f"The checkpoint directory is: '{logdir}'\n")
# some variables to work with GO-term embeddings later
go_term_embedding_file_path = {} # directory of embedding files (for every sub-ontology)
go_term_embedding_save_in = {} # files into which the updated GO term embeddings will be stored
for sbo in sub_ontology_interested:
go_term_embedding_file_path[sbo] = '{}/GO_{}_Embeddings_{}D.emb'.format(embedding_dir, sbo, args.embedding_dim)
go_term_embedding_save_in[sbo] = 'GO_{}_Embeddings_{}D_Updated'.format(sbo, args.embedding_dim)
# getting GO annotations
gene_indeces, gene_annotations, go_term_indeces, max_ann_len, max_ann_len_indx = extract_annotation_1st_form(
sub_ontology_all, gene_annotations_dir, args.iea, args.verbose)
fully_annotated_genes = [] # we keep only those genes for which we have annotations from all sub-ontologies
for sbo in sub_ontology_all:
fully_annotated_genes.append(gene_indeces[sbo].keys())
fully_annotated_genes = sorted(list(set(fully_annotated_genes[0]).intersection(*fully_annotated_genes)))
if args.verbose: print("Number of fully annotated gene products: {}\n".format(len(fully_annotated_genes)))
"""Shuffling the genes"""
gene_shuffled_indx = np.arange(len(fully_annotated_genes))
np.random.shuffle(gene_shuffled_indx)
VALIDATION_SPLIT = 1.0/args.nb_fold
nb_test_genes = int(VALIDATION_SPLIT * len(fully_annotated_genes))
gene_1, gene_2, gene_1_annotation, gene_2_annotation, prediction_value = gene_expression_dataset(
gene_expression_data_dir, data_file_name, fully_annotated_genes, gene_annotations,
gene_indeces, max_ann_len, args.partial_shuffle_percent, sub_ontology_interested)
VALIDATION_SPLIT = 1.0/args.nb_fold
gene_pair_indx = np.arange(gene_1_annotation[sub_ontology_interested[0]].shape[0])
np.random.shuffle(gene_pair_indx)
nb_test_gene_pairs = int(VALIDATION_SPLIT * gene_1_annotation[sub_ontology_interested[0]].shape[0])
if args.verbose:
for sbo in sub_ontology_interested:
print("Shape of data tensor for gene 1 ({}): {}".format(sbo, gene_1_annotation[sbo].shape))
print("Shape of data tensor for gene 2 ({}): {}\n".format(sbo, gene_2_annotation[sbo].shape))
print("Shape of output sequence homology tensors: {}\n".format(prediction_value.shape))
models = []
best_pearsons = []
best_spearmans = []
for m in range(args.nb_fold):
network = deepSimDEF_network(args, model_ind=m, max_ann_len=max_ann_len, go_term_embedding_file_path=go_term_embedding_file_path,
sub_ontology_interested=sub_ontology_interested, go_term_indeces=go_term_indeces)
models.append(network)
best_pearsons.append(0)
best_spearmans.append(0)
if args.deepsimdef_mode=='training':
fit_gene_expression(models, args)
save_model(path=logdir, models=models, epoch=args.nb_epoch, verbose=args.verbose)
save_embeddings(path=logdir,
models=models,
go_term_indeces=go_term_indeces,
sub_ontology_interested=sub_ontology_interested,
embedding_save=go_term_embedding_save_in,
epoch=args.nb_epoch,
verbose=args.verbose)
elif args.deepsimdef_mode=='evaluation':
eval_gene_expression(models, args)
elif args.deepsimdef_mode=='production':
data_file_dir, data_file_name = os.path.dirname(args.production_input_file), os.path.basename(args.production_input_file)
gene_annotations_dir = './data/species/{}/association_file/processed'.format(args.species) # directory of the gene annotations; pay attention to the file names and their content format
# some variables needed to work with sub-ontologies
if args.sub_ontology=='all': sub_ontology_interested = ['BP', 'CC', 'MF']
elif args.sub_ontology=='bp': sub_ontology_interested = ['BP']
elif args.sub_ontology=='cc': sub_ontology_interested = ['CC']
elif args.sub_ontology=='mf': sub_ontology_interested = ['MF']
sub_ontology_all = ['BP', 'CC', 'MF'] # to make sure all genes have annotations from all three ontologies, modify this if you want to work with single channel networks
gene_indeces, gene_annotations, go_term_indeces, max_ann_len, max_ann_len_indx = extract_annotation_1st_form(
sub_ontology_all, gene_annotations_dir, args.iea, args.verbose)
fully_annotated_genes = [] # we keep only those genes for which we have annotations from all sub-ontologies
for sbo in sub_ontology_all:
fully_annotated_genes.append(gene_indeces[sbo].keys())
fully_annotated_genes = sorted(list(set(fully_annotated_genes[0]).intersection(*fully_annotated_genes)))
gene_1, gene_2, gene_1_annotation, gene_2_annotation = generic_production_dataset(
data_file_dir, data_file_name, fully_annotated_genes,
gene_annotations, gene_indeces, max_ann_len, sub_ontology_interested)
model = deepSimDEF_network(args, model_ind=0)
test_pair, X_test, test_gene = generic_production_dataloader(gene_1, gene_2, fully_annotated_genes,
gene_1_annotation, gene_2_annotation, sub_ontology_interested)
preds = model.predict(X_test)
if len(args.production_output_file)==0:
for i, (pair, pred) in enumerate(zip(test_pair, preds)):
#print("{}\t{}\t{:.8f}".format(i+1, pair.replace(" ", "\t"), pred[0]))
print("{}\t{:.8f}".format(pair.replace(" ", "\t"), pred[0]))
else:
with open(args.production_output_file, "w") as wf:
for i, (pair, pred) in enumerate(zip(test_pair, preds)):
wf.write("{}\t{:.8f}\n".format(pair.replace(" ", "\t"), pred[0]))
|
|
"""Tests related to retraction of public registrations"""
import datetime
import httplib as http
import mock
from django.utils import timezone
from django.db import DataError
from nose.tools import * # noqa
from framework.auth import Auth
from framework.exceptions import PermissionsError
from tests.base import fake, OsfTestCase
from osf_tests.factories import (
AuthUserFactory, NodeFactory, ProjectFactory,
RegistrationFactory, UserFactory, UnconfirmedUserFactory,
UnregUserFactory
)
from website import tokens
from website.exceptions import (
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
NodeStateError,
)
from osf.models import Contributor, Retraction
class RegistrationRetractionModelsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionModelsTestCase, self).setUp()
self.user = UserFactory()
self.registration = RegistrationFactory(creator=self.user, is_public=True)
self.valid_justification = fake.sentence()
self.invalid_justification = fake.text(max_nb_chars=3000)
def test_set_public_registration_to_private_raises_NodeStateException(self):
self.registration.save()
with assert_raises(NodeStateError):
self.registration.set_privacy('private')
self.registration.reload()
assert_true(self.registration.is_public)
def test_initiate_retraction_saves_retraction(self):
initial_count = Retraction.objects.all().count()
self.registration._initiate_retraction(self.user)
assert_equal(Retraction.objects.all().count(), initial_count + 1)
def test__initiate_retraction_does_not_create_tokens_for_unregistered_admin(self):
unconfirmed_user = UnconfirmedUserFactory()
Contributor.objects.create(node=self.registration, user=unconfirmed_user)
self.registration.add_permission(unconfirmed_user, 'admin', save=True)
assert_true(self.registration.has_permission(unconfirmed_user, 'admin'))
retraction = self.registration._initiate_retraction(self.user)
assert_true(self.user._id in retraction.approval_state)
assert_false(unconfirmed_user._id in retraction.approval_state)
def test__initiate_retraction_adds_admins_on_child_nodes(self):
project_admin = UserFactory()
project_non_admin = UserFactory()
child_admin = UserFactory()
child_non_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin)
project.add_contributor(project_non_admin, auth=Auth(project.creator), save=True)
child = NodeFactory(creator=child_admin, parent=project)
child.add_contributor(child_non_admin, auth=Auth(child.creator), save=True)
grandchild = NodeFactory(creator=grandchild_admin, parent=child) # noqa
registration = RegistrationFactory(project=project)
retraction = registration._initiate_retraction(registration.creator)
assert_in(project_admin._id, retraction.approval_state)
assert_in(child_admin._id, retraction.approval_state)
assert_in(grandchild_admin._id, retraction.approval_state)
assert_not_in(project_non_admin._id, retraction.approval_state)
assert_not_in(child_non_admin._id, retraction.approval_state)
# Backref tests
def test_retraction_initiator_has_backref(self):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_equal(Retraction.objects.filter(initiated_by=self.user).count(), 1)
# Node#retract_registration tests
def test_pending_retract(self):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_false(self.registration.is_retracted)
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
assert_equal(self.registration.retraction.justification, self.valid_justification)
assert_equal(self.registration.retraction.initiated_by, self.user)
assert_equal(
self.registration.retraction.initiation_date.date(),
timezone.now().date()
)
def test_retract_component_raises_NodeStateError(self):
project = ProjectFactory(is_public=True, creator=self.user)
NodeFactory(is_public=True, creator=self.user, parent=project)
registration = RegistrationFactory(is_public=True, project=project)
with assert_raises(NodeStateError):
registration._nodes.first().retract_registration(self.user, self.valid_justification)
    def test_long_justification_raises_DataError(self):
with assert_raises(DataError):
self.registration.retract_registration(self.user, self.invalid_justification)
self.registration.save()
assert_is_none(self.registration.retraction)
def test_retract_private_registration_raises_NodeStateError(self):
self.registration.is_public = False
with assert_raises(NodeStateError):
self.registration.retract_registration(self.user, self.valid_justification)
self.registration.save()
self.registration.reload()
assert_is_none(self.registration.retraction)
def test_retraction_of_registration_pending_embargo_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_retraction_of_registration_in_active_embargo_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
retraction_approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, retraction_approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
# Retraction#approve_retraction_tests
def test_invalid_approval_token_raises_InvalidSanctionApprovalToken(self):
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
with assert_raises(InvalidSanctionApprovalToken):
self.registration.retraction.approve_retraction(self.user, fake.sentence())
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_non_admin_approval_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
with assert_raises(PermissionsError):
self.registration.retraction.approve_retraction(non_admin, approval_token)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_one_approval_with_one_admin_retracts(self):
self.registration.retract_registration(self.user)
self.registration.save()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
assert_true(self.registration.is_pending_retraction)
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 1)
def test_approval_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.retract_registration(self.user)
self.registration.save()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
# Logs: Created, registered, retraction initiated, retraction approved
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 2)
def test_retraction_of_registration_pending_embargo_cancels_embargo_public(self):
self.registration.is_public = True
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_approval_of_registration_with_embargo_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.is_public = True
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
self.registration.retract_registration(self.user)
self.registration.save()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
# Logs: Created, registered, embargo initiated, retraction initiated, retraction approved, embargo cancelled
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 4)
def test_retraction_of_public_registration_in_active_embargo_cancels_embargo(self):
self.registration.is_public = True
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
retraction_approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, retraction_approval_token)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo.is_rejected)
def test_two_approvals_with_two_admins_retracts(self):
self.admin2 = UserFactory()
Contributor.objects.create(node=self.registration, user=self.admin2)
self.registration.add_permission(self.admin2, 'admin', save=True)
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
# First admin approves
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_pending_retraction)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Second admin approves
approval_token = self.registration.retraction.approval_state[self.admin2._id]['approval_token']
self.registration.retraction.approve_retraction(self.admin2, approval_token)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 2)
assert_true(self.registration.is_retracted)
def test_one_approval_with_two_admins_stays_pending(self):
self.admin2 = UserFactory()
Contributor.objects.create(node=self.registration, user=self.admin2)
self.registration.add_permission(self.admin2, 'admin', save=True)
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_pending_retraction)
num_of_approvals = sum([val['has_approved'] for val in self.registration.retraction.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Retraction#disapprove_retraction tests
def test_invalid_rejection_token_raises_InvalidSanctionRejectionToken(self):
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
with assert_raises(InvalidSanctionRejectionToken):
self.registration.retraction.disapprove_retraction(self.user, fake.sentence())
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_non_admin_rejection_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
with assert_raises(PermissionsError):
self.registration.retraction.disapprove_retraction(non_admin, rejection_token)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_one_disapproval_cancels_retraction(self):
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
assert_equal(self.registration.retraction.state, Retraction.UNAPPROVED)
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
assert_true(self.registration.retraction.is_rejected)
def test_disapproval_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.retract_registration(self.user)
self.registration.save()
self.registration.reload()
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
# Logs: Created, registered, retraction initiated, retraction cancelled
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 2)
def test__on_complete_makes_project_and_components_public(self):
project_admin = UserFactory()
child_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin, is_public=False)
child = NodeFactory(creator=child_admin, parent=project, is_public=False)
grandchild = NodeFactory(creator=grandchild_admin, parent=child, is_public=False) # noqa
registration = RegistrationFactory(project=project)
registration._initiate_retraction(self.user)
registration.retraction._on_complete(self.user)
for each in registration.node_and_primary_descendants():
each.reload()
assert_true(each.is_public)
# Retraction property tests
def test_new_retraction_is_pending_retraction(self):
self.registration.retract_registration(self.user)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
class RegistrationWithChildNodesRetractionModelTestCase(OsfTestCase):
def setUp(self):
super(RegistrationWithChildNodesRetractionModelTestCase, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.project = ProjectFactory(is_public=True, creator=self.user)
self.component = NodeFactory(
creator=self.user,
parent=self.project,
title='Component'
)
self.subproject = ProjectFactory(
creator=self.user,
parent=self.project,
title='Subproject'
)
self.subproject_component = NodeFactory(
creator=self.user,
parent=self.subproject,
title='Subcomponent'
)
self.registration = RegistrationFactory(project=self.project, is_public=True)
        # Reload the registration; else tests won't catch failures to save
self.registration.reload()
@mock.patch('website.project.tasks.format_node')
@mock.patch('website.project.tasks.format_registration')
@mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
@mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
@mock.patch('website.project.tasks.send_share_node_data')
def test_approval_retracts_descendant_nodes(self, mock_update_share, mock_format_registration, mock_format_node):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction
descendants = self.registration.get_descendants_recursive()
for node in descendants:
node.save()
assert_true(node.is_pending_retraction)
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
# Ensure descendant nodes are retracted
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_retracted)
assert mock_update_share.called
assert mock_format_registration.called
assert not mock_format_node.called
def test_disapproval_cancels_retraction_on_descendant_nodes(self):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction
descendants = self.registration.get_descendants_recursive()
for node in descendants:
node.save()
assert_true(node.is_pending_retraction)
# Disapprove parent registration's retraction
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
assert_false(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
assert_true(self.registration.retraction.is_rejected)
# Ensure descendant nodes' retractions are cancelled
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_false(node.is_pending_retraction)
assert_false(node.is_retracted)
@mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
@mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
@mock.patch('website.project.tasks.send_share_node_data')
def test_approval_cancels_pending_embargoes_on_descendant_nodes(self, mock_update_share):
# Initiate embargo for registration
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction and pending embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_pending_retraction)
assert_true(node.is_pending_embargo)
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
self.registration.embargo.reload()
assert_false(self.registration.is_pending_embargo)
        # Ensure descendant nodes are retracted and no longer pending embargo
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_retracted)
assert_false(node.is_pending_embargo)
assert mock_update_share.called
@mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
@mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
@mock.patch('website.project.tasks.send_share_node_data')
def test_approval_cancels_active_embargoes_on_descendant_nodes(self, mock_update_share):
# Initiate embargo for registration
self.registration.embargo_registration(
self.user,
timezone.now() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
# Approve embargo for registration
embargo_approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, embargo_approval_token)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
assert_true(self.registration.is_pending_retraction)
        # Ensure descendant nodes are pending retraction and still have active embargoes
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_pending_retraction)
assert_true(node.embargo_end_date)
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
        # Ensure descendant nodes are retracted
descendants = self.registration.get_descendants_recursive()
for node in descendants:
assert_true(node.is_retracted)
assert mock_update_share.called
class RegistrationRetractionShareHook(OsfTestCase):
def setUp(self):
super(RegistrationRetractionShareHook, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.project = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(project=self.project, is_public=True)
        # Reload the registration; else tests won't catch failures to save
self.registration.reload()
@mock.patch('website.project.tasks.format_node')
@mock.patch('website.project.tasks.format_registration')
@mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
@mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
@mock.patch('website.project.tasks.send_share_node_data')
def test_approval_calls_share_hook(self, mock_update_share, mock_format_registration, mock_format_node):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
# Approve parent registration's retraction
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.registration.retraction.approve_retraction(self.user, approval_token)
assert_true(self.registration.is_retracted)
assert mock_update_share.called
assert mock_format_registration.called
assert not mock_format_node.called
@mock.patch('website.project.tasks.settings.SHARE_URL', 'ima_real_website')
@mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'totaly_real_token')
@mock.patch('website.project.tasks.send_share_node_data')
def test_disapproval_does_not_call_share_hook(self, mock_update_share):
# Initiate retraction for parent registration
self.registration.retract_registration(self.user)
self.registration.save()
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.registration.retraction.disapprove_retraction(self.user, rejection_token)
assert_false(self.registration.is_retracted)
assert not mock_update_share.called
class RegistrationRetractionApprovalDisapprovalViewsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionApprovalDisapprovalViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.registered_from = ProjectFactory(is_public=True, creator=self.user)
self.registration = RegistrationFactory(is_public=True, project=self.registered_from)
self.registration.retract_registration(self.user)
self.registration.save()
self.approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
self.rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
self.corrupt_token = fake.sentence()
self.token_without_sanction = tokens.encode({
'action': 'approve_retraction',
'user_id': self.user._id,
'sanction_id': 'invalid id'
})
# node_registration_retraction_approve_tests
def test_GET_approve_from_unauthorized_user_returns_HTTPError_UNAUTHORIZED(self):
unauthorized_user = AuthUserFactory()
res = self.app.get(
self.registration.web_url_for('view_project', token=self.approval_token),
auth=unauthorized_user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.UNAUTHORIZED)
def test_GET_approve_registration_without_retraction_returns_HTTPError_BAD_REQUEST(self):
assert_true(self.registration.is_pending_retraction)
self.registration.retraction.reject(self.user, self.rejection_token)
assert_false(self.registration.is_pending_retraction)
self.registration.retraction.save()
res = self.app.get(
self.registration.web_url_for('view_project', token=self.approval_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_GET_approve_with_invalid_token_returns_HTTPError_BAD_REQUEST(self):
res = self.app.get(
self.registration.web_url_for('view_project', token=self.corrupt_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
    def test_GET_approve_with_non_existent_sanction_returns_HTTPError_BAD_REQUEST(self):
res = self.app.get(
self.registration.web_url_for('view_project', token=self.token_without_sanction),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_GET_approve_with_valid_token_returns_200(self):
res = self.app.get(
self.registration.web_url_for('view_project', token=self.approval_token),
auth=self.user.auth
)
self.registration.retraction.reload()
assert_true(self.registration.is_retracted)
assert_false(self.registration.is_pending_retraction)
assert_equal(res.status_code, http.OK)
# node_registration_retraction_disapprove_tests
def test_GET_disapprove_from_unauthorized_user_returns_HTTPError_UNAUTHORIZED(self):
unauthorized_user = AuthUserFactory()
res = self.app.get(
self.registration.web_url_for('view_project', token=self.rejection_token),
auth=unauthorized_user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.UNAUTHORIZED)
def test_GET_disapprove_registration_without_retraction_returns_HTTPError_BAD_REQUEST(self):
assert_true(self.registration.is_pending_retraction)
self.registration.retraction.reject(self.user, self.rejection_token)
assert_false(self.registration.is_pending_retraction)
self.registration.retraction.save()
res = self.app.get(
self.registration.web_url_for('view_project', token=self.rejection_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_GET_disapprove_with_invalid_token_HTTPError_BAD_REQUEST(self):
res = self.app.get(
self.registration.web_url_for('view_project', token=self.corrupt_token),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_GET_disapprove_with_valid_token_returns_redirect(self):
res = self.app.get(
self.registration.web_url_for('view_project', token=self.rejection_token),
auth=self.user.auth,
)
self.registration.retraction.reload()
assert_false(self.registration.is_retracted)
assert_false(self.registration.is_pending_retraction)
assert_true(self.registration.retraction.is_rejected)
assert_equal(res.status_code, http.OK)
class ComponentRegistrationRetractionViewsTestCase(OsfTestCase):
def setUp(self):
super(ComponentRegistrationRetractionViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.project = ProjectFactory(is_public=True, creator=self.user)
self.component = NodeFactory(
is_public=True,
creator=self.user,
parent=self.project,
title='Component'
)
self.subproject = ProjectFactory(
is_public=True,
creator=self.user,
parent=self.project,
title='Subproject'
)
self.subproject_component = NodeFactory(
is_public=True,
creator=self.user,
parent=self.subproject,
title='Subcomponent'
)
self.registration = RegistrationFactory(is_public=True, project=self.project)
self.component_registration = self.registration._nodes.order_by('created').first()
self.subproject_registration = list(self.registration._nodes.order_by('created'))[1]
self.subproject_component_registration = self.subproject_registration._nodes.order_by('created').first()
def test_POST_retraction_to_component_returns_HTTPError_BAD_REQUEST(self):
res = self.app.post_json(
self.component_registration.api_url_for('node_registration_retraction_post'),
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_POST_retraction_to_subproject_returns_HTTPError_BAD_REQUEST(self):
res = self.app.post_json(
self.subproject_registration.api_url_for('node_registration_retraction_post'),
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_POST_retraction_to_subproject_component_returns_HTTPError_BAD_REQUEST(self):
res = self.app.post_json(
self.subproject_component_registration.api_url_for('node_registration_retraction_post'),
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
class RegistrationRetractionViewsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationRetractionViewsTestCase, self).setUp()
self.user = AuthUserFactory()
self.registered_from = ProjectFactory(creator=self.user, is_public=True)
self.registration = RegistrationFactory(project=self.registered_from, is_public=True)
self.retraction_post_url = self.registration.api_url_for('node_registration_retraction_post')
self.retraction_get_url = self.registration.web_url_for('node_registration_retraction_get')
self.justification = fake.sentence()
def test_GET_retraction_page_when_pending_retraction_returns_HTTPError_BAD_REQUEST(self):
self.registration.retract_registration(self.user)
self.registration.save()
res = self.app.get(
self.retraction_get_url,
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_POST_retraction_to_private_registration_returns_HTTPError_FORBIDDEN(self):
self.registration.is_public = False
self.registration.save()
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
self.registration.reload()
assert_is_none(self.registration.retraction)
@mock.patch('website.mails.send_mail')
def test_POST_retraction_does_not_send_email_to_unregistered_admins(self, mock_send_mail):
unreg = UnregUserFactory()
self.registration.add_contributor(
unreg,
auth=Auth(self.user),
permissions=['read', 'write', 'admin']
)
self.registration.save()
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
# Only the creator gets an email; the unreg user does not get emailed
assert_equal(mock_send_mail.call_count, 1)
def test_POST_pending_embargo_returns_HTTPError_HTTPOK(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.OK)
self.registration.reload()
assert_true(self.registration.is_pending_retraction)
def test_POST_active_embargo_returns_HTTPOK(self):
self.registration.embargo_registration(
self.user,
(timezone.now() + datetime.timedelta(days=10)),
for_existing_registration=True
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve(self.user, approval_token)
assert_true(self.registration.embargo_end_date)
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.OK)
self.registration.reload()
assert_true(self.registration.is_pending_retraction)
def test_POST_retraction_by_non_admin_retract_HTTPError_UNAUTHORIZED(self):
res = self.app.post_json(self.retraction_post_url, expect_errors=True)
assert_equals(res.status_code, http.UNAUTHORIZED)
self.registration.reload()
assert_is_none(self.registration.retraction)
@mock.patch('website.mails.send_mail')
def test_POST_retraction_without_justification_returns_HTTPOK(self, mock_send):
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
assert_equal(res.status_code, http.OK)
self.registration.reload()
assert_false(self.registration.is_retracted)
assert_true(self.registration.is_pending_retraction)
assert_is_none(self.registration.retraction.justification)
@mock.patch('website.mails.send_mail')
def test_valid_POST_retraction_adds_to_parent_projects_log(self, mock_send):
initial_project_logs = self.registration.registered_from.logs.count()
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
self.registration.registered_from.reload()
# Logs: Created, registered, retraction initiated
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 1)
@mock.patch('website.mails.send_mail')
def test_valid_POST_retraction_when_pending_retraction_raises_400(self, mock_send):
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
res = self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
@mock.patch('website.mails.send_mail')
def test_valid_POST_calls_send_mail_with_username(self, mock_send):
self.app.post_json(
self.retraction_post_url,
{'justification': ''},
auth=self.user.auth,
)
assert_true(mock_send.called)
args, kwargs = mock_send.call_args
assert_true(self.user.username in args)
def test_non_contributor_GET_approval_returns_HTTPError_UNAUTHORIZED(self):
non_contributor = AuthUserFactory()
self.registration.retract_registration(self.user)
approval_token = self.registration.retraction.approval_state[self.user._id]['approval_token']
approval_url = self.registration.web_url_for('view_project', token=approval_token)
res = self.app.get(approval_url, auth=non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, http.UNAUTHORIZED)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
def test_non_contributor_GET_disapproval_returns_HTTPError_UNAUTHORIZED(self):
non_contributor = AuthUserFactory()
self.registration.retract_registration(self.user)
rejection_token = self.registration.retraction.approval_state[self.user._id]['rejection_token']
disapproval_url = self.registration.web_url_for('view_project', token=rejection_token)
res = self.app.get(disapproval_url, auth=non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, http.UNAUTHORIZED)
assert_true(self.registration.is_pending_retraction)
assert_false(self.registration.is_retracted)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class BoolModelOperations(object):
"""BoolModelOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_true(
self, custom_headers=None, raw=False, **operation_config):
"""Get true Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/true'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_true(
self, bool_body, custom_headers=None, raw=False, **operation_config):
"""Set Boolean value true.
:param bool_body:
:type bool_body: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/true'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(bool_body, 'bool')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_false(
self, custom_headers=None, raw=False, **operation_config):
"""Get false Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_false(
self, bool_body, custom_headers=None, raw=False, **operation_config):
"""Set Boolean value false.
:param bool_body:
:type bool_body: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(bool_body, 'bool')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get null Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get invalid Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
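# -----------------------------------------------------------------------------
# Hedged usage sketch (an addition for illustration, not part of the generated
# code above). These operations are normally reached through the generated
# service client, which supplies the pipeline client, configuration, serializer
# and deserializer; the ``bool_model`` attribute name and the service client
# itself are assumptions based on the AutoRest fixture naming.
# -----------------------------------------------------------------------------
def _example_bool_roundtrip(client):
    """Exercise BoolModelOperations through an already-constructed service
    client whose ``bool_model`` attribute is an instance of the class above."""
    assert client.bool_model.get_true() is True
    assert client.bool_model.get_false() is False
    client.bool_model.put_true(True)
    client.bool_model.put_false(False)
    # With raw=True the call returns a ClientRawResponse wrapping both the
    # deserialized value and the underlying HTTP response.
    raw = client.bool_model.get_true(raw=True)
    return raw.response.status_code, raw.output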
|
|
#===============================================================================
# Copyright 2007 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""Support functions and classes implementing the KinoSearch-like external sort
merging model. This module does not contain any user-level objects.
"""
import os, tempfile
from heapq import heapify, heapreplace, heappop
from whoosh.filedb.structfile import StructFile, pack_ushort, unpack_ushort
from whoosh.system import _INT_SIZE, _USHORT_SIZE
from whoosh.util import utf8encode, utf8decode
from whoosh.util.struct2 import Struct
# Utility functions
_2int_struct = Struct("!II")
pack2ints = _2int_struct.pack
unpack2ints = _2int_struct.unpack
def encode_posting(fieldnum, text, doc, freq, datastring):
"""Encodes a posting as a string, for sorting.
"""
return "".join([pack_ushort(fieldnum),
utf8encode(text)[0],
chr(0),
pack2ints(doc, freq),
datastring
])
def decode_posting(posting):
"""Decodes an encoded posting string into a
(field_number, text, document_number, datastring) tuple.
"""
fieldnum = unpack_ushort(posting[:_USHORT_SIZE])[0]
zero = posting.find(chr(0), _USHORT_SIZE)
text = utf8decode(posting[_USHORT_SIZE:zero])[0]
metastart = zero + 1
metaend = metastart + _INT_SIZE * 2
doc, freq = unpack2ints(posting[metastart:metaend])
datastring = posting[metaend:]
return fieldnum, text, doc, freq, datastring
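# Layout of an encoded posting, for reference (derived from encode_posting above):
#
#   [ushort field number][utf-8 term text][0x00][uint doc][uint freq][datastring]
#
# Because the big-endian field number and the term text lead the string, a plain
# lexicographic sort of encoded postings orders them by field, then term, then
# document, which is exactly what the external merge below relies on.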
def merge(run_readers, max_chunk_size):
# Initialize a list of terms we're "current"ly looking at, by taking the
# first posting from each buffer.
#
# The format of the list is [("encoded_posting", reader_number), ...]
#
    # The list is kept as a heap (and each run is already sorted), so the first
    # item in it is always the absolute "lowest" term.
current = [(r.next(), i) for i, r
in enumerate(run_readers)]
heapify(current)
    # The number of active readers (readers with more postings available),
# initially equal to the total number of readers/buffers.
active = len(run_readers)
# Initialize the output buffer, and a variable to keep track of the output
# buffer size. This buffer accumulates postings from the various buffers in
# proper sorted order.
output = []
outputBufferSize = 0
while active > 0:
# Get the first ("encoded_posting", reader_number) pair and add it to
# the output buffer.
p, i = current[0]
output.append(p)
outputBufferSize += len(p)
# If the output buffer is full, "flush" it by yielding the accumulated
# postings back to the parent writer and clearing the output buffer.
if outputBufferSize > max_chunk_size:
for p in output:
yield decode_posting(p)
output = []
outputBufferSize = 0
# We need to replace the posting we just added to the output by getting
# the next posting from the same buffer.
if run_readers[i] is not None:
            # Take the next posting from buffer i and push it onto the "current"
            # heap. The heap invariant guarantees that the first item is always
            # the lowest.
p = run_readers[i].next()
if p:
heapreplace(current, (p, i))
else:
heappop(current)
active -= 1
# If there are still terms in the "current" list after all the readers are
# empty, dump them into the output buffer.
if len(current) > 0:
output.extend([p for p, i in current])
# If there's still postings in the output buffer, yield them all to the
# parent writer.
if len(output) > 0:
for p in output:
yield decode_posting(p)
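# Worked illustration of the loop above (comments only): suppose three run
# readers whose next encoded postings decode to the terms "cat", "ant" and
# "bee". After heapify, the "ant" posting sits at current[0]; it is copied to
# the output buffer and then either replaced (heapreplace) by the next posting
# from the same reader, or removed (heappop) if that reader is exhausted.
# Repeating this keeps the yielded stream globally sorted while holding only
# one posting per run in memory at a time.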
# Classes
class RunReader(object):
"""An iterator that yields posting strings from a "run" on disk.
This class buffers the reads to improve efficiency.
"""
def __init__(self, stream, count, buffer_size):
"""
:param stream: the file from which to read.
:param count: the number of postings in the stream.
:param buffer_size: the size (in bytes) of the read buffer to use.
"""
self.stream = stream
self.count = count
self.buffer_size = buffer_size
self.buffer = []
self.pointer = 0
self.finished = False
def close(self):
self.stream.close()
def _fill(self):
# Clears and refills the buffer.
# If this reader is exhausted, do nothing.
if self.finished:
return
# Clear the buffer.
buffer = self.buffer = []
# Reset the index at which the next() method
# reads from the buffer.
self.pointer = 0
# How much we've read so far.
so_far = 0
count = self.count
while so_far < self.buffer_size:
if count <= 0:
break
p = self.stream.read_string2()
buffer.append(p)
so_far += len(p)
count -= 1
self.count = count
def __iter__(self):
return self
def next(self):
assert self.pointer <= len(self.buffer)
if self.pointer == len(self.buffer):
self._fill()
            # If, after refilling, the buffer is still empty, we're at the end
            # of the file and should stop. Arguably this should raise
            # StopIteration instead of returning None.
if len(self.buffer) == 0:
self.finished = True
return None
r = self.buffer[self.pointer]
self.pointer += 1
return r
class PostingPool(object):
"""Represents the "pool" of all postings to be sorted. As documents are
added, this object writes out "runs" of sorted encoded postings. When all
documents have been added, this object merge sorts the runs from disk,
yielding decoded postings to the SegmentWriter.
"""
def __init__(self, limit):
"""
:param limit: the maximum amount of memory to use at once for adding
postings and the merge sort.
"""
self.limit = limit
self.size = 0
self.postings = []
self.finished = False
self.runs = []
self.tempfilenames = []
self.count = 0
def add_posting(self, field_num, text, doc, freq, datastring):
"""Adds a posting to the pool.
"""
if self.finished:
raise Exception("Can't add postings after you iterate over the pool")
if self.size >= self.limit:
#print "Flushing..."
self._flush_run()
posting = encode_posting(field_num, text, doc, freq, datastring)
self.size += len(posting)
self.postings.append(posting)
self.count += 1
def _flush_run(self):
# Called when the memory buffer (of size self.limit) fills up.
# Sorts the buffer and writes the current buffer to a "run" on disk.
if self.size > 0:
tempfd, tempname = tempfile.mkstemp(".whooshrun")
runfile = StructFile(os.fdopen(tempfd, "w+b"))
self.postings.sort()
for p in self.postings:
runfile.write_string2(p)
runfile.flush()
runfile.seek(0)
self.runs.append((runfile, self.count))
self.tempfilenames.append(tempname)
#print "Flushed run:", self.runs
self.postings = []
self.size = 0
self.count = 0
def __iter__(self):
# Iterating the PostingPool object performs a merge sort of the runs
# that have been written to disk and yields the sorted, decoded
# postings.
if self.finished:
raise Exception("Tried to iterate on PostingPool twice")
run_count = len(self.runs)
if self.postings and run_count == 0:
# Special case: we never accumulated enough postings to flush to
# disk, so the postings are still in memory: just yield them from
# there.
self.postings.sort()
for p in self.postings:
yield decode_posting(p)
return
if not self.postings and run_count == 0:
# No postings at all
return
if self.postings:
self._flush_run()
run_count = len(self.runs)
        # This method does an external merge to yield postings from the (n > 1)
        # runs built up during indexing and merging.
# Divide up the posting pool's memory limit between the number of runs
# plus an output buffer.
max_chunk_size = int(self.limit / (run_count + 1))
run_readers = [RunReader(run_file, count, max_chunk_size)
for run_file, count in self.runs]
for decoded_posting in merge(run_readers, max_chunk_size):
yield decoded_posting
for rr in run_readers:
assert rr.count == 0
rr.close()
for tempfilename in self.tempfilenames:
os.remove(tempfilename)
# And we're done.
self.finished = True
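if __name__ == "__main__":
    # Minimal demonstration (an addition for illustration, not part of the
    # original module; it assumes the whoosh helpers imported at the top are
    # available). With a generous memory limit the postings below never spill
    # to disk, so iteration takes the in-memory fast path in __iter__.
    demo_pool = PostingPool(limit=1024 * 1024)
    demo_pool.add_posting(0, u"banana", 3, 1, "")
    demo_pool.add_posting(0, u"apple", 1, 2, "")
    demo_pool.add_posting(1, u"apple", 2, 1, "")
    for fieldnum, text, doc, freq, datastring in demo_pool:
        # Postings come back sorted by field number, then term, then document.
        print fieldnum, text, doc, freq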
|
|
#!/usr/bin/env python
# Copyright 2017, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import os
from flask import Flask, request, jsonify, abort, make_response
from flask_basicauth import BasicAuth
from google.cloud import pubsub, logging
import datetime
from not_psq.task import Task
from not_psq.queue import Queue
from not_psq.safe_logger import Safe_Logger
import sys
import time
from isb_cgc_user_data.utils.build_config import read_dict
from isb_cgc_user_data.utils.processed_file import processed_name
from google.gax.errors import RetryError
#
# Make sure we come up with a unique name. Note that if this server were handling
# multiple requests at once (it is not, since we are just using the straight
# app.run), the check-then-create below would still have a race condition:
#
def time_stamped_unique(fname, fmt='%Y-%m-%d-%H-%M-%S-{num}_{fname}'):
num = 0
while True:
test_name = datetime.datetime.now().strftime(fmt).format(num=num, fname=fname)
test_name = os.path.join(app.config['UPLOAD_FOLDER'], test_name)
if not os.path.isfile(test_name):
return test_name
num += 1
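#
# Illustrative helper (an addition for clarity; it is not called by the server).
# With UPLOAD_FOLDER set to, say, '/tmp/udu', time_stamped_unique('config.json')
# returns something of the form '/tmp/udu/2017-03-19-10-42-07-0_config.json',
# bumping the trailing counter until the name is free:
#
def _example_unique_name():
    return time_stamped_unique('config.json')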
#
# Here we read the secrets file, build the Flask server, install config
# settings, and build the psq queues. Since we are using SSL, we get away
# with using Basic Authentication
#
my_secrets = read_dict('../config/udu_secrets.txt')
my_config = read_dict('../config/udu_config.txt')
PROJECT_ID = my_config['UDU_PSQ_PROJECT_ID']
UPLOAD_FOLDER = my_config['UDU_UPLOAD_FOLDER']
RESPONSE_LOCATION_PREFIX = my_config['UDU_RESPONSE_LOCATION']
PING_COUNT = int(my_config['UDU_PING_COUNT'])
STACKDRIVER_LOG = my_config['UDU_STACKDRIVER_LOG']
PSQ_TOPIC_NAME = my_config['UDU_PSQ_TOPIC_NAME']
# FLASK
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['BASIC_AUTH_USERNAME'] = my_secrets['UDU_PSQ_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = my_secrets['UDU_PSQ_PASSWORD']
basic_auth = BasicAuth(app)
# STACKDRIVER LOGGING
logger = Safe_Logger(STACKDRIVER_LOG)
#
# This is the guts of the server. Takes UDU job requests and queues them up
# for execution using psq:
#
@app.route('/jenkins/job/user-data-proc/buildWithParameters', methods=['POST'])
@basic_auth.required
def run_udu_job():
if request.method == 'POST':
#
# Extract the needed URLs and do sanity checking:
#
print >> sys.stderr, "Logging to " + STACKDRIVER_LOG
logger.log_text('request issued to user data upload server', severity='INFO')
success_url = request.args.get('SUCCESS_POST_URL')
failure_url = request.args.get('FAILURE_POST_URL')
if (not (success_url and success_url.strip()) or
not (failure_url and failure_url.strip())):
logger.log_text('Inbound request was missing response URLs', severity='WARNING')
print 'missing URLs'
return abort(400)
if 'config.json' not in request.files:
logger.log_text('Inbound request was missing config.json', severity='WARNING')
print 'missing config.json'
return abort(400)
my_file = request.files['config.json']
if not (my_file.filename and my_file.filename.strip()):
logger.log_text('Inbound request had empty filename', severity='WARNING')
print 'empty filename'
return abort(400)
if my_file.filename == 'config.json':
my_file_name = time_stamped_unique(my_file.filename)
my_file.save(my_file_name)
#
# WJRL 3/19/17: Google Pub/Sub behaves terribly if there is only one message
# published to a topic. It sits for ~10 minutes, or even more. We can deal with
# this by creating a pile of no-op calls before and after to flush the message
# queue:
#
pubsub_client = pubsub.Client(project=PROJECT_ID)
q = Queue(pubsub_client, name=PSQ_TOPIC_NAME)
logger.log_text('pub/sub stuffing with preamble pings', severity='INFO')
for _ in xrange(PING_COUNT):
try:
ping_task = {
'method': 'ping'
}
q.enqueue(Task(ping_task))
except RetryError:
time.sleep(2)
pubsub_client = pubsub.Client(project=PROJECT_ID)
q = Queue(pubsub_client, name=PSQ_TOPIC_NAME)
sending = True
try_count = 10
while sending and try_count > 0:
try:
logger.log_text('pub/sub issuing processing request', severity='INFO')
user_process_task = {
'method': 'buildWithParameters',
'file_name': my_file_name,
'success_url': success_url,
'failure_url': failure_url
}
q.enqueue(Task(user_process_task))
sending = False
except RetryError:
time.sleep(2)
pubsub_client = pubsub.Client(project=PROJECT_ID)
q = Queue(pubsub_client, name=PSQ_TOPIC_NAME)
try_count -= 1
if try_count <= 0:
print 'pub/sub failure'
return abort(400)
logger.log_text('pub/sub stuffing with postscript pings', severity='INFO')
for _ in xrange(PING_COUNT):
try:
ping_task = {
'method': 'ping'
}
q.enqueue(Task(ping_task))
except RetryError:
time.sleep(2)
pubsub_client = pubsub.Client(project=PROJECT_ID)
q = Queue(pubsub_client, name=PSQ_TOPIC_NAME)
resp = make_response(jsonify("processing"))
resp.headers['Location'] = RESPONSE_LOCATION_PREFIX + processed_name(my_file_name)
logger.log_text('response issued to caller', severity='INFO')
return resp
else:
logger.log_text('Unexpected filename', severity='WARNING')
print 'unexpected filename'
return abort(400)
else:
logger.log_text('Unexpected transport', severity='WARNING')
print 'unexpected transport'
return abort(400)
#
# We advertise a function that allows us to unclog the task queue with pings:
#
@app.route('/pipePing', methods=['GET'])
def pinger():
pubsub_client = pubsub.Client(project=PROJECT_ID)
q = Queue(pubsub_client, name=PSQ_TOPIC_NAME)
sending = True
try_count = 10
while sending and try_count > 0:
logger.log_text('processing ping request', severity='INFO')
try:
ping_task = {
'method': 'ping'
}
q.enqueue(Task(ping_task))
sending = False
except RetryError:
time.sleep(2)
pubsub_client = pubsub.Client(project=PROJECT_ID)
q = Queue(pubsub_client, name=PSQ_TOPIC_NAME)
try_count -= 1
if try_count <= 0:
print 'pub/sub failure'
return abort(400)
return jsonify("hello")
#
# We run the Flask server using https. Note that we are depending on Django to call us using https.
# We are not doing redirects to https:
#
if __name__ == '__main__':
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain('../config/flask-server.crt', '../config/flask-server.key')
logger.log_text('Starting up the UDU server', severity='INFO')
app.run(host='0.0.0.0', debug=False, ssl_context=context)
|
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises
)
from numpy.lib.index_tricks import (
mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
index_exp, ndindex, r_, s_, ix_
)
class TestRavelUnravelIndex(TestCase):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
assert_raises(ValueError, np.unravel_index, -1, (2, 2))
assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
assert_raises(ValueError, np.unravel_index, 4, (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))
assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4])
assert_equal(
np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4)
arr = np.array([[3, 6, 6], [4, 5, 1]])
assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
assert_equal(
np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
assert_equal(
np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
[12, 13, 13])
assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)
assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
[[3, 6, 6], [4, 5, 1]])
assert_equal(
np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
[[3, 6, 6], [4, 5, 1]])
assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
def test_big_indices(self):
# ravel_multi_index for big indices (issue #7546)
if np.intp == np.int64:
arr = ([1, 29], [3, 5], [3, 117], [19, 2],
[2379, 1284], [2, 2], [0, 1])
assert_equal(
np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
[5627771580, 117259570957])
# test overflow checking for too big array (issue #7546)
dummy_arr = ([0],[0])
half_max = np.iinfo(np.intp).max // 2
assert_equal(
np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
assert_raises(ValueError,
np.ravel_multi_index, dummy_arr, (half_max+1, 2))
assert_equal(
np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
assert_raises(ValueError,
np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F')
def test_dtypes(self):
# Test with different data types
for dtype in [np.int16, np.uint16, np.int32,
np.uint32, np.int64, np.uint64]:
coords = np.array(
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
shape = (5, 8)
uncoords = 8*coords[0]+coords[1]
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape))
uncoords = coords[0]+5*coords[1]
assert_equal(
np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
coords = np.array(
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
dtype=dtype)
shape = (5, 8, 10)
uncoords = 10*(8*coords[0]+coords[1])+coords[2]
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape))
uncoords = coords[0]+5*(coords[1]+8*coords[2])
assert_equal(
np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
def test_clipmodes(self):
# Test clipmodes
assert_equal(
np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
mode=(
'wrap', 'raise', 'clip', 'raise')),
np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
assert_raises(
ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))
def test_writeability(self):
# See gh-7269
x, y = np.unravel_index([1, 2, 3], (4, 5))
self.assertTrue(x.flags.writeable)
self.assertTrue(y.flags.writeable)
class TestGrid(TestCase):
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
assert_(a.shape == (10,))
assert_(b.shape == (20,))
assert_(a[0] == -1)
assert_almost_equal(a[-1], 1)
assert_(b[0] == -1)
assert_almost_equal(b[1]-b[0], 0.1, 11)
assert_almost_equal(b[-1], b[0]+19*0.1, 11)
assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)
def test_linspace_equivalence(self):
y, st = np.linspace(2, 10, retstep=1)
assert_almost_equal(st, 8/49.0)
assert_array_almost_equal(y, mgrid[2:10:50j], 13)
def test_nd(self):
c = mgrid[-1:1:10j, -2:2:10j]
d = mgrid[-1:1:0.1, -2:2:0.2]
assert_(c.shape == (2, 10, 10))
assert_(d.shape == (2, 20, 20))
assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd'))
assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11)
assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
0.1*np.ones(20, 'd'), 11)
assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
0.2*np.ones(20, 'd'), 11)
class TestConcatenator(TestCase):
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
b = np.ones(5)
c = r_[b, 0, 0, b]
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
def test_mixed_type(self):
g = r_[10.1, 1:10]
assert_(g.dtype == 'f8')
def test_more_mixed_type(self):
g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
assert_(g.dtype == 'f8')
def test_2d(self):
b = np.random.rand(5, 5)
c = np.random.rand(5, 5)
d = r_['1', b, c] # append columns
assert_(d.shape == (5, 10))
assert_array_equal(d[:, :5], b)
assert_array_equal(d[:, 5:], c)
d = r_[b, c]
assert_(d.shape == (10, 5))
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
def test_matrix(self):
a = [1, 2]
b = [3, 4]
ab_r = np.r_['r', a, b]
ab_c = np.r_['c', a, b]
assert_equal(type(ab_r), np.matrix)
assert_equal(type(ab_c), np.matrix)
assert_equal(np.array(ab_r), [[1,2,3,4]])
assert_equal(np.array(ab_c), [[1],[2],[3],[4]])
assert_raises(ValueError, lambda: np.r_['rc', a, b])
def test_matrix_scalar(self):
r = np.r_['r', [1, 2], 3]
assert_equal(type(r), np.matrix)
assert_equal(np.array(r), [[1,2,3]])
def test_matrix_builder(self):
a = np.array([1])
b = np.array([2])
c = np.array([3])
d = np.array([4])
actual = np.r_['a, b; c, d']
expected = np.bmat([[a, b], [c, d]])
assert_equal(actual, expected)
assert_equal(type(actual), type(expected))
class TestNdenumerate(TestCase):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
class TestIndexExpression(TestCase):
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
assert_equal(a[:-1], a[s_[:-1]])
assert_equal(a[:-1], a[index_exp[:-1]])
def test_simple_1(self):
a = np.random.rand(4, 5, 6)
assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
class TestIx_(TestCase):
def test_regression_1(self):
# Test empty inputs create outputs of indexing type, gh-5804
# Test both lists and arrays
for func in (range, np.arange):
a, = np.ix_(func(0))
assert_equal(a.dtype, np.intp)
def test_shape_and_dtype(self):
sizes = (4, 5, 3, 2)
# Test both lists and arrays
for func in (range, np.arange):
arrays = np.ix_(*[func(sz) for sz in sizes])
for k, (a, sz) in enumerate(zip(arrays, sizes)):
assert_equal(a.shape[k], sz)
assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
assert_(np.issubdtype(a.dtype, int))
def test_bool(self):
bool_a = [True, False, True, True]
int_a, = np.nonzero(bool_a)
assert_equal(np.ix_(bool_a)[0], int_a)
def test_1d_only(self):
idx2d = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, np.ix_, idx2d)
def test_repeated_input(self):
length_of_vector = 5
x = np.arange(length_of_vector)
out = ix_(x, x)
assert_equal(out[0].shape, (length_of_vector, 1))
assert_equal(out[1].shape, (1, length_of_vector))
# check that input shape is not modified
assert_equal(x.shape, (length_of_vector,))
def test_c_():
a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
def test_fill_diagonal():
a = np.zeros((3, 3), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]]))
#Test tall matrix
a = np.zeros((10, 3), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]))
#Test tall matrix wrap
a = np.zeros((10, 3), int)
fill_diagonal(a, 5, True)
yield (assert_array_equal, a,
np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[5, 0, 0],
[0, 5, 0]]))
#Test wide matrix
a = np.zeros((3, 10), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
# The same function can operate on a 4-d array:
a = np.zeros((3, 3, 3, 3), int)
fill_diagonal(a, 4)
i = np.array([0, 1, 2])
yield (assert_equal, np.where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
a[di] = 100
yield (assert_array_equal, a,
np.array([[100, 2, 3, 4],
[5, 100, 7, 8],
[9, 10, 100, 12],
[13, 14, 15, 100]]))
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
# And use it to set the diagonal of a zeros array to 1:
a = np.zeros((2, 2, 2), int)
a[d3] = 1
yield (assert_array_equal, a,
np.array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]]))
def test_diag_indices_from():
x = np.random.random((4, 4))
r, c = diag_indices_from(x)
assert_array_equal(r, np.arange(4))
assert_array_equal(c, np.arange(4))
def test_ndindex():
x = list(ndindex(1, 2, 3))
expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
assert_array_equal(x, expected)
x = list(ndindex((1, 2, 3)))
assert_array_equal(x, expected)
# Test use of scalars and tuples
x = list(ndindex((3,)))
assert_array_equal(x, list(ndindex(3)))
# Make sure size argument is optional
x = list(ndindex())
assert_equal(x, [()])
x = list(ndindex(()))
assert_equal(x, [()])
# Make sure 0-sized ndindex works correctly
x = list(ndindex(*[0]))
assert_equal(x, [])
if __name__ == "__main__":
run_module_suite()
|
|
from datetime import timedelta, date
from subprocess import call
from flask import (Blueprint, render_template, request, session, g,
redirect, url_for, abort, flash, send_file, escape,
current_app)
from cfmi import cache, db
from cfmi.billing.models import (User, Project, Session, Problem, Invoice,
Subject)
from cfmi.auth import (superuser_only, login_required,
authorized_users_only)
from cfmi.billing.utils import (
fiscal_year, total_last_month, limit_month, gchart_ytd_url, active_projects)
from cfmi.billing.views.api import invoice_send_email, problem_send_email
from cfmi.billing.forms import ROSessionForm, SessionForm, ProblemForm, ProblemRequestForm
frontend = Blueprint('billing', __name__, static_folder='../static',
template_folder='../templates')
## Views
@frontend.route('/')
@login_required
def index():
if not g.user.is_superuser():
return redirect(url_for("billing.user_portal"))
return render_template('billing.html')
@frontend.route('/user/')
@login_required
def user_portal():
today = date.today()
recent = []
unpaid = []
for project in g.user.get_projects():
# A kludgy way to get all the user's scans from the last month
if today.month == 1:
recent += project.invoice_scans(today.year-1, 12)
else:
recent += project.invoice_scans(today.year, today.month-1)
unpaid += Invoice.query.filter(
Invoice.project==project).filter(Invoice.reconciled==False).all()
return render_template('user.html', recent=recent, unpaid=unpaid)
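# The January rollover above can be captured in a tiny helper. A minimal
# sketch, illustrative only and not used by the routes in this module:
def _previous_month(d):
    """Return (year, month) of the calendar month before date d."""
    return (d.year - 1, 12) if d.month == 1 else (d.year, d.month - 1)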
@frontend.route('/reconcile/')
@superuser_only
def reconcile():
outstanding = Invoice.query.filter(Invoice.reconciled==False).order_by(Invoice.date)
paid = Invoice.query.filter(Invoice.reconciled==True).order_by(Invoice.date)
return render_template('reconcile.html', due=outstanding, paid=paid)
@frontend.route('/invoice/<int:invoice_id>/')
@authorized_users_only
def invoice_view(invoice_id):
inv = Invoice.query.get(invoice_id)
if not inv:
abort(404)
return render_template('invoice.html', invoice=inv)
@frontend.route('/invoice/<int:id>/delete')
@superuser_only
def invoice_delete(id):
inv = Invoice.query.get(id)
if not inv:
abort(404)
db.session.delete(inv)
db.session.commit()
return redirect(url_for('billing.reconcile'))
@frontend.route('/invoice/<int:id>/paid')
@superuser_only
def invoice_paid(id):
inv = Invoice.query.get(id)
if not inv:
abort(404)
inv.reconciled = True
db.session.commit()
return redirect(url_for('billing.reconcile'))
@frontend.route('/invoice/<int:id>/unpaid')
@superuser_only
def invoice_unpaid(id):
inv = Invoice.query.get(id)
if not inv:
abort(404)
inv.reconciled = False
db.session.commit()
return redirect(url_for('billing.reconcile'))
@frontend.route('/invoice/<int:invoice_id>/notify')
@superuser_only
def invoice_notify(invoice_id):
inv = Invoice.query.get(invoice_id)
if not inv:
abort(404)
invoice_send_email(invoice_id)
return redirect(url_for('billing.reconcile'))
@frontend.route('/stats/')
@login_required
def statistics():
return render_template(
'stats.html', ytd=fiscal_year(), lastyear=fiscal_year(2010),
lastmonth=total_last_month(), gchart_ytd_url=gchart_ytd_url(),
sessions=len(Session.query.all()), subjects=len(Subject.query.all()))
@frontend.route('/batch/')
@superuser_only
def batch():
return render_template('batch.html')
@frontend.route('/batch/report')
@superuser_only
def batch_report():
if 'year' not in request.args or 'month' not in request.args:
abort(404)
year = int(request.args['year'])
month = int(request.args['month'])
projects = active_projects(year, month)
return render_template('report.html', projects=projects, date=date(year, month, 1) )
@frontend.route('/invoice/<invoice_id>')
@authorized_users_only
def invoice(invoice_id):
inv = Invoice.query.get(invoice_id)
if not inv:
abort(404)
return inv.render()
@frontend.route('/<pi_uname>/<int:year>/<int:month>/')
@authorized_users_only
def pi_month_view(pi_uname, year, month):
pi = User.query.filter(User.username==pi_uname).first()
if not pi:
abort(404)
if 'format' in request.args:
if request.args['format'] == 'tex':
return render_template('invoice.tex', pi=pi,
date=date(year,month,1))
if request.args['format'] == 'pdf':
tex = render_template('invoice.tex', pi=pi,
date=date(year,month,1))
path = '/tmp/invoice-%s_%s-%s' % (pi_uname, month, year)
tmpfile = open(path+'.tex', 'w')
tmpfile.write(tex)
tmpfile.close()
r = call(['pdflatex', path+'.tex'], cwd='/tmp/')
path = path+'.pdf'
return send_file(path, as_attachment=True)
mindate = date(year, month, 1)
query = Session.query.join(Project).filter(
Project.pi==pi)
scans = limit_month(query, year, month)
total = sum(float(scan.cost()) for scan in scans)
total = "{0:.2f}".format(total)
return render_template('pi_month_view.html', pi=pi,
date=mindate, total=total)
@frontend.route('/session/<int:session_id>/', methods=['GET', 'POST'])
@authorized_users_only
def edit_session(session_id):
scan = Session.query.get(session_id)
if not scan:
abort(404)
form = SessionForm(obj=scan)
if form.validate_on_submit():
if not g.user.is_superuser():
flash("Permission Denied")
return redirect(request.url)
form.populate_obj(scan)
try:
db.session.commit()
flash("Sucess: Session Modified")
except:
flash("Failed to update database")
db.session.rollback()
return redirect(request.url)
if g.user.is_superuser():
return render_template('scan_form.html', scan=scan,
form=form)
return render_template('session.html', scan=scan, form=form)
@frontend.route('/session/<int:session_id>/problem/delete/')
@superuser_only
def del_problem(session_id):
scan = Session.query.get(session_id)
if not scan:
abort(404)
prob = scan.problem
if not scan.problem:
abort(404)
try:
db.session.delete(prob)
db.session.commit()
flash("Success: Removed billing correction", category='success')
except:
db.session.rollback()
flash("Database error", category='error')
return redirect(url_for('billing.edit_session', session_id=scan.id))
@frontend.route('/session/<int:session_id>/problem/', methods=['GET', 'POST'])
@authorized_users_only
def problem(session_id):
scan = Session.query.get(session_id)
if not scan:
abort(404)
prob = scan.problem if scan.problem else Problem(scan)
form = ProblemForm(obj=prob)
if form.validate_on_submit():
if not g.user.is_superuser():
abort(403)
try:
prob.scan = scan
form.populate_obj(prob)
db.session.add(prob)
db.session.commit()
flash("Sucess: Problem added or modified", category='success')
except:
flash("Failed: Could not update database", category='error')
db.session.rollback()
return redirect(url_for('billing.edit_session', session_id=scan.id))
if g.user.is_superuser():
return render_template('problem_form.html', scan=scan,
form=form)
return render_template('problem_request.html', scan=scan, form=ProblemRequestForm())
@frontend.route('/session/<int:session_id>/problem/usersubmit', methods=['POST'])
@authorized_users_only
def problem_request(session_id):
scan = Session.query.get(session_id)
if not scan:
abort(404)
form = ProblemRequestForm()
if form.validate_on_submit():
flash("We've received your report, you'll be notified of any changes to your invoice",
category='success')
problem_send_email(session_id, form.problem, form.duration)
return redirect(url_for("billing.edit_session", session_id=session_id))
return redirect(url_for('billing.problem', session_id=session_id))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urlparse
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.nicira import NvpApiClient
LOG = logging.getLogger(__name__)
MAX_NAME_LEN = 40
def _validate_name(name):
if name and len(name) > MAX_NAME_LEN:
raise Exception("Logical switch name exceeds %d characters",
MAX_NAME_LEN)
def _validate_resource(body):
_validate_name(body.get('display_name'))
class FakeClient:
LSWITCH_RESOURCE = 'lswitch'
LPORT_RESOURCE = 'lport'
LROUTER_RESOURCE = 'lrouter'
NAT_RESOURCE = 'nat'
LQUEUE_RESOURCE = 'lqueue'
SECPROF_RESOURCE = 'securityprofile'
LSWITCH_STATUS = 'lswitchstatus'
LROUTER_STATUS = 'lrouterstatus'
LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
LROUTER_LPORT_RESOURCE = 'lrouter_lport'
LROUTER_NAT_RESOURCE = 'lrouter_nat'
LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
LROUTER_LPORT_ATT = 'lrouter_lportattachment'
GWSERVICE_RESOURCE = 'gatewayservice'
RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE,
LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE,
GWSERVICE_RESOURCE]
FAKE_GET_RESPONSES = {
LSWITCH_RESOURCE: "fake_get_lswitch.json",
LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
LROUTER_RESOURCE: "fake_get_lrouter.json",
LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
LROUTER_STATUS: "fake_get_lrouter_status.json",
LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json",
SECPROF_RESOURCE: "fake_get_security_profile.json",
LQUEUE_RESOURCE: "fake_get_lqueue.json",
GWSERVICE_RESOURCE: "fake_get_gwservice.json"
}
FAKE_POST_RESPONSES = {
LSWITCH_RESOURCE: "fake_post_lswitch.json",
LROUTER_RESOURCE: "fake_post_lrouter.json",
LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
SECPROF_RESOURCE: "fake_post_security_profile.json",
LQUEUE_RESOURCE: "fake_post_lqueue.json",
GWSERVICE_RESOURCE: "fake_post_gwservice.json"
}
FAKE_PUT_RESPONSES = {
LSWITCH_RESOURCE: "fake_post_lswitch.json",
LROUTER_RESOURCE: "fake_post_lrouter.json",
LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
SECPROF_RESOURCE: "fake_post_security_profile.json",
LQUEUE_RESOURCE: "fake_post_lqueue.json",
GWSERVICE_RESOURCE: "fake_post_gwservice.json"
}
MANAGED_RELATIONS = {
LSWITCH_RESOURCE: [],
LROUTER_RESOURCE: [],
LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
}
_validators = {
LSWITCH_RESOURCE: _validate_resource,
LSWITCH_LPORT_RESOURCE: _validate_resource,
LROUTER_LPORT_RESOURCE: _validate_resource,
SECPROF_RESOURCE: _validate_resource,
LQUEUE_RESOURCE: _validate_resource,
GWSERVICE_RESOURCE: _validate_resource
}
def __init__(self, fake_files_path):
self.fake_files_path = fake_files_path
self._fake_lswitch_dict = {}
self._fake_lrouter_dict = {}
self._fake_lswitch_lport_dict = {}
self._fake_lrouter_lport_dict = {}
self._fake_lrouter_nat_dict = {}
self._fake_lswitch_lportstatus_dict = {}
self._fake_lrouter_lportstatus_dict = {}
self._fake_securityprofile_dict = {}
self._fake_lqueue_dict = {}
self._fake_gatewayservice_dict = {}
def _get_tag(self, resource, scope):
tags = [tag['tag'] for tag in resource['tags']
if tag['scope'] == scope]
return len(tags) > 0 and tags[0]
def _get_filters(self, querystring):
if not querystring:
return (None, None, None, None)
params = urlparse.parse_qs(querystring)
tag_filter = None
attr_filter = None
if 'tag' in params and 'tag_scope' in params:
tag_filter = {'scope': params['tag_scope'][0],
'tag': params['tag'][0]}
elif 'uuid' in params:
attr_filter = {'uuid': params['uuid'][0]}
# Handle page length and page cursor parameter
page_len = params.get('_page_length')
page_cursor = params.get('_page_cursor')
if page_len:
page_len = int(page_len[0])
else:
# Explicitly set it to None (avoid 0 or empty list)
page_len = None
return (tag_filter, attr_filter, page_len, page_cursor)
def _add_lswitch(self, body):
fake_lswitch = json.loads(body)
fake_lswitch['uuid'] = uuidutils.generate_uuid()
self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
# put the tenant_id and the zone_uuid in the main dict
# for simplifying templating
zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
fake_lswitch['zone_uuid'] = zone_uuid
fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
fake_lswitch['lport_count'] = 0
# set status value
fake_lswitch['status'] = 'true'
return fake_lswitch
def _build_lrouter(self, body, uuid=None):
fake_lrouter = json.loads(body)
if uuid:
fake_lrouter['uuid'] = uuid
fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
default_nexthop = fake_lrouter['routing_config'].get(
'default_route_next_hop')
if default_nexthop:
fake_lrouter['default_next_hop'] = default_nexthop.get(
'gateway_ip_address', '0.0.0.0')
else:
fake_lrouter['default_next_hop'] = '0.0.0.0'
# NOTE(salv-orlando): We won't make the Fake NVP API client
# aware of NVP version. The long term plan is to replace it
# with behavioral mocking of NVP API requests
if 'distributed' not in fake_lrouter:
fake_lrouter['distributed'] = False
distributed_json = ('"distributed": %s,' %
str(fake_lrouter['distributed']).lower())
fake_lrouter['distributed_json'] = distributed_json
return fake_lrouter
def _add_lrouter(self, body):
fake_lrouter = self._build_lrouter(body,
uuidutils.generate_uuid())
self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
fake_lrouter['lport_count'] = 0
# set status value
fake_lrouter['status'] = 'true'
return fake_lrouter
def _add_lqueue(self, body):
fake_lqueue = json.loads(body)
fake_lqueue['uuid'] = uuidutils.generate_uuid()
self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue
return fake_lqueue
def _add_lswitch_lport(self, body, ls_uuid):
fake_lport = json.loads(body)
new_uuid = uuidutils.generate_uuid()
fake_lport['uuid'] = new_uuid
# put the tenant_id and the ls_uuid in the main dict
# for simplifying templating
fake_lport['ls_uuid'] = ls_uuid
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
'q_port_id')
fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id')
fake_lport['att_type'] = "NoAttachment"
fake_lport['att_info_json'] = ''
self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
fake_lswitch['lport_count'] += 1
fake_lport_status = fake_lport.copy()
fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
fake_lport_status['ls_name'] = fake_lswitch['display_name']
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
# set status value
fake_lport['status'] = 'true'
self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
return fake_lport
def _build_lrouter_lport(self, body, new_uuid=None, lr_uuid=None):
fake_lport = json.loads(body)
if new_uuid:
fake_lport['uuid'] = new_uuid
# put the tenant_id and the lr_uuid in the main dict
# for simplifying templating
if lr_uuid:
fake_lport['lr_uuid'] = lr_uuid
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
'q_port_id')
# replace ip_address with its json dump
if 'ip_addresses' in fake_lport:
ip_addresses_json = json.dumps(fake_lport['ip_addresses'])
fake_lport['ip_addresses_json'] = ip_addresses_json
return fake_lport
def _add_lrouter_lport(self, body, lr_uuid):
new_uuid = uuidutils.generate_uuid()
fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid)
self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
try:
fake_lrouter = self._fake_lrouter_dict[lr_uuid]
except KeyError:
raise NvpApiClient.ResourceNotFound()
fake_lrouter['lport_count'] += 1
fake_lport_status = fake_lport.copy()
fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
fake_lport_status['lr_name'] = fake_lrouter['display_name']
self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
return fake_lport
def _add_securityprofile(self, body):
fake_securityprofile = json.loads(body)
fake_securityprofile['uuid'] = uuidutils.generate_uuid()
fake_securityprofile['tenant_id'] = self._get_tag(
fake_securityprofile, 'os_tid')
fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile,
'nova_spid')
self._fake_securityprofile_dict[fake_securityprofile['uuid']] = (
fake_securityprofile)
return fake_securityprofile
def _add_lrouter_nat(self, body, lr_uuid):
fake_nat = json.loads(body)
new_uuid = uuidutils.generate_uuid()
fake_nat['uuid'] = new_uuid
fake_nat['lr_uuid'] = lr_uuid
self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
if 'match' in fake_nat:
match_json = json.dumps(fake_nat['match'])
fake_nat['match_json'] = match_json
return fake_nat
def _add_gatewayservice(self, body):
fake_gwservice = json.loads(body)
fake_gwservice['uuid'] = str(uuidutils.generate_uuid())
fake_gwservice['tenant_id'] = self._get_tag(
fake_gwservice, 'os_tid')
# FIXME(salvatore-orlando): For simplicity we're managing only a
# single device. Extend the fake client for supporting multiple devices
first_gw = fake_gwservice['gateways'][0]
fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid']
fake_gwservice['device_id'] = first_gw['device_id']
self._fake_gatewayservice_dict[fake_gwservice['uuid']] = (
fake_gwservice)
return fake_gwservice
def _build_relation(self, src, dst, resource_type, relation):
if relation not in self.MANAGED_RELATIONS[resource_type]:
return # Relation is not desired in output
if not '_relations' in src or not src['_relations'].get(relation):
return # Item does not have relation
relation_data = src['_relations'].get(relation)
dst_relations = dst.get('_relations', {})
dst_relations[relation] = relation_data
dst['_relations'] = dst_relations
def _fill_attachment(self, att_data, ls_uuid=None,
lr_uuid=None, lp_uuid=None):
new_data = att_data.copy()
for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
if locals().get(k):
new_data[k] = locals()[k]
def populate_field(field_name):
if field_name in att_data:
new_data['%s_field' % field_name] = ('"%s" : "%s",'
% (field_name,
att_data[field_name]))
del new_data[field_name]
else:
new_data['%s_field' % field_name] = ""
for field in ['vif_uuid', 'peer_port_href', 'vlan_id',
'peer_port_uuid', 'l3_gateway_service_uuid']:
populate_field(field)
return new_data
def _get_resource_type(self, path):
"""Get resource type.
Identifies resource type and relevant uuids in the uri
/ws.v1/lswitch/xxx
/ws.v1/lswitch/xxx/status
/ws.v1/lswitch/xxx/lport/yyy
/ws.v1/lswitch/xxx/lport/yyy/status
/ws.v1/lrouter/zzz
/ws.v1/lrouter/zzz/status
/ws.v1/lrouter/zzz/lport/www
/ws.v1/lrouter/zzz/lport/www/status
/ws.v1/lqueue/xxx
"""
# The first element will always be 'ws.v1' - so we just discard it
uri_split = path.split('/')[1:]
# parse uri_split backwards
suffix = ""
idx = len(uri_split) - 1
if 'status' in uri_split[idx]:
suffix = "status"
idx = idx - 1
elif 'attachment' in uri_split[idx]:
suffix = "attachment"
idx = idx - 1
# then check if we have an uuid
uuids = []
if uri_split[idx].replace('-', '') not in self.RESOURCES:
uuids.append(uri_split[idx])
idx = idx - 1
resource_type = "%s%s" % (uri_split[idx], suffix)
if idx > 1:
uuids.insert(0, uri_split[idx - 1])
resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
return (resource_type.replace('-', ''), uuids)
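# Illustrative expectations for _get_resource_type (not part of the original
# file; derived from the URI patterns listed in the docstring above):
#   '/ws.v1/lrouter/zzz'                  -> ('lrouter', ['zzz'])
#   '/ws.v1/lswitch/xxx/lport/yyy/status' -> ('lswitch_lportstatus', ['xxx', 'yyy'])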
def _list(self, resource_type, response_file,
parent_uuid=None, query=None, relations=None):
(tag_filter, attr_filter,
page_len, page_cursor) = self._get_filters(query)
# result_count attribute in response should appear only when
# page_cursor is not specified
do_result_count = not page_cursor
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
if parent_uuid == '*':
parent_uuid = None
# NSX raises ResourceNotFound if lswitch doesn't exist and is not *
elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE:
raise NvpApiClient.ResourceNotFound()
def _attr_match(res_uuid):
if not attr_filter:
return True
item = res_dict[res_uuid]
for (attr, value) in attr_filter.iteritems():
if item.get(attr) != value:
return False
return True
def _tag_match(res_uuid):
if not tag_filter:
return True
return any([x['scope'] == tag_filter['scope'] and
x['tag'] == tag_filter['tag']
for x in res_dict[res_uuid]['tags']])
def _lswitch_match(res_uuid):
# verify that the switch exist
if parent_uuid and not parent_uuid in self._fake_lswitch_dict:
raise Exception(_("lswitch:%s not found") % parent_uuid)
if (not parent_uuid
or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
return True
return False
def _lrouter_match(res_uuid):
# verify that the router exist
if parent_uuid and not parent_uuid in self._fake_lrouter_dict:
raise Exception(_("lrouter:%s not found") % parent_uuid)
if (not parent_uuid or
res_dict[res_uuid].get('lr_uuid') == parent_uuid):
return True
return False
def _cursor_match(res_uuid, page_cursor):
if not page_cursor:
return True
if page_cursor == res_uuid:
# always return True once page_cursor has been found
page_cursor = None
return True
return False
def _build_item(resource):
item = json.loads(response_template % resource)
if relations:
for relation in relations:
self._build_relation(resource, item,
resource_type, relation)
return item
for item in res_dict.itervalues():
if 'tags' in item:
item['tags_json'] = json.dumps(item['tags'])
if resource_type in (self.LSWITCH_LPORT_RESOURCE,
self.LSWITCH_LPORT_ATT,
self.LSWITCH_LPORT_STATUS):
parent_func = _lswitch_match
elif resource_type in (self.LROUTER_LPORT_RESOURCE,
self.LROUTER_LPORT_ATT,
self.LROUTER_NAT_RESOURCE,
self.LROUTER_LPORT_STATUS):
parent_func = _lrouter_match
else:
parent_func = lambda x: True
items = [_build_item(res_dict[res_uuid])
for res_uuid in res_dict
if (parent_func(res_uuid) and
_tag_match(res_uuid) and
_attr_match(res_uuid) and
_cursor_match(res_uuid, page_cursor))]
# Rather inefficient, but hey this is just a mock!
next_cursor = None
total_items = len(items)
if page_len:
try:
next_cursor = items[page_len]['uuid']
except IndexError:
next_cursor = None
items = items[:page_len]
response_dict = {'results': items}
if next_cursor:
response_dict['page_cursor'] = next_cursor
if do_result_count:
response_dict['result_count'] = total_items
return json.dumps(response_dict)
def _show(self, resource_type, response_file,
uuid1, uuid2=None, relations=None):
target_uuid = uuid2 or uuid1
if resource_type.endswith('attachment'):
resource_type = resource_type[:resource_type.index('attachment')]
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
for item in res_dict.itervalues():
if 'tags' in item:
item['tags_json'] = json.dumps(item['tags'])
# replace sec prof rules with their json dump
def jsonify_rules(rule_key):
if rule_key in item:
rules_json = json.dumps(item[rule_key])
item['%s_json' % rule_key] = rules_json
jsonify_rules('logical_port_egress_rules')
jsonify_rules('logical_port_ingress_rules')
items = [json.loads(response_template % res_dict[res_uuid])
for res_uuid in res_dict if res_uuid == target_uuid]
if items:
return json.dumps(items[0])
raise NvpApiClient.ResourceNotFound()
def handle_get(self, url):
#TODO(salvatore-orlando): handle field selection
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
relations = urlparse.parse_qs(parsedurl.query).get('relations')
response_file = self.FAKE_GET_RESPONSES.get(res_type)
if not response_file:
raise NvpApiClient.NvpApiException()
if 'lport' in res_type or 'nat' in res_type:
if len(uuids) > 1:
return self._show(res_type, response_file, uuids[0],
uuids[1], relations=relations)
else:
return self._list(res_type, response_file, uuids[0],
query=parsedurl.query, relations=relations)
elif ('lswitch' in res_type or
'lrouter' in res_type or
self.SECPROF_RESOURCE in res_type or
self.LQUEUE_RESOURCE in res_type or
'gatewayservice' in res_type):
LOG.debug("UUIDS:%s", uuids)
if uuids:
return self._show(res_type, response_file, uuids[0],
relations=relations)
else:
return self._list(res_type, response_file,
query=parsedurl.query,
relations=relations)
else:
raise Exception("unknown resource:%s" % res_type)
def handle_post(self, url, body):
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_POST_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
add_resource = getattr(self, '_add_%s' % res_type)
body_json = json.loads(body)
val_func = self._validators.get(res_type)
if val_func:
val_func(body_json)
args = [body]
if uuids:
args.append(uuids[0])
response = response_template % add_resource(*args)
return response
def handle_put(self, url, body):
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
# Manage attachment operations
is_attachment = False
if res_type.endswith('attachment'):
is_attachment = True
res_type = res_type[:res_type.index('attachment')]
res_dict = getattr(self, '_fake_%s_dict' % res_type)
body_json = json.loads(body)
val_func = self._validators.get(res_type)
if val_func:
val_func(body_json)
try:
resource = res_dict[uuids[-1]]
except KeyError:
raise NvpApiClient.ResourceNotFound()
if not is_attachment:
edit_resource = getattr(self, '_build_%s' % res_type, None)
if edit_resource:
body_json = edit_resource(body)
resource.update(body_json)
else:
relations = resource.get("_relations", {})
body_2 = json.loads(body)
resource['att_type'] = body_2['type']
relations['LogicalPortAttachment'] = body_2
resource['_relations'] = relations
if body_2['type'] == "PatchAttachment":
# We need to do a trick here
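# (a PatchAttachment connects an lswitch port to an lrouter port, so the
# peer port lives in the dictionary of the opposite resource type; the
# attachment info is mirrored onto that peer as well)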
if self.LROUTER_RESOURCE in res_type:
res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
self.LSWITCH_RESOURCE)
elif self.LSWITCH_RESOURCE in res_type:
res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
self.LROUTER_RESOURCE)
res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
body_2['peer_port_uuid'] = uuids[-1]
resource_2 = res_dict_2[json.loads(body)['peer_port_uuid']]
relations_2 = resource_2.get("_relations")
if not relations_2:
relations_2 = {}
relations_2['LogicalPortAttachment'] = body_2
resource_2['_relations'] = relations_2
resource['peer_port_uuid'] = body_2['peer_port_uuid']
resource['att_info_json'] = (
"\"peer_port_uuid\": \"%s\"," %
resource_2['uuid'])
resource_2['att_info_json'] = (
"\"peer_port_uuid\": \"%s\"," %
body_2['peer_port_uuid'])
elif body_2['type'] == "L3GatewayAttachment":
resource['attachment_gwsvc_uuid'] = (
body_2['l3_gateway_service_uuid'])
resource['vlan_id'] = body_2.get('vlan_id')
elif body_2['type'] == "L2GatewayAttachment":
resource['attachment_gwsvc_uuid'] = (
body_2['l2_gateway_service_uuid'])
elif body_2['type'] == "VifAttachment":
resource['vif_uuid'] = body_2['vif_uuid']
resource['att_info_json'] = (
"\"vif_uuid\": \"%s\"," % body_2['vif_uuid'])
if not is_attachment:
response = response_template % resource
else:
if res_type == self.LROUTER_LPORT_RESOURCE:
lr_uuid = uuids[0]
ls_uuid = None
elif res_type == self.LSWITCH_LPORT_RESOURCE:
ls_uuid = uuids[0]
lr_uuid = None
lp_uuid = uuids[1]
response = response_template % self._fill_attachment(
json.loads(body), ls_uuid, lr_uuid, lp_uuid)
return response
def handle_delete(self, url):
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
res_dict = getattr(self, '_fake_%s_dict' % res_type)
try:
del res_dict[uuids[-1]]
except KeyError:
raise NvpApiClient.ResourceNotFound()
return ""
def fake_request(self, *args, **kwargs):
method = args[0]
handler = getattr(self, "handle_%s" % method.lower())
return handler(*args[1:])
def reset_all(self):
self._fake_lswitch_dict.clear()
self._fake_lrouter_dict.clear()
self._fake_lswitch_lport_dict.clear()
self._fake_lrouter_lport_dict.clear()
self._fake_lswitch_lportstatus_dict.clear()
self._fake_lrouter_lportstatus_dict.clear()
self._fake_lqueue_dict.clear()
self._fake_securityprofile_dict.clear()
self._fake_gatewayservice_dict.clear()
|
|
import os
import ConfigParser
import logging
import exceptions
from OpenSSL import crypto
from keystoneclient import session
from keystoneclient.auth.identity import v2 as v2_client
from keystoneclient.auth.identity import v3 as v3_client
from barbicanclient import client
class BarbicanKeystoneSession(object):
def __init__(self):
self.admin_user = None
self.admin_password = None
self.project_name = None
self.auth_url = None
self.auth_version = '2'
self.region = 'RegionOne'
self.session = {}
def parse_args(self, auth_conf=None):
config = ConfigParser.SafeConfigParser()
if (auth_conf):
self.auth_conf = auth_conf
else:
self.auth_conf = '/etc/contrail/contrail-lbaas-auth.conf'
config.read(self.auth_conf)
self.admin_user = config.get('BARBICAN', 'admin_user')
self.admin_password = config.get('BARBICAN', 'admin_password')
self.project_name = config.get('BARBICAN', 'admin_tenant_name')
self.auth_url = config.get('BARBICAN', 'auth_url')
tmp_auth_url = self.auth_url
if (tmp_auth_url[-1] == '/'):
    tmp_auth_url = tmp_auth_url[:-1]
auth_version = tmp_auth_url.split('/')[-1]
if (auth_version.lower() == 'v2.0'):
self.auth_version = '2'
elif (auth_version.lower() == 'v3'):
self.auth_version = '3'
self.admin_user_domain = config.get('BARBICAN', 'admin_user_domain')
self.admin_project_domain = config.get('BARBICAN', 'admin_project_domain')
try:
self.region = config.get('BARBICAN', 'region')
except Exception:
pass
def get_session(self, auth_conf=None):
if self.session.get(self.project_name):
return self.session[self.project_name]
self.parse_args(auth_conf)
kwargs = {'auth_url': self.auth_url,
'username': self.admin_user,
'password': self.admin_password}
if self.auth_version == '2':
client = v2_client
kwargs['tenant_name'] = self.project_name
elif self.auth_version == '3':
client = v3_client
kwargs['project_name'] = self.project_name
kwargs['user_domain_name'] = self.admin_user_domain
kwargs['project_domain_name'] = self.admin_project_domain
try:
kc = client.Password(**kwargs)
self.session[self.project_name] = session.Session(auth=kc)
except exceptions.Exception as e:
logging.exception('Error creating Keystone session')
logging.error(e.__class__)
logging.error(e.__doc__)
logging.error(e.message)
return None
return self.session[self.project_name]
class Cert:
def __init__(self, cert_container):
self._cert_container = cert_container
def get_certificate(self):
if self._cert_container.certificate:
try:
payload = self._cert_container.certificate.payload
return True, payload
except exceptions.Exception as e:
logging.exception('')
logging.error(e.__class__)
logging.error(e.__doc__)
logging.error(e.message)
return False, None
return True, None
def get_intermediates(self):
if self._cert_container.intermediates:
try:
payload = self._cert_container.intermediates.payload
return True, payload
except exceptions.Exception as e:
logging.exception('')
logging.error(e.__class__)
logging.error(e.__doc__)
logging.error(e.message)
return False, None
return True, None
def get_private_key(self):
if self._cert_container.private_key:
try:
payload = self._cert_container.private_key.payload
return True, payload
except exceptions.Exception as e:
logging.exception('')
logging.error(e.__class__)
logging.error(e.__doc__)
logging.error(e.message)
return False, None
return True, None
def get_private_key_passphrase(self):
if self._cert_container.private_key_passphrase:
try:
payload = self._cert_container.private_key_passphrase.payload
return True, payload
except exceptions.Exception as e:
msg = "Error in getting Barbican Private-Key-PassPhrase from Container"
logging.exception('')
logging.error(e.__class__)
logging.error(e.__doc__)
logging.error(e.message)
return False, None
return True, None
class TLSContainer:
def __init__(self, id=None, certificate=None, private_key=None,
passphrase=None, intermediates=None, primary_cn=None):
self.id = id
self.certificate = certificate
self.private_key = private_key
self.passphrase = passphrase
self.intermediates = intermediates
self.primary_cn = primary_cn
def build_pem(tls_cert):
pem = ()
if tls_cert.intermediates:
for c in tls_cert.intermediates:
pem = pem + (c,)
if tls_cert.certificate:
pem = pem + (tls_cert.certificate,)
if tls_cert.private_key:
pem = pem + (tls_cert.private_key,)
pem = "\n".join(pem)
return pem
def get_primary_cn(certificate):
cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
subject = cert.get_subject()
issued_to = subject.CN
issuer = cert.get_issuer()
issued_by = issuer.CN
return issued_to
def get_tls_certificates(barbican, url):
try:
container = barbican.containers.get(url)
except exceptions.Exception as e:
msg = "Error in getting Barbican Containers for url %s" %url
logging.exception(msg)
logging.error(e.__class__)
logging.error(e.__doc__)
logging.error(e.message)
return None
cert = Cert(container)
status, certificate = cert.get_certificate()
if (status == False):
msg = "Error in getting Barbican Certficates from Container %s" %url
logging.error(msg)
return None
primary_cn = get_primary_cn(certificate)
status, private_key = cert.get_private_key()
if (status == False):
msg = "Error in getting Barbican Private-Key from Container %s" %url
logging.error(msg)
return None
status, intermediates = cert.get_intermediates()
if (status == False):
msg = "Error in getting Barbican Intermediates from Container %s" %url
logging.error(msg)
return None
return TLSContainer(
primary_cn=primary_cn,
private_key=private_key,
certificate=certificate,
intermediates=intermediates)
def create_pem_file(barbican, url, dest_dir):
tls_cert = get_tls_certificates(barbican, url)
if tls_cert is None:
return None
pem = build_pem(tls_cert)
pem_file_name = dest_dir + '/'+ tls_cert.primary_cn + '.pem'
f = open(pem_file_name, 'w')
f.write(pem)
f.close()
return pem_file_name
def update_ssl_config(haproxy_config, auth_conf, dest_dir):
barb_auth = BarbicanKeystoneSession()
sess = barb_auth.get_session(auth_conf)
if sess is None:
return None
barbican = client.Client(session=sess)
updated_config = haproxy_config
for line in haproxy_config.split('\n'):
if 'ssl crt http' in line:
try:
url_list = filter(lambda x: x.startswith('http'), line.split(' '))
except IndexError:
return None
for url in url_list or []:
pem_file_name = create_pem_file(barbican, url, dest_dir)
if pem_file_name is None:
return None
updated_config = updated_config.replace(url, pem_file_name)
return updated_config
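# Illustrative usage of update_ssl_config (not part of the original module;
# the haproxy config path and destination directory are placeholders):
#
#   with open('/etc/haproxy/haproxy.cfg') as cfg:
#       new_cfg = update_ssl_config(cfg.read(),
#                                   '/etc/contrail/contrail-lbaas-auth.conf',
#                                   '/var/lib/contrail/certs')
#   # new_cfg is None on any Barbican/Keystone failure; otherwise it is the
#   # config text with each "ssl crt http..." URL replaced by a local .pem path.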
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import cv2
from tools.rand_sampler import RandSampler
class DetRecordIter(mx.io.DataIter):
"""
The new detection iterator wrapper for mx.io.ImageDetRecordIter, which is
written in C++; it takes a record file as input and runs faster.
Supports various augment operations for object detection.
Parameters:
-----------
path_imgrec : str
path to the record file
path_imglist : str
path to the list file to replace the labels in record
batch_size : int
batch size
data_shape : tuple
(3, height, width)
label_width : int
specify the label width, use -1 for variable length
label_pad_width : int
labels must have same shape in batches, use -1 for automatic estimation
in each record, otherwise force padding to width in case you want
train/validation to match the same width
label_pad_value : float
label padding value
resize_mode : str
force - resize to data_shape regardless of aspect ratio
fit - try fit to data_shape preserving aspect ratio
shrink - shrink to data_shape only, preserving aspect ratio
mean_pixels : list or tuple
mean values for red/green/blue
kwargs : dict
see mx.io.ImageDetRecordIter
Returns:
----------
"""
def __init__(self, path_imgrec, batch_size, data_shape, path_imglist="",
label_width=-1, label_pad_width=-1, label_pad_value=-1,
resize_mode='force', mean_pixels=[123.68, 116.779, 103.939],
**kwargs):
super(DetRecordIter, self).__init__()
self.rec = mx.io.ImageDetRecordIter(
path_imgrec = path_imgrec,
path_imglist = path_imglist,
label_width = label_width,
label_pad_width = label_pad_width,
label_pad_value = label_pad_value,
batch_size = batch_size,
data_shape = data_shape,
mean_r = mean_pixels[0],
mean_g = mean_pixels[1],
mean_b = mean_pixels[2],
resize_mode = resize_mode,
**kwargs)
self.provide_label = None
self._get_batch()
if not self.provide_label:
raise RuntimeError("Invalid ImageDetRecordIter: " + path_imgrec)
self.reset()
@property
def provide_data(self):
return self.rec.provide_data
def reset(self):
self.rec.reset()
def iter_next(self):
return self._get_batch()
def next(self):
if self.iter_next():
return self._batch
else:
raise StopIteration
def _get_batch(self):
self._batch = self.rec.next()
if not self._batch:
return False
if self.provide_label is None:
# estimate the label shape for the first batch, always reshape to n*5
first_label = self._batch.label[0][0].asnumpy()
self.batch_size = self._batch.label[0].shape[0]
self.label_header_width = int(first_label[4])
self.label_object_width = int(first_label[5])
assert self.label_object_width >= 5, "object width must >=5"
self.label_start = 4 + self.label_header_width
self.max_objects = (first_label.size - self.label_start) // self.label_object_width
self.label_shape = (self.batch_size, self.max_objects, self.label_object_width)
self.label_end = self.label_start + self.max_objects * self.label_object_width
self.provide_label = [('label', self.label_shape)]
# modify label
label = self._batch.label[0].asnumpy()
label = label[:, self.label_start:self.label_end].reshape(
(self.batch_size, self.max_objects, self.label_object_width))
self._batch.label = [mx.nd.array(label)]
return True
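# Illustrative usage of DetRecordIter (not from the original source; the
# record path, batch size and data shape below are placeholders):
#
#   train_iter = DetRecordIter('data/train.rec', batch_size=32,
#                              data_shape=(3, 300, 300))
#   for batch in train_iter:
#       pass  # batch.data[0]: (32, 3, 300, 300); batch.label[0]: (32, max_objects, label_object_width)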
class DetIter(mx.io.DataIter):
"""
Detection Iterator, which will feed data and label to network
Optional data augmentation is performed when providing batch
Parameters:
----------
imdb : Imdb
image database
batch_size : int
batch size
data_shape : int or (int, int)
image shape to be resized
mean_pixels : float or float list
[R, G, B], mean pixel values
rand_samplers : list
random cropping sampler list, if not specified, will
use original image only
rand_mirror : bool
whether to randomly mirror input images, default False
shuffle : bool
whether to shuffle initial image list, default False
rand_seed : int or None
whether to use fixed random seed, default None
max_crop_trial : int
    if random crop is enabled, defines the maximum number of crop trials;
    if trials exceed this number, cropping is given up
is_train : bool
whether in training phase, default True, if False, labels might
be ignored
"""
def __init__(self, imdb, batch_size, data_shape, \
mean_pixels=[128, 128, 128], rand_samplers=[], \
rand_mirror=False, shuffle=False, rand_seed=None, \
is_train=True, max_crop_trial=50):
super(DetIter, self).__init__()
self._imdb = imdb
self.batch_size = batch_size
if isinstance(data_shape, int):
data_shape = (data_shape, data_shape)
self._data_shape = data_shape
self._mean_pixels = mx.nd.array(mean_pixels).reshape((3,1,1))
if not rand_samplers:
self._rand_samplers = []
else:
if not isinstance(rand_samplers, list):
rand_samplers = [rand_samplers]
assert isinstance(rand_samplers[0], RandSampler), "Invalid rand sampler"
self._rand_samplers = rand_samplers
self.is_train = is_train
self._rand_mirror = rand_mirror
self._shuffle = shuffle
if rand_seed:
np.random.seed(rand_seed) # fix random seed
self._max_crop_trial = max_crop_trial
self._current = 0
self._size = imdb.num_images
self._index = np.arange(self._size)
self._data = None
self._label = None
self._get_batch()
@property
def provide_data(self):
return [(k, v.shape) for k, v in self._data.items()]
@property
def provide_label(self):
if self.is_train:
return [(k, v.shape) for k, v in self._label.items()]
else:
return []
def reset(self):
self._current = 0
if self._shuffle:
np.random.shuffle(self._index)
def iter_next(self):
return self._current < self._size
def next(self):
if self.iter_next():
self._get_batch()
data_batch = mx.io.DataBatch(data=self._data.values(),
label=self._label.values(),
pad=self.getpad(), index=self.getindex())
self._current += self.batch_size
return data_batch
else:
raise StopIteration
def getindex(self):
return self._current // self.batch_size
def getpad(self):
pad = self._current + self.batch_size - self._size
return 0 if pad < 0 else pad
def _get_batch(self):
"""
Load data/label from dataset
"""
batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))
batch_label = []
for i in range(self.batch_size):
if (self._current + i) >= self._size:
if not self.is_train:
continue
# use padding from middle in each epoch
idx = (self._current + i + self._size // 2) % self._size
index = self._index[idx]
else:
index = self._index[self._current + i]
# index = self.debug_index
im_path = self._imdb.image_path_from_index(index)
with open(im_path, 'rb') as fp:
img_content = fp.read()
img = mx.img.imdecode(img_content)
gt = self._imdb.label_from_index(index).copy() if self.is_train else None
data, label = self._data_augmentation(img, gt)
batch_data[i] = data
if self.is_train:
batch_label.append(label)
self._data = {'data': batch_data}
if self.is_train:
self._label = {'label': mx.nd.array(np.array(batch_label))}
else:
self._label = {'label': None}
def _data_augmentation(self, data, label):
"""
perform data augmentations: crop, mirror, resize, sub mean, swap channels...
"""
if self.is_train and self._rand_samplers:
rand_crops = []
for rs in self._rand_samplers:
rand_crops += rs.sample(label)
num_rand_crops = len(rand_crops)
# randomly pick up one as input data
if num_rand_crops > 0:
index = int(np.random.uniform(0, 1) * num_rand_crops)
width = data.shape[1]
height = data.shape[0]
crop = rand_crops[index][0]
xmin = int(crop[0] * width)
ymin = int(crop[1] * height)
xmax = int(crop[2] * width)
ymax = int(crop[3] * height)
if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height:
data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin)
else:
# padding mode
new_width = xmax - xmin
new_height = ymax - ymin
offset_x = 0 - xmin
offset_y = 0 - ymin
data_bak = data
data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8')
data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak
label = rand_crops[index][1]
if self.is_train:
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \
cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
else:
interp_methods = [cv2.INTER_LINEAR]
interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))]
data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method)
if self.is_train and self._rand_mirror:
if np.random.uniform(0, 1) > 0.5:
data = mx.nd.flip(data, axis=1)
valid_mask = np.where(label[:, 0] > -1)[0]
tmp = 1.0 - label[valid_mask, 1]
label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
label[valid_mask, 3] = tmp
data = mx.nd.transpose(data, (2,0,1))
data = data.astype('float32')
data = data - self._mean_pixels
return data, label
|
|
import calendar
from datetime import datetime
from datetime import timedelta
from pandac.PandaModules import Vec3, Vec4, Point3, TextNode, VBase4
from otp.otpbase import OTPLocalizer
from direct.gui.DirectGui import DirectFrame, DirectButton, DirectLabel, DirectScrolledList, DirectCheckButton
from direct.gui import DirectGuiGlobals
from direct.showbase import DirectObject
from direct.showbase import PythonUtil
from direct.fsm.FSM import FSM
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from toontown.toontowngui.TeaserPanel import TeaserPanel
from toontown.toon import ToonHead
from toontown.parties import PartyGlobals
from toontown.friends.FriendsListPanel import determineFriendName
from toontown.parties.ScrolledFriendList import ScrolledFriendList
from toontown.parties.CalendarGuiMonth import CalendarGuiMonth
from toontown.parties.InviteVisual import InviteVisual
from toontown.parties.PartyInfo import PartyInfo
from toontown.parties import PartyUtils
from toontown.parties.PartyEditor import PartyEditor
from otp.otpbase import OTPGlobals
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from otp.nametag.NametagGroup import NametagGroup
from otp.nametag.Nametag import Nametag
from otp.nametag.NametagFloat2d import *
class PartyPlanner(DirectFrame, FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('PartyPlanner')
def __init__(self, doneEvent = None):
FSM.__init__(self, 'PartyPlannerFSM')
DirectFrame.__init__(self)
self.doneEvent = doneEvent
self.stateArray = ['Off',
'Welcome',
'PartyEditor',
#'Guests', jjkoletar: this should mean that it skips over the guests state
'Date',
'Time',
'Invitation',
'Farewell']
self.partyTime = base.cr.toontownTimeManager.getCurServerDateTime()
self.partyNowTime = base.cr.toontownTimeManager.getCurServerDateTime()
minutesToNextFifteen = 15 - self.partyTime.minute % 15
self.cleanPartyTime = self.partyTime + timedelta(minutes=minutesToNextFifteen, seconds=-self.partyTime.second)
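        # Example: a server time of 3:07:42 gives minutesToNextFifteen = 8, so
        # cleanPartyTime becomes 3:15:00, i.e. the default party time is the next
        # quarter-hour boundary with the seconds stripped off.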
self.partyTime = self.cleanPartyTime
self.guests = []
self.isPrivate = False
self.selectedCalendarGuiDay = None
self.gui = loader.loadModel('phase_4/models/parties/partyPlannerGUI')
self.partyDuration = timedelta(hours=PartyGlobals.DefaultPartyDuration)
self.timeTypeToMaxValue = {'hour': 23,
'minute': 59}
self.timeTypeToChangeAmount = {'hour': (1, -1),
'minute': (15, -15),
'ampm': (1, -1)}
self.partyInfo = None
self.asapMinuteRounding = base.config.GetInt('party-asap-minute-rounding', PartyGlobals.PartyPlannerAsapMinuteRounding)
self.load()
self.request('Welcome')
return
def enterWelcome(self, *args):
self.prevButton['state'] = DirectGuiGlobals.DISABLED
self.prevButton.hide()
self.nextButton['state'] = DirectGuiGlobals.NORMAL
self.welcomePage.show()
self.partyPlannerHead.reparentTo(self.welcomePage)
self.partyPlannerHead.startBlink()
self.partyPlannerHead.startLookAround()
self.nametagNP.reparentTo(self.welcomePage)
self.chatNP.reparentTo(self.welcomePage)
def exitWelcome(self):
self.welcomePage.hide()
self.prevButton.show()
self.partyPlannerHead.stopBlink()
self.partyPlannerHead.stopLookAround()
def enterPartyEditor(self, *args):
self.prevButton['state'] = DirectGuiGlobals.NORMAL
self.nextButton['state'] = DirectGuiGlobals.DISABLED
self.nextButton.hide()
self.partyEditorPage.show()
self.okWithGroundsGui.doneStatus = ''
self.partyEditor.request('Idle')
def exitPartyEditor(self):
self.partyEditor.request('Hidden')
self.partyEditorPage.hide()
def enterGuests(self, *args):
self.prevButton['state'] = DirectGuiGlobals.NORMAL
self.nextButton['state'] = DirectGuiGlobals.NORMAL
self.nextButton.show()
self.guestPage.show()
def exitGuests(self):
self.guests = []
for friendCheckBox in self.friendList['items']:
if friendCheckBox['indicatorValue']:
self.guests.append(friendCheckBox.getPythonTag('id'))
self.guestPage.hide()
def enterDate(self, *args):
self.prevButton.show()
self.prevButton['state'] = DirectGuiGlobals.NORMAL
if self.selectedCalendarGuiDay is None:
self.nextButton['state'] = DirectGuiGlobals.DISABLED
self.nextButton.hide()
self.makePartyNowButton.show()
self.datePage.show()
return
def exitDate(self):
self.datePage.hide()
self.nextButton.show()
if self.selectedCalendarGuiDay is not None:
self.partyTime = self.cleanPartyTime
self.alterPartyTime(year=self.selectedCalendarGuiDay.myDate.year, month=self.selectedCalendarGuiDay.myDate.month, day=self.selectedCalendarGuiDay.myDate.day)
else:
self.partyNowTime = self.calcAsapTime()
self.partyTime = self.partyNowTime
return
def calcAsapTime(self):
curServerTime = base.cr.toontownTimeManager.getCurServerDateTime()
baseTime = curServerTime
baseTime = baseTime.replace(baseTime.year, baseTime.month, baseTime.day, baseTime.hour, baseTime.minute, second=0, microsecond=0)
minute = curServerTime.minute
remainder = minute % self.asapMinuteRounding
if remainder:
baseTime += timedelta(minutes=self.asapMinuteRounding - remainder)
else:
baseTime += timedelta(minutes=self.asapMinuteRounding)
return baseTime
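    # Rounding sketch: with asapMinuteRounding = 15, a server time of 10:07 yields
    # remainder 7 and an ASAP time of 10:15, while exactly 10:15 rolls forward to
    # 10:30, so the chosen time is always strictly ahead of the current minute.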
def enterTime(self, *args):
self.prevButton.show()
self.prevButton['state'] = DirectGuiGlobals.NORMAL
self.nextButton.show()
self.timePage.show()
self.timePageRecapToontownTimeLabel2['text'] = '%s' % PartyUtils.formatDateTime(self.partyTime)
self.timePageRecapLocalTimeLabel['text'] = '%s%s' % (TTLocalizer.PartyPlannerTimeLocalTime, PartyUtils.formatDateTime(self.partyTime, inLocalTime=True))
def exitTime(self):
self.timePage.hide()
self.nextButton.show()
def enterInvitation(self, *args):
self.prevButton['state'] = DirectGuiGlobals.NORMAL
self.nextButton.hide()
defaultInviteTheme = PartyGlobals.InviteTheme.GenericMale
if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
if ToontownGlobals.VICTORY_PARTY_HOLIDAY in base.cr.newsManager.getHolidayIdList():
defaultInviteTheme = PartyGlobals.InviteTheme.VictoryParty
elif ToontownGlobals.KARTING_TICKETS_HOLIDAY in base.cr.newsManager.getHolidayIdList() or ToontownGlobals.CIRCUIT_RACING_EVENT in base.cr.newsManager.getHolidayIdList():
defaultInviteTheme = PartyGlobals.InviteTheme.Racing
elif ToontownGlobals.VALENTINES_DAY in base.cr.newsManager.getHolidayIdList():
defaultInviteTheme = PartyGlobals.InviteTheme.Valentoons
if self.partyInfo is not None:
del self.partyInfo
activityList = self.partyEditor.partyEditorGrid.getActivitiesOnGrid()
decorationList = self.partyEditor.partyEditorGrid.getDecorationsOnGrid()
endTime = self.partyTime + self.partyDuration
self.partyInfo = PartyInfo(0, 0, self.partyTime.year, self.partyTime.month, self.partyTime.day, self.partyTime.hour, self.partyTime.minute, endTime.year, endTime.month, endTime.day, endTime.hour, endTime.minute, self.isPrivate, defaultInviteTheme, activityList, decorationList, 0)
if self.noFriends or len(self.getInvitees()) == 0:
self.inviteVisual.setNoFriends(True)
self.invitationTitleLabel['text'] = TTLocalizer.PartyPlannerConfirmTitleNoFriends
self.inviteButton['text'] = TTLocalizer.PartyPlannerInviteButtonNoFriends
self.selectedInviteThemeLabel.stash()
self.nextThemeButton.stash()
self.prevThemeButton.stash()
self.setInviteTheme(defaultInviteTheme)
else:
self.inviteVisual.setNoFriends(False)
self.invitationTitleLabel['text'] = TTLocalizer.PartyPlannerConfirmTitle
self.inviteButton['text'] = TTLocalizer.PartyPlannerInviteButton
self.selectedInviteThemeLabel.unstash()
self.nextThemeButton.unstash()
self.prevThemeButton.unstash()
self.setInviteTheme(defaultInviteTheme)
self.inviteVisual.updateInvitation(base.localAvatar.getName(), self.partyInfo)
self.invitationPage.show()
return
def __prevTheme(self):
self.nextThemeButton.show()
prevTheme = self.currentInvitationTheme - 1
while prevTheme not in self.inviteThemes:
prevTheme -= 1
if prevTheme == self.currentInvitationTheme:
self.notify.warning('No previous invite theme found.')
break
elif prevTheme < 0:
prevTheme = len(self.inviteVisual.inviteThemesIdToInfo) - 1
self.setInviteTheme(prevTheme)
def __nextTheme(self):
self.prevThemeButton.show()
nextTheme = self.currentInvitationTheme + 1
while nextTheme not in self.inviteThemes:
nextTheme += 1
if nextTheme == self.currentInvitationTheme:
self.notify.warning('No next invite theme found.')
break
elif nextTheme >= len(self.inviteVisual.inviteThemesIdToInfo):
nextTheme = 0
self.setInviteTheme(nextTheme)
def setInviteTheme(self, themeNumber):
self.currentInvitationTheme = themeNumber
self.selectedInviteThemeLabel['text'] = '%s %s (%d/%d)' % (self.inviteVisual.inviteThemesIdToInfo[self.currentInvitationTheme][1],
TTLocalizer.PartyPlannerInvitationTheme,
self.inviteThemes.index(self.currentInvitationTheme) + 1,
len(self.inviteThemes))
self.partyInfo.inviteTheme = self.currentInvitationTheme
self.inviteVisual.updateInvitation(base.localAvatar.getName(), self.partyInfo)
def exitInvitation(self):
self.invitationPage.hide()
self.nextButton.show()
def enterFarewell(self, goingBackAllowed):
self.farewellPage.show()
if goingBackAllowed:
self.prevButton.show()
else:
self.prevButton.hide()
self.nextButton.hide()
self.partyPlannerHead.reparentTo(self.farewellPage)
self.partyPlannerHead.startBlink()
self.partyPlannerHead.startLookAround()
self.nametagNP.reparentTo(self.farewellPage)
self.chatNP.reparentTo(self.farewellPage)
def exitFarewell(self):
self.farewellPage.hide()
self.nextButton.show()
self.prevButton.show()
self.partyPlannerHead.stopBlink()
self.partyPlannerHead.stopLookAround()
def load(self):
self.frame = DirectFrame(parent=aspect2d, geom=self.gui.find('**/background'), relief=None, scale=0.85, pos=(0.05, 0.0, 0.1))
self.titleScale = TTLocalizer.PPtitleScale
self._createNavButtons()
self.welcomePage = self._createWelcomePage()
self.welcomePage.hide()
self.datePage = self._createDatePage()
self.datePage.hide()
self.timePage = self._createTimePage()
self.timePage.hide()
self.guestPage = self._createGuestPage()
self.guestPage.hide()
self.partyEditorPage = self._createPartyEditorPage()
self.partyEditorPage.hide()
self.invitationPage = self._createInvitationPage()
self.invitationPage.hide()
self.farewellPage = self._createFarewellPage()
self.farewellPage.hide()
return
def _createNavButtons(self):
self.quitButton = DirectButton(parent=self.frame, relief=None, geom=(self.gui.find('**/cancelButton_up'), self.gui.find('**/cancelButton_down'), self.gui.find('**/cancelButton_rollover')), command=self.__acceptExit)
self.nextButton = DirectButton(parent=self.frame, relief=None, geom=(self.gui.find('**/bottomNext_button/nextButton_up'), self.gui.find('**/bottomNext_button/nextButton_down'), self.gui.find('**/bottomNext_button/nextButton_rollover')), command=self.__nextItem, state=DirectGuiGlobals.DISABLED)
self.prevButton = DirectButton(parent=self.frame, relief=None, geom=(self.gui.find('**/bottomPrevious_button/previousButton_up'), self.gui.find('**/bottomPrevious_button/previousButton_down'), self.gui.find('**/bottomPrevious_button/previousButton_rollover')), command=self.__prevItem, state=DirectGuiGlobals.DISABLED)
self.currentItem = None
return
def __createNametag(self, parent):
if self.nametagGroup == None:
self.nametagGroup = NametagGroup()
self.nametagGroup.setFont(OTPGlobals.getInterfaceFont())
self.nametagGroup.setActive(0)
self.nametagGroup.setAvatar(self.partyPlannerHead)
self.nametagGroup.manage(base.marginManager)
self.nametagGroup.setColorCode(self.nametagGroup.CCNonPlayer)
self.nametagGroup.getNametag2d().setContents(0)
self.nametagNode = NametagFloat2d()
self.nametagNode.setContents(Nametag.CName)
self.nametagGroup.addNametag(self.nametagNode)
self.nametagGroup.setName(base.cr.partyManager.getPartyPlannerName())
self.nametagNP = parent.attachNewNode(self.nametagNode)
nametagPos = self.gui.find('**/step_01_partymanPeteNametag_locator').getPos()
self.nametagNP.setPosHprScale(nametagPos[0], 0, nametagPos[2], 0, 0, 0, 0.1, 1, 0.1)
self.chatNode = NametagFloat2d()
self.chatNode.setContents(Nametag.CSpeech | Nametag.CThought)
self.nametagGroup.addNametag(self.chatNode)
self.nametagGroup.setChat(TTLocalizer.PartyPlannerInstructions, CFSpeech)
self.chatNP = parent.attachNewNode(self.chatNode)
chatPos = self.gui.find('**/step_01_partymanPeteText_locator').getPos()
self.chatNP.setPosHprScale(chatPos[0], 0, chatPos[2], 0, 0, 0, 0.08, 1, 0.08)
return
def clearNametag(self):
if self.nametagGroup != None:
self.nametagGroup.unmanage(base.marginManager)
self.nametagGroup.removeNametag(self.nametagNode)
self.nametagGroup.removeNametag(self.chatNode)
self.nametagNP.removeNode()
self.chatNP.removeNode()
del self.nametagNP
del self.chatNP
del self.nametagNode
del self.chatNode
self.nametagGroup.setAvatar(NodePath())
self.nametagGroup.destroy()
self.nametagGroup = None
return
def _createWelcomePage(self):
self.nametagGroup = None
page = DirectFrame(self.frame)
page.setName('PartyPlannerWelcomePage')
self.welcomeTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerWelcomeTitle, pos=self.gui.find('**/title_locator').getPos(), scale=self.titleScale)
self.partyPlannerHead = ToonHead.ToonHead()
partyPlannerStyle = base.cr.partyManager.getPartyPlannerStyle()
self.partyPlannerHead.setupHead(partyPlannerStyle, forGui=True)
self.partyPlannerHead.setPos(self.gui.find('**/step_01_partymanPete_locator').getPos())
animal = partyPlannerStyle.getAnimal()
if animal == 'cat' or animal == 'pig':
headScale = 0.4
elif animal == 'dog' or animal == 'bear':
headScale = 0.45
elif animal == 'rabbit':
headScale = 0.35
else:
headScale = 0.3
self.partyPlannerHead.setScale(headScale)
self.partyPlannerHead.setH(180.0)
self.partyPlannerHead.reparentTo(page)
self.__createNametag(page)
return page
def _createDatePage(self):
page = DirectFrame(self.frame)
page.setName('PartyPlannerDatePage')
self.createDateTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerDateTitle, pos=self.gui.find('**/title_locator').getPos(), scale=self.titleScale)
pos = self.gui.find('**/step_06_sendInvitation_locator').getPos()
self.makePartyNowButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/send_up'), self.gui.find('**/send_down'), self.gui.find('**/send_rollover')), text=TTLocalizer.PartyPlannerPartyNow, text_pos=(pos[0], pos[2]), text_scale=0.05, command=self.__doMakePartyNow)
curServerDate = base.cr.toontownTimeManager.getCurServerDateTime()
self.calendarGuiMonth = CalendarGuiMonth(page, curServerDate, scale=0.95, pos=(-0.05, 0.0, -0.33), dayClickCallback=self._dayClickCallback, onlyFutureDaysClickable=True)
return page
def __doMakePartyNow(self):
self.request('Invitation')
def _dayClickCallback(self, calendarGuiDay):
self.selectedCalendarGuiDay = calendarGuiDay
self.nextButton['state'] = DirectGuiGlobals.NORMAL
self.makePartyNowButton.hide()
self.nextButton.show()
def alterPartyTime(self, year = None, month = None, day = None, hour = None, minute = None):
self.partyTime = datetime(year=self.positiveTime('year', year), month=self.positiveTime('month', month), day=self.positiveTime('day', day), hour=self.positiveTime('hour', hour), minute=self.positiveTime('minute', minute), tzinfo=self.partyTime.tzinfo)
def positiveTime(self, type, amount):
if amount is None:
return getattr(self.partyTime, type)
if type == 'hour' or type == 'minute':
if amount < 0:
return self.timeTypeToMaxValue[type] + 1 + self.timeTypeToChangeAmount[type][1]
elif amount > self.timeTypeToMaxValue[type]:
return 0
return amount
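    # Wrap-around sketch: decrementing below zero maps an hour to 23 and a minute
    # to 45 (max value + 1 plus the negative step), while exceeding the maximum
    # wraps back to 0, so the up/down spinner arrows cycle cleanly.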
def _createTimePage(self):
page = DirectFrame(self.frame)
page.setName('PartyPlannerTimePage')
self.createTimeTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerTimeTitle, pos=self.gui.find('**/title_locator').getPos(), scale=self.titleScale)
self.clockImage = DirectFrame(parent=page, relief=None, geom=self.gui.find('**/toontownTime_background'))
self.timePageToontownLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerTimeToontown, pos=self.gui.find('**/step_03_toontown_locator').getPos(), scale=0.15, text_fg=(1.0, 0.0, 0.0, 1.0), text_font=ToontownGlobals.getSignFont())
self.timePageTimeLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerTimeTime, pos=self.gui.find('**/step_03_time_locator').getPos(), scale=0.15, text_fg=(1.0, 0.0, 0.0, 1.0), text_font=ToontownGlobals.getSignFont())
self.timePageRecapLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerTimeRecap, pos=self.gui.find('**/step_03_partyDateAndTime_locator').getPos(), scale=0.09)
self.timePageRecapToontownTimeLabel1 = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerTimeToontownTime, pos=self.gui.find('**/step_03_toontownTime_locator').getPos(), scale=0.06)
self.timePageRecapToontownTimeLabel2 = DirectLabel(parent=page, relief=None, text='%s' % PartyUtils.formatDateTime(self.partyTime), pos=self.gui.find('**/step_03_toontownDateAndTime_loactor').getPos(), textMayChange=True, scale=0.06)
self.timePageRecapLocalTimeLabel = DirectLabel(parent=page, relief=None, text='%s%s' % (TTLocalizer.PartyPlannerTimeLocalTime, PartyUtils.formatDateTime(self.partyTime, inLocalTime=True)), pos=self.gui.find('**/step_03_localDateAndTime_loactor').getPos(), textMayChange=True, scale=0.06, text_fg=(1.0, 0.0, 0.0, 1.0))
self.timeInputHourLabel, self.timeInputHourUpButton, self.timeInputHourDownButton = self.getTimeWidgets(page, 'hour')
self.timeInputMinuteLabel, self.timeInputMinuteUpButton, self.timeInputMinuteDownButton = self.getTimeWidgets(page, 'minute')
self.timeInputAmPmLabel, self.timeInputAmPmUpButton, self.timeInputAmPmDownButton = self.getTimeWidgets(page, 'ampm')
self.timePagecolonLabel = DirectLabel(parent=page, relief=None, text=':', pos=self.gui.find('**/step_03_colon_locator').getPos(), scale=0.15)
return page
def getTimeWidgets(self, page, type):
if type == 'ampm':
data = self.getCurrentAmPm()
else:
data = getattr(self.partyTime, type)
if data == 0 and type == 'minute':
data = '00'
else:
if type == 'hour':
data = data % 12
if data == 0:
data = 12
data = '%d' % data
label = DirectLabel(parent=page, relief=None, text='%s' % data, textMayChange=True, pos=self.gui.find('**/step_03_%s_locator' % type).getPos(), scale=0.12)
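        # changeValue is defined as a closure so each spinner keeps its own `label`
        # and `type`; DirectButton calls it with the extraArgs given below, which is
        # why it receives `self` explicitly as its first argument.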
def changeValue(self, amount):
if type == 'ampm':
self.alterPartyTime(hour=(self.partyTime.hour + 12) % 24)
newAmount = self.getCurrentAmPm()
label['text'] = newAmount
else:
if type == 'hour':
newAmount = getattr(self.partyTime, type) + amount
newAmount = newAmount % 12
if self.timeInputAmPmLabel['text'] == TTLocalizer.PartyTimeFormatMeridiemPM:
newAmount = newAmount % 12 + 12
self.alterPartyTime(hour=newAmount)
elif type == 'minute':
newAmount = getattr(self.partyTime, type) + amount
self.alterPartyTime(minute=newAmount)
else:
PartyPlanner.notify.error('Invalid type for changeValue in PartyPlanner: %s' % type)
newAmount = getattr(self.partyTime, type)
if newAmount < 10 and type == 'minute':
label['text'] = '0%d' % newAmount
else:
if type == 'hour':
newAmount = newAmount % 12
if newAmount == 0:
newAmount = 12
label['text'] = '%d' % newAmount
self.timePageRecapToontownTimeLabel2['text'] = '%s' % PartyUtils.formatDateTime(self.partyTime)
self.timePageRecapLocalTimeLabel['text'] = '%s%s' % (TTLocalizer.PartyPlannerTimeLocalTime, PartyUtils.formatDateTime(self.partyTime, inLocalTime=True))
upButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/%sButtonUp_up' % type), self.gui.find('**/%sButtonUp_down' % type), self.gui.find('**/%sButtonUp_rollover' % type)), command=changeValue, extraArgs=[self, self.timeTypeToChangeAmount[type][0]])
downButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/%sButtonDown_up' % type), self.gui.find('**/%sButtonDown_down' % type), self.gui.find('**/%sButtonDown_rollover' % type)), command=changeValue, extraArgs=[self, self.timeTypeToChangeAmount[type][1]])
return (label, upButton, downButton)
def getCurrentAmPm(self):
if self.partyTime.hour < 12:
return TTLocalizer.PartyTimeFormatMeridiemAM
else:
return TTLocalizer.PartyTimeFormatMeridiemPM
def _createGuestPage(self):
page = DirectFrame(self.frame)
page.setName('PartyPlannerGuestPage')
self.guestTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerGuestTitle, pos=self.gui.find('**/title_locator').getPos(), scale=self.titleScale)
self.guestBackgroundLabel = DirectLabel(parent=page, relief=None, image=self.gui.find('**/guestListBackground_flat'), scale=(1.2, 1.0, 1.0))
self.friendList = ScrolledFriendList(page, self.gui, makeItemsCheckBoxes=True)
if len(base.localAvatar.friendsList) == 0:
self.noFriends = True
else:
self.noFriends = False
for friendPair in base.localAvatar.friendsList:
self.friendList.addFriend(determineFriendName(friendPair), friendPair[0])
self.friendList.scrollTo(0)
pos = self.gui.find('**/step_04_partyWillBe_locator').getPos()
self.publicPrivateLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerPublicPrivateLabel, text_align=TextNode.ACenter, text_scale=0.065, pos=pos)
self.publicDescriptionLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerPublicDescription, text_align=TextNode.ACenter, text_scale=TTLocalizer.PPpbulicDescriptionLabel, pos=(pos[0] - 0.52, pos[1], pos[2]))
self.publicDescriptionLabel.stash()
self.privateDescriptionLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerPrivateDescription, text_align=TextNode.ACenter, text_scale=TTLocalizer.PPprivateDescriptionLabel, pos=(pos[0] + 0.55, pos[1], pos[2]))
self.privateDescriptionLabel.stash()
pos = self.gui.find('**/step_04_public_locator').getPos()
self.publicButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/publicButton_up'),
self.gui.find('**/publicButton_down'),
self.gui.find('**/publicButton_rollover'),
self.gui.find('**/publicButton_inactive')), text=TTLocalizer.PartyPlannerPublic, text_pos=(pos[0], pos[2]), text_scale=TTLocalizer.PPpublicButton, command=self.__doTogglePublicPrivate)
self.publicButton['state'] = DirectGuiGlobals.DISABLED
self.publicButton.bind(DirectGuiGlobals.ENTER, self.__enterPublic)
self.publicButton.bind(DirectGuiGlobals.EXIT, self.__exitPublic)
pos = self.gui.find('**/step_04_private_locator').getPos()
self.privateButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/privateButton_up'),
self.gui.find('**/privateButton_down'),
self.gui.find('**/privateButton_rollover'),
self.gui.find('**/privateButton_inactive')), text=TTLocalizer.PartyPlannerPrivate, text_pos=(pos[0], pos[2]), text_scale=TTLocalizer.PPprivateButton, command=self.__doTogglePublicPrivate)
self.privateButton.bind(DirectGuiGlobals.ENTER, self.__enterPrivate)
self.privateButton.bind(DirectGuiGlobals.EXIT, self.__exitPrivate)
self.checkAllButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/checkAllButton_up'), self.gui.find('**/checkAllButton_down'), self.gui.find('**/checkAllButton_rollover')), command=self.__doCheckAll)
self.uncheckAllButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/uncheckAllButton_up'), self.gui.find('**/uncheckAllButton_down'), self.gui.find('**/uncheckAllButton_rollover')), command=self.__doUncheckAll)
return page
def __doCheckAll(self):
for friendBox in self.friendList['items']:
friendBox['indicatorValue'] = True
def __doUncheckAll(self):
for friendBox in self.friendList['items']:
friendBox['indicatorValue'] = False
def __enterPrivate(self, mouseEvent):
self.privateDescriptionLabel.unstash()
def __exitPrivate(self, mouseEvent):
self.privateDescriptionLabel.stash()
def __enterPublic(self, mouseEvent):
self.publicDescriptionLabel.unstash()
def __exitPublic(self, mouseEvent):
self.publicDescriptionLabel.stash()
def __doTogglePublicPrivate(self):
if self.isPrivate:
self.isPrivate = False
self.privateButton['state'] = DirectGuiGlobals.NORMAL
self.publicButton['state'] = DirectGuiGlobals.DISABLED
else:
self.isPrivate = True
self.privateButton['state'] = DirectGuiGlobals.DISABLED
self.publicButton['state'] = DirectGuiGlobals.NORMAL
def _createPartyEditorPage(self):
page = DirectFrame(self.frame)
page.setName('PartyPlannerEditorPage')
self.LayoutTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerEditorTitle, pos=self.gui.find('**/title_locator').getPos() + Point3(0.0, 0.0, 0.075), scale=self.titleScale)
self.costLabel = DirectLabel(parent=page, pos=(-0.74, 0.0, 0.17), relief=None, text=TTLocalizer.PartyPlannerTotalCost % 0, text_align=TextNode.ACenter, scale=TTLocalizer.PPcostLabel, textMayChange=True)
self.partyGridBackground = DirectFrame(parent=page, relief=None, geom=self.gui.find('**/partyGrid_flat'))
self.partyGroundsLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerPartyGrounds, text_font=ToontownGlobals.getSignFont(), text_fg=VBase4(1.0, 0.0, 0.0, 1.0), text_scale=TTLocalizer.PPpartyGroundsLabel, pos=self.gui.find('**/step_05_partyGrounds_text_locator').getPos(), scale=0.1)
self.activityBackground = DirectFrame(parent=page, relief=None, geom=self.gui.find('**/activitiesDecorations_flat1'), pos=(0.0, 0.0, 0.04))
pos = self.gui.find('**/step_05_instructions_locator').getPos()
self.instructionLabel = DirectLabel(parent=page, relief=None, text=' ', text_pos=(pos[0], pos[2]), text_scale=TTLocalizer.PPinstructionLabel, textMayChange=True, geom=self.gui.find('**/instructions_flat'))
self.elementTitleLabel = DirectLabel(parent=page, relief=None, text=' ', pos=self.gui.find('**/step_05_activitiesName_text_locator').getPos() + Point3(0.0, 0.0, 0.04), text_scale=TTLocalizer.PPelementTitleLabel, textMayChange=True)
self.elementPriceNode = TextNode('ElementPrice')
self.elementPriceNode.setAlign(TextNode.ALeft)
self.elementPriceNode.setTextColor(0.0, 0.0, 0.0, 1.0)
self.elementPriceNode.setFont(ToontownGlobals.getToonFont())
self.elementPrice = page.attachNewNode(self.elementPriceNode)
self.elementPrice.setScale(TTLocalizer.PPelementPriceNode)
self.elementPrice.setPos(self.gui.find('**/step_05_activityPrice_text_locator').getPos() + Point3(-0.02, 0.0, 0.04))
self.elementDescriptionNode = TextNode('ElementDescription')
self.elementDescriptionNode.setAlign(TextNode.ACenter)
self.elementDescriptionNode.setWordwrap(8)
self.elementDescriptionNode.setFont(ToontownGlobals.getToonFont())
self.elementDescriptionNode.setTextColor(0.0, 0.0, 0.0, 1.0)
self.elementDescription = page.attachNewNode(self.elementDescriptionNode)
self.elementDescription.setScale(TTLocalizer.PPelementDescription)
self.elementDescription.setPos(self.gui.find('**/step_05_activityDescription_text_locator').getPos() + Point3(0.0, 0.0, 0.04))
self.totalMoney = base.localAvatar.getTotalMoney()
catalogGui = loader.loadModel('phase_5.5/models/gui/catalog_gui')
self.beanBank = DirectLabel(parent=page, relief=None, text=str(self.totalMoney), text_align=TextNode.ARight, text_scale=0.075, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.495, -0.53), text_font=ToontownGlobals.getSignFont(), textMayChange=True, image=catalogGui.find('**/bean_bank'), image_scale=(0.65, 0.65, 0.65), scale=0.9, pos=(-0.75, 0.0, 0.6))
catalogGui.removeNode()
del catalogGui
self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
self.accept(localAvatar.uniqueName('bankMoneyChange'), self.__moneyChange)
self.partyEditor = PartyEditor(self, page)
self.partyEditor.request('Hidden')
pos = self.gui.find('**/step_05_add_text_locator').getPos()
self.elementBuyButton = DirectButton(parent=page, relief=None, text=TTLocalizer.PartyPlannerBuy, text_pos=(pos[0], pos[2]), text_scale=TTLocalizer.PPelementBuyButton, geom=(self.gui.find('**/add_up'), self.gui.find('**/add_down'), self.gui.find('**/add_rollover')), geom3_color=VBase4(0.5, 0.5, 0.5, 1.0), textMayChange=True, pos=(0.0, 0.0, 0.04), command=self.partyEditor.buyCurrentElement)
self.okWithPartyGroundsLayoutEvent = 'okWithPartyGroundsLayoutEvent'
self.accept(self.okWithPartyGroundsLayoutEvent, self.okWithPartyGroundsLayout)
self.okWithGroundsGui = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('PartyEditorOkGui'), doneEvent=self.okWithPartyGroundsLayoutEvent, message=TTLocalizer.PartyPlannerOkWithGroundsLayout, style=TTDialog.YesNo, okButtonText=OTPLocalizer.DialogYes, cancelButtonText=OTPLocalizer.DialogNo)
self.okWithGroundsGui.doneStatus = ''
self.okWithGroundsGui.hide()
return page
def okWithPartyGroundsLayout(self):
self.okWithGroundsGui.hide()
if self.okWithGroundsGui.doneStatus == 'ok':
self.__nextItem()
def setNextButtonState(self, enabled):
if enabled:
self.nextButton['state'] = DirectGuiGlobals.NORMAL
self.nextButton.show()
else:
self.nextButton['state'] = DirectGuiGlobals.DISABLED
self.nextButton.hide()
def _createInvitationPage(self):
self.__handleHolidays()
page = DirectFrame(self.frame)
page.setName('PartyPlannerInvitationPage')
self.invitationTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerConfirmTitle, textMayChange=True, pos=self.gui.find('**/title_locator').getPos(), scale=self.titleScale)
self.invitationBackground = DirectFrame(parent=page, relief=None, geom=self.gui.find('**/invitationBackground'))
self.inviteVisual = InviteVisual(page)
self.selectedInviteThemeLabel = DirectLabel(parent=page, relief=None, pos=self.gui.find('**/step_06_theme_locator').getPos(), text='', text_scale=0.06, textMayChange=True)
self.nextThemeButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/topNext_button/nextButton_up'), self.gui.find('**/topNext_button/nextButton_down'), self.gui.find('**/topNext_button/nextButton_rollover')), command=self.__nextTheme)
self.prevThemeButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/topPrevious_button/previousButton_up'), self.gui.find('**/topPrevious_button/previousButton_down'), self.gui.find('**/topPrevious_button/previousButton_rollover')), command=self.__prevTheme)
pos = self.gui.find('**/step_06_sendInvitation_locator').getPos()
self.inviteButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/send_up'), self.gui.find('**/send_down'), self.gui.find('**/send_rollover')), text=TTLocalizer.PartyPlannerInviteButton, textMayChange=True, text_scale=0.05, text_pos=(pos[0], pos[2]), command=self.__handleComplete)
return page
def __handleHolidays(self):
self.inviteThemes = range(len(PartyGlobals.InviteTheme))
if hasattr(base.cr, 'newsManager') and base.cr.newsManager:
holidayIds = base.cr.newsManager.getHolidayIdList()
if ToontownGlobals.VALENTINES_DAY not in holidayIds:
self.inviteThemes.remove(PartyGlobals.InviteTheme.Valentoons)
if ToontownGlobals.VICTORY_PARTY_HOLIDAY not in holidayIds:
self.inviteThemes.remove(PartyGlobals.InviteTheme.VictoryParty)
if ToontownGlobals.WINTER_DECORATIONS not in holidayIds and ToontownGlobals.WACKY_WINTER_DECORATIONS not in holidayIds:
self.inviteThemes.remove(PartyGlobals.InviteTheme.Winter)
def _createFarewellPage(self):
page = DirectFrame(self.frame)
page.setName('PartyPlannerFarewellPage')
self.confirmTitleLabel = DirectLabel(parent=page, relief=None, text=TTLocalizer.PartyPlannerConfirmationAllOkTitle, textMayChange=True, pos=self.gui.find('**/title_locator').getPos(), scale=self.titleScale)
pos = self.gui.find('**/step_07_close_text_locator').getPos()
self.closePlannerButton = DirectButton(parent=page, relief=None, geom=(self.gui.find('**/close_up'), self.gui.find('**/close_down'), self.gui.find('**/close_rollover')), text=TTLocalizer.PartyPlannerClosePlanner, text_scale=0.055, text_pos=(pos[0], pos[2]), command=self.__acceptExit)
return page
def close(self):
self.ignore('addPartyResponseReceived')
self.ignore(localAvatar.uniqueName('moneyChange'))
self.ignore(localAvatar.uniqueName('bankMoneyChange'))
self.timeInputHourUpButton.destroy()
self.timeInputHourDownButton.destroy()
self.timeInputMinuteUpButton.destroy()
self.timeInputMinuteDownButton.destroy()
self.timeInputAmPmUpButton.destroy()
self.timeInputAmPmDownButton.destroy()
self.privateButton.destroy()
self.publicButton.destroy()
self.makePartyNowButton.destroy()
self.checkAllButton.destroy()
self.uncheckAllButton.destroy()
self.elementBuyButton.destroy()
self.nextThemeButton.destroy()
self.prevThemeButton.destroy()
self.inviteButton.destroy()
self.closePlannerButton.destroy()
self.ignore(self.okWithPartyGroundsLayoutEvent)
if hasattr(self, 'okWithGroundsGui'):
self.okWithGroundsGui.cleanup()
del self.okWithGroundsGui
if hasattr(self, 'frame') and not self.frame.isEmpty():
messenger.send(self.doneEvent)
self.hide()
self.cleanup()
self.friendList.removeAndDestroyAllItems()
self.friendList.destroy()
self.calendarGuiMonth.destroy()
self.frame.destroy()
self.partyPlannerHead.delete()
self.partyPlannerHead.removeNode()
self.clearNametag()
self.partyEditor.request('Cleanup')
self.partyEditor = None
self.destroy()
del self
return
def __handleComplete(self):
self.inviteButton['state'] = DirectGuiGlobals.DISABLED
self.prevButton['state'] = DirectGuiGlobals.DISABLED
endTime = self.partyTime + self.partyDuration
hostId = base.localAvatar.doId
self.partyActivities = self.partyEditor.partyEditorGrid.getActivitiesOnGrid()
decorations = self.partyEditor.partyEditorGrid.getDecorationsOnGrid()
invitees = self.getInvitees()
self.accept('addPartyResponseReceived', self.processAddPartyResponse)
base.cr.partyManager.sendAddParty(hostId, self.partyTime.strftime('%Y-%m-%d %H:%M:%S'), endTime.strftime('%Y-%m-%d %H:%M:%S'), self.isPrivate, self.currentInvitationTheme, self.partyActivities, decorations, invitees)
def getInvitees(self):
invitees = []
for friendBox in self.friendList['items']:
if friendBox['indicatorValue']:
invitees.append(friendBox.getPythonTag('id'))
return invitees
def processAddPartyResponse(self, hostId, errorCode):
PartyPlanner.notify.debug('processAddPartyResponse : hostId=%d errorCode=%s' % (hostId, PartyGlobals.AddPartyErrorCode.getString(errorCode)))
goingBackAllowed = False
if errorCode == PartyGlobals.AddPartyErrorCode.AllOk:
goingBackAllowed = False
self.confirmTitleLabel['text'] = TTLocalizer.PartyPlannerConfirmationAllOkTitle
if self.noFriends or len(self.getInvitees()) == 0:
confirmRecapText = TTLocalizer.PartyPlannerConfirmationAllOkTextNoFriends
else:
confirmRecapText = TTLocalizer.PartyPlannerConfirmationAllOkText
elif errorCode == PartyGlobals.AddPartyErrorCode.ValidationError:
self.confirmTitleLabel['text'] = TTLocalizer.PartyPlannerConfirmationErrorTitle
confirmRecapText = TTLocalizer.PartyPlannerConfirmationValidationErrorText
elif errorCode == PartyGlobals.AddPartyErrorCode.DatabaseError:
self.confirmTitleLabel['text'] = TTLocalizer.PartyPlannerConfirmationErrorTitle
confirmRecapText = TTLocalizer.PartyPlannerConfirmationDatabaseErrorText
elif errorCode == PartyGlobals.AddPartyErrorCode.TooManyHostedParties:
goingBackAllowed = False
self.confirmTitleLabel['text'] = TTLocalizer.PartyPlannerConfirmationErrorTitle
confirmRecapText = TTLocalizer.PartyPlannerConfirmationTooManyText
self.nametagGroup.setChat(confirmRecapText, CFSpeech)
self.request('Farewell', goingBackAllowed)
def __acceptExit(self):
PartyPlanner.notify.debug('__acceptExit')
if hasattr(self, 'frame'):
self.hide()
messenger.send(self.doneEvent)
def __nextItem(self):
messenger.send('wakeup')
if self.state == 'PartyEditor' and self.okWithGroundsGui.doneStatus != 'ok':
self.okWithGroundsGui.show()
return
if self.state == 'PartyEditor' and self.noFriends:
self.request('Date')
self.selectedCalendarGuiDay = None
self.calendarGuiMonth.clearSelectedDay()
return
if self.state == 'Guests':
self.selectedCalendarGuiDay = None
self.calendarGuiMonth.clearSelectedDay()
if self.state == 'Time':
if self.partyTime < base.cr.toontownTimeManager.getCurServerDateTime():
self.okChooseFutureTimeEvent = 'okChooseFutureTimeEvent'
self.acceptOnce(self.okChooseFutureTimeEvent, self.okChooseFutureTime)
self.chooseFutureTimeDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('chooseFutureTimeDialog'), doneEvent=self.okChooseFutureTimeEvent, message=TTLocalizer.PartyPlannerChooseFutureTime, style=TTDialog.Acknowledge)
self.chooseFutureTimeDialog.show()
return
self.requestNext()
return
def okChooseFutureTime(self):
if hasattr(self, 'chooseFutureTimeDialog'):
self.chooseFutureTimeDialog.cleanup()
del self.chooseFutureTimeDialog
if hasattr(self, 'okChooseFutureTimeEvent'):
self.ignore(self.okChooseFutureTimeEvent)
def __prevItem(self):
messenger.send('wakeup')
if self.state == 'Date' and self.noFriends:
self.request('PartyEditor')
return
if self.state == 'Invitation' and self.selectedCalendarGuiDay is None:
self.request('Guests')
return
self.requestPrev()
return
def __moneyChange(self, newMoney):
if hasattr(self, 'totalMoney'):
self.totalMoney = base.localAvatar.getTotalMoney()
if hasattr(self, 'beanBank'):
self.beanBank['text'] = str(int(self.totalMoney))
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Givens rotations routines."""
import numpy
from openfermion.config import EQ_TOLERANCE
def givens_matrix_elements(a, b, which='left'):
"""Compute the matrix elements of the Givens rotation that zeroes out one
of two row entries.
    If `which='left'` then returns a matrix G such that
        G * [a  b]^T = [0  r]^T
    otherwise, returns a matrix G such that
        G * [a  b]^T = [r  0]^T
    where r is a complex number.
Args:
a(complex or float): A complex number representing the upper row entry
b(complex or float): A complex number representing the lower row entry
which(string): Either 'left' or 'right', indicating whether to
zero out the left element (first argument) or right element
(second argument). Default is `left`.
Returns:
G(ndarray): A 2 x 2 numpy array representing the matrix G.
The numbers in the first column of G are real.
"""
# Handle case that a is zero
if abs(a) < EQ_TOLERANCE:
cosine = 1.
sine = 0.
phase = 1.
# Handle case that b is zero and a is nonzero
elif abs(b) < EQ_TOLERANCE:
cosine = 0.
sine = 1.
phase = 1.
# Handle case that a and b are both nonzero
else:
denominator = numpy.sqrt(abs(a)**2 + abs(b)**2)
cosine = abs(b) / denominator
sine = abs(a) / denominator
sign_b = b / abs(b)
sign_a = a / abs(a)
phase = sign_a * sign_b.conjugate()
# If phase is a real number, convert it to a float
if numpy.isreal(phase):
phase = numpy.real(phase)
# Construct matrix and return
if which == 'left':
# We want to zero out a
if (abs(numpy.imag(a)) < EQ_TOLERANCE and
abs(numpy.imag(b)) < EQ_TOLERANCE):
# a and b are real, so return a standard rotation matrix
givens_rotation = numpy.array([[cosine, -phase * sine],
[phase * sine, cosine]])
else:
givens_rotation = numpy.array([[cosine, -phase * sine],
[sine, phase * cosine]])
elif which == 'right':
# We want to zero out b
if (abs(numpy.imag(a)) < EQ_TOLERANCE and
abs(numpy.imag(b)) < EQ_TOLERANCE):
# a and b are real, so return a standard rotation matrix
givens_rotation = numpy.array([[sine, phase * cosine],
[-phase * cosine, sine]])
else:
givens_rotation = numpy.array([[sine, phase * cosine],
[cosine, -phase * sine]])
else:
raise ValueError('"which" must be equal to "left" or "right".')
return givens_rotation
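# Minimal usage sketch (runs only when this module is executed directly, not
# on import): a left Givens rotation zeroes the first entry of a two-component
# vector and moves its magnitude into the second entry.
if __name__ == '__main__':
    _G = givens_matrix_elements(1.0, 1.0j, which='left')
    _result = _G.dot(numpy.array([1.0, 1.0j]))
    # G [a b]^T should equal [0 r]^T up to numerical tolerance.
    assert numpy.allclose(_result, [0.0, numpy.sqrt(2)])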
def givens_rotate(operator, givens_rotation, i, j, which='row'):
"""Apply a Givens rotation to coordinates i and j of an operator."""
if which == 'row':
# Rotate rows i and j
row_i = operator[i].copy()
row_j = operator[j].copy()
operator[i] = (givens_rotation[0, 0] * row_i +
givens_rotation[0, 1] * row_j)
operator[j] = (givens_rotation[1, 0] * row_i +
givens_rotation[1, 1] * row_j)
elif which == 'col':
# Rotate columns i and j
col_i = operator[:, i].copy()
col_j = operator[:, j].copy()
operator[:, i] = (givens_rotation[0, 0] * col_i +
givens_rotation[0, 1].conj() * col_j)
operator[:, j] = (givens_rotation[1, 0] * col_i +
givens_rotation[1, 1].conj() * col_j)
else:
raise ValueError('"which" must be equal to "row" or "col".')
def double_givens_rotate(operator, givens_rotation, i, j, which='row'):
"""Apply a double Givens rotation.
    Applies a Givens rotation to coordinates i and j and the conjugate
Givens rotation to coordinates n + i and n + j, where
n = dim(operator) / 2. dim(operator) must be even.
"""
m, p = operator.shape
if which == 'row':
if m % 2 != 0:
raise ValueError('To apply a double Givens rotation on rows, '
'the number of rows must be even.')
n = m // 2
# Rotate rows i and j
givens_rotate(operator[:n], givens_rotation, i, j, which='row')
# Rotate rows n + i and n + j
givens_rotate(operator[n:], givens_rotation.conj(), i, j, which='row')
elif which == 'col':
if p % 2 != 0:
raise ValueError('To apply a double Givens rotation on columns, '
'the number of columns must be even.')
n = p // 2
# Rotate columns i and j
givens_rotate(operator[:, :n], givens_rotation, i, j, which='col')
# Rotate cols n + i and n + j
givens_rotate(operator[:, n:],
givens_rotation.conj(),
i,
j,
which='col')
else:
raise ValueError('"which" must be equal to "row" or "col".')
def givens_decomposition_square(unitary_matrix, always_insert=False):
r"""Decompose a square matrix into a sequence of Givens rotations.
The input is a square $n \times n$ matrix $Q$.
$Q$ can be decomposed as follows:
$$
Q = DU
$$
where $U$ is unitary and $D$ is diagonal.
Furthermore, we can decompose $U$ as
$$
U = G_k ... G_1
$$
where $G_1, \ldots, G_k$ are complex Givens rotations.
A Givens rotation is a rotation within the two-dimensional subspace
spanned by two coordinate axes. Within the two relevant coordinate
axes, a Givens rotation has the form
$$
\begin{pmatrix}
\cos(\theta) & -e^{i \varphi} \sin(\theta) \\
\sin(\theta) & e^{i \varphi} \cos(\theta)
\end{pmatrix}.
$$
    Args:
        unitary_matrix: A numpy array with orthonormal rows,
            representing the matrix Q.
        always_insert (bool): If True, a rotation is recorded for every
            targeted element, even when that element is already within
            EQ_TOLERANCE of zero. Defaults to False.
Returns
-------
decomposition (list[tuple]):
A list of tuples of objects describing Givens
rotations. The list looks like [(G_1, ), (G_2, G_3), ... ].
The Givens rotations within a tuple can be implemented in parallel.
The description of a Givens rotation is itself a tuple of the
form $(i, j, \theta, \varphi)$, which represents a
Givens rotation of coordinates
$i$ and $j$ by angles $\theta$ and
$\varphi$.
diagonal (ndarray):
A list of the nonzero entries of $D$.
"""
current_matrix = numpy.copy(unitary_matrix)
n = current_matrix.shape[0]
decomposition = []
for k in range(2 * (n - 1) - 1):
# Initialize the list of parallel operations to perform
# in this iteration
parallel_ops = []
# Get the (row, column) indices of elements to zero out in parallel.
if k < n - 1:
start_row = 0
start_column = n - 1 - k
else:
start_row = k - (n - 2)
start_column = k - (n - 3)
column_indices = range(start_column, n, 2)
row_indices = range(start_row, start_row + len(column_indices))
indices_to_zero_out = zip(row_indices, column_indices)
for i, j in indices_to_zero_out:
# Compute the Givens rotation to zero out the (i, j) element,
# if needed
right_element = current_matrix[i, j].conj()
if always_insert or abs(right_element) > EQ_TOLERANCE:
# We actually need to perform a Givens rotation
left_element = current_matrix[i, j - 1].conj()
givens_rotation = givens_matrix_elements(left_element,
right_element,
which='right')
# Add the parameters to the list
theta = numpy.arcsin(numpy.real(givens_rotation[1, 0]))
phi = numpy.angle(givens_rotation[1, 1])
parallel_ops.append((j - 1, j, theta, phi))
# Update the matrix
givens_rotate(current_matrix,
givens_rotation,
j - 1,
j,
which='col')
# If the current list of parallel operations is not empty,
        # append it to the decomposition.
if parallel_ops:
decomposition.append(tuple(parallel_ops))
# Get the diagonal entries
diagonal = current_matrix[range(n), range(n)]
return decomposition, diagonal
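# Illustrative end-to-end sketch (runs only when this module is executed
# directly): decomposing a random 3 x 3 unitary should leave a unimodular
# diagonal, consistent with Q = D U where U is the product of the rotations.
if __name__ == '__main__':
    numpy.random.seed(0)
    _m = numpy.random.randn(3, 3) + 1j * numpy.random.randn(3, 3)
    _q, _ = numpy.linalg.qr(_m)  # _q is unitary
    _rotations, _diag = givens_decomposition_square(_q)
    assert numpy.allclose(numpy.abs(_diag), 1.0)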
def givens_decomposition(unitary_rows, always_insert=False):
r"""Decompose a matrix into a sequence of Givens rotations.
The input is an $m \times n$ matrix $Q$ with $m \leq n$.
The rows of $Q$ are orthonormal.
$Q$ can be decomposed as follows:
$$
V Q U^\dagger = D
$$
where $V$ and $U$ are unitary matrices, and $D$
is an $m \times n$ matrix with the
first $m$ columns forming a diagonal matrix and the rest of the
columns being zero. Furthermore, we can decompose $U$ as
$$
U = G_k ... G_1
$$
where $G_1, \ldots, G_k$ are complex Givens rotations.
A Givens rotation is a rotation within the two-dimensional subspace
spanned by two coordinate axes. Within the two relevant coordinate
axes, a Givens rotation has the form
$$
\begin{pmatrix}
\cos(\theta) & -e^{i \varphi} \sin(\theta) \\
\sin(\theta) & e^{i \varphi} \cos(\theta)
\end{pmatrix}.
$$
    Args:
        unitary_rows: A numpy array or matrix with orthonormal rows,
            representing the matrix Q.
        always_insert (bool): If True, a rotation is recorded for every
            targeted element, even when that element is already within
            EQ_TOLERANCE of zero. Defaults to False.
Returns
-------
givens_rotations (list[tuple]):
A list of tuples of objects describing Givens
rotations. The list looks like [(G_1, ), (G_2, G_3), ... ].
The Givens rotations within a tuple can be implemented in parallel.
The description of a Givens rotation is itself a tuple of the
form $(i, j, \theta, \varphi)$, which represents a
Givens rotation of coordinates
$i$ and $j$ by angles $\theta$ and
$\varphi$.
left_unitary (ndarray):
An $m \times m$ numpy array representing the matrix
$V$.
diagonal (ndarray):
A list of the nonzero entries of $D$.
"""
current_matrix = numpy.copy(unitary_rows)
m, n = current_matrix.shape
# Check that m <= n
if m > n:
raise ValueError('The input m x n matrix must have m <= n')
# Compute left_unitary using Givens rotations
left_unitary = numpy.eye(m, dtype=complex)
for k in reversed(range(n - m + 1, n)):
# Zero out entries in column k
for l in range(m - n + k):
# Zero out entry in row l if needed
if abs(current_matrix[l, k]) > EQ_TOLERANCE:
givens_rotation = givens_matrix_elements(
current_matrix[l, k], current_matrix[l + 1, k])
# Apply Givens rotation
givens_rotate(current_matrix, givens_rotation, l, l + 1)
givens_rotate(left_unitary, givens_rotation, l, l + 1)
# Compute the decomposition of current_matrix into Givens rotations
givens_rotations = []
# If m = n (the matrix is square) then we don't need to perform any
# Givens rotations!
if m != n:
# Get the maximum number of simultaneous rotations that
# will be performed
max_simul_rotations = min(m, n - m)
# There are n - 1 iterations (the circuit depth is n - 1)
for k in range(n - 1):
# Get the (row, column) indices of elements to zero out in
# parallel.
if k < max_simul_rotations - 1:
# There are k + 1 elements to zero out
start_row = 0
end_row = k + 1
start_column = n - m - k
end_column = start_column + 2 * (k + 1)
elif k > n - 1 - max_simul_rotations:
# There are n - 1 - k elements to zero out
start_row = m - (n - 1 - k)
end_row = m
start_column = m - (n - 1 - k) + 1
end_column = start_column + 2 * (n - 1 - k)
else:
# There are max_simul_rotations elements to zero out
if max_simul_rotations == m:
start_row = 0
end_row = m
start_column = n - m - k
end_column = start_column + 2 * m
else:
start_row = k + 1 - max_simul_rotations
end_row = k + 1
start_column = k + 1 - max_simul_rotations + 1
end_column = start_column + 2 * max_simul_rotations
row_indices = range(start_row, end_row)
column_indices = range(start_column, end_column, 2)
indices_to_zero_out = zip(row_indices, column_indices)
parallel_rotations = []
for i, j in indices_to_zero_out:
# Compute the Givens rotation to zero out the (i, j) element,
# if needed
right_element = current_matrix[i, j].conj()
if always_insert or abs(right_element) > EQ_TOLERANCE:
# We actually need to perform a Givens rotation
left_element = current_matrix[i, j - 1].conj()
givens_rotation = givens_matrix_elements(left_element,
right_element,
which='right')
# Add the parameters to the list
theta = numpy.arcsin(numpy.real(givens_rotation[1, 0]))
phi = numpy.angle(givens_rotation[1, 1])
parallel_rotations.append((j - 1, j, theta, phi))
# Update the matrix
givens_rotate(current_matrix,
givens_rotation,
j - 1,
j,
which='col')
# If the current list of parallel operations is not empty,
            # append it to the list of rotations.
if parallel_rotations:
givens_rotations.append(tuple(parallel_rotations))
# Get the diagonal entries
diagonal = current_matrix.diagonal()
return givens_rotations, left_unitary, diagonal
def fermionic_gaussian_decomposition(unitary_rows):
r"""Decompose a matrix into a sequence of Givens rotations and
particle-hole transformations on the last fermionic mode.
The input is an $N \times 2N$ matrix $W$ with orthonormal
rows. Furthermore, $W$ must have the block form
$$
W = ( W_1 \hspace{4pt} W_2 )
$$
where $W_1$ and $W_2$ satisfy
    $$
    W_1 W_1^\dagger + W_2 W_2^\dagger = I, \qquad
    W_1 W_2^T + W_2 W_1^T = 0.
    $$
Then $W$ can be decomposed as
$$
V W U^\dagger = ( 0 \hspace{6pt} D )
$$
where $V$ and $U$ are unitary matrices and $D$
is a diagonal unitary matrix. Furthermore, $U$ can be decomposed
as follows:
$$
U = B G_{k} \cdots B G_3 G_2 B G_1 B,
$$
where each $G_i$ is a Givens rotation, and $B$ represents
swapping the $N$-th column with the $2N$-th column,
which corresponds to a particle-hole transformation
on the last fermionic mode. This particle-hole transformation maps
$a^\dagger_N$ to $a_N$ and vice versa, while leaving the
other fermionic ladder operators invariant.
The decomposition of $U$ is returned as a list of tuples of objects
describing rotations and particle-hole transformations. The list looks
something like [('pht', ), (G_1, ), ('pht', G_2), ... ].
The objects within a tuple are either the string 'pht', which indicates
a particle-hole transformation on the last fermionic mode, or a tuple
of the form $(i, j, \theta, \varphi)$, which indicates a
Givens rotation of rows $i$ and $j$ by angles
$\theta$ and $\varphi$.
The matrix $V^T D^*$ can also be decomposed as a sequence of
Givens rotations. This decomposition is needed for a circuit that
prepares an excited state.
Args:
unitary_rows(ndarray): A matrix with orthonormal rows and
additional structure described above.
Returns
-------
decomposition (list[tuple]):
The decomposition of $U$.
left_decomposition (list[tuple]):
The decomposition of $V^T D^*$.
diagonal (ndarray):
A list of the nonzero entries of $D$.
left_diagonal (ndarray):
A list of the nonzero entries left from the decomposition
of $V^T D^*$.
"""
current_matrix = numpy.copy(unitary_rows)
n, p = current_matrix.shape
# Check that p = 2 * n
if p != 2 * n:
raise ValueError('The input matrix must have twice as many columns '
'as rows.')
# Check that left and right parts of unitary_rows satisfy the constraints
# necessary for the transformed fermionic operators to satisfy
# the fermionic anticommutation relations
left_part = unitary_rows[:, :n]
right_part = unitary_rows[:, n:]
constraint_matrix_1 = (left_part.dot(left_part.T.conj()) +
right_part.dot(right_part.T.conj()))
constraint_matrix_2 = (left_part.dot(right_part.T) +
right_part.dot(left_part.T))
discrepancy_1 = numpy.amax(abs(constraint_matrix_1 - numpy.eye(n)))
discrepancy_2 = numpy.amax(abs(constraint_matrix_2))
if discrepancy_1 > EQ_TOLERANCE or discrepancy_2 > EQ_TOLERANCE:
raise ValueError('The input matrix does not satisfy the constraints '
'necessary for a proper transformation of the '
'fermionic ladder operators.')
# Compute left_unitary using Givens rotations
left_unitary = numpy.eye(n, dtype=complex)
for k in range(n - 1):
# Zero out entries in column k
for l in range(n - 1 - k):
# Zero out entry in row l if needed
if abs(current_matrix[l, k]) > EQ_TOLERANCE:
givens_rotation = givens_matrix_elements(
current_matrix[l, k], current_matrix[l + 1, k])
# Apply Givens rotation
givens_rotate(current_matrix, givens_rotation, l, l + 1)
givens_rotate(left_unitary, givens_rotation, l, l + 1)
# Initialize list to store decomposition of current_matrix
decomposition = []
# There are 2 * n - 1 iterations (that is the circuit depth)
for k in range(2 * n - 1):
# Initialize the list of parallel operations to perform
# in this iteration
parallel_ops = []
# Perform a particle-hole transformation if necessary
if k % 2 == 0 and abs(current_matrix[k // 2, n - 1]) > EQ_TOLERANCE:
parallel_ops.append('pht')
swap_columns(current_matrix, n - 1, 2 * n - 1)
# Get the (row, column) indices of elements to zero out in parallel.
if k < n:
end_row = k
end_column = n - 1 - k
else:
end_row = n - 1
end_column = k - (n - 1)
column_indices = range(end_column, n - 1, 2)
row_indices = range(end_row, end_row - len(column_indices), -1)
indices_to_zero_out = zip(row_indices, column_indices)
for i, j in indices_to_zero_out:
# Compute the Givens rotation to zero out the (i, j) element,
# if needed
left_element = current_matrix[i, j].conj()
if abs(left_element) > EQ_TOLERANCE:
# We actually need to perform a Givens rotation
right_element = current_matrix[i, j + 1].conj()
givens_rotation = givens_matrix_elements(
left_element, right_element)
# Add the parameters to the list
theta = numpy.arcsin(numpy.real(givens_rotation[1, 0]))
phi = numpy.angle(givens_rotation[1, 1])
parallel_ops.append((j, j + 1, theta, phi))
# Update the matrix
double_givens_rotate(current_matrix,
givens_rotation,
j,
j + 1,
which='col')
# If the current list of parallel operations is not empty,
        # append it to the decomposition.
if parallel_ops:
decomposition.append(tuple(parallel_ops))
# Get the diagonal entries
diagonal = current_matrix[range(n), range(n, 2 * n)]
# Compute the decomposition of left_unitary^T * diagonal^*
current_matrix = left_unitary.T
for k in range(n):
current_matrix[:, k] *= diagonal[k].conj()
left_decomposition, left_diagonal = givens_decomposition_square(
current_matrix)
return decomposition, left_decomposition, diagonal, left_diagonal
def swap_rows(M, i, j):
"""Swap rows i and j of matrix M."""
if len(M.shape) == 1:
M[i], M[j] = M[j], M[i]
else:
row_i = M[i, :].copy()
row_j = M[j, :].copy()
M[i, :], M[j, :] = row_j, row_i
def swap_columns(M, i, j):
"""Swap columns i and j of matrix M."""
if len(M.shape) == 1:
M[i], M[j] = M[j], M[i]
else:
column_i = M[:, i].copy()
column_j = M[:, j].copy()
M[:, i], M[:, j] = column_j, column_i
|
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron.services.bgp.agent import config as bgp_config
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
from neutron.services.bgp.driver.ryu import driver as ryu_driver
from neutron.tests import base
# Test variables for BGP Speaker
FAKE_LOCAL_AS1 = 12345
FAKE_LOCAL_AS2 = 23456
FAKE_ROUTER_ID = '1.1.1.1'
# Test variables for BGP Peer
FAKE_PEER_AS = 45678
FAKE_PEER_IP = '2.2.2.5'
FAKE_AUTH_TYPE = 'md5'
FAKE_PEER_PASSWORD = 'awesome'
# Test variables for Route
FAKE_ROUTE = '2.2.2.0/24'
FAKE_NEXTHOP = '5.5.5.5'
class TestRyuBgpDriver(base.BaseTestCase):
def setUp(self):
super(TestRyuBgpDriver, self).setUp()
cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
cfg.CONF.set_override('bgp_router_id', FAKE_ROUTER_ID, 'BGP')
self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker')
self.mock_ryu_speaker = mock_ryu_speaker_p.start()
def test_add_new_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.mock_ryu_speaker.assert_called_once_with(
as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID,
bgp_server_port=0,
best_path_change_handler=ryu_driver.best_path_change_cb,
peer_down_handler=ryu_driver.bgp_peer_down_cb,
peer_up_handler=ryu_driver.bgp_peer_up_cb)
def test_remove_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertEqual(1, speaker.shutdown.call_count)
def test_add_bgp_peer_without_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=None,
connect_mode=CONNECT_MODE_ACTIVE)
def test_add_bgp_peer_with_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS,
FAKE_AUTH_TYPE,
FAKE_PEER_PASSWORD)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=FAKE_PEER_PASSWORD,
connect_mode=CONNECT_MODE_ACTIVE)
def test_remove_bgp_peer(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP)
def test_advertise_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1,
FAKE_ROUTE,
FAKE_NEXTHOP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE,
next_hop=FAKE_NEXTHOP)
def test_withdraw_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE)
def test_add_same_bgp_speakers_twice(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled,
self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1)
def test_add_different_bgp_speakers_when_one_already_added(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
def test_add_bgp_speaker_with_invalid_asnum_paramtype(self):
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_speaker, '12345')
def test_add_bgp_speaker_with_invalid_asnum_range(self):
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, 65536)
def test_add_bgp_peer_with_invalid_paramtype(self):
# Test with an invalid asnum data-type
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345')
# Test with an invalid auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'sha-1', 1234)
# Test with an invalid auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'hmac-md5', FAKE_PEER_PASSWORD)
# Test with none auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', FAKE_PEER_PASSWORD)
# Test with none auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', 1234)
# Test with a valid auth-type and no password
self.assertRaises(bgp_driver_exc.PasswordNotSpecified,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
FAKE_AUTH_TYPE, None)
def test_add_bgp_peer_with_invalid_asnum_range(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, 65536)
def test_add_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS)
def test_remove_bgp_peer_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.delete_bgp_peer,
FAKE_LOCAL_AS1, 12345)
def test_remove_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.delete_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP)
def test_advertise_route_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, 12345, FAKE_NEXTHOP)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, FAKE_ROUTE, 12345)
def test_advertise_route_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, FAKE_ROUTE, FAKE_NEXTHOP)
def test_withdraw_route_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, 12345)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, 12345)
def test_withdraw_route_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, FAKE_ROUTE)
def test_add_multiple_bgp_speakers(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.delete_bgp_speaker,
FAKE_LOCAL_AS2)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
|
|
#
# BaseImage.py -- Abstraction of a generic data image.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import numpy
import logging
from ginga.misc import Bunch, Callback
from ginga import AutoCuts
class ImageError(Exception):
pass
class BaseImage(Callback.Callbacks):
def __init__(self, data_np=None, metadata=None, logger=None):
Callback.Callbacks.__init__(self)
        if logger is not None:
self.logger = logger
else:
self.logger = logging.Logger('AstroImage')
        if data_np is None:
data_np = numpy.zeros((1, 1))
self._data = data_np
self.metadata = {}
if metadata:
self.update_metadata(metadata)
self._set_minmax()
self.autocuts = AutoCuts.AutoCuts(self.logger)
# For callbacks
for name in ('modified', ):
self.enable_callback(name)
@property
def width(self):
        # NOTE: data is indexed as [row, column] == [y, x], so shape is (height, width)
data = self.get_data()
return data.shape[1]
@property
def height(self):
        # NOTE: data is indexed as [row, column] == [y, x], so shape is (height, width)
data = self.get_data()
return data.shape[0]
def get_size(self):
return (self.width, self.height)
def get_depth(self):
data = self.get_data()
if len(data.shape) > 2:
return data.shape[2]
return 1
def get_shape(self):
data = self.get_data()
return data.shape
def get_data(self):
return self._data
def copy_data(self):
return self.get_data()
def get_data_xy(self, x, y):
data = self.get_data()
val = data[y, x]
return val
def _get_dims(self, data):
height, width = data.shape[:2]
return (width, height)
def get_metadata(self):
return self.metadata.copy()
def get_header(self):
return self.get('exif', {})
def get(self, kwd, *args):
        if kwd in self.metadata:
return self.metadata[kwd]
else:
# return a default if there is one
if len(args) > 0:
return args[0]
raise KeyError(kwd)
def get_list(self, *args):
        return list(map(self.get, args))
def __getitem__(self, kwd):
return self.metadata[kwd]
def update(self, kwds):
self.metadata.update(kwds)
def set(self, **kwds):
self.update(kwds)
def __setitem__(self, kwd, value):
self.metadata[kwd] = value
def set_data(self, data_np, metadata=None, astype=None):
"""Use this method to SHARE (not copy) the incoming array.
"""
if astype:
data = data_np.astype(astype)
else:
data = data_np
self._data = data
if metadata:
self.update_metadata(metadata)
self._set_minmax()
self.make_callback('modified')
def _set_minmax(self):
data = self.get_data()
self.maxval = numpy.nanmax(data)
self.minval = numpy.nanmin(data)
# TODO: see if there is a faster way to ignore infinity
if numpy.isfinite(self.maxval):
self.maxval_noinf = self.maxval
else:
try:
self.maxval_noinf = numpy.nanmax(data[numpy.isfinite(data)])
            except Exception:
self.maxval_noinf = self.maxval
if numpy.isfinite(self.minval):
self.minval_noinf = self.minval
else:
try:
self.minval_noinf = numpy.nanmin(data[numpy.isfinite(data)])
            except Exception:
self.minval_noinf = self.minval
def get_minmax(self, noinf=False):
if not noinf:
return (self.minval, self.maxval)
else:
return (self.minval_noinf, self.maxval_noinf)
def update_metadata(self, keyDict):
for key, val in keyDict.items():
self.metadata[key] = val
def transfer(self, other, astype=None):
data = self.get_data()
other.set_data(data, metadata=self.metadata, astype=astype)
def copy(self, astype=None):
other = BaseImage()
self.transfer(other, astype=astype)
return other
def cutout_data(self, x1, y1, x2, y2, astype=None):
"""cut out data area based on coords.
"""
data = self.get_data()
data = data[y1:y2, x1:x2]
if astype:
data = data.astype(astype)
return data
def cutout_adjust(self, x1, y1, x2, y2, astype=None):
dx = x2 - x1
dy = y2 - y1
if x1 < 0:
x1 = 0; x2 = dx
else:
if x2 >= self.width:
x2 = self.width
x1 = x2 - dx
if y1 < 0:
y1 = 0; y2 = dy
else:
if y2 >= self.height:
y2 = self.height
y1 = y2 - dy
data = self.cutout_data(x1, y1, x2, y2, astype=astype)
return (data, x1, y1, x2, y2)
def cutout_radius(self, x, y, radius, astype=None):
return self.cutout_adjust(x-radius, y-radius,
x+radius+1, y+radius+1,
astype=astype)
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):
# calculate dimensions of NON-scaled cutout
old_wd = x2 - x1 + 1
old_ht = y2 - y1 + 1
self.logger.debug("old=%dx%d new=%dx%d" % (
old_wd, old_ht, new_wd, new_ht))
data = self.get_data()
# Is there a more efficient way to do this?
yi, xi = numpy.mgrid[0:new_ht, 0:new_wd]
iscale_x = float(old_wd) / float(new_wd)
iscale_y = float(old_ht) / float(new_ht)
xi *= iscale_x
yi *= iscale_y
cutout = data[y1:y2+1, x1:x2+1]
ht, wd = cutout.shape[:2]
xi = xi.astype('int').clip(0, wd-1)
yi = yi.astype('int').clip(0, ht-1)
newdata = cutout[yi, xi]
ht, wd = newdata.shape[:2]
        scale_x = float(wd) / old_wd
        scale_y = float(ht) / old_ht
res = Bunch.Bunch(data=newdata, org_fac=1,
scale_x=scale_x, scale_y=scale_y)
return res
def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y):
# calculate dimensions of NON-scaled cutout
old_wd = x2 - x1 + 1
old_ht = y2 - y1 + 1
# calculate dimensions of scaled cutout
new_wd = int(round(scale_x * old_wd))
new_ht = int(round(scale_y * old_ht))
self.logger.debug("old=%dx%d new=%dx%d" % (
old_wd, old_ht, new_wd, new_ht))
data = self.get_data()
# Is there a more efficient way to do this?
yi, xi = numpy.mgrid[0:new_ht, 0:new_wd]
iscale_x = float(old_wd) / float(new_wd)
iscale_y = float(old_ht) / float(new_ht)
xi *= iscale_x
yi *= iscale_y
cutout = data[y1:y2+1, x1:x2+1]
ht, wd = cutout.shape[:2]
xi = xi.astype('int').clip(0, wd-1)
yi = yi.astype('int').clip(0, ht-1)
newdata = cutout[yi, xi]
ht, wd = newdata.shape[:2]
scale_x = float(wd) / old_wd
scale_y = float(ht) / old_ht
res = Bunch.Bunch(data=newdata, org_fac=1,
scale_x=scale_x, scale_y=scale_y)
return res
def get_scaled_cutout_by_dims(self, x1, y1, x2, y2, dst_wd, dst_ht,
method='basic'):
if method == 'basic':
return self.get_scaled_cutout_wdht(x1, y1, x2, y2, dst_wd, dst_ht)
raise ImageError("Method not supported: '%s'" % (method))
def get_scaled_cutout(self, x1, y1, x2, y2, scale_x, scale_y,
method='basic'):
if method == 'basic':
return self.get_scaled_cutout_basic(x1, y1, x2, y2,
scale_x, scale_y)
raise ImageError("Method not supported: '%s'" % (method))
def histogram(self, x1, y1, x2, y2, z=None, pct=1.0, numbins=2048):
data = self.get_data()
        if z is not None:
data = data[y1:y2, x1:x2, z]
else:
data = data[y1:y2, x1:x2]
return self.autocuts.calc_histogram(data, pct=pct, numbins=numbins)
def cut_levels(self, loval, hival, vmin=0.0, vmax=255.0):
data = self.get_data()
data = self.autocuts.cut_levels(data, loval, hival,
vmin=vmin, vmax=vmax)
self.set_data(data)
def transform(self, flip_x=False, flip_y=False, swap_xy=False):
data = self.get_data()
# Do transforms as necessary
if flip_y:
data = numpy.flipud(data)
if flip_x:
data = numpy.fliplr(data)
if swap_xy:
data = data.swapaxes(0, 1)
self.set_data(data)
def info_xy(self, data_x, data_y):
# Get the value under the data coordinates
try:
value = self.get_data_xy(int(data_x), int(data_y))
        except Exception:
value = None
info = Bunch.Bunch(itype='base', data_x=data_x, data_y=data_y,
x=data_x, y=data_y,
value=value)
return info
#END
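# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the class): exercises
# the BaseImage API on a small synthetic array. Guarded so it only runs when
# the module is executed directly; assumes ginga (for AutoCuts) is importable.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    img = BaseImage(data_np=numpy.arange(100).reshape((10, 10)))
    print(img.get_size())        # (width, height) -> (10, 10)
    print(img.get_minmax())      # (0, 99)
    sub = img.cutout_data(2, 2, 6, 6)
    print(sub.shape)             # (4, 4)
    img.transform(flip_y=True)   # flips rows in place and fires 'modified'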
|
|
"""This source manages a VTK dataset given to it. When this source is
pickled or persisted, it saves the data given to it in the form of a
gzipped string.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
import sys
import os
import tempfile
# Enthought library imports.
from traits.api import Instance, List, Str, Bool, Int
from traitsui.api import View, Group, Item
from apptools.persistence.state_pickler \
import gzip_string, gunzip_string, set_state
from tvtk.api import tvtk
from tvtk import messenger
# Local imports.
from tvtk.common import is_old_pipeline, configure_input_data
from mayavi.core.source import Source
from mayavi.core.common import handle_children_state
from mayavi.core.trait_defs import DEnum
from mayavi.core.pipeline_info import (PipelineInfo,
get_tvtk_dataset_name)
from .vtk_xml_file_reader import get_all_attributes
######################################################################
# Utility functions.
######################################################################
def write_dataset_to_string(data):
"""Given a dataset, convert the dataset to an ASCII string that can
be stored for persistence.
"""
w = tvtk.DataSetWriter(write_to_output_string=1)
warn = w.global_warning_display
configure_input_data(w, data)
w.global_warning_display = 0
w.update()
if w.output_string_length == 0:
# Some VTK versions (5.2) have a bug when writing structured
# grid datasets and produce empty output. We work around this
# by writing to a file and then reading that output.
w.write_to_output_string = 0
fh, fname = tempfile.mkstemp('.vtk')
os.close(fh); os.remove(fname)
w.file_name = fname
w.write()
# Read the data and delete the file.
sdata = open(fname).read()
os.remove(fname)
else:
sdata = w.output_string
w.global_warning_display = warn
return sdata
def has_attributes(dataset):
"""Returns `True` when the given TVTK `dataset` has any attribute
arrays in point and cell data and `False` otherwise.
"""
pd = dataset.point_data
if pd is not None and pd.number_of_arrays > 0:
return True
cd = dataset.cell_data
if cd is not None and cd.number_of_arrays > 0:
return True
return False
######################################################################
# `VTKDataSource` class
######################################################################
class VTKDataSource(Source):
"""This source manages a VTK dataset given to it. When this
source is pickled or persisted, it saves the data given to it in
the form of a gzipped string.
Note that if the VTK dataset has changed internally and you need
    to notify the mayavi pipeline to flush the data, just call the
`modified` method of the VTK dataset and the mayavi pipeline will
update automatically.
"""
# The version of this class. Used for persistence.
__version__ = 0
# The VTK dataset to manage.
data = Instance(tvtk.DataSet, allow_none=False)
# Information about what this object can produce.
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
########################################
# Dynamic traits: These traits are dynamic and are updated on the
# _update_data method.
# The active point scalar name.
point_scalars_name = DEnum(values_name='_point_scalars_list',
desc='scalar point data attribute to use')
# The active point vector name.
point_vectors_name = DEnum(values_name='_point_vectors_list',
desc='vectors point data attribute to use')
# The active point tensor name.
point_tensors_name = DEnum(values_name='_point_tensors_list',
desc='tensor point data attribute to use')
# The active cell scalar name.
cell_scalars_name = DEnum(values_name='_cell_scalars_list',
desc='scalar cell data attribute to use')
# The active cell vector name.
cell_vectors_name = DEnum(values_name='_cell_vectors_list',
desc='vectors cell data attribute to use')
# The active cell tensor name.
cell_tensors_name = DEnum(values_name='_cell_tensors_list',
desc='tensor cell data attribute to use')
########################################
# Our view.
view = View(Group(Item(name='point_scalars_name'),
Item(name='point_vectors_name'),
Item(name='point_tensors_name'),
Item(name='cell_scalars_name'),
Item(name='cell_vectors_name'),
Item(name='cell_tensors_name'),
Item(name='data'),
))
########################################
# Private traits.
# These private traits store the list of available data
# attributes. The non-private traits use these lists internally.
_point_scalars_list = List(Str)
_point_vectors_list = List(Str)
_point_tensors_list = List(Str)
_cell_scalars_list = List(Str)
_cell_vectors_list = List(Str)
_cell_tensors_list = List(Str)
# This filter allows us to change the attributes of the data
# object and will ensure that the pipeline is properly taken care
# of. Directly setting the array in the VTK object will not do
# this.
_assign_attribute = Instance(tvtk.AssignAttribute, args=(),
allow_none=False)
# Toggles if this is the first time this object has been used.
_first = Bool(True)
# The ID of the observer for the data.
_observer_id = Int(-1)
######################################################################
# `object` interface
######################################################################
def __get_pure_state__(self):
d = super(VTKDataSource, self).__get_pure_state__()
for name in ('_assign_attribute', '_first', '_observer'):
d.pop(name, None)
for name in ('point_scalars', 'point_vectors',
'point_tensors', 'cell_scalars',
'cell_vectors', 'cell_tensors'):
d.pop('_' + name + '_list', None)
d.pop('_' + name + '_name', None)
data = self.data
if data is not None:
sdata = write_dataset_to_string(data)
if sys.version_info[0] > 2:
z = gzip_string(sdata.encode('ascii'))
else:
z = gzip_string(sdata)
d['data'] = z
return d
def __set_pure_state__(self, state):
z = state.data
if z is not None:
if sys.version_info[0] > 2:
d = gunzip_string(z).decode('ascii')
else:
d = gunzip_string(z)
r = tvtk.DataSetReader(read_from_input_string=1,
input_string=d)
warn = r.global_warning_display
r.global_warning_display = 0
r.update()
r.global_warning_display = warn
self.data = r.output
# Now set the remaining state without touching the children.
set_state(self, state, ignore=['children', 'data'])
# Setup the children.
handle_children_state(self.children, state.children)
# Setup the children's state.
set_state(self, state, first=['children'], ignore=['*'])
######################################################################
# `Base` interface
######################################################################
def start(self):
"""This is invoked when this object is added to the mayavi
pipeline.
"""
# Do nothing if we are already running.
if self.running:
return
# Update the data just in case.
self._update_data()
# Call the parent method to do its thing. This will typically
# start all our children.
super(VTKDataSource, self).start()
def update(self):
"""Invoke this to flush data changes downstream. This is
typically used when you change the data object and want the
mayavi pipeline to refresh.
"""
# This tells the VTK pipeline that the data has changed. This
# will fire the data_changed event automatically.
self.data.modified()
self._assign_attribute.update()
######################################################################
# Non-public interface
######################################################################
def _data_changed(self, old, new):
if has_attributes(self.data):
aa = self._assign_attribute
self.configure_input_data(aa, new)
self._update_data()
aa.update()
self.outputs = [aa.output]
else:
self.outputs = [self.data]
self.data_changed = True
self.output_info.datasets = \
[get_tvtk_dataset_name(self.outputs[0])]
# Add an observer to the VTK dataset after removing the one
# for the old dataset. We use the messenger to avoid an
# uncollectable reference cycle. See the
# tvtk.messenger module documentation for details.
if old is not None:
old.remove_observer(self._observer_id)
self._observer_id = new.add_observer('ModifiedEvent',
messenger.send)
new_vtk = tvtk.to_vtk(new)
messenger.connect(new_vtk, 'ModifiedEvent',
self._fire_data_changed)
# Change our name so that our label on the tree is updated.
self.name = self._get_name()
def _fire_data_changed(self, *args):
"""Simply fire the `data_changed` event."""
self.data_changed = True
def _set_data_name(self, data_type, attr_type, value):
if value is None:
return
dataset = self.data
if len(value) == 0:
# If the value is empty then we deactivate that attribute.
d = getattr(dataset, attr_type + '_data')
method = getattr(d, 'set_active_%s'%data_type)
method(None)
self.data_changed = True
return
aa = self._assign_attribute
data = None
if attr_type == 'point':
data = dataset.point_data
elif attr_type == 'cell':
data = dataset.cell_data
method = getattr(data, 'set_active_%s'%data_type)
method(value)
aa.assign(value, data_type.upper(), attr_type.upper() +'_DATA')
if data_type == 'scalars' and dataset.is_a('vtkImageData'):
# Set the scalar_type for image data, if not you can either
# get garbage rendered or worse.
s = getattr(dataset, attr_type + '_data').scalars
r = s.range
if is_old_pipeline():
dataset.scalar_type = s.data_type
aa.output.scalar_type = s.data_type
aa.update()
# Fire an event, so the changes propagate.
self.data_changed = True
def _point_scalars_name_changed(self, value):
self._set_data_name('scalars', 'point', value)
def _point_vectors_name_changed(self, value):
self._set_data_name('vectors', 'point', value)
def _point_tensors_name_changed(self, value):
self._set_data_name('tensors', 'point', value)
def _cell_scalars_name_changed(self, value):
self._set_data_name('scalars', 'cell', value)
def _cell_vectors_name_changed(self, value):
self._set_data_name('vectors', 'cell', value)
def _cell_tensors_name_changed(self, value):
self._set_data_name('tensors', 'cell', value)
def _update_data(self):
if self.data is None:
return
pnt_attr, cell_attr = get_all_attributes(self.data)
pd = self.data.point_data
scalars = pd.scalars
if self.data.is_a('vtkImageData') and scalars is not None:
# For some reason getting the range of the scalars flushes
# the data through to prevent some really strange errors
# when using an ImagePlaneWidget.
r = scalars.range
if is_old_pipeline():
self._assign_attribute.output.scalar_type = scalars.data_type
self.data.scalar_type = scalars.data_type
def _setup_data_traits(obj, attributes, d_type):
"""Given the object, the dict of the attributes from the
`get_all_attributes` function and the data type
(point/cell) data this will setup the object and the data.
"""
attrs = ['scalars', 'vectors', 'tensors']
aa = obj._assign_attribute
data = getattr(obj.data, '%s_data'%d_type)
for attr in attrs:
values = attributes[attr]
values.append('')
setattr(obj, '_%s_%s_list'%(d_type, attr), values)
if len(values) > 1:
default = getattr(obj, '%s_%s_name'%(d_type, attr))
if obj._first and len(default) == 0:
default = values[0]
getattr(data, 'set_active_%s'%attr)(default)
aa.assign(default, attr.upper(),
d_type.upper() +'_DATA')
aa.update()
kw = {'%s_%s_name'%(d_type, attr): default,
'trait_change_notify': False}
obj.set(**kw)
_setup_data_traits(self, pnt_attr, 'point')
_setup_data_traits(self, cell_attr, 'cell')
if self._first:
self._first = False
# Propagate the data changed event.
self.data_changed = True
def _get_name(self):
""" Gets the name to display on the tree.
"""
ret = "VTK Data (uninitialized)"
if self.data is not None:
typ = self.data.__class__.__name__
ret = "VTK Data (%s)"%typ
if '[Hidden]' in self.name:
ret += ' [Hidden]'
return ret
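# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the class). It assumes
# a running mayavi engine via ``mayavi.mlab``; ``mlab.pipeline.add_dataset``
# wraps a TVTK dataset in a VTKDataSource like the one defined above. After
# mutating the dataset in place, ``update()`` flushes the change downstream,
# as described in the class docstring.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from mayavi import mlab
    pd = tvtk.PolyData(points=[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]],
                       polys=[[0, 1, 2]])
    src = mlab.pipeline.add_dataset(pd)   # returns a VTKDataSource
    mlab.pipeline.surface(src)
    pd.points = [[0., 0., 0.], [2., 0., 0.], [0., 2., 0.]]
    src.update()                          # notify the pipeline of the change
    mlab.show()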
|
|
from Quartz import *
import math
import sys
def doEgg(context):
p0 = CGPoint(0, 0)
p1 = CGPoint(0, 200)
c1 = CGPoint(140, 5)
c2 = CGPoint(80, 198)
CGContextTranslateCTM(context, 100, 5)
CGContextBeginPath(context)
CGContextMoveToPoint(context, p0.x, p0.y)
# Create the Bezier path segment for the right side of the egg.
CGContextAddCurveToPoint(context, c1.x, c1.y, c2.x, c2.y, p1.x, p1.y)
# Create the Bezier path segment for the left side of the egg.
CGContextAddCurveToPoint(context, -c2.x, c2.y, -c1.x, c1.y, p0.x, p0.y)
CGContextClosePath(context)
CGContextSetLineWidth(context, 2)
CGContextDrawPath(context, kCGPathStroke)
def addRoundedRectToPath(context, rect, ovalWidth, ovalHeight):
# If either ovalWidth or ovalHeight is 0, draw a regular rectangle.
if ovalWidth == 0 or ovalHeight == 0:
CGContextAddRect(context, rect)
else:
CGContextSaveGState(context)
if 1:
# Translate to lower-left corner of rectangle.
CGContextTranslateCTM(context,
CGRectGetMinX(rect), CGRectGetMinY(rect))
# Scale by the oval width and height so that
# each rounded corner is 0.5 units in radius.
CGContextScaleCTM(context, ovalWidth, ovalHeight)
# Unscale the rectangle width by the amount of the X scaling.
fw = CGRectGetWidth(rect) / ovalWidth
# Unscale the rectangle height by the amount of the Y scaling.
fh = CGRectGetHeight(rect) / ovalHeight
# Start at the right edge of the rect, at the midpoint in Y.
CGContextMoveToPoint(context, fw, fh/2)
# Segment 1
CGContextAddArcToPoint(context, fw, fh, fw/2, fh, 0.5)
# Segment 2
CGContextAddArcToPoint(context, 0, fh, 0, fh/2, 0.5)
# Segment 3
CGContextAddArcToPoint(context, 0, 0, fw/2, 0, 0.5)
# Segment 4
CGContextAddArcToPoint(context, fw, 0, fw, fh/2, 0.5)
# Closing the path adds the last segment.
CGContextClosePath(context)
CGContextRestoreGState(context)
def doRoundedRects(context):
rect = CGRectMake(10, 10, 210, 150)
ovalWidth = 100
ovalHeight = 100
CGContextSetLineWidth(context, 2.)
CGContextBeginPath(context)
addRoundedRectToPath(context, rect, ovalWidth, ovalHeight)
CGContextSetRGBStrokeColor(context, 1, 0, 0, 1)
CGContextDrawPath(context, kCGPathStroke)
def doStrokeWithCTM(context):
CGContextTranslateCTM(context, 150., 180.)
CGContextSetLineWidth(context, 10)
# Draw ellipse 1 with a uniform stroke.
CGContextSaveGState(context)
if 1:
# Scale the CTM so the circular arc will be elliptical.
CGContextScaleCTM(context, 2, 1)
CGContextBeginPath(context)
# Create an arc that is a circle.
CGContextAddArc(context, 0., 0., 45., 0., 2*math.pi, 0)
# Restore the context parameters prior to stroking the path.
# CGContextRestoreGState does not affect the path in the context.
CGContextRestoreGState(context)
CGContextStrokePath(context)
# *** was 0, -120
CGContextTranslateCTM(context, 220., 0.)
# Draw ellipse 2 with non-uniform stroke.
CGContextSaveGState(context)
if 1:
# Scale the CTM so the circular arc will be elliptical.
CGContextScaleCTM(context, 2, 1)
CGContextBeginPath(context)
# Create an arc that is a circle.
CGContextAddArc(context, 0., 0., 45., 0., 2*math.pi, 0)
# Stroke the path with the scaled coordinate system in effect.
CGContextStrokePath(context)
CGContextRestoreGState(context)
def doRotatedEllipsesWithCGPath(context):
totreps = 144
tint = 1.0
tintIncrement = 1.0/totreps
# Create a new transform consisting of a 45 degree rotation.
theTransform = CGAffineTransformMakeRotation(math.pi/4)
# Apply a scaling transformation to the transform just created.
theTransform = CGAffineTransformScale(theTransform, 1, 2)
# Create a mutable CGPath object.
path = CGPathCreateMutable()
if path is None:
print >>sys.stderr, "Couldn't create path!"
return
# Add a circular arc to the CGPath object, transformed
# by an affine transform.
    CGPathAddArc(path, theTransform, 0., 0., 45., 0., 2*math.pi, False)
# Close the CGPath object.
CGPathCloseSubpath(path)
# Place the first ellipse at a good location.
CGContextTranslateCTM(context, 100, 100)
for i in range(totreps):
CGContextBeginPath(context)
# Add the CGPath object to the current path in the context.
CGContextAddPath(context, path)
# Set the fill color for this instance of the ellipse.
CGContextSetRGBFillColor(context, tint, 0., 0., 1.)
# Filling the path implicitly closes it.
CGContextFillPath(context)
# Compute the next tint color.
tint -= tintIncrement
# Move over for the next ellipse.
CGContextTranslateCTM(context, 1, 0.)
def alignPointToUserSpace(context, p):
# Compute the coordinates of the point in device space.
p = CGContextConvertPointToDeviceSpace(context, p)
# Ensure that coordinates are at exactly the corner
# of a device pixel.
p.x = math.floor(p.x)
p.y = math.floor(p.y)
# Convert the device aligned coordinate back to user space.
return CGContextConvertPointToUserSpace(context, p)
def alignSizeToUserSpace(context, s):
# Compute the size in device space.
s = CGContextConvertSizeToDeviceSpace(context, s)
# Ensure that size is an integer multiple of device pixels.
s.width = math.floor(s.width)
s.height = math.floor(s.height)
# Convert back to user space.
return CGContextConvertSizeToUserSpace(context, s)
def alignRectToUserSpace(context, r):
# Compute the coordinates of the rectangle in device space.
r = CGContextConvertRectToDeviceSpace(context, r)
# Ensure that the x and y coordinates are at a pixel corner.
r.origin.x = math.floor(r.origin.x)
r.origin.y = math.floor(r.origin.y)
# Ensure that the width and height are an integer number of
# device pixels. Note that this produces a width and height
# that is less than or equal to the original width. Another
# approach is to use ceil to ensure that the new rectangle
# encloses the original one.
r.size.width = math.floor(r.size.width)
r.size.height = math.floor(r.size.height)
# Convert back to user space.
return CGContextConvertRectToUserSpace(context, r)
def doPixelAlignedFillAndStroke(context):
p1 = CGPointMake(16.7, 17.8)
p2 = CGPointMake(116.7, 17.8)
r = CGRectMake(16.7, 20.8, 100.6, 100.6)
CGContextSetLineWidth(context, 2)
CGContextSetRGBFillColor(context, 1., 0., 0., 1.)
CGContextSetRGBStrokeColor(context, 1., 0., 0., 1.)
# Unaligned drawing.
CGContextBeginPath(context)
CGContextMoveToPoint(context, p1.x, p1.y)
CGContextAddLineToPoint(context, p2.x, p2.y)
CGContextStrokePath(context)
CGContextFillRect(context, r)
# Translate to the right before drawing along
# aligned coordinates.
CGContextTranslateCTM(context, 106, 0)
# Aligned drawing.
# Compute the length of the line in user space.
s = CGSizeMake(p2.x - p1.x, p2.y - p1.y)
CGContextBeginPath(context)
# Align the starting point to a device
# pixel boundary.
p1 = alignPointToUserSpace(context, p1)
# Establish the starting point of the line.
CGContextMoveToPoint(context, p1.x, p1.y)
# Compute the line length as an integer
# number of device pixels.
s = alignSizeToUserSpace(context, s)
CGContextAddLineToPoint(context,
p1.x + s.width,
p1.y + s.height)
CGContextStrokePath(context)
# Compute a rect that is aligned to device
# space with a width that is an integer
# number of device pixels.
r = alignRectToUserSpace(context, r)
CGContextFillRect(context, r)
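# ---------------------------------------------------------------------------
# Minimal driver sketch (illustrative only): the drawing routines above all
# expect a CGContext, so render one of them into an offscreen bitmap context.
# The size and the choice of doEgg() are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    colorspace = CGColorSpaceCreateDeviceRGB()
    context = CGBitmapContextCreate(None, 400, 300, 8, 0, colorspace,
                                    kCGImageAlphaPremultipliedLast)
    doEgg(context)
    image = CGBitmapContextCreateImage(context)
    print("rendered %dx%d bitmap" % (CGImageGetWidth(image), CGImageGetHeight(image)))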
|
|
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('SURFACE',
doc="""
The :class:`SURFACE` class allows you to create, read and alter Geosurface files (``*.geosoft_surface``).
A Geosurface file can contain one or more surface items (see :class:`SURFACEITEM` class). In turn each item can
contains one or more triangular polyhedral meshes.
""")
gx_defines = [
Define('SURFACE_OPEN',
doc="Open Modes",
constants=[
Constant('SURFACE_OPEN_READ', value='0', type=Type.INT32_T),
Constant('SURFACE_OPEN_READWRITE', value='1', type=Type.INT32_T)
])]
gx_methods = {
'Miscellaneous': [
Method('Create_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Create a new Geosurface file",
return_type="SURFACE",
return_doc=":class:`SURFACE` Object",
parameters = [
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file name"),
Parameter('ipj', type="IPJ",
doc=":class:`IPJ` containing coordinate system of the Geosurface")
]),
Method('Open_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Open a Geosurface file",
return_type="SURFACE",
return_doc=":class:`SURFACE` Object",
parameters = [
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file name"),
Parameter('mode', type=Type.INT32_T,
doc=":def:`SURFACE_OPEN`")
]),
Method('Destroy_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Destroy the :class:`SURFACE` Object.",
return_type=Type.VOID,
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` Object")
]),
Method('GetIPJ_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Get the coordinate system of the :class:`SURFACE`.",
return_type=Type.VOID,
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` Object"),
Parameter('ipj', type="IPJ",
doc=":class:`IPJ` in which to place the Geosurface coordinate system")
]),
Method('SetIPJ_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Change the coordinate system of the :class:`SURFACE`.",
return_type=Type.VOID,
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` Object"),
Parameter('ipj', type="IPJ",
doc=":class:`IPJ` containing the new coordinate system of the Geosurface")
]),
Method('GetSurfaceItems_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Get the surfaces items in a Geosurface file",
return_type=Type.VOID,
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` Object"),
Parameter('lst', type="LST",
doc=":class:`LST` to fill")
]),
Method('GetSurfaceItem_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Get the an existing surface item from the :class:`SURFACE`",
return_type="SURFACEITEM",
return_doc=":class:`SURFACEITEM` Object",
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` Object"),
Parameter('guid', type=Type.STRING,
doc="Item GUID")
]),
Method('AddSurfaceItem_SURFACE', module='geoengine.core', version='8.4.0',
availability=Availability.PUBLIC,
doc="Add a new surface item to the :class:`SURFACE`",
return_type=Type.VOID,
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` Object"),
Parameter('surfaceitem', type="SURFACEITEM",
doc=":class:`SURFACEITEM` to add")
]),
Method('GetSurfaceNames_SURFACE', module='geoengine.core', version='8.0.0',
availability=Availability.PUBLIC,
doc="Get the surface item names in a Geosurface file",
return_type=Type.VOID,
parameters = [
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file"),
Parameter('lst', type="LST",
doc=":class:`LST` to fill")
]),
Method('GetClosedSurfaceNames_SURFACE', module='geoengine.core', version='8.0.0',
availability=Availability.PUBLIC,
doc="Get the names of closed surface items in a Geosurface file (may return an empty list)",
return_type=Type.VOID,
parameters = [
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file"),
Parameter('lst', type="LST",
doc=":class:`LST` to fill (may return an empty :class:`LST` if none of the surfaces are closed)")
]),
Method('GetExtents_SURFACE', module='geoengine.core', version='8.5.0',
availability=Availability.PUBLIC,
doc="Get the spatial range of all surface items.",
return_type=Type.VOID,
parameters = [
Parameter('surface', type="SURFACE",
doc=":class:`SURFACE` object"),
Parameter('min_x', type=Type.DOUBLE, is_ref=True,
doc="Minimum valid data in X."),
Parameter('min_y', type=Type.DOUBLE, is_ref=True,
doc="Minimum valid data in Y."),
Parameter('min_z', type=Type.DOUBLE, is_ref=True,
doc="Minimum valid data in Z."),
Parameter('max_x', type=Type.DOUBLE, is_ref=True,
doc="Maximum valid data in X."),
Parameter('max_y', type=Type.DOUBLE, is_ref=True,
doc="Maximum valid data in Y."),
Parameter('max_z', type=Type.DOUBLE, is_ref=True,
doc="Maximum valid data in Z.")
]),
Method('CRC_SURFACE', module='geoengine.core', version='8.0.0',
availability=Availability.PUBLIC,
doc="Compute an XML CRC of a Geosurface file.",
return_type="CRC",
return_doc="CRC Value (always 0)",
parameters = [
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file"),
Parameter('output', type=Type.STRING,
doc="Output file"),
Parameter('crc', type=Type.INT32_T, is_ref=True,
doc="CRC (unused, always set to 0)")
]),
Method('Sync_SURFACE', module='geoengine.core', version='8.0.0',
availability=Availability.PUBLIC,
doc="Syncronize the Metadata for this Geosurface",
return_type=Type.VOID,
parameters = [
Parameter('name', type=Type.STRING,
doc="Geosurface file")
]),
Method('CreateFromDXF_SURFACE', module='geoengine.core', version='8.2.0',
availability=Availability.PUBLIC,
doc="Create Geosurface file from DXF file.",
return_type=Type.VOID,
parameters = [
Parameter('ipj', type="IPJ"),
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file"),
Parameter('dxf_file', type=Type.STRING,
doc="DXF file")
]),
Method('CreateFromVulcanTriangulation_SURFACE', module='geoengine.interoperability', version='8.4.0',
availability=Availability.LICENSED,
doc="Create Geosurface file from a Maptek Vulcan triangulation file.",
return_type=Type.VOID,
parameters = [
Parameter('triangulation_file', type=Type.STRING,
doc="00t file"),
Parameter('ipj', type="IPJ"),
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file")
]),
Method('AppendVulcanTriangulation_SURFACE', module='geoengine.interoperability', version='8.4.0',
availability=Availability.LICENSED,
doc="Create new surface from a Maptek Vulcan triangulation file and add to an existing geosurface.",
return_type=Type.VOID,
parameters = [
Parameter('triangulation_file', type=Type.STRING,
doc="00t file"),
Parameter('ipj', type="IPJ"),
Parameter('surface_file', type=Type.STRING,
doc="Geosurface file")
]),
Method('DumpGeometryToTextFile_SURFACE', module='geoengine.core', version='9.4.0',
availability=Availability.PUBLIC,
doc="Dump surface geometry to a text file.",
return_type=Type.VOID,
parameters = [
Parameter('surface_filename', type=Type.STRING,
doc="Geosurface file"),
Parameter('text_filename', type=Type.STRING,
doc="Text file")
])
]
}
|
|
"""Facebook module."""
from __future__ import absolute_import
from collections import OrderedDict
import contextlib
import itertools
import datetime
import urllib
import urlparse
import json
import facebook
import pytz
from wrap2.core import SocialNetwork
class Facebook(SocialNetwork):
"""Facebook social network wrapper."""
    # Use the mobile dialog by default, as this package is used in a mobile app
    # (it can be overridden with the desktop URL if needed).
FB_OATH_BASE = 'http://m.facebook.com/dialog/oauth?'
FB_OATH_ACCT = 'https://graph.facebook.com/oauth/access_token?'
FB_PROFILE_BASE = 'https://graph.facebook.com/me?'
def get_authorization_url(self, callback_url, scope=()):
"""
        See :meth:`SocialNetwork.get_authorization_url()` for the basics on this
method.
        Returns a tuple whose first element is the URL where the user can
        authorize (log in to) Facebook and give your application permission to
        read data from their profile.
The scope is a list of permissions (as string values) for the resulting
        ``access_token`` from :meth:`on_authorization_callback()`; see
https://developers.facebook.com/docs/reference/api/permissions/ for a
list of possible values.
"""
args = dict(client_id=self.id, redirect_uri=callback_url, scope=','.join(scope))
result = self.FB_OATH_BASE + urllib.urlencode(args)
return (result, ())
def on_authorization_callback(self, callback_url, code=None):
"""
See :meth:`SocialNetwork.on_authorization_callback()` for the basics on
this method.
The ``code`` is attached to the ``callback_url`` redirect by
Facebook after the user has logged in at the URL retrieved from
:meth:`get_authorization_url()`
:return: a tuple containing:
(<user-id>, <access-token>, <user-name>, <profile-link>)
"""
args = dict(client_id=self.id, redirect_uri=callback_url)
args['code'] = code
args['client_secret'] = self.secret
with contextlib.closing(urllib.urlopen(self.FB_OATH_ACCT + urllib.urlencode(args))) as reader:
body = reader.read()
response = urlparse.parse_qs(body)
access_token = response["access_token"][-1]
# Download the user profile
with contextlib.closing(
urllib.urlopen(self.FB_PROFILE_BASE + urllib.urlencode(dict(access_token=access_token)))) as reader:
profile = json.load(reader)
return (profile['id'], access_token, profile['name'], profile['link'])
def post(self, access_token, msg):
"""
Puts a post on the wall of the user authorized by ``access_token``.
``access_token`` can be retrieved via
:meth:`on_authorization_callback()`
Returns a dictionary containing {'id':'<id-of-post>'}
"""
graph = facebook.GraphAPI(access_token)
post = graph.put_wall_post(msg)
return post
def get(self, query, access_token, friends_only=False, count=100):
"""
Return list of objects which correspond to the ``query``
:param query: `dict` representing a query params
:param access_token: required, to access the stream
:param friends_only: flag to only output the friends related content
:param count: limit output items count
"""
graph = facebook.GraphAPI(access_token)
if friends_only:
raise NotImplementedError('Search among friends is not implemented.')
query = dict(OrderedDict(
posts='SELECT post_id,'
'actor_id, attachment, comments.count, created_time, likes, message, message_tags, '
'parent_post_id, permalink, place, post_id, privacy, share_count, source_id, type, '
'updated_time, target_id '
'FROM stream WHERE source_id in ({0}) ORDER BY created_time desc LIMIT {1}'
.format(', '.join(map(str, query['ids'])), count),
post_actor_info='SELECT uid, name FROM user '
'WHERE uid IN (SELECT actor_id, target_id FROM #posts)',
comments='SELECT id, likes, text, text_tags, time, post_id, fromid '
'FROM comment WHERE post_id in '
'(SELECT post_id FROM #posts) ORDER BY time desc LIMIT {0}'.format(count),
comment_actor_info='SELECT uid, name FROM user '
'WHERE uid IN (SELECT fromid FROM #comments)'))
result = graph.request('fql', args=dict(q=query))
#expand results json to make processing easier
result = dict(((record['name'], record['fql_result_set']) for record in result['data']))
posts = self.merge_results(result)
posts.sort(key=lambda x: x['created_at'])
return posts
def merge_results(self, result):
"""Merge result into list of posts.
        :param result: raw response data from an FQL call to the Facebook API, which gets posts, comments and profile info.
        :return: list of twitter-like dictionary `posts`.
"""
posts = []
posts_by_id = {}
post_data = itertools.izip_longest(
result['posts'], result['post_actor_info'], fillvalue={'name': '', 'uid': ''}
)
for post, actor_info in post_data:
posts.append(self.post_to_dict(post, actor_info))
posts_by_id[post['post_id']] = post
comment_data = itertools.izip_longest(
result['comments'], result['comment_actor_info'], fillvalue={'name': '', 'uid': ''}
)
for comment, actor_info in comment_data:
posts.append(self.comment_to_dict(comment, actor_info, posts_by_id[comment['post_id']]))
return posts
def post_to_dict(self, post, actor_info):
"""
Convert post to python dict.
Output should be in exactly same format as twitter or other network supported by
this package. But we take twitter as standard.
:param post: post object directly from FB communication
:param actor_info: actor_info object directly from FB communication. Actor info is an object describing
author of the post.
:return: `dict` in form: {'created_at': datetime.datetime(2012, 1, 1) ...} full format see on the
https://dev.twitter.com/docs/api/1.1/get/search/tweets
"""
return {"text": post["message"],
"created_at": datetime.datetime.fromtimestamp(post['created_time'], pytz.UTC),
"entities": {
"urls": [
{
"url": post['permalink'],
"expanded_url": post['permalink'],
"display_url": post['permalink'],
"indices": [] # we don't actually seach, so can't have any indices
}
]
},
"from_user": actor_info['name'],
"from_user_id": actor_info['uid'],
"from_user_id_str": str(actor_info['uid']),
"geo": None, # TODO: implement geo tags conversion from FB to TWITTER
"id": post['post_id'],
"id_str": str(post['post_id']),
"iso_language_code": "en",
"metadata": {
"recent_retweets": 0, # TODO: retweets are irrelevant for FB, but we have reshare count from FB
"result_type": "recent"
}
}
def comment_to_dict(self, comment, actor_info, post):
"""
Convert comment to python dict.
We consider comments as actually posts. Output should be in exactly same format
as twitter or other network supported by this package. But we take twitter as standard.
:param comment: comment object directly from FB communication
:param actor_info: actor_info object directly from FB communication
:param post: post_object directly from FB communication. Actor info is an object describing author of the post.
:return: `dict` in form: {'created_at': datetime.datetime(2012, 1, 1) ...} full format see on the
https://dev.twitter.com/docs/api/1.1/get/search/tweets
"""
# we convert comment to be like post
comment['permalink'] = '{0}?comment_id={1}'.format(post['permalink'], comment['id'])
comment['post_id'] = comment['id']
comment['created_time'] = comment['time']
comment['message'] = comment['text']
return self.post_to_dict(comment, actor_info)
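# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The constructor arguments are an
# assumption: the wrap2.core.SocialNetwork base class is expected to expose
# the app credentials as ``self.id``/``self.secret``; adjust the instantiation
# to its real signature. The callback URL and OAuth code are placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    fb = Facebook(id='<app-id>', secret='<app-secret>')  # hypothetical ctor signature
    url, _ = fb.get_authorization_url('http://example.com/fb/callback',
                                      scope=('publish_stream',))
    print(url)  # send the user here to authorize the application
    # After Facebook redirects back with ?code=..., exchange it for a token:
    # uid, token, name, link = fb.on_authorization_callback(
    #     'http://example.com/fb/callback', code='<code-from-redirect>')
    # fb.post(token, 'Hello from wrap2!')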
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
    the RPC interface to a specific host, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
    sv.loop(60, print_loss, (sess,))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
        the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from
`tf.report_uninitialized_variables(tf.global_variables())`. If `None`, the
model is not checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.local_variables_initializer(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
      A number of seconds.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
      A number of seconds.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
      - A StepCounter thread measuring step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
        sv.coord.join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
    and `start_standard_services` is set to True, also call the session
manager to start the standard services.
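    A minimal single-program sketch (illustrative; `my_train_op` stands for a
    training op assumed to already exist in the graph):
    ```python
    sv = Supervisor(logdir="/tmp/my_training_dir")
    sess = sv.prepare_or_wait_for_session("")
    while not sv.should_stop():
      sess.run(my_train_op)
    ```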
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
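    If you do have extra runners to start, a sketch (assuming `queue` and
    `enqueue_op` already exist in the graph):
    ```python
    qr = tf.train.QueueRunner(queue, [enqueue_op])
    sv.start_queue_runners(sess, [qr])
    ```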
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
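    For example, a sketch that logs a heartbeat message every 60 seconds:
    ```python
    sv.loop(60, lambda: logging.info("still training"))
    ```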
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
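    A usage sketch (`my_train_op` is a placeholder):
    ```python
    with sv.stop_on_exception():
      while not sv.should_stop():
        sess.run(my_train_op)
    ```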
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
except tf.errors.Aborted:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
      checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
|
from types import SimpleNamespace
import attr
from multidict import CIMultiDict
from yarl import URL
from .client_reqrep import ClientResponse
from .signals import Signal
__all__ = (
'TraceConfig', 'TraceRequestStartParams', 'TraceRequestEndParams',
'TraceRequestExceptionParams', 'TraceConnectionQueuedStartParams',
'TraceConnectionQueuedEndParams', 'TraceConnectionCreateStartParams',
'TraceConnectionCreateEndParams', 'TraceConnectionReuseconnParams',
'TraceDnsResolveHostStartParams', 'TraceDnsResolveHostEndParams',
'TraceDnsCacheHitParams', 'TraceDnsCacheMissParams',
'TraceRequestRedirectParams'
)
class TraceConfig:
"""First-class used to trace requests launched via ClientSession
objects."""
def __init__(self, trace_config_ctx_factory=SimpleNamespace):
self._on_request_start = Signal(self)
self._on_request_end = Signal(self)
self._on_request_exception = Signal(self)
self._on_request_redirect = Signal(self)
self._on_connection_queued_start = Signal(self)
self._on_connection_queued_end = Signal(self)
self._on_connection_create_start = Signal(self)
self._on_connection_create_end = Signal(self)
self._on_connection_reuseconn = Signal(self)
self._on_dns_resolvehost_start = Signal(self)
self._on_dns_resolvehost_end = Signal(self)
self._on_dns_cache_hit = Signal(self)
self._on_dns_cache_miss = Signal(self)
self._trace_config_ctx_factory = trace_config_ctx_factory
def trace_config_ctx(self, trace_request_ctx=None):
""" Return a new trace_config_ctx instance """
return self._trace_config_ctx_factory(
trace_request_ctx=trace_request_ctx)
def freeze(self):
self._on_request_start.freeze()
self._on_request_end.freeze()
self._on_request_exception.freeze()
self._on_request_redirect.freeze()
self._on_connection_queued_start.freeze()
self._on_connection_queued_end.freeze()
self._on_connection_create_start.freeze()
self._on_connection_create_end.freeze()
self._on_connection_reuseconn.freeze()
self._on_dns_resolvehost_start.freeze()
self._on_dns_resolvehost_end.freeze()
self._on_dns_cache_hit.freeze()
self._on_dns_cache_miss.freeze()
@property
def on_request_start(self):
return self._on_request_start
@property
def on_request_end(self):
return self._on_request_end
@property
def on_request_exception(self):
return self._on_request_exception
@property
def on_request_redirect(self):
return self._on_request_redirect
@property
def on_connection_queued_start(self):
return self._on_connection_queued_start
@property
def on_connection_queued_end(self):
return self._on_connection_queued_end
@property
def on_connection_create_start(self):
return self._on_connection_create_start
@property
def on_connection_create_end(self):
return self._on_connection_create_end
@property
def on_connection_reuseconn(self):
return self._on_connection_reuseconn
@property
def on_dns_resolvehost_start(self):
return self._on_dns_resolvehost_start
@property
def on_dns_resolvehost_end(self):
return self._on_dns_resolvehost_end
@property
def on_dns_cache_hit(self):
return self._on_dns_cache_hit
@property
def on_dns_cache_miss(self):
return self._on_dns_cache_miss
@attr.s(frozen=True, slots=True)
class TraceRequestStartParams:
""" Parameters sent by the `on_request_start` signal"""
method = attr.ib(type=str)
url = attr.ib(type=URL)
headers = attr.ib(type=CIMultiDict)
@attr.s(frozen=True, slots=True)
class TraceRequestEndParams:
""" Parameters sent by the `on_request_end` signal"""
method = attr.ib(type=str)
url = attr.ib(type=URL)
headers = attr.ib(type=CIMultiDict)
resp = attr.ib(type=ClientResponse)
@attr.s(frozen=True, slots=True)
class TraceRequestExceptionParams:
""" Parameters sent by the `on_request_exception` signal"""
method = attr.ib(type=str)
url = attr.ib(type=URL)
headers = attr.ib(type=CIMultiDict)
exception = attr.ib(type=Exception)
@attr.s(frozen=True, slots=True)
class TraceRequestRedirectParams:
""" Parameters sent by the `on_request_redirect` signal"""
method = attr.ib(type=str)
url = attr.ib(type=URL)
headers = attr.ib(type=CIMultiDict)
resp = attr.ib(type=ClientResponse)
@attr.s(frozen=True, slots=True)
class TraceConnectionQueuedStartParams:
""" Parameters sent by the `on_connection_queued_start` signal"""
@attr.s(frozen=True, slots=True)
class TraceConnectionQueuedEndParams:
""" Parameters sent by the `on_connection_queued_end` signal"""
@attr.s(frozen=True, slots=True)
class TraceConnectionCreateStartParams:
""" Parameters sent by the `on_connection_create_start` signal"""
@attr.s(frozen=True, slots=True)
class TraceConnectionCreateEndParams:
""" Parameters sent by the `on_connection_create_end` signal"""
@attr.s(frozen=True, slots=True)
class TraceConnectionReuseconnParams:
""" Parameters sent by the `on_connection_reuseconn` signal"""
@attr.s(frozen=True, slots=True)
class TraceDnsResolveHostStartParams:
""" Parameters sent by the `on_dns_resolvehost_start` signal"""
host = attr.ib(type=str)
@attr.s(frozen=True, slots=True)
class TraceDnsResolveHostEndParams:
""" Parameters sent by the `on_dns_resolvehost_end` signal"""
host = attr.ib(type=str)
@attr.s(frozen=True, slots=True)
class TraceDnsCacheHitParams:
""" Parameters sent by the `on_dns_cache_hit` signal"""
host = attr.ib(type=str)
@attr.s(frozen=True, slots=True)
class TraceDnsCacheMissParams:
""" Parameters sent by the `on_dns_cache_miss` signal"""
host = attr.ib(type=str)
class Trace:
""" Internal class used to keep together the main dependencies used
at the moment of send a signal."""
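    # ClientSession builds one Trace per request, roughly (a sketch):
    #   trace = Trace(session, trace_config,
    #                 trace_config.trace_config_ctx(trace_request_ctx))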
def __init__(self, session, trace_config, trace_config_ctx):
self._trace_config = trace_config
self._trace_config_ctx = trace_config_ctx
self._session = session
async def send_request_start(self, method, url, headers):
return await self._trace_config.on_request_start.send(
self._session,
self._trace_config_ctx,
TraceRequestStartParams(method, url, headers)
)
async def send_request_end(self, method, url, headers, response):
return await self._trace_config.on_request_end.send(
self._session,
self._trace_config_ctx,
TraceRequestEndParams(method, url, headers, response)
)
async def send_request_exception(self, method, url, headers, exception):
return await self._trace_config.on_request_exception.send(
self._session,
self._trace_config_ctx,
TraceRequestExceptionParams(method, url, headers, exception)
)
async def send_request_redirect(self, method, url, headers, response):
        return await self._trace_config.on_request_redirect.send(
self._session,
self._trace_config_ctx,
TraceRequestRedirectParams(method, url, headers, response)
)
async def send_connection_queued_start(self):
return await self._trace_config.on_connection_queued_start.send(
self._session,
self._trace_config_ctx,
TraceConnectionQueuedStartParams()
)
async def send_connection_queued_end(self):
return await self._trace_config.on_connection_queued_end.send(
self._session,
self._trace_config_ctx,
TraceConnectionQueuedEndParams()
)
async def send_connection_create_start(self):
return await self._trace_config.on_connection_create_start.send(
self._session,
self._trace_config_ctx,
TraceConnectionCreateStartParams()
)
async def send_connection_create_end(self):
return await self._trace_config.on_connection_create_end.send(
self._session,
self._trace_config_ctx,
TraceConnectionCreateEndParams()
)
async def send_connection_reuseconn(self):
return await self._trace_config.on_connection_reuseconn.send(
self._session,
self._trace_config_ctx,
TraceConnectionReuseconnParams()
)
async def send_dns_resolvehost_start(self, host):
return await self._trace_config.on_dns_resolvehost_start.send(
self._session,
self._trace_config_ctx,
TraceDnsResolveHostStartParams(host)
)
async def send_dns_resolvehost_end(self, host):
return await self._trace_config.on_dns_resolvehost_end.send(
self._session,
self._trace_config_ctx,
TraceDnsResolveHostEndParams(host)
)
async def send_dns_cache_hit(self, host):
return await self._trace_config.on_dns_cache_hit.send(
self._session,
self._trace_config_ctx,
TraceDnsCacheHitParams(host)
)
async def send_dns_cache_miss(self, host):
return await self._trace_config.on_dns_cache_miss.send(
self._session,
self._trace_config_ctx,
TraceDnsCacheMissParams(host)
)
|
|
"""Tests for Plex config flow."""
from unittest.mock import MagicMock, Mock, patch, PropertyMock
import plexapi.exceptions
import requests.exceptions
from homeassistant.components.plex import config_flow
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
CONF_TOKEN,
CONF_URL,
)
from tests.common import MockConfigEntry
from .mock_classes import MOCK_HOST_1, MOCK_PORT_1, MockAvailableServer, MockConnections
MOCK_NAME_1 = "Plex Server 1"
MOCK_ID_1 = "unique_id_123"
MOCK_NAME_2 = "Plex Server 2"
MOCK_ID_2 = "unique_id_456"
MOCK_TOKEN = "secret_token"
MOCK_FILE_CONTENTS = {
f"{MOCK_HOST_1}:{MOCK_PORT_1}": {"ssl": False, "token": MOCK_TOKEN, "verify": True}
}
MOCK_SERVER_1 = MockAvailableServer(MOCK_NAME_1, MOCK_ID_1)
MOCK_SERVER_2 = MockAvailableServer(MOCK_NAME_2, MOCK_ID_2)
DEFAULT_OPTIONS = {
config_flow.MP_DOMAIN: {
config_flow.CONF_USE_EPISODE_ART: False,
config_flow.CONF_SHOW_ALL_CONTROLS: False,
}
}
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.PlexFlowHandler()
flow.hass = hass
return flow
async def test_bad_credentials(hass):
"""Test when provided credentials are rejected."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch(
"plexapi.myplex.MyPlexAccount", side_effect=plexapi.exceptions.Unauthorized
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"]["base"] == "faulty_credentials"
async def test_import_file_from_discovery(hass):
"""Test importing a legacy file during discovery."""
file_host_and_port, file_config = list(MOCK_FILE_CONTENTS.items())[0]
used_url = f"http://{file_host_and_port}"
with patch("plexapi.server.PlexServer") as mock_plex_server, patch(
"homeassistant.components.plex.config_flow.load_json",
return_value=MOCK_FILE_CONTENTS,
):
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_ID_1
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_NAME_1
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=used_url)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "discovery"},
data={CONF_HOST: MOCK_HOST_1, CONF_PORT: MOCK_PORT_1},
)
assert result["type"] == "create_entry"
assert result["title"] == MOCK_NAME_1
assert result["data"][config_flow.CONF_SERVER] == MOCK_NAME_1
assert result["data"][config_flow.CONF_SERVER_IDENTIFIER] == MOCK_ID_1
assert result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_URL] == used_url
assert (
result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_TOKEN]
== file_config[CONF_TOKEN]
)
async def test_discovery(hass):
"""Test starting a flow from discovery."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "discovery"},
data={CONF_HOST: MOCK_HOST_1, CONF_PORT: MOCK_PORT_1},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
async def test_discovery_while_in_progress(hass):
"""Test starting a flow from discovery."""
await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "discovery"},
data={CONF_HOST: MOCK_HOST_1, CONF_PORT: MOCK_PORT_1},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_import_success(hass):
"""Test a successful configuration import."""
mock_connections = MockConnections(ssl=True)
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[MOCK_SERVER_1])
mm_plex_account.resource = Mock(return_value=mock_connections)
with patch("plexapi.server.PlexServer") as mock_plex_server:
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_SERVER_1.clientIdentifier
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_SERVER_1.name
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=mock_connections.connections[0].httpuri)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "import"},
data={
CONF_TOKEN: MOCK_TOKEN,
CONF_URL: f"https://{MOCK_HOST_1}:{MOCK_PORT_1}",
},
)
assert result["type"] == "create_entry"
assert result["title"] == MOCK_SERVER_1.name
assert result["data"][config_flow.CONF_SERVER] == MOCK_SERVER_1.name
assert (
result["data"][config_flow.CONF_SERVER_IDENTIFIER]
== MOCK_SERVER_1.clientIdentifier
)
assert (
result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_URL]
== mock_connections.connections[0].httpuri
)
assert result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_import_bad_hostname(hass):
"""Test when an invalid address is provided."""
with patch(
"plexapi.server.PlexServer", side_effect=requests.exceptions.ConnectionError
):
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "import"},
data={
CONF_TOKEN: MOCK_TOKEN,
CONF_URL: f"http://{MOCK_HOST_1}:{MOCK_PORT_1}",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"]["base"] == "not_found"
async def test_unknown_exception(hass):
"""Test when an unknown exception is encountered."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexapi.myplex.MyPlexAccount", side_effect=Exception):
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "user"},
data={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_no_servers_found(hass):
"""Test when no servers are on an account."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[])
with patch("plexapi.myplex.MyPlexAccount", return_value=mm_plex_account):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"]["base"] == "no_servers"
async def test_single_available_server(hass):
"""Test creating an entry with one server available."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
mock_connections = MockConnections()
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[MOCK_SERVER_1])
mm_plex_account.resource = Mock(return_value=mock_connections)
with patch("plexapi.myplex.MyPlexAccount", return_value=mm_plex_account), patch(
"plexapi.server.PlexServer"
) as mock_plex_server:
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_SERVER_1.clientIdentifier
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_SERVER_1.name
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=mock_connections.connections[0].httpuri)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "create_entry"
assert result["title"] == MOCK_SERVER_1.name
assert result["data"][config_flow.CONF_SERVER] == MOCK_SERVER_1.name
assert (
result["data"][config_flow.CONF_SERVER_IDENTIFIER]
== MOCK_SERVER_1.clientIdentifier
)
assert (
result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_URL]
== mock_connections.connections[0].httpuri
)
assert result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_multiple_servers_with_selection(hass):
"""Test creating an entry with multiple servers available."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
mock_connections = MockConnections()
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[MOCK_SERVER_1, MOCK_SERVER_2])
mm_plex_account.resource = Mock(return_value=mock_connections)
with patch("plexapi.myplex.MyPlexAccount", return_value=mm_plex_account), patch(
"plexapi.server.PlexServer"
) as mock_plex_server:
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_SERVER_1.clientIdentifier
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_SERVER_1.name
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=mock_connections.connections[0].httpuri)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "form"
assert result["step_id"] == "select_server"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={config_flow.CONF_SERVER: MOCK_SERVER_1.name}
)
assert result["type"] == "create_entry"
assert result["title"] == MOCK_SERVER_1.name
assert result["data"][config_flow.CONF_SERVER] == MOCK_SERVER_1.name
assert (
result["data"][config_flow.CONF_SERVER_IDENTIFIER]
== MOCK_SERVER_1.clientIdentifier
)
assert (
result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_URL]
== mock_connections.connections[0].httpuri
)
assert result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_adding_last_unconfigured_server(hass):
"""Test automatically adding last unconfigured server when multiple servers on account."""
MockConfigEntry(
domain=config_flow.DOMAIN,
data={
config_flow.CONF_SERVER_IDENTIFIER: MOCK_ID_2,
config_flow.CONF_SERVER: MOCK_NAME_2,
},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
mock_connections = MockConnections()
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[MOCK_SERVER_1, MOCK_SERVER_2])
mm_plex_account.resource = Mock(return_value=mock_connections)
with patch("plexapi.myplex.MyPlexAccount", return_value=mm_plex_account), patch(
"plexapi.server.PlexServer"
) as mock_plex_server:
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_SERVER_1.clientIdentifier
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_SERVER_1.name
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=mock_connections.connections[0].httpuri)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "create_entry"
assert result["title"] == MOCK_SERVER_1.name
assert result["data"][config_flow.CONF_SERVER] == MOCK_SERVER_1.name
assert (
result["data"][config_flow.CONF_SERVER_IDENTIFIER]
== MOCK_SERVER_1.clientIdentifier
)
assert (
result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_URL]
== mock_connections.connections[0].httpuri
)
assert result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_already_configured(hass):
"""Test a duplicated successful flow."""
flow = init_config_flow(hass)
MockConfigEntry(
domain=config_flow.DOMAIN, data={config_flow.CONF_SERVER_IDENTIFIER: MOCK_ID_1}
).add_to_hass(hass)
mock_connections = MockConnections()
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[MOCK_SERVER_1])
mm_plex_account.resource = Mock(return_value=mock_connections)
with patch("plexapi.server.PlexServer") as mock_plex_server:
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_SERVER_1.clientIdentifier
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_SERVER_1.name
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=mock_connections.connections[0].httpuri)
result = await flow.async_step_import(
{CONF_TOKEN: MOCK_TOKEN, CONF_URL: f"http://{MOCK_HOST_1}:{MOCK_PORT_1}"}
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_all_available_servers_configured(hass):
"""Test when all available servers are already configured."""
MockConfigEntry(
domain=config_flow.DOMAIN,
data={
config_flow.CONF_SERVER_IDENTIFIER: MOCK_ID_1,
config_flow.CONF_SERVER: MOCK_NAME_1,
},
).add_to_hass(hass)
MockConfigEntry(
domain=config_flow.DOMAIN,
data={
config_flow.CONF_SERVER_IDENTIFIER: MOCK_ID_2,
config_flow.CONF_SERVER: MOCK_NAME_2,
},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
mock_connections = MockConnections()
mm_plex_account = MagicMock()
mm_plex_account.resources = Mock(return_value=[MOCK_SERVER_1, MOCK_SERVER_2])
mm_plex_account.resource = Mock(return_value=mock_connections)
with patch("plexapi.myplex.MyPlexAccount", return_value=mm_plex_account):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: MOCK_TOKEN, "manual_setup": False},
)
assert result["type"] == "abort"
assert result["reason"] == "all_configured"
async def test_manual_config(hass):
"""Test creating via manual configuration."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_TOKEN: "", "manual_setup": True}
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
mock_connections = MockConnections(ssl=True)
with patch("plexapi.server.PlexServer") as mock_plex_server:
type(mock_plex_server.return_value).machineIdentifier = PropertyMock(
return_value=MOCK_SERVER_1.clientIdentifier
)
type(mock_plex_server.return_value).friendlyName = PropertyMock(
return_value=MOCK_SERVER_1.name
)
type( # pylint: disable=protected-access
mock_plex_server.return_value
)._baseurl = PropertyMock(return_value=mock_connections.connections[0].httpuri)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: MOCK_HOST_1,
CONF_PORT: int(MOCK_PORT_1),
CONF_SSL: True,
CONF_VERIFY_SSL: True,
CONF_TOKEN: MOCK_TOKEN,
},
)
assert result["type"] == "create_entry"
assert result["title"] == MOCK_SERVER_1.name
assert result["data"][config_flow.CONF_SERVER] == MOCK_SERVER_1.name
assert (
result["data"][config_flow.CONF_SERVER_IDENTIFIER]
== MOCK_SERVER_1.clientIdentifier
)
assert (
result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_URL]
== mock_connections.connections[0].httpuri
)
assert result["data"][config_flow.PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_no_token(hass):
"""Test failing when no token provided."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"manual_setup": False}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"][CONF_TOKEN] == "no_token"
async def test_option_flow(hass):
"""Test config flow selection of one of two bridges."""
entry = MockConfigEntry(domain=config_flow.DOMAIN, data={}, options=DEFAULT_OPTIONS)
entry.add_to_hass(hass)
result = await hass.config_entries.options.flow.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == "form"
assert result["step_id"] == "plex_mp_settings"
result = await hass.config_entries.options.flow.async_configure(
result["flow_id"],
user_input={
config_flow.CONF_USE_EPISODE_ART: True,
config_flow.CONF_SHOW_ALL_CONTROLS: True,
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
config_flow.MP_DOMAIN: {
config_flow.CONF_USE_EPISODE_ART: True,
config_flow.CONF_SHOW_ALL_CONTROLS: True,
}
}
|
|
import os
from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.apps.catalogue.categories import create_from_breadcrumbs
from oscar.apps.dashboard.reports.csv_utils import CsvUnicodeReader
from oscar.core.loading import get_class, get_classes
try:
from django.db.transaction import atomic as atomic_compat
except ImportError:
from django.db.transaction import commit_on_success as atomic_compat
ImportError = get_class('partner.exceptions', 'ImportError')
Partner, StockRecord = get_classes('partner.models', ['Partner',
'StockRecord'])
ProductClass, Product, Category, ProductCategory = get_classes(
'catalogue.models', ('ProductClass', 'Product', 'Category',
'ProductCategory'))
class StockImporter(object):
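    # Each row of the delimited input file is expected to carry exactly three
    # fields, matching _update_stockrecord below, e.g. with a comma delimiter:
    #
    #   ABC-123,9.99,42    (partner_sku, price_excl_tax, num_in_stock)
    #
    # The SKU and values shown are illustrative only.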
def __init__(self, logger, partner, delimiter):
self.logger = logger
self._delimiter = delimiter
try:
self._partner = Partner.objects.get(name=partner)
except Partner.DoesNotExist:
name_list = ", ".join([d['name']
for d in Partner.objects.values('name')])
raise ImportError(_("Partner named '%(partner)s' does not exist"
" (existing partners: %(list)s)")
% {'partner': partner, 'list': name_list})
def handle(self, file_path=None):
u"""Handles the actual import process"""
if not file_path:
raise ImportError(_("No file path supplied"))
Validator().validate(file_path)
self._import(file_path)
def _import(self, file_path):
u"""Imports given file"""
stats = {'updated_items': 0,
'unchanged_items': 0,
'unmatched_items': 0}
row_number = 0
for row in CsvUnicodeReader(open(file_path, 'rb'),
delimiter=self._delimiter, quotechar='"',
escapechar='\\'):
row_number += 1
self._import_row(row_number, row, stats)
msg = "\tUpdated items: %d\n\tUnchanged items: %d\n" \
"\tUnmatched items: %d" % (stats['updated_items'],
stats['unchanged_items'],
stats['unmatched_items'])
self.logger.info(msg)
def _import_row(self, row_number, row, stats):
if len(row) != 3:
self.logger.error("Row number %d has an invalid number of fields,"
" skipping..." % row_number)
else:
self._update_stockrecord(*row[:3], row_number=row_number,
stats=stats)
def _update_stockrecord(self, partner_sku, price_excl_tax, num_in_stock,
row_number, stats):
try:
stock = StockRecord.objects.get(partner=self._partner,
partner_sku=partner_sku)
except StockRecord.DoesNotExist:
stats['unmatched_items'] += 1
self.logger.error("\t - Row %d: StockRecord for partner '%s' and"
" sku '%s' does not exist, skipping..."
% (row_number, self._partner, partner_sku))
return
price_changed = False
if stock.price_excl_tax != D(price_excl_tax):
stock.price_excl_tax = D(price_excl_tax)
price_changed = True
stock_changed = False
if stock.num_in_stock != int(num_in_stock):
stock.num_in_stock = num_in_stock
stock_changed = True
if price_changed or stock_changed:
stock.save()
msg = " SKU %s:" % (partner_sku)
if price_changed:
msg += '\n - Price set to %s' % (price_excl_tax)
if stock_changed:
msg += '\n - Stock set to %s' % num_in_stock
self.logger.info(msg)
stats['updated_items'] += 1
else:
stats['unchanged_items'] += 1
# Deprecated
class CatalogueImporter(object):
"""
A catalogue importer object
"""
_flush = False
def __init__(self, logger, delimiter=",", flush=False):
self.logger = logger
self._delimiter = delimiter
self._flush = flush
def handle(self, file_path=None):
u"""Handles the actual import process"""
if not file_path:
raise ImportError(_("No file path supplied"))
Validator().validate(file_path)
if self._flush is True:
self.logger.info(" - Flushing product data before import")
self._flush_product_data()
self._import(file_path)
def _flush_product_data(self):
u"""Flush out product and stock models"""
ProductClass.objects.all().delete()
Product.objects.all().delete()
Partner.objects.all().delete()
StockRecord.objects.all().delete()
@atomic_compat
def _import(self, file_path):
u"""Imports given file"""
stats = {'new_items': 0,
'updated_items': 0}
row_number = 0
for row in CsvUnicodeReader(open(file_path, 'rb'),
delimiter=self._delimiter, quotechar='"',
escapechar='\\'):
row_number += 1
self._import_row(row_number, row, stats)
msg = "New items: %d, updated items: %d" % (stats['new_items'],
stats['updated_items'])
self.logger.info(msg)
def _import_row(self, row_number, row, stats):
if len(row) != 5 and len(row) != 9:
self.logger.error("Row number %d has an invalid number of fields"
" (%d), skipping..." % (row_number, len(row)))
return
item = self._create_item(*row[:5], stats=stats)
if len(row) == 9:
# With stock data
self._create_stockrecord(item, *row[5:9], stats=stats)
def _create_item(self, product_class, category_str, upc, title,
description, stats):
# Ignore any entries that are NULL
if description == 'NULL':
description = ''
# Create item class and item
product_class, __ \
= ProductClass.objects.get_or_create(name=product_class)
try:
item = Product.objects.get(upc=upc)
stats['updated_items'] += 1
except Product.DoesNotExist:
item = Product()
stats['new_items'] += 1
item.upc = upc
item.title = title
item.description = description
item.product_class = product_class
item.save()
# Category
cat = create_from_breadcrumbs(category_str)
ProductCategory.objects.create(product=item, category=cat)
return item
def _create_stockrecord(self, item, partner_name, partner_sku,
price_excl_tax, num_in_stock, stats):
        # Create partner and stock record ("__" avoids shadowing the "_"
        # translation alias imported above)
        partner, __ = Partner.objects.get_or_create(
name=partner_name)
try:
stock = StockRecord.objects.get(partner_sku=partner_sku)
except StockRecord.DoesNotExist:
stock = StockRecord()
stock.product = item
stock.partner = partner
stock.partner_sku = partner_sku
stock.price_excl_tax = D(price_excl_tax)
stock.num_in_stock = num_in_stock
stock.save()
class Validator(object):
def validate(self, file_path):
self._exists(file_path)
self._is_file(file_path)
self._is_readable(file_path)
def _exists(self, file_path):
u"""Check whether a file exists"""
if not os.path.exists(file_path):
raise ImportError(_("%s does not exist") % (file_path))
def _is_file(self, file_path):
u"""Check whether file is actually a file type"""
if not os.path.isfile(file_path):
raise ImportError(_("%s is not a file") % (file_path))
def _is_readable(self, file_path):
u"""Check file is readable"""
try:
f = open(file_path, 'r')
f.close()
        except (IOError, OSError):
raise ImportError(_("%s is not readable") % (file_path))
|
|
##########################################################################
#
# Copyright (c) 2014-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import stat
import shutil
import unittest
import time
import inspect
import functools
import imath
import IECore
import Gaffer
import GafferTest
import GafferDispatch
import GafferDispatchTest
class LocalDispatcherTest( GafferTest.TestCase ) :
def __createLocalDispatcher( self ) :
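# Point the dispatcher at this test's temporary directory so jobs from separate tests don't collide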
result = GafferDispatch.LocalDispatcher()
result["jobsDirectory"].setValue( self.temporaryDirectory() )
return result
def testDispatcherRegistration( self ) :
self.failUnless( "Local" in GafferDispatch.Dispatcher.registeredDispatchers() )
self.failUnless( GafferDispatch.Dispatcher.create( "Local" ).isInstanceOf( GafferDispatch.LocalDispatcher.staticTypeId() ) )
def testDispatch( self ) :
dispatcher = self.__createLocalDispatcher()
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
s = Gaffer.ScriptNode()
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
# No files should exist yet
self.assertEqual( os.path.isfile( fileName ), False )
# Executing n1 should trigger execution of all of them
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing n1 and anything else should be the same as just n1, but forcing n2b execution puts it before n2a
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"], s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2b on ${frame};n2a on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing all nodes should be the same as just n1
os.remove( fileName )
dispatcher.dispatch( [ s["n2"], s["n2b"], s["n1"], s["n2a"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing a sub-branch (n2) should only trigger execution in that branch
os.remove( fileName )
dispatcher.dispatch( [ s["n2"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing a leaf node should not trigger other executions.
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2b on ${frame};" )
self.assertEqual( text, expectedText )
def testDispatchDifferentFrame( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
context = Gaffer.Context( s.context() )
context.setFrame( s.context().getFrame() + 10 )
with context :
self.__createLocalDispatcher().dispatch( [ s["n1"] ] )
fileName = context.substitute( s["n1"]["fileName"].getValue() )
self.assertTrue( os.path.isfile( fileName ) )
with open( fileName, "r" ) as f :
text = f.read()
self.assertEqual( text, "%s on %d" % ( s["n1"].getName(), context.getFrame() ) )
def testDispatchFullRange( self ) :
dispatcher = self.__createLocalDispatcher()
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.FullRange )
frameList = IECore.FrameList.parse( "5-7" )
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
s = Gaffer.ScriptNode()
s["frameRange"]["start"].setValue( 5 )
s["frameRange"]["end"].setValue( 7 )
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
# No files should exist yet
self.assertEqual( os.path.isfile( fileName ), False )
# Executing n1 should trigger execution of all of them
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing a leaf node should not trigger other executions.
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2b on ${frame};" )
self.assertEqual( text, expectedText )
def testDispatchCustomRange( self ) :
dispatcher = self.__createLocalDispatcher()
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
frameList = IECore.FrameList.parse( "2-6x2" )
dispatcher["frameRange"].setValue( str(frameList) )
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
s = Gaffer.ScriptNode()
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
# No files should exist yet
self.assertEqual( os.path.isfile( fileName ), False )
# Executing n1 should trigger execution of all of them
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing a leaf node should not trigger other executions.
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2b on ${frame};" )
self.assertEqual( text, expectedText )
def testDispatchBadCustomRange( self ) :
dispatcher = self.__createLocalDispatcher()
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
dispatcher["frameRange"].setValue( "notAFrameRange" )
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
self.assertRaises( RuntimeError, dispatcher.dispatch, [ s["n1"] ] )
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testContextVariation( self ) :
s = Gaffer.ScriptNode()
context = Gaffer.Context( s.context() )
context["script:name"] = "notTheRealScriptName"
context["textWriter:replace"] = IECore.StringVectorData( [ " ", "\n" ] )
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/${script:name}_####.txt" )
s["n1"]["text"].setValue( "${script:name} on ${frame}" )
fileName = context.substitute( s["n1"]["fileName"].getValue() )
self.assertFalse( os.path.isfile( fileName ) )
with context :
self.__createLocalDispatcher().dispatch( [ s["n1"] ] )
self.assertTrue( os.path.isfile( fileName ) )
self.assertTrue( os.path.basename( fileName ).startswith( context["script:name"] ) )
with open( fileName, "r" ) as f :
text = f.read()
expected = "%s on %d" % ( context["script:name"], context.getFrame() )
expected = expected.replace( context["textWriter:replace"][0], context["textWriter:replace"][1] )
self.assertEqual( text, expected )
def testDispatcherSignals( self ) :
preCs = GafferTest.CapturingSlot( GafferDispatch.Dispatcher.preDispatchSignal() )
self.assertEqual( len( preCs ), 0 )
dispatchCs = GafferTest.CapturingSlot( GafferDispatch.Dispatcher.dispatchSignal() )
self.assertEqual( len( dispatchCs ), 0 )
postCs = GafferTest.CapturingSlot( GafferDispatch.Dispatcher.postDispatchSignal() )
self.assertEqual( len( postCs ), 0 )
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( len( preCs ), 1 )
self.failUnless( preCs[0][0].isSame( dispatcher ) )
self.assertEqual( preCs[0][1], [ s["n1"] ] )
self.assertEqual( len( dispatchCs ), 1 )
self.failUnless( dispatchCs[0][0].isSame( dispatcher ) )
self.assertEqual( dispatchCs[0][1], [ s["n1"] ] )
self.assertEqual( len( postCs ), 1 )
self.failUnless( postCs[0][0].isSame( dispatcher ) )
self.assertEqual( postCs[0][1], [ s["n1"] ] )
def testExecuteInBackground( self ) :
preCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.preDispatchSignal() )
self.assertEqual( len( preCs ), 0 )
dispatchCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.dispatchSignal() )
self.assertEqual( len( dispatchCs ), 0 )
postCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.postDispatchSignal() )
self.assertEqual( len( postCs ), 0 )
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
# the dispatching started and finished
self.assertEqual( len( preCs ), 1 )
self.assertEqual( len( dispatchCs ), 1 )
self.assertEqual( len( postCs ), 1 )
# but the execution hasn't finished yet
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
# wait long enough to finish execution
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
self.assertTrue( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testMixedImmediateAndBackground( self ) :
preCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.preDispatchSignal() )
self.assertEqual( len( preCs ), 0 )
dispatchCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.dispatchSignal() )
self.assertEqual( len( dispatchCs ), 0 )
postCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.postDispatchSignal() )
self.assertEqual( len( postCs ), 0 )
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
# - n3
s = Gaffer.ScriptNode()
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
# force the entire n2 tree to execute in the foreground
s["n2"]["dispatcher"]["immediate"].setValue( True )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n3"] = createWriter( "n3" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n1"]["preTasks"][1].setInput( s["n3"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
frameList = IECore.FrameList.parse( "2-6x2" )
dispatcher["frameRange"].setValue( str(frameList) )
dispatcher.dispatch( [ s["n1"] ] )
# the dispatching started and finished
self.assertEqual( len( preCs ), 1 )
self.assertEqual( len( dispatchCs ), 1 )
self.assertEqual( len( postCs ), 1 )
# all the foreground execution has finished
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};" )
self.assertEqual( text, expectedText )
# wait long enough for background execution to finish
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
# don't reset the expectedText since we're still appending
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n3 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
def testMultipleDispatchers( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher2 = self.__createLocalDispatcher()
dispatcher2["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
c = s.context()
c.setFrame( 2 )
with c :
dispatcher2.dispatch( [ s["n1"] ] )
# wait long enough for background execution to finish
self.assertEqual( len(dispatcher.jobPool().jobs()), 2 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
self.assertTrue( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
self.assertTrue( os.path.isfile( c.substitute( s["n1"]["fileName"].getValue() ) ) )
def testFailure( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
s["n2"] = GafferDispatchTest.TextWriter()
s["n2"]["fileName"].setValue( "" )
s["n2"]["text"].setValue( "n2 on ${frame}" )
s["n3"] = GafferDispatchTest.TextWriter()
s["n3"]["fileName"].setValue( self.temporaryDirectory() + "/n3_####.txt" )
s["n3"]["text"].setValue( "n3 on ${frame}" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n3"]["task"] )
dispatcher = self.__createLocalDispatcher()
# fails because n2 doesn't have a valid fileName
self.assertRaisesRegexp( RuntimeError, "No such file or directory", functools.partial( dispatcher.dispatch, [ s["n1"] ] ) )
# it still cleans up the JobPool
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
# n3 executed correctly
self.assertTrue( os.path.isfile( s.context().substitute( s["n3"]["fileName"].getValue() ) ) )
with open( s.context().substitute( s["n3"]["fileName"].getValue() ), "r" ) as f :
text = f.read()
self.assertEqual( text, "n3 on %d" % s.context().getFrame() )
# n2 failed, so n1 never executed
self.assertFalse( os.path.isfile( s.context().substitute( s["n2"]["fileName"].getValue() ) ) )
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
self.tearDown()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
# wait long enough for background execution to finish
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
# n3 executed correctly
self.assertTrue( os.path.isfile( s.context().substitute( s["n3"]["fileName"].getValue() ) ) )
with open( s.context().substitute( s["n3"]["fileName"].getValue() ), "r" ) as f :
text = f.read()
self.assertEqual( text, "n3 on %d" % s.context().getFrame() )
# n2 failed, so n1 never executed
self.assertFalse( os.path.isfile( s.context().substitute( s["n2"]["fileName"].getValue() ) ) )
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testKill( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
# the execution hasn't finished yet
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
# kill the job
dispatcher.jobPool().jobs()[0].kill()
# wait long enough for the process to die
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
# make sure it never wrote the file
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testSpacesInContext( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.TextWriter()
s["n"]["fileName"].setValue( self.temporaryDirectory() + "/test.txt" )
s["n"]["text"].setValue( "${test}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
c = Gaffer.Context()
c["test"] = "i am a string with spaces"
with c :
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
text = "".join( open( self.temporaryDirectory() + "/test.txt" ).readlines() )
self.assertEqual( text, "i am a string with spaces" )
def testUIContextEntriesIgnored( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.TextWriter()
s["n"]["fileName"].setValue( self.temporaryDirectory() + "/out.txt" )
s["n"]["text"].setValue( "${foo} ${ui:foo}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
c = Gaffer.Context()
c["ui:foo"] = "uiFoo"
c["foo"] = "foo"
with c :
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
text = "".join( open( self.temporaryDirectory() + "/out.txt" ).readlines() )
self.assertEqual( text, "foo " )
def testContextLockedDuringBackgroundDispatch( self ) :
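# The background job should keep using the context captured at dispatch time, so edits made to the script afterwards don't change the result.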
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/out.txt" )
s["n1"]["text"].setValue( "n1 on ${frame} with ${foo}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
c = Gaffer.Context( s.context() )
c["foo"] = "foo"
with c :
dispatcher.dispatch( [ s["n1"] ] )
self.assertFalse( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
foo = s["variables"].addChild( Gaffer.NameValuePlug( "foo", IECore.StringData( "foo" ) ) )
dispatcher.jobPool().waitForAll()
self.assertTrue( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
text = "".join( open( self.temporaryDirectory() + "/out.txt" ).readlines() )
self.assertEqual( text, "n1 on 1 with foo" )
def testNodeNamesLockedDuringBackgroundDispatch( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/out.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
self.assertFalse( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
s["n1"].setName( "n2" )
dispatcher.jobPool().waitForAll()
self.assertTrue( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
text = "".join( open( self.temporaryDirectory() + "/out.txt" ).readlines() )
self.assertEqual( text, "n1 on 1" )
def testIgnoreScriptLoadErrors( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.TextWriter()
s["n"]["fileName"].setValue( self.temporaryDirectory() + "/scriptLoadErrorTest.txt" )
s["n"]["text"].setValue( "test" )
# because this doesn't have the dynamic flag set,
# it won't serialise/load properly.
s["n"]["user"]["badPlug"] = Gaffer.IntPlug()
s["n"]["user"]["badPlug"].setValue( 10 )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
self.assertFalse( os.path.isfile( self.temporaryDirectory() + "/scriptLoadErrorTest.txt" ) )
dispatcher["ignoreScriptLoadErrors"].setValue( True )
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
self.assertTrue( os.path.isfile( self.temporaryDirectory() + "/scriptLoadErrorTest.txt" ) )
def testBackgroundBatchesCanAccessJobDirectory( self ) :
s = Gaffer.ScriptNode()
s["w"] = GafferDispatchTest.TextWriter()
s["w"]["fileName"].setValue( "${dispatcher:jobDirectory}/test.####.txt" )
s["w"]["text"].setValue( "w on ${frame} from ${dispatcher:jobDirectory}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
frameList = IECore.FrameList.parse( "2-6x2" )
dispatcher["frameRange"].setValue( str(frameList) )
dispatcher.dispatch( [ s["w"] ] )
dispatcher.jobPool().waitForAll()
# a single dispatch should have the same job directory for all batches
jobDir = dispatcher["jobsDirectory"].getValue() + "/000000"
self.assertEqual( next( open( "%s/test.0002.txt" % jobDir ) ), "w on 2 from %s" % jobDir )
self.assertEqual( next( open( "%s/test.0004.txt" % jobDir ) ), "w on 4 from %s" % jobDir )
self.assertEqual( next( open( "%s/test.0006.txt" % jobDir ) ), "w on 6 from %s" % jobDir )
def testEnvironmentCommand( self ) :
s = Gaffer.ScriptNode()
testFile = os.path.join( self.temporaryDirectory(), "test" )
s["c"] = GafferDispatch.SystemCommand()
s["c"]["command"].setValue( "echo HELLO \$GAFFERDISPATCHTEST_ENVVAR > " + testFile )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher.dispatch( [ s["c"] ] )
dispatcher.jobPool().waitForAll()
with open( testFile ) as f :
self.assertEqual( f.readlines(), [ "HELLO\n" ] )
dispatcher["environmentCommand"].setValue( "env GAFFERDISPATCHTEST_ENVVAR=WORLD" )
dispatcher.dispatch( [ s["c"] ] )
dispatcher.jobPool().waitForAll()
with open( testFile ) as f :
self.assertEqual( f.readlines(), [ "HELLO WORLD\n" ] )
def testEnvironmentCommandSubstitutions( self ) :
s = Gaffer.ScriptNode()
testFile = os.path.join( self.temporaryDirectory(), "test" )
s["c"] = GafferDispatch.SystemCommand()
s["c"]["command"].setValue( "echo HELLO \$GAFFERDISPATCHTEST_ENVVAR > " + testFile )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["environmentCommand"].setValue( "env GAFFERDISPATCHTEST_ENVVAR=$world" )
with Gaffer.Context() as c :
c["world"] = "WORLD"
dispatcher.dispatch( [ s["c"] ] )
dispatcher.jobPool().waitForAll()
with open( testFile ) as f :
self.assertEqual( f.readlines(), [ "HELLO WORLD\n" ] )
def testScaling( self ) :
# See DispatcherTest.testScaling for details.
s = Gaffer.ScriptNode()
lastTask = None
for i in range( 0, 5 ) :
perFrame = GafferDispatch.PythonCommand()
perFrame["command"].setValue( "context.getFrame()" )
s["perFrame%d" % i] = perFrame
if lastTask is not None :
perFrame["preTasks"][0].setInput( lastTask["task"] )
perSequence = GafferDispatch.PythonCommand()
perSequence["command"].setValue( "pass" )
perSequence["sequence"].setValue( True )
perSequence["preTasks"][0].setInput( perFrame["task"] )
s["perSequence%d" % i] = perSequence
lastTask = perSequence
d = self.__createLocalDispatcher()
d["framesMode"].setValue( d.FramesMode.CustomRange )
d["frameRange"].setValue( "1-1000" )
t = time.clock()
d.dispatch( [ lastTask ] )
timeLimit = 6
if Gaffer.isDebug():
timeLimit *= 2
self.assertLess( time.clock() - t, timeLimit )
d["executeInBackground"].setValue( True )
d.dispatch( [ lastTask ] )
t = time.clock()
d.jobPool().jobs()[0].kill()
self.assertLess( time.clock() - t, 1 )
d.jobPool().waitForAll()
def testImathContextVariable( self ) :
s = Gaffer.ScriptNode()
s["t"] = GafferDispatchTest.TextWriter()
s["t"]["fileName"].setValue( self.temporaryDirectory() + "/test.txt" )
s["e"] = Gaffer.Expression()
s["e"].setExpression( inspect.cleandoc(
"""
c = context["c"]
parent["t"]["text"] = "{0} {1} {2}".format( *c )
"""
) )
s["v"] = GafferDispatch.TaskContextVariables()
s["v"]["variables"].addChild( Gaffer.NameValuePlug( "c", imath.Color3f( 0, 1, 2 ) ) )
s["v"]["preTasks"][0].setInput( s["t"]["task"] )
d = self.__createLocalDispatcher()
d["executeInBackground"].setValue( True )
d.dispatch( [ s["v"] ] )
d.jobPool().waitForAll()
self.assertEqual(
open( s["t"]["fileName"].getValue() ).read(),
"0.0 1.0 2.0"
)
def testNestedDispatchBorrowingOuterJobDirectory( self ) :
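# A dispatch launched from within a running job should reuse the outer job directory, so the nested and outer tasks report the same values.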
s = Gaffer.ScriptNode()
s["nestedTask"] = GafferDispatchTest.TextWriter()
s["nestedTask"]["fileName"].setValue( self.temporaryDirectory() + "/nested.txt" )
s["nestedTask"]["text"].setValue( "${dispatcher:jobDirectory} : ${dispatcher:scriptFileName}" )
s["dispatchTask"] = GafferDispatch.PythonCommand()
s["dispatchTask"]["command"].setValue( inspect.cleandoc(
"""
import GafferDispatch
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher.dispatch( [ self.parent()["nestedTask"] ] )
"""
) )
s["outerTask"] = GafferDispatchTest.TextWriter()
s["outerTask"]["preTasks"][0].setInput( s["dispatchTask"]["task"] )
s["outerTask"]["fileName"].setValue( self.temporaryDirectory() + "/outer.txt" )
s["outerTask"]["text"].setValue( "${dispatcher:jobDirectory} : ${dispatcher:scriptFileName}" )
d = self.__createLocalDispatcher()
d["executeInBackground"].setValue( True )
d.dispatch( [ s["outerTask"] ] )
d.jobPool().waitForAll()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/nested.txt" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/outer.txt" ) )
self.assertEqual(
open( self.temporaryDirectory() + "/nested.txt" ).readlines(),
open( self.temporaryDirectory() + "/outer.txt" ).readlines(),
)
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import hashlib
import os
from django.conf import settings
from django.core import urlresolvers
from django.core.files.base import ContentFile
from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import mixins
from .. import settings as filer_settings
from ..fields.multistorage_file import MultiStorageFileField
from ..utils.compatibility import LTE_DJANGO_1_7, python_2_unicode_compatible
from .foldermodels import Folder
try:
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
except ImportError:
# django-polymorphic < 0.8
from polymorphic import PolymorphicModel, PolymorphicManager
class FileManager(PolymorphicManager):
def find_all_duplicates(self):
r = {}
for file_obj in self.all():
if file_obj.sha1:
q = self.filter(sha1=file_obj.sha1)
if len(q) > 1:
r[file_obj.sha1] = q
return r
def find_duplicates(self, file_obj):
return [i for i in self.exclude(pk=file_obj.pk).filter(sha1=file_obj.sha1)]
@python_2_unicode_compatible
class File(PolymorphicModel, mixins.IconsMixin):
file_type = 'File'
_icon = "file"
folder = models.ForeignKey(Folder, verbose_name=_('folder'), related_name='all_files',
null=True, blank=True)
file = MultiStorageFileField(_('file'), null=True, blank=True, max_length=255)
_file_size = models.IntegerField(_('file size'), null=True, blank=True)
sha1 = models.CharField(_('sha1'), max_length=40, blank=True, default='')
has_all_mandatory_data = models.BooleanField(_('has all mandatory data'), default=False, editable=False)
original_filename = models.CharField(_('original filename'), max_length=255, blank=True, null=True)
name = models.CharField(max_length=255, default="", blank=True,
verbose_name=_('name'))
description = models.TextField(null=True, blank=True,
verbose_name=_('description'))
owner = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
related_name='owned_%(class)ss', on_delete=models.SET_NULL,
null=True, blank=True, verbose_name=_('owner'))
uploaded_at = models.DateTimeField(_('uploaded at'), auto_now_add=True)
modified_at = models.DateTimeField(_('modified at'), auto_now=True)
is_public = models.BooleanField(
default=filer_settings.FILER_IS_PUBLIC_DEFAULT,
verbose_name=_('Permissions disabled'),
help_text=_('Disable any permission checking for this '
'file. File will be publicly accessible '
'to anyone.'))
objects = FileManager()
@classmethod
def matches_file_type(cls, iname, ifile, request):
return True # I match all files...
def __init__(self, *args, **kwargs):
super(File, self).__init__(*args, **kwargs)
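# Remember the initial visibility so save() can detect is_public changes and relocate the file.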
self._old_is_public = self.is_public
def _move_file(self):
"""
Move the file from its current storage to the one matching is_public.
"""
src_file_name = self.file.name
dst_file_name = self._meta.get_field('file').generate_filename(
self, self.original_filename)
if self.is_public:
src_storage = self.file.storages['private']
dst_storage = self.file.storages['public']
else:
src_storage = self.file.storages['public']
dst_storage = self.file.storages['private']
# delete the thumbnail
# We toggle is_public temporarily so that easy_thumbnails can
# delete the thumbnails.
self.is_public = not self.is_public
self.file.delete_thumbnails()
self.is_public = not self.is_public
# This is needed because most remote file storage backends do not
# open the file.
src_file = src_storage.open(src_file_name)
src_file.open()
self.file = dst_storage.save(dst_file_name,
ContentFile(src_file.read()))
src_storage.delete(src_file_name)
def _copy_file(self, destination, overwrite=False):
"""
Copies the file to the destination and returns the name it was stored under.
"""
if overwrite:
# If the destination file already exists, the default storage backend
# does not overwrite it but generates another filename.
# TODO: Find a way to override this behavior.
raise NotImplementedError
src_file_name = self.file.name
storage = self.file.storages['public' if self.is_public else 'private']
# This is needed because most remote file storage backends do not
# open the file.
src_file = storage.open(src_file_name)
src_file.open()
return storage.save(destination, ContentFile(src_file.read()))
def generate_sha1(self):
sha = hashlib.sha1()
self.file.seek(0)
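# Hash in 100 MiB chunks so large files are not read into memory at once.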
while True:
buf = self.file.read(104857600)
if not buf:
break
sha.update(buf)
self.sha1 = sha.hexdigest()
# to make sure later operations can read the whole file
self.file.seek(0)
def save(self, *args, **kwargs):
# check if this is a subclass of "File" or not and set
# _file_type_plugin_name
if self.__class__ == File:
# what should we do now?
# maybe this has a subclass, but is being saved as a File instance
# anyway. do we need to go check all possible subclasses?
pass
elif issubclass(self.__class__, File):
self._file_type_plugin_name = self.__class__.__name__
# cache the file size
# TODO: only do this if needed (depending on the storage backend the whole file will be downloaded)
try:
self._file_size = self.file.size
except:
pass
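# If the public/private flag changed on an existing file, move it to the matching storage.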
if self._old_is_public != self.is_public and self.pk:
self._move_file()
self._old_is_public = self.is_public
# generate SHA1 hash
# TODO: only do this if needed (depending on the storage backend the whole file will be downloaded)
try:
self.generate_sha1()
except Exception:
pass
super(File, self).save(*args, **kwargs)
save.alters_data = True
def delete(self, *args, **kwargs):
# Delete the model before the file
super(File, self).delete(*args, **kwargs)
# Delete the file if there are no other Files referencing it.
if not File.objects.filter(file=self.file.name, is_public=self.is_public).exists():
self.file.delete(False)
delete.alters_data = True
@property
def label(self):
if self.name in ['', None]:
text = self.original_filename or 'unnamed file'
else:
text = self.name
text = "%s" % (text,)
return text
def __lt__(self, other):
return self.label.lower() < other.label.lower()
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
Return True if the current user has the given permission on this file.
"""
user = request.user
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, permission_type)
else:
return False
def __str__(self):
if self.name in ('', None):
text = "%s" % (self.original_filename,)
else:
text = "%s" % (self.name,)
return text
def get_admin_change_url(self):
if LTE_DJANGO_1_7:
model_name = self._meta.module_name
else:
model_name = self._meta.model_name
return urlresolvers.reverse(
'admin:{0}_{1}_change'.format(self._meta.app_label, model_name),
args=(self.pk,)
)
def get_admin_delete_url(self):
try:
# Django <=1.6
model_name = self._meta.module_name
except AttributeError:
# Django >1.6
model_name = self._meta.model_name
return urlresolvers.reverse(
'admin:{0}_{1}_delete'.format(self._meta.app_label, model_name,),
args=(self.pk,))
@property
def url(self):
"""
Make the model behave like a file field.
"""
try:
r = self.file.url
except:
r = ''
return r
@property
def canonical_url(self):
url = ''
if self.file and self.is_public:
try:
url = urlresolvers.reverse('canonical', kwargs={
'uploaded_at': self.uploaded_at.strftime('%s'),
'file_id': self.id
})
except urlresolvers.NoReverseMatch:
pass # No canonical url, return empty string
return url
@property
def path(self):
try:
return self.file.path
except:
return ""
@property
def size(self):
return self._file_size or 0
@property
def extension(self):
filetype = os.path.splitext(self.file.name)[1].lower()
if len(filetype) > 0:
filetype = filetype[1:]
return filetype
@property
def logical_folder(self):
"""
if this file is not in a specific folder return the Special "unfiled"
Folder object
"""
if not self.folder:
from .virtualitems import UnsortedImages
return UnsortedImages()
else:
return self.folder
@property
def logical_path(self):
"""
Gets the logical path of the folder in the tree structure.
Used to generate breadcrumbs.
"""
folder_path = []
if self.folder:
folder_path.extend(self.folder.get_ancestors())
folder_path.append(self.logical_folder)
return folder_path
@property
def duplicates(self):
return File.objects.find_duplicates(self)
class Meta(object):
app_label = 'filer'
verbose_name = _('file')
verbose_name_plural = _('files')
|
|
"Test the functionality of Python classes implementing operators."
import unittest
testmeths = [
# Binary operations
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"matmul",
"rmatmul",
"truediv",
"rtruediv",
"floordiv",
"rfloordiv",
"mod",
"rmod",
"divmod",
"rdivmod",
"pow",
"rpow",
"rshift",
"rrshift",
"lshift",
"rlshift",
"and",
"rand",
"or",
"ror",
"xor",
"rxor",
# List/dict operations
"contains",
"getitem",
"setitem",
"delitem",
# Unary operations
"neg",
"pos",
"abs",
# generic operations
"init",
]
# These need to return something other than None
# "hash",
# "str",
# "repr",
# "int",
# "float",
# These are separate because they can influence the test of other methods.
# "getattr",
# "setattr",
# "delattr",
callLst = []
def trackCall(f):
def track(*args, **kwargs):
callLst.append((f.__name__, args))
return f(*args, **kwargs)
return track
statictests = """
@trackCall
def __hash__(self, *args):
return hash(id(self))
@trackCall
def __str__(self, *args):
return "AllTests"
@trackCall
def __repr__(self, *args):
return "AllTests"
@trackCall
def __int__(self, *args):
return 1
@trackCall
def __index__(self, *args):
return 1
@trackCall
def __float__(self, *args):
return 1.0
@trackCall
def __eq__(self, *args):
return True
@trackCall
def __ne__(self, *args):
return False
@trackCall
def __lt__(self, *args):
return False
@trackCall
def __le__(self, *args):
return True
@trackCall
def __gt__(self, *args):
return False
@trackCall
def __ge__(self, *args):
return True
"""
# Synthesize all the other AllTests methods from the names in testmeths.
method_template = """\
@trackCall
def __%s__(self, *args):
pass
"""
d = {}
exec(statictests, globals(), d)
for method in testmeths:
exec(method_template % method, globals(), d)
AllTests = type("AllTests", (object,), d)
del d, statictests, method, method_template
class ClassTests(unittest.TestCase):
def setUp(self):
callLst[:] = []
def assertCallStack(self, expected_calls):
actualCallList = callLst[:] # need to copy because the comparison below will add
# additional calls to callLst
if expected_calls != actualCallList:
self.fail("Expected call list:\n %s\ndoes not match actual call list\n %s" %
(expected_calls, actualCallList))
def testInit(self):
foo = AllTests()
self.assertCallStack([("__init__", (foo,))])
def testBinaryOps(self):
testme = AllTests()
# Binary operations
callLst[:] = []
testme + 1
self.assertCallStack([("__add__", (testme, 1))])
callLst[:] = []
1 + testme
self.assertCallStack([("__radd__", (testme, 1))])
callLst[:] = []
testme - 1
self.assertCallStack([("__sub__", (testme, 1))])
callLst[:] = []
1 - testme
self.assertCallStack([("__rsub__", (testme, 1))])
callLst[:] = []
testme * 1
self.assertCallStack([("__mul__", (testme, 1))])
callLst[:] = []
1 * testme
self.assertCallStack([("__rmul__", (testme, 1))])
callLst[:] = []
testme @ 1
self.assertCallStack([("__matmul__", (testme, 1))])
callLst[:] = []
1 @ testme
self.assertCallStack([("__rmatmul__", (testme, 1))])
callLst[:] = []
testme / 1
self.assertCallStack([("__truediv__", (testme, 1))])
callLst[:] = []
1 / testme
self.assertCallStack([("__rtruediv__", (testme, 1))])
callLst[:] = []
testme // 1
self.assertCallStack([("__floordiv__", (testme, 1))])
callLst[:] = []
1 // testme
self.assertCallStack([("__rfloordiv__", (testme, 1))])
callLst[:] = []
testme % 1
self.assertCallStack([("__mod__", (testme, 1))])
callLst[:] = []
1 % testme
self.assertCallStack([("__rmod__", (testme, 1))])
callLst[:] = []
divmod(testme,1)
self.assertCallStack([("__divmod__", (testme, 1))])
callLst[:] = []
divmod(1, testme)
self.assertCallStack([("__rdivmod__", (testme, 1))])
callLst[:] = []
testme ** 1
self.assertCallStack([("__pow__", (testme, 1))])
callLst[:] = []
1 ** testme
self.assertCallStack([("__rpow__", (testme, 1))])
callLst[:] = []
testme >> 1
self.assertCallStack([("__rshift__", (testme, 1))])
callLst[:] = []
1 >> testme
self.assertCallStack([("__rrshift__", (testme, 1))])
callLst[:] = []
testme << 1
self.assertCallStack([("__lshift__", (testme, 1))])
callLst[:] = []
1 << testme
self.assertCallStack([("__rlshift__", (testme, 1))])
callLst[:] = []
testme & 1
self.assertCallStack([("__and__", (testme, 1))])
callLst[:] = []
1 & testme
self.assertCallStack([("__rand__", (testme, 1))])
callLst[:] = []
testme | 1
self.assertCallStack([("__or__", (testme, 1))])
callLst[:] = []
1 | testme
self.assertCallStack([("__ror__", (testme, 1))])
callLst[:] = []
testme ^ 1
self.assertCallStack([("__xor__", (testme, 1))])
callLst[:] = []
1 ^ testme
self.assertCallStack([("__rxor__", (testme, 1))])
def testListAndDictOps(self):
testme = AllTests()
# List/dict operations
class Empty: pass
try:
1 in Empty()
self.fail('failed, should have raised TypeError')
except TypeError:
pass
callLst[:] = []
1 in testme
self.assertCallStack([('__contains__', (testme, 1))])
callLst[:] = []
testme[1]
self.assertCallStack([('__getitem__', (testme, 1))])
callLst[:] = []
testme[1] = 1
self.assertCallStack([('__setitem__', (testme, 1, 1))])
callLst[:] = []
del testme[1]
self.assertCallStack([('__delitem__', (testme, 1))])
callLst[:] = []
testme[:42]
self.assertCallStack([('__getitem__', (testme, slice(None, 42)))])
callLst[:] = []
testme[:42] = "The Answer"
self.assertCallStack([('__setitem__', (testme, slice(None, 42),
"The Answer"))])
callLst[:] = []
del testme[:42]
self.assertCallStack([('__delitem__', (testme, slice(None, 42)))])
callLst[:] = []
testme[2:1024:10]
self.assertCallStack([('__getitem__', (testme, slice(2, 1024, 10)))])
callLst[:] = []
testme[2:1024:10] = "A lot"
self.assertCallStack([('__setitem__', (testme, slice(2, 1024, 10),
"A lot"))])
callLst[:] = []
del testme[2:1024:10]
self.assertCallStack([('__delitem__', (testme, slice(2, 1024, 10)))])
callLst[:] = []
testme[:42, ..., :24:, 24, 100]
self.assertCallStack([('__getitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100)))])
callLst[:] = []
testme[:42, ..., :24:, 24, 100] = "Strange"
self.assertCallStack([('__setitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100), "Strange"))])
callLst[:] = []
del testme[:42, ..., :24:, 24, 100]
self.assertCallStack([('__delitem__', (testme, (slice(None, 42, None),
Ellipsis,
slice(None, 24, None),
24, 100)))])
def testUnaryOps(self):
testme = AllTests()
callLst[:] = []
-testme
self.assertCallStack([('__neg__', (testme,))])
callLst[:] = []
+testme
self.assertCallStack([('__pos__', (testme,))])
callLst[:] = []
abs(testme)
self.assertCallStack([('__abs__', (testme,))])
callLst[:] = []
int(testme)
self.assertCallStack([('__int__', (testme,))])
callLst[:] = []
float(testme)
self.assertCallStack([('__float__', (testme,))])
callLst[:] = []
oct(testme)
self.assertCallStack([('__index__', (testme,))])
callLst[:] = []
hex(testme)
self.assertCallStack([('__index__', (testme,))])
def testMisc(self):
testme = AllTests()
callLst[:] = []
hash(testme)
self.assertCallStack([('__hash__', (testme,))])
callLst[:] = []
repr(testme)
self.assertCallStack([('__repr__', (testme,))])
callLst[:] = []
str(testme)
self.assertCallStack([('__str__', (testme,))])
callLst[:] = []
testme == 1
self.assertCallStack([('__eq__', (testme, 1))])
callLst[:] = []
testme < 1
self.assertCallStack([('__lt__', (testme, 1))])
callLst[:] = []
testme > 1
self.assertCallStack([('__gt__', (testme, 1))])
callLst[:] = []
testme != 1
self.assertCallStack([('__ne__', (testme, 1))])
callLst[:] = []
1 == testme
self.assertCallStack([('__eq__', (1, testme))])
callLst[:] = []
1 < testme
self.assertCallStack([('__gt__', (1, testme))])
callLst[:] = []
1 > testme
self.assertCallStack([('__lt__', (1, testme))])
callLst[:] = []
1 != testme
self.assertCallStack([('__ne__', (1, testme))])
def testGetSetAndDel(self):
# Interfering tests
class ExtraTests(AllTests):
@trackCall
def __getattr__(self, *args):
return "SomeVal"
@trackCall
def __setattr__(self, *args):
pass
@trackCall
def __delattr__(self, *args):
pass
testme = ExtraTests()
callLst[:] = []
testme.spam
self.assertCallStack([('__getattr__', (testme, "spam"))])
callLst[:] = []
testme.eggs = "spam, spam, spam and ham"
self.assertCallStack([('__setattr__', (testme, "eggs",
"spam, spam, spam and ham"))])
callLst[:] = []
del testme.cardinal
self.assertCallStack([('__delattr__', (testme, "cardinal"))])
def testDel(self):
x = []
class DelTest:
def __del__(self):
x.append("crab people, crab people")
testme = DelTest()
del testme
import gc
gc.collect()
self.assertEqual(["crab people, crab people"], x)
def testBadTypeReturned(self):
# return values of some methods are type-checked
class BadTypeClass:
def __int__(self):
return None
__float__ = __int__
__complex__ = __int__
__str__ = __int__
__repr__ = __int__
__bytes__ = __int__
__bool__ = __int__
__index__ = __int__
def index(x):
return [][x]
for f in [float, complex, str, repr, bytes, bin, oct, hex, bool, index]:
self.assertRaises(TypeError, f, BadTypeClass())
def testHashStuff(self):
# Test correct errors from hash() on objects with comparisons but
# no __hash__
class C0:
pass
hash(C0()) # This should work; the next two should raise TypeError
class C2:
def __eq__(self, other): return 1
self.assertRaises(TypeError, hash, C2())
def testSFBug532646(self):
# Test for SF bug 532646
class A:
pass
A.__call__ = A()
a = A()
try:
a() # This should not segfault
except RecursionError:
pass
else:
self.fail("Failed to raise RecursionError")
def testForExceptionsRaisedInInstanceGetattr2(self):
# Tests for exceptions raised in instance_getattr2().
def booh(self):
raise AttributeError("booh")
class A:
a = property(booh)
try:
A().a # Raised AttributeError: A instance has no attribute 'a'
except AttributeError as x:
if str(x) != "booh":
self.fail("attribute error for A().a got masked: %s" % x)
class E:
__eq__ = property(booh)
E() == E() # In debug mode, caused a C-level assert() to fail
class I:
__init__ = property(booh)
try:
# In debug mode, printed XXX undetected error and
# raises AttributeError
I()
except AttributeError:
pass
else:
self.fail("attribute error for I.__init__ got masked")
def assertNotOrderable(self, a, b):
with self.assertRaises(TypeError):
a < b
with self.assertRaises(TypeError):
a > b
with self.assertRaises(TypeError):
a <= b
with self.assertRaises(TypeError):
a >= b
def testHashComparisonOfMethods(self):
# Test comparison and hash of methods
class A:
def __init__(self, x):
self.x = x
def f(self):
pass
def g(self):
pass
def __eq__(self, other):
return True
def __hash__(self):
raise TypeError
class B(A):
pass
a1 = A(1)
a2 = A(1)
self.assertTrue(a1.f == a1.f)
self.assertFalse(a1.f != a1.f)
self.assertFalse(a1.f == a2.f)
self.assertTrue(a1.f != a2.f)
self.assertFalse(a1.f == a1.g)
self.assertTrue(a1.f != a1.g)
self.assertNotOrderable(a1.f, a1.f)
self.assertEqual(hash(a1.f), hash(a1.f))
self.assertFalse(A.f == a1.f)
self.assertTrue(A.f != a1.f)
self.assertFalse(A.f == A.g)
self.assertTrue(A.f != A.g)
self.assertTrue(B.f == A.f)
self.assertFalse(B.f != A.f)
self.assertNotOrderable(A.f, A.f)
self.assertEqual(hash(B.f), hash(A.f))
# the following triggers a SystemError in 2.4
a = A(hash(A.f)^(-1))
hash(a.f)
def testSetattrWrapperNameIntern(self):
# Issue #25794: __setattr__ should intern the attribute name
class A:
pass
def add(self, other):
return 'summa'
name = str(b'__add__', 'ascii') # shouldn't be optimized
self.assertIsNot(name, '__add__') # not interned
type.__setattr__(A, name, add)
self.assertEqual(A() + 1, 'summa')
name2 = str(b'__add__', 'ascii')
self.assertIsNot(name2, '__add__')
self.assertIsNot(name2, name)
type.__delattr__(A, name2)
with self.assertRaises(TypeError):
A() + 1
def testSetattrNonStringName(self):
class A:
pass
with self.assertRaises(TypeError):
type.__setattr__(A, b'x', None)
def testConstructorErrorMessages(self):
# bpo-31506: Improves the error message logic for object_new & object_init
# Class without any method overrides
class C:
pass
error_msg = r'C.__init__\(\) takes exactly one argument \(the instance to initialize\)'
with self.assertRaisesRegex(TypeError, r'C\(\) takes no arguments'):
C(42)
with self.assertRaisesRegex(TypeError, r'C\(\) takes no arguments'):
C.__new__(C, 42)
with self.assertRaisesRegex(TypeError, error_msg):
C().__init__(42)
with self.assertRaisesRegex(TypeError, r'C\(\) takes no arguments'):
object.__new__(C, 42)
with self.assertRaisesRegex(TypeError, error_msg):
object.__init__(C(), 42)
# Class with both `__init__` & `__new__` method overridden
class D:
def __new__(cls, *args, **kwargs):
super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
error_msg = r'object.__new__\(\) takes exactly one argument \(the type to instantiate\)'
with self.assertRaisesRegex(TypeError, error_msg):
D(42)
with self.assertRaisesRegex(TypeError, error_msg):
D.__new__(D, 42)
with self.assertRaisesRegex(TypeError, error_msg):
object.__new__(D, 42)
# Class that only overrides __init__
class E:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
error_msg = r'object.__init__\(\) takes exactly one argument \(the instance to initialize\)'
with self.assertRaisesRegex(TypeError, error_msg):
E().__init__(42)
with self.assertRaisesRegex(TypeError, error_msg):
object.__init__(E(), 42)
if __name__ == '__main__':
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import socket
import mock
import testtools
import webob
from neutron.agent.metadata import agent
from neutron.common import constants
from neutron.common import utils
from neutron.tests import base
class FakeConf(object):
admin_user = 'neutron'
admin_password = 'password'
admin_tenant_name = 'tenant'
auth_url = 'http://127.0.0.1'
auth_strategy = 'keystone'
auth_region = 'region'
auth_insecure = False
auth_ca_cert = None
endpoint_type = 'adminURL'
nova_metadata_ip = '9.9.9.9'
nova_metadata_port = 8775
metadata_proxy_shared_secret = 'secret'
class TestMetadataProxyHandler(base.BaseTestCase):
def setUp(self):
super(TestMetadataProxyHandler, self).setUp()
self.qclient_p = mock.patch('neutronclient.v2_0.client.Client')
self.qclient = self.qclient_p.start()
self.addCleanup(self.qclient_p.stop)
self.log_p = mock.patch.object(agent, 'LOG')
self.log = self.log_p.start()
self.addCleanup(self.log_p.stop)
self.handler = agent.MetadataProxyHandler(FakeConf)
def test_call(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = ('instance_id', 'tenant_id')
with mock.patch.object(self.handler, '_proxy_request') as proxy:
proxy.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
def test_call_no_instance_match(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = None, None
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_call_internal_server_error(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
networks=None, router_id=None):
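# The X-Forwarded-For address is what the handler matches against port fixed IPs.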
headers['X-Forwarded-For'] = '192.168.1.1'
req = mock.Mock(headers=headers)
def mock_list_ports(*args, **kwargs):
return {'ports': list_ports_retval.pop(0)}
self.qclient.return_value.list_ports.side_effect = mock_list_ports
instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
expected = [
mock.call(
username=FakeConf.admin_user,
tenant_name=FakeConf.admin_tenant_name,
region_name=FakeConf.auth_region,
auth_url=FakeConf.auth_url,
password=FakeConf.admin_password,
auth_strategy=FakeConf.auth_strategy,
token=None,
insecure=FakeConf.auth_insecure,
ca_cert=FakeConf.auth_ca_cert,
endpoint_url=None,
endpoint_type=FakeConf.endpoint_type)
]
if router_id:
expected.append(
mock.call().list_ports(
device_id=router_id,
device_owner=constants.DEVICE_OWNER_ROUTER_INTF
)
)
expected.append(
mock.call().list_ports(
network_id=networks or [],
fixed_ips=['ip_address=192.168.1.1'])
)
self.qclient.assert_has_calls(expected)
return (instance_id, tenant_id)
def test_get_instance_id_router_id(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ['net1', 'net2']
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[{'device_id': 'device_id', 'tenant_id': 'tenant_id'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
('device_id', 'tenant_id')
)
def test_get_instance_id_router_id_no_match(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ['net1', 'net2']
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
(None, None)
)
def test_get_instance_id_network_id(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [
[{'device_id': 'device_id',
'tenant_id': 'tenant_id'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=['the_id']),
('device_id', 'tenant_id')
)
def test_get_instance_id_network_id_no_match(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [[]]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=['the_id']),
(None, None)
)
def _proxy_request_test_helper(self, response_code=200, method='GET'):
hdrs = {'X-Forwarded-For': '8.8.8.8'}
body = 'body'
req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
method=method, body=body)
resp = mock.MagicMock(status=response_code)
req.response = resp
with mock.patch.object(self.handler, '_sign_instance_id') as sign:
sign.return_value = 'signed'
with mock.patch('httplib2.Http') as mock_http:
resp.__getitem__.return_value = "text/plain"
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('the_id', 'tenant_id',
req)
mock_http.assert_has_calls([
mock.call().request(
'http://9.9.9.9:8775/the_path',
method=method,
headers={
'X-Forwarded-For': '8.8.8.8',
'X-Instance-ID-Signature': 'signed',
'X-Instance-ID': 'the_id',
'X-Tenant-ID': 'tenant_id'
},
body=body
)]
)
return retval
def test_proxy_request_post(self):
response = self._proxy_request_test_helper(method='POST')
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_200(self):
response = self._proxy_request_test_helper(200)
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_403(self):
self.assertIsInstance(self._proxy_request_test_helper(403),
webob.exc.HTTPForbidden)
def test_proxy_request_404(self):
self.assertIsInstance(self._proxy_request_test_helper(404),
webob.exc.HTTPNotFound)
def test_proxy_request_409(self):
self.assertIsInstance(self._proxy_request_test_helper(409),
webob.exc.HTTPConflict)
def test_proxy_request_500(self):
self.assertIsInstance(self._proxy_request_test_helper(500),
webob.exc.HTTPInternalServerError)
def test_proxy_request_other_code(self):
with testtools.ExpectedException(Exception):
self._proxy_request_test_helper(302)
def test_sign_instance_id(self):
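# The expected digest is presumably the HMAC-SHA256 of 'foo' keyed with FakeConf's shared secret.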
self.assertEqual(
self.handler._sign_instance_id('foo'),
'773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
)
class TestUnixDomainHttpProtocol(base.BaseTestCase):
def test_init_empty_client(self):
u = agent.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock())
self.assertEqual(u.client_address, ('<local>', 0))
def test_init_with_client(self):
u = agent.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
self.assertEqual(u.client_address, 'foo')
class TestUnixDomainWSGIServer(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainWSGIServer, self).setUp()
self.eventlet_p = mock.patch.object(agent, 'eventlet')
self.eventlet = self.eventlet_p.start()
self.addCleanup(self.eventlet_p.stop)
self.server = agent.UnixDomainWSGIServer('test')
def test_start(self):
mock_app = mock.Mock()
with mock.patch.object(self.server, 'pool') as pool:
self.server.start(mock_app, '/the/path', workers=0, backlog=128)
self.eventlet.assert_has_calls([
mock.call.listen(
'/the/path',
family=socket.AF_UNIX,
backlog=128
)]
)
pool.spawn_n.assert_called_once_with(
self.server._run,
mock_app,
self.eventlet.listen.return_value
)
@mock.patch('neutron.openstack.common.service.ProcessLauncher')
def test_start_multiple_workers(self, process_launcher):
launcher = process_launcher.return_value
mock_app = mock.Mock()
self.server.start(mock_app, '/the/path', workers=2, backlog=128)
launcher.running = True
launcher.launch_service.assert_called_once_with(self.server._server,
workers=2)
self.server.stop()
self.assertFalse(launcher.running)
self.server.wait()
launcher.wait.assert_called_once_with()
def test_run(self):
with mock.patch.object(agent, 'logging') as logging:
self.server._run('app', 'sock')
            # _run() should start eventlet's WSGI server with the socket, the
            # app, the agent's pool and UnixDomainHttpProtocol; assert that the
            # server was actually invoked.
            self.assertTrue(self.eventlet.wsgi.server.called)
self.assertTrue(len(logging.mock_calls))
class TestUnixDomainMetadataProxy(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainMetadataProxy, self).setUp()
self.cfg_p = mock.patch.object(agent, 'cfg')
self.cfg = self.cfg_p.start()
looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_mock = looping_call_p.start()
self.cfg.CONF.metadata_proxy_socket = '/the/path'
self.cfg.CONF.metadata_workers = 0
self.cfg.CONF.metadata_backlog = 128
def test_init_doesnot_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.makedirs') as makedirs:
isdir.return_value = False
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
makedirs.assert_called_once_with('/the', 0o755)
def test_init_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
isdir.return_value = True
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_no_file(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = False
unlink.side_effect = OSError
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
unlink.assert_called_once_with('/the/path')
exists.assert_called_once_with('/the/path')
def test_init_exists_unlink_fails_file_still_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = True
unlink.side_effect = OSError
with testtools.ExpectedException(OSError):
agent.UnixDomainMetadataProxy(mock.Mock())
isdir.assert_called_once_with('/the')
unlink.assert_called_once_with('/the/path')
exists.assert_called_once_with('/the/path')
def test_run(self):
with mock.patch.object(agent, 'MetadataProxyHandler') as handler:
with mock.patch.object(agent, 'UnixDomainWSGIServer') as server:
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.makedirs') as makedirs:
isdir.return_value = False
p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
p.run()
isdir.assert_called_once_with('/the')
makedirs.assert_called_once_with('/the', 0o755)
server.assert_has_calls([
mock.call('neutron-metadata-agent'),
mock.call().start(handler.return_value,
'/the/path', workers=0,
backlog=128),
mock.call().wait()]
)
def test_main(self):
with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:
with mock.patch('eventlet.monkey_patch') as eventlet:
with mock.patch.object(agent, 'config') as config:
with mock.patch.object(agent, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg'):
agent.main()
self.assertTrue(eventlet.called)
self.assertTrue(config.setup_logging.called)
proxy.assert_has_calls([
mock.call(cfg.CONF),
mock.call().run()]
)
def test_init_state_reporting(self):
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.looping_mock.assert_called_once_with(proxy._report_state)
self.looping_mock.return_value.start.assert_called_once_with(
interval=mock.ANY)
def test_report_state(self):
with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.assertTrue(proxy.agent_state['start_flag'])
proxy._report_state()
self.assertNotIn('start_flag', proxy.agent_state)
state_api_inst = state_api.return_value
state_api_inst.report_state.assert_called_once_with(
proxy.context, proxy.agent_state, use_call=True)
|
|
import random
from datetime import date
from decimal import Decimal
from django.test import TestCase
from dateutil import relativedelta
from unittest.mock import Mock
from dimagi.utils.dates import add_months_to_date
from corehq.apps.accounting import tasks, utils
from corehq.apps.accounting.invoicing import LineItemFactory
from corehq.apps.accounting.models import (
CreditLine,
CustomerInvoice,
DefaultProductPlan,
DomainUserHistory,
FeatureType,
InvoicingPlan,
SoftwarePlanEdition,
Subscription,
)
from corehq.apps.accounting.tasks import calculate_users_in_all_domains
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import INCOMING
from corehq.apps.smsbillables.models import (
SmsBillable,
SmsGatewayFee,
SmsGatewayFeeCriteria,
SmsUsageFee,
SmsUsageFeeCriteria,
)
from corehq.apps.smsbillables.tests.generator import (
arbitrary_sms_billables_for_domain,
)
from corehq.util.dates import get_previous_month_date_range
class BaseCustomerInvoiceCase(BaseAccountingTest):
is_using_test_plans = False
@classmethod
def setUpClass(cls):
super(BaseCustomerInvoiceCase, cls).setUpClass()
if cls.is_using_test_plans:
generator.bootstrap_test_software_plan_versions()
cls.billing_contact = generator.create_arbitrary_web_user_name()
cls.dimagi_user = generator.create_arbitrary_web_user_name(is_dimagi=True)
cls.account = generator.billing_account(
cls.dimagi_user, cls.billing_contact)
cls.domain = generator.arbitrary_domain()
cls.account.is_customer_billing_account = True
cls.account.save()
cls.advanced_plan = DefaultProductPlan.get_default_plan_version(edition=SoftwarePlanEdition.ADVANCED)
cls.advanced_plan.plan.is_customer_software_plan = True
cls.subscription_length = 15 # months
subscription_start_date = date(2016, 2, 23)
subscription_end_date = add_months_to_date(subscription_start_date, cls.subscription_length)
cls.subscription = generator.generate_domain_subscription(
cls.account,
cls.domain,
date_start=subscription_start_date,
date_end=subscription_end_date,
)
advanced_subscription_end_date = add_months_to_date(subscription_end_date, 2)
cls.domain2 = generator.arbitrary_domain()
cls.sub2 = generator.generate_domain_subscription(
cls.account,
cls.domain2,
date_start=subscription_start_date,
date_end=advanced_subscription_end_date,
plan_version=cls.advanced_plan
)
cls.domain3 = generator.arbitrary_domain()
cls.sub3 = generator.generate_domain_subscription(
cls.account,
cls.domain3,
date_start=subscription_start_date,
date_end=advanced_subscription_end_date,
plan_version=cls.advanced_plan
)
        # This subscription should not be included in any customer invoices in these tests
        cls.domain_community = generator.arbitrary_domain()
        cls.community_sub = generator.generate_domain_subscription(
            cls.account,
            cls.domain_community,
            date_start=subscription_start_date,
            date_end=advanced_subscription_end_date,
            plan_version=DefaultProductPlan.get_default_plan_version(edition=SoftwarePlanEdition.COMMUNITY)
        )
def tearDown(self):
for user in self.domain.all_users():
user.delete(self.domain.name, deleted_by=None)
for user in self.domain2.all_users():
user.delete(self.domain2.name, deleted_by=None)
for user in self.domain3.all_users():
user.delete(self.domain3.name, deleted_by=None)
for user in self.domain_community.all_users():
user.delete(self.domain_community.name, deleted_by=None)
if self.is_using_test_plans:
utils.clear_plan_version_cache()
super(BaseAccountingTest, self).tearDown()
@classmethod
def tearDownClass(cls):
cls.domain.delete()
cls.domain2.delete()
cls.domain3.delete()
cls.domain_community.delete()
super(BaseCustomerInvoiceCase, cls).tearDownClass()
class TestCustomerInvoice(BaseCustomerInvoiceCase):
def test_multiple_subscription_invoice(self):
invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(3, self.subscription_length))
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertGreater(invoice.balance, Decimal('0.0000'))
self.assertEqual(invoice.account, self.account)
num_product_line_items = invoice.lineitem_set.get_products().count()
self.assertEqual(num_product_line_items, 2)
num_feature_line_items = invoice.lineitem_set.get_features().count()
self.assertEqual(num_feature_line_items, self.subscription.plan_version.feature_rates.count() +
self.sub2.plan_version.feature_rates.count())
def test_only_invoice_active_subscriptions(self):
"""
Test that only active subscriptions are invoiced.
        Two subscriptions with the same plan create only one product line item and one set of feature line items.
"""
invoice_date = utils.months_from_date(self.sub2.date_end, 1)
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('851.6200'))
self.assertEqual(invoice.account, self.account)
num_product_line_items = invoice.lineitem_set.get_products().count()
self.assertEqual(num_product_line_items, 1)
num_feature_line_items = invoice.lineitem_set.get_features().count()
self.assertEqual(num_feature_line_items, self.sub2.plan_version.feature_rates.count())
def test_no_invoice_before_start(self):
"""
Test that an invoice is not created if its subscriptions didn't start in the previous month.
"""
calculate_users_in_all_domains(self.subscription.date_start)
tasks.generate_invoices_based_on_date(self.subscription.date_start)
self.assertEqual(CustomerInvoice.objects.count(), 0)
def test_no_invoice_after_end(self):
"""
No invoices should be generated for the months after the end date of the subscriptions.
"""
invoice_date = utils.months_from_date(self.sub2.date_end, 2)
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 0)
def test_deleted_domain_in_multiple_subscription_invoice(self):
invoice_date = utils.months_from_date(self.subscription.date_start, 2)
domain_to_be_deleted = generator.arbitrary_domain()
generator.generate_domain_subscription(
self.account,
domain_to_be_deleted,
date_start=self.sub2.date_start,
date_end=self.sub2.date_end,
plan_version=self.advanced_plan
)
domain_to_be_deleted.delete(leave_tombstone=True)
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
num_product_line_items = invoice.lineitem_set.get_products().count()
self.assertEqual(num_product_line_items, 2)
class TestProductLineItem(BaseCustomerInvoiceCase):
"""
    Tests that product line items are properly generated in a customer invoice.
    Customer-level invoices do not prorate monthly costs.
"""
def setUp(self):
super(TestProductLineItem, self).setUp()
self.product_rate = self.subscription.plan_version.product_rate
def test_product_line_items(self):
invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
product_line_items = invoice.lineitem_set.get_products()
self.assertEqual(product_line_items.count(), 2)
product_descriptions = [line_item.base_description for line_item in product_line_items]
        self.assertCountEqual(product_descriptions, ['One month of CommCare Advanced Edition Software Plan.',
                                                     'One month of CommCare Standard Edition Software Plan.'])
product_costs = [line_item.base_cost for line_item in product_line_items]
        self.assertCountEqual(product_costs, [self.product_rate.monthly_fee,
                                              self.advanced_plan.product_rate.monthly_fee])
def test_product_line_items_in_quarterly_invoice(self):
self.account.invoicing_plan = InvoicingPlan.QUARTERLY
self.account.save()
invoice_date = utils.months_from_date(self.subscription.date_start, 14)
for months_before_invoice_date in range(3):
user_date = date(invoice_date.year, invoice_date.month, 1)
user_date -= relativedelta.relativedelta(months=months_before_invoice_date)
calculate_users_in_all_domains(user_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('4500.0000'))
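        # Sanity check on the amount: 4500 = 3 months * 1500, where 1500 is the
        # combined monthly product fee of the two invoiced plans (the same
        # no-overage monthly balance asserted elsewhere in these tests).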
self.assertEqual(invoice.account, self.account)
# There should be two product line items, with 3 months billed for each
num_product_line_items = invoice.lineitem_set.get_products().count()
self.assertEqual(num_product_line_items, 2)
for product_line_item in invoice.lineitem_set.get_products().all():
self.assertEqual(product_line_item.quantity, 3)
def test_product_line_items_in_yearly_invoice(self):
self.account.invoicing_plan = InvoicingPlan.YEARLY
self.account.save()
invoice_date = utils.months_from_date(self.subscription.date_start, 14)
for months_before_invoice_date in range(12):
user_date = date(invoice_date.year, invoice_date.month, 1)
user_date -= relativedelta.relativedelta(months=months_before_invoice_date)
calculate_users_in_all_domains(user_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('18000.0000'))
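        # Sanity check on the amount: 18000 = 12 months * 1500, the combined
        # monthly product fee of the two invoiced plans.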
self.assertEqual(invoice.account, self.account)
        # There should be two product line items, with 12 months billed for each
num_product_line_items = invoice.lineitem_set.get_products().count()
self.assertEqual(num_product_line_items, 2)
for product_line_item in invoice.lineitem_set.get_products().all():
self.assertEqual(product_line_item.quantity, 12)
def test_subscriptions_marked_do_not_invoice_not_included(self):
self.subscription.do_not_invoice = True
invoice_date = utils.months_from_date(self.sub2.date_end, 1)
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('851.6200'))
self.assertEqual(invoice.account, self.account)
product_line_items = invoice.lineitem_set.get_products()
self.assertEqual(product_line_items.count(), 1)
self.assertEqual(
product_line_items.first().base_description,
None
)
self.assertEqual(
product_line_items.first().unit_description,
'22 days of CommCare Advanced Edition Software Plan. (Jul 1 - Jul 22)'
)
num_feature_line_items = invoice.lineitem_set.get_features().count()
self.assertEqual(num_feature_line_items, self.sub2.plan_version.feature_rates.count())
def test_account_level_product_credits(self):
CreditLine.add_credit(
amount=self.subscription.plan_version.product_rate.monthly_fee / 2,
account=self.account,
is_product=True
)
invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('1350.0000'))
def test_subscription_level_product_credits(self):
CreditLine.add_credit(
self.subscription.plan_version.product_rate.monthly_fee / 2,
is_product=True,
subscription=self.subscription
)
CreditLine.add_credit(
self.sub2.plan_version.product_rate.monthly_fee / 4,
is_product=True,
subscription=self.sub2,
)
invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('1050.0000'))
class TestUserLineItem(BaseCustomerInvoiceCase):
is_using_test_plans = True
def setUp(self):
super(TestUserLineItem, self).setUp()
self.user_rate = self.subscription.plan_version.feature_rates \
.filter(feature__feature_type=FeatureType.USER).get()
self.advanced_rate = self.advanced_plan.feature_rates.filter(feature__feature_type=FeatureType.USER).get()
self.invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
def test_under_limit(self):
num_users = random.randint(0, self.user_rate.monthly_limit)
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = random.randint(0, self.advanced_rate.monthly_limit)
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('1500.0000'))
user_line_items = invoice.lineitem_set.get_feature_by_type(FeatureType.USER)
self.assertEqual(user_line_items.count(), 2)
for user_line_item in user_line_items:
self.assertEqual(user_line_item.quantity, 0)
self.assertEqual(user_line_item.subtotal, Decimal('0.0000'))
self.assertEqual(user_line_item.total, Decimal('0.0000'))
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
self.assertIsNone(user_line_item.unit_description)
self.assertEqual(user_line_item.unit_cost, Decimal('1.0000'))
def test_over_limit(self):
num_users = self.user_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = self.advanced_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
user_line_items = invoice.lineitem_set.get_feature_by_type(FeatureType.USER)
self.assertEqual(user_line_items.count(), 2)
for user_line_item in user_line_items:
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
num_to_charge = num_users - self.user_rate.monthly_limit
self.assertEqual(num_to_charge, user_line_item.quantity)
if self.user_rate.feature.name == user_line_item.feature_rate.feature.name:
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.total, self.user_rate.per_excess_fee * num_to_charge)
self.assertEqual(user_line_item.subtotal, self.user_rate.per_excess_fee * num_to_charge)
elif user_line_item.feature_rate.feature.name == self.advanced_rate.feature.name:
self.assertEqual(user_line_item.unit_cost, self.advanced_rate.per_excess_fee)
self.assertEqual(user_line_item.total, self.advanced_rate.per_excess_fee * num_to_charge)
self.assertEqual(user_line_item.subtotal, self.advanced_rate.per_excess_fee * num_to_charge)
def test_account_level_user_credits(self):
# Add User usage
num_users = self.user_rate.monthly_limit + 10
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = self.advanced_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
# Cover the cost of 1 User
CreditLine.add_credit(
amount=Decimal(2.0000),
feature_type=FeatureType.USER,
account=self.account,
)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal(1509.0000))
def test_subscription_level_user_credits(self):
# Add User usage
num_users = self.user_rate.monthly_limit + 10
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = self.advanced_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
# Cover the cost of 1 User on the Standard subscription
CreditLine.add_credit(
amount=Decimal(2.0000),
feature_type=FeatureType.USER,
subscription=self.subscription
)
# Cover the cost of 5 Users on the Advanced subscription
CreditLine.add_credit(
amount=Decimal(10.0000),
feature_type=FeatureType.USER,
subscription=self.sub2
)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal(1500.0000))
def test_one_subscription_level_user_credit(self):
# Add User usage
num_users = self.user_rate.monthly_limit + 10
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = self.advanced_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
# Cover the cost of 2 Users on the Advanced subscription
CreditLine.add_credit(
amount=Decimal(4.0000),
feature_type=FeatureType.USER,
subscription=self.sub2
)
invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal(1507.0000))
class TestSmsLineItem(BaseCustomerInvoiceCase):
def setUp(self):
super(TestSmsLineItem, self).setUp()
self.sms_rate = self.subscription.plan_version.feature_rates.filter(
feature__feature_type=FeatureType.SMS
).get()
self.advanced_rate = self.advanced_plan.feature_rates.filter(feature__feature_type=FeatureType.SMS).get()
self.invoice_date = utils.months_from_date(
self.subscription.date_start, random.randint(2, self.subscription_length)
)
self.sms_date = utils.months_from_date(self.invoice_date, -1)
def tearDown(self):
self._delete_sms_billables()
super(TestSmsLineItem, self).tearDown()
def test_under_limit(self):
num_sms = self.sms_rate.monthly_limit // 2
arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, num_sms, direction=INCOMING
)
num_sms_advanced = self.advanced_rate.monthly_limit // 2
arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms_advanced, direction=INCOMING
)
sms_line_items = self._create_sms_line_items()
self.assertEqual(sms_line_items.count(), 2)
for sms_line_item in sms_line_items:
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
self.assertEqual(sms_line_item.unit_cost, Decimal('0.0000'))
self.assertIsNotNone(sms_line_item.unit_description)
self.assertEqual(sms_line_item.subtotal, Decimal('0.0000'))
self.assertEqual(sms_line_item.total, Decimal('0.0000'))
def test_over_limit(self):
num_sms = random.randint(self.sms_rate.monthly_limit + 1, self.sms_rate.monthly_limit + 2)
billables = arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, num_sms
)
num_sms_advanced = random.randint(self.advanced_rate.monthly_limit + 1,
self.advanced_rate.monthly_limit + 2)
advanced_billables = arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms_advanced
)
sms_line_items = self._create_sms_line_items()
self.assertEqual(sms_line_items.count(), 2)
for sms_line_item in sms_line_items:
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
if self.advanced_rate.feature == sms_line_item.feature_rate.feature:
sms_cost = sum(
billable.gateway_charge + billable.usage_charge
for billable in advanced_billables[self.advanced_rate.monthly_limit:]
)
else:
sms_cost = sum(
billable.gateway_charge + billable.usage_charge
for billable in billables[self.sms_rate.monthly_limit:]
)
self.assertEqual(sms_line_item.unit_cost, sms_cost)
self.assertEqual(sms_line_item.total, sms_cost)
def test_subscription_level_sms_credits(self):
# Add SMS usage
arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, self.sms_rate.monthly_limit + 1
)
arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms=self.advanced_rate.monthly_limit + 10
)
# Cover the cost of 1 SMS on the Standard subscription
CreditLine.add_credit(
amount=Decimal(0.7500),
feature_type=FeatureType.SMS,
subscription=self.subscription
)
# Cover the cost of 10 SMS on the Advanced subscription
CreditLine.add_credit(
amount=Decimal(7.5000),
feature_type=FeatureType.SMS,
subscription=self.sub2,
)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('1500.0000'))
def test_one_subscription_level_sms_credit(self):
# Add SMS usage
arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, self.sms_rate.monthly_limit + 1
)
arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms=self.advanced_rate.monthly_limit + 10
)
# Cover the cost of 1 SMS on the Standard subscription
CreditLine.add_credit(
amount=Decimal(0.7500),
feature_type=FeatureType.SMS,
subscription=self.subscription
)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('1507.5000'))
def test_account_level_sms_credits(self):
# Add SMS usage
arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, self.sms_rate.monthly_limit + 1
)
arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms=self.advanced_rate.monthly_limit + 10
)
# Cover the cost of 1 SMS
CreditLine.add_credit(
amount=Decimal(0.5000),
feature_type=FeatureType.SMS,
account=self.account,
)
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
self.assertEqual(invoice.balance, Decimal('1507.7500'))
def _create_sms_line_items(self):
calculate_users_in_all_domains(self.invoice_date)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
return invoice.lineitem_set.get_feature_by_type(FeatureType.SMS)
@classmethod
def _delete_sms_billables(cls):
SmsBillable.objects.all().delete()
SmsGatewayFee.objects.all().delete()
SmsGatewayFeeCriteria.objects.all().delete()
SmsUsageFee.objects.all().delete()
SmsUsageFeeCriteria.objects.all().delete()
class TestQuarterlyInvoicing(BaseCustomerInvoiceCase):
is_using_test_plans = True
def setUp(self):
super(TestQuarterlyInvoicing, self).setUp()
self.user_rate = self.subscription.plan_version.feature_rates \
.filter(feature__feature_type=FeatureType.USER).get()
self.advanced_rate = self.advanced_plan.feature_rates.filter(feature__feature_type=FeatureType.USER).get()
self.initialize_domain_user_history_objects()
self.sms_rate = self.subscription.plan_version.feature_rates.filter(
feature__feature_type=FeatureType.SMS
).get()
self.advanced_sms_rate = self.advanced_plan.feature_rates.filter(
feature__feature_type=FeatureType.SMS
).get()
self.invoice_date = utils.months_from_date(
self.subscription.date_start, random.randint(2, self.subscription_length)
)
self.sms_date = utils.months_from_date(self.invoice_date, -1)
def initialize_domain_user_history_objects(self):
record_dates = []
month_end = self.subscription.date_end
while month_end > self.subscription.date_start:
record_dates.append(month_end)
_, month_end = get_previous_month_date_range(month_end)
num_users = self.user_rate.monthly_limit + 1
for record_date in record_dates:
DomainUserHistory.objects.create(
domain=self.domain,
num_users=num_users,
record_date=record_date
)
num_users = self.advanced_rate.monthly_limit + 2
for record_date in record_dates:
DomainUserHistory.objects.create(
domain=self.domain2,
num_users=num_users,
record_date=record_date
)
for record_date in record_dates:
DomainUserHistory.objects.create(
domain=self.domain3,
num_users=0,
record_date=record_date
)
def test_user_over_limit_in_quarterly_invoice(self):
num_users = self.user_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = self.advanced_rate.monthly_limit + 2
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
self.account.invoicing_plan = InvoicingPlan.QUARTERLY
self.account.save()
invoice_date = utils.months_from_date(self.subscription.date_start, 14)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
user_line_items = invoice.lineitem_set.get_feature_by_type(FeatureType.USER)
self.assertEqual(user_line_items.count(), 2)
for user_line_item in user_line_items:
if self.user_rate.feature.name == user_line_item.feature_rate.feature.name:
self.assertEqual(user_line_item.quantity, 3)
elif user_line_item.feature_rate.feature.name == self.advanced_rate.feature.name:
self.assertEqual(user_line_item.quantity, 6)
def test_user_over_limit_in_yearly_invoice(self):
num_users = self.user_rate.monthly_limit + 1
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_users)
num_users_advanced = self.advanced_rate.monthly_limit + 2
generator.arbitrary_commcare_users_for_domain(self.domain2.name, num_users_advanced)
self.account.invoicing_plan = InvoicingPlan.YEARLY
self.account.save()
invoice_date = utils.months_from_date(self.subscription.date_start, 14)
tasks.generate_invoices_based_on_date(invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
user_line_items = invoice.lineitem_set.get_feature_by_type(FeatureType.USER)
self.assertEqual(user_line_items.count(), 2)
for user_line_item in user_line_items:
if self.user_rate.feature.name == user_line_item.feature_rate.feature.name:
self.assertEqual(user_line_item.quantity, 12)
elif user_line_item.feature_rate.feature.name == self.advanced_rate.feature.name:
self.assertEqual(user_line_item.quantity, 24)
def test_sms_over_limit_in_quarterly_invoice(self):
num_sms = random.randint(self.sms_rate.monthly_limit + 1, self.sms_rate.monthly_limit + 2)
billables = arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, num_sms
)
num_sms_advanced = random.randint(self.advanced_sms_rate.monthly_limit + 1,
self.advanced_sms_rate.monthly_limit + 2)
advanced_billables = arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms_advanced
)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
sms_line_items = invoice.lineitem_set.get_feature_by_type(FeatureType.SMS)
self.assertEqual(sms_line_items.count(), 2)
for sms_line_item in sms_line_items:
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
if self.advanced_sms_rate.feature == sms_line_item.feature_rate.feature:
sms_cost = sum(
billable.gateway_charge + billable.usage_charge
for billable in advanced_billables[self.advanced_sms_rate.monthly_limit:]
)
else:
sms_cost = sum(
billable.gateway_charge + billable.usage_charge
for billable in billables[self.sms_rate.monthly_limit:]
)
self.assertEqual(sms_line_item.unit_cost, sms_cost)
self.assertEqual(sms_line_item.total, sms_cost)
def test_sms_over_limit_in_yearly_invoice(self):
num_sms = random.randint(self.sms_rate.monthly_limit + 1, self.sms_rate.monthly_limit + 2)
billables = arbitrary_sms_billables_for_domain(
self.domain, self.sms_date, num_sms
)
num_sms_advanced = random.randint(self.advanced_sms_rate.monthly_limit + 1,
self.advanced_sms_rate.monthly_limit + 2)
advanced_billables = arbitrary_sms_billables_for_domain(
self.domain2, self.sms_date, num_sms_advanced
)
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
sms_line_items = invoice.lineitem_set.get_feature_by_type(FeatureType.SMS)
self.assertEqual(sms_line_items.count(), 2)
for sms_line_item in sms_line_items:
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
if self.advanced_sms_rate.feature == sms_line_item.feature_rate.feature:
sms_cost = sum(
billable.gateway_charge + billable.usage_charge
for billable in advanced_billables[self.advanced_sms_rate.monthly_limit:]
)
else:
sms_cost = sum(
billable.gateway_charge + billable.usage_charge
for billable in billables[self.sms_rate.monthly_limit:]
)
self.assertEqual(sms_line_item.unit_cost, sms_cost)
self.assertEqual(sms_line_item.total, sms_cost)
def _create_sms_line_items_for_quarter(self):
tasks.generate_invoices_based_on_date(self.invoice_date)
self.assertEqual(CustomerInvoice.objects.count(), 1)
invoice = CustomerInvoice.objects.first()
return invoice.lineitem_set.get_feature_by_type(FeatureType.SMS)
class TestDomainsInLineItemForCustomerInvoicing(TestCase):
@classmethod
def setUpClass(cls):
super(TestDomainsInLineItemForCustomerInvoicing, cls).setUpClass()
cls.customer_account = generator.billing_account('test@test.com', 'test@test.com')
cls.customer_account.is_customer_billing_account = True
cls.customer_account.save()
cls.customer_plan_version = DefaultProductPlan.get_default_plan_version()
cls.customer_plan_version.plan.is_customer_software_plan = True
cls.customer_plan_version.plan.save()
cls.mock_customer_invoice = Mock()
cls.mock_customer_invoice.date_start = date(2019, 5, 1)
cls.mock_customer_invoice.date_end = date(2019, 5, 31)
cls.domain = Domain(name='test_domain')
cls.domain.save()
@classmethod
def tearDownClass(cls):
cls.domain.delete()
super(TestDomainsInLineItemForCustomerInvoicing, cls).tearDownClass()
def test_past_subscription_is_excluded(self):
past_subscription = Subscription.new_domain_subscription(
account=self.customer_account,
domain=self.domain.name,
plan_version=self.customer_plan_version,
date_start=date(2019, 4, 1),
date_end=date(2019, 5, 1),
)
line_item_factory = LineItemFactory(past_subscription, None, self.mock_customer_invoice)
self.assertEqual(line_item_factory.subscribed_domains, [])
def test_future_subscription_is_excluded(self):
future_subscription = Subscription.new_domain_subscription(
account=self.customer_account,
domain=self.domain.name,
plan_version=self.customer_plan_version,
date_start=date(2019, 6, 1),
date_end=date(2019, 7, 1),
)
line_item_factory = LineItemFactory(future_subscription, None, self.mock_customer_invoice)
self.assertEqual(line_item_factory.subscribed_domains, [])
def test_preexisting_subscription_is_included(self):
preexisting_subscription = Subscription.new_domain_subscription(
account=self.customer_account,
domain=self.domain.name,
plan_version=self.customer_plan_version,
date_start=date(2019, 4, 30),
date_end=date(2019, 5, 2),
)
line_item_factory = LineItemFactory(preexisting_subscription, None, self.mock_customer_invoice)
self.assertEqual(line_item_factory.subscribed_domains, [self.domain.name])
def test_preexisting_subscription_without_end_date_is_included(self):
preexisting_subscription = Subscription.new_domain_subscription(
account=self.customer_account,
domain=self.domain.name,
plan_version=self.customer_plan_version,
date_start=date(2019, 4, 30),
)
line_item_factory = LineItemFactory(preexisting_subscription, None, self.mock_customer_invoice)
self.assertEqual(line_item_factory.subscribed_domains, [self.domain.name])
def test_new_subscription_is_included(self):
new_subscription = Subscription.new_domain_subscription(
account=self.customer_account,
domain=self.domain.name,
plan_version=self.customer_plan_version,
date_start=date(2019, 5, 31),
date_end=date(2019, 6, 1),
)
line_item_factory = LineItemFactory(new_subscription, None, self.mock_customer_invoice)
self.assertEqual(line_item_factory.subscribed_domains, [self.domain.name])
def test_new_subscription_without_end_date_is_included(self):
new_subscription = Subscription.new_domain_subscription(
account=self.customer_account,
domain=self.domain.name,
plan_version=self.customer_plan_version,
date_start=date(2019, 5, 31),
)
line_item_factory = LineItemFactory(new_subscription, None, self.mock_customer_invoice)
self.assertEqual(line_item_factory.subscribed_domains, [self.domain.name])
|
|
"""Weight updating functions."""
import math
import pickle
import logging
import warnings
import numpy
from .ndarray import (NDArray, zeros, clip, sqrt, sign, array, maximum, abs as NDabs)
from .ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update)
from .random import normal
class Optimizer(object):
"""The base class inherited by all optimizers.
Parameters
----------
rescale_grad : float, optional
        Multiply the gradient with `rescale_grad` before updating. Often
        chosen to be ``1.0/batch_size``.
param_idx2name : dict from int to string, optional
A dictionary that maps int index to string name.
clip_gradient : float, optional
Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
learning_rate : float, optional
The initial learning rate.
lr_scheduler : LRScheduler, optional
The learning rate scheduler.
wd : float, optional
The weight decay (or L2 regularization) coefficient. Modifies objective
by adding a penalty for having large weights.
sym: Symbol, optional
The Symbol this optimizer is applying to.
begin_num_update : int, optional
The initial number of updates.
"""
def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
clip_gradient=None, learning_rate=0.01,
lr_scheduler=None, sym=None, begin_num_update=0):
self.rescale_grad = rescale_grad
self.lr = learning_rate
self.lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self.lr_scheduler.base_lr = learning_rate
self.wd = wd
self.lr_mult = {}
self.wd_mult = {}
self.begin_num_update = begin_num_update
self.num_update = begin_num_update
self._index_update_count = {}
self.clip_gradient = clip_gradient
if param_idx2name is None:
param_idx2name = {}
assert isinstance(param_idx2name, dict), \
'param_idx2name should be a dict of param indexes to names.'
self.idx2name = param_idx2name.copy()
self.sym = sym
self.set_lr_mult({})
self.set_wd_mult({})
opt_registry = {}
@staticmethod
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
"""
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
logging.warning('WARNING: New optimizer %s.%s is overriding '
'existing optimizer %s.%s',
klass.__module__, klass.__name__,
Optimizer.opt_registry[name].__module__,
Optimizer.opt_registry[name].__name__)
Optimizer.opt_registry[name] = klass
return klass
@staticmethod
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
"""
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name)
def create_state(self, index, weight):
"""Creates auxiliary state for a given weight.
        Some optimizers require additional state, e.g. momentum, in addition
to gradients in order to update weights. This function creates state
for a given weight which will be used in `update`. This function is
called only once for each weight.
Parameters
----------
index : int
            A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
def update(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
raise NotImplementedError()
def set_lr_scale(self, args_lrscale): # pylint: disable=unused-argument
"""[DEPRECATED] Sets lr scale. Use set_lr_mult instead."""
raise DeprecationWarning
def set_lr_mult(self, args_lr_mult):
"""Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
            For each of its key-value entries, the learning rate multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
            compatibility, and we recommend using the name instead.
"""
self.lr_mult = {}
if self.sym is not None:
attr = self.sym.attr_dict()
for name in self.sym.list_arguments():
if name in attr and '__lr_mult__' in attr[name]:
self.lr_mult[name] = float(attr[name]['__lr_mult__'])
self.lr_mult.update(args_lr_mult)
def set_wd_mult(self, args_wd_mult):
"""Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
        constructor, the weight decay multiplier is set to 0 for all
        parameters whose names don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
            For each of its key-value entries, the weight decay multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_wd_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
            compatibility, and we recommend using the name instead.
"""
self.wd_mult = {}
for n in self.idx2name.values():
if not (n.endswith('_weight') or n.endswith('_gamma')):
self.wd_mult[n] = 0.0
if self.sym is not None:
attr = self.sym.attr_dict()
for name in self.sym.list_arguments():
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
def _update_count(self, index):
"""Updates num_update.
Parameters
----------
index : int
The index to be updated.
"""
if index not in self._index_update_count:
self._index_update_count[index] = self.begin_num_update
self._index_update_count[index] += 1
self.num_update = max(self._index_update_count[index], self.num_update)
def _get_lr(self, index):
"""Gets the learning rate given the index of the weight.
Parameters
----------
index : int
The index corresponding to the weight.
Returns
-------
lr : float
Learning rate for this index.
"""
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
if index in self.lr_mult:
lr *= self.lr_mult[index]
elif index in self.idx2name:
lr *= self.lr_mult.get(self.idx2name[index], 1.0)
return lr
def _get_wd(self, index):
"""Gets weight decay for index.
        Returns 0 for non-weight parameters if parameter names were provided to `__init__`.
Parameters
----------
index : int
The index for weight.
Returns
-------
wd : float
Weight decay for this index.
"""
wd = self.wd
if index in self.wd_mult:
wd *= self.wd_mult[index]
elif index in self.idx2name:
wd *= self.wd_mult.get(self.idx2name[index], 1.0)
return wd
# convenience wrapper for Optimizer.register
register = Optimizer.register # pylint: disable=invalid-name
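# Illustrative usage of the multiplier machinery defined above (a sketch, not part
# of the library; the parameter names are hypothetical):
#
#     opt = Optimizer.create_optimizer('sgd', learning_rate=0.1, wd=0.01,
#                                      param_idx2name={0: 'fc_weight', 1: 'fc_bias'})
#     opt.set_lr_mult({'fc_bias': 2.0})    # _get_lr(1) -> 0.1 * 2.0 = 0.2
#     opt.set_wd_mult({'fc_weight': 0.0})  # _get_wd(0) -> 0.01 * 0.0 = 0.0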
@register
class SGD(Optimizer):
"""The SGD optimizer with momentum and weight decay.
The optimizer updates the weight by::
state = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
weight = weight - state
For details of the update algorithm see :class:`~mxnet.ndarray.sgd_update` and
:class:`~mxnet.ndarray.sgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, multi_precision=False, **kwargs):
super(SGD, self).__init__(**kwargs)
self.momentum = momentum
self.multi_precision = multi_precision
def create_state(self, index, weight):
momentum = None
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = array(weight, ctx=weight.context, dtype=numpy.float32)
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=numpy.float32)
return (momentum, weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype)
return momentum
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
use_multi_precision = isinstance(state, (list, tuple))
if not use_multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight,
lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight,
lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
lr=lr, wd=wd, **kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight,
lr=lr, wd=wd, **kwargs)
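# A minimal sketch of the update performed by SGD above, written as NumPy-style
# pseudocode mirroring the class docstring (illustrative only; the real work is
# done by the fused sgd_update/sgd_mom_update kernels):
#
#     state  = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
#     weight = weight - state
#
# For float16 weights, ``SGD(momentum=0.9, multi_precision=True)`` keeps float32
# master copies of the weights and momentum, as described in the docstring.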
@register
class DCASGD(Optimizer):
"""The DCASGD optimizer.
This class implements the optimizer described in *Asynchronous Stochastic Gradient Descent
with Delay Compensation for Distributed Deep Learning*,
available at https://arxiv.org/abs/1609.08326.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lamda : float, optional
Scale DC value.
"""
def __init__(self, momentum=0.0, lamda=0.04, **kwargs):
super(DCASGD, self).__init__(**kwargs)
self.momentum = momentum
self.weight_previous = {}
self.lamda = lamda
def create_state(self, index, weight):
if self.momentum == 0.0:
return (None,
weight.copy()) # previous weight
else:
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # momentum
weight.copy()) # previous weight
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
mom, previous_weight = state
if mom:
mom[:] *= self.momentum
mom[:] += -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
else:
assert(self.momentum == 0.0)
mom = -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
previous_weight[:] = weight
weight[:] += mom
@register
class NAG(SGD):
"""Nesterov accelerated SGD.
This optimizer updates each weight by::
state = momentum * state + grad + wd * weight
weight = weight - (lr * (grad + momentum * state))
This optimizer accepts the same arguments as :class:`.SGD`.
"""
def __init__(self, **kwargs):
super(NAG, self).__init__(**kwargs)
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
if state is not None:
mom = state
mom[:] *= self.momentum
grad += wd * weight
mom[:] += grad
grad[:] += self.momentum * mom
weight[:] += -lr * grad
else:
assert self.momentum == 0.0
weight[:] += -lr * (grad + wd * weight)
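# A minimal sketch of the Nesterov step implemented in NAG.update above
# (illustrative pseudocode, assuming clipping has already been applied):
#
#     g      = rescale_grad * grad + wd * weight
#     state  = momentum * state + g
#     weight = weight - lr * (g + momentum * state)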
@register
class SGLD(Optimizer):
"""Stochastic Gradient Riemannian Langevin Dynamics.
This class implements the optimizer described in the paper *Stochastic Gradient
Riemannian Langevin Dynamics on the Probability Simplex*, available at
https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.
"""
def __init__(self, **kwargs):
super(SGLD, self).__init__(**kwargs)
def create_state(self, index, weight):
return None
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
weight[:] += - lr/2 * (grad + wd * weight) + normal(0, math.sqrt(lr),
weight.shape, weight.context)
@register # pylint: disable=invalid-name
class ccSGD(SGD):
"""[DEPRECATED] Same as `SGD`. Left here for backward compatibility."""
def __init__(self, *args, **kwargs):
super(ccSGD, self).__init__(*args, **kwargs)
@register
class Adam(Optimizer):
"""The Adam optimizer.
This class implements the optimizer described in *Adam: A Method for
Stochastic Optimization*, available at http://arxiv.org/abs/1412.6980.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
For details of the update algorithm, see :class:`ndarray.adam_update`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
**kwargs):
super(Adam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
t = self._index_update_count[index]
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
mean, var = state
adam_update(weight, grad, mean, var, out=weight,
lr=lr, wd=wd, **kwargs)
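# A rough sketch of the bias-corrected Adam step driven by the code above
# (illustrative pseudocode; the exact handling of wd and clipping lives inside
# the fused adam_update kernel, so the details here are an approximation):
#
#     g    = rescale_grad * grad + wd * weight
#     mean = beta1 * mean + (1 - beta1) * g
#     var  = beta2 * var + (1 - beta2) * g * g
#     lr_t = lr * sqrt(1 - beta2**t) / (1 - beta1**t)   # the scaling applied in update()
#     weight = weight - lr_t * mean / (sqrt(var) + epsilon)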
@register
class AdaGrad(Optimizer):
"""AdaGrad optimizer.
This class implements the AdaGrad optimizer described in *Adaptive Subgradient
Methods for Online Learning and Stochastic Optimization*, and available at
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
eps: float, optional
Small value to avoid division by 0.
"""
def __init__(self, eps=1e-7, **kwargs):
super(AdaGrad, self).__init__(**kwargs)
self.float_stable_eps = eps
def create_state(self, index, weight):
return zeros(weight.shape, weight.context) # history
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
history = state
history[:] += (grad * grad)
weight[:] += -lr * (grad / sqrt(history + self.float_stable_eps) + wd * weight)
@register
class RMSProp(Optimizer):
"""The RMSProp optimizer.
Two versions of RMSProp are implemented:
If ``centered=False``, we follow
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012.
For details of the update algorithm see :class:`~mxnet.ndarray.rmsprop_update`.
If ``centered=True``, we follow http://arxiv.org/pdf/1308.0850v5.pdf (38)-(45)
by Alex Graves, 2013.
For details of the update algorithm see :class:`~mxnet.ndarray.rmspropalex_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
gamma1: float, optional
A decay factor of moving average over past squared gradient.
gamma2: float, optional
A "momentum" factor. Only used if `centered`=``True``.
epsilon : float, optional
Small value to avoid division by 0.
centered : bool, optional
Flag to control which version of RMSProp to use.
``True`` will use Graves's version of `RMSProp`,
``False`` will use Tieleman & Hinton's version of `RMSProp`.
clip_weights : float, optional
Clips weights into range ``[-clip_weights, clip_weights]``.
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(RMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.gamma1 = gamma1
self.gamma2 = gamma2
self.centered = centered
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
if self.centered:
return (
zeros(weight.shape, weight.context), # n
zeros(weight.shape, weight.context), # g
zeros(weight.shape, weight.context)) # delta
else:
return (zeros(weight.shape, weight.context), ) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
kwargs = {'gamma1': self.gamma1, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.centered:
kwargs['gamma2'] = self.gamma2
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.clip_weights:
kwargs['clip_weights'] = self.clip_weights
if not self.centered:
(n, ) = state
rmsprop_update(
weight, grad, n, out=weight, lr=lr, wd=wd, **kwargs)
else:
n, g, delta = state
rmspropalex_update(weight, grad, n, g, delta, out=weight,
lr=lr, wd=wd, **kwargs)
@register
class AdaDelta(Optimizer):
"""The AdaDelta optimizer.
This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive
learning rate method*, available at https://arxiv.org/abs/1212.5701.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
rho: float
Decay rate for both squared gradients and delta.
epsilon : float
Small value to avoid division by 0.
"""
def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
super(AdaDelta, self).__init__(**kwargs)
self.rho = rho
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context), # accumulated g
zeros(weight.shape, weight.context)) # accumulated delta
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
wd = self._get_wd(index)
self._update_count(index)
# preprocess grad
grad *= self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # unpack accumulated g and accumulated delta from state
acc_g, acc_delta = state
# update g, delta
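        # acc_g and acc_delta are exponential moving averages of g^2 and
        # delta^2; the ratio of their RMS values yields a per-coordinate step
        # size, which is why AdaDelta needs no global learning rate.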
acc_g[:] = self.rho * acc_g + (1. - self.rho) * grad * grad
current_delta = sqrt(acc_delta + self.epsilon) / sqrt(acc_g + self.epsilon) * grad
acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta * current_delta
# update weight
weight[:] -= current_delta + wd * weight
#pylint: disable=invalid-name
@register
class Ftrl(Optimizer):
"""The Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
Parameters
----------
lamda1 : float, optional
L1 regularization coefficient.
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
eta :
.. math::
           \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^t g_{s,i}^2}}
"""
def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
super(Ftrl, self).__init__(**kwargs)
self.lamda1 = lamda1
self.beta = beta
self.lr = learning_rate
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context), # dn
zeros(weight.shape, weight.context)) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
wd = self._get_wd(index)
lr = self._get_lr(index)
# preprocess grad
grad *= self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # unpack accumulated dn and n from state
dn, n = state
#update dn, n
dn += grad - (sqrt(n + grad * grad) - sqrt(n)) * weight / lr
n += grad * grad
# update weight
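        # Closed-form FTRL-Proximal step with L1 soft-thresholding: any
        # coordinate with |dn| <= lamda1 is set exactly to zero, which is what
        # produces sparse weights.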
weight[:] = (sign(dn) * self.lamda1 - dn) / \
((self.beta + sqrt(n)) / lr + wd) * (NDabs(dn) > self.lamda1)
@register
class Adamax(Optimizer):
"""The AdaMax optimizer.
It is a variant of Adam based on the infinity norm
available at http://arxiv.org/abs/1412.6980 Section 7.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
"""
def __init__(self, learning_rate=0.002, beta1=0.9, beta2=0.999, **kwargs):
super(Adamax, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
t = self._index_update_count[index]
lr /= (1. - self.beta1**t)
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# update m_t and u_t
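        # u_t tracks an exponentially weighted infinity norm of past
        # gradients, replacing Adam's second raw moment estimate.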
m_t, u_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
u_t[:] = maximum(self.beta2 * u_t, NDabs(grad))
# update weight
weight[:] -= lr * m_t / u_t
@register
class Nadam(Optimizer):
"""The Nesterov Adam optimizer.
    Much like Adam is essentially RMSprop with momentum,
    Nadam is Adam with Nesterov momentum, as described at
    http://cs229.stanford.edu/proj2015/054_report.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
schedule_decay : float, optional
Exponential decay rate for the momentum schedule
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
schedule_decay=0.004, **kwargs):
super(Nadam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.schedule_decay = schedule_decay
self.m_schedule = 1.
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
t = self._index_update_count[index]
# preprocess grad
        grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# warming momentum schedule
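        # beta1 is annealed over time and m_schedule keeps the running product
        # of the annealed coefficients; it is used below to bias-correct the
        # first moment, following the Nadam schedule from the cited report.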
momentum_t = self.beta1 * (1. - 0.5 * (pow(0.96, t * self.schedule_decay)))
momentum_t_1 = self.beta1 * (1. - 0.5 * (pow(0.96, (t + 1) * self.schedule_decay)))
self.m_schedule = self.m_schedule * momentum_t
m_schedule_next = self.m_schedule * momentum_t_1
# update m_t and v_t
m_t, v_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
grad_prime = grad / (1. - self.m_schedule)
m_t_prime = m_t / (1. - m_schedule_next)
v_t_prime = v_t / (1. - pow(self.beta2, t))
m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
# update weight
weight[:] -= lr * m_t_bar / (sqrt(v_t_prime) + self.epsilon)
@register
class Test(Optimizer):
"""The Test optimizer"""
def __init__(self, **kwargs):
super(Test, self).__init__(**kwargs)
def create_state(self, index, weight):
"""Creates a state to duplicate weight."""
return zeros(weight.shape, weight.context)
def update(self, index, weight, grad, state):
"""Performs w += rescale_grad * grad."""
weight[:] += grad * self.rescale_grad
state[:] = weight
# backward compatibility wrapper for Optimizer.CreateOptimizer
create = Optimizer.create_optimizer # pylint: disable=invalid-name
class Updater(object):
"""Updater for kvstore."""
def __init__(self, optimizer):
self.optimizer = optimizer
self.states = {}
self.states_synced = {}
def __call__(self, index, grad, weight):
"""Updates weight given gradient and index."""
if index not in self.states:
self.states[index] = self.optimizer.create_state(index, weight)
self.states_synced[index] = True
elif not self.states_synced[index]:
self.states[index] = \
self.sync_state_context(self.states[index], weight.context)
self.states_synced[index] = True
self.optimizer.update(index, weight, grad, self.states[index])
def sync_state_context(self, state, context):
if isinstance(state, NDArray):
return state.as_in_context(context)
elif isinstance(state, (tuple, list)):
synced_state = (self.sync_state_context(i, context) for i in state)
if isinstance(state, tuple):
return tuple(synced_state)
else:
return list(synced_state)
else:
return state
def set_states(self, states):
"""Sets updater states."""
self.states = pickle.loads(states)
self.states_synced = dict.fromkeys(self.states.keys(), False)
def get_states(self):
"""Gets updater states."""
return pickle.dumps(self.states)
def get_updater(optimizer):
"""Returns a closure of the updater needed for kvstore.
Parameters
----------
optimizer: Optimizer
The optimizer.
Returns
-------
updater: function
The closure of the updater.
"""
return Updater(optimizer)
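# A minimal usage sketch (illustrative only; it assumes this module is
# importable as ``mxnet.optimizer`` with the NDArray backend available, and
# the index/weight/gradient names below are hypothetical):
#
#     opt = create('adagrad', learning_rate=0.01, eps=1e-7)
#     updater = get_updater(opt)
#     # for every parameter slot i with weight w_i and gradient g_i:
#     #     updater(i, g_i, w_i)   # updates w_i in place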
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer Extension
Downloads, installs and runs Composer.
"""
import os
import os.path
import sys
import logging
import re
import json
import StringIO
import copy
import shutil
from build_pack_utils import utils
from build_pack_utils import stream_output
from compile_helpers import warn_invalid_php_version
from extension_helpers import ExtensionHelper
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'vendor', 'node-semver'))
from semver import max_satisfying
from build_pack_utils.compile_extensions import CompileExtensions
_log = logging.getLogger('composer')
def find_composer_path(file_name, ctx):
build_dir = ctx['BUILD_DIR']
webdir = ctx['WEBDIR']
path = None
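    # Look for the file in the app root first, then in the web directory; if
    # COMPOSER_PATH is set, also check that sub-path under both locations.
    # The last existing candidate wins.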
paths = [
os.path.join(build_dir, file_name),
os.path.join(build_dir, webdir, file_name)
]
env_path = os.getenv('COMPOSER_PATH')
if env_path is not None:
paths = paths + [
os.path.join(build_dir, env_path, file_name),
os.path.join(build_dir, webdir, env_path, file_name)
]
for p in paths:
if os.path.exists(p):
path = p
return path
def find_composer_paths(ctx):
return (
find_composer_path("composer.json", ctx),
find_composer_path("composer.lock", ctx)
)
class ComposerConfiguration(object):
def __init__(self, ctx):
self._ctx = ctx
self._log = _log
self._init_composer_paths()
def _init_composer_paths(self):
self.json_path = find_composer_path("composer.json", self._ctx)
self.lock_path = find_composer_path("composer.lock", self._ctx)
self.auth_path = find_composer_path("auth.json", self._ctx)
def read_exts_from_path(self, path):
exts = []
if path:
req_pat = re.compile(r'"require"\s?\:\s?\{(.*?)\}', re.DOTALL)
ext_pat = re.compile(r'"ext-(.*?)"')
with open(path, 'rt') as fp:
data = fp.read()
for req_match in req_pat.finditer(data):
for ext_match in ext_pat.finditer(req_match.group(1)):
exts.append(ext_match.group(1))
return exts
def pick_php_version(self, requested):
selected = None
        if requested is None or requested == '':
return self._ctx['PHP_VERSION']
# requested is coming from the composer.json file and is a unicode string type.
# Since it's just a semver string, it shouldn't actually contain any unicode
# characters. So it should be safe to turn it into an ASCII string
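        # Translate '>=' requirements to the pessimistic '~>' operator before
        # resolving, so a loose constraint like '>=5.3' is narrowed to a
        # compatible release range instead of matching any future major version.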
translated_requirement = str(requested.replace('>=', '~>'))
selected = max_satisfying(self._ctx['ALL_PHP_VERSIONS'], translated_requirement, loose=False)
if selected is None:
docs_link = 'http://docs.cloudfoundry.org/buildpacks/php/gsg-php-composer.html'
warn_invalid_php_version(requested, self._ctx['PHP_DEFAULT'], docs_link)
selected = self._ctx['PHP_DEFAULT']
return selected
def get_composer_contents(self, file_path):
try:
composer = json.load(open(file_path, 'r'))
except ValueError, e:
sys.tracebacklimit = 0
sys.stderr.write('-------> Invalid JSON present in {0}. Parser said: "{1}"'
.format(os.path.basename(file_path), e.message))
sys.stderr.write("\n")
sys.exit(1)
return composer
def read_version_from_composer(self, key):
if self.json_path is not None:
composer = self.get_composer_contents(self.json_path)
require = composer.get('require', {})
return require.get(key, None)
if self.lock_path is not None:
composer = self.get_composer_contents(self.lock_path)
platform = composer.get('platform', {})
return platform.get(key, None)
return None
def configure(self):
if self.json_path or self.lock_path:
exts = []
# include any existing extensions
exts.extend(self._ctx.get('PHP_EXTENSIONS', []))
# add 'openssl' extension
exts.append('openssl')
# add platform extensions from composer.json & composer.lock
exts.extend(self.read_exts_from_path(self.json_path))
exts.extend(self.read_exts_from_path(self.lock_path))
# update context with new list of extensions,
# if composer.json exists
php_version = self.read_version_from_composer('php')
self._log.debug('Composer picked PHP Version [%s]',
php_version)
self._ctx['PHP_VERSION'] = self.pick_php_version(php_version)
self._ctx['PHP_EXTENSIONS'] = utils.unique(exts)
self._ctx['PHP_VM'] = 'php'
class ComposerExtension(ExtensionHelper):
def __init__(self, ctx):
ExtensionHelper.__init__(self, ctx)
self._log = _log
self._init_composer_paths()
def _init_composer_paths(self):
self.json_path = find_composer_path("composer.json", self._ctx)
self.lock_path = find_composer_path("composer.lock", self._ctx)
self.auth_path = find_composer_path("auth.json", self._ctx)
def _defaults(self):
manifest_file_path = os.path.join(self._ctx["BP_DIR"], "manifest.yml")
compile_ext = CompileExtensions(self._ctx["BP_DIR"])
_, default_version = compile_ext.default_version_for(manifest_file_path=manifest_file_path, dependency="composer")
return {
'COMPOSER_VERSION': default_version,
'COMPOSER_PACKAGE': 'composer.phar',
'COMPOSER_DOWNLOAD_URL': '/composer/'
'{COMPOSER_VERSION}/{COMPOSER_PACKAGE}',
'COMPOSER_INSTALL_OPTIONS': ['--no-interaction', '--no-dev'],
'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/{LIBDIR}/vendor',
'COMPOSER_BIN_DIR': '{BUILD_DIR}/php/bin',
'COMPOSER_HOME': '{CACHE_DIR}/composer',
'COMPOSER_CACHE_DIR': '{COMPOSER_HOME}/cache',
'COMPOSER_INSTALL_GLOBAL': []
}
def _should_compile(self):
return (self.json_path is not None or self.lock_path is not None)
def _compile(self, install):
self._builder = install.builder
self.composer_runner = ComposerCommandRunner(self._ctx, self._builder)
self.clean_cache_dir()
self.move_local_vendor_folder()
self.install()
self.run()
def clean_cache_dir(self):
if not os.path.exists(self._ctx['COMPOSER_CACHE_DIR']):
self._log.debug("Old style cache directory exists, removing")
shutil.rmtree(self._ctx['COMPOSER_HOME'], ignore_errors=True)
def move_local_vendor_folder(self):
vendor_path = os.path.join(self._ctx['BUILD_DIR'],
self._ctx['WEBDIR'],
'vendor')
if os.path.exists(vendor_path):
self._log.debug("Vendor [%s] exists, moving to LIBDIR",
vendor_path)
(self._builder.move()
.under('{BUILD_DIR}/{WEBDIR}')
.into('{BUILD_DIR}/{LIBDIR}')
.where_name_matches('^%s/.*$' % vendor_path)
.done())
def install(self):
self._builder.install().package('PHP').done()
if self._ctx['COMPOSER_VERSION'] == 'latest':
dependencies_path = os.path.join(self._ctx['BP_DIR'],
'dependencies')
if os.path.exists(dependencies_path):
                raise RuntimeError(
                    '"COMPOSER_VERSION": "latest" is not supported in the '
                    'cached buildpack. Please vendor your preferred version '
                    'of composer with your app, or use the provided default '
                    'composer version.')
self._ctx['COMPOSER_DOWNLOAD_URL'] = \
'https://getcomposer.org/composer.phar'
self._builder.install()._installer.install_binary_direct(
self._ctx['COMPOSER_DOWNLOAD_URL'], None,
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
else:
self._builder.install()._installer._install_binary_from_manifest(
self._ctx['COMPOSER_DOWNLOAD_URL'],
os.path.join(self._ctx['BUILD_DIR'], 'php', 'bin'),
extract=False)
def _github_oauth_token_is_valid(self, candidate_oauth_token):
stringio_writer = StringIO.StringIO()
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
return 'resources' in github_response_json
def _github_rate_exceeded(self, token_is_valid):
stringio_writer = StringIO.StringIO()
if token_is_valid:
candidate_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
curl_command = 'curl -H "Authorization: token %s" ' \
'https://api.github.com/rate_limit' % candidate_oauth_token
else:
curl_command = 'curl https://api.github.com/rate_limit'
stream_output(stringio_writer,
curl_command,
env=os.environ,
cwd=self._ctx['BUILD_DIR'],
shell=True)
github_response = stringio_writer.getvalue()
github_response_json = json.loads(github_response)
rate = github_response_json['rate']
num_remaining = rate['remaining']
return num_remaining <= 0
def setup_composer_github_token(self):
github_oauth_token = os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN')
if self._github_oauth_token_is_valid(github_oauth_token):
print('-----> Using custom GitHub OAuth token in'
' $COMPOSER_GITHUB_OAUTH_TOKEN')
self.composer_runner.run('config', '-g',
'github-oauth.github.com',
'"%s"' % github_oauth_token)
return True
else:
print('-----> The GitHub OAuth token supplied from '
'$COMPOSER_GITHUB_OAUTH_TOKEN is invalid')
return False
def check_github_rate_exceeded(self, token_is_valid):
if self._github_rate_exceeded(token_is_valid):
print('-----> The GitHub api rate limit has been exceeded. '
'Composer will continue by downloading from source, which might result in slower downloads. '
'You can increase your rate limit with a GitHub OAuth token. '
'Please obtain a GitHub OAuth token by registering your application at '
'https://github.com/settings/applications/new. '
'Then set COMPOSER_GITHUB_OAUTH_TOKEN in your environment to the value of this token.')
def move_to_build_dir(self, file_path):
if file_path is not None and os.path.dirname(file_path) != self._ctx['BUILD_DIR']:
(self._builder.move()
.under(os.path.dirname(file_path))
.where_name_is(os.path.basename(file_path))
.into('BUILD_DIR')
.done())
def run(self):
# Move composer files into root directory
self.move_to_build_dir(self.json_path)
self.move_to_build_dir(self.lock_path)
self.move_to_build_dir(self.auth_path)
# Sanity Checks
if not os.path.exists(os.path.join(self._ctx['BUILD_DIR'],
'composer.lock')):
msg = (
'PROTIP: Include a `composer.lock` file with your '
                'application! This will make sure the exact same versions '
                'of dependencies are used when you deploy to Cloud Foundry.')
self._log.warning(msg)
print msg
# dump composer version, if in debug mode
if self._ctx.get('BP_DEBUG', False):
self.composer_runner.run('-V')
if not os.path.exists(os.path.join(self._ctx['BP_DIR'], 'dependencies')):
token_is_valid = False
# config composer to use github token, if provided
if os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN', False):
token_is_valid = self.setup_composer_github_token()
# check that the api rate limit has not been exceeded, otherwise exit
self.check_github_rate_exceeded(token_is_valid)
# install global Composer dependencies
if len(self._ctx['COMPOSER_INSTALL_GLOBAL']) > 0:
globalCtx = copy.deepcopy(self._ctx)
globalCtx['COMPOSER_VENDOR_DIR'] = '{COMPOSER_HOME}/vendor'
globalCtx['COMPOSER_BIN_DIR'] = '{COMPOSER_HOME}/bin'
globalRunner = ComposerCommandRunner(globalCtx, self._builder)
globalRunner.run('global', 'require', '--no-progress',
*self._ctx['COMPOSER_INSTALL_GLOBAL'])
# install dependencies w/Composer
self.composer_runner.run('install', '--no-progress',
*self._ctx['COMPOSER_INSTALL_OPTIONS'])
class ComposerCommandRunner(object):
def __init__(self, ctx, builder):
self._log = _log
self._ctx = ctx
self._strategy = PHPComposerStrategy(ctx)
self._php_path = self._strategy.binary_path()
self._composer_path = os.path.join(ctx['BUILD_DIR'], 'php',
'bin', 'composer.phar')
self._strategy.write_config(builder)
def _build_composer_environment(self):
env = {}
for key in os.environ.keys():
val = self._ctx.get(key, '')
env[key] = val if type(val) == str else json.dumps(val)
# add basic composer vars
env['COMPOSER_HOME'] = self._ctx['COMPOSER_HOME']
env['COMPOSER_VENDOR_DIR'] = self._ctx['COMPOSER_VENDOR_DIR']
env['COMPOSER_BIN_DIR'] = self._ctx['COMPOSER_BIN_DIR']
env['COMPOSER_CACHE_DIR'] = self._ctx['COMPOSER_CACHE_DIR']
env['COMPOSER_INSTALL_OPTIONS'] = ' '.join(self._ctx['COMPOSER_INSTALL_OPTIONS'])
# prevent key system variables from being overridden
env['LD_LIBRARY_PATH'] = self._strategy.ld_library_path()
env['PHPRC'] = self._ctx['TMPDIR']
env['PATH'] = ':'.join(filter(None,
[env.get('PATH', ''),
os.path.dirname(self._php_path),
os.path.join(self._ctx['COMPOSER_HOME'], 'bin')]))
for key, val in env.iteritems():
self._log.debug("ENV IS: %s=%s (%s)", key, val, type(val))
return env
def run(self, *args):
try:
cmd = [self._php_path, self._composer_path]
cmd.extend(args)
self._log.debug("Running command [%s]", ' '.join(cmd))
stream_output(sys.stdout,
' '.join(cmd),
env=self._build_composer_environment(),
cwd=self._ctx['BUILD_DIR'],
shell=True)
except:
print "-----> Composer command failed"
raise
class PHPComposerStrategy(object):
def __init__(self, ctx):
self._ctx = ctx
def binary_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'bin', 'php')
def write_config(self, builder):
# rewrite a temp copy of php.ini for use by composer
(builder.copy()
.under('{BUILD_DIR}/php/etc')
.where_name_is('php.ini')
.into('TMPDIR')
.done())
utils.rewrite_cfgs(os.path.join(self._ctx['TMPDIR'], 'php.ini'),
{'TMPDIR': self._ctx['TMPDIR'],
'HOME': self._ctx['BUILD_DIR']},
delim='@')
def ld_library_path(self):
return os.path.join(
self._ctx['BUILD_DIR'], 'php', 'lib')
# Extension Methods
def configure(ctx):
config = ComposerConfiguration(ctx)
config.configure()
def preprocess_commands(ctx):
composer = ComposerExtension(ctx)
return composer.preprocess_commands()
def service_commands(ctx):
composer = ComposerExtension(ctx)
return composer.service_commands()
def service_environment(ctx):
composer = ComposerExtension(ctx)
return composer.service_environment()
def compile(install):
composer = ComposerExtension(install.builder._ctx)
return composer.compile(install)
|
|
#!/usr/bin/env python
from contextlib import contextmanager
import datetime
import glob
import logging
import os
from os import path
import Queue
import re
import sys
import time
import ansi2html
import sh
import six
LOG = logging.getLogger('bash')
def execute(command_line, repeat=1, log_name=None):
command_line = list(command_line)
log_level = logging.WARNING
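    # Recognised inline options: --repeat=N, --log-name=NAME, -v (more
    # verbose) and -q (quieter); everything else is passed through to bash.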
for arg in list(command_line[:-1]):
if arg.startswith('--repeat='):
repeat = arg.split('=')[1].strip()
command_line.remove(arg)
elif arg.startswith('--log-name='):
log_name = arg.split('=')[1].strip()
command_line.remove(arg)
elif arg == '-v':
log_level -= 10
command_line.remove(arg)
elif arg == '-q':
log_level += 10
command_line.remove(arg)
if log_name is None and command_line:
log_name = command_line[-1].split(
'#', 1)[-1].strip().split(' ')[0].strip()
return Bash(log_name=log_name, log_level=log_level).execute(
command_line, repeat=int(repeat))
class Bash(object):
timestamp_format = '%Y-%m-%d_%H:%M:%S'
log_format = '%(asctime)-15s | %(message)s'
log_dir = os.environ.get('LOG_DIR') or path.join(os.getcwd(), 'logs')
log_file_name_format = '{timestamp}_{log_name}_RUNNING'
log_path = None
status = 'BEGIN'
def __init__(self, log_name=None, log_level=logging.WARNING):
self.log_name = log_name
self.log_level = log_level
def execute(self, command_line, repeat=1):
with self.use_logging():
for i in range(repeat):
if repeat > 1:
LOG.debug('Execution #%d of %d', i + 1, repeat)
result = self._execute(command_line)
if result != 0 and repeat > 1:
LOG.error('Failed after %d executions.', i + 1)
return result
return result
def _execute(self, command_line):
LOG.debug(
            'Begin executing command: %s',
' '.join("'" + arg + "'" for arg in command_line))
self.status = "EXECUTING"
self.exit_code = None
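        # Run the command under ``bash -x``; the except/else chain below maps
        # the outcome to a status string (FAILED, ERROR, INTERRUPTED, SUCCESS)
        # and a log severity, and the status ends up in the log file name.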
try:
sh.bash('-x', *command_line,
_out=self.write_stdout,
_err=self.write_stderr)
except sh.ErrorReturnCode as error:
exit_code = error.exit_code
status = "FAILED"
severity = logging.ERROR
except Exception:
exit_code = 1
status = "ERROR"
severity = logging.ERROR
LOG.exception('Internal error.')
except BaseException:
exit_code = 1
severity = logging.WARNING
status = "INTERRUPTED"
else:
exit_code = 0
status = 'SUCCESS'
severity = logging.DEBUG
self.exit_code = exit_code
self.status = status
if exit_code != 0 and self.log_level < logging.ERROR:
stream = sys.stderr
stream.write('=' * 79 + '\n')
sh.tail('-n', '100', self.log_path + '.ansi', _out=stream)
stream.write('=' * 79 + '\n')
LOG.log(
severity,
'Finished executing command:\n'
' Command line: %s\n'
' Status: %s\n'
' Exit code: %s\n'
' Log file: %s\n',
command_line, status, exit_code, self.log_path)
return exit_code
def write_stdout(self, msg):
self._write_message(msg, logger=self.output_logger)
def write_stderr(self, msg):
self._write_message(msg, logger=self.error_logger)
def _write_message(self, msg, logger):
msg = msg[:-1]
today = datetime.datetime.now().strftime('%Y-%m-%d')
while msg.startswith(today):
msg = msg.split(' ', 2)[-1]
if msg.startswith('| '):
msg = msg[2:]
if msg.startswith('+'):
LOG.info(msg)
else:
logger.debug(msg)
def new_log_path(self):
timestamp = datetime.datetime.now().strftime(self.timestamp_format)
self.log_file_name = log_file_name = self.log_file_name_format.format(
timestamp=timestamp, log_name=self.log_name)
return path.join(self.log_dir, log_file_name)
@contextmanager
def use_logging(self):
logging._acquireLock()
try:
root = logging.root
if len(root.handlers) == 0:
root.setLevel(logging.DEBUG)
logging.getLogger('sh').setLevel(logging.WARNING)
formatter = logging.Formatter(self.log_format)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(self.log_level)
stream_handler.setFormatter(formatter)
LOG.addHandler(stream_handler)
log_path = self.new_log_path()
while path.isfile(log_path):
time.sleep(.1)
log_path = self.new_log_path()
if path.isdir(path.dirname(log_path)):
file_handler = logging.FileHandler(log_path + '.ansi', 'wt')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
root.addHandler(file_handler)
html_handler = HtmlFileHandler(log_path + '.html', 'wt')
html_handler.setLevel(logging.DEBUG)
html_handler.setFormatter(formatter)
root.addHandler(html_handler)
txt_handler = TxtFileHandler(log_path + '.txt', 'wt')
txt_handler.setLevel(logging.DEBUG)
txt_handler.setFormatter(formatter)
root.addHandler(txt_handler)
else:
log_path = file_handler = html_handler = txt_handler = None
self.log_path = log_path
finally:
self.output_logger = logging.getLogger('out')
self.error_logger = logging.getLogger('err')
logging._releaseLock()
try:
yield log_path
finally:
if file_handler:
file_handler.close()
if html_handler:
html_handler.close()
if txt_handler:
                txt_handler.close()
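            # Rename the *_RUNNING.* log files so the final name carries the
            # run status (e.g. *_SUCCESS.* or *_FAILED.*).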
if log_path:
for file_name in glob.glob(log_path + '.*'):
new_file_name = file_name.replace(
'_RUNNING.', '_' + self.status + '.')
os.rename(file_name, new_file_name)
class HtmlFileHandler(logging.FileHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def _open(self):
output = super(HtmlFileHandler, self)._open()
input = Ansi2HtmlStream(output)
input.open()
return input
class TxtFileHandler(logging.FileHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
ansi_to_txt = re.compile(r'\x1b[^m]*m').sub
def emit(self, record):
record.msg = self.ansi_to_txt('', record.msg)
return super(TxtFileHandler, self).emit(record)
_html_header = six.u(
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=%(output_encoding)s">
<title>%(title)s</title>
<style type="text/css">\n%(style)s\n</style>
</head>
<body class="body_foreground" bgcolor="#FFFFFF" style="font-size: %(font_size)s;" >
<pre class="ansi2html-content">
""")
_html_footer = six.u(
"""
</pre>
</body>
</html>
""")
class Ansi2HtmlStream(object):
HEADER = _html_header
FOOTER = _html_footer
Ansi2HTMLConverter = ansi2html.Ansi2HTMLConverter
scheme = "xterm"
dark_bg = False
def __init__(self, stream, ensure_trailing_newline=False, converter=None):
self.stream = stream
if not converter:
converter = self.Ansi2HTMLConverter()
self.converter = converter
self.ensure_trailing_newline = ensure_trailing_newline
def open(self):
        self.stream.write(
            self.HEADER % {
                'style': "\n".join(
                    str(s)
                    for s in ansi2html.style.get_styles(
                        self.dark_bg, self.scheme)),
                'title': self.converter.title,
                'font_size': self.converter.font_size,
                'output_encoding': self.converter.output_encoding})
        self._indent += 1
        return self
def write(self, ansi):
html = self.converter.convert(
ansi, full=False,
ensure_trailing_newline=self.ensure_trailing_newline)
return self.stream.write(html)
def flush(self):
self.stream.flush()
def close(self):
self.stream.write(self.FOOTER)
self.stream.close()
_indent = 0
def __enter__(self):
if self._indent < 1:
self.open()
            self._indent = 1
else:
self._indent += 1
return self
    def __exit__(self, exc_type, exc_value, traceback):
        if self._indent > 1:
            self._indent -= 1
        else:
            self.close()
            self._indent = 0
if __name__ == '__main__':
exit(execute(sys.argv[1:]))
|
|
from __future__ import division, print_function, absolute_import
import warnings
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal, assert_)
from scipy._lib._numpy_compat import suppress_warnings
import pytest
from pytest import raises as assert_raises
from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
ClusterError, _krandinit)
from scipy.cluster import _vq
from scipy.sparse.sputils import matrix
TESTDATA_2D = np.array([
-2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
-2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
-4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
-0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
-2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
-2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
-2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
-2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
-1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
-1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
-0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
-1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
-2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
-0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
-2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
-2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
-1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
-3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
-1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
-2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
-0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
-2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
2.11]).reshape((200, 2))
# Global data
X = np.array([[3.0, 3], [4, 3], [4, 2],
[9, 2], [5, 1], [6, 2], [9, 4],
[5, 2], [5, 4], [7, 4], [6, 5]])
CODET1 = np.array([[3.0000, 3.0000],
[6.2000, 4.0000],
[5.8000, 1.8000]])
CODET2 = np.array([[11.0/3, 8.0/3],
[6.7500, 4.2500],
[6.2500, 1.7500]])
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
class TestWhiten(object):
def test_whiten(self):
desired = np.array([[5.08738849, 2.97091878],
[3.19909255, 0.69660580],
[4.51041982, 0.02640918],
[4.38567074, 0.95120889],
[2.32191480, 1.63195503]])
for tp in np.array, matrix:
obs = tp([[0.98744510, 0.82766775],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_allclose(whiten(obs), desired, rtol=1e-5)
def test_whiten_zero_std(self):
desired = np.array([[0., 1.0, 2.86666544],
[0., 1.0, 1.32460034],
[0., 1.0, 3.74382172]])
for tp in np.array, matrix:
obs = tp([[0., 1., 0.74109533],
[0., 1., 0.34243798],
[0., 1., 0.96785929]])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_allclose(whiten(obs), desired, rtol=1e-5)
assert_equal(len(w), 1)
assert_(issubclass(w[-1].category, RuntimeWarning))
def test_whiten_not_finite(self):
for tp in np.array, matrix:
for bad_value in np.nan, np.inf, -np.inf:
obs = tp([[0.98744510, bad_value],
[0.62093317, 0.19406729],
[0.87545741, 0.00735733],
[0.85124403, 0.26499712],
[0.45067590, 0.45464607]])
assert_raises(ValueError, whiten, obs)
class TestVq(object):
def test_py_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
label1 = py_vq(tp(X), tp(initc))[0]
assert_array_equal(label1, LABEL1)
def test_vq(self):
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
label1, dist = _vq.vq(tp(X), tp(initc))
assert_array_equal(label1, LABEL1)
tlabel1, tdist = vq(tp(X), tp(initc))
def test_vq_1d(self):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
assert_array_equal(a, ta)
assert_array_equal(b, tb)
def test__vq_sametype(self):
a = np.array([1.0, 2.0], dtype=np.float64)
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
def test__vq_invalid_type(self):
a = np.array([1, 2], dtype=int)
assert_raises(TypeError, _vq.vq, a, a)
def test_vq_large_nfeat(self):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
def test_vq_large_features(self):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(X, code_book)
assert_allclose(dis0, dis1, 1e-5)
assert_array_equal(codes0, codes1)
class TestKMean(object):
def test_large_features(self):
        # Generate a data set with large values, and run kmeans on it
        # (regression test for 1077).
d = 300
n = 100
m1 = np.random.randn(d)
m2 = np.random.randn(d)
x = 10000 * np.random.randn(n, d) - 20000 * m1
y = 10000 * np.random.randn(n, d) + 20000 * m2
data = np.empty((x.shape[0] + y.shape[0], d), np.double)
data[:x.shape[0]] = x
data[x.shape[0]:] = y
kmeans(data, 2)
def test_kmeans_simple(self):
np.random.seed(54321)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
code1 = kmeans(tp(X), tp(initc), iter=1)[0]
assert_array_almost_equal(code1, CODET2)
def test_kmeans_lost_cluster(self):
# This will cause kmeans to have a cluster with no points.
data = TESTDATA_2D
initk = np.array([[-1.8127404, -0.67128041],
[2.04621601, 0.07401111],
[-2.31149087, -0.05160469]])
kmeans(data, initk)
with suppress_warnings() as sup:
sup.filter(UserWarning,
"One of the clusters is empty. Re-run kmeans with a "
"different initialization")
kmeans2(data, initk, missing='warn')
assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
def test_kmeans2_simple(self):
np.random.seed(12345678)
initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
for tp in np.array, matrix:
code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
assert_array_almost_equal(code1, CODET1)
assert_array_almost_equal(code2, CODET2)
def test_kmeans2_rank1(self):
data = TESTDATA_2D
data1 = data[:, 0]
initc = data1[:3]
code = initc.copy()
kmeans2(data1, code, iter=1)[0]
kmeans2(data1, code, iter=2)[0]
def test_kmeans2_rank1_2(self):
data = TESTDATA_2D
data1 = data[:, 0]
kmeans2(data1, 2, iter=1)
def test_kmeans2_high_dim(self):
# test kmeans2 when the number of dimensions exceeds the number
# of input points
data = TESTDATA_2D
data = data.reshape((20, 20))[:10]
kmeans2(data, 2)
def test_kmeans2_init(self):
np.random.seed(12345)
data = TESTDATA_2D
kmeans2(data, 3, minit='points')
kmeans2(data[:, :1], 3, minit='points') # special case (1-D)
kmeans2(data, 3, minit='++')
kmeans2(data[:, :1], 3, minit='++') # special case (1-D)
# minit='random' can give warnings, filter those
with suppress_warnings() as sup:
sup.filter(message="One of the clusters is empty. Re-run")
kmeans2(data, 3, minit='random')
kmeans2(data[:, :1], 3, minit='random') # special case (1-D)
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MemoryError in Wine.')
def test_krandinit(self):
data = TESTDATA_2D
datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]]
k = int(1e6)
for data in datas:
np.random.seed(1234)
init = _krandinit(data, k)
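            # With k = 1e6 samples, the covariance of the randomly initialised
            # code book should closely match the covariance of the data.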
orig_cov = np.cov(data, rowvar=0)
init_cov = np.cov(init, rowvar=0)
assert_allclose(orig_cov, init_cov, atol=1e-2)
def test_kmeans2_empty(self):
# Regression test for gh-1032.
assert_raises(ValueError, kmeans2, [], 2)
def test_kmeans_0k(self):
# Regression test for gh-1073: fail when k arg is 0.
assert_raises(ValueError, kmeans, X, 0)
assert_raises(ValueError, kmeans2, X, 0)
assert_raises(ValueError, kmeans2, X, np.array([]))
def test_kmeans_large_thres(self):
# Regression test for gh-1774
x = np.array([1, 2, 3, 4, 10], dtype=float)
res = kmeans(x, 1, thresh=1e16)
assert_allclose(res[0], np.array([4.]))
assert_allclose(res[1], 2.3999999999999999)
|
|
import datetime
import hashlib
import json
import logging
import os
import shutil
import subprocess
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.template import Context, loader
from django.test.client import RequestFactory
import pytz
import requests
from celery import chord
from celery.exceptions import RetryTaskError
from celery import task
from requests.exceptions import RequestException
from tower import ugettext as _
import mkt
from lib.post_request_task.task import task as post_request_task
from mkt.abuse.models import AbuseReport
from mkt.constants.categories import CATEGORY_REDIRECTS
from mkt.constants.regions import RESTOFWORLD
from mkt.developers.models import ActivityLog, AppLog
from mkt.developers.tasks import (_fetch_manifest, fetch_icon, pngcrush_image,
resize_preview, validator)
from mkt.files.models import FileUpload
from mkt.files.utils import WebAppParser
from mkt.reviewers.models import EscalationQueue, RereviewQueue
from mkt.site.decorators import set_task_user, use_master, write
from mkt.site.helpers import absolutify
from mkt.site.mail import send_mail_jinja
from mkt.site.utils import chunked, JSONEncoder
from mkt.users.models import UserProfile
from mkt.users.utils import get_task_user
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import AppManifest, Preview, Webapp
from mkt.webapps.utils import get_locale_properties
task_log = logging.getLogger('z.task')
@task
@write
def version_changed(addon_id, **kw):
update_last_updated(addon_id)
def update_last_updated(addon_id):
qs = Webapp._last_updated_queries()
if not Webapp.objects.filter(pk=addon_id).exists():
task_log.info(
'[1@None] Updating last updated for %s failed, no addon found'
% addon_id)
return
task_log.info('[1@None] Updating last updated for %s.' % addon_id)
res = (qs.filter(pk=addon_id)
.using('default')
.values_list('id', 'last_updated'))
if res:
pk, t = res[0]
Webapp.objects.filter(pk=pk).update(last_updated=t)
@task
def delete_preview_files(id, **kw):
task_log.info('[1@None] Removing preview with id of %s.' % id)
p = Preview(id=id)
for f in (p.thumbnail_path, p.image_path):
try:
storage.delete(f)
except Exception, e:
task_log.error('Error deleting preview file (%s): %s' % (f, e))
def _get_content_hash(content):
return 'sha256:%s' % hashlib.sha256(content).hexdigest()
def _log(webapp, message, rereview=False, exc_info=False):
if rereview:
message = u'(Re-review) ' + unicode(message)
task_log.info(u'[Webapp:%s] %s' % (webapp, unicode(message)),
exc_info=exc_info)
@task
@write
def update_manifests(ids, **kw):
retry_secs = 3600
task_log.info('[%s@%s] Update manifests.' %
(len(ids), update_manifests.rate_limit))
check_hash = kw.pop('check_hash', True)
retries = kw.pop('retries', {})
# Since we'll be logging the updated manifest change to the users log,
# we'll need to log in as user.
mkt.set_user(get_task_user())
for id in ids:
_update_manifest(id, check_hash, retries)
if retries:
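        # Re-queue any ids that failed, carrying the per-id failure counts so
        # _update_manifest can notify developers and eventually flag the app
        # for re-review after repeated failures.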
try:
update_manifests.retry(args=(retries.keys(),),
kwargs={'check_hash': check_hash,
'retries': retries},
eta=datetime.datetime.now() +
datetime.timedelta(seconds=retry_secs),
max_retries=5)
except RetryTaskError:
_log(id, 'Retrying task in %d seconds.' % retry_secs)
return retries
def notify_developers_of_failure(app, error_message, has_link=False):
if (app.status not in mkt.WEBAPPS_APPROVED_STATUSES or
RereviewQueue.objects.filter(addon=app).exists()):
# If the app isn't public, or has already been reviewed, we don't
# want to send the mail.
return
# FIXME: how to integrate with commbadge?
for author in app.authors.all():
context = {
'error_message': error_message,
'SITE_URL': settings.SITE_URL,
'SUPPORT_GROUP': settings.SUPPORT_GROUP,
'has_link': has_link
}
to = [author.email]
with author.activate_lang():
# Re-fetch the app to get translations in the right language.
context['app'] = Webapp.objects.get(pk=app.pk)
subject = _(u'Issue with your app "{app}" on the Firefox '
u'Marketplace').format(**context)
send_mail_jinja(subject,
'webapps/emails/update_manifest_failure.txt',
context, recipient_list=to)
def _update_manifest(id, check_hash, failed_fetches):
webapp = Webapp.objects.get(pk=id)
version = webapp.versions.latest()
file_ = version.files.latest()
_log(webapp, u'Fetching webapp manifest')
if not file_:
_log(webapp, u'Ignoring, no existing file')
return
# Fetch manifest, catching and logging any exception.
try:
content = _fetch_manifest(webapp.manifest_url)
except Exception, e:
msg = u'Failed to get manifest from %s. Error: %s' % (
webapp.manifest_url, e)
failed_fetches[id] = failed_fetches.get(id, 0) + 1
if failed_fetches[id] == 3:
# This is our 3rd attempt, let's send the developer(s) an email to
            # notify them of the failures.
notify_developers_of_failure(webapp, u'Validation errors:\n' + msg)
elif failed_fetches[id] >= 4:
# This is our 4th attempt, we should already have notified the
# developer(s). Let's put the app in the re-review queue.
_log(webapp, msg, rereview=True, exc_info=True)
if webapp.status in mkt.WEBAPPS_APPROVED_STATUSES:
RereviewQueue.flag(webapp, mkt.LOG.REREVIEW_MANIFEST_CHANGE,
msg)
del failed_fetches[id]
else:
_log(webapp, msg, rereview=False, exc_info=True)
return
# Check hash.
if check_hash:
hash_ = _get_content_hash(content)
if file_.hash == hash_:
_log(webapp, u'Manifest the same')
return
_log(webapp, u'Manifest different')
# Validate the new manifest.
upload = FileUpload.objects.create()
upload.add_file([content], webapp.manifest_url, len(content))
validator(upload.pk)
upload = FileUpload.objects.get(pk=upload.pk)
if upload.validation:
v8n = json.loads(upload.validation)
if v8n['errors']:
v8n_url = absolutify(reverse(
'mkt.developers.upload_detail', args=[upload.uuid]))
msg = u'Validation errors:\n'
for m in v8n['messages']:
if m['type'] == u'error':
msg += u'* %s\n' % m['message']
msg += u'\nValidation Result:\n%s' % v8n_url
_log(webapp, msg, rereview=True)
if webapp.status in mkt.WEBAPPS_APPROVED_STATUSES:
notify_developers_of_failure(webapp, msg, has_link=True)
RereviewQueue.flag(webapp, mkt.LOG.REREVIEW_MANIFEST_CHANGE,
msg)
return
else:
_log(webapp,
u'Validation for upload UUID %s has no result' % upload.uuid)
# Get the old manifest before we overwrite it.
new = json.loads(content)
old = webapp.get_manifest_json(file_)
# New manifest is different and validates, update version/file.
try:
webapp.manifest_updated(content, upload)
except:
_log(webapp, u'Failed to create version', exc_info=True)
# Check for any name changes at root and in locales. If any were added or
# updated, send to re-review queue.
msg = []
rereview = False
# Some changes require a new call to IARC's SET_STOREFRONT_DATA.
iarc_storefront = False
if old and old.get('name') != new.get('name'):
rereview = True
iarc_storefront = True
msg.append(u'Manifest name changed from "%s" to "%s".' % (
old.get('name'), new.get('name')))
new_version = webapp.versions.latest()
    # Compare developer_name between the old and new versions using the
    # property that falls back to the author name instead of using the db
    # field directly. This lets us avoid forcing a re-review on old apps that
    # didn't have a developer name in their manifest initially and then upload
    # a new version that does, provided that it matches the original author
    # name.
if version.developer_name != new_version.developer_name:
rereview = True
iarc_storefront = True
msg.append(u'Developer name changed from "%s" to "%s".'
% (version.developer_name, new_version.developer_name))
# Get names in "locales" as {locale: name}.
locale_names = get_locale_properties(new, 'name', webapp.default_locale)
# Check changes to default_locale.
locale_changed = webapp.update_default_locale(new.get('default_locale'))
if locale_changed:
msg.append(u'Default locale changed from "%s" to "%s".'
% locale_changed)
# Update names
crud = webapp.update_names(locale_names)
if any(crud.values()):
webapp.save()
if crud.get('added'):
rereview = True
msg.append(u'Locales added: %s' % crud.get('added'))
if crud.get('updated'):
rereview = True
msg.append(u'Locales updated: %s' % crud.get('updated'))
# Check if supported_locales changed and update if so.
webapp.update_supported_locales(manifest=new, latest=True)
if rereview:
msg = ' '.join(msg)
_log(webapp, msg, rereview=True)
if webapp.status in mkt.WEBAPPS_APPROVED_STATUSES:
RereviewQueue.flag(webapp, mkt.LOG.REREVIEW_MANIFEST_CHANGE, msg)
if iarc_storefront:
webapp.set_iarc_storefront_data()
@post_request_task
@write
def update_cached_manifests(id, **kw):
try:
webapp = Webapp.objects.get(pk=id)
except Webapp.DoesNotExist:
_log(id, u'Webapp does not exist')
return
if not webapp.is_packaged:
return
# Rebuilds the packaged app mini manifest and stores it in cache.
webapp.get_cached_manifest(force=True)
_log(webapp, u'Updated cached mini manifest')
@task
@write
def add_uuids(ids, **kw):
for chunk in chunked(ids, 50):
for app in Webapp.objects.filter(id__in=chunk):
# Save triggers the creation of a guid if the app doesn't currently
# have one.
app.save()
@task
@write
def update_supported_locales(ids, **kw):
"""
Task intended to run via command line to update all apps' supported locales
based on the current version.
"""
for chunk in chunked(ids, 50):
for app in Webapp.objects.filter(id__in=chunk):
try:
if app.update_supported_locales():
_log(app, u'Updated supported locales')
except Exception:
_log(app, u'Updating supported locales failed.', exc_info=True)
@post_request_task(acks_late=True)
@write
def index_webapps(ids, **kw):
# DEPRECATED: call WebappIndexer.index_ids directly.
WebappIndexer.index_ids(ids, no_delay=True)
@post_request_task(acks_late=True)
@write
def unindex_webapps(ids, **kw):
# DEPRECATED: call WebappIndexer.unindexer directly.
WebappIndexer.unindexer(ids)
@task
def dump_app(id, **kw):
from mkt.webapps.serializers import AppSerializer
# Because @robhudson told me to.
# Note: not using storage because all these operations should be local.
target_dir = os.path.join(settings.DUMPED_APPS_PATH, 'apps',
str(id / 1000))
target_file = os.path.join(target_dir, str(id) + '.json')
try:
obj = Webapp.objects.get(pk=id)
except Webapp.DoesNotExist:
task_log.info(u'Webapp does not exist: {0}'.format(id))
return
req = RequestFactory().get('/')
req.user = AnonymousUser()
req.REGION = RESTOFWORLD
if not os.path.exists(target_dir):
os.makedirs(target_dir)
task_log.info('Dumping app {0} to {1}'.format(id, target_file))
res = AppSerializer(obj, context={'request': req}).data
json.dump(res, open(target_file, 'w'), cls=JSONEncoder)
return target_file
@task
def clean_apps(pks, **kw):
app_dir = os.path.join(settings.DUMPED_APPS_PATH, 'apps')
rm_directory(app_dir)
return pks
@task(ignore_result=False)
def dump_apps(ids, **kw):
task_log.info(u'Dumping apps {0} to {1}. [{2}]'
.format(ids[0], ids[-1], len(ids)))
for id in ids:
dump_app(id)
@task
def zip_apps(*args, **kw):
today = datetime.datetime.today().strftime('%Y-%m-%d')
files = ['apps'] + compile_extra_files(date=today)
tarball = compress_export(filename=today, files=files)
link_latest_export(tarball)
return tarball
def link_latest_export(tarball):
"""
Atomically links basename(tarball) to
DUMPED_APPS_PATH/tarballs/latest.tgz.
"""
tarball_name = os.path.basename(tarball)
target_dir = os.path.join(settings.DUMPED_APPS_PATH, 'tarballs')
target_file = os.path.join(target_dir, 'latest.tgz')
tmp_file = os.path.join(target_dir, '.latest.tgz')
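    # Create the symlink under a temporary name and then rename it over the
    # target; rename() is atomic, so readers never observe a missing or
    # half-written latest.tgz.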
if os.path.lexists(tmp_file):
os.unlink(tmp_file)
os.symlink(tarball_name, tmp_file)
os.rename(tmp_file, target_file)
return target_file
def rm_directory(path):
if os.path.exists(path):
shutil.rmtree(path)
def dump_all_apps_tasks():
all_pks = (Webapp.objects.visible()
.values_list('pk', flat=True)
.order_by('pk'))
return [dump_apps.si(pks) for pks in chunked(all_pks, 100)]
@task
def export_data(name=None):
today = datetime.datetime.today().strftime('%Y-%m-%d')
if name is None:
name = today
root = settings.DUMPED_APPS_PATH
directories = ['apps']
for directory in directories:
rm_directory(os.path.join(root, directory))
files = directories + compile_extra_files(date=today)
chord(dump_all_apps_tasks(),
compress_export.si(filename=name, files=files)).apply_async()
def compile_extra_files(date):
# Put some .txt files in place.
context = Context({'date': date, 'url': settings.SITE_URL})
files = ['license.txt', 'readme.txt']
if not os.path.exists(settings.DUMPED_APPS_PATH):
os.makedirs(settings.DUMPED_APPS_PATH)
created_files = []
for f in files:
template = loader.get_template('webapps/dump/apps/' + f)
dest = os.path.join(settings.DUMPED_APPS_PATH, f)
open(dest, 'w').write(template.render(context))
created_files.append(f)
return created_files
@task
def compress_export(filename, files):
# Note: not using storage because all these operations should be local.
target_dir = os.path.join(settings.DUMPED_APPS_PATH, 'tarballs')
target_file = os.path.join(target_dir, filename + '.tgz')
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Put some .txt files in place.
cmd = ['tar', 'czf', target_file, '-C',
settings.DUMPED_APPS_PATH] + files
task_log.info(u'Creating dump {0}'.format(target_file))
subprocess.call(cmd)
return target_file
@task(ignore_result=False)
def dump_user_installs(ids, **kw):
task_log.info(u'Dumping user installs {0} to {1}. [{2}]'
.format(ids[0], ids[-1], len(ids)))
users = (UserProfile.objects.filter(enable_recommendations=True)
.filter(id__in=ids))
for user in users:
hash = user.recommendation_hash
target_dir = os.path.join(settings.DUMPED_USERS_PATH, 'users', hash[0])
target_file = os.path.join(target_dir, '%s.json' % hash)
if not os.path.exists(target_dir):
try:
os.makedirs(target_dir)
except OSError:
pass # Catch race condition if file exists now.
# Gather data about user.
installed = []
zone = pytz.timezone(settings.TIME_ZONE)
for install in user.installed_set.all():
try:
app = install.addon
except Webapp.DoesNotExist:
continue
installed.append({
'id': app.id,
'slug': app.app_slug,
'installed': pytz.utc.normalize(
zone.localize(install.created)).strftime(
'%Y-%m-%dT%H:%M:%S')
})
data = {
'user': hash,
'region': user.region,
'lang': user.lang,
'installed_apps': installed,
}
task_log.info('Dumping user {0} to {1}'.format(user.id, target_file))
json.dump(data, open(target_file, 'w'), cls=JSONEncoder)
@task
def zip_users(*args, **kw):
# Note: not using storage because all these operations should be local.
today = datetime.datetime.utcnow().strftime('%Y-%m-%d')
target_dir = os.path.join(settings.DUMPED_USERS_PATH, 'tarballs')
target_file = os.path.join(target_dir, '{0}.tgz'.format(today))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Put some .txt files in place.
context = Context({'date': today, 'url': settings.SITE_URL})
files = ['license.txt', 'readme.txt']
for f in files:
template = loader.get_template('webapps/dump/users/' + f)
dest = os.path.join(settings.DUMPED_USERS_PATH, 'users', f)
open(dest, 'w').write(template.render(context))
cmd = ['tar', 'czf', target_file, '-C',
settings.DUMPED_USERS_PATH, 'users']
task_log.info(u'Creating user dump {0}'.format(target_file))
subprocess.call(cmd)
return target_file
def _fix_missing_icons(id):
try:
webapp = Webapp.objects.get(pk=id)
except Webapp.DoesNotExist:
_log(id, u'Webapp does not exist')
return
# Check for missing icons. If we find one important size missing, call
# fetch_icon for this app.
dirname = webapp.get_icon_dir()
destination = os.path.join(dirname, '%s' % webapp.id)
for size in (64, 128):
filename = '%s-%s.png' % (destination, size)
if not storage.exists(filename):
_log(id, u'Webapp is missing icon size %d' % (size, ))
return fetch_icon(webapp)
@task
@write
def fix_missing_icons(ids, **kw):
for id in ids:
_fix_missing_icons(id)
def _regenerate_icons_and_thumbnails(pk):
try:
webapp = Webapp.objects.get(pk=pk)
except Webapp.DoesNotExist:
        _log(pk, u'Webapp does not exist')
return
# Previews.
for preview in webapp.all_previews:
# Re-resize each preview by calling the task with the image that we
# have and asking the task to only deal with the thumbnail. We no
        # longer have the original, but that's fine: the image should be large
        # enough for us to generate a thumbnail.
resize_preview.delay(preview.image_path, preview, generate_image=False)
# Icons. The only thing we need to do is crush the 64x64 icon.
icon_path = os.path.join(webapp.get_icon_dir(), '%s-64.png' % webapp.id)
pngcrush_image.delay(icon_path)
@task
@write
def regenerate_icons_and_thumbnails(ids, **kw):
for pk in ids:
_regenerate_icons_and_thumbnails(pk)
@task
@write
def import_manifests(ids, **kw):
for app in Webapp.objects.filter(id__in=ids):
for version in app.versions.all():
try:
file_ = version.files.latest()
if file_.status == mkt.STATUS_DISABLED:
file_path = file_.guarded_file_path
else:
file_path = file_.file_path
manifest = WebAppParser().get_json_data(file_path)
m, c = AppManifest.objects.get_or_create(
version=version, manifest=json.dumps(manifest))
if c:
task_log.info(
'[Webapp:%s] Imported manifest for version %s' % (
app.id, version.id))
else:
task_log.info(
'[Webapp:%s] App manifest exists for version %s' % (
app.id, version.id))
except Exception as e:
task_log.info('[Webapp:%s] Error loading manifest for version '
'%s: %s' % (app.id, version.id, e))
class PreGenAPKError(Exception):
"""
An error encountered while trying to pre-generate an APK.
"""
@task
@use_master
def pre_generate_apk(app_id, **kw):
app = Webapp.objects.get(pk=app_id)
manifest_url = app.get_manifest_url()
task_log.info(u'pre-generating APK for app {a} at {url}'
.format(a=app, url=manifest_url))
if not manifest_url:
raise PreGenAPKError(u'Webapp {w} has an empty manifest URL'
.format(w=app))
try:
res = requests.get(
settings.PRE_GENERATE_APK_URL,
params={'manifestUrl': manifest_url},
headers={'User-Agent': settings.MARKETPLACE_USER_AGENT})
res.raise_for_status()
    except RequestException as exc:
raise PreGenAPKError(u'Error pre-generating APK for app {a} at {url}; '
u'generator={gen} (SSL cert ok?); '
u'{e.__class__.__name__}: {e}'
.format(a=app, url=manifest_url, e=exc,
gen=settings.PRE_GENERATE_APK_URL))
# The factory returns a binary APK blob but we don't need it.
res.close()
del res
@task
@use_master
def set_storefront_data(app_id, disable=False, **kw):
"""
Call IARC's SET_STOREFRONT_DATA endpoint.
"""
try:
app = Webapp.with_deleted.get(pk=app_id)
except Webapp.DoesNotExist:
return
app.set_iarc_storefront_data(disable=disable)
@task
@write
def fix_excluded_regions(ids, **kw):
"""
Task to fix an app's excluded_region set.
This will remove all excluded regions (minus special regions).
    Note: We only do this on apps with `_geodata__restricted` as false because
    restricted apps have user-defined region exclusions.
"""
apps = Webapp.objects.filter(id__in=ids).filter(_geodata__restricted=False)
for app in apps:
# Delete all excluded regions, except special regions.
#
# TODO: Add special region logic to `get_excluded_region_ids`?
app.addonexcludedregion.exclude(
region__in=mkt.regions.SPECIAL_REGION_IDS).delete()
task_log.info(u'[Webapp:%s] Excluded Regions cleared.' % app.pk)
# Trigger a re-index to update `region_exclusions` in ES.
index_webapps([app.pk for app in apps])
@task
def delete_logs(items, **kw):
task_log.info('[%s@%s] Deleting logs'
% (len(items), delete_logs.rate_limit))
ActivityLog.objects.filter(pk__in=items).exclude(
action__in=mkt.LOG_KEEP).delete()
@task
@set_task_user
def find_abuse_escalations(addon_id, **kw):
weekago = datetime.date.today() - datetime.timedelta(days=7)
add_to_queue = True
for abuse in AbuseReport.recent_high_abuse_reports(1, weekago, addon_id):
if EscalationQueue.objects.filter(addon=abuse.addon).exists():
# App is already in the queue, no need to re-add it.
task_log.info(u'[app:%s] High abuse reports, but already '
u'escalated' % abuse.addon)
add_to_queue = False
# We have an abuse report... has it been detected and dealt with?
logs = (AppLog.objects.filter(
activity_log__action=mkt.LOG.ESCALATED_HIGH_ABUSE.id,
addon=abuse.addon).order_by('-created'))
if logs:
abuse_since_log = AbuseReport.recent_high_abuse_reports(
1, logs[0].created, addon_id)
# If no abuse reports have happened since the last logged abuse
# report, do not add to queue.
if not abuse_since_log:
task_log.info(u'[app:%s] High abuse reports, but none since '
u'last escalation' % abuse.addon)
continue
# If we haven't bailed out yet, escalate this app.
msg = u'High number of abuse reports detected'
if add_to_queue:
EscalationQueue.objects.create(addon=abuse.addon)
mkt.log(mkt.LOG.ESCALATED_HIGH_ABUSE, abuse.addon,
abuse.addon.current_version, details={'comments': msg})
task_log.info(u'[app:%s] %s' % (abuse.addon, msg))
@task
@write
def populate_is_offline(ids, **kw):
for webapp in Webapp.objects.filter(pk__in=ids).iterator():
if webapp.guess_is_offline():
webapp.update(is_offline=True)
@task
@write
def adjust_categories(ids, **kw):
NEW_APP_CATEGORIES = {
425986: ['weather'],
444314: ['travel', 'weather'],
445008: ['travel', 'weather'],
450602: ['weather'],
455256: ['weather'],
455660: ['travel', 'weather'],
459364: ['weather'],
461279: ['social', 'weather'],
461371: ['lifestyle', 'weather'],
462257: ['utilities', 'weather'],
463108: ['weather'],
466698: ['utilities', 'weather'],
468173: ['weather'],
470946: ['travel', 'weather'],
482869: ['utilities', 'weather'],
482961: ['weather'],
496946: ['weather'],
499699: ['weather'],
501553: ['weather'],
501581: ['lifestyle', 'weather'],
501583: ['social', 'weather'],
502171: ['weather', 'photo-video'],
502173: ['weather', 'photo-video'],
502685: ['weather'],
503765: ['weather'],
505437: ['weather'],
506317: ['weather'],
506543: ['weather'],
506553: ['weather'],
506623: ['weather', 'travel'],
507091: ['weather'],
507139: ['weather'],
509150: ['weather'],
510118: ['weather', 'utilities'],
510334: ['weather', 'travel'],
510726: ['weather'],
511364: ['weather', 'utilities'],
424184: ['food-drink', 'health-fitness'],
439994: ['food-drink'],
442842: ['maps-navigation', 'food-drink'],
444056: ['lifestyle', 'food-drink'],
444070: ['lifestyle', 'food-drink'],
444222: ['food-drink', 'health-fitness'],
444694: ['lifestyle', 'food-drink'],
454558: ['food-drink', 'travel'],
455620: ['food-drink', 'entertainment'],
459304: ['food-drink', 'health-fitness'],
465445: ['shopping', 'food-drink'],
465700: ['food-drink', 'books-comics'],
467828: ['food-drink', 'education'],
469104: ['food-drink'],
470145: ['food-drink', 'health-fitness'],
471349: ['lifestyle', 'food-drink'],
476155: ['lifestyle', 'food-drink'],
477015: ['food-drink', 'travel'],
497282: ['food-drink', 'health-fitness'],
500359: ['food-drink', 'books-comics'],
501249: ['food-drink'],
501573: ['food-drink', 'entertainment'],
504143: ['health-fitness', 'food-drink'],
506111: ['health-fitness', 'food-drink'],
506691: ['health-fitness', 'food-drink'],
507921: ['books-comics', 'food-drink'],
508211: ['food-drink', 'lifestyle'],
508215: ['food-drink', 'lifestyle'],
508990: ['food-drink', 'games'],
506369: ['books-comics', 'humor'],
509746: ['entertainment', 'humor'],
509848: ['entertainment', 'humor'],
511390: ['entertainment', 'humor'],
511504: ['entertainment', 'humor'],
488424: ['internet', 'reference'],
489052: ['social', 'internet'],
499644: ['internet', 'utilities'],
500651: ['reference', 'internet'],
505043: ['utilities', 'internet'],
505407: ['utilities', 'internet'],
505949: ['internet', 'reference'],
508828: ['utilities', 'internet'],
508830: ['utilities', 'internet'],
509160: ['productivity', 'internet'],
509606: ['productivity', 'internet'],
509722: ['productivity', 'internet'],
510114: ['news', 'internet'],
364752: ['games', 'kids'],
364941: ['games', 'kids'],
449560: ['entertainment', 'kids'],
466557: ['education', 'kids'],
466811: ['photo-video', 'kids'],
473532: ['education', 'kids'],
473620: ['education', 'kids'],
473865: ['education', 'kids'],
500527: ['games', 'kids'],
502263: ['photo-video', 'kids'],
507497: ['education', 'kids'],
508089: ['education', 'kids'],
508229: ['education', 'kids'],
508239: ['education', 'kids'],
508247: ['education', 'kids'],
509404: ['education', 'kids'],
509464: ['education', 'kids'],
509468: ['education', 'kids'],
509470: ['education', 'kids'],
509472: ['education', 'kids'],
509474: ['education', 'kids'],
509476: ['education', 'kids'],
509478: ['education', 'kids'],
509484: ['education', 'kids'],
509486: ['education', 'kids'],
509488: ['education', 'kids'],
509490: ['education', 'kids'],
509492: ['education', 'kids'],
509494: ['education', 'kids'],
509496: ['education', 'kids'],
509498: ['education', 'kids'],
509500: ['education', 'kids'],
509502: ['education', 'kids'],
509504: ['education', 'kids'],
509508: ['education', 'kids'],
509512: ['education', 'kids'],
509538: ['education', 'kids'],
509540: ['education', 'kids'],
511502: ['games', 'kids'],
367693: ['utilities', 'science-tech'],
424272: ['science-tech', 'news'],
460891: ['science-tech', 'news'],
468278: ['science-tech', 'education'],
468406: ['science-tech', 'education'],
469765: ['science-tech', 'productivity'],
480750: ['science-tech', 'education'],
502187: ['science-tech', 'education'],
504637: ['science-tech', 'reference'],
506187: ['science-tech', 'utilities'],
508672: ['news', 'science-tech'],
510050: ['science-tech', 'education'],
511370: ['science-tech', 'reference'],
511376: ['science-tech', 'games'],
512174: ['education', 'science-tech'],
512194: ['utilities', 'science-tech'],
377564: ['lifestyle', 'personalization'],
451302: ['entertainment', 'personalization'],
452888: ['personalization', 'photo-video'],
466637: ['personalization', 'photo-video'],
477186: ['photo-video', 'personalization'],
477304: ['photo-video', 'personalization'],
477314: ['photo-video', 'personalization'],
480489: ['photo-video', 'personalization'],
480495: ['photo-video', 'personalization'],
481512: ['photo-video', 'personalization'],
482162: ['music', 'personalization'],
488892: ['social', 'personalization'],
500037: ['entertainment', 'personalization'],
500041: ['entertainment', 'personalization'],
506495: ['personalization', 'music'],
506581: ['entertainment', 'personalization'],
}
# Adjust apps whose categories have changed.
for chunk in chunked(ids, 100):
for app in Webapp.objects.filter(pk__in=chunk):
save = False
for k, v in CATEGORY_REDIRECTS.items():
if k in app.categories:
save = True
app.categories.remove(k)
app.categories.append(v)
if save:
task_log.info(u'[app:{0}] Adjusted categories: {1}'
.format(app, app.categories))
app.save()
# Add apps to new categories.
for pk, categories in NEW_APP_CATEGORIES.items():
try:
app = Webapp.objects.get(pk=pk)
except Webapp.DoesNotExist:
continue
app.categories = categories
app.save()
task_log.info(u'[app:{0}] Updated app categories: {1}'
.format(app, categories))
|
|
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, with the possible exception of the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import builtins
__all__ = ["Error", "open", "openfp"]
class Error(Exception):
pass
_AIFC_version = 0xA2805140 # Version 1 of AIFF-C
_skiplist = b'COMT', b'INST', b'MIDI', b'AESD', \
b'APPL', b'NAME', b'AUTH', b'(c) ', b'ANNO'
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = b''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
expon = _read_short(f) # 2 bytes
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
himant = _read_ulong(f) # 4 bytes
lomant = _read_ulong(f) # 4 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
else:
expon = expon - 16383
f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
return sign * f
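# Worked example: a 44100 Hz sample rate is stored in the COMM chunk as the
# 80-bit extended value with exponent word 0x400E and mantissa words
# 0xAC440000 / 0x00000000, so _read_float computes
#     expon  = 0x400E - 16383 = 15
#     result = (0xAC440000 * 0x100000000 + 0) * 2.0 ** (15 - 63) == 44100.0
# _write_float below performs the inverse conversion when the header is written.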
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_long(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
if len(s) > 255:
raise ValueError("string exceeds maximum pstring length")
f.write(struct.pack('b', len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(b'\x00')
def _write_float(f, x):
import math
if x < 0:
sign = 0x8000
x = x * -1
else:
sign = 0
if x == 0:
expon = 0
himant = 0
lomant = 0
else:
fmant, expon = math.frexp(x)
if expon > 16384 or fmant >= 1: # Infinity or NaN
expon = sign|0x7FFF
himant = 0
lomant = 0
else: # Finite
expon = expon + 16382
if expon < 0: # denormalized
fmant = math.ldexp(fmant, expon)
expon = 0
expon = expon | sign
fmant = math.ldexp(fmant, 32)
fsmant = math.floor(fmant)
himant = int(fsmant)
fmant = math.ldexp(fmant - fsmant, 32)
fsmant = math.floor(fmant)
lomant = int(fsmant)
_write_short(f, expon)
_write_long(f, himant)
_write_long(f, lomant)
from chunk import Chunk
class Aifc_read:
# Variables used in this class:
#
# These variables are available to the user though appropriate
# methods of this class:
# _file -- the open file with methods read(), close(), and seek()
# set through the __init__() method
# _nchannels -- the number of audio channels
# available through the getnchannels() method
# _nframes -- the number of audio frames
# available through the getnframes() method
# _sampwidth -- the number of bytes per audio sample
# available through the getsampwidth() method
# _framerate -- the sampling frequency
# available through the getframerate() method
# _comptype -- the AIFF-C compression type ('NONE' if AIFF)
# available through the getcomptype() method
# _compname -- the human-readable AIFF-C compression type
# available through the getcomptype() method
# _markers -- the marks in the audio file
# available through the getmarkers() and getmark()
# methods
# _soundpos -- the position in the audio stream
# available through the tell() method, set through the
# setpos() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _decomp -- the decompressor from builtin module cl
# _comm_chunk_read -- 1 iff the COMM chunk has been read
# _aifc -- 1 iff reading an AIFF-C file
# _ssnd_seek_needed -- 1 iff positioned correctly in audio
# file for readframes()
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
def initfp(self, file):
self._version = 0
self._convert = None
self._markers = []
self._soundpos = 0
self._file = Chunk(file)
if self._file.getname() != b'FORM':
raise Error('file does not start with FORM id')
formdata = self._file.read(4)
if formdata == b'AIFF':
self._aifc = 0
elif formdata == b'AIFC':
self._aifc = 1
else:
raise Error('not an AIFF or AIFF-C file')
self._comm_chunk_read = 0
while 1:
self._ssnd_seek_needed = 1
try:
chunk = Chunk(self._file)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == b'COMM':
self._read_comm_chunk(chunk)
self._comm_chunk_read = 1
elif chunkname == b'SSND':
self._ssnd_chunk = chunk
dummy = chunk.read(8)
self._ssnd_seek_needed = 0
elif chunkname == b'FVER':
self._version = _read_ulong(chunk)
elif chunkname == b'MARK':
self._readmark(chunk)
elif chunkname in _skiplist:
pass
else:
raise Error('unrecognized chunk type ' +
chunkname.decode('latin1'))
chunk.skip()
if not self._comm_chunk_read or not self._ssnd_chunk:
raise Error('COMM chunk and/or SSND chunk missing')
def __init__(self, f):
if isinstance(f, str):
f = builtins.open(f, 'rb')
# else, assume it is an open file object already
self.initfp(f)
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._ssnd_seek_needed = 1
self._soundpos = 0
def close(self):
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def getversion(self):
## return self._version
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error('marker {0!r} does not exist'.format(id))
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error('position not in range')
self._soundpos = pos
self._ssnd_seek_needed = 1
def readframes(self, nframes):
if self._ssnd_seek_needed:
self._ssnd_chunk.seek(0)
dummy = self._ssnd_chunk.read(8)
pos = self._soundpos * self._framesize
if pos:
self._ssnd_chunk.seek(pos + 8)
self._ssnd_seek_needed = 0
if nframes == 0:
return b''
data = self._ssnd_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels
* self._sampwidth)
return data
#
# Internal methods.
#
def _alaw2lin(self, data):
import audioop
return audioop.alaw2lin(data, 2)
def _ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def _adpcm2lin(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
# first time
self._adpcmstate = None
data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
return data
def _read_comm_chunk(self, chunk):
self._nchannels = _read_short(chunk)
self._nframes = _read_long(chunk)
self._sampwidth = (_read_short(chunk) + 7) // 8
self._framerate = int(_read_float(chunk))
self._framesize = self._nchannels * self._sampwidth
if self._aifc:
#DEBUG: SGI's soundeditor produces a bad size :-(
kludge = 0
if chunk.chunksize == 18:
kludge = 1
print('Warning: bad COMM chunk size')
chunk.chunksize = 23
#DEBUG end
self._comptype = chunk.read(4)
#DEBUG start
if kludge:
length = ord(chunk.file.read(1))
if length & 1 == 0:
length = length + 1
chunk.chunksize = chunk.chunksize + length
chunk.file.seek(-1, 1)
#DEBUG end
self._compname = _read_string(chunk)
if self._comptype != b'NONE':
if self._comptype == b'G722':
self._convert = self._adpcm2lin
self._framesize = self._framesize // 4
elif self._comptype in (b'ulaw', b'ULAW'):
self._convert = self._ulaw2lin
self._framesize = self._framesize // 2
elif self._comptype in (b'alaw', b'ALAW'):
self._convert = self._alaw2lin
self._framesize = self._framesize // 2
else:
raise Error('unsupported compression type')
else:
self._comptype = b'NONE'
self._compname = b'not compressed'
def _readmark(self, chunk):
nmarkers = _read_short(chunk)
# Some files appear to contain invalid counts.
# Cope with this by testing for EOF.
try:
for i in range(nmarkers):
id = _read_short(chunk)
pos = _read_long(chunk)
name = _read_string(chunk)
if pos or name:
# some files appear to have
# dummy markers consisting of
# a position 0 and name ''
self._markers.append((id, pos, name))
except EOFError:
print('Warning: MARK chunk contains only', end=' ')
print(len(self._markers), end=' ')
if len(self._markers) == 1: print('marker', end=' ')
else: print('markers', end=' ')
print('instead of', nmarkers)
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
def __init__(self, f):
if isinstance(f, str):
filename = f
f = builtins.open(f, 'wb')
else:
# else, assume it is an open file object already
filename = '???'
self.initfp(f)
if filename[-5:] == '.aiff':
self._aifc = 0
else:
self._aifc = 1
def initfp(self, file):
self._file = file
self._version = _AIFC_version
self._comptype = b'NONE'
self._compname = b'not compressed'
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._markers = []
self._marklength = 0
self._aifc = 1 # AIFF-C is default
def __del__(self):
if self._file:
self.close()
#
# User visible methods.
#
def aiff(self):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
self._aifc = 0
def aifc(self):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
self._aifc = 1
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if nchannels < 1:
raise Error('bad # of channels')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if sampwidth < 1 or sampwidth > 4:
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error('sample width not set')
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if framerate <= 0:
raise Error('bad frame rate')
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if comptype not in (b'NONE', b'ulaw', b'ULAW',
b'alaw', b'ALAW', b'G722'):
raise Error('unsupported compression type')
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if comptype not in (b'NONE', b'ulaw', b'ULAW',
b'alaw', b'ALAW', b'G722'):
raise Error('unsupported compression type')
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error('not all parameters set')
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
if id <= 0:
raise Error('marker ID must be > 0')
if pos < 0:
raise Error('marker position must be >= 0')
if not isinstance(name, str):
raise Error('marker name must be a string')
for i in range(len(self._markers)):
if id == self._markers[i][0]:
self._markers[i] = id, pos, name
return
self._markers.append((id, pos, name))
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error('marker {0!r} does not exist'.format(id))
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written(0)
if self._datawritten & 1:
# quick pad to even size
self._file.write(b'\x00')
self._datawritten = self._datawritten + 1
self._writemarkers()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten or \
self._marklength:
self._patchheader()
self._file.flush()
self._file = None
#
# Internal methods.
#
def _lin2alaw(self, data):
import audioop
return audioop.lin2alaw(data, 2)
def _lin2ulaw(self, data):
import audioop
return audioop.lin2ulaw(data, 2)
def _lin2adpcm(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
self._adpcmstate = None
data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
return data
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
if self._comptype in (b'ULAW', b'ALAW'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error('sample width must be 2 when compressing '
'with ulaw/ULAW or alaw/ALAW')
if self._comptype == b'G722':
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error('sample width must be 2 when compressing '
                                'with G.722 (ADPCM)')
if not self._nchannels:
raise Error('# channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('sampling rate not specified')
self._write_header(datasize)
def _init_compression(self):
if self._comptype == b'G722':
self._convert = self._lin2adpcm
elif self._comptype in (b'ulaw', b'ULAW'):
self._convert = self._lin2ulaw
elif self._comptype in (b'alaw', b'ALAW'):
self._convert = self._lin2alaw
else:
raise Error('unsupported compression type')
def _write_header(self, initlength):
if self._aifc and self._comptype != b'NONE':
self._init_compression()
self._file.write(b'FORM')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
self._datalength = self._datalength // 2
if self._datalength & 1:
self._datalength = self._datalength + 1
elif self._comptype == b'G722':
self._datalength = (self._datalength + 3) // 4
if self._datalength & 1:
self._datalength = self._datalength + 1
self._form_length_pos = self._file.tell()
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write(b'AIFC')
self._file.write(b'FVER')
_write_long(self._file, 4)
_write_long(self._file, self._version)
else:
self._file.write(b'AIFF')
self._file.write(b'COMM')
_write_long(self._file, commlength)
_write_short(self._file, self._nchannels)
self._nframes_pos = self._file.tell()
_write_long(self._file, self._nframes)
_write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write(b'SSND')
self._ssnd_length_pos = self._file.tell()
_write_long(self._file, self._datalength + 8)
_write_long(self._file, 0)
_write_long(self._file, 0)
def _write_form_length(self, datalength):
if self._aifc:
commlength = 18 + 5 + len(self._compname)
if commlength & 1:
commlength = commlength + 1
verslength = 12
else:
commlength = 18
verslength = 0
_write_long(self._file, 4 + verslength + self._marklength + \
8 + commlength + 16 + datalength)
return commlength
def _patchheader(self):
curpos = self._file.tell()
if self._datawritten & 1:
datalength = self._datawritten + 1
self._file.write(b'\x00')
else:
datalength = self._datawritten
if datalength == self._datalength and \
self._nframes == self._nframeswritten and \
self._marklength == 0:
self._file.seek(curpos, 0)
return
self._file.seek(self._form_length_pos, 0)
dummy = self._write_form_length(datalength)
self._file.seek(self._nframes_pos, 0)
_write_long(self._file, self._nframeswritten)
self._file.seek(self._ssnd_length_pos, 0)
_write_long(self._file, datalength + 8)
self._file.seek(curpos, 0)
self._nframes = self._nframeswritten
self._datalength = datalength
def _writemarkers(self):
if len(self._markers) == 0:
return
self._file.write(b'MARK')
length = 2
for marker in self._markers:
id, pos, name = marker
length = length + len(name) + 1 + 6
if len(name) & 1 == 0:
length = length + 1
_write_long(self._file, length)
self._marklength = length + 8
_write_short(self._file, len(self._markers))
for marker in self._markers:
id, pos, name = marker
_write_short(self._file, id)
_write_long(self._file, pos)
_write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open # B/W compatibility
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
print("Reading", fn)
print("nchannels =", f.getnchannels())
print("nframes =", f.getnframes())
print("sampwidth =", f.getsampwidth())
print("framerate =", f.getframerate())
print("comptype =", f.getcomptype())
print("compname =", f.getcompname())
if sys.argv[2:]:
gn = sys.argv[2]
print("Writing", gn)
g = open(gn, 'w')
g.setparams(f.getparams())
while 1:
data = f.readframes(1024)
if not data:
break
g.writeframes(data)
g.close()
f.close()
print("Done.")
|
|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provide util for actions
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import redis
from contextlib import contextmanager
from compass.db.api import adapter_holder as adapter_db
from compass.db.api import cluster as cluster_db
from compass.db.api import host as host_db
from compass.db.api import machine as machine_db
from compass.deployment.utils import constants as const
@contextmanager
def lock(lock_name, blocking=True, timeout=10):
redis_instance = redis.Redis()
instance_lock = redis_instance.lock(lock_name, timeout=timeout)
owned = False
try:
locked = instance_lock.acquire(blocking=blocking)
if locked:
owned = True
logging.debug('acquired lock %s', lock_name)
yield instance_lock
else:
            logging.info('lock %s is already held', lock_name)
yield None
except Exception as error:
logging.info(
'redis fails to acquire the lock %s', lock_name)
logging.exception(error)
yield None
finally:
if owned:
instance_lock.acquired_until = 0
instance_lock.release()
logging.debug('released lock %s', lock_name)
else:
logging.debug('nothing to release %s', lock_name)
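# A minimal usage sketch of the lock() helper; the lock name and the work done
# inside the block are hypothetical, and callers must handle the None case
# because the context manager yields None when acquisition fails:
#
#     with lock('cluster_deployment', blocking=False) as acquired:
#         if acquired is None:
#             logging.info('another worker holds the lock, skipping')
#         else:
#             pass  # do the work that requires exclusive access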
class ActionHelper(object):
@staticmethod
def get_adapter_info(adapter_id, cluster_id, user):
"""Get adapter information. Return a dictionary as below,
{
"id": 1,
"name": "xxx",
"flavors": [
{
"flavor_name": "xxx",
"roles": ['xxx', 'yyy', ...],
"template": "xxx.tmpl"
},
...
],
"metadata": {
"os_config": {
...
},
"package_config": {
...
}
},
"os_installer": {
"name": "cobbler",
"settings": {....}
},
"pk_installer": {
"name": "chef",
"settings": {....}
},
...
}
To view a complete output, please refer to backend doc.
"""
adapter_info = adapter_db.get_adapter(adapter_id, user=user)
metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
adapter_info.update({const.METADATA: metadata})
for flavor_info in adapter_info[const.FLAVORS]:
roles = flavor_info[const.ROLES]
flavor_info[const.ROLES] = ActionHelper._get_role_names(roles)
return adapter_info
@staticmethod
def _get_role_names(roles):
return [role[const.NAME] for role in roles]
@staticmethod
def get_cluster_info(cluster_id, user):
"""Get cluster information.Return a dictionary as below,
{
"id": 1,
"adapter_id": 1,
"os_version": "CentOS-6.5-x86_64",
"name": "cluster_01",
"flavor": {
"flavor_name": "zzz",
"template": "xx.tmpl",
"roles": [...]
}
"os_config": {..},
"package_config": {...},
"deployed_os_config": {},
"deployed_package_config": {},
"owner": "xxx"
}
"""
cluster_info = cluster_db.get_cluster(cluster_id, user=user)
# convert roles retrieved from db into a list of role names
roles_info = cluster_info.setdefault(
const.FLAVOR, {}).setdefault(const.ROLES, [])
cluster_info[const.FLAVOR][const.ROLES] = \
ActionHelper._get_role_names(roles_info)
# get cluster config info
cluster_config = cluster_db.get_cluster_config(cluster_id, user=user)
cluster_info.update(cluster_config)
deploy_config = cluster_db.get_cluster_deployed_config(cluster_id,
user=user)
cluster_info.update(deploy_config)
return cluster_info
@staticmethod
def get_hosts_info(cluster_id, hosts_id_list, user):
"""Get hosts information. Return a dictionary as below,
{
"hosts": {
1($host_id): {
"reinstall_os": True,
"mac": "xxx",
"name": "xxx",
"roles": [xxx, yyy]
},
"networks": {
"eth0": {
"ip": "192.168.1.1",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "192.168.1.0/24"
},
"eth1": {...}
},
"os_config": {},
"package_config": {},
"deployed_os_config": {},
"deployed_package_config": {}
},
2: {...},
....
}
}
"""
hosts_info = {}
for host_id in hosts_id_list:
info = cluster_db.get_cluster_host(cluster_id, host_id, user=user)
logging.debug("checking on info %r %r" % (host_id, info))
info[const.ROLES] = ActionHelper._get_role_names(info[const.ROLES])
# TODO(grace): Is following line necessary??
info.setdefault(const.ROLES, [])
config = cluster_db.get_cluster_host_config(cluster_id,
host_id,
user=user)
info.update(config)
networks = info[const.NETWORKS]
networks_dict = {}
# Convert networks from list to dictionary format
for entry in networks:
nic_info = {}
nic_info = {
entry[const.NIC]: {
const.IP_ADDR: entry[const.IP_ADDR],
const.NETMASK: entry[const.NETMASK],
const.MGMT_NIC_FLAG: entry[const.MGMT_NIC_FLAG],
const.PROMISCUOUS_FLAG: entry[const.PROMISCUOUS_FLAG],
const.SUBNET: entry[const.SUBNET]
}
}
networks_dict.update(nic_info)
info[const.NETWORKS] = networks_dict
hosts_info[host_id] = info
return hosts_info
@staticmethod
def save_deployed_config(deployed_config, user):
cluster_config = deployed_config[const.CLUSTER]
cluster_id = cluster_config[const.ID]
del cluster_config[const.ID]
cluster_db.update_cluster_deployed_config(cluster_id, user=user,
**cluster_config)
hosts_id_list = deployed_config[const.HOSTS].keys()
for host_id in hosts_id_list:
config = deployed_config[const.HOSTS][host_id]
cluster_db.update_cluster_host_deployed_config(cluster_id,
host_id,
user=user,
**config)
@staticmethod
def update_state(
cluster_id, host_id_list, user, **kwargs
):
# update all clusterhosts state
for host_id in host_id_list:
cluster_db.update_cluster_host_state(
cluster_id,
host_id,
user=user,
**kwargs
)
# update cluster state
cluster_db.update_cluster_state(
cluster_id,
user=user,
**kwargs
)
@staticmethod
def delete_cluster(
cluster_id, host_id_list, user, delete_underlying_host=False
):
if delete_underlying_host:
for host_id in host_id_list:
host_db.del_host(
host_id, True, True, user=user
)
cluster_db.del_cluster(
cluster_id, True, True, user=user
)
@staticmethod
def delete_cluster_host(
cluster_id, host_id, user, delete_underlying_host=False
):
if delete_underlying_host:
host_db.del_host(
host_id, True, True, user=user
)
cluster_db.del_cluster_host(
cluster_id, host_id, True, True, user=user
)
@staticmethod
def delete_host(host_id, user):
host_db.del_host(
host_id, True, True, user=user
)
@staticmethod
def host_ready(host_id, from_database_only, user):
host_db.update_host_state_internal(
host_id, from_database_only=from_database_only,
user=user, ready=True
)
@staticmethod
def cluster_host_ready(
cluster_id, host_id, from_database_only, user
):
cluster_db.update_cluster_host_state_internal(
cluster_id, host_id, from_database_only=from_database_only,
user=user, ready=True
)
@staticmethod
def is_cluster_os_ready(cluster_id, user=None):
return cluster_db.is_cluster_os_ready(cluster_id, user=user)
@staticmethod
def cluster_ready(cluster_id, from_database_only, user):
cluster_db.update_cluster_state_internal(
cluster_id, from_database_only=from_database_only,
user=user, ready=True
)
@staticmethod
def get_machine_IPMI(machine_id, user):
machine_info = machine_db.get_machine(machine_id, user=user)
return machine_info[const.IPMI_CREDS]
|
|
from __future__ import unicode_literals
import base64
from django.contrib.auth.models import Group, Permission, User
from django.core.urlresolvers import ResolverMatch
from django.db import models
from django.test import TestCase
from django.utils import unittest
from rest_framework import (
HTTP_HEADER_ENCODING, authentication, generics, permissions, serializers,
status
)
from rest_framework.compat import get_model_name, guardian
from rest_framework.filters import DjangoObjectPermissionsFilter
from rest_framework.routers import DefaultRouter
from rest_framework.test import APIRequestFactory
from tests.models import BasicModel
factory = APIRequestFactory()
class BasicSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class RootView(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
class GetQuerySetListView(generics.ListCreateAPIView):
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
def get_queryset(self):
return BasicModel.objects.all()
class EmptyListView(generics.ListCreateAPIView):
queryset = BasicModel.objects.none()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
root_view = RootView.as_view()
api_root_view = DefaultRouter().get_api_root_view()
instance_view = InstanceView.as_view()
get_queryset_list_view = GetQuerySetListView.as_view()
empty_list_view = EmptyListView.as_view()
def basic_auth_header(username, password):
credentials = ('%s:%s' % (username, password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
return 'Basic %s' % base64_credentials
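# For example, basic_auth_header('permitted', 'password') returns
# 'Basic cGVybWl0dGVkOnBhc3N3b3Jk', i.e. 'Basic ' followed by the base64
# encoding of 'permitted:password'.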
class ModelPermissionsIntegrationTests(TestCase):
def setUp(self):
User.objects.create_user('disallowed', 'disallowed@example.com', 'password')
user = User.objects.create_user('permitted', 'permitted@example.com', 'password')
user.user_permissions = [
Permission.objects.get(codename='add_basicmodel'),
Permission.objects.get(codename='change_basicmodel'),
Permission.objects.get(codename='delete_basicmodel')
]
user = User.objects.create_user('updateonly', 'updateonly@example.com', 'password')
user.user_permissions = [
Permission.objects.get(codename='change_basicmodel'),
]
self.permitted_credentials = basic_auth_header('permitted', 'password')
self.disallowed_credentials = basic_auth_header('disallowed', 'password')
self.updateonly_credentials = basic_auth_header('updateonly', 'password')
BasicModel(text='foo').save()
def test_has_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_api_root_view_discard_default_django_model_permission(self):
"""
We check that DEFAULT_PERMISSION_CLASSES can
apply to APIRoot view. More specifically we check expected behavior of
``_ignore_model_permissions`` attribute support.
"""
request = factory.get('/', format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
request.resolver_match = ResolverMatch('get', (), {})
response = api_root_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_queryset_has_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = get_queryset_list_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_has_put_permissions(self):
request = factory.put('/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_has_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_does_not_have_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_put_permissions(self):
request = factory.put('/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_options_permitted(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.permitted_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['POST'])
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.permitted_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
def test_options_disallowed(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.disallowed_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.disallowed_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
def test_options_updateonly(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.updateonly_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.updateonly_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
def test_empty_view_does_not_assert(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = empty_list_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class BasicPermModel(models.Model):
text = models.CharField(max_length=100)
class Meta:
app_label = 'tests'
permissions = (
('view_basicpermmodel', 'Can view basic perm model'),
# add, change, delete built in to django
)
class BasicPermSerializer(serializers.ModelSerializer):
class Meta:
model = BasicPermModel
# Custom object-level permission, that includes 'view' permissions
class ViewObjectPermissions(permissions.DjangoObjectPermissions):
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': ['%(app_label)s.view_%(model_name)s'],
'HEAD': ['%(app_label)s.view_%(model_name)s'],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
class ObjectPermissionInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicPermModel.objects.all()
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
object_permissions_view = ObjectPermissionInstanceView.as_view()
class ObjectPermissionListView(generics.ListAPIView):
queryset = BasicPermModel.objects.all()
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
object_permissions_list_view = ObjectPermissionListView.as_view()
class GetQuerysetObjectPermissionInstanceView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
def get_queryset(self):
return BasicPermModel.objects.all()
get_queryset_object_permissions_view = GetQuerysetObjectPermissionInstanceView.as_view()
@unittest.skipUnless(guardian, 'django-guardian not installed')
class ObjectPermissionsIntegrationTests(TestCase):
"""
Integration tests for the object level permissions API.
"""
def setUp(self):
from guardian.shortcuts import assign_perm
# create users
create = User.objects.create_user
users = {
'fullaccess': create('fullaccess', 'fullaccess@example.com', 'password'),
'readonly': create('readonly', 'readonly@example.com', 'password'),
'writeonly': create('writeonly', 'writeonly@example.com', 'password'),
'deleteonly': create('deleteonly', 'deleteonly@example.com', 'password'),
}
# give everyone model level permissions, as we are not testing those
everyone = Group.objects.create(name='everyone')
model_name = get_model_name(BasicPermModel)
app_label = BasicPermModel._meta.app_label
f = '{0}_{1}'.format
perms = {
'view': f('view', model_name),
'change': f('change', model_name),
'delete': f('delete', model_name)
}
for perm in perms.values():
perm = '{0}.{1}'.format(app_label, perm)
assign_perm(perm, everyone)
everyone.user_set.add(*users.values())
# appropriate object level permissions
readers = Group.objects.create(name='readers')
writers = Group.objects.create(name='writers')
deleters = Group.objects.create(name='deleters')
model = BasicPermModel.objects.create(text='foo')
assign_perm(perms['view'], readers, model)
assign_perm(perms['change'], writers, model)
assign_perm(perms['delete'], deleters, model)
readers.user_set.add(users['fullaccess'], users['readonly'])
writers.user_set.add(users['fullaccess'], users['writeonly'])
deleters.user_set.add(users['fullaccess'], users['deleteonly'])
self.credentials = {}
for user in users.values():
self.credentials[user.username] = basic_auth_header(user.username, 'password')
# Delete
def test_can_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.credentials['deleteonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_cannot_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.credentials['readonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Update
def test_can_update_permissions(self):
request = factory.patch(
'/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.credentials['writeonly']
)
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('text'), 'foobar')
def test_cannot_update_permissions(self):
request = factory.patch(
'/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.credentials['deleteonly']
)
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_cannot_update_permissions_non_existing(self):
request = factory.patch(
'/999', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.credentials['deleteonly']
)
response = object_permissions_view(request, pk='999')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# Read
def test_can_read_permissions(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['readonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_cannot_read_permissions(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['writeonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_can_read_get_queryset_permissions(self):
"""
same as ``test_can_read_permissions`` but with a view
        that relies on ``.get_queryset()`` instead of ``.queryset``.
"""
request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['readonly'])
response = get_queryset_object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Read list
def test_can_read_list_permissions(self):
request = factory.get('/', HTTP_AUTHORIZATION=self.credentials['readonly'])
object_permissions_list_view.cls.filter_backends = (DjangoObjectPermissionsFilter,)
response = object_permissions_list_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data[0].get('id'), 1)
def test_cannot_read_list_permissions(self):
request = factory.get('/', HTTP_AUTHORIZATION=self.credentials['writeonly'])
object_permissions_list_view.cls.filter_backends = (DjangoObjectPermissionsFilter,)
response = object_permissions_list_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertListEqual(response.data, [])
class BasicPerm(permissions.BasePermission):
def has_permission(self, request, view):
return False
class BasicPermWithDetail(permissions.BasePermission):
message = 'Custom: You cannot access this resource'
def has_permission(self, request, view):
return False
class BasicObjectPerm(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return False
class BasicObjectPermWithDetail(permissions.BasePermission):
message = 'Custom: You cannot access this resource'
def has_object_permission(self, request, view, obj):
return False
class PermissionInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
class DeniedView(PermissionInstanceView):
permission_classes = (BasicPerm,)
class DeniedViewWithDetail(PermissionInstanceView):
permission_classes = (BasicPermWithDetail,)
class DeniedObjectView(PermissionInstanceView):
permission_classes = (BasicObjectPerm,)
class DeniedObjectViewWithDetail(PermissionInstanceView):
permission_classes = (BasicObjectPermWithDetail,)
denied_view = DeniedView.as_view()
denied_view_with_detail = DeniedViewWithDetail.as_view()
denied_object_view = DeniedObjectView.as_view()
denied_object_view_with_detail = DeniedObjectViewWithDetail.as_view()
class CustomPermissionsTests(TestCase):
def setUp(self):
BasicModel(text='foo').save()
User.objects.create_user('username', 'username@example.com', 'password')
credentials = basic_auth_header('username', 'password')
self.request = factory.get('/1', format='json', HTTP_AUTHORIZATION=credentials)
self.custom_message = 'Custom: You cannot access this resource'
def test_permission_denied(self):
response = denied_view(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(detail, self.custom_message)
def test_permission_denied_with_custom_detail(self):
response = denied_view_with_detail(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(detail, self.custom_message)
def test_permission_denied_for_object(self):
response = denied_object_view(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(detail, self.custom_message)
def test_permission_denied_for_object_with_custom_detail(self):
response = denied_object_view_with_detail(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(detail, self.custom_message)
|
|
import argparse
import json
import os
import sys
from concurrent.futures import as_completed
from contextlib import contextmanager
from pprint import pformat
from typing import Tuple, Iterable, List, Callable
from Tests.Marketplace.marketplace_constants import GCPConfig, PACKS_FOLDER, PACKS_FULL_PATH, IGNORED_FILES
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
from demisto_sdk.commands.find_dependencies.find_dependencies import PackDependencies, parse_for_pack_metadata
from pebble import ProcessPool, ProcessFuture
PROCESS_FAILURE = False
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Create json file of all packs dependencies.")
parser.add_argument('-o', '--output_path', help="The full path to store created file", required=True)
parser.add_argument('-i', '--id_set_path', help="The full path of id set", required=True)
return parser.parse_args()
@contextmanager
def ProcessPoolHandler() -> ProcessPool:
""" Process pool Handler which terminate all processes in case of Exception.
Yields:
ProcessPool: Pebble process pool.
"""
with ProcessPool(max_workers=3) as pool:
try:
yield pool
except Exception:
logging.exception("Gracefully release all resources due to Error...")
raise
finally:
pool.close()
pool.join()
def wait_futures_complete(futures: List[ProcessFuture], done_fn: Callable):
"""Wait for all futures to complete, Raise exception if occurred.
Args:
futures: futures to wait for.
done_fn: Function to run on result.
Raises:
Exception: Raise caught exception for further cleanups.
"""
for future in as_completed(futures):
try:
result = future.result()
done_fn(result)
except Exception as e:
logging.exception(e)
raise
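# Illustrative sketch (not part of the original script): how ProcessPoolHandler and
# wait_futures_complete are meant to be combined. The builtin ``abs`` stands in for a
# real task so the snippet stays self-contained and picklable; the real caller
# schedules calculate_single_pack_dependencies instead.
def _example_process_pool_usage():
    results = []
    with ProcessPoolHandler() as pool:
        futures = [pool.schedule(abs, args=(n,), timeout=10) for n in (-1, -2, -3)]
        wait_futures_complete(futures=futures, done_fn=results.append)
    return sorted(results)  # -> [1, 2, 3]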
def calculate_single_pack_dependencies(pack: str, dependency_graph: object) -> Tuple[dict, list, str]:
"""
    Calculates pack dependencies given a pack and a dependency graph.
    First it extracts the dependencies subgraph of the given graph using a DFS with the pack as source.
    Then, for every dependency of that pack, it replaces the 'mandatory_for_packs' key with a boolean key 'mandatory'
    which indicates whether this dependency is mandatory for this pack or not.
    Finally, using that subgraph, it collects the first-level and all-level dependencies.
Args:
pack: The pack for which we need to calculate the dependencies
dependency_graph: The full dependencies graph
Returns:
first_level_dependencies: A dict of the form {'dependency_name': {'mandatory': < >, 'display_name': < >}}
all_level_dependencies: A list with all dependencies names
pack: The pack name
"""
install_logging('Calculate_Packs_Dependencies.log', include_process_name=True, logger=logging)
first_level_dependencies = {}
all_level_dependencies = []
try:
logging.info(f"Calculating {pack} pack dependencies.")
subgraph = PackDependencies.get_dependencies_subgraph_by_dfs(dependency_graph, pack)
for dependency_pack, additional_data in subgraph.nodes(data=True):
logging.debug(f'Iterating dependency {dependency_pack} for pack {pack}')
additional_data['mandatory'] = pack in additional_data['mandatory_for_packs']
del additional_data['mandatory_for_packs']
first_level_dependencies, all_level_dependencies = parse_for_pack_metadata(subgraph, pack)
except Exception:
logging.exception(f"Failed calculating {pack} pack dependencies")
raise
return first_level_dependencies, all_level_dependencies, pack
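# Minimal sketch (not part of the original script) of the node-attribute rewrite
# described in the docstring above: 'mandatory_for_packs' is collapsed into a
# per-pack boolean 'mandatory'. The node data below is hypothetical.
def _example_mandatory_flag_rewrite(pack='CommonScripts'):
    additional_data = {'mandatory_for_packs': ['CommonScripts', 'Base']}
    additional_data['mandatory'] = pack in additional_data['mandatory_for_packs']
    del additional_data['mandatory_for_packs']
    return additional_data  # -> {'mandatory': True}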
def get_all_packs_dependency_graph(id_set: dict, packs: list) -> Iterable:
"""
Gets a graph with dependencies for all packs
Args:
id_set: The content of id_set file
packs: The packs that should be part of the dependencies calculation
Returns:
A graph with all packs dependencies
"""
logging.info("Calculating pack dependencies.")
try:
dependency_graph = PackDependencies.build_all_dependencies_graph(packs, id_set=id_set, verbose=False)
return dependency_graph
except Exception:
logging.exception("Failed calculating dependencies graph")
sys.exit(2)
def select_packs_for_calculation() -> list:
"""
    Select the packs for which the dependencies will be calculated.
Returns:
A list of packs
"""
IGNORED_FILES.append(GCPConfig.BASE_PACK) # skip dependency calculation of Base pack
packs = []
for pack in os.scandir(PACKS_FULL_PATH):
if not pack.is_dir() or pack.name in IGNORED_FILES:
logging.warning(f"Skipping dependency calculation of {pack.name} pack.")
continue # skipping ignored packs
packs.append(pack.name)
return packs
def get_id_set(id_set_path: str) -> dict:
"""
    Parses the file at id_set_path and returns its content.
Args:
id_set_path: The path of the id_set file
Returns:
The parsed content of id_set
"""
with open(id_set_path, 'r') as id_set_file:
id_set = json.load(id_set_file)
return id_set
def calculate_all_packs_dependencies(pack_dependencies_result: dict, id_set: dict, packs: list) -> None:
"""
    Calculates the pack dependencies and adds them to 'pack_dependencies_result' in parallel.
    First, the method generates the full dependency graph.
    Then, using a process pool, we extract the dependencies of each pack and add them to 'pack_dependencies_result'.
Args:
pack_dependencies_result: The dict to which the results should be added
id_set: The id_set content
packs: The packs that should be part of the dependencies calculation
"""
def add_pack_metadata_results(results: Tuple) -> None:
"""
This is a callback that should be called once the result of the future is ready.
The results include: first_level_dependencies, all_level_dependencies, pack_name
Using these results we write the dependencies
"""
try:
first_level_dependencies, all_level_dependencies, pack_name = results
logging.debug(f'Got dependencies for pack {pack_name}\n: {pformat(all_level_dependencies)}')
pack_dependencies_result[pack_name] = {
"dependencies": first_level_dependencies,
"displayedImages": list(first_level_dependencies.keys()),
"allLevelDependencies": all_level_dependencies,
"path": os.path.join(PACKS_FOLDER, pack_name),
"fullPath": os.path.abspath(os.path.join(PACKS_FOLDER, pack_name))
}
except Exception:
logging.exception('Failed to collect pack dependencies results')
raise
# Generating one graph with dependencies for all packs
dependency_graph = get_all_packs_dependency_graph(id_set, packs)
with ProcessPoolHandler() as pool:
futures = []
for pack in dependency_graph:
futures.append(pool.schedule(calculate_single_pack_dependencies, args=(pack, dependency_graph), timeout=10))
wait_futures_complete(futures=futures, done_fn=add_pack_metadata_results)
def main():
""" Main function for iterating over existing packs folder in content repo and creating json of all
packs dependencies. The logic of pack dependency is identical to sdk find-dependencies command.
"""
install_logging('Calculate_Packs_Dependencies.log', include_process_name=True, logger=logging)
option = option_handler()
output_path = option.output_path
id_set_path = option.id_set_path
id_set = get_id_set(id_set_path)
pack_dependencies_result: dict = {}
logging.info("Selecting packs for dependencies calculation")
packs = select_packs_for_calculation()
calculate_all_packs_dependencies(pack_dependencies_result, id_set, packs)
logging.info(f"Number of created pack dependencies entries: {len(pack_dependencies_result.keys())}")
# finished iteration over pack folders
logging.success("Finished dependencies calculation")
with open(output_path, 'w') as pack_dependencies_file:
json.dump(pack_dependencies_result, pack_dependencies_file, indent=4)
logging.success(f"Created packs dependencies file at: {output_path}")
if __name__ == "__main__":
main()
|
|
from importlib import import_module
import os
import sys
from django.apps import apps
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.migration import Migration
from django.db.migrations.state import ModelState
from django.db.migrations import operations
from django.utils import six
from django.conf import settings
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
# Get the migrations module directory
module_name = self.migrations_module(app_config.label)
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py[c|o] files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py") or name.endswith(".pyc") or name.endswith(".pyo"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
south_style_migrations = False
for migration_name in migration_names:
try:
migration_module = import_module("%s.%s" % (module_name, migration_name))
except ImportError as e:
# Ignore South import errors, as we're triggering them
if "south" in str(e).lower():
south_style_migrations = True
break
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError("Migration %s in app %s has no Migration class" % (migration_name, app_config.label))
# Ignore South-style migrations
if hasattr(migration_module.Migration, "forwards"):
south_style_migrations = True
break
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
if south_style_migrations:
self.unmigrated_apps.add(app_config.label)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises KeyError"
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError("There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix))
elif len(results) == 0:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def build_graph(self, ignore_unmigrated=False):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Ensure this replacement migration is not in applied_migrations
self.applied_migrations.discard(key)
# Do the check. We can replace if all our replace targets are
# applied, or if all of them are unapplied.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
can_replace = all(applied_statuses) or (not any(applied_statuses))
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replaced in normal:
# We don't care if the replaced migration doesn't exist;
# the usage pattern here is to delete things after a while.
del normal[replaced]
for child_key in reverse_dependencies.get(replaced, set()):
if child_key in migration.replaces:
continue
normal[child_key].dependencies.remove(replaced)
normal[child_key].dependencies.append(key)
normal[key] = migration
# Mark the replacement as applied if all its replaced ones are
if all(applied_statuses):
self.applied_migrations.add(key)
# Finally, make a graph and load everything into it
self.graph = MigrationGraph()
for key, migration in normal.items():
self.graph.add_node(key, migration)
for key, migration in normal.items():
for parent in migration.dependencies:
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they
# even have migrations.
if parent[1] == "__first__" and parent not in self.graph:
if parent[0] in self.unmigrated_apps:
if ignore_unmigrated:
migration.dependencies.remove(parent)
parent = None
else:
# This app isn't migrated, but something depends on it.
# We'll add a fake initial migration for it into the
# graph.
app_config = apps.get_app_config(parent[0])
ops = []
for model in app_config.get_models():
model_state = ModelState.from_model(model)
ops.append(
operations.CreateModel(
name=model_state.name,
fields=model_state.fields,
options=model_state.options,
bases=model_state.bases,
)
)
new_migration = type(
"FakeInitialMigration",
(Migration, ),
{"operations": ops},
)(parent[1], parent[0])
self.graph.add_node(parent, new_migration)
self.applied_migrations.add(parent)
elif parent[0] in self.migrated_apps:
parent = list(self.graph.root_nodes(parent[0]))[0]
else:
raise ValueError("Dependency on unknown app %s" % parent[0])
if parent is not None:
self.graph.add_dependency(key, parent)
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return dict((app_label, seen_apps[app_label]) for app_label in conflicting_apps)
class BadMigrationError(Exception):
"""
Raised when there's a bad migration (unreadable/bad format/etc.)
"""
pass
class AmbiguityError(Exception):
"""
Raised when more than one migration matches a name prefix
"""
pass
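# Illustrative sketch (not part of this module): typical read-only use of
# MigrationLoader. Assumes a configured Django project with a usable database
# connection; "default" is the standard connection alias.
def _example_loader_usage():
    from django.db import connections
    loader = MigrationLoader(connections['default'])
    conflicts = loader.detect_conflicts()   # {app_label: {conflicting migration names}}
    leaves = loader.graph.leaf_nodes()      # [(app_label, migration_name), ...]
    return conflicts, leaves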
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible; it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import time
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
FLAGS = None
def data_type():
"""Return the type of the activations, weights, and placeholder variables."""
if FLAGS.use_fp16:
return tf.float16
else:
return tf.float32
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
return data
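# Quick check (not part of the original tutorial) of the rescaling used above:
# pixel value 0 maps to -0.5, 127.5 to 0.0 and 255 to 0.5.
def _example_rescaling():
  raw = numpy.array([0.0, 127.5, 255.0], dtype=numpy.float32)
  return (raw - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH  # -> [-0.5, 0.0, 0.5]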
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=numpy.float32)
labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 *
numpy.sum(numpy.argmax(predictions, 1) == labels) /
predictions.shape[0])
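# Tiny worked example (not part of the original tutorial) for error_rate(): two of
# the three argmax predictions match the labels, so the error rate is ~33.3%.
def _example_error_rate():
  preds = numpy.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
  labels = numpy.array([0, 1, 1])
  return error_rate(preds, labels)  # -> 33.333...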
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
data_type(),
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
data_type(),
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.initialize_all_variables().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED, dtype=data_type()))
conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv2_weights = tf.Variable(tf.truncated_normal(
[5, 5, 32, 64], stddev=0.1,
seed=SEED, dtype=data_type()))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc2_biases = tf.Variable(tf.constant(
0.1, shape=[NUM_LABELS], dtype=data_type()))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, dtype=data_type())
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.initialize_all_variables().run()
print('Initialized!')
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = sess.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
if step % EVAL_FREQUENCY == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * BATCH_SIZE / train_size,
1000 * elapsed_time / EVAL_FREQUENCY))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
print('Validation error: %.1f%%' % error_rate(
eval_in_batches(validation_data, sess), validation_labels))
sys.stdout.flush()
# Finally print the result!
test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
print('Test error: %.1f%%' % test_error)
if FLAGS.self_test:
print('test_error', test_error)
assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
test_error,)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--use_fp16',
default=False,
help='Use half floats instead of full floats if True.',
action='store_true'
)
parser.add_argument(
'--self_test',
default=False,
action='store_true',
help='True if running a self test.'
)
FLAGS = parser.parse_args()
tf.app.run()
|
|
__version__ = "0.0.9"
__status__ = "Development"
__license__ = "MIT"
from pysphere import VIServer
import time
import sys
import signal
try:
import argparse
except:
print("Update to version 2.7 or install argparse")
sys.exit(1)
import logging
def signal_handler(signal, frame):
logger.warn("Caught Ctrl+C")
logger.warn("Will attempt a graceful exit")
sys.exit(0)
def logger_init(verbose=False, logPath=None):
"""Logger Initialization
verbose: Nimmt einen Bool entgegen. Dieser sendet den output auf die Konsole.
logPath: Nimmt einen String entgegen. Ort der Logdatei. Wird automatisch angelegt oder haengt Logs an.
Gibt logger zurueck.
"""
logger = logging.getLogger('USV2')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if verbose or logPath is None:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
if logPath:
fh = logging.FileHandler(logPath)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def vConnect(VIHost, VIUser, VIPassword):
"""Verbindungs Funktion zum Server
Baut eine Verbindung mit VCS Server auf und speichert die Verbindungsinformationen in der VISerever variable des Typs "VIServer".
"""
logger.info("Connecting to vSphere: " + VIHost)
VirtualServerInstance = VIServer()
VirtualServerInstance.connect(VIHost, VIUser, VIPassword)
if VirtualServerInstance:
logger.info("Connected to vSphere: " + VIHost)
logger.info(VirtualServerInstance.get_api_type() + " API " + VirtualServerInstance.get_api_version())
return VirtualServerInstance
else:
logger.error("Connection to the vSphere failed.")
sys.exit(2)
def vDisconnect(VIServer):
"""Verbindungs-Trennungs Funktion
Baut die Verbindung mit dem VCS Server ab.
"""
logger.info("Disconnecting from vSphere")
return VIServer.disconnect()
def VirtualMachine_List(VIServer, VirtualMachineList):
"""
    Lists the virtual machines found with the provided information.
"""
logger.info("Listing found Virtual Machines using provided information")
logger.info("Virtual Machine count is " + str(len(VirtualMachineList)))
for VirtualMachineEntry in range(len(VirtualMachineList)):
VirtualMachine = VIServer.get_vm_by_path(VirtualMachineList[VirtualMachineEntry])
logger.info("("+ str(VirtualMachineEntry+1) +"/"+ str(len(VirtualMachineList)) +") " + VirtualMachine.get_property("name"))
def VirtualMachine_Start(VIServer, VirtualMachineList):
"""VMs Start Funktion
Faehrt die OS auf den VMs asyncron hoch.
Wenn die VMWare Tools nich installiert sind, werden die VMs ignoriert und dieser vorfall gelogt.
"""
for VirtualMachineEntry in VirtualMachineList:
VirtualMachine = VIServer.get_vm_by_path(VirtualMachineEntry)
if VirtualMachine.is_powered_off():
try:
logger.info("Sending Power-On signal to " + VirtualMachine.get_property("name"))
VirtualMachine.power_on()
except:
logger.error(VirtualMachine.get_property("name") + " could not be turned on")
def VirtualMachine_Shutdown(VIServer, VirtualMachineList):
"""VMs Shutdown Funktion
Faehrt die OS auf den VMs asyncron runter.
Wenn die VMWare Tools nich installiert sind, werden die VMs mit dem Befehl "Power Off" heruntergefahren.
"""
for VirtualMachineEntry in VirtualMachineList:
VirtualMachine = VIServer.get_vm_by_path(VirtualMachineEntry)
if VirtualMachine.is_powered_on():
try:
logger.info("Sending Guest Shutdown signal to " + VirtualMachine.get_property("name"))
VirtualMachine.shutdown_guest()
except:
logger.error("Problem with VMWare Tools on " + VirtualMachine.get_property("name"))
logger.warn("Switching to Power-Off sequence on " + VirtualMachine.get_property("name"))
VirtualMachine.power_off(sync_run=False)
def VirtualMachine_PowerOff(VIServer, VirtualMachineList):
"""VMs Power Off Funktion
Faehrt VMs mit dem Befehl "Power Off" asyncron runter.
"""
for VirtualMachineEntry in VirtualMachineList:
VirtualMachine = VIServer.get_vm_by_path(VirtualMachineEntry)
if VirtualMachine.is_powered_on():
try:
logger.info("Sending Power-Off signal to " + VirtualMachine.get_property("name"))
VirtualMachine.power_off(sync_run=False)
except:
logger.critical("Power-Off signal failed on " + VirtualMachine.get_property("name"))
def VirtualMachine_ServerList(VIServer, categories, cluster=None, status=None, tagID=201):
"""
    The function first fetches the "customValue" and "config.files.vmPathName" properties (i.e. the path) of all VMs from the server.
    From these, only the VMs that have the key 201 (the "shutdown order" field) and whose value matches one of the given parameters are kept.
    Then all running VMs are listed; these are matched against the previously filtered VMs and returned as a single list.
"""
if status and cluster:
VirtualMachineFetchResult = VIServer.get_registered_vms(cluster=cluster, status=status)
logger.info("Fetching Virtual Machines from cluster: '" + cluster + "' with the status: '" + status + "'")
elif status and cluster is None:
VirtualMachineFetchResult = VIServer.get_registered_vms(status=status)
logger.info("Fetching Virtual Machines with the status: '" + status + "'")
elif cluster and status is None:
VirtualMachineFetchResult = VIServer.get_registered_vms(cluster=cluster)
logger.info("Fetching Virtual Machines from cluster: '" + cluster + "'")
else:
VirtualMachineFetchResult = VIServer.get_registered_vms()
logger.info("Fetching all Virtual Machines from vSphere")
if categories and tagID:
logger.info(
"Filtering Virtual Machines by annotation ID: " + str(tagID) + " with the following content: " + ", ".join(
categories))
VirtualMachineList = []
VirtualMachinesWithAnnotation = []
VirtualMachinesRelated = []
ProperetyNames = ['customValue', 'config.files.vmPathName', 'config.template']
ProperetyResults = VIServer._retrieve_properties_traversal(property_names=ProperetyNames, obj_type="VirtualMachine")
for obj in ProperetyResults:
VirtualMachine = {'annotations': []}
VirtualMachineList.append(VirtualMachine)
if not hasattr(obj, "PropSet"):
continue
for prop in obj.PropSet:
if prop.Name == "name":
VirtualMachine['name'] = prop.Val
elif prop.Name == "config.files.vmPathName":
VirtualMachine['path'] = prop.Val
elif prop.Name == "config.template":
VirtualMachine['is_template'] = prop.Val
elif prop.Name == "customValue":
for annotation in prop.Val.CustomFieldValue:
VirtualMachine['annotations'].append((annotation.Key, annotation.Value))
for VirtualMachine in VirtualMachineList:
if not VirtualMachine['is_template']:
for annotation in VirtualMachine['annotations']:
if tagID in annotation:
for category in categories:
if category in annotation:
VirtualMachinesWithAnnotation.append(VirtualMachine['path'])
for i in VirtualMachinesWithAnnotation:
if i in VirtualMachineFetchResult:
VirtualMachinesRelated.append(i)
return VirtualMachinesRelated
else:
return VirtualMachineFetchResult
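# Minimal sketch (not part of the original script) of the annotation filter applied
# above: a VM is kept only if it is not a template and one of its custom-field
# annotations carries the tag ID together with a requested category. All values
# below are hypothetical.
def _example_annotation_filter(categories=('web',), tagID=201):
    vms = [{'path': '[ds] a/a.vmx', 'is_template': False, 'annotations': [(201, 'web')]},
           {'path': '[ds] b/b.vmx', 'is_template': False, 'annotations': [(201, 'db')]}]
    selected = []
    for vm in vms:
        if vm['is_template']:
            continue
        for annotation in vm['annotations']:
            if tagID in annotation:
                for category in categories:
                    if category in annotation:
                        selected.append(vm['path'])
    return selected  # -> ['[ds] a/a.vmx']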
parser = argparse.ArgumentParser(description='Automated shutdown of ESX hosts and VMs during a power outage')
parser.add_argument('-u', '--user', dest='VIUser', required=True, help='vSphere User')
parser.add_argument('-p', '--password', dest='VIPassword', required=True, help='vSphere User Password')
parser.add_argument('-H', '--host', dest='VIHost', required=True, help='vSphere Host Adress')
parser.add_argument('-t', '--tag', dest='VICategories', nargs='*', help='Tag to be used to filter Virtual Machines')
parser.add_argument('-id', '--id', dest='tagID', type=int, default=201, help='ID of the Tag Field used by vSphere (default: 201)')
parser.add_argument('-w', '--wait', dest='maxWaitVMs', metavar='N', type=int, default=900, help='Seconds to wait between the Shutdown and Kill sequence (default:900)')
parser.add_argument('-a', '--action', choices=['shutdown', 'kill', 'start', 'list'], dest='VMAction', default="list", help='Action to perform on found Virtual Machines')
parser.add_argument('-s', '--status', choices=['poweredOn', 'poweredOff', 'suspended'], dest='VIStatus', help='Status used to search for Virtual Machines')
parser.add_argument('-c', '--cluster', dest='VICluster', help='Cluster where Virtual Machines will be searched')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Also print the logs to STDOUT when the -o/--out option is provided (without -o, logs always go to STDOUT).')
parser.add_argument('-o', '--out', dest='logPath', help='Path to the file where the logs will be stored.')
if __name__ == "__main__":
args = parser.parse_args()
global logger
logger = logger_init(args.verbose, args.logPath)
logger.info("Logger was successfully initialized")
signal.signal(signal.SIGINT, signal_handler)
VIServer = vConnect(args.VIHost, args.VIUser, args.VIPassword)
if 'list' in args.VMAction:
VirtualMachine_List(VIServer, VirtualMachine_ServerList(VIServer, args.VICategories, args.VICluster, args.VIStatus, args.tagID))
elif 'start' in args.VMAction:
VirtualMachine_Start(VIServer, VirtualMachine_ServerList(VIServer, args.VICategories, args.VICluster, args.VIStatus, args.tagID))
elif 'shutdown' in args.VMAction:
VirtualMachine_Shutdown(VIServer, VirtualMachine_ServerList(VIServer, args.VICategories, args.VICluster, args.VIStatus, args.tagID))
logger.info("Waiting " + str(args.maxWaitVMs) + " seconds before sending Power-Off Signal to the Virtual Machines")
time.sleep(args.maxWaitVMs)
VirtualMachine_PowerOff(VIServer, VirtualMachine_ServerList(VIServer, args.VICategories, args.VICluster, args.VIStatus, args.tagID))
elif 'kill' in args.VMAction:
VirtualMachine_PowerOff(VIServer, VirtualMachine_ServerList(VIServer, args.VICategories, args.VICluster, args.VIStatus, args.tagID))
    else:
        parser.print_help()
vDisconnect(VIServer)
|
|
'''
Native support for Multitouch devices on Linux, using libmtdev.
===============================================================
The Mtdev project is a part of the Ubuntu Maverick multitouch architecture.
You can read more on http://wiki.ubuntu.com/Multitouch
To configure MTDev, it's preferable to use probesysfs providers.
Check :py:class:`~kivy.input.providers.probesysfs` for more information.
Otherwise, add this to your configuration::
[input]
# devicename = hidinput,/dev/input/eventXX
acert230h = mtdev,/dev/input/event2
.. note::
You must have read access to the input event.
You can use a custom range for the X, Y and pressure values.
On some drivers, the range reported is invalid.
To fix that, you can add these options to the argument line:
* invert_x : 1 to invert X axis
* invert_y : 1 to invert Y axis
* min_position_x : X minimum
* max_position_x : X maximum
* min_position_y : Y minimum
* max_position_y : Y maximum
* min_pressure : pressure minimum
* max_pressure : pressure maximum
* min_touch_major : width shape minimum
* max_touch_major : width shape maximum
* min_touch_minor : height shape minimum
* max_touch_minor : height shape maximum
* rotation : 0,90,180 or 270 to rotate
'''
__all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')
import os
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
class MTDMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
if 'x' in args:
self.sx = args['x']
else:
self.sx = -1
if 'y' in args:
self.sy = args['y']
else:
self.sy = -1
self.profile = ['pos']
if 'size_w' in args and 'size_h' in args:
self.shape = ShapeRect()
self.shape.width = args['size_w']
self.shape.height = args['size_h']
self.profile.append('shape')
if 'pressure' in args:
self.pressure = args['pressure']
self.profile.append('pressure')
super(MTDMotionEvent, self).depack(args)
def __str__(self):
i, sx, sy, d = (self.id, self.sx, self.sy, self.device)
return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d)
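# Small sketch (not part of the original provider): how a normalized event dict is
# turned into an MTDMotionEvent via depack(). The device name and values are
# hypothetical, and the constructor signature (device, id, args) is assumed from the
# usage further below.
def _example_depack():
    ev = MTDMotionEvent('/dev/input/event2', 1,
                        {'x': 0.25, 'y': 0.75, 'pressure': 0.5})
    return ev.profile  # -> ['pos', 'pressure']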
if 'KIVY_DOC' in os.environ:
# documentation hack
MTDMotionEventProvider = None
else:
import threading
import collections
from kivy.lib.mtdev import Device, \
MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \
MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \
MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \
MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \
MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \
MTDEV_ABS_TOUCH_MAJOR
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.logger import Logger
class MTDMotionEventProvider(MotionEventProvider):
options = ('min_position_x', 'max_position_x',
'min_position_y', 'max_position_y',
'min_pressure', 'max_pressure',
'min_touch_major', 'max_touch_major',
'min_touch_minor', 'max_touch_minor',
'invert_x', 'invert_y',
'rotation')
def __init__(self, device, args):
super(MTDMotionEventProvider, self).__init__(device, args)
self._device = None
self.input_fn = None
self.default_ranges = dict()
# split arguments
args = args.split(',')
if not args:
            Logger.error('MTD: No filename passed to MTD configuration')
Logger.error('MTD: Use /dev/input/event0 for example')
return None
# read filename
self.input_fn = args[0]
Logger.info('MTD: Read event from <%s>' % self.input_fn)
# read parameters
for arg in args[1:]:
if arg == '':
continue
arg = arg.split('=')
# ensure it's a key = value
if len(arg) != 2:
err = 'MTD: Bad parameter %s: Not in key=value format' %\
arg
Logger.error(err)
continue
# ensure the key exist
key, value = arg
if key not in MTDMotionEventProvider.options:
Logger.error('MTD: unknown %s option' % key)
continue
# ensure the value
try:
self.default_ranges[key] = int(value)
except ValueError:
                    err = 'MTD: invalid value %s for option %s' % (value, key)
Logger.error(err)
continue
# all good!
Logger.info('MTD: Set custom %s to %d' % (key, int(value)))
if 'rotation' not in self.default_ranges:
self.default_ranges['rotation'] = 0
elif self.default_ranges['rotation'] not in (0, 90, 180, 270):
Logger.error('HIDInput: invalid rotation value ({})'.format(
self.default_ranges['rotation']))
self.default_ranges['rotation'] = 0
def start(self):
if self.input_fn is None:
return
self.uid = 0
self.queue = collections.deque()
self.thread = threading.Thread(
target=self._thread_run,
kwargs=dict(
queue=self.queue,
input_fn=self.input_fn,
device=self.device,
default_ranges=self.default_ranges))
self.thread.daemon = True
self.thread.start()
def _thread_run(self, **kwargs):
input_fn = kwargs.get('input_fn')
queue = kwargs.get('queue')
device = kwargs.get('device')
drs = kwargs.get('default_ranges').get
touches = {}
touches_sent = []
point = {}
l_points = {}
def assign_coord(point, value, invert, coords):
cx, cy = coords
if invert:
value = 1. - value
if rotation == 0:
point[cx] = value
elif rotation == 90:
point[cy] = value
elif rotation == 180:
point[cx] = 1. - value
elif rotation == 270:
point[cy] = 1. - value
def process(points):
for args in points:
# this can happen if we have a touch going on already at
# the start of the app
if 'id' not in args:
continue
tid = args['id']
try:
touch = touches[tid]
except KeyError:
touch = MTDMotionEvent(device, tid, args)
touches[touch.id] = touch
touch.move(args)
action = 'update'
if tid not in touches_sent:
action = 'begin'
touches_sent.append(tid)
if 'delete' in args:
action = 'end'
del args['delete']
del touches[touch.id]
touches_sent.remove(tid)
touch.update_time_end()
queue.append((action, touch))
def normalize(value, vmin, vmax):
return (value - vmin) / float(vmax - vmin)
# open mtdev device
_fn = input_fn
_slot = 0
try:
_device = Device(_fn)
except OSError as e:
if e.errno == 13: # Permission denied
Logger.warn(
'MTD: Unable to open device "{0}". Please ensure you'
' have the appropriate permissions.'.format(_fn))
return
else:
raise
_changes = set()
# prepare some vars to get limit of some component
ab = _device.get_abs(MTDEV_ABS_POSITION_X)
range_min_position_x = drs('min_position_x', ab.minimum)
range_max_position_x = drs('max_position_x', ab.maximum)
Logger.info('MTD: <%s> range position X is %d - %d' %
(_fn, range_min_position_x, range_max_position_x))
ab = _device.get_abs(MTDEV_ABS_POSITION_Y)
range_min_position_y = drs('min_position_y', ab.minimum)
range_max_position_y = drs('max_position_y', ab.maximum)
Logger.info('MTD: <%s> range position Y is %d - %d' %
(_fn, range_min_position_y, range_max_position_y))
ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR)
range_min_major = drs('min_touch_major', ab.minimum)
range_max_major = drs('max_touch_major', ab.maximum)
Logger.info('MTD: <%s> range touch major is %d - %d' %
(_fn, range_min_major, range_max_major))
ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR)
range_min_minor = drs('min_touch_minor', ab.minimum)
range_max_minor = drs('max_touch_minor', ab.maximum)
Logger.info('MTD: <%s> range touch minor is %d - %d' %
(_fn, range_min_minor, range_max_minor))
range_min_pressure = drs('min_pressure', 0)
range_max_pressure = drs('max_pressure', 255)
Logger.info('MTD: <%s> range pressure is %d - %d' %
(_fn, range_min_pressure, range_max_pressure))
invert_x = int(bool(drs('invert_x', 0)))
invert_y = int(bool(drs('invert_y', 0)))
            Logger.info('MTD: <%s> axes inversion: X is %d, Y is %d' %
(_fn, invert_x, invert_y))
rotation = drs('rotation', 0)
Logger.info('MTD: <%s> rotation set to %d' %
(_fn, rotation))
while _device:
# idle as much as we can.
while _device.idle(1000):
continue
# got data, read all without redoing idle
while True:
data = _device.get()
if data is None:
break
# set the working slot
if data.type == MTDEV_TYPE_EV_ABS and \
data.code == MTDEV_CODE_SLOT:
_slot = data.value
continue
# fill the slot
if _slot not in l_points:
l_points[_slot] = dict()
point = l_points[_slot]
ev_value = data.value
ev_code = data.code
if ev_code == MTDEV_CODE_POSITION_X:
val = normalize(ev_value,
range_min_position_x,
range_max_position_x)
assign_coord(point, val, invert_x, 'xy')
elif ev_code == MTDEV_CODE_POSITION_Y:
val = 1. - normalize(ev_value,
range_min_position_y,
range_max_position_y)
assign_coord(point, val, invert_y, 'yx')
elif ev_code == MTDEV_CODE_PRESSURE:
point['pressure'] = normalize(ev_value,
range_min_pressure,
range_max_pressure)
elif ev_code == MTDEV_CODE_TOUCH_MAJOR:
point['size_w'] = normalize(ev_value,
range_min_major,
range_max_major)
elif ev_code == MTDEV_CODE_TOUCH_MINOR:
point['size_h'] = normalize(ev_value,
range_min_minor,
range_max_minor)
elif ev_code == MTDEV_CODE_TRACKING_ID:
if ev_value == -1:
point['delete'] = True
# force process of changes here, as the slot can be
# reused.
_changes.add(_slot)
process([l_points[x] for x in _changes])
_changes.clear()
continue
else:
point['id'] = ev_value
else:
# unrecognized command, ignore.
continue
_changes.add(_slot)
# push all changes
if _changes:
process([l_points[x] for x in _changes])
_changes.clear()
def update(self, dispatch_fn):
# dispatch all event from threads
try:
while True:
event_type, touch = self.queue.popleft()
dispatch_fn(event_type, touch)
except:
pass
MotionEventFactory.register('mtdev', MTDMotionEventProvider)
|
|
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# RFC 1964 Partial Implementation
# RFC 4757 Partial Implementation
# RFC 4121 Partial Implementation
# RFC 3962 Partial Implementation
import struct
from Crypto.Hash import HMAC, MD5
from Crypto.Cipher import ARC4
from impacket.structure import Structure
from impacket.krb5 import constants, crypto
# Constants
GSS_C_DCE_STYLE = 0x1000
GSS_C_DELEG_FLAG = 1
GSS_C_MUTUAL_FLAG = 2
GSS_C_REPLAY_FLAG = 4
GSS_C_SEQUENCE_FLAG = 8
GSS_C_CONF_FLAG = 0x10
GSS_C_INTEG_FLAG = 0x20
# Mic Semantics
GSS_HMAC = 0x11
# Wrap Semantics
GSS_RC4 = 0x10
# 2. Key Derivation for Per-Message Tokens
KG_USAGE_ACCEPTOR_SEAL = 22
KG_USAGE_ACCEPTOR_SIGN = 23
KG_USAGE_INITIATOR_SEAL = 24
KG_USAGE_INITIATOR_SIGN = 25
KRB5_AP_REQ = struct.pack('<H', 0x1)
# 1.1.1. Initial Token - Checksum field
class CheckSumField(Structure):
structure = (
('Lgth','<L=16'),
('Bnd','16s=""'),
('Flags','<L=0'),
)
def GSSAPI(cipher):
if cipher.enctype == constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value:
return GSSAPI_AES256()
if cipher.enctype == constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value:
return GSSAPI_AES128()
elif cipher.enctype == constants.EncryptionTypes.rc4_hmac.value:
return GSSAPI_RC4()
else:
raise Exception('Unsupported etype 0x%x' % cipher.enctype)
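# Hedged sketch (not part of the original module): the factory above selects the
# wrapper class purely from cipher.enctype. The namedtuple is a stand-in for a real
# cipher object; the wrapper classes themselves are defined below.
def _example_gssapi_factory():
    from collections import namedtuple
    FakeCipher = namedtuple('FakeCipher', 'enctype')
    wrapper = GSSAPI(FakeCipher(constants.EncryptionTypes.rc4_hmac.value))
    return wrapper.__class__.__name__  # -> 'GSSAPI_RC4'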
# 7.2. GSS-API MIC Semantics
class GSSAPI_RC4:
# 1.2.1. Per-message Tokens - MIC
class MIC(Structure):
structure = (
('TOK_ID','<H=0x0101'),
('SGN_ALG','<H=0'),
('Filler','<L=0xffffffff'),
('SND_SEQ','8s=""'),
('SGN_CKSUM','8s=""'),
)
# 1.2.2. Per-message Tokens - Wrap
class WRAP(Structure):
structure = (
('TOK_ID','<H=0x0102'),
('SGN_ALG','<H=0'),
('SEAL_ALG','<H=0'),
('Filler','<H=0xffff'),
('SND_SEQ','8s=""'),
('SGN_CKSUM','8s=""'),
('Confounder','8s=""'),
)
def GSS_GetMIC(self, sessionKey, data, sequenceNumber, direction = 'init'):
GSS_GETMIC_HEADER = '\x60\x23\x06\x09\x2a\x86\x48\x86\xf7\x12\x01\x02\x02'
token = self.MIC()
# Let's pad the data
pad = (4 - (len(data) % 4)) & 0x3
padStr = chr(pad) * pad
data += padStr
token['SGN_ALG'] = GSS_HMAC
if direction == 'init':
token['SND_SEQ'] = struct.pack('>L', sequenceNumber) + '\x00'*4
else:
token['SND_SEQ'] = struct.pack('>L', sequenceNumber) + '\xff'*4
Ksign = HMAC.new(sessionKey.contents, 'signaturekey\0', MD5).digest()
Sgn_Cksum = MD5.new( struct.pack('<L',15) + str(token)[:8] + data).digest()
Sgn_Cksum = HMAC.new(Ksign, Sgn_Cksum, MD5).digest()
token['SGN_CKSUM'] = Sgn_Cksum[:8]
Kseq = HMAC.new(sessionKey.contents, struct.pack('<L',0), MD5).digest()
Kseq = HMAC.new(Kseq, token['SGN_CKSUM'], MD5).digest()
token['SND_SEQ'] = ARC4.new(Kseq).encrypt(token['SND_SEQ'])
finalData = GSS_GETMIC_HEADER + token.getData()
return finalData
def GSS_Wrap(self, sessionKey, data, sequenceNumber, direction = 'init', encrypt=True, authData=None):
        # Damn inaccurate RFC; useful info from here
# https://social.msdn.microsoft.com/Forums/en-US/fb98e8f4-e697-4652-bcb7-604e027e14cc/gsswrap-token-size-kerberos-and-rc4hmac?forum=os_windowsprotocols
# and here
# http://www.rfc-editor.org/errata_search.php?rfc=4757
GSS_WRAP_HEADER = '\x60\x2b\x06\x09\x2a\x86\x48\x86\xf7\x12\x01\x02\x02'
token = self.WRAP()
# Let's pad the data
pad = (8 - (len(data) % 8)) & 0x7
padStr = chr(pad) * pad
data += padStr
token['SGN_ALG'] = GSS_HMAC
token['SEAL_ALG'] = GSS_RC4
if direction == 'init':
token['SND_SEQ'] = struct.pack('>L', sequenceNumber) + '\x00'*4
else:
token['SND_SEQ'] = struct.pack('>L', sequenceNumber) + '\xff'*4
# Random confounder :)
token['Confounder'] = '12345678'
Ksign = HMAC.new(sessionKey.contents, 'signaturekey\0', MD5).digest()
Sgn_Cksum = MD5.new(struct.pack('<L',13) + str(token)[:8] + token['Confounder'] + data).digest()
Klocal = ''
for n in sessionKey.contents:
Klocal += chr(ord(n) ^ 0xF0)
Kcrypt = HMAC.new(Klocal,struct.pack('<L',0), MD5).digest()
Kcrypt = HMAC.new(Kcrypt,struct.pack('>L', sequenceNumber), MD5).digest()
Sgn_Cksum = HMAC.new(Ksign, Sgn_Cksum, MD5).digest()
token['SGN_CKSUM'] = Sgn_Cksum[:8]
Kseq = HMAC.new(sessionKey.contents, struct.pack('<L',0), MD5).digest()
Kseq = HMAC.new(Kseq, token['SGN_CKSUM'], MD5).digest()
token['SND_SEQ'] = ARC4.new(Kseq).encrypt(token['SND_SEQ'])
if authData is not None:
from impacket.dcerpc.v5.rpcrt import SEC_TRAILER
wrap = self.WRAP(authData[len(SEC_TRAILER()) + len(GSS_WRAP_HEADER):])
snd_seq = wrap['SND_SEQ']
Kseq = HMAC.new(sessionKey.contents, struct.pack('<L',0), MD5).digest()
Kseq = HMAC.new(Kseq, wrap['SGN_CKSUM'], MD5).digest()
snd_seq = ARC4.new(Kseq).encrypt(wrap['SND_SEQ'])
Kcrypt = HMAC.new(Klocal,struct.pack('<L',0), MD5).digest()
Kcrypt = HMAC.new(Kcrypt,snd_seq[:4], MD5).digest()
rc4 = ARC4.new(Kcrypt)
cipherText = rc4.decrypt(token['Confounder'] + data)[8:]
elif encrypt is True:
rc4 = ARC4.new(Kcrypt)
token['Confounder'] = rc4.encrypt(token['Confounder'])
cipherText = rc4.encrypt(data)
else:
cipherText = data
finalData = GSS_WRAP_HEADER + token.getData()
return cipherText, finalData
def GSS_Unwrap(self, sessionKey, data, sequenceNumber, direction = 'init', encrypt=True, authData=None):
return self.GSS_Wrap(sessionKey, data, sequenceNumber, direction, encrypt, authData)
class GSSAPI_AES():
checkSumProfile = None
cipherType = None
class MIC(Structure):
structure = (
('TOK_ID','>H=0x0404'),
('Flags','B=0'),
('Filler0','B=0xff'),
('Filler','>L=0xffffffff'),
('SND_SEQ','8s=""'),
('SGN_CKSUM','12s=""'),
)
# 1.2.2. Per-message Tokens - Wrap
class WRAP(Structure):
structure = (
('TOK_ID','>H=0x0504'),
('Flags','B=0'),
('Filler','B=0xff'),
('EC','>H=0'),
('RRC','>H=0'),
('SND_SEQ','8s=""'),
)
def GSS_GetMIC(self, sessionKey, data, sequenceNumber, direction = 'init'):
token = self.MIC()
# Let's pad the data
pad = (4 - (len(data) % 4)) & 0x3
padStr = chr(pad) * pad
data += padStr
checkSumProfile = self.checkSumProfile()
token['Flags'] = 4
token['SND_SEQ'] = struct.pack('>Q',sequenceNumber)
token['SGN_CKSUM'] = checkSumProfile.checksum(sessionKey, KG_USAGE_INITIATOR_SIGN, data + token.getData()[:16])
return token.getData()
def rotate(self, data, numBytes):
numBytes %= len(data)
left = len(data) - numBytes
result = data[left:] + data[:left]
return result
def unrotate(self, data, numBytes):
numBytes %= len(data)
result = data[numBytes:] + data[:numBytes]
return result
def GSS_Wrap(self, sessionKey, data, sequenceNumber, direction = 'init', encrypt=True):
token = self.WRAP()
cipher = self.cipherType()
# Let's pad the data
pad = (cipher.blocksize - (len(data) % cipher.blocksize)) & 15
padStr = '\xFF' * pad
data += padStr
# The RRC field ([RFC4121] section 4.2.5) is 12 if no encryption is requested or 28 if encryption
# is requested. The RRC field is chosen such that all the data can be encrypted in place.
rrc = 28
token['Flags'] = 6
token['EC'] = pad
token['RRC'] = 0
token['SND_SEQ'] = struct.pack('>Q',sequenceNumber)
cipherText = cipher.encrypt(sessionKey, KG_USAGE_INITIATOR_SEAL, data + token.getData(), None)
token['RRC'] = rrc
cipherText = self.rotate(cipherText, token['RRC'] + token['EC'])
nn = self.unrotate(cipherText, token['RRC'] + token['EC'])
ret1 = cipherText[len(self.WRAP()) + token['RRC'] + token['EC']:]
ret2 = token.getData() + cipherText[:len(self.WRAP()) + token['RRC'] + token['EC']]
return ret1, ret2
def GSS_Unwrap(self, sessionKey, data, sequenceNumber, direction = 'init', encrypt=True, authData=None):
from impacket.dcerpc.v5.rpcrt import SEC_TRAILER
cipher = self.cipherType()
token = self.WRAP(authData[len(SEC_TRAILER()):])
rotated = authData[len(self.WRAP())+len(SEC_TRAILER()):] + data
cipherText = self.unrotate(rotated, token['RRC'] + token['EC'])
plainText = cipher.decrypt(sessionKey, KG_USAGE_ACCEPTOR_SEAL, cipherText)
return plainText[:-(token['EC']+len(self.WRAP()))], None
class GSSAPI_AES256(GSSAPI_AES):
checkSumProfile = crypto._SHA1AES256
cipherType = crypto._AES256CTS
class GSSAPI_AES128(GSSAPI_AES):
checkSumProfile = crypto._SHA1AES128
cipherType = crypto._AES128CTS
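# Quick check (not part of the original module): rotate() and unrotate() are inverse
# byte rotations; they implement the RRC handling described in GSS_Wrap above.
def _example_rotate_roundtrip():
    gss = GSSAPI_AES256()
    data = '0123456789'
    return gss.unrotate(gss.rotate(data, 3), 3) == data  # -> True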
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import unittest
import six
import numpy as np
from skbio import DNA, RNA, Protein, GeneticCode
from skbio.sequence._nucleotide_mixin import NucleotideMixin
# This file contains tests for functionality of sequence types which implement
# NucleotideMixin. Currently this means DNA and RNA. These types are so
# similar that the testing logic can be shared and parameterized across
# different test data.
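# Tiny illustration (not part of the original tests) of the shared behaviour the
# parameterized tests below exercise: complement() maps every IUPAC character
# through the type's complement_map.
def _example_complement():
    return str(DNA('ACGTN').complement())  # -> 'TGCAN'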
class TestNucelotideSequence(unittest.TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str,
lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
dna_str = 'ACGTMRWSYKVHDBN.-'
dna_comp_str = 'TGCAKYWSRMBDHVN.-'
dna_rev_comp_str = '-.NVHDBMRSWYKACGT'
rna_str = 'ACGUMRWSYKVHDBN.-'
rna_comp_str = 'UGCAKYWSRMBDHVN.-'
rna_rev_comp_str = '-.NVHDBMRSWYKACGU'
qual = tuple(range(len(dna_str)))
self.dna = (DNA, dna_str)
self.rna = (RNA, rna_str)
dna_comp = self.dna + (dna_comp_str,)
rna_comp = self.rna + (rna_comp_str,)
dna_comp_qual = dna_comp + (qual,)
rna_comp_qual = rna_comp + (qual,)
self.all_combos_comp_qual = (dna_comp_qual, rna_comp_qual)
dna_rev_comp = self.dna + (dna_rev_comp_str,)
rna_rev_comp = self.rna + (rna_rev_comp_str,)
self.all_combos_rev_comp = (dna_rev_comp, rna_rev_comp)
dna_rev_comp_qual = dna_rev_comp + (qual,)
rna_rev_comp_qual = rna_rev_comp + (qual,)
self.all_combos_rev_comp_qual = \
(dna_rev_comp_qual, rna_rev_comp_qual)
def test_instantiation_with_no_implementation(self):
class NucleotideSequenceSubclassNoImplementation(NucleotideMixin):
pass
with self.assertRaises(TypeError) as cm:
NucleotideSequenceSubclassNoImplementation()
self.assertIn("abstract class", str(cm.exception))
self.assertIn("complement_map", str(cm.exception))
def test_nondegenerate_chars(self):
dna = (DNA, "ACGT")
rna = (RNA, "ACGU")
for constructor, nondegenerate in (dna, rna):
exp = set(nondegenerate)
self.assertEqual(constructor('').nondegenerate_chars, exp)
self.assertEqual(constructor.nondegenerate_chars, exp)
def test_degenerate_map(self):
dna_exp = (DNA, {
'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
'H': set(['A', 'C', 'T']), 'K': set(['T', 'G']),
'M': set(['A', 'C']), 'N': set(['A', 'C', 'T', 'G']),
'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'T']),
'V': set(['A', 'C', 'G']), 'Y': set(['C', 'T'])
})
rna_exp = (RNA, {
'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U'])
})
for constructor, degenerate in (dna_exp, rna_exp):
self.assertEqual(constructor('').degenerate_map, degenerate)
self.assertEqual(constructor.degenerate_map, degenerate)
def test_complement_map(self):
dna_exp = (DNA, {
'-': '-', '.': '.', 'A': 'T', 'C': 'G', 'B': 'V', 'D': 'H',
'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
'R': 'Y', 'T': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
})
rna_exp = (RNA, {
'-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R'
})
for constructor, comp_map in (dna_exp, rna_exp):
self.assertEqual(constructor('').complement_map, comp_map)
self.assertEqual(constructor.complement_map, comp_map)
# immutable
constructor.complement_map['A'] = 'X'
constructor.complement_map['C'] = 'W'
self.assertEqual(constructor.complement_map, comp_map)
with self.assertRaises(AttributeError):
constructor('').complement_map = {'W': 'X'}
def test_translate_ncbi_table_id(self):
for seq in RNA('AAAUUUAUGCAU'), DNA('AAATTTATGCAT'):
# default
obs = seq.translate()
self.assertEqual(obs, Protein('KFMH'))
obs = seq.translate(9)
self.assertEqual(obs, Protein('NFMH'))
def test_translate_genetic_code_object(self):
gc = GeneticCode('M' * 64, '-' * 64)
for seq in RNA('AAAUUUAUGCAU'), DNA('AAATTTATGCAT'):
obs = seq.translate(gc)
self.assertEqual(obs, Protein('MMMM'))
def test_translate_passes_parameters_through(self):
exp = Protein('MW')
for seq in RNA('UAAAUUGUGGUAA'), DNA('TAAATTGTGGTAA'):
# mix of args and kwargs
obs = seq.translate(13, reading_frame=2, start='require',
stop='require')
self.assertEqual(obs, exp)
# kwargs only
obs = seq.translate(genetic_code=13, reading_frame=2,
start='require', stop='require')
self.assertEqual(obs, exp)
# args only
obs = seq.translate(13, 2, 'require', 'require')
self.assertEqual(obs, exp)
def test_translate_preserves_metadata(self):
metadata = {'foo': 'bar', 'baz': 42}
positional_metadata = {'foo': range(3)}
for seq in (RNA('AUG', metadata=metadata,
positional_metadata=positional_metadata),
DNA('ATG', metadata=metadata,
positional_metadata=positional_metadata)):
obs = seq.translate()
# metadata retained, positional metadata dropped
self.assertEqual(obs,
Protein('M', metadata={'foo': 'bar', 'baz': 42}))
def test_translate_invalid_id(self):
for seq in RNA('AUG'), DNA('ATG'):
with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
seq.translate(42)
def test_translate_six_frames_ncbi_table_id(self):
# rc = CAAUUU
for seq in RNA('AAAUUG'), DNA('AAATTG'):
# default
obs = list(seq.translate_six_frames())
self.assertEqual(obs, [Protein('KL'), Protein('N'), Protein('I'),
Protein('QF'), Protein('N'), Protein('I')])
obs = list(seq.translate_six_frames(9))
self.assertEqual(obs, [Protein('NL'), Protein('N'), Protein('I'),
Protein('QF'), Protein('N'), Protein('I')])
def test_translate_six_frames_genetic_code_object(self):
gc = GeneticCode('M' * 64, '-' * 64)
for seq in RNA('AAAUUG'), DNA('AAATTG'):
obs = list(seq.translate_six_frames(gc))
self.assertEqual(obs, [Protein('MM'), Protein('M'), Protein('M'),
Protein('MM'), Protein('M'), Protein('M')])
def test_translate_six_frames_passes_parameters_through(self):
for seq in RNA('UUUAUGUGGUGA'), DNA('TTTATGTGGTGA'):
# mix of args and kwargs
obs = next(seq.translate_six_frames(11, start='require',
stop='require'))
self.assertEqual(obs, Protein('MW'))
# kwargs only
obs = next(seq.translate_six_frames(genetic_code=11,
start='require',
stop='require'))
self.assertEqual(obs, Protein('MW'))
# args only
obs = next(seq.translate_six_frames(11, 'require', 'require'))
self.assertEqual(obs, Protein('MW'))
def test_translate_six_frames_preserves_metadata(self):
metadata = {'foo': 'bar', 'baz': 42}
positional_metadata = {'foo': range(3)}
for seq in (RNA('AUG', metadata=metadata,
positional_metadata=positional_metadata),
DNA('ATG', metadata=metadata,
positional_metadata=positional_metadata)):
obs = list(seq.translate_six_frames())[:2]
# metadata retained, positional metadata dropped
self.assertEqual(
obs,
[Protein('M', metadata={'foo': 'bar', 'baz': 42}),
Protein('', metadata={'foo': 'bar', 'baz': 42})])
def test_translate_six_frames_invalid_id(self):
for seq in RNA('AUG'), DNA('ATG'):
with six.assertRaisesRegex(self, ValueError, 'table_id.*42'):
seq.translate_six_frames(42)
def test_repr(self):
# basic sanity checks for custom repr stats. more extensive testing is
# performed on Sequence.__repr__
for seq in DNA(''), RNA(''):
obs = repr(seq)
# obtained from super()
self.assertIn('has gaps: False', obs)
            # custom to NucleotideMixin (GC-content)
self.assertIn('GC-content: 0.00%', obs)
for seq in DNA('ACGT'), RNA('ACGU'):
obs = repr(seq)
self.assertIn('has gaps: False', obs)
self.assertIn('GC-content: 50.00%', obs)
for seq in DNA('CST'), RNA('CSU'):
obs = repr(seq)
self.assertIn('has gaps: False', obs)
self.assertIn('GC-content: 66.67%', obs)
for seq in DNA('GCSSCG'), RNA('GCSSCG'):
obs = repr(seq)
self.assertIn('has gaps: False', obs)
self.assertIn('GC-content: 100.00%', obs)
for seq in DNA('-GCSSCG.'), RNA('-GCSSCG.'):
obs = repr(seq)
self.assertIn('has gaps: True', obs)
self.assertIn('GC-content: 100.00%', obs)
def test_complement_without_reverse_empty(self):
for constructor in (DNA, RNA):
# without optional attributes
comp = constructor('').complement()
self.assertEqual(comp, constructor(''))
# with optional attributes
comp = constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}).complement()
self.assertEqual(
comp,
constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}))
def test_complement_without_reverse_non_empty(self):
for (constructor, seq_str, comp_str,
qual) in self.all_combos_comp_qual:
comp = constructor(seq_str).complement()
self.assertEqual(comp, constructor(comp_str))
comp = constructor(
seq_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual}).complement()
self.assertEqual(
comp,
constructor(
comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual}))
def test_complement_with_reverse_empty(self):
for constructor in (DNA, RNA):
rc = constructor('').complement(reverse=True)
self.assertEqual(rc, constructor(''))
rc = constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}).complement(reverse=True)
self.assertEqual(
rc,
constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}))
def test_complement_with_reverse_non_empty(self):
for (constructor, seq_str, rev_comp_str,
qual) in self.all_combos_rev_comp_qual:
rc = constructor(seq_str).complement(reverse=True)
self.assertEqual(rc, constructor(rev_comp_str))
rc = constructor(
seq_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={
'quality': qual}).complement(reverse=True)
self.assertEqual(
rc,
constructor(
rev_comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality':
list(qual)[::-1]}))
def test_reverse_complement(self):
# light tests because this just calls
# NucleotideSequence.complement(reverse=True), which is tested more
# extensively
for (constructor, seq_str, rev_comp_str,
qual) in self.all_combos_rev_comp_qual:
rc = constructor(
seq_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual}).reverse_complement()
self.assertEqual(
rc,
constructor(
rev_comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': list(qual)[::-1]}))
def test_is_reverse_complement_varied_types(self):
tested = 0
for constructor, seq_str, rev_comp_str in self.all_combos_rev_comp:
seq_kinds = self.sequence_kinds.union(frozenset([constructor]))
for sequence in seq_kinds:
tested += 1
seq1 = constructor(seq_str)
seq2 = sequence(rev_comp_str)
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertEqual(tested, 8)
def test_is_reverse_complement_empty(self):
for constructor in (DNA, RNA):
seq1 = constructor('')
self.assertTrue(seq1.is_reverse_complement(seq1))
# optional attributes are ignored, only the sequence is compared
seq2 = constructor(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality':
np.array([], dtype=np.int64)})
self.assertTrue(seq2.is_reverse_complement(seq2))
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertTrue(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_metadata_ignored(self):
for (constructor, seq_str, rev_comp_str,
qual) in self.all_combos_rev_comp_qual:
seq1 = constructor(seq_str)
seq2 = constructor(
rev_comp_str,
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': qual})
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertTrue(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_non_reverse_complements(self):
for constructor in (DNA, RNA):
# same length
seq1 = constructor('ACAG')
seq2 = constructor('AAAA')
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertFalse(seq1.is_reverse_complement(seq2))
self.assertFalse(seq2.is_reverse_complement(seq1))
# different length
seq1 = constructor('ACAG')
seq2 = constructor('AAAAA')
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertFalse(seq1.is_reverse_complement(seq2))
self.assertFalse(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_type_mismatch(self):
for Class in (DNA, RNA):
class Subclass(Class):
pass
seq1 = Class('ABC')
seq2 = Subclass('ABC')
with self.assertRaises(TypeError):
seq1.is_reverse_complement(seq2)
def test_motif_purine_run(self):
dna = (DNA, "AARC--TCRG", "AA-RC--TCR-G")
rna = (RNA, "AARC--UCRG", "AA-RC--UCR-G")
all_sets = (dna, rna)
for constructor, run1, run2 in all_sets:
seq = constructor("")
self.assertEqual(list(seq.find_motifs("purine-run")), [])
seq = constructor(run1)
self.assertEqual(list(seq.find_motifs("purine-run")),
[slice(0, 3), slice(8, 10)])
seq = constructor(run2)
self.assertEqual(list(seq.find_motifs("purine-run", min_length=3,
ignore=seq.gaps())),
[slice(0, 4)])
def test_motif_pyrimidine_run(self):
dna = (DNA, "AARC--TCRA", "AA-RC--TCR-A")
rna = (RNA, "AARC--UCRG", "AA-RC--UCR-G")
all_sets = (dna, rna)
for constructor, run1, run2 in all_sets:
seq = constructor("")
self.assertEqual(list(seq.find_motifs("pyrimidine-run")), [])
seq = constructor(run1)
self.assertEqual(list(seq.find_motifs("pyrimidine-run")),
[slice(3, 4), slice(6, 8)])
seq = constructor(run2)
self.assertEqual(list(seq.find_motifs("pyrimidine-run",
min_length=3,
ignore=seq.gaps())),
[slice(4, 9)])
def test_gc_frequency_and_gc_content(self):
universal_sets = (('', 0, 0.0), ('ADDDH', 0, 0.0), ('ACGA', 2, 0.5),
('ACGS', 3, 0.75), ('AAAAAAAG', 1, 0.125),
('CCC', 3, 1.0), ('GGG', 3, 1.0), ('SSS', 3, 1.0),
('CGS', 3, 1.0), ('----....', 0, 0.0),
('G--..', 1, 1.0), ('ACGA', 2, 0.5))
dna = (DNA, universal_sets + (('ATMRWYKVHDBN.-', 0, 0.0),))
rna = (RNA, universal_sets + (('AUMRWYKVHDBN.-', 0, 0.0),))
for constructor, current_set in (dna, rna):
for seq_str, count, ratio in current_set:
seq = constructor(seq_str)
self.assertEqual(count, seq.gc_frequency())
self.assertEqual(count, seq.gc_frequency(relative=False))
self.assertEqual(ratio, seq.gc_frequency(relative=True))
self.assertEqual(ratio, seq.gc_content())
if __name__ == "__main__":
unittest.main()
|
|
"""
Implementation of JSONDecoder
"""
import re
import sys
from simplejson.scanner import Scanner, pattern
try:
from simplejson import _speedups
except ImportError:
_speedups = None
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
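# Illustrative example (not part of the original module): linecol() converts a
# character offset into a (line, column) pair, which errmsg() embeds in the
# error text shown below.
def _errmsg_example():
    doc = '{\n  "a": }'
    assert linecol(doc, 9) == (2, 8)
    assert errmsg("Expecting value", doc, 9) == \
        'Expecting value: line 2 column 8 (char 9)'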
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
s = match.group(0)
fn = getattr(context, 'parse_constant', None)
if fn is None:
rval = c[s]
else:
rval = fn(s)
return rval, None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
fn = getattr(context, 'parse_float', None) or float
res = fn(integer + (frac or '') + (exp or ''))
else:
fn = getattr(context, 'parse_int', None) or int
res = fn(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
next_end = end + 5
msg = "Invalid \\uXXXX escape"
try:
if len(esc) != 4:
raise ValueError
uni = int(esc, 16)
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
m = unichr(uni)
except ValueError:
raise ValueError(errmsg(msg, s, end))
end = next_end
_append(m)
return u''.join(chunks), end
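# Illustrative example (not part of the original module): scanstring() is given
# the index just past the opening quote and returns the decoded text together
# with the index just past the closing quote.
def _scanstring_example():
    value, end = scanstring('"hello\\nworld" trailing', 1)
    assert value == u'hello\nworld'
    assert end == 14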
# Use speedup
if _speedups is not None:
scanstring = _speedups.scanstring
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float
self.parse_int = parse_int
self.parse_constant = parse_constant
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
__all__ = ['JSONDecoder']
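# Illustrative usage (not part of the original module): decode() parses a complete
# document, while raw_decode() stops after the first JSON value and also reports
# where it ended, which helps when trailing data may follow.
def _decoder_example():
    decoder = JSONDecoder()
    assert decoder.decode('{"a": [1, 2.5, null]}') == {'a': [1, 2.5, None]}
    obj, end = decoder.raw_decode('true garbage')
    assert obj is True and end == 4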
|
|
########################################################################
#
# License: BSD
# Created: September 10, 2010
# Author: Francesc Alted - francesc@continuum.io
#
########################################################################
"""Top level functions and classes.
"""
import sys
import os, os.path
import glob
import itertools as it
import numpy as np
import blaze.carray as ca  # used below as ca.carray, ca.defaults and ca.numexpr
from carrayExtension import carray
from blaze.carray.ctable import ctable
from cparams import cparams
import math
def detect_number_of_cores():
"""
detect_number_of_cores()
Return the number of cores in this system.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
def set_nthreads(nthreads):
"""
set_nthreads(nthreads)
Sets the number of threads to be used during carray operation.
    This affects both Blosc and Numexpr (if available). If you want to
change this number only for Blosc, use `blosc_set_nthreads` instead.
Parameters
----------
nthreads : int
The number of threads to be used during carray operation.
Returns
-------
out : int
The previous setting for the number of threads.
See Also
--------
blosc_set_nthreads
"""
nthreads_old = ca.blosc_set_nthreads(nthreads)
return nthreads_old
def open(rootdir, mode='a'):
"""
open(rootdir, mode='a')
Open a disk-based carray/ctable.
Parameters
----------
rootdir : pathname (string)
The directory hosting the carray/ctable object.
mode : the open mode (string)
Specifies the mode in which the object is opened. The supported
values are:
* 'r' for read-only
* 'w' for emptying the previous underlying data
* 'a' for allowing read/write on top of existing data
Returns
-------
    out : a carray/ctable object or None (if no objects are found)
"""
# First try with a carray
obj = None
try:
obj = carray(rootdir=rootdir, mode=mode)
except IOError:
# Not a carray. Now with a ctable
try:
obj = ctable(rootdir=rootdir, mode=mode)
except IOError:
# Not a ctable
pass
return obj
def fromiter(iterable, dtype, count, **kwargs):
"""
fromiter(iterable, dtype, count, **kwargs)
Create a carray/ctable from an `iterable` object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the carray.
dtype : numpy.dtype instance
Specifies the type of the outcome object.
count : int
        The number of items to read from the iterable. If set to -1, the
        iterable will be consumed until exhaustion (not recommended, see note
below).
kwargs : list of parameters or dictionary
Any parameter supported by the carray/ctable constructors.
Returns
-------
out : a carray/ctable object
Notes
-----
    Please specify `count` to both improve performance and save memory. It
    allows `fromiter` to avoid looping over the iterable twice (which is slow).
    It also avoids memory leaks (which can be important for large
    iterables).
"""
from ctable import ctable
# Check for a true iterable
if not hasattr(iterable, "next"):
iterable = iter(iterable)
# Try to guess the final length
expected = count
if count == -1:
# Try to guess the size of the iterable length
if hasattr(iterable, "__length_hint__"):
count = iterable.__length_hint__()
expected = count
else:
# No guess
count = sys.maxint
# If we do not have a hint on the iterable length then
# create a couple of iterables and use the second when the
# first one is exhausted (ValueError will be raised).
iterable, iterable2 = it.tee(iterable)
expected = 1000*1000 # 1 million elements
# First, create the container
expectedlen = kwargs.pop("expectedlen", expected)
dtype = np.dtype(dtype)
if dtype.kind == "V":
# A ctable
obj = ctable(np.array([], dtype=dtype),
expectedlen=expectedlen, **kwargs)
chunklen = sum(obj.cols[name].chunklen
for name in obj.names) // len(obj.names)
else:
# A carray
obj = carray(np.array([], dtype=dtype),
expectedlen=expectedlen, **kwargs)
chunklen = obj.chunklen
# Then fill it
nread, blen = 0, 0
while nread < count:
if nread + chunklen > count:
blen = count - nread
else:
blen = chunklen
if count != sys.maxint:
chunk = np.fromiter(iterable, dtype=dtype, count=blen)
else:
try:
chunk = np.fromiter(iterable, dtype=dtype, count=blen)
except ValueError:
                # Position ourselves in the second iterable
iter2 = it.islice(iterable2, nread, None, 1)
# We are reaching the end, use second iterable now
chunk = np.fromiter(iter2, dtype=dtype, count=-1)
obj.append(chunk)
nread += len(chunk)
# Check the end of the iterable
if len(chunk) < chunklen:
break
obj.flush()
return obj
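# Illustrative usage, not part of the original module; assumes the compiled carray
# extension is importable. Passing an accurate `count` lets fromiter() size the
# container up front instead of guessing and re-growing it.
def _fromiter_example():
    squares = fromiter((i * i for i in xrange(1000)), dtype=np.int64, count=1000)
    assert len(squares) == 1000
    return squares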
def fill(shape, dflt=None, dtype=np.float, **kwargs):
"""
    fill(shape, dflt=None, dtype=float, **kwargs)
Return a new carray object of given shape and type, filled with `dflt`.
Parameters
----------
shape : int
Shape of the new array, e.g., ``(2,3)``.
dflt : Python or NumPy scalar
The value to be used during the filling process. If None, values are
filled with zeros. Also, the resulting carray will have this value as
its `dflt` value.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
kwargs : list of parameters or dictionary
Any parameter supported by the carray constructor.
Returns
-------
out : carray
Array filled with `dflt` values with the given shape and dtype.
See Also
--------
ones, zeros
"""
dtype = np.dtype(dtype)
if type(shape) in (int, long, float):
shape = (int(shape),)
else:
shape = tuple(shape)
if len(shape) > 1:
# Multidimensional shape.
# The atom will have shape[1:] dims (+ the dtype dims).
dtype = np.dtype((dtype.base, shape[1:]+dtype.shape))
length = shape[0]
# Create the container
expectedlen = kwargs.pop("expectedlen", length)
if dtype.kind == "V" and dtype.shape == ():
        raise ValueError, "fill does not support ctable objects"
obj = carray([], dtype=dtype, dflt=dflt, expectedlen=expectedlen,
**kwargs)
chunklen = obj.chunklen
# Then fill it
# We need an array for the defaults so as to keep the atom info
dflt = np.array(obj.dflt, dtype=dtype)
# Making strides=(0,) below is a trick to create the array fast and
# without memory consumption
chunk = np.ndarray(length, dtype=dtype, buffer=dflt, strides=(0,))
obj.append(chunk)
obj.flush()
return obj
def zeros(shape, dtype=np.float, **kwargs):
"""
zeros(shape, dtype=float, **kwargs)
Return a new carray object of given shape and type, filled with zeros.
Parameters
----------
shape : int
Shape of the new array, e.g., ``(2,3)``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
kwargs : list of parameters or dictionary
Any parameter supported by the carray constructor.
Returns
-------
out : carray
Array of zeros with the given shape and dtype.
See Also
--------
fill, ones
"""
dtype = np.dtype(dtype)
return fill(shape=shape, dflt=np.zeros((), dtype), dtype=dtype, **kwargs)
def ones(shape, dtype=np.float, **kwargs):
"""
ones(shape, dtype=float, **kwargs)
Return a new carray object of given shape and type, filled with ones.
Parameters
----------
shape : int
Shape of the new array, e.g., ``(2,3)``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
kwargs : list of parameters or dictionary
Any parameter supported by the carray constructor.
Returns
-------
out : carray
Array of ones with the given shape and dtype.
See Also
--------
fill, zeros
"""
dtype = np.dtype(dtype)
return fill(shape=shape, dflt=np.ones((), dtype), dtype=dtype, **kwargs)
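# Illustrative usage, not part of the original module: zeros() and ones() are thin
# wrappers around fill(), differing only in the default value used for filling.
def _zeros_ones_example():
    z = zeros(5, dtype=np.int32)
    o = ones(3, dtype=np.float64)
    assert len(z) == 5 and len(o) == 3
    return z, o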
def arange(start=None, stop=None, step=None, dtype=None, **kwargs):
"""
arange([start,] stop[, step,], dtype=None, **kwargs)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns a carray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
kwargs : list of parameters or dictionary
Any parameter supported by the carray constructor.
Returns
-------
out : carray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
"""
# Check start, stop, step values
if (start, stop) == (None, None):
        raise ValueError, "You must pass at least a `stop` value."
elif stop is None:
start, stop = 0, start
elif start is None:
start, stop = 0, stop
if step is None:
step = 1
# Guess the dtype
if dtype is None:
if type(stop) in (int, long):
dtype = np.dtype(np.int_)
dtype = np.dtype(dtype)
stop = int(stop)
# Create the container
expectedlen = kwargs.pop("expectedlen", stop)
if dtype.kind == "V":
raise ValueError, "arange does not support ctables yet."
else:
obj = ca.carray(np.array([], dtype=dtype),
expectedlen=expectedlen,
**kwargs)
chunklen = obj.chunklen
# Then fill it
incr = chunklen * step # the increment for each chunk
incr += step - (incr % step) # make it match step boundary
bstart, bstop = start, start + incr
while bstart < stop:
if bstop > stop:
bstop = stop
chunk = np.arange(bstart, bstop, step, dtype=dtype)
obj.append(chunk)
bstart = bstop
bstop += incr
obj.flush()
return obj
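# Illustrative usage, not part of the original module: arange() mirrors
# numpy.arange() semantics but stores the result in a chunked carray.
def _arange_example():
    a = arange(10)           # 0, 1, ..., 9
    b = arange(2, 20, 3)     # 2, 5, 8, 11, 14, 17
    assert len(a) == 10 and len(b) == 6
    return a, b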
def _getvars(expression, user_dict, depth, vm):
"""Get the variables in `expression`.
`depth` specifies the depth of the frame in order to reach local
or global variables.
"""
cexpr = compile(expression, '<string>', 'eval')
if vm == "python":
exprvars = [ var for var in cexpr.co_names
if var not in ['None', 'False', 'True'] ]
else:
# Check that var is not a numexpr function here. This is useful for
# detecting unbound variables in expressions. This is not necessary
# for the 'python' engine.
exprvars = [ var for var in cexpr.co_names
if var not in ['None', 'False', 'True']
and var not in numexpr_functions ]
# Get the local and global variable mappings of the user frame
user_locals, user_globals = {}, {}
user_frame = sys._getframe(depth)
user_locals = user_frame.f_locals
user_globals = user_frame.f_globals
# Look for the required variables
reqvars = {}
for var in exprvars:
# Get the value.
if var in user_dict:
val = user_dict[var]
elif var in user_locals:
val = user_locals[var]
elif var in user_globals:
val = user_globals[var]
else:
if vm == "numexpr":
raise NameError("variable name ``%s`` not found" % var)
val = None
# Check the value.
if (vm == "numexpr" and
hasattr(val, 'dtype') and hasattr(val, "__len__") and
val.dtype.str[1:] == 'u8'):
raise NotImplementedError(
"variable ``%s`` refers to "
"a 64-bit unsigned integer object, that is "
"not yet supported in numexpr expressions; "
"rather, use the 'python' vm." % var )
if val is not None:
reqvars[var] = val
return reqvars
# Assign function `eval` to a variable because we are overriding it
_eval = eval
def eval(expression, vm=None, out_flavor=None, user_dict={}, **kwargs):
"""
eval(expression, vm=None, out_flavor=None, user_dict=None, **kwargs)
Evaluate an `expression` and return the result.
Parameters
----------
expression : string
A string forming an expression, like '2*a+3*b'. The values for 'a' and
'b' are variable names to be taken from the calling function's frame.
These variables may be scalars, carrays or NumPy arrays.
vm : string
The virtual machine to be used in computations. It can be 'numexpr'
or 'python'. The default is to use 'numexpr' if it is installed.
out_flavor : string
The flavor for the `out` object. It can be 'carray' or 'numpy'.
user_dict : dict
An user-provided dictionary where the variables in expression
can be found by name.
kwargs : list of parameters or dictionary
Any parameter supported by the carray constructor.
Returns
-------
out : carray object
The outcome of the expression. You can tailor the
properties of this carray by passing additional arguments
supported by carray constructor in `kwargs`.
"""
if vm is None:
vm = ca.defaults.eval_vm
if vm not in ("numexpr", "python"):
        raise ValueError, "`vm` must be either 'numexpr' or 'python'"
if out_flavor is None:
out_flavor = ca.defaults.eval_out_flavor
if out_flavor not in ("carray", "numpy"):
        raise ValueError, "`out_flavor` must be either 'carray' or 'numpy'"
# Get variables and column names participating in expression
depth = kwargs.pop('depth', 2)
vars = _getvars(expression, user_dict, depth, vm=vm)
# Gather info about sizes and lengths
typesize, vlen = 0, 1
for name in vars.iterkeys():
var = vars[name]
if hasattr(var, "__len__") and not hasattr(var, "dtype"):
raise ValueError, "only numpy/carray sequences supported"
if hasattr(var, "dtype") and not hasattr(var, "__len__"):
continue
if hasattr(var, "dtype"): # numpy/carray arrays
if isinstance(var, np.ndarray): # numpy array
typesize += var.dtype.itemsize * np.prod(var.shape[1:])
elif isinstance(var, ca.carray): # carray array
typesize += var.dtype.itemsize
else:
raise ValueError, "only numpy/carray objects supported"
if hasattr(var, "__len__"):
if vlen > 1 and vlen != len(var):
raise ValueError, "arrays must have the same length"
vlen = len(var)
if typesize == 0:
# All scalars
if vm == "python":
return _eval(expression, vars)
else:
return ca.numexpr.evaluate(expression, local_dict=vars)
return _eval_blocks(expression, vars, vlen, typesize, vm, out_flavor,
**kwargs)
def _eval_blocks(expression, vars, vlen, typesize, vm, out_flavor,
**kwargs):
"""Perform the evaluation in blocks."""
# Compute the optimal block size (in elements)
# The next is based on experiments with bench/ctable-query.py
if vm == "numexpr":
        # If numexpr, make sure that operands fit in the L3 cache
bsize = 2**20 # 1 MB is common for L3
else:
        # If python, make sure that operands fit in the L2 cache
bsize = 2**17 # 256 KB is common for L2
bsize //= typesize
# Evaluation seems more efficient if block size is a power of 2
bsize = 2 ** (int(math.log(bsize, 2)))
if vlen < 100*1000:
bsize //= 8
elif vlen < 1000*1000:
bsize //= 4
elif vlen < 10*1000*1000:
bsize //= 2
# Protection against too large atomsizes
if bsize == 0:
bsize = 1
vars_ = {}
# Get temporaries for vars
maxndims = 0
for name in vars.iterkeys():
var = vars[name]
if hasattr(var, "__len__"):
ndims = len(var.shape) + len(var.dtype.shape)
if ndims > maxndims:
maxndims = ndims
if len(var) > bsize and hasattr(var, "_getrange"):
vars_[name] = np.empty(bsize, dtype=var.dtype)
for i in xrange(0, vlen, bsize):
# Get buffers for vars
for name in vars.iterkeys():
var = vars[name]
if hasattr(var, "__len__") and len(var) > bsize:
if hasattr(var, "_getrange"):
if i+bsize < vlen:
var._getrange(i, bsize, vars_[name])
else:
vars_[name] = var[i:]
else:
vars_[name] = var[i:i+bsize]
else:
if hasattr(var, "__getitem__"):
vars_[name] = var[:]
else:
vars_[name] = var
# Perform the evaluation for this block
if vm == "python":
res_block = _eval(expression, vars_)
else:
res_block = ca.numexpr.evaluate(expression, local_dict=vars_)
if i == 0:
# Detection of reduction operations
scalar = False
dim_reduction = False
if len(res_block.shape) == 0:
scalar = True
result = res_block
continue
elif len(res_block.shape) < maxndims:
dim_reduction = True
result = res_block
continue
# Get a decent default for expectedlen
if out_flavor == "carray":
nrows = kwargs.pop('expectedlen', vlen)
result = ca.carray(res_block, expectedlen=nrows, **kwargs)
else:
out_shape = list(res_block.shape)
out_shape[0] = vlen
result = np.empty(out_shape, dtype=res_block.dtype)
result[:bsize] = res_block
else:
if scalar or dim_reduction:
result += res_block
elif out_flavor == "carray":
result.append(res_block)
else:
result[i:i+bsize] = res_block
if isinstance(result, ca.carray):
result.flush()
if scalar:
return result[()]
return result
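# Illustrative usage, not part of the original module; assumes the blaze.carray
# package is importable. eval() picks variables up from the caller's frame, so
# `x` and `y` below are visible to the expression; vm and out_flavor are passed
# explicitly here to avoid depending on the configured defaults.
def _eval_example():
    x = np.arange(1000)
    y = np.arange(1000)
    res = eval("x + 2 * y", vm="python", out_flavor="numpy")
    assert res[10] == 30
    return res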
def walk(dir, classname=None, mode='a'):
"""walk(dir, classname=None, mode='a')
Recursively iterate over carray/ctable objects hanging from `dir`.
Parameters
----------
dir : string
The directory from which the listing starts.
classname : string
If specified, only object of this class are returned. The values
supported are 'carray' and 'ctable'.
mode : string
The mode in which the object should be opened.
Returns
-------
out : iterator
Iterator over the objects found.
"""
# First, iterate over the carray objects in current dir
names = os.path.join(dir, '*')
dirs = []
for node in glob.glob(names):
if os.path.isdir(node):
try:
obj = carray(rootdir=node, mode=mode)
except:
try:
obj = ctable(rootdir=node, mode=mode)
except:
obj = None
dirs.append(node)
if obj:
if classname:
if obj.__class__.__name__ == classname:
yield obj
else:
yield obj
# Then recurse into the true directories
for dir_ in dirs:
for node in walk(dir_, classname, mode):
yield node
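# Illustrative usage, not part of the original module; `topdir` is a hypothetical
# path that would contain previously persisted carray/ctable directories.
def _walk_example(topdir='/tmp/carray_store'):
    return [obj for obj in walk(topdir, classname='carray', mode='r')]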
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 78
## End:
|
|
#!/usr/bin/env python
# encoding=utf8
import os
import sys
import getopt
import subprocess
from shutil import rmtree, copy
homeDir = os.getenv("HOME") + "/"
bkpDir = homeDir + ".bashrc_bkp/"
bashDir = homeDir + ".bashrc_include/"
tmpDir = homeDir + ".bashrc_tmp/"
repoUrl = "https://github.com/svilborg/dotfiles"
files = [".bashrc", ".bash_logout", ".gitconfig"]
def cleanup_bash():
if os.path.exists(bashDir):
rmtree(bashDir)
pass
def cleanup_tmp():
if os.path.exists(tmpDir):
rmtree(tmpDir)
pass
def cleanup_bkp():
if os.path.exists(bkpDir):
rmtree(bkpDir)
pass
def backup():
if not os.path.exists(bkpDir):
print "Create backup dir - " + bkpDir
os.makedirs(bkpDir)
for file in files:
if os.path.exists(homeDir + file):
print "Backup file - ~." + file
copy(homeDir + file, bkpDir + file)
pass
def checkout():
print "Cloning repo"
try:
output = subprocess.check_output(
["git", "clone", "--recursive", repoUrl, tmpDir])
pass
except subprocess.CalledProcessError, e:
print "CalledProcessError"
print e
except Exception, e:
print e
else:
pass
finally:
pass
pass
def install_aliases():
# Create Destination Dir
if not os.path.exists(bashDir):
print "Create bashrc dir - " + bashDir
os.makedirs(bashDir)
# Copy from Destination to Bashrc Dir
src_files = os.listdir(tmpDir + "/bashrc/")
print "Copying aliases"
for file in src_files:
fullFile = os.path.join(tmpDir + "bashrc/", file)
if (os.path.isfile(fullFile)):
copy(fullFile, bashDir + file)
pass
def install_files():
""" Install Bashrc """
print "Replace .bashrc"
for file in files:
print "Replace file - ~." + file
copy(tmpDir + file, homeDir + file)
pass
def install_bin():
print "Install bin"
# Copy from Destination to Bashrc Dir
src_files = os.listdir(tmpDir + "bin/")
print "Copying bin files"
print tmpDir + "/bin/"
for file in src_files:
fullFile = os.path.join(tmpDir + "/bin/", file)
print "Add file - ~./bin/" + file
if (os.path.isfile(fullFile)):
copy(fullFile, homeDir + "bin/" + file)
pass
def reload_bashrc():
print "Reload .bashrc"
try:
output = subprocess.check_output(['./bin/reload_bashrc.sh', '$HOME'])
pass
except Exception, e:
print e
else:
pass
finally:
pass
pass
def revertHomeFiles():
src_files = os.listdir(bkpDir)
for file in src_files:
fullFile = os.path.join(bkpDir, file)
if (os.path.isfile(fullFile)):
print "Reverting " + file
copy(fullFile, homeDir + file)
pass
def install():
cleanup_bash()
cleanup_tmp()
backup()
checkout()
install_aliases()
install_files()
install_bin()
reload_bashrc()
cleanup_tmp()
print "Installed"
pass
def revert():
revertHomeFiles()
cleanup_bash()
cleanup_tmp()
print "Reverted"
pass
def main(argv):
info = """
Usage :
Install - install.py -i
Uninstall - install.py -u
Clean backup - install.py -c
    Reload .bashrc - install.py -r
"""
try:
opts, args = getopt.getopt(argv, "hiurc")
except getopt.GetoptError:
print info
sys.exit(2)
if len(opts) > 0:
for opt, arg in opts:
if opt == '-h':
print info
sys.exit()
elif opt in ("-i"):
install()
elif opt in ("-r"):
reload_bashrc()
elif opt in ("-u"):
revert()
elif opt in ("-c"):
print "Clean backup"
cleanup_bkp()
else:
print info
else:
print info
pass
if __name__ == "__main__":
main(sys.argv[1:])
|
|
"""
**********
Edge Lists
**********
Read and write NetworkX graphs as edge lists.
The edge list format is useful for graphs with nodes that can be
meaningfully represented as strings. With the edgelist format simple
edge data can be stored but node or graph data is not.
There is no way of representing isolated nodes unless the node has a
self-loop edge.
Format
------
You can read or write three formats of edge lists with these functions.
Node pairs with no data::
1 2
Python dictionary as data::
1 2 {'weight':7, 'color':'green'}
Arbitrary data::
1 2 7 green
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['generate_edgelist',
'write_edgelist',
'parse_edgelist',
'read_edgelist',
'read_weighted_edgelist',
'write_weighted_edgelist']
from networkx.utils import is_string_like, _get_fh, make_str
import networkx as nx
def generate_edgelist(G, delimiter=' ', data=True):
"""Generate a single line of the graph G in edge list format.
Parameters
----------
G : NetworkX graph
delimiter : string, optional
Separator for node labels
data : bool or list of keys
If False generate no edge data. If True use a dictionary
representation of edge data. If a list of keys use a list of data
values corresponding to the keys.
Returns
-------
lines : string
Lines of data in adjlist format.
Examples
--------
>>> G = nx.lollipop_graph(4, 3)
>>> G[1][2]['weight'] = 3
>>> G[3][4]['capacity'] = 12
>>> for line in nx.generate_edgelist(G, data=False):
... print(line)
0 1
0 2
0 3
1 2
1 3
2 3
3 4
4 5
5 6
>>> for line in nx.generate_edgelist(G):
... print(line)
0 1 {}
0 2 {}
0 3 {}
1 2 {'weight': 3}
1 3 {}
2 3 {}
3 4 {'capacity': 12}
4 5 {}
5 6 {}
>>> for line in nx.generate_edgelist(G,data=['weight']):
... print(line)
0 1
0 2
0 3
1 2 3
1 3
2 3
3 4
4 5
5 6
See Also
--------
write_adjlist, read_adjlist
"""
if data is True or data is False:
for e in G.edges(data=data):
yield delimiter.join(map(make_str,e))
else:
for u,v,d in G.edges(data=True):
e=[u,v]
try:
e.extend(d[k] for k in data)
except KeyError:
pass # missing data for this edge, should warn?
yield delimiter.join(map(make_str,e))
def write_edgelist(G, path, comments="#", delimiter=' ', data=True,
encoding = 'utf-8'):
"""Write graph as a list of edges.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
The string used to separate values. The default is whitespace.
data : bool or list, optional
If False write no edge data.
        If True write a string representation of the edge data dictionary.
If a list (or other iterable) is provided, write the keys specified
in the list.
encoding: string, optional
Specify which encoding to use when writing file.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_edgelist(G, "test.edgelist")
>>> G=nx.path_graph(4)
>>> fh=open("test.edgelist",'wb')
>>> nx.write_edgelist(G, fh)
>>> nx.write_edgelist(G, "test.edgelist.gz")
>>> nx.write_edgelist(G, "test.edgelist.gz", data=False)
>>> G=nx.Graph()
>>> G.add_edge(1,2,weight=7,color='red')
>>> nx.write_edgelist(G,'test.edgelist',data=False)
>>> nx.write_edgelist(G,'test.edgelist',data=['color'])
>>> nx.write_edgelist(G,'test.edgelist',data=['color','weight'])
See Also
--------
    read_edgelist()
write_weighted_edgelist()
"""
fh=_get_fh(path, 'wb')
for line in generate_edgelist(G, delimiter, data):
line+='\n'
fh.write(line.encode(encoding))
def parse_edgelist(lines, comments='#', delimiter=' ',
create_using=None, nodetype=None, data=True):
"""Parse lines of an edge list representation of a graph.
Returns
-------
G: NetworkX Graph
The graph corresponding to lines
data : bool or list of (label,type) tuples
If False generate no edge data or if True use a dictionary
representation of edge data or a list tuples specifying dictionary
key names and types for edge data.
create_using: NetworkX graph container, optional
Use given NetworkX graph for holding nodes or edges.
nodetype : Python type, optional
Convert nodes to this type.
comments : string, optional
Marker for comment lines
delimiter : string, optional
Separator for node labels
create_using: NetworkX graph container
Use given NetworkX graph for holding nodes or edges.
Examples
--------
Edgelist with no data:
>>> lines = ["1 2",
... "2 3",
... "3 4"]
>>> G = nx.parse_edgelist(lines, nodetype = int)
>>> G.nodes()
[1, 2, 3, 4]
>>> G.edges()
[(1, 2), (2, 3), (3, 4)]
Edgelist with data in Python dictionary representation:
>>> lines = ["1 2 {'weight':3}",
... "2 3 {'weight':27}",
... "3 4 {'weight':3.0}"]
>>> G = nx.parse_edgelist(lines, nodetype = int)
>>> G.nodes()
[1, 2, 3, 4]
>>> G.edges(data = True)
[(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})]
Edgelist with data in a list:
>>> lines = ["1 2 3",
... "2 3 27",
... "3 4 3.0"]
>>> G = nx.parse_edgelist(lines, nodetype = int, data=(('weight',float),))
>>> G.nodes()
[1, 2, 3, 4]
>>> G.edges(data = True)
[(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})]
See Also
--------
read_weighted_edgelist
"""
from ast import literal_eval
if create_using is None:
G=nx.Graph()
else:
try:
G=create_using
G.clear()
except:
raise TypeError("create_using input is not a NetworkX graph type")
for line in lines:
p=line.find(comments)
if p>=0:
line = line[:p]
if not len(line):
continue
# split line, should have 2 or more
s=line.strip().split(delimiter)
if len(s)<2:
continue
u=s.pop(0)
v=s.pop(0)
d=s
if nodetype is not None:
try:
u=nodetype(u)
v=nodetype(v)
except:
raise TypeError("Failed to convert nodes %s,%s to type %s."
%(u,v,nodetype))
if len(d)==0 or data is False:
# no data or data type specified
edgedata={}
elif data is True:
# no edge types specified
try: # try to evaluate as dictionary
edgedata=dict(literal_eval(' '.join(d)))
except:
raise TypeError(
"Failed to convert edge data (%s) to dictionary."%(d))
else:
# convert edge data to dictionary with specified keys and type
if len(d)!=len(data):
raise IndexError(
"Edge data %s and data_keys %s are not the same length"%
(d, data))
edgedata={}
for (edge_key,edge_type),edge_value in zip(data,d):
try:
edge_value=edge_type(edge_value)
except:
raise TypeError(
"Failed to convert %s data %s to type %s."
%(edge_key, edge_value, edge_type))
edgedata.update({edge_key:edge_value})
G.add_edge(u, v, attr_dict=edgedata)
return G
def read_edgelist(path, comments="#", delimiter=' ', create_using=None,
nodetype=None, data=True, edgetype=None, encoding='utf-8'):
"""Read a graph from a list of edges.
Parameters
----------
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'rb' mode.
Filenames ending in .gz or .bz2 will be uncompressed.
comments : string, optional
The character used to indicate the start of a comment.
delimiter : string, optional
The string used to separate values. The default is whitespace.
create_using : Graph container, optional,
Use specified container to build graph. The default is networkx.Graph,
an undirected graph.
nodetype : int, float, str, Python type, optional
Convert node data from strings to specified type
data : bool or list of (label,type) tuples
Tuples specifying dictionary key names and types for edge data
edgetype : int, float, str, Python type, optional OBSOLETE
Convert edge data from strings to specified type and use as 'weight'
encoding: string, optional
Specify which encoding to use when reading file.
Returns
-------
G : graph
A networkx Graph or other type specified with create_using
Examples
--------
>>> nx.write_edgelist(nx.path_graph(4), "test.edgelist")
>>> G=nx.read_edgelist("test.edgelist")
>>> fh=open("test.edgelist", 'rb')
>>> G=nx.read_edgelist(fh)
>>> G=nx.read_edgelist("test.edgelist", nodetype=int)
>>> G=nx.read_edgelist("test.edgelist",create_using=nx.DiGraph())
Edgelist with data in a list:
>>> textline = '1 2 3'
>>> open('test.edgelist','w').write(textline)
>>> G = nx.read_edgelist('test.edgelist', nodetype=int, data=(('weight',float),))
>>> G.nodes()
[1, 2]
>>> G.edges(data = True)
[(1, 2, {'weight': 3.0})]
See parse_edgelist() for more examples of formatting.
See Also
--------
parse_edgelist
Notes
-----
Since nodes must be hashable, the function nodetype must return hashable
types (e.g. int, float, str, frozenset - or tuples of those, etc.)
"""
fh=_get_fh(path, 'rb')
lines = (line.decode(encoding) for line in fh)
return parse_edgelist(lines,comments=comments, delimiter=delimiter,
create_using=create_using, nodetype=nodetype,
data=data)
def write_weighted_edgelist(G, path, comments="#",
delimiter=' ', encoding='utf-8'):
"""Write graph G as a list of edges with numeric weights.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'wb' mode.
Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
The string used to separate values. The default is whitespace.
encoding: string, optional
Specify which encoding to use when writing file.
Examples
--------
>>> G=nx.Graph()
>>> G.add_edge(1,2,weight=7)
>>> nx.write_weighted_edgelist(G, 'test.weighted.edgelist')
See Also
--------
read_edgelist()
write_edgelist()
    read_weighted_edgelist()
"""
write_edgelist(G,path, comments=comments, delimiter=delimiter,
data=('weight',), encoding = encoding)
def read_weighted_edgelist(path, comments="#", delimiter=' ',
create_using=None, nodetype=None, encoding='utf-8'):
"""Read a graph as list of edges with numeric weights.
Parameters
----------
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'rb' mode.
Filenames ending in .gz or .bz2 will be uncompressed.
comments : string, optional
The character used to indicate the start of a comment.
delimiter : string, optional
The string used to separate values. The default is whitespace.
create_using : Graph container, optional,
Use specified container to build graph. The default is networkx.Graph,
an undirected graph.
nodetype : int, float, str, Python type, optional
Convert node data from strings to specified type
encoding: string, optional
Specify which encoding to use when reading file.
Returns
-------
G : graph
A networkx Graph or other type specified with create_using
Notes
-----
Since nodes must be hashable, the function nodetype must return hashable
types (e.g. int, float, str, frozenset - or tuples of those, etc.)
Example edgelist file format.
With numeric edge data::
# read with
# >>> G=nx.read_weighted_edgelist(fh)
# source target data
a b 1
a c 3.14159
d e 42
"""
return read_edgelist(path,
comments=comments,
delimiter=delimiter,
create_using=create_using,
nodetype=nodetype,
data=(('weight',float),),
encoding = encoding
)
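# Illustrative round trip, not part of the original module (it writes and removes
# a temporary file in the working directory): a weight written by
# write_weighted_edgelist() is read back as a float by read_weighted_edgelist().
def _weighted_round_trip_example():
    import os
    G = nx.Graph()
    G.add_edge('a', 'b', weight=2.5)
    write_weighted_edgelist(G, 'demo.weighted.edgelist')
    H = read_weighted_edgelist('demo.weighted.edgelist')
    assert H['a']['b']['weight'] == 2.5
    os.unlink('demo.weighted.edgelist')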
# fixture for nose tests
def teardown_module(module):
import os
os.unlink('test.edgelist')
os.unlink('test.edgelist.gz')
os.unlink('test.weighted.edgelist')
|
|
# Copyright 2013 eBay Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import webob
from cinder.api.contrib import scheduler_stats
from cinder.api import microversions as mv
from cinder.api.openstack import api_version_request as api_version
from cinder import context
from cinder import exception
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
def schedule_rpcapi_get_pools(self, context, filters=None):
all_pools = []
pool1 = dict(name='pool1',
capabilities=dict(
total_capacity=1024, free_capacity=100,
volume_backend_name='pool1', reserved_percentage=0,
driver_version='1.0.0', storage_protocol='iSCSI',
QoS_support='False', updated=None))
all_pools.append(pool1)
pool2 = dict(name='pool2',
capabilities=dict(
total_capacity=512, free_capacity=200,
volume_backend_name='pool2', reserved_percentage=0,
driver_version='1.0.1', storage_protocol='iSER',
QoS_support='True', updated=None))
all_pools.append(pool2)
return all_pools
@ddt.ddt
class SchedulerStatsAPITest(test.TestCase):
def setUp(self):
super(SchedulerStatsAPITest, self).setUp()
self.flags(host='fake')
self.controller = scheduler_stats.SchedulerStatsController()
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
schedule_rpcapi_get_pools)
def test_get_pools_summary(self):
req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats' %
fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.get_pools(req)
self.assertEqual(2, len(res['pools']))
expected = {
'pools': [
{
'name': 'pool1',
},
{
'name': 'pool2',
}
]
}
self.assertDictEqual(expected, res)
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools')
def test_get_pools_summary_filter_name(self, mock_rpcapi):
req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?name=pool1' %
fake.PROJECT_ID)
mock_rpcapi.return_value = [dict(name='pool1',
capabilities=dict(foo='bar'))]
req.api_version_request = mv.get_api_version(mv.POOL_FILTER)
req.environ['cinder.context'] = self.ctxt
res = self.controller.get_pools(req)
expected = {
'pools': [
{
'name': 'pool1',
}
]
}
self.assertDictEqual(expected, res)
filters = {'name': 'pool1'}
mock_rpcapi.assert_called_with(mock.ANY, filters=filters)
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools')
def test_get_pools_summary_filter_capabilities(self, mock_rpcapi):
req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?detail=True'
'&foo=bar' % fake.PROJECT_ID)
mock_rpcapi.return_value = [dict(name='pool1',
capabilities=dict(foo='bar'))]
req.api_version_request = mv.get_api_version(mv.POOL_FILTER)
req.environ['cinder.context'] = self.ctxt
res = self.controller.get_pools(req)
expected = {
'pools': [
{
'name': 'pool1',
'capabilities': {
'foo': 'bar'
}
}
]
}
self.assertDictEqual(expected, res)
filters = {'foo': 'bar'}
mock_rpcapi.assert_called_with(mock.ANY, filters=filters)
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
schedule_rpcapi_get_pools)
def test_get_pools_detail(self):
req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?detail=True' %
fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.get_pools(req)
self.assertEqual(2, len(res['pools']))
expected = {
'pools': [
{
'name': 'pool1',
'capabilities': {
'updated': None,
'total_capacity': 1024,
'free_capacity': 100,
'volume_backend_name': 'pool1',
'reserved_percentage': 0,
'driver_version': '1.0.0',
'storage_protocol': 'iSCSI',
'QoS_support': 'False', }
},
{
'name': 'pool2',
'capabilities': {
'updated': None,
'total_capacity': 512,
'free_capacity': 200,
'volume_backend_name': 'pool2',
'reserved_percentage': 0,
'driver_version': '1.0.1',
'storage_protocol': 'iSER',
'QoS_support': 'True', }
}
]
}
self.assertDictEqual(expected, res)
def test_get_pools_detail_invalid_bool(self):
req = fakes.HTTPRequest.blank(
'/v3/%s/scheduler_stats?detail=InvalidBool' %
fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
self.assertRaises(exception.InvalidParameterValue,
self.controller.get_pools,
req)
@ddt.data((mv.get_prior_version(mv.POOL_TYPE_FILTER), False),
(mv.POOL_TYPE_FILTER, True))
@ddt.unpack
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools')
@mock.patch('cinder.api.common.reject_invalid_filters')
def test_get_pools_by_volume_type(self,
version,
support_volume_type,
mock_reject_invalid_filters,
mock_get_pools
):
req = fakes.HTTPRequest.blank('/v3/%s/scheduler-stats/get_pools?'
'volume_type=lvm' % fake.PROJECT_ID)
mock_get_pools.return_value = [{'name': 'pool1',
'capabilities': {'foo': 'bar'}}]
req.api_version_request = api_version.APIVersionRequest(version)
req.environ['cinder.context'] = self.ctxt
res = self.controller.get_pools(req)
expected = {
'pools': [{'name': 'pool1'}]
}
filters = dict()
if support_volume_type:
filters = {'volume_type': 'lvm'}
filters = webob.multidict.MultiDict(filters)
mock_reject_invalid_filters.assert_called_once_with(self.ctxt, filters,
'pool', True)
self.assertDictEqual(expected, res)
mock_get_pools.assert_called_with(mock.ANY, filters=filters)
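# Illustrative aside (not part of the Cinder suite): the @ddt.data/@ddt.unpack
# pair used above generates one test per data tuple and unpacks each tuple into
# positional arguments.  A minimal standalone sketch of the same pattern:
#
# import unittest
# import ddt
#
# @ddt.ddt
# class VersionFilterExample(unittest.TestCase):
#     @ddt.data(('prior-version', False), ('current-version', True))
#     @ddt.unpack
#     def test_flag_per_version(self, version, supported):
#         self.assertIsInstance(supported, bool)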
|
|
#!/usr/bin/python
import sys
import os
sys.path.append("../../src/")
sys.path.append("../util/")
import commands
import common
CXXTAGS_QUERY = "../../bin/cxxtags_query"
if len(sys.argv) != 2:
print "usage: cmd db_file"
exit(1)
cur_dir = os.getcwd()
db_dir = sys.argv[1]
q_list = [
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 3 7", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 3 7", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 3 7", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 3 7", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 6 5", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 6 5", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 6 5", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 6 5", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 7 5", #~CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 7 5", #~CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 7 5", #~CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 7 5", #~CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 8 18", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 8 18", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 8 18", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 8 18", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 11 6", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 11 6", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 11 6", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 11 6", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 11 16", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 11 16", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 11 16", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 11 16", #response
#"decl "+db_dir+" "+cur_dir+"/inhe.cpp 12 5", #printf
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 15 7", #CParent1
"def "+db_dir+" "+cur_dir+"/inhe.cpp 15 7", #CParent1
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 15 7", #CParent1
"override "+db_dir+" "+cur_dir+"/inhe.cpp 15 7", #CParent1
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 18 5", #CParent1
"def "+db_dir+" "+cur_dir+"/inhe.cpp 18 5", #CParent1
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 18 5", #CParent1
"override "+db_dir+" "+cur_dir+"/inhe.cpp 18 5", #CParent1
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 19 5", #~CParent1
"def "+db_dir+" "+cur_dir+"/inhe.cpp 19 5", #~CParent1
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 19 5", #~CParent1
"override "+db_dir+" "+cur_dir+"/inhe.cpp 19 5", #~CParent1
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 20 18", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 20 18", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 20 18", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 20 18", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 23 6", #CParent1
"def "+db_dir+" "+cur_dir+"/inhe.cpp 23 6", #CParent1
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 23 6", #CParent1
"override "+db_dir+" "+cur_dir+"/inhe.cpp 23 6", #CParent1
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 23 16", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 23 16", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 23 16", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 23 16", #response
#"decl "+db_dir+" "+cur_dir+"/inhe.cpp 24 5", #printf
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 27 7", #CChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 27 7", #CChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 27 7", #CChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 27 7", #CChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 28 10", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 28 10", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 28 10", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 28 10", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 31 5", #CChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 31 5", #CChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 31 5", #CChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 31 5", #CChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 32 5", #~CChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 32 5", #~CChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 32 5", #~CChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 32 5", #~CChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 33 18", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 33 18", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 33 18", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 33 18", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 36 6", #CChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 36 6", #CChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 36 6", #CChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 36 6", #CChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 36 14", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 36 14", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 36 14", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 36 14", #response
#"decl "+db_dir+" "+cur_dir+"/inhe.cpp 37 5", #printf
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 40 7", #CGChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 40 7", #CGChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 40 7", #CGChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 40 7", #CGChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 41 10", #CChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 41 10", #CChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 41 10", #CChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 41 10", #CChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 44 5", #CGChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 44 5", #CGChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 44 5", #CGChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 44 5", #CGChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 45 5", #~CGChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 45 5", #~CGChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 45 5", #~CGChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 45 5", #~CGChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 46 18", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 46 18", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 46 18", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 46 18", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 49 6", #CGChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 49 6", #CGChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 49 6", #CGChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 49 6", #CGChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 49 15", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 49 15", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 49 15", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 49 15", #response
#"decl "+db_dir+" "+cur_dir+"/inhe.cpp 50 5", #printf
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 53 7", #COther
"def "+db_dir+" "+cur_dir+"/inhe.cpp 53 7", #COther
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 53 7", #COther
"override "+db_dir+" "+cur_dir+"/inhe.cpp 53 7", #COther
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 54 10", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 54 10", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 54 10", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 54 10", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 54 27", #CParent1
"def "+db_dir+" "+cur_dir+"/inhe.cpp 54 27", #CParent1
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 54 27", #CParent1
"override "+db_dir+" "+cur_dir+"/inhe.cpp 54 27", #CParent1
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 57 5", #COther
"def "+db_dir+" "+cur_dir+"/inhe.cpp 57 5", #COther
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 57 5", #COther
"override "+db_dir+" "+cur_dir+"/inhe.cpp 57 5", #COther
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 58 5", #~COther
"def "+db_dir+" "+cur_dir+"/inhe.cpp 58 5", #~COther
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 58 5", #~COther
"override "+db_dir+" "+cur_dir+"/inhe.cpp 58 5", #~COther
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 59 18", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 59 18", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 59 18", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 59 18", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 62 6", #COther
"def "+db_dir+" "+cur_dir+"/inhe.cpp 62 6", #COther
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 62 6", #COther
"override "+db_dir+" "+cur_dir+"/inhe.cpp 62 6", #COther
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 62 14", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 62 14", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 62 14", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 62 14", #response
#"decl "+db_dir+" "+cur_dir+"/inhe.cpp 63 5", #printf
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 66 13", #test
"def "+db_dir+" "+cur_dir+"/inhe.cpp 66 13", #test
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 66 13", #test
"override "+db_dir+" "+cur_dir+"/inhe.cpp 66 13", #test
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 66 24", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 66 24", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 66 24", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 66 24", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 66 34", #a
"def "+db_dir+" "+cur_dir+"/inhe.cpp 66 34", #a
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 66 34", #a
"override "+db_dir+" "+cur_dir+"/inhe.cpp 66 34", #a
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 68 5", #a
"def "+db_dir+" "+cur_dir+"/inhe.cpp 68 5", #a
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 68 5", #a
"override "+db_dir+" "+cur_dir+"/inhe.cpp 68 5", #a
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 68 8", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 68 8", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 68 8", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 68 8", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 71 5", #main
"def "+db_dir+" "+cur_dir+"/inhe.cpp 71 5", #main
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 71 5", #main
"override "+db_dir+" "+cur_dir+"/inhe.cpp 71 5", #main
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 73 5", #CParent0
"def "+db_dir+" "+cur_dir+"/inhe.cpp 73 5", #CParent0
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 73 5", #CParent0
"override "+db_dir+" "+cur_dir+"/inhe.cpp 73 5", #CParent0
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 73 14", #parent
"def "+db_dir+" "+cur_dir+"/inhe.cpp 73 14", #parent
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 73 14", #parent
"override "+db_dir+" "+cur_dir+"/inhe.cpp 73 14", #parent
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 74 5", #CChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 74 5", #CChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 74 5", #CChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 74 5", #CChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 74 12", #child
"def "+db_dir+" "+cur_dir+"/inhe.cpp 74 12", #child
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 74 12", #child
"override "+db_dir+" "+cur_dir+"/inhe.cpp 74 12", #child
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 75 5", #CGChild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 75 5", #CGChild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 75 5", #CGChild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 75 5", #CGChild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 75 13", #gchild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 75 13", #gchild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 75 13", #gchild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 75 13", #gchild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 76 5", #COther
"def "+db_dir+" "+cur_dir+"/inhe.cpp 76 5", #COther
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 76 5", #COther
"override "+db_dir+" "+cur_dir+"/inhe.cpp 76 5", #COther
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 76 12", #other
"def "+db_dir+" "+cur_dir+"/inhe.cpp 76 12", #other
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 76 12", #other
"override "+db_dir+" "+cur_dir+"/inhe.cpp 76 12", #other
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 77 5", #parent
"def "+db_dir+" "+cur_dir+"/inhe.cpp 77 5", #parent
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 77 5", #parent
"override "+db_dir+" "+cur_dir+"/inhe.cpp 77 5", #parent
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 77 12", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 77 12", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 77 12", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 77 12", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 78 5", #child
"def "+db_dir+" "+cur_dir+"/inhe.cpp 78 5", #child
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 78 5", #child
"override "+db_dir+" "+cur_dir+"/inhe.cpp 78 5", #child
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 78 11", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 78 11", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 78 11", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 78 11", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 79 5", #gchild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 79 5", #gchild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 79 5", #gchild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 79 5", #gchild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 79 12", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 79 12", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 79 12", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 79 12", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 80 5", #other
"def "+db_dir+" "+cur_dir+"/inhe.cpp 80 5", #other
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 80 5", #other
"override "+db_dir+" "+cur_dir+"/inhe.cpp 80 5", #other
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 80 11", #response
"def "+db_dir+" "+cur_dir+"/inhe.cpp 80 11", #response
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 80 11", #response
"override "+db_dir+" "+cur_dir+"/inhe.cpp 80 11", #response
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 81 5", #test
"def "+db_dir+" "+cur_dir+"/inhe.cpp 81 5", #test
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 81 5", #test
"override "+db_dir+" "+cur_dir+"/inhe.cpp 81 5", #test
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 81 11", #parent
"def "+db_dir+" "+cur_dir+"/inhe.cpp 81 11", #parent
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 81 11", #parent
"override "+db_dir+" "+cur_dir+"/inhe.cpp 81 11", #parent
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 82 5", #test
"def "+db_dir+" "+cur_dir+"/inhe.cpp 82 5", #test
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 82 5", #test
"override "+db_dir+" "+cur_dir+"/inhe.cpp 82 5", #test
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 82 11", #child
"def "+db_dir+" "+cur_dir+"/inhe.cpp 82 11", #child
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 82 11", #child
"override "+db_dir+" "+cur_dir+"/inhe.cpp 82 11", #child
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 83 5", #test
"def "+db_dir+" "+cur_dir+"/inhe.cpp 83 5", #test
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 83 5", #test
"override "+db_dir+" "+cur_dir+"/inhe.cpp 83 5", #test
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 83 11", #gchild
"def "+db_dir+" "+cur_dir+"/inhe.cpp 83 11", #gchild
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 83 11", #gchild
"override "+db_dir+" "+cur_dir+"/inhe.cpp 83 11", #gchild
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 84 5", #test
"def "+db_dir+" "+cur_dir+"/inhe.cpp 84 5", #test
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 84 5", #test
"override "+db_dir+" "+cur_dir+"/inhe.cpp 84 5", #test
"decl "+db_dir+" "+cur_dir+"/inhe.cpp 84 11", #other
"def "+db_dir+" "+cur_dir+"/inhe.cpp 84 11", #other
"ref "+db_dir+" "+cur_dir+"/inhe.cpp 84 11", #other
"override "+db_dir+" "+cur_dir+"/inhe.cpp 84 11", #other
]
ans_list = [
# inhe.cpp
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/inhe.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/inhe.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/inhe.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/inhe.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/inhe.cpp|73|5| CParent0 parent;',
],
[""],
["CParent0|"+cur_dir+"/inhe.cpp|6|5| CParent0(){}"],
["CParent0|"+cur_dir+"/inhe.cpp|6|5| CParent0(){}"],
[""],
[""],
["~CParent0|"+cur_dir+"/inhe.cpp|7|5| ~CParent0(){}"],
["~CParent0|"+cur_dir+"/inhe.cpp|7|5| ~CParent0(){}"],
[""],
[""],
# 8 18
["response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/inhe.cpp|68|8| a->response();",
"response|"+cur_dir+"/inhe.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {",
],
# 11 6
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/inhe.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/inhe.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/inhe.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/inhe.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/inhe.cpp|73|5| CParent0 parent;',
],
[""],
# 11 16
["response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/inhe.cpp|68|8| a->response();",
"response|"+cur_dir+"/inhe.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {",
],
# 15 7
["CParent1|"+cur_dir+"/inhe.cpp|15|7|class CParent1"],
["CParent1|"+cur_dir+"/inhe.cpp|15|7|class CParent1"],
[
"CParent1|"+cur_dir+r'/inhe.cpp|23|6|void CParent1::response(void) {',
"CParent1|"+cur_dir+r'/inhe.cpp|54|27|: public CParent0, public CParent1',
],
[""],
# 18 5
["CParent1|"+cur_dir+"/inhe.cpp|18|5| CParent1(){}"],
["CParent1|"+cur_dir+"/inhe.cpp|18|5| CParent1(){}"],
[""],
[""],
# 19 5
["~CParent1|"+cur_dir+"/inhe.cpp|19|5| ~CParent1(){}"],
["~CParent1|"+cur_dir+"/inhe.cpp|19|5| ~CParent1(){}"],
[""],
[""],
# 20 18
["response|"+cur_dir+"/inhe.cpp|20|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|23|16|void CParent1::response(void) {"],
[""],
[
"response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {",
],
# 23 6
["CParent1|"+cur_dir+"/inhe.cpp|15|7|class CParent1"],
["CParent1|"+cur_dir+"/inhe.cpp|15|7|class CParent1"],
[
"CParent1|"+cur_dir+r'/inhe.cpp|23|6|void CParent1::response(void) {',
"CParent1|"+cur_dir+r'/inhe.cpp|54|27|: public CParent0, public CParent1',
],
[""],
# 23 16
["response|"+cur_dir+"/inhe.cpp|20|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|23|16|void CParent1::response(void) {"],
[""],
[
"response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {",
],
# 27 7
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/inhe.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/inhe.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/inhe.cpp|74|5| CChild child;',
],
[""],
# 28 10
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/inhe.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/inhe.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/inhe.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/inhe.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/inhe.cpp|73|5| CParent0 parent;',
],
[""],
# 31 5
["CChild|"+cur_dir+"/inhe.cpp|31|5| CChild(){}"],
["CChild|"+cur_dir+"/inhe.cpp|31|5| CChild(){}"],
[""],
[""],
# 32 5
["~CChild|"+cur_dir+"/inhe.cpp|32|5| ~CChild(){}"],
["~CChild|"+cur_dir+"/inhe.cpp|32|5| ~CChild(){}"],
[""],
[""],
# 33 18
["response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|78|11| child.response();"],
[
"response|"+cur_dir+"/inhe.cpp|46|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|49|15|void CGChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);",
],
# 36 6
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/inhe.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/inhe.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/inhe.cpp|74|5| CChild child;',
],
[""],
# 36 14
["response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|78|11| child.response();"],
[
"response|"+cur_dir+"/inhe.cpp|46|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|49|15|void CGChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);",
],
# 40 7
["CGChild|"+cur_dir+"/inhe.cpp|40|7|class CGChild"],
["CGChild|"+cur_dir+"/inhe.cpp|40|7|class CGChild"],
[
"CGChild|"+cur_dir+r'/inhe.cpp|49|6|void CGChild::response(void) {',
"CGChild|"+cur_dir+r'/inhe.cpp|75|5| CGChild gchild;',
],
[""],
# 41 10
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/inhe.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/inhe.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/inhe.cpp|74|5| CChild child;',
],
[""],
# 44 5
["CGChild|"+cur_dir+"/inhe.cpp|44|5| CGChild(){}"],
["CGChild|"+cur_dir+"/inhe.cpp|44|5| CGChild(){}"],
[""],
[""],
# 45 5
["~CGChild|"+cur_dir+"/inhe.cpp|45|5| ~CGChild(){}"],
["~CGChild|"+cur_dir+"/inhe.cpp|45|5| ~CGChild(){}"],
[""],
[""],
# 46 18
["response|"+cur_dir+"/inhe.cpp|46|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|49|15|void CGChild::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|79|12| gchild.response();"],
["response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);"],
# 49 6
["CGChild|"+cur_dir+"/inhe.cpp|40|7|class CGChild"],
["CGChild|"+cur_dir+"/inhe.cpp|40|7|class CGChild"],
[
"CGChild|"+cur_dir+r'/inhe.cpp|49|6|void CGChild::response(void) {',
"CGChild|"+cur_dir+r'/inhe.cpp|75|5| CGChild gchild;',
],
[""],
# 49 15
["response|"+cur_dir+"/inhe.cpp|46|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|49|15|void CGChild::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|79|12| gchild.response();"],
["response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);"],
# 53 7
["COther|"+cur_dir+"/inhe.cpp|53|7|class COther"],
["COther|"+cur_dir+"/inhe.cpp|53|7|class COther"],
[
"COther|"+cur_dir+r'/inhe.cpp|62|6|void COther::response(void) {',
"COther|"+cur_dir+r'/inhe.cpp|76|5| COther other;',
],
[""],
# 54 10
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/inhe.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/inhe.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/inhe.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/inhe.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/inhe.cpp|73|5| CParent0 parent;',
],
[""],
# 54 27
["CParent1|"+cur_dir+"/inhe.cpp|15|7|class CParent1"],
["CParent1|"+cur_dir+"/inhe.cpp|15|7|class CParent1"],
[
"CParent1|"+cur_dir+r'/inhe.cpp|23|6|void CParent1::response(void) {',
"CParent1|"+cur_dir+r'/inhe.cpp|54|27|: public CParent0, public CParent1',
],
[""],
# 57 5
["COther|"+cur_dir+"/inhe.cpp|57|5| COther(){}"],
["COther|"+cur_dir+"/inhe.cpp|57|5| COther(){}"],
[""],
[""],
# 58 5
["~COther|"+cur_dir+"/inhe.cpp|58|5| ~COther(){}"],
["~COther|"+cur_dir+"/inhe.cpp|58|5| ~COther(){}"],
[""],
[""],
# 59 18
["response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|80|11| other.response();"],
[
"response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|20|18| virtual void response(void);",
],
# 62 6
["COther|"+cur_dir+"/inhe.cpp|53|7|class COther"],
["COther|"+cur_dir+"/inhe.cpp|53|7|class COther"],
[
"COther|"+cur_dir+r'/inhe.cpp|62|6|void COther::response(void) {',
"COther|"+cur_dir+r'/inhe.cpp|76|5| COther other;',
],
[""],
# 62 14
["response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|80|11| other.response();"],
[
"response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|20|18| virtual void response(void);",
],
# 66 13
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/inhe.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/inhe.cpp|82|5| test(&child);",
"test|"+cur_dir+"/inhe.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/inhe.cpp|84|5| test(&other);",
],
[""],
# 66 24
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/inhe.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/inhe.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/inhe.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/inhe.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/inhe.cpp|73|5| CParent0 parent;',
],
[""],
# 66 34
["a|"+cur_dir+"/inhe.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+"/inhe.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+r'/inhe.cpp|68|5| a->response();'],
[""],
# 68 5
["a|"+cur_dir+"/inhe.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+"/inhe.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+r'/inhe.cpp|68|5| a->response();'],
[""],
# 68 8
["response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/inhe.cpp|68|8| a->response();",
"response|"+cur_dir+"/inhe.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {",
],
# 71 5
["main|"+cur_dir+"/inhe.cpp|71|5|int main()"],
["main|"+cur_dir+"/inhe.cpp|71|5|int main()"],
[""],
[""],
# 73 5
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/inhe.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/inhe.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/inhe.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/inhe.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/inhe.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/inhe.cpp|73|5| CParent0 parent;',
],
[""],
# 73 14
["parent|"+cur_dir+"/inhe.cpp|73|14| CParent0 parent;"],
["parent|"+cur_dir+"/inhe.cpp|73|14| CParent0 parent;"],
[
"parent|"+cur_dir+"/inhe.cpp|77|5| parent.response();",
"parent|"+cur_dir+"/inhe.cpp|81|11| test(&parent);",
],
[""],
# 74 5
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/inhe.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/inhe.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/inhe.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/inhe.cpp|74|5| CChild child;',
],
[""],
# 74 12
["child|"+cur_dir+"/inhe.cpp|74|12| CChild child;"],
["child|"+cur_dir+"/inhe.cpp|74|12| CChild child;"],
[
"child|"+cur_dir+"/inhe.cpp|78|5| child.response();",
"child|"+cur_dir+"/inhe.cpp|82|11| test(&child);",
],
[""],
# 75 5
["CGChild|"+cur_dir+"/inhe.cpp|40|7|class CGChild"],
["CGChild|"+cur_dir+"/inhe.cpp|40|7|class CGChild"],
[
"CGChild|"+cur_dir+r'/inhe.cpp|49|6|void CGChild::response(void) {',
"CGChild|"+cur_dir+r'/inhe.cpp|75|5| CGChild gchild;',
],
[""],
# 75 13
["gchild|"+cur_dir+"/inhe.cpp|75|13| CGChild gchild;"],
["gchild|"+cur_dir+"/inhe.cpp|75|13| CGChild gchild;"],
[
"gchild|"+cur_dir+"/inhe.cpp|79|5| gchild.response();",
"gchild|"+cur_dir+"/inhe.cpp|83|11| test(&gchild);",
],
[""],
# 76 5
["COther|"+cur_dir+"/inhe.cpp|53|7|class COther"],
["COther|"+cur_dir+"/inhe.cpp|53|7|class COther"],
[
"COther|"+cur_dir+r'/inhe.cpp|62|6|void COther::response(void) {',
"COther|"+cur_dir+r'/inhe.cpp|76|5| COther other;',
],
[""],
# 76 12
["other|"+cur_dir+"/inhe.cpp|76|12| COther other;"],
["other|"+cur_dir+"/inhe.cpp|76|12| COther other;"],
[
"other|"+cur_dir+"/inhe.cpp|80|5| other.response();",
"other|"+cur_dir+"/inhe.cpp|84|11| test(&other);",
],
[""],
# 77 5
["parent|"+cur_dir+"/inhe.cpp|73|14| CParent0 parent;"],
["parent|"+cur_dir+"/inhe.cpp|73|14| CParent0 parent;"],
[
"parent|"+cur_dir+"/inhe.cpp|77|5| parent.response();",
"parent|"+cur_dir+"/inhe.cpp|81|11| test(&parent);",
],
[""],
# 77 12
["response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/inhe.cpp|68|8| a->response();",
"response|"+cur_dir+"/inhe.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {",
],
# 78 5
["child|"+cur_dir+"/inhe.cpp|74|12| CChild child;"],
["child|"+cur_dir+"/inhe.cpp|74|12| CChild child;"],
[
"child|"+cur_dir+"/inhe.cpp|78|5| child.response();",
"child|"+cur_dir+"/inhe.cpp|82|11| test(&child);",
],
[""],
# 78 11
["response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|36|14|void CChild::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|78|11| child.response();"],
[
"response|"+cur_dir+"/inhe.cpp|46|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|49|15|void CGChild::response(void) {",
"response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);",
],
# 79 5
["gchild|"+cur_dir+"/inhe.cpp|75|13| CGChild gchild;"],
["gchild|"+cur_dir+"/inhe.cpp|75|13| CGChild gchild;"],
[
"gchild|"+cur_dir+"/inhe.cpp|79|5| gchild.response();",
"gchild|"+cur_dir+"/inhe.cpp|83|11| test(&gchild);",
],
[""],
# 79 12
["response|"+cur_dir+"/inhe.cpp|46|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|49|15|void CGChild::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|79|12| gchild.response();"],
["response|"+cur_dir+"/inhe.cpp|33|18| virtual void response(void);"],
# 80 5
["other|"+cur_dir+"/inhe.cpp|76|12| COther other;"],
["other|"+cur_dir+"/inhe.cpp|76|12| COther other;"],
[
"other|"+cur_dir+"/inhe.cpp|80|5| other.response();",
"other|"+cur_dir+"/inhe.cpp|84|11| test(&other);",
],
[""],
# 80 11
["response|"+cur_dir+"/inhe.cpp|59|18| virtual void response(void);"],
["response|"+cur_dir+"/inhe.cpp|62|14|void COther::response(void) {"],
["response|"+cur_dir+"/inhe.cpp|80|11| other.response();"],
[
"response|"+cur_dir+"/inhe.cpp|8|18| virtual void response(void);",
"response|"+cur_dir+"/inhe.cpp|20|18| virtual void response(void);",
],
# 81 5
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/inhe.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/inhe.cpp|82|5| test(&child);",
"test|"+cur_dir+"/inhe.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/inhe.cpp|84|5| test(&other);",
],
[""],
# 81 11
["parent|"+cur_dir+"/inhe.cpp|73|14| CParent0 parent;"],
["parent|"+cur_dir+"/inhe.cpp|73|14| CParent0 parent;"],
[
"parent|"+cur_dir+"/inhe.cpp|77|5| parent.response();",
"parent|"+cur_dir+"/inhe.cpp|81|11| test(&parent);",
],
[""],
# 82 5
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/inhe.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/inhe.cpp|82|5| test(&child);",
"test|"+cur_dir+"/inhe.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/inhe.cpp|84|5| test(&other);",
],
[""],
# 82 11
["child|"+cur_dir+"/inhe.cpp|74|12| CChild child;"],
["child|"+cur_dir+"/inhe.cpp|74|12| CChild child;"],
[
"child|"+cur_dir+"/inhe.cpp|78|5| child.response();",
"child|"+cur_dir+"/inhe.cpp|82|11| test(&child);",
],
[""],
# 83 5
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/inhe.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/inhe.cpp|82|5| test(&child);",
"test|"+cur_dir+"/inhe.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/inhe.cpp|84|5| test(&other);",
],
[""],
# 83 11
["gchild|"+cur_dir+"/inhe.cpp|75|13| CGChild gchild;"],
["gchild|"+cur_dir+"/inhe.cpp|75|13| CGChild gchild;"],
[
"gchild|"+cur_dir+"/inhe.cpp|79|5| gchild.response();",
"gchild|"+cur_dir+"/inhe.cpp|83|11| test(&gchild);",
],
[""],
# 84 5
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/inhe.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/inhe.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/inhe.cpp|82|5| test(&child);",
"test|"+cur_dir+"/inhe.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/inhe.cpp|84|5| test(&other);",
],
[""],
# 84 11
["other|"+cur_dir+"/inhe.cpp|76|12| COther other;"],
["other|"+cur_dir+"/inhe.cpp|76|12| COther other;"],
[
"other|"+cur_dir+"/inhe.cpp|80|5| other.response();",
"other|"+cur_dir+"/inhe.cpp|84|11| test(&other);",
],
[""],
]
err = 0
i = 0
for q in q_list:
err += common.test_one(q, ans_list[i])
i+=1
if err == 0:
print "OK"
else:
print "ERR: %d"%(err)
exit(err)
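# Hypothetical sketch of the comparison helper this script relies on (the real
# common.test_one lives in ../util/ and may behave differently): run one query
# through cxxtags_query and count a failure when the output lines do not match
# the expected lines.
#
# def test_one_sketch(query, expected):
#     status, output = commands.getstatusoutput(CXXTAGS_QUERY + " " + query)
#     got = sorted(line for line in output.splitlines() if line)
#     want = sorted(line for line in expected if line)
#     return 0 if got == want else 1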
|
|
import os
import stat
import pytest
from topaz.objects.fileobject import W_FileObject
from topaz.system import IS_WINDOWS
from ..base import BaseTopazTest
class TestFile(BaseTopazTest):
def test_access_flags(self, space):
assert space.int_w(space.execute("return File::RDONLY")) == os.O_RDONLY
assert space.int_w(space.execute("return File::WRONLY")) == os.O_WRONLY
assert space.int_w(space.execute("return File::RDWR")) == os.O_RDWR
assert space.int_w(space.execute("return File::APPEND")) == os.O_APPEND
assert space.int_w(space.execute("return File::CREAT")) == os.O_CREAT
assert space.int_w(space.execute("return File::EXCL")) == os.O_EXCL
assert space.int_w(space.execute("return File::TRUNC")) == os.O_TRUNC
w_res = space.execute("return File::BINARY")
assert space.int_w(w_res) == (os.O_BINARY if hasattr(os, "O_BINARY") else 0)
def test_separator(self, space):
space.execute("File::SEPARATOR")
def test_alt_separator(self, space):
space.execute("File::ALT_SEPARATOR")
def test_path_separator(self, space):
space.execute("File::PATH_SEPARATOR")
def test_fnm_syscase(self, space):
space.execute("File::FNM_SYSCASE")
def test_fnm_dotmatch(self, space):
space.execute("File::FNM_DOTMATCH")
def test_fnm_pathname(self, space):
space.execute("File::FNM_PATHNAME")
def test_fnm_noescape(self, space):
space.execute("File::FNM_NOESCAPE")
def test_new_simple(self, space, tmpdir):
contents = "foo\nbar\nbaz\n"
f = tmpdir.join("file.txt")
f.write(contents)
w_res = space.execute("return File.new('%s')" % f)
assert isinstance(w_res, W_FileObject)
w_res = space.execute("return File.new('%s', 'r')" % f)
assert isinstance(w_res, W_FileObject)
w_res = space.execute("return File.new('%s', 'rb')" % f)
assert isinstance(w_res, W_FileObject)
w_res = space.execute("return File.new('%s', 'r+')" % f)
assert isinstance(w_res, W_FileObject)
w_res = space.execute("return File.new('%s', 'rb+')" % f)
assert isinstance(w_res, W_FileObject)
w_res = space.execute("return File.new('%s', 'a+')" % f)
assert isinstance(w_res, W_FileObject)
with self.raises(space, "ArgumentError", "invalid access mode rw"):
space.execute("File.new('%s', 'rw')" % f)
with self.raises(space, "ArgumentError", "invalid access mode wa"):
space.execute("File.new('%s', 'wa')" % f)
with self.raises(space, "ArgumentError", "invalid access mode rw+"):
space.execute("File.new('%s', 'rw+')" % f)
with self.raises(space, "ArgumentError", "invalid access mode ra"):
space.execute("File.new('%s', 'ra')" % f)
with self.raises(space, "Errno::ENOENT"):
space.execute("File.new('%s', 1)" % tmpdir.join("non-existent"))
w_res = space.execute("return File.new('%s%snonexist', 'w')" % (tmpdir.dirname, os.sep))
assert isinstance(w_res, W_FileObject)
w_res = space.execute("""
path = '%s%snonexist2'
f = File.new(path, 'w')
f.puts "first"
f = File.new(path, 'a')
f.puts "second"
f = File.new(path, 'r')
return f.read
""" % (tmpdir.dirname, os.sep))
assert space.str_w(w_res) == "first\nsecond\n"
def test_readline(self, space, tmpdir):
contents = "01\n02\n03\n04\n"
f = tmpdir.join("file.txt")
f.write(contents)
w_res = space.execute("return File.new('%s').readline" % f)
assert self.unwrap(space, w_res) == "01\n"
w_res = space.execute("return File.new('%s').readline('3')" % f)
assert self.unwrap(space, w_res) == "01\n02\n03"
w_res = space.execute("return File.new('%s').readline(1)" % f)
assert self.unwrap(space, w_res) == "0"
w_res = space.execute("return File.new('%s').readline('3', 4)" % f)
assert self.unwrap(space, w_res) == "01\n0"
def test_readlines(self, space, tmpdir):
contents = "01\n02\n03\n04\n"
f = tmpdir.join("file.txt")
f.write(contents)
w_res = space.execute("return File.new('%s').readlines()" % f)
assert self.unwrap(space, w_res) == ["01", "02", "03", "04", ""]
w_res = space.execute("return File.new('%s').readlines('3')" % f)
assert self.unwrap(space, w_res) == ["01\n02\n0", "\n04\n"]
w_res = space.execute("return File.new('%s').readlines(1)" % f)
assert self.unwrap(space, w_res) == ["0", "1", "0", "2", "0", "3", "0", "4", ""]
w_res = space.execute("return File.new('%s').readlines('3', 4)" % f)
assert self.unwrap(space, w_res) == ["01\n0", "2\n0", "\n04\n"]
def test_each_line(self, space, tmpdir):
contents = "01\n02\n03\n04\n"
f = tmpdir.join("file.txt")
f.write(contents)
w_res = space.execute("""
r = []
File.new('%s').each_line { |l| r << l }
return r
""" % f)
assert self.unwrap(space, w_res) == ["01", "02", "03", "04", ""]
w_res = space.execute("""
r = []
File.new('%s').each_line('3') { |l| r << l }
return r
""" % f)
assert self.unwrap(space, w_res) == ["01\n02\n0", "\n04\n"]
w_res = space.execute("""
r = []
File.new('%s').each_line(1) { |l| r << l }
return r
""" % f)
assert self.unwrap(space, w_res) == ["0", "1", "0", "2", "0", "3", "0", "4", ""]
w_res = space.execute("""
r = []
File.new('%s').each_line('3', 4) { |l| r << l }
return r
""" % f)
assert self.unwrap(space, w_res) == ["01\n0", "2\n0", "\n04\n"]
with self.raises(space, "ArgumentError", "invalid limit: 0 for each_line"):
w_res = space.execute("""
File.new('%s').each_line(0) { |l| }
""" % f)
def test_join(self, space):
w_res = space.execute("return File.join('/abc', 'bin')")
assert space.str_w(w_res) == "/abc/bin"
w_res = space.execute("return File.join('', 'abc', 'bin')")
assert space.str_w(w_res) == "/abc/bin"
w_res = space.execute("return File.join")
assert space.str_w(w_res) == ""
w_res = space.execute("return File.join('abc')")
assert space.str_w(w_res) == "abc"
w_res = space.execute("return File.join('abc', 'def', 'ghi')")
assert space.str_w(w_res) == "abc/def/ghi"
w_res = space.execute("return File.join(['abc', ['def'], []], 'ghi')")
assert space.str_w(w_res) == "abc/def/ghi"
w_res = space.execute("return File.join('a', '//', 'b', '/', 'd', '/')")
assert space.str_w(w_res) == "a//b/d/"
w_res = space.execute("return File.join('a', '')")
assert space.str_w(w_res) == "a/"
w_res = space.execute("return File.join('a/')")
assert space.str_w(w_res) == "a/"
w_res = space.execute("return File.join('a/', '')")
assert space.str_w(w_res) == "a/"
w_res = space.execute("return File.join('a', '/')")
assert space.str_w(w_res) == "a/"
w_res = space.execute("return File.join('a/', '/')")
assert space.str_w(w_res) == "a/"
w_res = space.execute("return File.join('')")
assert space.str_w(w_res) == ""
w_res = space.execute("return File.join([])")
assert space.str_w(w_res) == ""
def test_existp(self, space, tmpdir):
f = tmpdir.join("test.rb")
f.write("")
w_res = space.execute("return File.exist?('%s')" % f)
assert w_res is space.w_true
w_res = space.execute("return File.exist?('%s')" % tmpdir)
assert w_res is space.w_true
w_res = space.execute("return File.exist?('no way this exists')")
assert w_res is space.w_false
def test_filep(self, space, tmpdir):
f = tmpdir.join("test.rb")
f.write("")
w_res = space.execute("return File.file?('%s')" % f)
assert w_res is space.w_true
w_res = space.execute("return File.file?('%s')" % tmpdir)
assert w_res is space.w_false
w_res = space.execute("return File.file?('no way this exists')")
assert w_res is space.w_false
def test_executablep(self, space, tmpdir):
f = tmpdir.join("test.rb")
f.write("")
w_res = space.execute("return File.executable?('%s')" % f)
assert w_res is space.w_false
os.chmod(str(f), stat.S_IEXEC)
w_res = space.execute("return File.executable?('%s')" % f)
assert w_res is space.w_true
def test_directoryp(self, space, tmpdir):
w_res = space.execute("return File.directory?('%s')" % tmpdir)
assert self.unwrap(space, w_res) is True
w_res = space.execute("return File.directory?('%s')" % tmpdir.join("t.rb"))
assert self.unwrap(space, w_res) is False
def test_open(self, space, tmpdir):
contents = "foo\nbar\nbaz\n"
f = tmpdir.join("file.txt")
f.write(contents)
w_res = space.execute("""
File.open('%s') { |f| return f, f.read }
""" % f)
w_file, w_string = space.listview(w_res)
assert space.str_w(w_string) == contents
with pytest.raises(OSError):
# fd should be inaccessible
os.fstat(w_file.fd)
def test_close(self, space, tmpdir):
f = tmpdir.join("file.txt")
f.write("")
w_res = space.execute("""
f = File.new('%s')
f.close
return f
""" % f)
with pytest.raises(OSError):
# fd should be inaccessible
os.fstat(w_res.fd)
def test_closedp(self, space, tmpdir):
f = tmpdir.join("file.txt")
f.write("")
w_res = space.execute("""
f = File.new('%s')
opened = f.closed?
f.close
return opened, f.closed?
""" % f)
assert self.unwrap(space, w_res) == [False, True]
def test_basename(self, space):
assert space.str_w(space.execute("return File.basename('ab')")) == "ab"
assert space.str_w(space.execute("return File.basename('/ab')")) == "ab"
assert space.str_w(space.execute("return File.basename('/foo/bar/ab')")) == "ab"
assert space.str_w(space.execute("return File.basename('ab.rb', '.rb')")) == "ab"
assert space.str_w(space.execute("return File.basename('ab.rb', 'b.rb')")) == "a"
def test_truncate(self, space, tmpdir):
f = tmpdir.join("file.txt")
f.write("content")
w_res = space.execute("""
f = File.new('%s', "r+")
f.truncate(3)
return f.read
""" % f)
assert self.unwrap(space, w_res) == "con"
def test_get_umask(self, space, monkeypatch):
monkeypatch.setattr(os, "umask", lambda mask: 2)
w_res = space.execute("return File.umask")
assert space.int_w(w_res) == 2
def test_set_umask(self, space, monkeypatch):
umask = [2]
def mock_umask(mask):
# tuple-unpacking swap: hand back the previous umask and remember the new one
[current], umask[0] = umask, mask
return current
monkeypatch.setattr(os, "umask", mock_umask)
w_res = space.execute("return File.umask(10), File.umask")
assert self.unwrap(space, w_res) == [2, 10]
def test_size_p(self, space, tmpdir):
w_res = space.execute("return File.size?('%s')" % tmpdir.join("x.txt"))
assert w_res is space.w_nil
tmpdir.join("x.txt").ensure()
w_res = space.execute("return File.size?('%s')" % tmpdir.join("x.txt"))
assert w_res is space.w_nil
tmpdir.join("x.txt").write("abc")
w_res = space.execute("return File.size?('%s')" % tmpdir.join("x.txt"))
assert space.int_w(w_res) == 3
def test_delete(self, space, tmpdir):
tmpdir.join("t.txt").ensure()
w_res = space.execute("return File.delete('%s')" % tmpdir.join("t.txt"))
assert space.int_w(w_res) == 1
assert not tmpdir.join("t.txt").check()
class TestExpandPath(BaseTopazTest):
def test_expand_to_absolute(self, space):
w_res = space.execute("""
return [File.expand_path(""), File.expand_path("a"), File.expand_path("a", nil)]
""")
assert self.unwrap(space, w_res) == [
os.getcwd(),
os.path.join(os.getcwd(), "a"),
os.path.join(os.getcwd(), "a"),
]
with self.raises(space, "ArgumentError", "string contains null byte"):
space.execute("""return File.expand_path(".\\0.")""")
def test_expand_backslash_handling(self, space):
w_res = space.execute("""
return File.expand_path("a\\\\b")
""")
res = self.unwrap(space, w_res)
if IS_WINDOWS:
assert res == "/".join([os.getcwd().replace("\\", "/"), "a", "b"])
else:
assert res == os.path.join(os.getcwd(), "a\\b")
def test_convert_to_absolute_using_provided_base(self, space):
w_res = space.execute("""return File.expand_path("", "/tmp")""")
assert self.unwrap(space, w_res) == "/tmp"
w_res = space.execute("""return File.expand_path("a", "/tmp")""")
assert self.unwrap(space, w_res) == "/tmp/a"
w_res = space.execute("""return File.expand_path("../a", "/tmp/xxx")""")
assert self.unwrap(space, w_res) == "/tmp/a"
w_res = space.execute("""return File.expand_path(".", "/")""")
assert self.unwrap(space, w_res) == "/"
w_res = space.execute("""return File.expand_path(".", nil)""")
assert self.unwrap(space, w_res) == os.getcwd()
def test_home_expansion(self, space):
w_res = space.execute("""return File.expand_path("~")""")
assert self.unwrap(space, w_res) == os.environ["HOME"]
w_res = space.execute("""return File.expand_path("~", "/tmp/random")""")
assert self.unwrap(space, w_res) == os.environ["HOME"]
w_res = space.execute("""return File.expand_path("~/a", "/tmp/random")""")
assert self.unwrap(space, w_res) == os.path.join(os.environ["HOME"], "a")
class TestDirname(BaseTopazTest):
def test_simple(self, space):
w_res = space.execute("""
return [
File.dirname("/home/guido"),
File.dirname("/home/guido/test.txt"),
File.dirname("test.txt"),
File.dirname("/home///guido//file.txt"),
File.dirname(""),
File.dirname("/"),
File.dirname("/foo/foo"),
File.dirname("/foo/foo//")
]
""")
assert self.unwrap(space, w_res) == [
"/home",
"/home/guido",
".",
"/home///guido",
".",
"/",
"/foo",
"/foo",
]
def test_windows_backslash_handling(self, space):
w_res = space.execute("""
return [
File.dirname("a/b/c"),
File.dirname("a\\\\b\\\\//\\\\c/\\\\"),
File.dirname("\\\\"),
]
""")
res = self.unwrap(space, w_res)
if IS_WINDOWS:
assert res == ["a/b", "a\\b", "/"]
else:
assert res == ["a/b", "a\\b\\//\\c", "."]
|
|
#!/usr/bin/env python
"""
(The MIT License)
Copyright (c) 2011, Mihail Szabolcs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the 'Software'), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Mihail Szabolcs'
__description__ = 'General purpose data-driven generator.'
__version__ = (0, 2, 0)
__license__ = 'MIT'
import sys
import os
import re
import glob
import json
import platform
import math
import logging
from datetime import datetime
def qglob(path, pattern):
""" Glob convenience helper """
return glob.glob(os.path.join(path, pattern))
class TemplatizerException(Exception):
""" Generic Templatizer Exception """
pass
class TemplateNotFound(TemplatizerException):
""" Templatizer Template Not Found Exception """
pass
class DuplicateTemplate(TemplatizerException):
""" Templatizer Duplicate Template Exception """
pass
class InvalidTemplate(TemplatizerException):
""" Templatizer Invalid Template Exception """
pass
class InvalidActionHandler(TemplatizerException):
""" Templatizer Invalid Action Handler Exception """
pass
class ArgumentRequired(TemplatizerException):
""" Templatizer Argument Required Exception """
pass
class Template:
"""
Template Class
"""
def __init__(self, path):
""" Constructor """
self.path = path
self.constants = {}
self.arguments = {}
self.missing_arguments = []
self.actions = []
self.__regexp = None
self.__regexp_dict = {}
self.__name = None
def __getName(self):
""" Getter for name attribute """
return self.__name
name = property(__getName, None, None, 'This template\'s name')
def parse(self, filename, arguments={}):
""" Parses a JSON template descriptor file and evaluates its arguments, constants and actions """
template = json.loads(open(filename).read())
if not self.validate(template):
raise InvalidTemplate('Invalid %s template' % filename)
self.__name = template['name']
if not self.parse_arguments(template['arguments'], arguments):
return False
if not self.parse_constants(template['constants']):
return False
if not self.preprocess():
return False
if not self.parse_actions(template['actions']):
return False
return True
def parse_arguments(self, arguments, cmd_args):
""" Parses and evaluates arguments based on the command line arguments """
for k, v in arguments.items():
if k in cmd_args:
vvv = cmd_args[k]
for kk, vv in v.items():
self.arguments[kk] = str(eval('lambda v: %s' % vv)(vvv))
else:
self.missing_arguments.append(k)
return True
def parse_constants(self, constants):
""" Parses and evaluates constants """
for k, v in constants.items():
self.constants[k] = str(eval('lambda: %s' % v)())
# list of (internal) built-in constants
self.constants['%YEAR%'] = str(datetime.now().year)
self.constants['%DATE%'] = datetime.now().strftime('%d/%m/%y')
self.constants['%TPLDIR%'] = self.path # absolute path to the template directory
return True
def parse_actions(self, actions):
""" Parses and evaluates actions """
for action in actions:
action[0] = self.process(action[0])
if len(action) == 2:
action[1] = os.path.join(self.path, self.process(action[1]))
self.actions.append(action)
return True
def validate(self, parsed):
""" Validates a parsed JSON template descriptor """
for p in ['name','actions','arguments','constants']:
if p not in parsed:
return False
return True
def preprocess(self):
""" Prepares the multi-regexp used for templating """
self.__regexp_dict = dict(self.arguments, **self.constants)
self.__regexp = re.compile('(%s)' % '|'.join(map(re.escape, self.__regexp_dict.keys())))
return True
def process(self, data):
""" Processes a piece of data using the multi-regexp in one shot """
return self.__regexp.sub(lambda mo: self.__regexp_dict[mo.string[mo.start():mo.end()]], data)
def execute(self, action_handler):
""" Execute """
if len(self.missing_arguments) > 0:
raise ArgumentRequired('--%s argument required' % self.missing_arguments[0])
if not callable(action_handler):
raise InvalidActionHandler('Invalid action handler')
for action in self.actions:
action_type = len(action)
action_name = action[0]
action_data = None
if action_type == 2:
action_data = self.process(open(action[1]).read())
action_handler(action_type, action_name, action_data)
return 0
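# Illustrative sketch (an assumption drawn from validate()/parse_*() above, not
# a descriptor shipped with this project) of what a *.templatizer JSON file
# could look like:
#
# {
#     "name": "cppclass",
#     "arguments": {"name": {"%NAME%": "v", "%NAME_UPPER%": "v.upper()"}},
#     "constants": {"%AUTHOR%": "'Jane Doe'"},
#     "actions": [
#         ["%NAME%.h", "class.h.tpl"],
#         ["git add %NAME%.h"]
#     ]
# }
#
# A two-element action writes the processed template file (class.h.tpl, resolved
# relative to the template directory) to the processed path %NAME%.h; a
# one-element action is executed as a shell command.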
class Generator:
"""
Generator Class
"""
def __init__(self):
""" Constructor """
self.templates = {}
def add_template(self, template):
""" Registers a template with this generator """
name = template.name
if name in self.templates:
raise DuplicateTemplate('Duplicate %s template' % name)
self.templates[name] = template
def find_template(self, name):
""" Returns a registered template by its name """
if name not in self.templates:
raise TemplateNotFound('Template %s not found' % name)
return self.templates[name]
def action_handler(self, action_type, action_name, action_data=None):
""" Default action handler which creates files and executes shell commands """
if action_type == 1:
# execute shell command
logging.debug('Executing \'%s\'' % action_name)
os.system(action_name)
elif action_type == 2:
# do not overwrite any generated files
if os.path.exists(action_name):
logging.debug('File %s exists, skipping ...' % action_name)
else:
# write out file (the content is processed)
with open(action_name,'w') as f:
f.write(action_data)
logging.debug('File %s written ...' % action_name)
def execute(self, name):
""" Executes the generator """
return self.find_template(name).execute(self.action_handler)
class Templatizer:
def __init__(self, argv={}, config='~/.templatizer'):
""" Constructor """
self.generator = Generator()
self.arguments = {}
self.parse_arguments(argv)
self.parse_config(config)
def parse_templates(self, path):
""" Scans a directory for available templates """
template_dir = os.path.expanduser(path)
logging.debug('Searching %s for available templates ...' % template_dir)
for template in qglob(template_dir,'*.templatizer'):
logging.debug('-> Parsing %s ...' % template)
tpl = Template(template_dir)
if tpl.parse(template, self.arguments):
self.generator.add_template(tpl)
logging.debug('-> SUCCESS')
else:
logging.debug('-> FAIL')
def parse_arguments(self, argv):
""" Parses arguments into a key, value dictionary """
for arg in argv:
# accepted argument format: --key=value
if arg.startswith('--') and arg.find('=') != -1:
k, v = arg.split('=', 1)  # split once so values may themselves contain '='
self.arguments[k[2:]] = v
def parse_config(self, config):
""" Parses the configuration and scans all paths for available templates """
config_file = os.path.expanduser(config)
if not os.path.exists(config_file) or not os.path.isfile(config_file): return
with open(config_file) as f:
for path in f:
self.parse_templates(path.strip())
def execute(self, name, path):
""" Executes the internal generator """
logging.debug('Working directory `%s`' % path)
# change current working directory
os.chdir(path)
return self.generator.execute(name)
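# Illustrative sketch (assumption): parse_config() above reads ~/.templatizer as
# a plain text file listing one template directory per line, e.g.
#
#     ~/templates/cpp
#     ~/templates/web
#
# after which a generator run could look like:
#
#     $ templatizer cppclass --name=widget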
def main(argv):
""" Main """
if len(argv) < 2:
print('Templatizer v%d.%d.%d' % __version__)
print('usage: %s [options] template [--key1=value1, --key2=value2]' % os.path.splitext(os.path.basename(argv[0]))[0])
print('')
print('Options:')
print('\t-v, --version\tshows the version number')
print('\t-d, --debug\tenables debug output')
return -1
if '-v' in argv or '--version' in argv:
print('%d.%d.%d' % __version__) # print version
return 0
if argv[1] == '-d' or argv[1] == '--debug':
argv.pop(1) # remove this item
logging.basicConfig(level=logging.DEBUG)
try:
return Templatizer(argv[2:]).execute(argv[1], os.getcwd())
except TemplatizerException as message:
print(message)
return -1
if __name__ == '__main__':
exit(main(sys.argv))
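# Illustrative usage sketch (assumption: a template named "flask-app" is found
# via one of the search paths listed in ~/.templatizer; all names below are
# hypothetical):
#
#   $ python templatizer.py --debug flask-app --project=demo
#
# The --key=value pairs become substitution arguments, and the template's
# actions are dispatched to Generator.action_handler: single-element actions
# run as shell commands, two-element actions write the processed contents of
# the second entry to the path named by the first entry, unless it already exists.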
|
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import hashlib
import simplejson
import random
import shutil
# import urllib
import os
from pprint import pprint
import math
import numpy as np
import sys
import psutil
import requests
from PIL import Image
from aetros.utils import get_option
from .keras_model_utils import ensure_dir
from .backend import invalid_json_values
from threading import Thread, Lock
from six.moves.queue import Queue, Empty
import six
from six.moves import range
def download_image(url, path):
if os.path.exists(path):
return True
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
}
try:
r = requests.get(url, stream=True, timeout=9, headers=headers)
if r.status_code == 200:
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
return True
else:
print(("Could not download image %s, response %d" % (url, r.status_code)))
except Exception as e:
if hasattr(e, 'message'):
print(("Could not download image %s due to %s" % (url, e.message)))
else:
print(("Could not download image %s due to %s" % (url, repr(e))))
return False
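# Illustrative sketch (not part of the original module; the URL and path below
# are placeholders). download_image() returns True when the file already exists
# locally or was streamed to disk with HTTP 200, and False otherwise, so
# callers can simply skip images that could not be fetched.
def _example_download_image():
    ok = download_image('https://example.com/images/cat.jpg', '/tmp/cat.jpg')
    if not ok:
        print('image skipped')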
class ImageDownloaderWorker(Thread):
def __init__(self, q, progress, dataset, max, images, controller):
Thread.__init__(self)
self.q = q
self.progress = progress
self.dataset = dataset
self.max = max
self.images = images
self.controller = controller
def run(self):
try:
while self.controller['running']:
try:
self.handle(self.q.get(True, 4))
self.q.task_done()
except Empty:
pass
except KeyboardInterrupt:
self.controller['running'] = False
return
def handle(self, message):
image = message[0]
if 'id' not in image:
return
local_image_path = message[2]
try:
ensure_dir(os.path.dirname(local_image_path))
except Exception:
pass
if not os.path.isfile(local_image_path):
if download_image(image['src'], local_image_path):
try:
with open(local_image_path, "rb") as fp:
img = Image.open(fp)
img.load()
resize = bool(get_option(self.dataset['config'], 'resize', True))
if resize:
os.remove(local_image_path)
size = (int(get_option(self.dataset['config'], 'resizeWidth', 512)), int(
get_option(self.dataset['config'], 'resizeHeight', 512)))
quality = int(
float(get_option(self.dataset['config'], 'resizeCompression', 0.8)) * 100)
img.thumbnail(size, Image.ANTIALIAS)
local_image_path = os.path.splitext(local_image_path)[0] + '.jpg'
img.save(local_image_path, 'JPEG', quality=quality, optimize=True)
except SystemExit:
self.controller['running'] = False
except KeyboardInterrupt:
self.controller['running'] = False
except Exception as e:
print(("No valid image found %s" % (local_image_path,)))
if os.path.exists(local_image_path):
os.remove(local_image_path)
self.images[image['id']] = local_image_path
self.progress.advance(1)
# class to read all images in the ram at once
class ImageReadWorker(Thread):
def __init__(self, q, job_model, input_node, path, images, controller):
Thread.__init__(self)
self.q = q
self.job_model = job_model
self.input_node = input_node
self.path = path
self.images = images
self.controller = controller
def run(self):
while self.controller['running']:
self.handle(self.q.get())
self.q.task_done()
def handle(self, message):
path, validation, category_dir = message
try:
if os.path.isfile(path):
input = self.job_model.convert_file_to_input_node(path, self.input_node)
else:
return
if input is not None and len(input) > 0:
self.images.append([input, validation, category_dir])
except IOError as e:
print(('Could not open %s due to %s' % (path, e.message)))
return
class InMemoryDataGenerator():
def __init__(self, datagen, images, classes_count, batch_size):
self.index = -1
self.datagen = datagen
self.images = images
random.shuffle(self.images)
self.lock = Lock()
self.classes_count = classes_count
self.batch_size = batch_size
def __iter__(self):
return self
def next(self):
batch_x = []
batch_y = []
for i in range(self.batch_size):
with self.lock:
self.index += 1
# reset iterator if necessary
if self.index == len(self.images):
self.index = 0
random.shuffle(self.images)
image, class_idx = self.images[self.index]
# we need to copy it, otherwise we'd operate on the same object again and again
image = np.copy(image)
if self.datagen is not None:
image = self.datagen.random_transform(image)
image = self.datagen.standardize(image)
batch_x.append(image)
y = np.zeros((self.classes_count,), dtype='float32')
y[class_idx] = 1.
batch_y.append(y)
return np.array(batch_x), np.array(batch_y)
def __next__(self):
return self.next()
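# Illustrative sketch (not part of the original module). The generator loops
# forever over [image, class_index] pairs, reshuffling whenever it wraps
# around, which is the contract Keras' fit_generator expects;
# read_images_in_memory() below builds such pairs.
def _example_in_memory_generator(train_images, classes_count=10, batch_size=32):
    gen = InMemoryDataGenerator(None, train_images, classes_count, batch_size)
    batch_x, batch_y = next(gen)  # batch_x: 32 images, batch_y: 32 one-hot rows
    return batch_x.shape, batch_y.shape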
def read_images_in_memory(job_model, dataset, node, trainer):
"""
Reads all images into memory and applies augmentation if enabled
"""
concurrent = psutil.cpu_count()
dataset_config = dataset['config']
controller = {'running': True}
q = Queue(concurrent)
result = {
'X_train': [],
'Y_train': [],
'X_test': [],
'Y_test': []
}
images = []
max = 0
path = job_model.get_dataset_downloads_dir(dataset)
if 'path' in dataset['config']:
path = dataset['config']['path']
classes_count = 0
category_map = {}
classes = []
trainer.set_status('LOAD IMAGES INTO MEMORY')
try:
for i in range(concurrent):
t = ImageReadWorker(q, job_model, node, path, images, controller)
t.daemon = True
t.start()
for validation_or_training in ['validation', 'training']:
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training)):
for category_name in os.listdir(os.path.normpath(path + '/' + validation_or_training)):
if os.path.isdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
if category_name not in category_map:
category_map[category_name] = classes_count
if 'classes' in dataset_config and 'category_' in category_name:
category_idx = int(category_name.replace('category_', ''))
category_map[category_name] = category_idx
target_category = dataset_config['classes'][category_idx]
classes.append(target_category['title'] or 'Class %s' % (category_idx, ))
else:
classes.append(category_name)
classes_count += 1
for id in os.listdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
file_path = os.path.join(path, validation_or_training, category_name, id)
q.put([file_path, validation_or_training == 'validation', category_name])
max += 1
q.join()
controller['running'] = False
train_images = []
test_images = []
for v in images:
image, validation, category_dir = v
if validation is True:
test_images.append([image, category_map[category_dir]])
else:
train_images.append([image, category_map[category_dir]])
train_datagen = None
augmentation = bool(get_option(dataset_config, 'augmentation', False))
if augmentation:
train_datagen = get_image_data_augmentor_from_dataset(dataset)
train = InMemoryDataGenerator(train_datagen, train_images, classes_count, job_model.job['config']['batchSize'])
test = InMemoryDataGenerator(None, test_images, classes_count, job_model.job['config']['batchSize'])
nb_sample = len(train_images)
trainer.set_info('Dataset size', {'training': nb_sample, 'validation': len(test_images)})
trainer.set_generator_training_nb(nb_sample)
trainer.set_generator_validation_nb(len(test_images))
trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation). Read all images into memory from %s" %
(classes_count, max, len(train_images), 'not ' if augmentation is False else '', len(test_images), path)))
if classes_count == 0:
trainer.logger.warning("Could not find any classes. Does the directory contains images?")
sys.exit(1)
trainer.output_size = classes_count
trainer.set_info('classes', classes)
trainer.classes = classes
result['X_train'] = train
result['Y_train'] = train
result['X_test'] = test
result['Y_test'] = test
return result
except KeyboardInterrupt:
controller['running'] = False
sys.exit(1)
def get_image_data_augmentor_from_dataset(dataset):
from keras.preprocessing.image import ImageDataGenerator
dataset_config = dataset['config']
augShearRange = float(get_option(dataset_config, 'augShearRange', 0.1))
augZoomRange = float(get_option(dataset_config, 'augZoomRange', 0.1))
augHorizontalFlip = bool(get_option(dataset_config, 'augHorizontalFlip', False))
augVerticalFlip = bool(get_option(dataset_config, 'augVerticalFlip', False))
augRotationRange = float(get_option(dataset_config, 'augRotationRange', 0.2))
return ImageDataGenerator(
rotation_range=augRotationRange,
shear_range=augShearRange,
zoom_range=augZoomRange,
horizontal_flip=augHorizontalFlip,
vertical_flip=augVerticalFlip
)
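# Illustrative dataset config sketch (keys as read above; the values are only
# examples, not defaults from the original project):
#
#   dataset = {'config': {'augmentation': True,
#                         'augShearRange': 0.1, 'augZoomRange': 0.2,
#                         'augHorizontalFlip': True, 'augVerticalFlip': False,
#                         'augRotationRange': 15}}
#   datagen = get_image_data_augmentor_from_dataset(dataset)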
def read_images_keras_generator(job_model, dataset, node, trainer):
from keras.preprocessing.image import ImageDataGenerator
size = (int(node['width']), int(node['height']))
grayscale = False
if node['inputType'] == 'image':
grayscale = True
dataset_config = dataset['config']
trainer.logger.info(("Generate image iterator in folder %s " % (dataset_config['path'],)))
augmentation = bool(get_option(dataset_config, 'augmentation', False))
if augmentation:
train_datagen = get_image_data_augmentor_from_dataset(dataset)
else:
train_datagen = ImageDataGenerator()
if 'imageScale' not in node:
node['imageScale'] = 255
if float(node['imageScale']) > 0:
train_datagen.rescale = 1.0 / float(node['imageScale'])
train_generator = train_datagen.flow_from_directory(
directory=os.path.join(dataset_config['path'], 'training'),
target_size=size,
batch_size=job_model.job['config']['batchSize'],
color_mode='grayscale' if grayscale is True else 'rgb',
class_mode='categorical')
classes = []
for folderName, outputNeuron in six.iteritems(train_generator.class_indices):
if dataset['type'] == 'images_search' or dataset['type'] == 'images_upload':
category_idx = int(folderName.replace('category_', ''))
target_category = dataset_config['classes'][category_idx]
classes.append(target_category['title'] or 'Category %s' % (category_idx, ))
else:
classes.append(folderName)
trainer.set_info('classes', classes)
trainer.classes = classes
# ensure_dir(dataset_config['path'] + '/preview')
test_datagen = ImageDataGenerator()
if float(node['imageScale']) > 0:
test_datagen.rescale = 1.0 / float(node['imageScale'])
validation_generator = test_datagen.flow_from_directory(
directory=os.path.join(dataset_config['path'], 'validation'),
# save_to_dir=dataset_config['path'] + '/preview',
target_size=size,
batch_size=job_model.get_batch_size(),
color_mode='grayscale' if grayscale is True else 'rgb',
class_mode='categorical')
validation_samples = 0
train_samples = 0
# Keras 2
if hasattr(train_generator, 'num_class'):
trainer.output_size = train_generator.num_class
if hasattr(train_generator, 'samples'):
train_samples = train_generator.samples
if hasattr(validation_generator, 'samples'):
validation_samples = validation_generator.samples
# Keras 1
if hasattr(train_generator, 'nb_class'):
trainer.output_size = train_generator.nb_class
if hasattr(train_generator, 'nb_sample'):
train_samples = train_generator.nb_sample
if hasattr(validation_generator, 'nb_sample'):
validation_samples = validation_generator.nb_sample
trainer.set_info('Dataset size', {'training': train_samples, 'validation': validation_samples})
trainer.set_generator_validation_nb(validation_samples)
trainer.set_generator_training_nb(train_samples)
trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation) in %s " %
(len(classes), validation_samples + train_samples, train_samples, 'not ' if augmentation is False else '', validation_samples, dataset_config['path'])))
if trainer.output_size == 0:
trainer.logger.warning("Could not find any classes. Does the directory contains images?")
sys.exit(1)
trainer.logger.debug(str(train_generator.class_indices))
trainer.logger.debug(str(classes))
return {
'X_train': train_generator,
'Y_train': train_generator,
'X_test': validation_generator,
'Y_test': validation_generator,
}
def get_images(job_model, dataset, node, trainer):
concurrent = 15
from PIL import ImageFile
if hasattr(ImageFile, 'LOAD_TRUNCATED_IMAGES'):
ImageFile.LOAD_TRUNCATED_IMAGES = True
q = Queue(concurrent)
config = dataset['config']
dir = job_model.get_dataset_downloads_dir(dataset)
ensure_dir(dir)
if 'classes' not in config or not config['classes']:
trainer.logger.warning("Dataset %s does not contain any classes." % (dataset['id'],))
return {
'X_train': np.array([]),
'Y_train': np.array([]),
'X_test': np.array([]),
'Y_test': np.array([])
}
classes = config['classes']
trainer.set_status('LOAD IMAGES')
max = 0
images = {}
dataset_path = job_model.get_dataset_downloads_dir(dataset)
meta_information_file = dataset_path + '/meta.json'
classes_changed = False
config_changed = False
had_previous = False
classes_md5 = hashlib.md5(simplejson.dumps(classes, default=invalid_json_values, sort_keys=True).encode('utf-8')).hexdigest()
validationFactor = 0.2
meta = {}
if os.path.isdir(dataset_path):
if os.path.isfile(meta_information_file):
with open(meta_information_file) as f:
meta = simplejson.load(f)
if meta:
had_previous = True
if 'classes_md5' in meta and meta['classes_md5'] != classes_md5:
classes_changed = True
trigger_changed = ['resize', 'resizeWidth', 'resizeHeight', 'resizeCompression']
for i in trigger_changed:
if i in meta['config'] and i in config and meta['config'][i] != config[i]:
config_changed = True
else:
config_changed = True
else:
config_changed = True
need_download = classes_changed or config_changed
if need_download:
if had_previous:
trainer.logger.info("Reset dataset and re-download images to " + dir)
if classes_changed:
trainer.logger.info(" .. because classes changed in", meta['classes_md5'], classes_md5, meta_information_file)
if config_changed:
trainer.logger.info(" .. because settings changed in", meta_information_file)
else:
trainer.logger.info("Download images to " + dir)
resize = bool(get_option(config, 'resize', True))
if resize:
resizeSize = (int(get_option(config, 'resizeWidth', 64)),
int(get_option(config, 'resizeHeight', 64)))
trainer.logger.info(" .. with resizing to %dx%d " % resizeSize)
# we need to download all images again
shutil.rmtree(dataset_path)
controller = {'running': True}
try:
for category in classes:
max += len(category['images'])
progress = trainer.job_backend.create_progress('dataset-download-images', max)
progress.label('Download dataset images')
for i in range(concurrent):
t = ImageDownloaderWorker(q, progress, dataset, max, images, controller)
t.daemon = True
t.start()
for category_idx, category in enumerate(classes):
for image in category['images']:
local_name = image['id']
local_path = '%s/%s' % (trainer.job_model.get_dataset_downloads_dir(dataset), local_name)
q.put([image, category_idx, local_path])
q.join()
controller['running'] = False
def move_image(image, category='training'):
if image['id'] in images and os.path.isfile(images[image['id']]):
target_path = dataset_path + \
'/%s/category_%s/%s' % (category, category_idx,
os.path.basename(images[image['id']]))
ensure_dir(os.path.dirname(target_path))
os.rename(images[image['id']], target_path)
for category_idx, category in enumerate(classes):
random.shuffle(category['images'])
position = int(math.ceil(len(category['images']) * validationFactor))
ensure_dir(dataset_path + '/training')
ensure_dir(dataset_path + '/validation')
for image in category['images'][position:]: # test data
if image['id'] in images and os.path.isfile(images[image['id']]):
move_image(image, 'training')
for image in category['images'][:position]: # validation data
if image['id'] in images and os.path.isfile(images[image['id']]):
move_image(image, 'validation')
with open(meta_information_file, 'w') as f:
meta = {
'loaded_at': classes_md5,
'classes_md5': classes_md5,
'config': config
}
simplejson.dump(meta, f, default=invalid_json_values)
except KeyboardInterrupt:
controller['running'] = False
sys.exit(1)
else:
trainer.logger.info("Downloaded images up2date in " + dir)
trainer.logger.info(" - Remove this directory if you want to re-download all images of your dataset and re-shuffle training/validation images.")
trainer.output_size = len(classes)
# change to type local_images
dataset_transformed = dataset.copy()
dataset_transformed['config']['path'] = dir
all_memory = get_option(dataset['config'], 'allMemory', False, 'bool')
if all_memory:
return read_images_in_memory(job_model, dataset_transformed, node, trainer)
else:
return read_images_keras_generator(job_model, dataset_transformed, node, trainer)
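# Illustrative meta.json sketch (written above after a successful download;
# the values are placeholders):
#
#   {"loaded_at": "<md5>", "classes_md5": "<md5>", "config": {...}}
#
# A later run re-downloads and re-splits the dataset only when classes_md5 or
# one of the resize-related settings differs from this file.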
|
|
# Unix SMB/CIFS implementation.
# backend code for provisioning DNS for a Samba4 server
#
# Copyright (C) Kai Blin <kai@samba.org> 2011
# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""DNS-related provisioning"""
import os
import uuid
import shutil
import time
import ldb
from base64 import b64encode
import samba
from samba.tdb_util import tdb_copy
from samba.ndr import ndr_pack, ndr_unpack
from samba import setup_file
from samba.dcerpc import dnsp, misc, security
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2000,
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2008_R2
)
from samba.descriptor import (
get_domain_descriptor,
get_domain_delete_protected1_descriptor,
get_domain_delete_protected2_descriptor,
get_dns_partition_descriptor,
get_dns_forest_microsoft_dns_descriptor,
get_dns_domain_microsoft_dns_descriptor
)
from samba.provision.common import (
setup_path,
setup_add_ldif,
setup_modify_ldif,
setup_ldb
)
def get_domainguid(samdb, domaindn):
res = samdb.search(base=domaindn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
domainguid = str(ndr_unpack(misc.GUID, res[0]["objectGUID"][0]))
return domainguid
def get_dnsadmins_sid(samdb, domaindn):
res = samdb.search(base="CN=DnsAdmins,CN=Users,%s" % domaindn, scope=ldb.SCOPE_BASE,
attrs=["objectSid"])
dnsadmins_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
return dnsadmins_sid
class ARecord(dnsp.DnssrvRpcRecord):
def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(ARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_A
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = ip_addr
class AAAARecord(dnsp.DnssrvRpcRecord):
def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(AAAARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_AAAA
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = ip6_addr
class CNameRecord(dnsp.DnssrvRpcRecord):
def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(CNameRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_CNAME
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = cname
class NSRecord(dnsp.DnssrvRpcRecord):
def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(NSRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_NS
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = dns_server
class SOARecord(dnsp.DnssrvRpcRecord):
def __init__(self, mname, rname, serial=1, refresh=900, retry=600,
expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE):
super(SOARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_SOA
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
soa = dnsp.soa()
soa.serial = serial
soa.refresh = refresh
soa.retry = retry
soa.expire = expire
soa.mname = mname
soa.rname = rname
self.data = soa
class SRVRecord(dnsp.DnssrvRpcRecord):
def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900,
rank=dnsp.DNS_RANK_ZONE):
super(SRVRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_SRV
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
srv = dnsp.srv()
srv.nameTarget = target
srv.wPort = port
srv.wPriority = priority
srv.wWeight = weight
self.data = srv
class TXTRecord(dnsp.DnssrvRpcRecord):
def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(TXTRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_TXT
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
stringlist = dnsp.string_list()
stringlist.count = len(slist)
stringlist.str = slist
self.data = stringlist
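# Illustrative sketch (hypothetical values): each wrapper class above fills a
# dnsp.DnssrvRpcRecord so it can be NDR-packed straight into a dnsRecord
# attribute, as the add_*_record helpers below do.
def _example_pack_a_record():
    rec = ARecord("192.0.2.1", serial=1, ttl=900)
    return ndr_pack(rec)  # blob suitable for ldb.MessageElement(..., "dnsRecord")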
class TypeProperty(dnsp.DnsProperty):
def __init__(self, zone_type=dnsp.DNS_ZONE_TYPE_PRIMARY):
super(TypeProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_TYPE
self.data = zone_type
class AllowUpdateProperty(dnsp.DnsProperty):
def __init__(self, allow_update=dnsp.DNS_ZONE_UPDATE_SECURE):
super(AllowUpdateProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE
self.data = allow_update
class SecureTimeProperty(dnsp.DnsProperty):
def __init__(self, secure_time=0):
super(SecureTimeProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_SECURE_TIME
self.data = secure_time
class NorefreshIntervalProperty(dnsp.DnsProperty):
def __init__(self, norefresh_interval=0):
super(NorefreshIntervalProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_NOREFRESH_INTERVAL
self.data = norefresh_interval
class RefreshIntervalProperty(dnsp.DnsProperty):
def __init__(self, refresh_interval=0):
super(RefreshIntervalProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_REFRESH_INTERVAL
self.data = refresh_interval
class AgingStateProperty(dnsp.DnsProperty):
def __init__(self, aging_enabled=0):
super(AgingStateProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_AGING_STATE
self.data = aging_enabled
class AgingEnabledTimeProperty(dnsp.DnsProperty):
def __init__(self, next_cycle_hours=0):
super(AgingEnabledTimeProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_AGING_ENABLED_TIME
self.data = next_cycle_hours
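# Illustrative sketch: the DnsProperty wrappers above are NDR-packed into the
# multi-valued dNSProperty attribute of a dnsZone object, e.g.
#
#   props = [ndr_pack(TypeProperty()), ndr_pack(AllowUpdateProperty())]
#
# exactly as add_rootservers() and add_domain_record() do below.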
def setup_dns_partitions(samdb, domainsid, domaindn, forestdn, configdn,
serverdn):
domainzone_dn = "DC=DomainDnsZones,%s" % domaindn
forestzone_dn = "DC=ForestDnsZones,%s" % forestdn
descriptor = get_dns_partition_descriptor(domainsid)
setup_add_ldif(samdb, setup_path("provision_dnszones_partitions.ldif"), {
"DOMAINZONE_DN": domainzone_dn,
"FORESTZONE_DN": forestzone_dn,
"SECDESC" : b64encode(descriptor)
})
domainzone_guid = get_domainguid(samdb, domainzone_dn)
forestzone_guid = get_domainguid(samdb, forestzone_dn)
domainzone_guid = str(uuid.uuid4())
forestzone_guid = str(uuid.uuid4())
domainzone_dns = ldb.Dn(samdb, domainzone_dn).canonical_ex_str().strip()
forestzone_dns = ldb.Dn(samdb, forestzone_dn).canonical_ex_str().strip()
protected1_desc = get_domain_delete_protected1_descriptor(domainsid)
protected2_desc = get_domain_delete_protected2_descriptor(domainsid)
setup_add_ldif(samdb, setup_path("provision_dnszones_add.ldif"), {
"DOMAINZONE_DN": domainzone_dn,
"FORESTZONE_DN": forestzone_dn,
"DOMAINZONE_GUID": domainzone_guid,
"FORESTZONE_GUID": forestzone_guid,
"DOMAINZONE_DNS": domainzone_dns,
"FORESTZONE_DNS": forestzone_dns,
"CONFIGDN": configdn,
"SERVERDN": serverdn,
"LOSTANDFOUND_DESCRIPTOR": b64encode(protected2_desc),
"INFRASTRUCTURE_DESCRIPTOR": b64encode(protected1_desc),
})
setup_modify_ldif(samdb, setup_path("provision_dnszones_modify.ldif"), {
"CONFIGDN": configdn,
"SERVERDN": serverdn,
"DOMAINZONE_DN": domainzone_dn,
"FORESTZONE_DN": forestzone_dn,
})
def add_dns_accounts(samdb, domaindn):
setup_add_ldif(samdb, setup_path("provision_dns_accounts_add.ldif"), {
"DOMAINDN": domaindn,
})
def add_dns_container(samdb, domaindn, prefix, domain_sid, dnsadmins_sid, forest=False):
name_map = {'DnsAdmins': str(dnsadmins_sid)}
if forest is True:
sd_val = get_dns_forest_microsoft_dns_descriptor(domain_sid,
name_map=name_map)
else:
sd_val = get_dns_domain_microsoft_dns_descriptor(domain_sid,
name_map=name_map)
# CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
msg = ldb.Message(ldb.Dn(samdb, "CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)))
msg["objectClass"] = ["top", "container"]
msg["nTSecurityDescriptor"] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_ADD,
"nTSecurityDescriptor")
samdb.add(msg)
def add_rootservers(samdb, domaindn, prefix):
rootservers = {}
rootservers["a.root-servers.net"] = "198.41.0.4"
rootservers["b.root-servers.net"] = "192.228.79.201"
rootservers["c.root-servers.net"] = "192.33.4.12"
rootservers["d.root-servers.net"] = "128.8.10.90"
rootservers["e.root-servers.net"] = "192.203.230.10"
rootservers["f.root-servers.net"] = "192.5.5.241"
rootservers["g.root-servers.net"] = "192.112.36.4"
rootservers["h.root-servers.net"] = "128.63.2.53"
rootservers["i.root-servers.net"] = "192.36.148.17"
rootservers["j.root-servers.net"] = "192.58.128.30"
rootservers["k.root-servers.net"] = "193.0.14.129"
rootservers["l.root-servers.net"] = "199.7.83.42"
rootservers["m.root-servers.net"] = "202.12.27.33"
rootservers_v6 = {}
rootservers_v6["a.root-servers.net"] = "2001:503:ba3e::2:30"
rootservers_v6["f.root-servers.net"] = "2001:500:2f::f"
rootservers_v6["h.root-servers.net"] = "2001:500:1::803f:235"
rootservers_v6["j.root-servers.net"] = "2001:503:c27::2:30"
rootservers_v6["k.root-servers.net"] = "2001:7fd::1"
rootservers_v6["m.root-servers.net"] = "2001:dc3::35"
container_dn = "DC=RootDNSServers,CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)
# Add DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
msg = ldb.Message(ldb.Dn(samdb, container_dn))
props = []
props.append(ndr_pack(TypeProperty(zone_type=dnsp.DNS_ZONE_TYPE_CACHE)))
props.append(ndr_pack(AllowUpdateProperty(allow_update=dnsp.DNS_ZONE_UPDATE_OFF)))
props.append(ndr_pack(SecureTimeProperty()))
props.append(ndr_pack(NorefreshIntervalProperty()))
props.append(ndr_pack(RefreshIntervalProperty()))
props.append(ndr_pack(AgingStateProperty()))
props.append(ndr_pack(AgingEnabledTimeProperty()))
msg["objectClass"] = ["top", "dnsZone"]
msg["cn"] = ldb.MessageElement("Zone", ldb.FLAG_MOD_ADD, "cn")
msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD, "dNSProperty")
samdb.add(msg)
# Add DC=@,DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
record = []
for rserver in rootservers:
record.append(ndr_pack(NSRecord(rserver, serial=0, ttl=0, rank=dnsp.DNS_RANK_ROOT_HINT)))
msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
# Add DC=<rootserver>,DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
for rserver in rootservers:
record = [ndr_pack(ARecord(rootservers[rserver], serial=0, ttl=0, rank=dnsp.DNS_RANK_ROOT_HINT))]
# Add AAAA record as well (How does W2K* add IPv6 records?)
#if rserver in rootservers_v6:
# record.append(ndr_pack(AAAARecord(rootservers_v6[rserver], serial=0, ttl=0)))
msg = ldb.Message(ldb.Dn(samdb, "DC=%s,%s" % (rserver, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_at_record(samdb, container_dn, prefix, hostname, dnsdomain, hostip, hostip6):
fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
at_records = []
# SOA record
at_soa_record = SOARecord(fqdn_hostname, "hostmaster.%s" % dnsdomain)
at_records.append(ndr_pack(at_soa_record))
# NS record
at_ns_record = NSRecord(fqdn_hostname)
at_records.append(ndr_pack(at_ns_record))
if hostip is not None:
# A record
at_a_record = ARecord(hostip)
at_records.append(ndr_pack(at_a_record))
if hostip6 is not None:
# AAAA record
at_aaaa_record = AAAARecord(hostip6)
at_records.append(ndr_pack(at_aaaa_record))
msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(at_records, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_srv_record(samdb, container_dn, prefix, host, port):
srv_record = SRVRecord(host, port)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(srv_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_ns_record(samdb, container_dn, prefix, host):
ns_record = NSRecord(host)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(ns_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_ns_glue_record(samdb, container_dn, prefix, host):
ns_record = NSRecord(host, rank=dnsp.DNS_RANK_NS_GLUE)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(ns_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_cname_record(samdb, container_dn, prefix, host):
cname_record = CNameRecord(host)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(cname_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_host_record(samdb, container_dn, prefix, hostip, hostip6):
host_records = []
if hostip:
a_record = ARecord(hostip)
host_records.append(ndr_pack(a_record))
if hostip6:
aaaa_record = AAAARecord(hostip6)
host_records.append(ndr_pack(aaaa_record))
if host_records:
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(host_records, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_domain_record(samdb, domaindn, prefix, dnsdomain, domainsid, dnsadmins_sid):
# DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
sddl = "O:SYG:BAD:AI" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" \
"(A;;CC;;;AU)" \
"(A;;RPLCLORC;;;WD)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
"(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
"(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;%s)" \
"(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
"(OA;CIID;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \
"(A;CIID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
"(A;CIID;LC;;;RU)" \
"(A;CIID;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \
"S:AI" % dnsadmins_sid
sec = security.descriptor.from_sddl(sddl, domainsid)
props = []
props.append(ndr_pack(TypeProperty()))
props.append(ndr_pack(AllowUpdateProperty()))
props.append(ndr_pack(SecureTimeProperty()))
props.append(ndr_pack(NorefreshIntervalProperty(norefresh_interval=168)))
props.append(ndr_pack(RefreshIntervalProperty(refresh_interval=168)))
props.append(ndr_pack(AgingStateProperty()))
props.append(ndr_pack(AgingEnabledTimeProperty()))
msg = ldb.Message(ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" % (dnsdomain, prefix, domaindn)))
msg["objectClass"] = ["top", "dnsZone"]
msg["ntSecurityDescriptor"] = ldb.MessageElement(ndr_pack(sec), ldb.FLAG_MOD_ADD,
"nTSecurityDescriptor")
msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD, "dNSProperty")
samdb.add(msg)
def add_msdcs_record(samdb, forestdn, prefix, dnsforest):
# DC=_msdcs.<DNSFOREST>,CN=MicrosoftDNS,<PREFIX>,<FORESTDN>
msg = ldb.Message(ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
(dnsforest, prefix, forestdn)))
msg["objectClass"] = ["top", "dnsZone"]
samdb.add(msg)
def add_dc_domain_records(samdb, domaindn, prefix, site, dnsdomain, hostname,
hostip, hostip6):
fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
# Set up domain container - DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
domain_container_dn = ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" %
(dnsdomain, prefix, domaindn))
# DC=@ record
add_at_record(samdb, domain_container_dn, "DC=@", hostname, dnsdomain,
hostip, hostip6)
# DC=<HOSTNAME> record
add_host_record(samdb, domain_container_dn, "DC=%s" % hostname, hostip,
hostip6)
# DC=_kerberos._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_kerberos._tcp",
fqdn_hostname, 88)
# DC=_kerberos._tcp.<SITENAME>._sites record
add_srv_record(samdb, domain_container_dn, "DC=_kerberos._tcp.%s._sites" %
site, fqdn_hostname, 88)
# DC=_kerberos._udp record
add_srv_record(samdb, domain_container_dn, "DC=_kerberos._udp",
fqdn_hostname, 88)
# DC=_kpasswd._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_kpasswd._tcp",
fqdn_hostname, 464)
# DC=_kpasswd._udp record
add_srv_record(samdb, domain_container_dn, "DC=_kpasswd._udp",
fqdn_hostname, 464)
# DC=_ldap._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp", fqdn_hostname,
389)
# DC=_ldap._tcp.<SITENAME>._sites record
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.%s._sites" %
site, fqdn_hostname, 389)
# FIXME: The number of SRV records depend on the various roles this DC has.
# _gc and _msdcs records are added only if we are the forest DC, not a subdomain DC
#
# Assumption: current DC is GC and add all the entries
# DC=_gc._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_gc._tcp", fqdn_hostname,
3268)
# DC=_gc._tcp.<SITENAME>,_sites record
add_srv_record(samdb, domain_container_dn, "DC=_gc._tcp.%s._sites" % site,
fqdn_hostname, 3268)
# DC=_msdcs record
add_ns_glue_record(samdb, domain_container_dn, "DC=_msdcs", fqdn_hostname)
# FIXME: Following entries are added only if DomainDnsZones and ForestDnsZones partitions
# are created
#
# Assumption: Additional entries won't hurt on os_level = 2000
# DC=_ldap._tcp.<SITENAME>._sites.DomainDnsZones
add_srv_record(samdb, domain_container_dn,
"DC=_ldap._tcp.%s._sites.DomainDnsZones" % site, fqdn_hostname,
389)
# DC=_ldap._tcp.<SITENAME>._sites.ForestDnsZones
add_srv_record(samdb, domain_container_dn,
"DC=_ldap._tcp.%s._sites.ForestDnsZones" % site, fqdn_hostname,
389)
# DC=_ldap._tcp.DomainDnsZones
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.DomainDnsZones",
fqdn_hostname, 389)
# DC=_ldap._tcp.ForestDnsZones
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.ForestDnsZones",
fqdn_hostname, 389)
# DC=DomainDnsZones
add_host_record(samdb, domain_container_dn, "DC=DomainDnsZones", hostip,
hostip6)
# DC=ForestDnsZones
add_host_record(samdb, domain_container_dn, "DC=ForestDnsZones", hostip,
hostip6)
def add_dc_msdcs_records(samdb, forestdn, prefix, site, dnsforest, hostname,
hostip, hostip6, domainguid, ntdsguid):
fqdn_hostname = "%s.%s" % (hostname, dnsforest)
# Set up forest container - DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
forest_container_dn = ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
(dnsforest, prefix, forestdn))
# DC=@ record
add_at_record(samdb, forest_container_dn, "DC=@", hostname, dnsforest,
None, None)
# DC=_kerberos._tcp.dc record
add_srv_record(samdb, forest_container_dn, "DC=_kerberos._tcp.dc",
fqdn_hostname, 88)
# DC=_kerberos._tcp.<SITENAME>._sites.dc record
add_srv_record(samdb, forest_container_dn,
"DC=_kerberos._tcp.%s._sites.dc" % site, fqdn_hostname, 88)
# DC=_ldap._tcp.dc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.dc",
fqdn_hostname, 389)
# DC=_ldap._tcp.<SITENAME>._sites.dc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.%s._sites.dc" %
site, fqdn_hostname, 389)
# DC=_ldap._tcp.<SITENAME>._sites.gc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.%s._sites.gc" %
site, fqdn_hostname, 3268)
# DC=_ldap._tcp.gc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.gc",
fqdn_hostname, 3268)
# DC=_ldap._tcp.pdc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.pdc",
fqdn_hostname, 389)
# DC=gc record
add_host_record(samdb, forest_container_dn, "DC=gc", hostip, hostip6)
# DC=_ldap._tcp.<DOMAINGUID>.domains record
add_srv_record(samdb, forest_container_dn,
"DC=_ldap._tcp.%s.domains" % domainguid, fqdn_hostname, 389)
# DC=<NTDSGUID>
add_cname_record(samdb, forest_container_dn, "DC=%s" % ntdsguid,
fqdn_hostname)
def secretsdb_setup_dns(secretsdb, names, private_dir, realm,
dnsdomain, dns_keytab_path, dnspass, key_version_number):
"""Add DNS specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
:param names: Names shortcut
:param machinepass: Machine password
"""
try:
os.unlink(os.path.join(private_dir, dns_keytab_path))
except OSError:
pass
if key_version_number is None:
key_version_number = 1
setup_ldb(secretsdb, setup_path("secrets_dns.ldif"), {
"REALM": realm,
"DNSDOMAIN": dnsdomain,
"DNS_KEYTAB": dns_keytab_path,
"DNSPASS_B64": b64encode(dnspass),
"KEY_VERSION_NUMBER": str(key_version_number),
"HOSTNAME": names.hostname,
"DNSNAME" : '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def create_dns_dir(logger, paths):
"""Write out a DNS zone file, from the info in the current database.
:param logger: Logger object
:param paths: paths object
"""
dns_dir = os.path.dirname(paths.dns)
try:
shutil.rmtree(dns_dir, True)
except OSError:
pass
os.mkdir(dns_dir, 0770)
if paths.bind_gid is not None:
try:
os.chown(dns_dir, -1, paths.bind_gid)
# chmod needed to cope with umask
os.chmod(dns_dir, 0770)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error("Failed to chown %s to bind gid %u" % (
dns_dir, paths.bind_gid))
def create_zone_file(lp, logger, paths, targetdir, dnsdomain,
hostip, hostip6, hostname, realm, domainguid,
ntdsguid, site):
"""Write out a DNS zone file, from the info in the current database.
:param paths: paths object
:param dnsdomain: DNS Domain name
:param domaindn: DN of the Domain
:param hostip: Local IPv4 IP
:param hostip6: Local IPv6 IP
:param hostname: Local hostname
:param realm: Realm name
:param domainguid: GUID of the domain.
:param ntdsguid: GUID of the hosts nTDSDSA record.
"""
assert isinstance(domainguid, str)
if hostip6 is not None:
hostip6_base_line = " IN AAAA " + hostip6
hostip6_host_line = hostname + " IN AAAA " + hostip6
gc_msdcs_ip6_line = "gc._msdcs IN AAAA " + hostip6
else:
hostip6_base_line = ""
hostip6_host_line = ""
gc_msdcs_ip6_line = ""
if hostip is not None:
hostip_base_line = " IN A " + hostip
hostip_host_line = hostname + " IN A " + hostip
gc_msdcs_ip_line = "gc._msdcs IN A " + hostip
else:
hostip_base_line = ""
hostip_host_line = ""
gc_msdcs_ip_line = ""
# we need to freeze the zone while we update the contents
if targetdir is None:
rndc = ' '.join(lp.get("rndc command"))
os.system(rndc + " freeze " + lp.get("realm"))
setup_file(setup_path("provision.zone"), paths.dns, {
"HOSTNAME": hostname,
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"HOSTIP_BASE_LINE": hostip_base_line,
"HOSTIP_HOST_LINE": hostip_host_line,
"DOMAINGUID": domainguid,
"DATESTRING": time.strftime("%Y%m%d%H"),
"DEFAULTSITE": site,
"NTDSGUID": ntdsguid,
"HOSTIP6_BASE_LINE": hostip6_base_line,
"HOSTIP6_HOST_LINE": hostip6_host_line,
"GC_MSDCS_IP_LINE": gc_msdcs_ip_line,
"GC_MSDCS_IP6_LINE": gc_msdcs_ip6_line,
})
if paths.bind_gid is not None:
try:
os.chown(paths.dns, -1, paths.bind_gid)
# chmod needed to cope with umask
os.chmod(paths.dns, 0664)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error("Failed to chown %s to bind gid %u" % (
paths.dns, paths.bind_gid))
if targetdir is None:
os.system(rndc + " unfreeze " + lp.get("realm"))
def create_samdb_copy(samdb, logger, paths, names, domainsid, domainguid):
"""Create a copy of samdb and give write permissions to named for dns partitions
"""
private_dir = paths.private_dir
samldb_dir = os.path.join(private_dir, "sam.ldb.d")
dns_dir = os.path.dirname(paths.dns)
dns_samldb_dir = os.path.join(dns_dir, "sam.ldb.d")
# Find the partitions and corresponding filenames
partfile = {}
res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE, attrs=["partition"])
for tmp in res[0]["partition"]:
(nc, fname) = tmp.split(':')
partfile[nc.upper()] = fname
# Create empty domain partition
domaindn = names.domaindn.upper()
domainpart_file = os.path.join(dns_dir, partfile[domaindn])
try:
os.mkdir(dns_samldb_dir)
file(domainpart_file, 'w').close()
# Fill the basedn and @OPTION records in domain partition
dom_ldb = samba.Ldb(domainpart_file)
domainguid_line = "objectGUID: %s\n-" % domainguid
descr = b64encode(get_domain_descriptor(domainsid))
setup_add_ldif(dom_ldb, setup_path("provision_basedn.ldif"), {
"DOMAINDN" : names.domaindn,
"DOMAINGUID" : domainguid_line,
"DOMAINSID" : str(domainsid),
"DESCRIPTOR" : descr})
setup_add_ldif(dom_ldb,
setup_path("provision_basedn_options.ldif"), None)
except:
logger.error(
"Failed to setup database for BIND, AD based DNS cannot be used")
raise
del partfile[domaindn]
# Link dns partitions and metadata
domainzonedn = "DC=DOMAINDNSZONES,%s" % names.domaindn.upper()
forestzonedn = "DC=FORESTDNSZONES,%s" % names.rootdn.upper()
domainzone_file = partfile[domainzonedn]
forestzone_file = partfile[forestzonedn]
metadata_file = "metadata.tdb"
try:
os.link(os.path.join(samldb_dir, metadata_file),
os.path.join(dns_samldb_dir, metadata_file))
os.link(os.path.join(private_dir, domainzone_file),
os.path.join(dns_dir, domainzone_file))
os.link(os.path.join(private_dir, forestzone_file),
os.path.join(dns_dir, forestzone_file))
except OSError:
logger.error(
"Failed to setup database for BIND, AD based DNS cannot be used")
raise
del partfile[domainzonedn]
del partfile[forestzonedn]
# Copy root, config, schema partitions (and any other if any)
# Since samdb is open in the current process, copy them in a child process
try:
tdb_copy(os.path.join(private_dir, "sam.ldb"),
os.path.join(dns_dir, "sam.ldb"))
for nc in partfile:
pfile = partfile[nc]
tdb_copy(os.path.join(private_dir, pfile),
os.path.join(dns_dir, pfile))
except:
logger.error(
"Failed to setup database for BIND, AD based DNS cannot be used")
raise
# Give bind read/write permissions dns partitions
if paths.bind_gid is not None:
try:
os.chown(samldb_dir, -1, paths.bind_gid)
os.chmod(samldb_dir, 0750)
for dirname, dirs, files in os.walk(dns_dir):
for d in dirs:
dpath = os.path.join(dirname, d)
os.chown(dpath, -1, paths.bind_gid)
os.chmod(dpath, 0770)
for f in files:
if f.endswith('.ldb') or f.endswith('.tdb'):
fpath = os.path.join(dirname, f)
os.chown(fpath, -1, paths.bind_gid)
os.chmod(fpath, 0660)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error(
"Failed to set permissions to sam.ldb* files, fix manually")
else:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.warning("""Unable to find group id for BIND,
set permissions to sam.ldb* files manually""")
def create_dns_update_list(lp, logger, paths):
"""Write out a dns_update_list file"""
# note that we use no variable substitution on this file
# the substitution is done at runtime by samba_dnsupdate, samba_spnupdate
setup_file(setup_path("dns_update_list"), paths.dns_update_list, None)
setup_file(setup_path("spn_update_list"), paths.spn_update_list, None)
def create_named_conf(paths, realm, dnsdomain, dns_backend):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param paths: all paths
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param dns_backend: DNS backend type
:param keytab_name: File name of DNS keytab file
"""
if dns_backend == "BIND9_FLATFILE":
setup_file(setup_path("named.conf"), paths.namedconf, {
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"ZONE_FILE": paths.dns,
"REALM_WC": "*." + ".".join(realm.split(".")[1:]),
"NAMED_CONF": paths.namedconf,
"NAMED_CONF_UPDATE": paths.namedconf_update
})
setup_file(setup_path("named.conf.update"), paths.namedconf_update)
elif dns_backend == "BIND9_DLZ":
setup_file(setup_path("named.conf.dlz"), paths.namedconf, {
"NAMED_CONF": paths.namedconf,
"MODULESDIR" : samba.param.modules_dir(),
})
def create_named_txt(path, realm, dnsdomain, dnsname, private_dir,
keytab_name):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param path: Path of the new named.conf file.
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param private_dir: Path to private directory
:param keytab_name: File name of DNS keytab file
"""
setup_file(setup_path("named.txt"), path, {
"DNSDOMAIN": dnsdomain,
"DNSNAME" : dnsname,
"REALM": realm,
"DNS_KEYTAB": keytab_name,
"DNS_KEYTAB_ABS": os.path.join(private_dir, keytab_name),
"PRIVATE_DIR": private_dir
})
def is_valid_dns_backend(dns_backend):
return dns_backend in ("BIND9_FLATFILE", "BIND9_DLZ", "SAMBA_INTERNAL", "NONE")
def is_valid_os_level(os_level):
return DS_DOMAIN_FUNCTION_2000 <= os_level <= DS_DOMAIN_FUNCTION_2008_R2
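# Illustrative sketch: setup_ad_dns() below rejects anything failing these
# checks, e.g. is_valid_dns_backend("SAMBA_INTERNAL") is True while
# is_valid_dns_backend("BIND8") is False, and os_level must lie between
# DS_DOMAIN_FUNCTION_2000 and DS_DOMAIN_FUNCTION_2008_R2 inclusive.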
def create_dns_legacy(samdb, domainsid, forestdn, dnsadmins_sid):
# Set up MicrosoftDNS container
add_dns_container(samdb, forestdn, "CN=System", domainsid, dnsadmins_sid)
# Add root servers
add_rootservers(samdb, forestdn, "CN=System")
def fill_dns_data_legacy(samdb, domainsid, forestdn, dnsdomain, site, hostname,
hostip, hostip6, dnsadmins_sid):
# Add domain record
add_domain_record(samdb, forestdn, "CN=System", dnsdomain, domainsid,
dnsadmins_sid)
# Add DNS records for a DC in domain
add_dc_domain_records(samdb, forestdn, "CN=System", site, dnsdomain,
hostname, hostip, hostip6)
def create_dns_partitions(samdb, domainsid, names, domaindn, forestdn,
dnsadmins_sid):
# Set up additional partitions (DomainDnsZones, ForstDnsZones)
setup_dns_partitions(samdb, domainsid, domaindn, forestdn,
names.configdn, names.serverdn)
# Set up MicrosoftDNS containers
add_dns_container(samdb, domaindn, "DC=DomainDnsZones", domainsid,
dnsadmins_sid)
add_dns_container(samdb, forestdn, "DC=ForestDnsZones", domainsid,
dnsadmins_sid, forest=True)
def fill_dns_data_partitions(samdb, domainsid, site, domaindn, forestdn,
dnsdomain, dnsforest, hostname, hostip, hostip6,
domainguid, ntdsguid, dnsadmins_sid, autofill=True):
"""Fill data in various AD partitions
:param samdb: LDB object connected to sam.ldb file
:param domainsid: Domain SID (as dom_sid object)
:param site: Site name to create hostnames in
:param domaindn: DN of the domain
:param forestdn: DN of the forest
:param dnsdomain: DNS name of the domain
:param dnsforest: DNS name of the forest
:param hostname: Host name of this DC
:param hostip: IPv4 addresses
:param hostip6: IPv6 addresses
:param domainguid: Domain GUID
:param ntdsguid: NTDS GUID
:param dnsadmins_sid: SID for DnsAdmins group
:param autofill: Create DNS records (using fixed template)
"""
##### Set up DC=DomainDnsZones,<DOMAINDN>
# Add rootserver records
add_rootservers(samdb, domaindn, "DC=DomainDnsZones")
# Add domain record
add_domain_record(samdb, domaindn, "DC=DomainDnsZones", dnsdomain,
domainsid, dnsadmins_sid)
# Add DNS records for a DC in domain
if autofill:
add_dc_domain_records(samdb, domaindn, "DC=DomainDnsZones", site,
dnsdomain, hostname, hostip, hostip6)
##### Set up DC=ForestDnsZones,<DOMAINDN>
# Add _msdcs record
add_msdcs_record(samdb, forestdn, "DC=ForestDnsZones", dnsforest)
# Add DNS records for a DC in forest
if autofill:
add_dc_msdcs_records(samdb, forestdn, "DC=ForestDnsZones", site,
dnsforest, hostname, hostip, hostip6,
domainguid, ntdsguid)
def setup_ad_dns(samdb, secretsdb, domainsid, names, paths, lp, logger,
dns_backend, os_level, site, dnspass=None, hostip=None, hostip6=None,
targetdir=None):
"""Provision DNS information (assuming GC role)
:param samdb: LDB object connected to sam.ldb file
:param secretsdb: LDB object connected to secrets.ldb file
:param domainsid: Domain SID (as dom_sid object)
:param names: Names shortcut
:param paths: Paths shortcut
:param lp: Loadparm object
:param logger: Logger object
:param dns_backend: Type of DNS backend
:param os_level: Functional level (treated as os level)
:param site: Site to create hostnames in
:param dnspass: Password for bind's DNS account
:param hostip: IPv4 address
:param hostip6: IPv6 address
:param targetdir: Target directory for creating DNS-related files for BIND9
"""
if not is_valid_dns_backend(dns_backend):
raise Exception("Invalid dns backend: %r" % dns_backend)
if not is_valid_os_level(os_level):
raise Exception("Invalid os level: %r" % os_level)
if dns_backend == "NONE":
logger.info("No DNS backend set, not configuring DNS")
return
# Add dns accounts (DnsAdmins, DnsUpdateProxy) in domain
logger.info("Adding DNS accounts")
add_dns_accounts(samdb, names.domaindn)
# If dns_backend is BIND9_FLATFILE
# Populate only CN=MicrosoftDNS,CN=System,<FORESTDN>
#
# If dns_backend is SAMBA_INTERNAL or BIND9_DLZ
# Populate DNS partitions
# If os_level < 2003 (DS_DOMAIN_FUNCTION_2000)
# All dns records are in CN=MicrosoftDNS,CN=System,<FORESTDN>
#
# If os_level >= 2003 (DS_DOMAIN_FUNCTION_2003, DS_DOMAIN_FUNCTION_2008,
# DS_DOMAIN_FUNCTION_2008_R2)
# Root server records are in CN=MicrosoftDNS,CN=System,<FORESTDN>
# Domain records are in CN=MicrosoftDNS,CN=System,<FORESTDN>
# Domain records are in CN=MicrosoftDNS,DC=DomainDnsZones,<DOMAINDN>
# Forest records are in CN=MicrosoftDNS,DC=ForestDnsZones,<FORESTDN>
domaindn = names.domaindn
forestdn = samdb.get_root_basedn().get_linearized()
dnsdomain = names.dnsdomain.lower()
dnsforest = dnsdomain
hostname = names.netbiosname.lower()
dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
domainguid = get_domainguid(samdb, domaindn)
# Create CN=System
logger.info("Creating CN=MicrosoftDNS,CN=System,%s" % forestdn)
create_dns_legacy(samdb, domainsid, forestdn, dnsadmins_sid)
if os_level == DS_DOMAIN_FUNCTION_2000:
# Populating legacy dns
logger.info("Populating CN=MicrosoftDNS,CN=System,%s" % forestdn)
fill_dns_data_legacy(samdb, domainsid, forestdn, dnsdomain, site,
hostname, hostip, hostip6, dnsadmins_sid)
elif dns_backend in ("SAMBA_INTERNAL", "BIND9_DLZ") and \
os_level >= DS_DOMAIN_FUNCTION_2003:
# Create DNS partitions
logger.info("Creating DomainDnsZones and ForestDnsZones partitions")
create_dns_partitions(samdb, domainsid, names, domaindn, forestdn,
dnsadmins_sid)
# Populating dns partitions
logger.info("Populating DomainDnsZones and ForestDnsZones partitions")
fill_dns_data_partitions(samdb, domainsid, site, domaindn, forestdn,
dnsdomain, dnsforest, hostname, hostip, hostip6,
domainguid, names.ntdsguid, dnsadmins_sid)
if dns_backend.startswith("BIND9_"):
setup_bind9_dns(samdb, secretsdb, domainsid, names, paths, lp, logger,
dns_backend, os_level, site=site, dnspass=dnspass, hostip=hostip,
hostip6=hostip6, targetdir=targetdir)
def setup_bind9_dns(samdb, secretsdb, domainsid, names, paths, lp, logger,
dns_backend, os_level, site=None, dnspass=None, hostip=None,
hostip6=None, targetdir=None, key_version_number=None):
"""Provision DNS information (assuming BIND9 backend in DC role)
:param samdb: LDB object connected to sam.ldb file
:param secretsdb: LDB object connected to secrets.ldb file
:param domainsid: Domain SID (as dom_sid object)
:param names: Names shortcut
:param paths: Paths shortcut
:param lp: Loadparm object
:param logger: Logger object
:param dns_backend: Type of DNS backend
:param os_level: Functional level (treated as os level)
:param site: Site to create hostnames in
:param dnspass: Password for bind's DNS account
:param hostip: IPv4 address
:param hostip6: IPv6 address
:param targetdir: Target directory for creating DNS-related files for BIND9
"""
if (not is_valid_dns_backend(dns_backend) or
not dns_backend.startswith("BIND9_")):
raise Exception("Invalid dns backend: %r" % dns_backend)
if not is_valid_os_level(os_level):
raise Exception("Invalid os level: %r" % os_level)
domaindn = names.domaindn
domainguid = get_domainguid(samdb, domaindn)
secretsdb_setup_dns(secretsdb, names,
paths.private_dir, realm=names.realm,
dnsdomain=names.dnsdomain,
dns_keytab_path=paths.dns_keytab, dnspass=dnspass,
key_version_number=key_version_number)
create_dns_dir(logger, paths)
if dns_backend == "BIND9_FLATFILE":
create_zone_file(lp, logger, paths, targetdir, site=site,
dnsdomain=names.dnsdomain, hostip=hostip,
hostip6=hostip6, hostname=names.hostname,
realm=names.realm, domainguid=domainguid,
ntdsguid=names.ntdsguid)
if dns_backend == "BIND9_DLZ" and os_level >= DS_DOMAIN_FUNCTION_2003:
create_samdb_copy(samdb, logger, paths, names, domainsid, domainguid)
create_named_conf(paths, realm=names.realm,
dnsdomain=names.dnsdomain, dns_backend=dns_backend)
create_named_txt(paths.namedtxt,
realm=names.realm, dnsdomain=names.dnsdomain,
dnsname = "%s.%s" % (names.hostname, names.dnsdomain),
private_dir=paths.private_dir,
keytab_name=paths.dns_keytab)
logger.info("See %s for an example configuration include file for BIND",
paths.namedconf)
logger.info("and %s for further documentation required for secure DNS "
"updates", paths.namedtxt)
|
|
#!/usr/bin/env python
# Copyright 2011, The Board of Regents of Leland Stanford, Jr. University
# All rights reserved. See LICENSE.
# Author: Christine Williams <christine.bennett.williams@gmail.com>
# Description: Pulls events from database and writes them to disc.
from Cheetah.Template import Template
from lxml import etree
from pprint import pprint
from optparse import OptionParser
from StringIO import StringIO
from dateutil import rrule
import bisect
import collections
import datetime
import re
import sys
import datastore
import config
import file_manager
# TODO: Figure out why it creates pages with no events (per month calendars).
# TODO: Remove workshop events from Events Calendar.
# Known issues:
# Doesn't line break in event descriptions. (I thought we had fixed that?)
# Creates pages with no events if another calendar has events in those months. Workshop landing page only?
# Title not displaying correctly. Displays "Event Calendar" or "Workshop Calendar" all the time.
# Per Month Landing Cals: "Events For March 2012" or "Workshop Events For 2012"
# Per Month Indiv Cals: "Stanford Humanities Center Events For March 2012"
# Indiv Cals landing: "Stanford Humanities Center Events", "Archaeology Today Calendar"
# In the middle of a month, forward and back link to the current month. Solvable problem? Change language
# to match landing pages: "All events for"
class PostFlipBook:
"""Sets up the flipbooks for posts- yearmonths, tags, all posts."""
def __init__(self, uri="", pretty_name=""):
self.uri = uri
self.pretty_name = pretty_name
self.posts = []
def render(self, fm, options):
groups = group(self.posts, 10)
pagenums = range(len(groups))
pages = zip([None] + pagenums[:-1], pagenums, pagenums[1:] + [None], groups)
for next_pg_num, current_pg_num, back_pg_num, posts in pages:
fm.save(options.output_dir + self.page_uri(current_pg_num), str(Template(file="news-template.tmpl",
searchList=[{"posts" : posts,
"pretty_name": self.pretty_name,
"forward_url": self.page_uri(next_pg_num),
"forward_text": "Newer Posts»",
"back_url": self.page_uri(back_pg_num),
"back_text": "Older Posts"}])))
def page_uri(self, current_pg_num):
if current_pg_num is None:
return None
if current_pg_num == 0:
return self.uri
else:
path, extension = self.uri.rsplit(".", 1)
return path + "-" + str(current_pg_num) + "." + extension
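# Illustrative sketch (hypothetical uri): for a flipbook created with
# uri="news-videos/news/index.html", page_uri(0) returns the uri itself,
# page_uri(2) returns "news-videos/news/index-2.html", and page_uri(None)
# returns None so the template can suppress that forward/back link.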
class FlipbookIndex:
def __init__(self, yearmonth, categories):
self.yearmonth = yearmonth
self.categories = categories
def group(lst, n):
return zip(*[lst[i::n] for i in range(n)])
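# Illustrative sketch (not part of the original module): group() slices a list
# into n-sized tuples and silently drops any remainder, which render() above
# relies on to paginate posts ten at a time.
def _example_group():
    return group([1, 2, 3, 4, 5], 2)  # -> [(1, 2), (3, 4)]; the trailing 5 is dropped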
def parse_args(argv):
"""Sets up the option parser and parses command line arguments.
Returns:
options, args: a pair whose first element is an Options object with settings
taken from the command line, and whose second element is the remaining
unparsed arguments on the command line.
"""
op = OptionParser()
op.add_option("-o", "--output-dir", dest="output_dir",
help="Output generated files under DIR",
metavar="DIR",
default="/Library/Server/Web/Data/Sites/Default/")
op.add_option("-t", "--test-date", dest="test_date",
action="store_true",
help="Force the date to 2012-01-31 for testing",
default=False)
options, args = op.parse_args(argv[1:])
if not options.output_dir.endswith("/"):
options.output_dir += "/"
return options, args
def main(argv):
options, args = parse_args(argv)
fm = file_manager.FileManager()
ds = datastore.DataStore("database.db")
now = datetime.datetime.now()
if options.test_date:
now = datetime.datetime(2012, 01, 31, 8, 25)
end_date = now + datetime.timedelta(31)
all_events = CalendarFlipBook(calendar_name="Events Calendar",
landing_page_template="calendar-landing-page.tmpl",
landing_page_uri="events/calendar/index.html",
title_prefix="Events")
all_workshops = CalendarFlipBook(calendar_name="Workshop Calendar",
landing_page_template="workshop-landing-page.tmpl",
landing_page_uri="workshops/calendar/index.html",
title_prefix="Workshop Events")
flipbooks = {} #calendar_name -> flipbook
#fix calendar_name and template.
for c in config.calendar_ids:
if c.calendar_name in ("Stanford Humanities Center Events",
"Co-sponsored Events Held at the Humanities Center",
"Test SHC Calendar"):
landing_page_uri="events/calendar/%s.html" % friendly_title(c.calendar_name)
else:
landing_page_uri="workshops/calendar/%s.html" % friendly_title(c.calendar_name)
flipbooks[c.calendar_name] = CalendarFlipBook(calendar_name=c.calendar_name,
title_prefix=c.calendar_name + " Events",
landing_page_template=c.landing_page_template,
landing_page_uri=landing_page_uri)
calendars = [all_events, all_workshops] + flipbooks.values()
events = list(ds.GetAllEvents())
for event in events:
all_events.AddEvent(event, now, end_date)
all_workshops.AddEvent(event, now, end_date)
flipbooks[event.calendar_title].AddEvent(event, now, end_date)
for calendar in calendars:
calendar.WriteUpcomingEvents(options, fm, calendars, now)
calendar.WritePerMonthCalendars(options, fm, calendars)
calendar.WritePerDayCalendars(options, fm, calendars)
calendar.WriteMiniCals(options, fm)
WriteEventPages(options, fm, events, calendars) # Move me last
all_posts = list(ds.GetAllPosts())
WritePostPages(options, fm, all_posts)
all_posts_fb = PostFlipBook("news-videos/news/index.html", "All Posts")
for post in all_posts:
all_posts_fb.posts.append(post)
all_posts_fb.render(fm, options)
if options.test_date:
SanityCheck(fm, options)
#print fm.show_diff()
fm.commit()
def MyAssert(actual, expected):
    assert actual == expected, "Got %r, expected %r" % (actual, expected)
class CalendarFlipBook:
def __init__(self, landing_page_template="", calendar_name="", title_prefix="",
landing_page_uri=""):
self.landing_page_template = landing_page_template
self.calendar_name = calendar_name
self.title_prefix = title_prefix
self.landing_page_uri = landing_page_uri
self.events = {} # month -> [event]
self.daily_events = {} # day -> [event]
self.upcoming = []
self.next_date = None
self.back_date = None
self.earliest_date = None
self.latest_date = None
def __repr__(self):
return "<Calendar %r>" % self.calendar_name
def WriteMiniCals(self, options, fm):
if self.earliest_date is None or self.latest_date is None:
return
first_day = datetime.datetime(self.earliest_date.year,
self.earliest_date.month,
1)
        # NOTE(scottw): datetime.weekday() returns Monday as day 0, but the minical grid is
        # laid out Sunday-first, so we shift by one (mod 7) to find the column for the first day.
first_weekday = (first_day.weekday() + 1) % 7
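        # Example: a month starting on a Wednesday has weekday() == 2, so first_weekday == 3
        # and the day grid below is padded with three leading blanks (Sun, Mon, Tue).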
        # The (exclusive) end of the range is January 1 of the year after self.latest_date,
        # so the day grid runs through the end of that year.
last_day = datetime.datetime(self.latest_date.year + 1, 1, 1)
# First, fill out a flat list of days in the range (first_day, last_day]
days = [None] * first_weekday # Pad with None up to first weekday
current_day = first_day
while current_day < last_day:
days.append(current_day)
current_day += datetime.timedelta(days=1)
days_in_last_week = len(days) % 7
days_of_padding = (7 - days_in_last_week) % 7
days.extend([None] * days_of_padding) # Add trailing None to come to exactly a week's worth
        assert len(days) % 7 == 0, "There should be an exact multiple of 7 days!"
month_starts = {}
for num, day in enumerate(days):
if not day:
continue
month = datetime.datetime(day.year, day.month, 1)
if month not in month_starts:
month_starts[month] = num / 7
months = month_starts
# Divide up the day list into weeks
weeks = []
for week in range(len(days) / 7):
sunday = week * 7
saturday = sunday + 7
weeks.append(days[sunday:saturday])
years = {}
for month in months:
years.setdefault(month.year, []).append(month)
month_dates = sorted(months.keys())
for prev_month, month, next_month in zip([None] + month_dates[:-1],
month_dates,
month_dates[1:] + [None]):
fm.save(options.output_dir + self.minical_uri(month),
str(Template(file="minical.tmpl",
searchList=[{"weeks" : weeks[months[month]:months[month] + 6],
"daily_events": self.daily_events,
"month": month,
"years": years,
"prev_month": prev_month,
"next_month": next_month,
"minical_uri": self.minical_uri,
"day_uri": self.day_uri
}])))
def WriteUpcomingEvents(self, options, fm, calendars, today):
forward_url = self.next_date and "../../" + self.month_uri(self.next_date)
forward_text = forward_url and self.next_date.strftime('%b %Y') + "»"
back_url = self.back_date and "../../" + self.month_uri(self.back_date)
back_text = back_url and self.back_date.strftime('%b %Y')
minical_uri = "../../" + self.minical_uri(today)
# TESTING CODE
# fm.save(options.output_dir + self.minical_uri(today), "I'm a minical for %r" % today)
fm.save(options.output_dir + self.landing_page_uri,
str(Template(file=self.landing_page_template,
searchList=[{"events": self.upcoming,
"calendar_title": self.calendar_name,
# TODO(chris): Change the template calendar_urls to calendars.
# in the template:
# #for calendar in calendars
# <a href="$calendar.uri()">$calendar.calendar_name</a>
# #end
"calendar_urls": [(c.calendar_name, c.landing_page_uri) for c in calendars],
"forward_url": forward_url,
"forward_text": forward_text,
"back_url": back_url,
"back_text": back_text,
"minical_uri": minical_uri}])))
def WritePerMonthCalendars(self, options, fm, calendars):
month_events = sorted(self.events.items())
months = [month for month, events in month_events]
for (yearmonth, events), back, forward in zip(month_events,
[None] + months[:-1],
months[1:] + [None]):
per_month_name = self.calendar_name + yearmonth.strftime(": %B %Y")
minical_uri = "../../" + self.minical_uri(yearmonth)
# fm.save(options.output_dir + self.minical_uri(yearmonth), "I'm a minical for %r" % yearmonth)
fm.save(options.output_dir + self.month_uri(yearmonth),
str(Template(file=self.landing_page_template,
searchList=[{"events": events,
"calendar_urls": [(c.calendar_name, c.landing_page_uri) for c in calendars],
"calendar_title": per_month_name,
"back_url": back and "../../" + self.month_uri(back),
"back_text": back and back.strftime ('%b %Y'),
"forward_url": forward and "../../" + self.month_uri(forward),
# TODO(chris): Put » in the template
"forward_text": forward and forward.strftime('%b %Y') + "»",
"minical_uri": minical_uri}])))
def WritePerDayCalendars(self, options, fm, calendars):
for day, events in self.daily_events.iteritems():
per_day_name = self.calendar_name + day.strftime(": %B %d, %Y")
minical_uri = "../../" + self.minical_uri(day) + "#day:" + day.strftime('%d %b %Y')
fm.save(options.output_dir + self.day_uri(day),
str(Template(file=self.landing_page_template,
searchList=[{"events": events,
"calendar_urls": [(c.calendar_name, c.landing_page_uri) for c in calendars],
"calendar_title": per_day_name,
"back_url": None,
"back_text": None,
"forward_url": None,
"forward_text": None,
"minical_uri": minical_uri}])))
def AddEvent(self, event, now, end_date):
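        # Bucket the event by month and by day, widen the known date range, and track the
        # nearest event dates outside the [now, end_date] window for the back/forward links.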
month = datetime.datetime(event.start_time.year, event.start_time.month, 1)
if month not in self.events:
self.events[month] = []
self.events[month].append(event)
day = datetime.datetime(event.start_time.year, event.start_time.month, event.start_time.day)
        if day not in self.daily_events:
self.daily_events[day] = []
self.daily_events[day].append(event)
self.latest_date = max(self.latest_date or event.start_time, event.start_time)
self.earliest_date = min(self.earliest_date or event.start_time, event.start_time)
if event.start_time >= now and event.start_time <= end_date:
self.upcoming.append(event)
if event.start_time < now:
self.back_date = max(self.back_date or event.start_time, event.start_time)
if event.start_time > end_date:
self.next_date = min(self.next_date or event.start_time, event.start_time)
def month_uri(self, yearmonth):
if self.calendar_name == "Events Calendar":
return yearmonth.strftime("events/calendar/%Y-%m.html")
if self.calendar_name == "Workshop Calendar":
return yearmonth.strftime("workshops/calendar/%Y-%m.html")
else:
return yearmonth.strftime("events/calendar/%%Y-%%m-%s.html" % friendly_title(self.calendar_name))
def day_uri(self, day):
if self.calendar_name == "Events Calendar":
return day.strftime("events/calendar/%Y-%m-%d.html")
if self.calendar_name == "Workshop Calendar":
return day.strftime("workshops/calendar/%Y-%m-%d.html")
else:
return day.strftime("events/calendar/%%Y-%%m-%%d-%s.html" % friendly_title(self.calendar_name))
def minical_uri(self, yearmonth):
if self.calendar_name == "Events Calendar":
return yearmonth.strftime("events/calendar/%Y-%m.mini.html")
if self.calendar_name == "Workshop Calendar":
return yearmonth.strftime("workshops/calendar/%Y-%m.mini.html")
else:
return yearmonth.strftime("events/calendar/%%Y-%%m-%s.mini.html" % friendly_title(self.calendar_name))
def friendly_title(calendar_name):
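    # Lowercase the calendar name, turn runs of spaces into hyphens, and strip anything that
    # is not a hyphen, letter, or digit; e.g. "Test SHC Calendar" becomes "test-shc-calendar".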
title = re.sub(" +", "-", calendar_name.lower())
title = re.sub("[^-a-z0-9]", "", title)
return title
def WriteEventPages(options, fm, events, calendars):
for event in events:
if event.calendar_title in ("Stanford Humanities Center Events",
"Co-sponsored Events Held at the Humanities Center",
"Test SHC Calendar"):
tmpl = "shc_event.tmpl"
else:
tmpl = "workshop_event.tmpl"
fm.save(options.output_dir + event.uri(),
str(Template(file=tmpl,
searchList=[{"event": event,
"calendar_title": event.calendar_title,
"calendar_urls": [(c.calendar_name, c.landing_page_uri) for c in calendars],
"forward_url": None,
"forward_text": None,
"back_url": None,
"back_text": None}])))
def WritePostPages(options, fm, all_posts):
for post in all_posts:
tmpl = "post-template.tmpl"
fm.save(options.output_dir + post.uri(),
str(Template(file=tmpl,
searchList=[{"post" : post,
"title": post.title,
"published" : post.published,
"content" : post.content,
"categories" : post.categories}])))
def SanityCheck(fm, options):
#pprint(sorted(fm.files.keys()))
assert fm.HasFile(options.output_dir + "events/calendar/index.html")
assert fm.HasFile(options.output_dir + "workshops/calendar/index.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-02.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-02.mini.html")
assert fm.HasFile(options.output_dir + "workshops/calendar/2012-02.html")
assert fm.HasFile(options.output_dir + "workshops/calendar/2012-02.mini.html")
assert fm.HasFile(options.output_dir + "events/calendar/test-shc-calendar.html")
assert fm.HasFile(options.output_dir + "workshops/calendar/test-workshop-calendar.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-01-test-shc-calendar.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-01-test-shc-calendar.mini.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-03-test-shc-calendar.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-03-test-shc-calendar.mini.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-01-test-workshop-calendar.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-01-test-workshop-calendar.mini.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-03-test-workshop-calendar.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-03-test-workshop-calendar.mini.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-1-9-all-day-event.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-1-9-all-day-workshop-event.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-1-10-location-only-workshop-event.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-1-10-location-only-event.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-1-11-multi-day-workshop-event.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-1-11-multi-day-event.html")
#assert fm.HasFile(options.output_dir + "events/calendar/2012-1-14-event-to-be-changed.html")
#assert fm.HasFile(options.output_dir + "events/calendar/2012-1-14-workshop-event-to-be-changed.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-3-29-far-away-shc-event.html")
assert fm.HasFile(options.output_dir + "events/calendar/2012-3-30-far-away-workshop-event.html")
shc_test_text = fm.GetFile(options.output_dir + "events/calendar/test-shc-calendar.html")
dom = etree.HTML(shc_test_text)
MyAssert(dom.xpath('//title')[0].text,'Test SHC Calendar | Stanford Humanities Center')
assert dom.xpath('//div[@id = "topnext"]')
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].text, u"\xabJan 2012")
assert dom.xpath('//div[@id = "bottomnext"]')
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "bottomback"]')
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].text, u"\xabJan 2012")
workshop_test_text = fm.GetFile(options.output_dir + "workshops/calendar/test-workshop-calendar.html")
dom = etree.HTML(workshop_test_text)
MyAssert(dom.xpath('//title')[0].text, 'Test Workshop Calendar | Stanford Humanities Center')
assert dom.xpath('//div[@id = "topnext"]')
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].text, u"\xabJan 2012")
assert dom.xpath('//div[@id = "bottomnext"]')
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "bottomback"]')
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].text, u"\xabJan 2012")
jan_shc_text = fm.GetFile(options.output_dir + "events/calendar/2012-01-test-shc-calendar.html")
dom = etree.HTML(jan_shc_text)
MyAssert(dom.xpath('//title')[0].text,'Test SHC Calendar: January 2012 | Stanford Humanities Center')
assert dom.xpath('//iframe[@id = "minical"]')
MyAssert(dom.xpath('//iframe[@id = "minical"]')[0].get('src'), "../../events/calendar/2012-01-test-shc-calendar.mini.html")
assert dom.xpath('//div[@id = "topnext"]')
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].text, u"Mar 2012\xbb")
assert not dom.xpath('//div[@id = "topback"]'), "Expected None, found %r" % dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].text, u"Mar 2012\xbb")
assert not dom.xpath('//div[@id = "bottomback"]')
mar_shc_text = fm.GetFile(options.output_dir + "events/calendar/2012-03-test-shc-calendar.html")
dom = etree.HTML(mar_shc_text)
MyAssert(dom.xpath('//title')[0].text,'Test SHC Calendar: March 2012 | Stanford Humanities Center')
assert dom.xpath('//iframe[@id = "minical"]')
MyAssert(dom.xpath('//iframe[@id = "minical"]')[0].get('src'), "../../events/calendar/2012-03-test-shc-calendar.mini.html")
assert not dom.xpath('//div[@id = "topnext"]')
assert dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].text, u"\xabJan 2012")
assert not dom.xpath('//div[@id = "bottomnext"]')
assert dom.xpath('//div[@id = "bottomback"]')
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-shc-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].text, u"\xabJan 2012")
jan_workshop_text = fm.GetFile(options.output_dir + "events/calendar/2012-01-test-workshop-calendar.html")
dom = etree.HTML(jan_workshop_text)
MyAssert(dom.xpath('//title')[0].text,'Test Workshop Calendar: January 2012 | Stanford Humanities Center')
assert dom.xpath('//iframe[@id = "minical"]')
MyAssert(dom.xpath('//iframe[@id = "minical"]')[0].get('src'), "../../events/calendar/2012-01-test-workshop-calendar.mini.html")
assert dom.xpath('//div[@id = "topnext"]')
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].text, u"Mar 2012\xbb")
assert not dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].get('href'), "../../events/calendar/2012-03-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].text, u"Mar 2012\xbb")
assert not dom.xpath('//div[@id = "bottomback"]')
mar_workshop_text = fm.GetFile(options.output_dir + "events/calendar/2012-03-test-workshop-calendar.html")
dom = etree.HTML(mar_workshop_text)
MyAssert(dom.xpath('//title')[0].text,'Test Workshop Calendar: March 2012 | Stanford Humanities Center')
assert dom.xpath('//iframe[@id = "minical"]')
MyAssert(dom.xpath('//iframe[@id = "minical"]')[0].get('src'), "../../events/calendar/2012-03-test-workshop-calendar.mini.html")
assert not dom.xpath('//div[@id = "topnext"]')
assert dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].text, u"\xabJan 2012")
assert not dom.xpath('//div[@id = "bottomnext"]')
assert dom.xpath('//div[@id = "bottomback"]')
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].get('href'), "../../events/calendar/2012-01-test-workshop-calendar.html")
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].text, u"\xabJan 2012")
feb_landing_page = fm.GetFile(options.output_dir + "events/calendar/2012-02.html")
dom = etree.HTML(feb_landing_page)
MyAssert(dom.xpath('//title')[0].text,'Events Calendar: February 2012 | Stanford Humanities Center')
assert dom.xpath('//iframe[@id = "minical"]')
MyAssert(dom.xpath('//iframe[@id = "minical"]')[0].get('src'), "../../events/calendar/2012-02.mini.html")
assert dom.xpath('//div[@id = "topnext"]')
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].get('href'), "../../events/calendar/2012-03.html")
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].get('href'), "../../events/calendar/2012-01.html")
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].text, u"\xabJan 2012")
assert dom.xpath('//div[@id = "bottomnext"]')
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].get('href'), "../../events/calendar/2012-03.html")
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "bottomback"]')
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].get('href'), "../../events/calendar/2012-01.html")
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].text, u"\xabJan 2012")
feb_workshop_page = fm.GetFile(options.output_dir + "workshops/calendar/2012-02.html")
dom = etree.HTML(feb_workshop_page)
MyAssert(dom.xpath('//title')[0].text,'Workshop Calendar: February 2012 | Stanford Humanities Center')
assert dom.xpath('//iframe[@id = "minical"]')
MyAssert(dom.xpath('//iframe[@id = "minical"]')[0].get('src'), "../../workshops/calendar/2012-02.mini.html")
assert dom.xpath('//div[@id = "topnext"]')
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].get('href'), "../../workshops/calendar/2012-03.html")
MyAssert(dom.xpath('//div[@id = "topnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "topback"]')
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].get('href'), "../../workshops/calendar/2012-01.html")
MyAssert(dom.xpath('//div[@id = "topback"]/a')[0].text, u"\xabJan 2012")
assert dom.xpath('//div[@id = "bottomnext"]')
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].get('href'), "../../workshops/calendar/2012-03.html")
MyAssert(dom.xpath('//div[@id = "bottomnext"]/a')[0].text, u"Mar 2012\xbb")
assert dom.xpath('//div[@id = "bottomback"]')
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].get('href'), "../../workshops/calendar/2012-01.html")
MyAssert(dom.xpath('//div[@id = "bottomback"]/a')[0].text, u"\xabJan 2012")
# TODO(chris): Add assertions for the <iframe> src. (Add an id to the iframe). Add assertions for the existence of the minicals
if __name__ == "__main__":
main(sys.argv)
|
|
# This module defines many standard colors that should be useful.
# These colors should be exactly the same as the ones defined in
# colors.tcl.
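# Each color is an (r, g, b) tuple of floats in the range [0, 1]; for example,
# antique_white below is (0.9804, 0.9216, 0.8431).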
# Whites
antique_white = (0.9804, 0.9216, 0.8431)
azure = (0.9412, 1.0000, 1.0000)
bisque = (1.0000, 0.8941, 0.7686)
blanched_almond = (1.0000, 0.9216, 0.8039)
cornsilk = (1.0000, 0.9725, 0.8627)
eggshell = (0.9900, 0.9000, 0.7900)
floral_white = (1.0000, 0.9804, 0.9412)
gainsboro = (0.8627, 0.8627, 0.8627)
ghost_white = (0.9725, 0.9725, 1.0000)
honeydew = (0.9412, 1.0000, 0.9412)
ivory = (1.0000, 1.0000, 0.9412)
lavender = (0.9020, 0.9020, 0.9804)
lavender_blush = (1.0000, 0.9412, 0.9608)
lemon_chiffon = (1.0000, 0.9804, 0.8039)
linen = (0.9804, 0.9412, 0.9020)
mint_cream = (0.9608, 1.0000, 0.9804)
misty_rose = (1.0000, 0.8941, 0.8824)
moccasin = (1.0000, 0.8941, 0.7098)
navajo_white = (1.0000, 0.8706, 0.6784)
old_lace = (0.9922, 0.9608, 0.9020)
papaya_whip = (1.0000, 0.9373, 0.8353)
peach_puff = (1.0000, 0.8549, 0.7255)
seashell = (1.0000, 0.9608, 0.9333)
snow = (1.0000, 0.9804, 0.9804)
thistle = (0.8471, 0.7490, 0.8471)
titanium_white = (0.9900, 1.0000, 0.9400)
wheat = (0.9608, 0.8706, 0.7020)
white = (1.0000, 1.0000, 1.0000)
white_smoke = (0.9608, 0.9608, 0.9608)
zinc_white = (0.9900, 0.9700, 1.0000)
# Greys
cold_grey = (0.5000, 0.5400, 0.5300)
dim_grey = (0.4118, 0.4118, 0.4118)
grey = (0.7529, 0.7529, 0.7529)
light_grey = (0.8275, 0.8275, 0.8275)
slate_grey = (0.4392, 0.5020, 0.5647)
slate_grey_dark = (0.1843, 0.3098, 0.3098)
slate_grey_light = (0.4667, 0.5333, 0.6000)
warm_grey = (0.5000, 0.5000, 0.4100)
# Blacks
black = (0.0000, 0.0000, 0.0000)
ivory_black = (0.1600, 0.1400, 0.1300)
lamp_black = (0.1800, 0.2800, 0.2300)
# Reds
alizarin_crimson = (0.8900, 0.1500, 0.2100)
brick = (0.6100, 0.4000, 0.1200)
cadmium_red_deep = (0.8900, 0.0900, 0.0500)
coral = (1.0000, 0.4980, 0.3137)
coral_light = (0.9412, 0.5020, 0.5020)
deep_pink = (1.0000, 0.0784, 0.5765)
english_red = (0.8300, 0.2400, 0.1000)
firebrick = (0.6980, 0.1333, 0.1333)
geranium_lake = (0.8900, 0.0700, 0.1900)
hot_pink = (1.0000, 0.4118, 0.7059)
indian_red = (0.6900, 0.0900, 0.1200)
light_salmon = (1.0000, 0.6275, 0.4784)
madder_lake_deep = (0.8900, 0.1800, 0.1900)
maroon = (0.6902, 0.1882, 0.3765)
pink = (1.0000, 0.7529, 0.7961)
pink_light = (1.0000, 0.7137, 0.7569)
raspberry = (0.5300, 0.1500, 0.3400)
red = (1.0000, 0.0000, 0.0000)
rose_madder = (0.8900, 0.2100, 0.2200)
salmon = (0.9804, 0.5020, 0.4471)
tomato = (1.0000, 0.3882, 0.2784)
venetian_red = (0.8300, 0.1000, 0.1200)
# Browns
beige = (0.6400, 0.5800, 0.5000)
brown = (0.5000, 0.1647, 0.1647)
brown_madder = (0.8600, 0.1600, 0.1600)
brown_ochre = (0.5300, 0.2600, 0.1200)
burlywood = (0.8706, 0.7216, 0.5294)
burnt_sienna = (0.5400, 0.2100, 0.0600)
burnt_umber = (0.5400, 0.2000, 0.1400)
chocolate = (0.8235, 0.4118, 0.1176)
deep_ochre = (0.4500, 0.2400, 0.1000)
flesh = (1.0000, 0.4900, 0.2500)
flesh_ochre = (1.0000, 0.3400, 0.1300)
gold_ochre = (0.7800, 0.4700, 0.1500)
greenish_umber = (1.0000, 0.2400, 0.0500)
khaki = (0.9412, 0.9020, 0.5490)
khaki_dark = (0.7412, 0.7176, 0.4196)
light_beige = (0.9608, 0.9608, 0.8627)
peru = (0.8039, 0.5216, 0.2471)
rosy_brown = (0.7373, 0.5608, 0.5608)
raw_sienna = (0.7800, 0.3800, 0.0800)
raw_umber = (0.4500, 0.2900, 0.0700)
sepia = (0.3700, 0.1500, 0.0700)
sienna = (0.6275, 0.3216, 0.1765)
saddle_brown = (0.5451, 0.2706, 0.0745)
sandy_brown = (0.9569, 0.6431, 0.3765)
tan = (0.8235, 0.7059, 0.5490)
van_dyke_brown = (0.3700, 0.1500, 0.0200)
# Oranges
cadmium_orange = (1.0000, 0.3800, 0.0100)
cadmium_red_light = (1.0000, 0.0100, 0.0500)
carrot = (0.9300, 0.5700, 0.1300)
dark_orange = (1.0000, 0.5490, 0.0000)
mars_orange = (0.5900, 0.2700, 0.0800)
mars_yellow = (0.8900, 0.4400, 0.1000)
orange = (1.0000, 0.5000, 0.0000)
orange_red = (1.0000, 0.2706, 0.0000)
yellow_ochre = (0.8900, 0.5100, 0.0900)
# Yellows
aureoline_yellow = (1.0000, 0.6600, 0.1400)
banana = (0.8900, 0.8100, 0.3400)
cadmium_lemon = (1.0000, 0.8900, 0.0100)
cadmium_yellow = (1.0000, 0.6000, 0.0700)
cadmium_yellow_light = (1.0000, 0.6900, 0.0600)
gold = (1.0000, 0.8431, 0.0000)
goldenrod = (0.8549, 0.6471, 0.1255)
goldenrod_dark = (0.7216, 0.5255, 0.0431)
goldenrod_light = (0.9804, 0.9804, 0.8235)
goldenrod_pale = (0.9333, 0.9098, 0.6667)
light_goldenrod = (0.9333, 0.8667, 0.5098)
melon = (0.8900, 0.6600, 0.4100)
naples_yellow_deep = (1.0000, 0.6600, 0.0700)
yellow = (1.0000, 1.0000, 0.0000)
yellow_light = (1.0000, 1.0000, 0.8784)
# Greens
chartreuse = (0.4980, 1.0000, 0.0000)
chrome_oxide_green = (0.4000, 0.5000, 0.0800)
cinnabar_green = (0.3800, 0.7000, 0.1600)
cobalt_green = (0.2400, 0.5700, 0.2500)
emerald_green = (0.0000, 0.7900, 0.3400)
forest_green = (0.1333, 0.5451, 0.1333)
green = (0.0000, 1.0000, 0.0000)
green_dark = (0.0000, 0.3922, 0.0000)
green_pale = (0.5961, 0.9843, 0.5961)
green_yellow = (0.6784, 1.0000, 0.1843)
lawn_green = (0.4863, 0.9882, 0.0000)
lime_green = (0.1961, 0.8039, 0.1961)
mint = (0.7400, 0.9900, 0.7900)
olive = (0.2300, 0.3700, 0.1700)
olive_drab = (0.4196, 0.5569, 0.1373)
olive_green_dark = (0.3333, 0.4196, 0.1843)
permanent_green = (0.0400, 0.7900, 0.1700)
sap_green = (0.1900, 0.5000, 0.0800)
sea_green = (0.1804, 0.5451, 0.3412)
sea_green_dark = (0.5608, 0.7373, 0.5608)
sea_green_medium = (0.2353, 0.7020, 0.4431)
sea_green_light = (0.1255, 0.6980, 0.6667)
spring_green = (0.0000, 1.0000, 0.4980)
spring_green_medium = (0.0000, 0.9804, 0.6039)
terre_verte = (0.2200, 0.3700, 0.0600)
viridian_light = (0.4300, 1.0000, 0.4400)
yellow_green = (0.6039, 0.8039, 0.1961)
# Cyans
aquamarine = (0.4980, 1.0000, 0.8314)
aquamarine_medium = (0.4000, 0.8039, 0.6667)
cyan = (0.0000, 1.0000, 1.0000)
cyan_white = (0.8784, 1.0000, 1.0000)
turquoise = (0.2510, 0.8784, 0.8157)
turquoise_dark = (0.0000, 0.8078, 0.8196)
turquoise_medium = (0.2824, 0.8196, 0.8000)
turquoise_pale = (0.6863, 0.9333, 0.9333)
# Blues
alice_blue = (0.9412, 0.9725, 1.0000)
blue = (0.0000, 0.0000, 1.0000)
blue_light = (0.6784, 0.8471, 0.9020)
blue_medium = (0.0000, 0.0000, 0.8039)
cadet = (0.3725, 0.6196, 0.6275)
cobalt = (0.2400, 0.3500, 0.6700)
cornflower = (0.3922, 0.5843, 0.9294)
cerulean = (0.0200, 0.7200, 0.8000)
dodger_blue = (0.1176, 0.5647, 1.0000)
indigo = (0.0300, 0.1800, 0.3300)
manganese_blue = (0.0100, 0.6600, 0.6200)
midnight_blue = (0.0980, 0.0980, 0.4392)
navy = (0.0000, 0.0000, 0.5020)
peacock = (0.2000, 0.6300, 0.7900)
powder_blue = (0.6902, 0.8784, 0.9020)
royal_blue = (0.2549, 0.4118, 0.8824)
slate_blue = (0.4157, 0.3529, 0.8039)
slate_blue_dark = (0.2824, 0.2392, 0.5451)
slate_blue_light = (0.5176, 0.4392, 1.0000)
slate_blue_medium = (0.4824, 0.4078, 0.9333)
sky_blue = (0.5294, 0.8078, 0.9216)
sky_blue_deep = (0.0000, 0.7490, 1.0000)
sky_blue_light = (0.5294, 0.8078, 0.9804)
steel_blue = (0.2745, 0.5098, 0.7059)
steel_blue_light = (0.6902, 0.7686, 0.8706)
turquoise_blue = (0.0000, 0.7800, 0.5500)
ultramarine = (0.0700, 0.0400, 0.5600)
# Magentas
blue_violet = (0.5412, 0.1686, 0.8863)
cobalt_violet_deep = (0.5700, 0.1300, 0.6200)
magenta = (1.0000, 0.0000, 1.0000)
orchid = (0.8549, 0.4392, 0.8392)
orchid_dark = (0.6000, 0.1961, 0.8000)
orchid_medium = (0.7294, 0.3333, 0.8275)
permanent_red_violet = (0.8600, 0.1500, 0.2700)
plum = (0.8667, 0.6275, 0.8667)
purple = (0.6275, 0.1255, 0.9412)
purple_medium = (0.5765, 0.4392, 0.8588)
ultramarine_violet = (0.3600, 0.1400, 0.4300)
violet = (0.5600, 0.3700, 0.6000)
violet_dark = (0.5804, 0.0000, 0.8275)
violet_red = (0.8157, 0.1255, 0.5647)
violet_red_medium = (0.7804, 0.0824, 0.5216)
violet_red_pale = (0.8588, 0.4392, 0.5765)
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder common internal object model"""
import contextlib
import datetime
from oslo_log import log as logging
from oslo_utils import versionutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
LOG = logging.getLogger('object')
obj_make_list = base.obj_make_list
class CinderObjectVersionsHistory(dict):
"""Helper class that maintains objects version history.
Current state of object versions is aggregated in a single version number
    that explicitly identifies a set of object versions. That way a service
    is able to report what objects it supports using a single string and all
    the newer services will know exactly what that means for a single object.
"""
def __init__(self):
super(CinderObjectVersionsHistory, self).__init__()
# NOTE(dulek): This is our pre-history and a starting point - Liberty.
# We want Mitaka to be able to talk to Liberty services, so we need to
# handle backporting to these objects versions (although I don't expect
# we've made a lot of incompatible changes inside the objects).
#
# If an object doesn't exist in Liberty, RPC API compatibility layer
# shouldn't send it or convert it to a dictionary.
#
        # Please note that we do not need to add similar entries for each
# release. Liberty is here just for historical reasons.
self.versions = ['liberty']
self['liberty'] = {
'Backup': '1.1',
'BackupImport': '1.1',
'BackupList': '1.0',
'ConsistencyGroup': '1.1',
'ConsistencyGroupList': '1.0',
'Service': '1.0',
'ServiceList': '1.0',
'Snapshot': '1.0',
'SnapshotList': '1.0',
'Volume': '1.1',
'VolumeAttachment': '1.0',
'VolumeAttachmentList': '1.0',
'VolumeList': '1.1',
'VolumeType': '1.0',
'VolumeTypeList': '1.0',
}
def get_current(self):
return self.versions[-1]
def get_current_versions(self):
return self[self.get_current()]
def add(self, ver, updates):
if ver in self.versions:
msg = 'Version %s already exists in history.' % ver
raise exception.ProgrammingError(reason=msg)
self[ver] = self[self.get_current()].copy()
self.versions.append(ver)
self[ver].update(updates)
OBJ_VERSIONS = CinderObjectVersionsHistory()
# NOTE(dulek): You should add a new version here each time you bump a version
# of any object. As a second parameter you need to specify only what changed.
#
# When dropping backward compatibility with an OpenStack release we can rework
# this and remove some history while keeping the versions order.
OBJ_VERSIONS.add('1.0', {'Backup': '1.3', 'BackupImport': '1.3',
'CGSnapshot': '1.0', 'CGSnapshotList': '1.0',
'ConsistencyGroup': '1.2',
'ConsistencyGroupList': '1.1', 'Service': '1.1',
'Volume': '1.3', 'VolumeTypeList': '1.1'})
OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'})
OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'})
OBJ_VERSIONS.add('1.3', {'Service': '1.3'})
OBJ_VERSIONS.add('1.4', {'Snapshot': '1.1'})
OBJ_VERSIONS.add('1.5', {'VolumeType': '1.1'})
OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0',
'QualityOfServiceSpecsList': '1.0',
'VolumeType': '1.2'})
OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0',
'Service': '1.4', 'Volume': '1.4',
'ConsistencyGroup': '1.3'})
OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'})
OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'})
OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5',
'RequestSpec': '1.1', 'VolumeProperties': '1.1'})
OBJ_VERSIONS.add('1.11', {'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0',
'Group': '1.1'})
OBJ_VERSIONS.add('1.12', {'VolumeType': '1.3'})
OBJ_VERSIONS.add('1.13', {'CleanupRequest': '1.0'})
OBJ_VERSIONS.add('1.14', {'VolumeAttachmentList': '1.1'})
OBJ_VERSIONS.add('1.15', {'Volume': '1.6', 'Snapshot': '1.2'})
OBJ_VERSIONS.add('1.16', {'BackupDeviceInfo': '1.0'})
OBJ_VERSIONS.add('1.17', {'VolumeAttachment': '1.1'})
OBJ_VERSIONS.add('1.18', {'Snapshot': '1.3'})
OBJ_VERSIONS.add('1.19', {'ConsistencyGroup': '1.4', 'CGSnapshot': '1.1'})
OBJ_VERSIONS.add('1.20', {'Cluster': '1.1'})
OBJ_VERSIONS.add('1.21', {'ManageableSnapshot': '1.0',
'ManageableVolume': '1.0',
'ManageableVolumeList': '1.0',
'ManageableSnapshotList': '1.0'})
class CinderObjectRegistry(base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
"""Hook called when registering a class.
This method takes care of adding the class to cinder.objects namespace.
        If the registering class has a method called cinder_ovo_cls_init, it
        will be called to support class initialization. This is convenient
for all persistent classes that need to register their models.
"""
setattr(objects, cls.obj_name(), cls)
# If registering class has a callable initialization method, call it.
if callable(getattr(cls, 'cinder_ovo_cls_init', None)):
cls.cinder_ovo_cls_init()
class CinderObject(base.VersionedObject):
# NOTE(thangp): OBJ_PROJECT_NAMESPACE needs to be set so that nova,
# cinder, and other objects can exist on the same bus and be distinguished
# from one another.
OBJ_PROJECT_NAMESPACE = 'cinder'
def cinder_obj_get_changes(self):
"""Returns a dict of changed fields with tz unaware datetimes.
Any timezone aware datetime field will be converted to UTC timezone
and returned as timezone unaware datetime.
This will allow us to pass these fields directly to a db update
method as they can't have timezone information.
"""
# Get dirtied/changed fields
changes = self.obj_get_changes()
# Look for datetime objects that contain timezone information
for k, v in changes.items():
if isinstance(v, datetime.datetime) and v.tzinfo:
# Remove timezone information and adjust the time according to
# the timezone information's offset.
changes[k] = v.replace(tzinfo=None) - v.utcoffset()
# Return modified dict
return changes
def obj_make_compatible(self, primitive, target_version):
_log_backport(self, target_version)
super(CinderObject, self).obj_make_compatible(primitive,
target_version)
def __contains__(self, name):
# We're using obj_extra_fields to provide aliases for some fields while
# in transition period. This override is to make these aliases pass
# "'foo' in obj" tests.
return name in self.obj_extra_fields or super(CinderObject,
self).__contains__(name)
class CinderObjectDictCompat(base.VersionedObjectDictCompat):
"""Mix-in to provide dictionary key access compat.
If an object needs to support attribute access using
dictionary items instead of object attributes, inherit
from this class. This should only be used as a temporary
measure until all callers are converted to use modern
attribute access.
NOTE(berrange) This class will eventually be deleted.
"""
def get(self, key, value=base._NotSpecifiedSentinel):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
if key not in self.obj_fields:
# NOTE(jdg): There are a number of places where we rely on the
# old dictionary version and do a get(xxx, None).
# The following preserves that compatibility but in
# the future we'll remove this shim altogether so don't
# rely on it.
LOG.debug('Cinder object %(object_name)s has no '
'attribute named: %(attribute_name)s',
{'object_name': self.__class__.__name__,
'attribute_name': key})
return None
if (value != base._NotSpecifiedSentinel and
key not in self.obj_extra_fields and
not self.obj_attr_is_set(key)):
return value
else:
try:
return getattr(self, key)
except (exception.ObjectActionError, NotImplementedError):
                # Exception raised when we haven't set a value for a non-lazy-
                # loadable attribute, but to mimic typical dict 'get'
                # behavior we should still return None
return None
class CinderPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for all persistent objects.
"""
OPTIONAL_FIELDS = []
Not = db.Not
Case = db.Case
fields = {
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'deleted': fields.BooleanField(default=False,
nullable=True),
}
@classmethod
def cinder_ovo_cls_init(cls):
"""This method is called on OVO registration and sets the DB model."""
# Persistent Versioned Objects Classes should have a DB model, and if
# they don't, then we have a problem and we must raise an exception on
# registration.
try:
cls.model = db.get_model_for_versioned_object(cls)
except (ImportError, AttributeError):
msg = _("Couldn't find ORM model for Persistent Versioned "
"Object %s.") % cls.obj_name()
raise exception.ProgrammingError(reason=msg)
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
@classmethod
def _get_expected_attrs(cls, context, *args, **kwargs):
return None
@classmethod
def get_by_id(cls, context, id, *args, **kwargs):
# To get by id we need to have a model and for the model to
# have an id field
if 'id' not in cls.fields:
msg = (_('VersionedObject %s cannot retrieve object by id.') %
(cls.obj_name()))
raise NotImplementedError(msg)
orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs)
# We pass parameters because fields to expect may depend on them
expected_attrs = cls._get_expected_attrs(context, *args, **kwargs)
kargs = {}
if expected_attrs:
kargs = {'expected_attrs': expected_attrs}
return cls._from_db_object(context, cls(context), orm_obj, **kargs)
def conditional_update(self, values, expected_values=None, filters=(),
save_all=False, session=None, reflect_changes=True,
order=None):
"""Compare-and-swap update.
A conditional object update that, unlike normal update, will SAVE the
contents of the update to the DB.
Update will only occur in the DB and the object if conditions are met.
If no expected_values are passed in we will default to make sure that
all fields have not been changed in the DB. Since we cannot know the
original value in the DB for dirty fields in the object those will be
excluded.
We have 4 different condition types we can use in expected_values:
- Equality: {'status': 'available'}
- Inequality: {'status': vol_obj.Not('deleting')}
        - In range: {'status': ['available', 'error']}
        - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])}
Method accepts additional filters, which are basically anything that
can be passed to a sqlalchemy query's filter method, for example:
.. code-block:: python
[~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]
We can select values based on conditions using Case objects in the
'values' argument. For example:
.. code-block:: python
has_snapshot_filter = sql.exists().where(
models.Snapshot.volume_id == models.Volume.id)
case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')],
else_='no-snapshot')
            volume.conditional_update({'status': case_values},
                                      {'status': 'available'})
And we can use DB fields using model class attribute for example to
store previous status in the corresponding field even though we don't
know which value is in the db from those we allowed:
.. code-block:: python
volume.conditional_update({'status': 'deleting',
'previous_status': volume.model.status},
{'status': ('available', 'error')})
:param values: Dictionary of key-values to update in the DB.
:param expected_values: Dictionary of conditions that must be met for
the update to be executed.
:param filters: Iterable with additional filters
:param save_all: Object may have changes that are not in the DB, this
will say whether we want those changes saved as well.
:param session: Session to use for the update
:param reflect_changes: If we want changes made in the database to be
reflected in the versioned object. This may
mean in some cases that we have to reload the
object from the database.
:param order: Specific order of fields in which to update the values
:returns: number of db rows that were updated, which can be used as a
boolean, since it will be 0 if we couldn't update the DB and
1 if we could, because we are using unique index id.
"""
if 'id' not in self.fields:
msg = (_('VersionedObject %s does not support conditional update.')
% (self.obj_name()))
raise NotImplementedError(msg)
# If no conditions are set we will require object in DB to be unchanged
if expected_values is None:
changes = self.obj_what_changed()
expected = {key: getattr(self, key)
for key in self.fields.keys()
if self.obj_attr_is_set(key) and key not in changes and
key not in self.OPTIONAL_FIELDS}
else:
# Set the id in expected_values to limit conditional update to only
# change this object
expected = expected_values.copy()
expected['id'] = self.id
# If we want to save any additional changes the object has besides the
# ones referred in values
if save_all:
changes = self.cinder_obj_get_changes()
changes.update(values)
values = changes
result = db.conditional_update(self._context, self.model, values,
expected, filters, order=order)
# If we were able to update the DB then we need to update this object
# as well to reflect new DB contents and clear the object's dirty flags
# for those fields.
if result and reflect_changes:
# If we have used a Case, a db field or an expression in values we
# don't know which value was used, so we need to read the object
# back from the DB
if any(isinstance(v, self.Case) or db.is_orm_value(v)
for v in values.values()):
# Read back object from DB
obj = type(self).get_by_id(self._context, self.id)
db_values = obj.obj_to_primitive()['versioned_object.data']
                # Only update fields where changes were requested
values = {field: db_values[field]
for field, value in values.items()}
# NOTE(geguileo): We don't use update method because our objects
# will eventually move away from VersionedObjectDictCompat
for key, value in values.items():
setattr(self, key, value)
self.obj_reset_changes(values.keys())
return result
def refresh(self):
# To refresh we need to have a model and for the model to have an id
# field
if 'id' not in self.fields:
msg = (_('VersionedObject %s cannot retrieve object by id.') %
(self.obj_name()))
raise NotImplementedError(msg)
current = self.get_by_id(self._context, self.id)
# Copy contents retrieved from the DB into self
my_data = vars(self)
my_data.clear()
my_data.update(vars(current))
@classmethod
def exists(cls, context, id_):
return db.resource_exists(context, cls.model, id_)
class CinderComparableObject(base.ComparableVersionedObject):
def __eq__(self, obj):
if hasattr(obj, 'obj_to_primitive'):
return self.obj_to_primitive() == obj.obj_to_primitive()
return False
def __ne__(self, other):
return not self.__eq__(other)
class ObjectListBase(base.ObjectListBase):
def obj_make_compatible(self, primitive, target_version):
_log_backport(self, target_version)
super(ObjectListBase, self).obj_make_compatible(primitive,
target_version)
class ClusteredObject(object):
@property
def service_topic_queue(self):
return self.cluster_name or self.host
@property
def is_clustered(self):
return bool(self.cluster_name)
def assert_not_frozen(self):
ctxt = self._context.elevated()
if db.is_backend_frozen(ctxt, self.host, self.cluster_name):
msg = _('Modification operations are not allowed on frozen '
'storage backends.')
raise exception.InvalidInput(reason=msg)
class CinderObjectSerializer(base.VersionedObjectSerializer):
OBJ_BASE_CLASS = CinderObject
def __init__(self, version_cap=None):
super(CinderObjectSerializer, self).__init__()
self.version_cap = version_cap
# NOTE(geguileo): During upgrades we will use a manifest to ensure that
# all objects are properly backported. This allows us to properly
# backport child objects to the right version even if parent version
# has not been bumped.
if not version_cap or version_cap == OBJ_VERSIONS.get_current():
self.manifest = None
else:
if version_cap not in OBJ_VERSIONS:
raise exception.CappedVersionUnknown(version=version_cap)
self.manifest = OBJ_VERSIONS[version_cap]
def _get_capped_obj_version(self, obj):
objname = obj.obj_name()
version_dict = OBJ_VERSIONS.get(self.version_cap, {})
version_cap = version_dict.get(objname, None)
if version_cap:
cap_tuple = versionutils.convert_version_to_tuple(version_cap)
obj_tuple = versionutils.convert_version_to_tuple(obj.VERSION)
if cap_tuple > obj_tuple:
# NOTE(dulek): Do not set version cap to be higher than actual
# object version as we don't support "forwardporting" of
                # objects. If a service receives an object that's too old it
# should handle it explicitly.
version_cap = None
return version_cap
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
# NOTE(dulek): Backport outgoing object to the capped version.
backport_ver = self._get_capped_obj_version(entity)
entity = entity.obj_to_primitive(backport_ver, self.manifest)
return entity
def _log_backport(ovo, target_version):
"""Log backported versioned objects."""
if target_version and target_version != ovo.VERSION:
LOG.debug('Backporting %(obj_name)s from version %(src_vers)s '
'to version %(dst_vers)s',
{'obj_name': ovo.obj_name(),
'src_vers': ovo.VERSION,
'dst_vers': target_version})
|
|
# -*- coding: utf-8 -*-
import pymel.core as pm
from anima.rig.drawNode import DrawNode
from shapes import Shape
from curve import Curve
# JOINT CLASS
class Joint(object):
def __init__(self, jointName_in, position):
self._jointPos = self._getPosition(position)
pm.select(cl=1)
self._jointName = pm.joint(n=(jointName_in + "#"), p=self._jointPos)
# BASE SETUP METHODS
def _getPosition(self, position):
        # Check the type of `position` and return it as a position vector.
if isinstance(position, pm.dt.Vector):
self._jointPos = position
elif isinstance(position, pm.dt.Point):
self._jointPos = position
elif isinstance(position, pm.nt.Transform):
self._jointPos = pm.getAttr(position.translate)
        else:
            # Anything else (including unsupported types) falls back to the origin.
            self._jointPos = pm.dt.Vector(0, 0, 0)
return self._jointPos
# PROPERTIES
@property
# Joint Name Getter - Setter
def jointName(self):
return self._jointName
@jointName.setter
def jointName(self, name_in):
self._jointName = pm.rename(self._jointName, name_in)
@property
# Joint Position Getter
def jointPos(self):
return self._jointPos
# JOINT CHAIN CLASS
class JointChain(object):
def __init__(self, jointsName, positions):
"""
:param jointsName:
:param positions:
"""
self._jointChain = []
self._jointsPos = []
# Creates Joints
self._createJoints(jointsName, positions)
self._jointRoot = self.jointChain[0]
self._numOfJoints = len(self._jointChain)
self._startJoint = None
self._endJoint = None
self._startPos = None
self._endPos = None
# BASE SETUP METHODS
def _createJoints(self, jointsName, positions):
self._validate_positions(positions)
for pos in positions:
tempJnt = Joint(jointsName, pos)
self._jointChain.append(tempJnt.jointName)
self._jointsPos.append(tempJnt.jointPos)
for i in range(1, len(self.jointChain), 1):
pm.parent(self._jointChain[i], self._jointChain[i - 1])
def _validate_positions(self, positions):
if not isinstance(positions, (list)):
raise TypeError(
"%s.name should be an instance of List!" % self.__class__.__name__
)
# PROPERTIES
@property
def startJoint(self):
return self._jointChain[0]
@property
def endJoint(self):
return self._jointChain[self.numOfJoints - 1]
@property
# Joint Chain Getter
def jointChain(self):
return self._jointChain
@property
# Positions of the Joints Getter
def jointPos(self):
return self._jointsPos
@property
def jointRoot(self):
return self._jointRoot
@property
def numOfJoints(self):
return self._numOfJoints
@property
def startPos(self):
return self._jointsPos[0]
@property
def endPos(self):
return self._jointsPos[self.numOfJoints - 1]
# ORIENTATION SET METHODS
def orient_joint(
self,
joint,
aimAxis=[1, 0, 0],
upAxis=[0, 0, 1],
worldUpType="vector",
worldUpVector=[0, 1, 0],
):
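        # Summary: temporarily re-parent the child joint to a holding transform, aim-constrain
        # this joint at it, bake the resulting rotation into jointOrient, then restore the
        # hierarchy and delete the helpers.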
# joint should be pm.nt.Joint type
if not isinstance(joint, pm.nt.Joint):
raise TypeError("%s sholud be an instance of pm.nt.Joint Class" % joint)
jointUnder = self.jointUnder(joint)
if jointUnder is None:
return 0
temporalGroup = DrawNode(Shape.transform, "temporalGroup")
pm.parent(jointUnder, temporalGroup.drawnNode)
pm.setAttr(joint.jointOrient, (0, 0, 0))
if worldUpType == "object":
aimConst = pm.aimConstraint(
jointUnder,
joint,
aimVector=aimAxis,
upVector=upAxis,
worldUpType=worldUpType,
worldUpObject=worldUpVector,
)
elif worldUpType == "vector":
aimConst = pm.aimConstraint(
jointUnder,
joint,
aimVector=aimAxis,
upVector=upAxis,
worldUpType=worldUpType,
worldUpVector=worldUpVector,
)
pm.delete(aimConst)
pm.parent(jointUnder, joint)
pm.setAttr(joint.jointOrient, (pm.getAttr(joint.rotate)))
pm.setAttr((joint.rotate), [0, 0, 0])
pm.delete(temporalGroup.drawnNode)
def orient_joint_frontAxis(
self,
joint,
aimAxis=[0, 1, 0],
upAxis=[0, 0, 1],
worldUpType="object",
frontAxis="z",
):
# TODO : Validate frontAxis
        # Variant of orient_joint: creates a temporary transform node to use as the
        # worldUpVector, then calls orient_joint with it.
jointUnder = self.jointUnder(joint)
if jointUnder is None:
return 0
moveAxis = [0, 0]
if frontAxis == "x":
moveAxis[0] = -1
elif frontAxis == "z":
moveAxis[1] = -1
temporalTrans = DrawNode(Shape.transform, "temporalTransform")
temporalTrans.temp_constrain(jointUnder)
temporalTrans.freeze_transformations()
temporalTransMove = self.orient_choose_direction(joint, jointUnder, frontAxis)
temporalTrans.move(
pm.dt.Vector([moveAxis[0], (temporalTransMove * 0.001), moveAxis[1]])
)
worldUpVector = temporalTrans.drawnNode
self.orient_joint(joint, aimAxis, upAxis, worldUpType, worldUpVector)
temporalTrans.delete()
def orient_choose_direction(self, joint, jointUnder, frontAxis):
# TODO : Validate frontAxis
if frontAxis == "x":
frontInt = 0
elif frontAxis == "z":
frontInt = 2
returnVal = 1
transform_1 = DrawNode(Shape.transform, "direction1")
transform_1.temp_constrain(joint)
transform_2 = DrawNode(Shape.transform, "direction2")
transform_2.temp_constrain(jointUnder)
frontTransform = (
transform_1.transform[frontInt] - transform_2.transform[frontInt]
)
if frontTransform > 0:
returnVal = -1
transform_1.delete()
transform_2.delete()
return returnVal
def jointUnder(self, joint):
jointUnder = pm.listRelatives(joint, c=1, type="joint")
if not len(jointUnder):
pm.setAttr(joint.jointOrient, (0, 0, 0))
return None
return jointUnder
def orient_joint_chain(self, startJoint=None, endJoint=None, frontAxis=None):
# TODO: Validate front axis
startIndex, endIndex = self.get_start_end_index(startJoint, endJoint)
for j in range(startIndex, (endIndex + 1)):
if frontAxis is None:
self.orient_joint(self.jointChain[j])
else:
self.orient_joint_frontAxis(self.jointChain[j], frontAxis=frontAxis)
def get_start_end_index(self, startJoint=None, endJoint=None):
# Sets the default values
if startJoint is None:
startJoint = self.jointRoot
if endJoint is None:
endJoint = self.jointChain[self.numOfJoints - 1]
# Gets the Index of the Start Joint and End Joint
startIndex = self.get_index_of_joint(startJoint)
endIndex = self.get_index_of_joint(endJoint)
return startIndex, endIndex
def get_index_of_joint(self, joint_in):
# Return the index of the joint_in
for index in range(0, self.numOfJoints):
if joint_in == self.jointChain[index]:
return index
raise TypeError(
"%s is not a member joint of %s" % (joint_in, self.__class__.__name__)
)
# SPINE JOINT CLASS
class SpineJoints(JointChain):
def __init__(self, jointsName, curve, spans=10, horizontalSpine=0):
"""
:param jointsName:
:param curve:
:param spans:
:param horizontalSpine:"""
jointsName = jointsName + "_jnt_"
curveName = jointsName + "baseCrv"
# Position of the Joints
self._jointsPos = []
# Curve Node creation
self._curve = Curve(curveName, curve)
self._curve.rebuildCurve(spans)
self._startJoint = None
self._endJoint = None
        # get CV positions to create the joint chain
self._horizontalSpine = horizontalSpine
self._frontAxis = None
self._zeroJoint = None
self._zeroPos = None
self._get_curve_points()
super(SpineJoints, self).__init__(jointsName, self._jointsPos)
self.set_zero_joint()
# BASE SETUP METHODS
def set_zero_joint(self):
# Removes Zero Joint from Joint Chain
pm.joint(self.jointChain[0], e=True, zso=True, oj="xyz", sao="xup")
self.zeroJoint = self.jointChain[0]
self._zeroPos = pm.dt.Point(pm.getAttr(self._zeroJoint.translate))
self.jointChain.remove(self.jointChain[0])
self.jointPos.remove(self.jointPos[0])
pm.joint(self.jointChain[1], e=True, zso=True, oj="xyz", sao="yup")
for i in range(1, len(self.jointChain)):
pm.joint(self.jointChain[i], e=True, zso=True, oj="xyz", sao="yup")
# sets Start End Num Of Joints again
self._numOfJoints = len(self._jointChain)
# Orient Zero Joint
temporalGroup = DrawNode(Shape.transform, "temporalGroup")
pm.parent(self.startJoint, temporalGroup.drawnNode)
print(pm.getAttr(self.zeroJoint.jointOrient))
pm.setAttr(self.zeroJoint.jointOrientX, 0)
pm.parent(self.startJoint, self.zeroJoint)
temporalGroup.delete()
def _get_curve_points(self):
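        # Build the joint position list: a "zero" joint offset from the first CV by a tenth
        # of the curve's arc length (downwards, or backwards when horizontalSpine is set),
        # then the first CV, a subset of the interior CVs, and finally the last CV.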
        self._jointsPos = []
firstCVPos = self._curve.cvPositions[0]
zeroCVsPos = pm.dt.Vector(self._curve.cvPositions[0])
if not self.horizontalSpine:
zeroCVsPos[1] = zeroCVsPos[1] - (self._curve.arclen / 10)
else:
zeroCVsPos[2] = zeroCVsPos[2] - (self._curve.arclen / 10)
self._jointsPos.append(zeroCVsPos)
self._jointsPos.append(firstCVPos)
for i in range(2, (self._curve.numCVs - 2)):
self._jointsPos.append(self._curve.cvPositions[i])
lastCVPos = self._curve.cvPositions[self._curve.numCVs - 1]
self._jointsPos.append(lastCVPos)
# PROPERTIES
@property
def curve(self):
return self._curve
@property
def horizontalSpine(self):
return self._horizontalSpine
@horizontalSpine.setter
def horizontalSpine(self, horizontal_in):
self._horizontalSpine = horizontal_in
@property
def frontAxis(self):
return self._frontAxis
@frontAxis.setter
def frontAxis(self, frontAxis):
self._frontAxis = frontAxis
@property
def zeroJoint(self):
return self._zeroJoint
@zeroJoint.setter
def zeroJoint(self, joint_in):
self._zeroJoint = pm.nt.Joint(joint_in)
@property
def zeroPos(self):
return self._zeroPos
# METHODS
def orient_spine(self, frontAxis):
self.orient_joint_chain(self.startJoint, self.endJoint, frontAxis)
|
|
#coding: utf-8
from lxml import etree as ET
import re
from xylose.scielodocument import UnavailableMetadataException
import plumber
SUPPLBEG_REGEX = re.compile(r'^0 ')
SUPPLEND_REGEX = re.compile(r' 0$')
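# ISO 639-2/T (terminological) language codes mapped to their ISO 639-2/B (bibliographic)
# equivalents; the bibliographic form is what gets written to the title's `language` attribute.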
ISO6392T_TO_ISO6392B = {
u'sqi': u'alb',
u'hye': u'arm',
u'eus': u'baq',
u'mya': u'bur',
u'zho': u'chi',
u'ces': u'cze',
u'nld': u'dut',
u'fra': u'fre',
u'kat': u'geo',
u'deu': u'ger',
u'ell': u'gre',
u'isl': u'ice',
u'mkd': u'mac',
u'msa': u'may',
u'mri': u'mao',
u'fas': u'per',
u'ron': u'rum',
u'slk': u'slo',
u'bod': u'tib',
u'cym': u'wel'
}
class SetupArticlePipe(plumber.Pipe):
def transform(self, data):
xml = ET.Element('records')
return data, xml
class XMLArticlePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
article = ET.Element('record')
xml.append(article)
return data
class XMLJournalMetaJournalTitlePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
journaltitle = ET.Element('journalTitle')
journaltitle.text = raw.journal.title
xml.find('./record').append(journaltitle)
return data
class XMLJournalMetaISSNPipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
issn = ET.Element('issn')
issn.text = raw.any_issn()
xml.find('./record').append(issn)
return data
class XMLJournalMetaPublisherPipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
for item in raw.journal.publisher_name or []:
publisher = ET.Element('publisher')
publisher.text = item
xml.find('./record').append(publisher)
return data
class XMLArticleMetaIdPipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
uniquearticleid = ET.Element('publisherRecordId')
uniquearticleid.text = raw.publisher_id
xml.find('./record').append(uniquearticleid)
return data
class XMLArticleMetaArticleIdDOIPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.doi:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
articleiddoi = ET.Element('doi')
articleiddoi.text = raw.doi
xml.find('./record').append(articleiddoi)
return data
class XMLArticleMetaTitlePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
raw.original_language()
if raw.original_title():
title = ET.Element('title')
title.text = raw.original_title()
title.set('language', ISO6392T_TO_ISO6392B.get(raw.original_language(), raw.original_language()))
xml.find('./record').append(title)
elif raw.translated_titles() and len(raw.translated_titles()) != 0:
item = [(k, v) for k, v in raw.translated_titles().items()][0]
title = ET.Element('title')
title.text = item[1]
title.set('language', ISO6392T_TO_ISO6392B.get(item[0], item[0]))
xml.find('./record').append(title)
return data
class XMLArticleMetaAuthorsPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.authors:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
contribgroup = ET.Element('authors')
for author in raw.authors:
names = [author.get('given_names', ''), author.get('surname', '')]
contribname = ET.Element('name')
contribname.text = ' '.join(names)
contrib = ET.Element('author')
contrib.append(contribname)
for xr in author.get('xref', []):
xref = ET.Element('affiliationId')
xref.text = xr
contrib.append(xref)
contribgroup.append(contrib)
xml.find('./record').append(contribgroup)
return data
class XMLArticleMetaAffiliationPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.mixed_affiliations:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
affs = ET.Element('affiliationsList')
for affiliation in raw.mixed_affiliations:
if 'institution' in affiliation:
aff = ET.Element('affiliationName')
aff.set('affiliationId', affiliation['index'])
aff.text = affiliation['institution']
affs.append(aff)
xml.find('./record').append(affs)
return data
class XMLArticleMetaPublicationDatePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
pdate = raw.publication_date.split('-')
if len(pdate) == 1:
pdate = '-'.join(pdate + ['00', '00'])
elif len(pdate) == 2:
pdate = '-'.join(pdate + ['00'])
else:
pdate = '-'.join(pdate)
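        # e.g. '2013' becomes '2013-00-00' and '2013-05' becomes '2013-05-00'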
pubdate = ET.Element('publicationDate')
pubdate.text = pdate
xml.find('./record').append(pubdate)
return data
class XMLArticleMetaStartPagePipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.start_page:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
startpage = ET.Element('startPage')
startpage.text = raw.start_page
xml.find('./record').append(startpage)
return data
class XMLArticleMetaEndPagePipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.end_page:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
endpage = ET.Element('endPage')
endpage.text = raw.end_page
xml.find('./record').append(endpage)
return data
class XMLArticleMetaVolumePipe(plumber.Pipe):
def precond(data):
raw, __ = data
if not raw.issue:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
if raw.issue.volume:
volume = ET.Element('volume')
volume.text = raw.issue.volume
xml.find('./record').append(volume)
return data
class XMLArticleMetaIssuePipe(plumber.Pipe):
def precond(data):
raw, xml = data
try:
if not raw.issue:
raise plumber.UnmetPrecondition()
except UnavailableMetadataException as e:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
label_issue = raw.issue.number.replace('ahead', '') if raw.issue.number else ''
label_suppl_issue = ' suppl %s' % raw.issue.supplement_number if raw.issue.supplement_number else ''
if label_suppl_issue:
label_issue += label_suppl_issue
label_suppl_volume = ' suppl %s' % raw.issue.supplement_volume if raw.issue.supplement_volume else ''
if label_suppl_volume:
label_issue += label_suppl_volume
label_issue = SUPPLBEG_REGEX.sub('', label_issue)
label_issue = SUPPLEND_REGEX.sub('', label_issue)
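        # e.g. number '2' with supplement_number '1' yields the label '2 suppl 1';
        # the regexes above strip a leftover leading '0 ' or trailing ' 0' marker.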
if label_issue.strip():
issue = ET.Element('issue')
issue.text = label_issue.strip()
xml.find('./record').append(issue)
return data
class XMLArticleMetaDocumentTypePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
documenttype = ET.Element('documentType')
documenttype.text = raw.document_type
xml.find('./record').append(documenttype)
return data
class XMLArticleMetaFullTextUrlPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.html_url:
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
url = ET.Element('fullTextUrl')
url.set('format', 'html')
url.text = raw.html_url(language='en')
xml.find('./record').append(url)
return data
class XMLArticleMetaAbstractsPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.original_abstract() and not raw.translated_abstracts():
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
articlemeta = xml.find('./record')
if raw.original_abstract():
abstract = ET.Element('abstract')
abstract.set('language', ISO6392T_TO_ISO6392B.get(raw.original_language(), raw.original_language()))
abstract.text = raw.original_abstract()
articlemeta.append(abstract)
if raw.translated_abstracts():
for lang, text in raw.translated_abstracts().items():
abstract = ET.Element('abstract')
abstract.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
abstract.text = text
articlemeta.append(abstract)
return data
class XMLArticleMetaKeywordsPipe(plumber.Pipe):
def precond(data):
raw, xml = data
if not raw.keywords():
raise plumber.UnmetPrecondition()
@plumber.precondition(precond)
def transform(self, data):
raw, xml = data
articlemeta = xml.find('./record')
if raw.keywords():
for lang, keywords in raw.keywords().items():
kwdgroup = ET.Element('keywords')
kwdgroup.set('language', ISO6392T_TO_ISO6392B.get(lang, lang))
for keyword in keywords:
kwd = ET.Element('keyword')
kwd.text = keyword
kwdgroup.append(kwd)
articlemeta.append(kwdgroup)
return data
class XMLClosePipe(plumber.Pipe):
def transform(self, data):
raw, xml = data
data = ET.tostring(xml, encoding="utf-8", method="xml")
return data
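# A chaining sketch (an assumption about the plumber API, not part of this module:
# it presumes plumber.Pipeline(*pipes).run(iterable) pushes each raw article through
# the pipes in order and yields the bytes produced by XMLClosePipe).
def example_export(raw_articles):
    ppl = plumber.Pipeline(
        SetupArticlePipe(),
        XMLArticlePipe(),
        XMLJournalMetaJournalTitlePipe(),
        XMLJournalMetaISSNPipe(),
        XMLArticleMetaIdPipe(),
        XMLArticleMetaTitlePipe(),
        XMLClosePipe(),
    )
    # One serialized <records> document per input article.
    return [record for record in ppl.run(raw_articles)]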
|
|
# Requires python3
import re
import sqlite3
import subprocess
import shutil
import os
import codecs
import datetime
import sys
class TskDbDiff(object):
"""Compares two TSK/Autospy SQLite databases.
Attributes:
gold_artifacts:
autopsy_artifacts:
gold_attributes:
autopsy_attributes:
gold_objects:
autopsy_objects:
artifact_comparison:
        attribute_comparison:
report_errors: a listof_listof_String, the error messages that will be
printed to screen in the run_diff method
passed: a boolean, did the diff pass?
autopsy_db_file:
gold_db_file:
"""
def __init__(self, output_db, gold_db, output_dir=None, gold_bb_dump=None, gold_dump=None, verbose=False):
"""Constructor for TskDbDiff.
Args:
            output_db: path to output database (non-gold standard)
            gold_db: path to gold database
output_dir: (optional) Path to folder where generated files will be put.
gold_bb_dump: (optional) path to file where the gold blackboard dump is located
gold_dump: (optional) path to file where the gold non-blackboard dump is located
verbose: (optional) a boolean, if true, diff results are sent to stdout.
"""
self.output_db_file = output_db
self.gold_db_file = gold_db
self.output_dir = output_dir
self.gold_bb_dump = gold_bb_dump
self.gold_dump = gold_dump
self._bb_dump_diff = ""
self._dump_diff = ""
self._bb_dump = ""
self._dump = ""
self.verbose = verbose
def run_diff(self):
"""Compare the databases.
Raises:
TskDbDiffException: if an error occurs while diffing or dumping the database
"""
self._init_diff()
# generate the output database dumps (both DB and BB)
TskDbDiff._dump_output_db_nonbb(self.output_db_file, self._dump)
TskDbDiff._dump_output_db_bb(self.output_db_file, self._bb_dump)
# Compare non-BB
dump_diff_pass = self._diff(self._dump, self.gold_dump, self._dump_diff)
# Compare BB
bb_dump_diff_pass = self._diff(self._bb_dump, self.gold_bb_dump, self._bb_dump_diff)
self._cleanup_diff()
return dump_diff_pass, bb_dump_diff_pass
def _init_diff(self):
"""Set up the necessary files based on the arguments given at construction"""
if self.output_dir is None:
# No stored files
self._bb_dump = TskDbDiff._get_tmp_file("BlackboardDump", ".txt")
self._bb_dump_diff = TskDbDiff._get_tmp_file("BlackboardDump-Diff", ".txt")
self._dump = TskDbDiff._get_tmp_file("DBDump", ".txt")
self._dump_diff = TskDbDiff._get_tmp_file("DBDump-Diff", ".txt")
else:
self._bb_dump = os.path.join(self.output_dir, "BlackboardDump.txt")
self._bb_dump_diff = os.path.join(self.output_dir, "BlackboardDump-Diff.txt")
self._dump = os.path.join(self.output_dir, "DBDump.txt")
self._dump_diff = os.path.join(self.output_dir, "DBDump-Diff.txt")
# Sorting gold before comparing (sort behaves differently in different environments)
new_bb = TskDbDiff._get_tmp_file("GoldBlackboardDump", ".txt")
new_db = TskDbDiff._get_tmp_file("GoldDBDump", ".txt")
if self.gold_bb_dump is not None:
srtcmdlst = ["sort", self.gold_bb_dump, "-o", new_bb]
subprocess.call(srtcmdlst)
srtcmdlst = ["sort", self.gold_dump, "-o", new_db]
subprocess.call(srtcmdlst)
self.gold_bb_dump = new_bb
self.gold_dump = new_db
def _cleanup_diff(self):
if self.output_dir is None:
#cleanup temp files
os.remove(self._dump)
os.remove(self._bb_dump)
if os.path.isfile(self._dump_diff):
os.remove(self._dump_diff)
if os.path.isfile(self._bb_dump_diff):
os.remove(self._bb_dump_diff)
        if self.gold_bb_dump is not None:
os.remove(self.gold_bb_dump)
os.remove(self.gold_dump)
def _diff(self, output_file, gold_file, diff_path):
"""Compare two text files.
Args:
output_file: a pathto_File, the latest text file
gold_file: a pathto_File, the gold text file
diff_path: The file to write the differences to
Returns False if different
"""
if (not os.path.isfile(output_file)):
return False
if (not os.path.isfile(gold_file)):
return False
# It is faster to read the contents in and directly compare
output_data = codecs.open(output_file, "r", "utf_8").read()
gold_data = codecs.open(gold_file, "r", "utf_8").read()
if (gold_data == output_data):
return True
# If they are different, invoke 'diff'
diff_file = codecs.open(diff_path, "wb", "utf_8")
# Gold needs to be passed in as 1st arg and output as 2nd
dffcmdlst = ["diff", gold_file, output_file]
subprocess.call(dffcmdlst, stdout = diff_file)
# create file path for gold files inside output folder. In case of diff, both gold and current run files
# are available in the report output folder. Prefix Gold- is added to the filename.
gold_file_in_output_dir = output_file[:output_file.rfind("/")] + "/Gold-" + output_file[output_file.rfind("/")+1:]
shutil.copy(gold_file, gold_file_in_output_dir)
return False
    @staticmethod
    def _dump_output_db_bb(db_file, bb_dump_file):
"""Dumps sorted text results to the given output location.
Smart method that deals with a blackboard comparison to avoid issues
with different IDs based on when artifacts were created.
Args:
db_file: a pathto_File, the output database.
bb_dump_file: a pathto_File, the sorted dump file to write to
"""
unsorted_dump = TskDbDiff._get_tmp_file("dump_data", ".txt")
conn = sqlite3.connect(db_file)
conn.text_factory = lambda x: x.decode("utf-8", "ignore")
conn.row_factory = sqlite3.Row
artifact_cursor = conn.cursor()
# Get the list of all artifacts (along with type and associated file)
# @@@ Could add a SORT by parent_path in here since that is how we are going to later sort it.
artifact_cursor.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id")
database_log = codecs.open(unsorted_dump, "wb", "utf_8")
row = artifact_cursor.fetchone()
appnd = False
counter = 0
artifact_count = 0
artifact_fail = 0
# Cycle through artifacts
try:
while (row != None):
# File Name and artifact type
if(row["parent_path"] != None):
database_log.write(row["parent_path"] + row["name"] + ' <artifact type="' + row["display_name"] + '" > ')
else:
database_log.write(row["name"] + ' <artifact type="' + row["display_name"] + '" > ')
# Get attributes for this artifact
attribute_cursor = conn.cursor()
looptry = True
artifact_count += 1
try:
art_id = ""
art_id = str(row["artifact_id"])
attribute_cursor.execute("SELECT blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", [art_id])
attributes = attribute_cursor.fetchall()
# Print attributes
if (len(attributes) == 0):
# @@@@ This should be </artifact>
database_log.write(' <artifact/>\n')
row = artifact_cursor.fetchone()
continue
src = attributes[0][0]
for attr in attributes:
attr_value_index = 3 + attr["value_type"]
numvals = 0
for x in range(3, 6):
if(attr[x] != None):
numvals += 1
if(numvals > 1):
msg = "There were too many values for attribute type: " + attr["display_name"] + " for artifact with id #" + str(row["artifact_id"]) + ".\n"
if(not attr["source"] == src):
msg = "There were inconsistent sources for artifact with id #" + str(row["artifact_id"]) + ".\n"
try:
attr_value_as_string = str(attr[attr_value_index])
if attr["display_name"] == "Associated Artifact":
attr_value_as_string = getAssociatedArtifactType(db_file, attr_value_as_string)
#if((type(attr_value_as_string) != 'unicode') or (type(attr_value_as_string) != 'str')):
# attr_value_as_string = str(attr_value_as_string)
patrn = re.compile("[\n\0\a\b\r\f]")
attr_value_as_string = re.sub(patrn, ' ', attr_value_as_string)
database_log.write('<attribute source="' + attr["source"] + '" type="' + attr["display_name"] + '" value="' + attr_value_as_string + '" />')
except IOError as e:
print("IO error")
raise TskDbDiffException("Unexpected IO error while writing to database log." + str(e))
except sqlite3.Error as e:
msg = "Attributes in artifact id (in output DB)# " + str(row["artifact_id"]) + " encountered an error: " + str(e) +" .\n"
print("Attributes in artifact id (in output DB)# ", str(row["artifact_id"]), " encountered an error: ", str(e))
print()
looptry = False
artifact_fail += 1
database_log.write('Error Extracting Attributes')
database_log.close()
raise TskDbDiffException(msg)
finally:
attribute_cursor.close()
# @@@@ This should be </artifact>
database_log.write(' <artifact/>\n')
row = artifact_cursor.fetchone()
if(artifact_fail > 0):
msg ="There were " + str(artifact_count) + " artifacts and " + str(artifact_fail) + " threw an exception while loading.\n"
except Exception as e:
raise TskDbDiffException("Unexpected error while dumping blackboard database: " + str(e))
finally:
database_log.close()
artifact_cursor.close()
conn.close()
# Now sort the file
srtcmdlst = ["sort", unsorted_dump, "-o", bb_dump_file]
subprocess.call(srtcmdlst)
    @staticmethod
    def _dump_output_db_nonbb(db_file, dump_file):
"""Dumps a database to a text file.
Does not dump the artifact and attributes.
Args:
db_file: a pathto_File, the database file to dump
dump_file: a pathto_File, the location to dump the non-blackboard database items
"""
# Make a copy that we can modify
backup_db_file = TskDbDiff._get_tmp_file("tsk_backup_db", ".db")
shutil.copy(db_file, backup_db_file)
# We sometimes get situations with messed up permissions
os.chmod (backup_db_file, 0o777)
conn = sqlite3.connect(backup_db_file)
id_path_table = build_id_table(conn.cursor())
conn.text_factory = lambda x: x.decode("utf-8", "ignore")
# Delete the blackboard tables
conn.execute("DROP TABLE blackboard_artifacts")
conn.execute("DROP TABLE blackboard_attributes")
# Write to the database dump
with codecs.open(dump_file, "wb", "utf_8") as db_log:
for line in conn.iterdump():
line = normalize_db_entry(line, id_path_table)
db_log.write('%s\n' % line)
# Now sort the file
srtcmdlst = ["sort", dump_file, "-o", dump_file]
subprocess.call(srtcmdlst)
conn.close()
# cleanup the backup
os.remove(backup_db_file)
    @staticmethod
    def dump_output_db(db_file, dump_file, bb_dump_file):
"""Dumps the given database to text files for later comparison.
Args:
db_file: a pathto_File, the database file to dump
dump_file: a pathto_File, the location to dump the non-blackboard database items
bb_dump_file: a pathto_File, the location to dump the blackboard database items
"""
TskDbDiff._dump_output_db_nonbb(db_file, dump_file)
TskDbDiff._dump_output_db_bb(db_file, bb_dump_file)
    @staticmethod
    def _get_tmp_file(base, ext):
time = datetime.datetime.now().time().strftime("%H%M%f")
return os.path.join(os.environ['TMP'], base + time + ext)
class TskDbDiffException(Exception):
pass
def normalize_db_entry(line, table):
""" Make testing more consistent and reasonable by doctoring certain db entries.
Args:
line: a String, the line to remove the object id from.
table: a map from object ids to file paths.
"""
files_index = line.find('INSERT INTO "tsk_files"')
path_index = line.find('INSERT INTO "tsk_files_path"')
object_index = line.find('INSERT INTO "tsk_objects"')
report_index = line.find('INSERT INTO "reports"')
layout_index = line.find('INSERT INTO "tsk_file_layout"')
parens = line[line.find('(') + 1 : line.find(')')]
fields_list = parens.replace(" ", "").split(',')
# remove object ID
if (files_index != -1):
obj_id = fields_list[0]
path = table[int(obj_id)]
newLine = ('INSERT INTO "tsk_files" VALUES(' + ', '.join(fields_list[1:]) + ');')
return newLine
# remove object ID
elif (path_index != -1):
obj_id = fields_list[0]
path = table[int(obj_id)]
newLine = ('INSERT INTO "tsk_files_path" VALUES(' + path + ', '.join(fields_list[1:]) + ');')
return newLine
# remove object ID
elif (layout_index != -1):
obj_id = fields_list[0]
path= table[int(obj_id)]
newLine = ('INSERT INTO "tsk_file_layout" VALUES(' + path + ', '.join(fields_list[1:]) + ');')
return newLine
# remove object ID
elif (object_index != -1):
obj_id = fields_list[0]
parent_id = fields_list[1]
try:
path = table[int(obj_id)]
parent_path = table[int(parent_id)]
newLine = ('INSERT INTO "tsk_objects" VALUES(' + path + ', ' + parent_path + ', ' + ', '.join(fields_list[2:]) + ');')
return newLine
except Exception as e:
# objects table has things that aren't files. if lookup fails, don't replace anything.
return line
# remove time-based information, ie Test_6/11/14 -> Test
elif (report_index != -1):
fields_list[1] = "AutopsyTestCase"
fields_list[2] = "0"
newLine = ('INSERT INTO "reports" VALUES(' + ','.join(fields_list) + ');')
return newLine
else:
return line
def getAssociatedArtifactType(db_file, artifact_id):
# Make a copy that we can modify
backup_db_file = TskDbDiff._get_tmp_file("tsk_backup_db", ".db")
shutil.copy(db_file, backup_db_file)
# We sometimes get situations with messed up permissions
os.chmod (backup_db_file, 0o777)
conn = sqlite3.connect(backup_db_file)
cur = conn.cursor()
#artifact_cursor.execute("SELECT display_name FROM blackboard_artifact_types WHERE artifact_id=?",[artifact_id])
cur.execute("SELECT tsk_files.parent_path, blackboard_artifact_types.display_name FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id WHERE artifact_id=?",[artifact_id])
info = cur.fetchone()
conn.close()
# cleanup the backup
os.remove(backup_db_file)
return "File path: " + info[0] + " Artifact Type: " + info[1]
def build_id_table(artifact_cursor):
"""Build the map of object ids to file paths.
Args:
artifact_cursor: the database cursor
"""
# for each row in the db, take the object id, parent path, and name, then create a tuple in the dictionary
# with the object id as the key and the full file path (parent + name) as the value
mapping = dict([(row[0], str(row[1]) + str(row[2])) for row in artifact_cursor.execute("SELECT obj_id, parent_path, name FROM tsk_files")])
return mapping
def main():
try:
sys.argv.pop(0)
output_db = sys.argv.pop(0)
gold_db = sys.argv.pop(0)
except:
print("usage: tskdbdiff [OUPUT DB PATH] [GOLD DB PATH]")
sys.exit(1)
db_diff = TskDbDiff(output_db, gold_db, output_dir=".")
dump_passed, bb_dump_passed = db_diff.run_diff()
if dump_passed and bb_dump_passed:
print("Database comparison passed.")
if not dump_passed:
print("Non blackboard database comparison failed.")
if not bb_dump_passed:
print("Blackboard database comparison failed.")
sys.exit(0)
if __name__ == "__main__":
if sys.hexversion < 0x03000000:
print("Python 3 required")
sys.exit(1)
main()
|
|
import json
import logging
import requests
try:
# python 2
from urlparse import urljoin
except ImportError:
# python3
from urllib.parse import urljoin
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from .config import *
class FosDickAPIException(Exception):
pass
class FosDickAPIUnavailable(FosDickAPIException):
def __init__(self, url, message=None):
self.message = message or "API is unavailable"
self.url = url
def __str__(self):
return self.message
class FosDickAPIRequestError(FosDickAPIException):
def __init__(self, message):
super(FosDickAPIRequestError, self).__init__(message)
class FosDickAPI(object):
"""
    This class interacts with the Fosdick APIs to pull information
    about shipments, inventory, receipts, etc.
"""
# sets "init'ial" state .
def __init__(self, username=None, password=None):
self.USERNAME = username or FOSDICK_API_USERNAME
self.PASSWORD = password or FOSDICK_API_PASSWORD
self.URL = URL
def _get_url(self, pathname):
"""
For each pathname return corresponding URL.
:param pathname:
:return:
"""
return urljoin(self.URL, pathname)
def _create_request(self, action, **kwargs):
"""
Used to create the request_url and query params.
        :param action: API action name (a key in URL_MAP)
:param kwargs:
:return: request_url, data
"""
data = dict()
data.update(kwargs)
req_url = URL_MAP.get(action, '/')
request_url = self._get_url(req_url)
return request_url, data
def _get_response(self, request_url, data):
"""
Perform json-request to API
:param request_url: Absolute url to API.
:type request_url: str
        :param data: Data to send to the API.
:type data: dict
:return: json response
:rtype: dict
"""
# To authenticate the request.
auth = (self.USERNAME, self.PASSWORD)
try:
req = requests.get(
request_url,
params=data,
auth=auth
)
content = req.content
logging.debug(content)
except requests.exceptions.RequestException as e:
# API not available.
raise FosDickAPIUnavailable(self.URL, str(e))
try:
json_response = req.json()
except Exception as e:
raise FosDickAPIRequestError(str(e))
return json_response
def request(self, action, **kwargs):
"""
Perform json-request to API
:param action: method action (name of the function in API)
:type action: str
:return: json response
:rtype: dict
"""
request_url, data = self._create_request(
action, **kwargs)
try:
json_response = self._get_response(request_url, data)
except Exception as e:
raise FosDickAPIException(str(e))
# if No response
if not json_response:
return []
return json_response
def get_shipments(self, **kwargs):
"""
:param kwargs:page,
per_page,
updated_at_min,
updated_at_max,
shipped_on_min,
shipped_on_max,
fosdick_order_num,
external_order_num
:return: list of shipped orders
"""
shipment_info = self.request(action='shipments', **kwargs)
return shipment_info
def get_inventory(self, **kwargs):
"""
:param kwargs:page,
per_page,
updated_at_min,
updated_at_max.
:return: List of inventory levels for products
"""
inventory_info = self.request(action='inventory', **kwargs)
return inventory_info
def get_all_returns(self, **kwargs):
"""
:param kwargs:page,
per_page,
updated_at_min,
updated_at_max,
returned_at_min,
returned_at_max.
:return:List of returned orders/items
"""
returns_items = self.request(action='returns', **kwargs)
return returns_items
def get_shipment_details(self, **kwargs):
"""
:param kwargs:page,
per_page,
updated_at_min,
updated_at_max,
shipped_on_min,
shipped_on_max,
fosdick_order_num,
external_order_num
:return:List of shipped line item.
"""
shipment_details = self.request(action='shipmentdetail', **kwargs)
return shipment_details
def get_receipts(self, **kwargs):
"""
:param kwargs: page,
per_page,
transaction_at_min,
transaction_at_max,
updated_at_min,
updated_at_max,
sku,
warehouse
        :return: List of receipts.
"""
receipts = self.request(action='receipts', **kwargs)
return receipts
# TODO: Xml request, and parsing.
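# Minimal usage sketch (an illustration, not part of the API: it assumes the
# credentials in .config are valid and simply pulls one page of shipments using
# the page/per_page filters documented in get_shipments above).
def example_recent_shipments(page=1, per_page=50):
    api = FosDickAPI()
    return api.get_shipments(page=page, per_page=per_page)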
class PlaceOrder(object):
"""
    This is where the order is placed.
"""
# sets "init'ial" state .
def __init__(self, order, client_code=None, client_name=None, test_flag=None):
self.ORDER = order
self.URL = PLACE_ORDER_URL
self.CLIENT_CODE = client_code or CLIENT_CODE
self.CLIENT_NAME = client_name or CLIENT_NAME
self.TESTFLAG = test_flag or TESTFLAG
self.headers = {'content-type': 'application/json'}
def __new__(cls, order, client_code=None, client_name=None, test_flag=None):
"""
        Validate that the order is a dict.
"""
if not isinstance(order, dict):
raise FosDickAPIException("Invalid order format.")
return super(PlaceOrder, cls).__new__(cls)
def _create_request(self):
"""
Will return absolute url and formatted order data.
:return:req_url, data
"""
data = self._create_order_data()
req_url = self.URL
return req_url, data
def _get_response(self, request_url, data):
"""
Perform json-request to API.
:param request_url: Absolute url to API.
:type request_url: str
:param data: Data to post to the API.
:type data: dict
:return: json response
:rtype: dict
"""
try:
req = requests.post(
request_url,
data,
headers=self.headers
)
content = req.content
logging.debug(content)
except requests.exceptions.RequestException as e:
# API not available.
raise FosDickAPIUnavailable(self.URL, str(e))
try:
json_response = req.json()
except Exception as e:
raise FosDickAPIRequestError(str(e))
if json_response['UnitycartOrderResponse']['OrderResponse']['SuccessCode'] == 'False':
raise FosDickAPIException(json_response)
return json_response
def _create_order_data(self):
"""
        Organise the order data as required by the API.
        The order of the dict items is significant.
"""
data = OrderedDict()
data["UnitycartOrderPost"] = OrderedDict()
data["UnitycartOrderPost"]["ClientName"] = self.CLIENT_NAME
data["UnitycartOrderPost"]["ClientCode"] = self.CLIENT_CODE
data["UnitycartOrderPost"]["Test"] = self.TESTFLAG
data["UnitycartOrderPost"]["Order"] = []
order_object = self.ORDER
items = {"Item": []}
order = order_object['Order'][0]
all_items = order_object['Order'][0]['Items']['Item']
for order_item in all_items:
items["Item"].append(OrderedDict({
"Inv": order_item['Inv'],
"Qty": order_item['Qty'],
"PricePer": order_item['PricePer'],
"NumOfPayments": order_item['NumOfPayments'],
}))
order_with_items = OrderedDict()
order_with_items["Subtotal"] = order['Subtotal']
order_with_items["Total"] = order['Total']
order_with_items["ExternalID"] = str(order['ExternalID'])[:60]
order_with_items["AdCode"] = order['AdCode']
order_with_items["ShipFirstname"] = order['ShipFirstname'][:22]
order_with_items["ShipLastname"] = order['ShipLastname'][:16]
order_with_items["ShipAddress1"] = order['ShipAddress1'][:26] # total limit 30
order_with_items["ShipCity"] = order['ShipCity'][:13]
order_with_items["ShipState"] = order['ShipState'][:2]
order_with_items["ShipPhone"] = ""
order_with_items["ShipZip"] = order['ShipZip'][:11]
order_with_items["Email"] = order['Email'][:100] # configurable in fosdick
order_with_items["UseAsBilling"] = order['UseAsBilling']
order_with_items["PaymentType"] = order['PaymentType']
order_with_items["Items"] = items
data["UnitycartOrderPost"]["Order"].append(order_with_items)
data = json.dumps(data)
return data
def request(self):
"""
Perform json-post-request to API
:return: json response
:rtype: dict
"""
request_url, data = self._create_request()
try:
json_response = self._get_response(request_url, data)
except Exception as e:
raise FosDickAPIException(str(e))
# if No response
if not json_response:
return []
return json_response
def create_order(self):
"""
Used to place the order.
:return: order_item
"""
order_item = self.request()
return order_item
# TODO: multiple order at a time.
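# Usage sketch (the order dict is hypothetical; see _create_order_data for the expected keys):
#   response = PlaceOrder(order).create_order()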
|
|
from __future__ import unicode_literals
from .... import ProResource
class AccountDetailsFinancial(ProResource):
full_endpoint = True
attribute_names = [
'id',
# string Accounts ID
'last_update',
# dateTime Date of last update
'date',
# dateTime Accounting to date
'type',
# string Accounts type
'account_status',
# integer Accounts status
'accountant_fees',
# integer Accountant fees
'acquisitions_and_disposals',
# integer Acquisitions and disposals
'accruals_deferred_income',
# integer Accruals & deferred income
'amortisation_of_tangibles',
# integer Amortisation of tangibles
'assets_available_for_sale_financial',
# integer Financial assets available for sale
'assets_deferred_tax',
# integer Deferred tax assets
'assets_financial',
# integer Financial assets
'assets_financial_due_after',
# integer Financial assets due after 12 months
'assets_financial_due_width',
# integer Financial assets due within 12 months
'assets_other_due_after',
# integer Other assets due after 12 months
'assets_trading',
# integer Trading assets
'assets_intangible',
# integer Intangible assets
'assets_investment',
# integer Investment assets
'assets_misc_current',
# integer Miscellaneous current assets
'assets_other',
# integer Other assets
'assets_other_due_within',
# integer Other assets due within 1 year
'assets_other_intangible',
# integer Other intangible assets
'bank_loan',
# integer Bank loan
'bank_overdraft',
# integer Bank overdraft
'capital_expenditure',
# integer Capital expenditure
'commercial_assets_tangible',
# integer Tangible commercial assets
'company',
# integer Company registration numbers
'creditors',
# integer Creditors
'customer_accounts_due_after',
# integer Customer accounts due after 12 months
'financial_assets_tangible',
# integer Tangible financial assets
'assets_total_current',
# integer Total current assets
'assets_total_fixed',
# integer Total fixed assets
'auditor_fees',
# integer Auditor fees
'cash',
# integer Cash
'cash_at_central_banks',
# integer Cash at central banks
'cash_year_start',
# integer Cash year start
'change_in_cash',
# integer Change in cash
'consolidated_accounts',
# boolean Accounts consolidated (Y/N)
'currency',
# string Accounts currency
'customer_accounts',
# integer Customer accounts
'customer_accounts_due_within',
# integer Customer accounts due within 1 year
'debt_securities',
# integer Debt securities
'debt_securities_due_after',
# integer Debt securities due after one year
'debt_securities_due_within',
# integer Debt securities due within one year
'debt_securities_in_issue',
# integer Debt securities in issue
'debt_securities_in_issue_due_after',
# integer Debt securities in issue due after 12 months
'debt_securities_in_issue_due_within',
# integer Debt securities in issue due within 12 months
'debtors',
# integer Debtors
'debtors_due_after',
# integer Debtors due after 12 months
'deposits_by_banks',
# integer Deposits by banks
'deposits_by_banks_due_after',
# integer Deposits by banks due after 1 year
'deposits_by_banks_due_within',
# integer Deposits by banks due within 1 year
'depreciation_of_tangibles',
# integer Depreciation of tangibles
'derivative_financial_instruments',
# integer Derivative financial instruments
'derivatives',
# integer Derivatives
'derivatives_due_after',
# integer Derivatives due after 12 months
'derivatives_due_within',
# integer Derivatives due within 12 months
'director_other',
# integer Director other
'director_pensions',
# integer Director pensions
'director_social_security',
# integer Director social security
'directors_accounts',
# integer Directors accounts
'director_fees',
# integer Director fees
'directors_remuneration',
# integer Directors' remuneration
'dividends',
# integer Dividends
'dividends_other',
# integer Other dividends
'employee_costs',
# integer Employee costs
'employee_numbers',
# integer Employee numbers
'employee_other',
# integer Employee other
'employee_pensions',
# integer Employee pensions
'employee_remuneration',
# integer Employee remuneration
'employee_social_security',
# integer Employee social security
'equity_dividends_paid',
# integer Equity dividends paid
'equity_shares',
# integer Equity shares
'exceptional_items',
# integer Exceptional items
'exceptional_other_items',
# integer Exceptional other items
'exceptional_pandl_on_disposal',
# integer Exceptional profit & loss on disposal
'exceptional_pandl_on_reorganisations',
# integer Exceptional profit & loss on reorganisations
'exchange_rate_effect',
# integer Exchange rate effect
'fees_and_commission_expense',
# integer Fees & commission expense
'fees_and_commission_income',
# integer Fees & commission income
'financing_activities',
# integer Financing activities
'goodwill',
# integer Goodwill
'group_accounts',
# integer Group accounts
'group_debtors',
# integer Group debtors
'highest_paid_director',
# integer Highest paid director
'hp_commitments',
# integer HP commitments
'interest_and_similar_expense',
# integer Interest & similar expense
'interest_and_similar_income',
# integer Interest & similar income
'investing_activities',
# integer Investing activities
'investment_and_other',
# integer Investment & other
'investment_property',
# integer Investment property
'items_in_course_of_collection',
# integer Items in course of collection
'items_in_course_of_transmission',
# integer Items in course of transmission
'lease_commitments',
# integer Lease commitments
'liabilities_current_tax',
# integer Current tax liabilities
'liabilities_deferred_tax',
# integer Deferred tax liabilities
'liabilities_financial',
        # integer Financial liabilities
'liabilities_financial_due_after',
# integer Financial liabilities due after 12 months
'liabilities_financial_due_within',
# integer Financial liabilities due within 12 months
'liabilities_insurance',
# integer Insurance liabilities
'liabilities_other_due_after',
# integer Other liabilities due after one year
'liabilities_other_provisions',
# integer Liabilities - other provisions
'liabilities_subordinated_due_after',
# integer Subordinated liabilities due after one year
'liabilities_trading',
# integer Trading liabilities
'liabilities_other',
# integer Other liabilities
'liabilities_other_due_within',
# integer Other Liabilities due within 1 year
'liabilities_subordinated',
# integer Subordinated liabilities
'liabilities_subordinated_due_within',
# integer Subordinated Liabilities due within 1 year
'loans_and_advances_to_banks',
# integer Loans & advances to banks
'loans_and_advances_to_banks_due_after',
# integer Loans & advances to banks due after 1 year
'loans_and_advances_to_banks_due_within',
# integer Loans & advances to banks due within 1 year
'loans_and_advances_to_customers',
# integer Loans & advances to customers
'loans_and_advances_to_customers_due_after',
# integer Loans & advances to customers due after 1 year
'loans_and_advances_to_customers_due_within',
# integer Loans & advances to customers due within 1 year
'lt_bank_loans',
# integer Long term bank loans
'lt_directors_accounts',
# integer Long term directors accounts
'lt_group_accounts',
# integer Long term group accounts
'lt_hp_commitments',
# integer Long term HP commitments
'lt_lease_commitments',
# integer Long term lease commitments
'lt_loans',
# integer Long term loans
'lt_other_loans_finance',
# integer Other long term finance loans
'lt_total_accruals_deferred_income',
# integer Long term total accruals deferred income
'lt_total_hp_lease_commitments',
# integer Long term total HP lease commitments
'lt_total_liabilities',
# integer Long term total liabilities
'management_of_liquid_resources',
# integer Management of liquid resources
'minority_interests',
# integer Minority interests
'minority_interests_profit',
# integer Minority interests profit
'misc_debtors',
# integer Miscellaneous debtors
'misc_liabilities',
# integer Miscellaneous liabilities
'months',
# integer Months included in accounts
'net_cashflow_from_financing',
# integer Net cashflow from financing
'net_change_in_cash',
# integer Net change in cash
'net_fees_and_commission_income',
# integer Net fees & commission income
'net_interest_income',
# integer Net interest income
'net_pension_liability',
# integer Net pension liability
'net_tax_paid',
# integer Net tax paid
'net_trading_income',
# integer Net trading income
'operating_activities',
# integer Operating activities
'operating_expenses',
# integer Operating expenses
'operating_profit',
# integer Operating profit
'ordinary_shares',
# integer Ordinary shares
'other_audit_costs',
# integer Other audit costs
'other_income',
# integer Other income
'other_reserves',
# integer Other reserves
'other_appropriations',
# integer Other appropriations
'other_current_liability',
# integer Other current liability
'other_lt_liabilities',
# integer Other long term liabilities
'other_provisions_for_liabilities',
# integer Other provisions for liabilities
'other_shares',
# integer Other shares
'other_st_loans',
# integer Other short term loans
'pandl_revenue_reserve',
# integer P&L revenue reserve
'pre_tax_profit',
# integer Pre-tax profit
'preference_shares',
# integer Preference shares
'prepayments_accrued_income',
# integer Prepayments & accrued income
'profit_after_tax',
# integer Profit after tax
'retained_profit',
# integer Retained profit
'return_on_investments',
# integer Return on investments
'revaluation_reserve',
# integer Revaluation reserve
'share_premium_account',
# integer Share premium account
'share_profit_in_ventures',
# integer Share profit in ventures
'statutory_audit_costs',
# integer Statutory audit costs
'stocks_work_in_progress',
# integer Stocks work in progress
'tax',
# integer Tax
'total_current_liabilities',
# integer Total current liabilities
'total_operating_income',
# integer Total operating income
'total_shareholders_funds',
# integer Total shareholders' funds
'total_called_issued_capital',
# integer Total called issued capital
'total_lt_liabilities',
# integer Total long term liabilities
'total_other_creditors',
# integer Total other creditors
'total_provisions',
# integer Total provisions
'trade_creditors',
# integer Trade creditors
'trade_debtors',
# integer Trade debtors
'treasury_other_bills',
# integer Other treasury bills
'year_end_cash_equivalents',
# integer Year end cash equivalents
]
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import datetime
import time
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
import util
from util import print_msg, format_satoshis, print_stderr
import bitcoin
from bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
import transaction
from transaction import Transaction
import paymentrequest
from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
import contacts
known_commands = {}
def satoshis(amount):
# satoshi conversion must not be performed by the parser
return int(COIN*Decimal(amount)) if amount not in ['!', None] else amount
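# e.g. satoshis("0.001") == 100000 (assuming COIN == 100000000 as in upstream Electrum);
# '!' and None pass through so that "send max" can be resolved later.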
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.func_code.co_varnames[1:func.func_code.co_argcount]
self.defaults = func.func_defaults
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return decorator
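# Registration sketch (a hypothetical command, shown only for the flag semantics):
# 'w' marks a command that needs a wallet, 'n' a network connection, 'p' a password.
#
#   @command('wn')
#   def addresshistorycount(self, address):
#       """Count history items for an address."""
#       return len(self.getaddresshistory(address))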
class Commands:
def __init__(self, config, wallet, network, callback = None, password=None, new_password=None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
self._password = password
self.new_password = new_password
def _run(self, method, args, password_getter):
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
self._password = apply(password_getter,())
if self._password is None:
return
f = getattr(self, method)
result = f(*args)
self._password = None
if self._callback:
apply(self._callback, ())
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self):
"""Create a new wallet"""
raise BaseException('Not a JSON-RPC command')
@command('wn')
def restore(self, text):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of Argentum addresses
or Argentum private keys. If you want to be prompted for your
seed, type '?' or ':' (concealed) """
raise BaseException('Not a JSON-RPC command')
@command('wp')
def password(self):
"""Change wallet password. """
self.wallet.update_password(self._password, self.new_password)
self.wallet.storage.write()
return {'password':self.wallet.use_encryption}
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
try:
value = ast.literal_eval(value)
except:
pass
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=132, entropy=1, language=None):
"""Create a seed"""
from mnemonic import Mnemonic
s = Mnemonic(language).make_seed('standard', nbits, custom_entropy=entropy)
return s.encode('utf8')
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""Check that a seed was generated with given entropy"""
from mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, entropy)
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.get_history', [address]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_utxos(exclude_frozen=False))
for i in l:
v = i["value"]
i["value"] = float(v)/COIN if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.listunspent', [address]))
@command('n')
def getutxoaddress(self, txid, pos):
"""Get the address of a UTXO. Note: This is a walletless server query, results are
not checked by SPV.
"""
r = self.network.synchronous_get(('blockchain.utxo.get_address', [txid, pos]))
return {'address': r}
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':satoshi_amount}.
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
if txin.get('redeemPubkey'):
pubkey = txin['redeemPubkey']
txin['type'] = 'p2pkh'
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
if txin.get('privkey'):
keypairs[pubkey] = txin['privkey']
elif txin.get('redeemScript'):
raise BaseException('Not implemented')
outputs = map(lambda x: (TYPE_ADDRESS, x['address'], int(x['value'])), outputs)
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
@command('wp')
def signtransaction(self, tx, privkey=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
pubkey = bitcoin.public_key_from_private_key(privkey)
h160 = bitcoin.hash_160(pubkey.decode('hex'))
x_pubkey = 'fd' + (chr(0) + h160).encode('hex')
tx.sign({x_pubkey:privkey})
else:
self.wallet.sign_transaction(tx, self._password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize()
@command('n')
def broadcast(self, tx, timeout=30):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast(tx, timeout)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = transaction.multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(redeem_script.decode('hex')))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state([address], False)
@command('wp')
def getprivatekeys(self, address):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if is_address(address):
return self.wallet.get_private_key(address, self._password)
domain = address
return [self.wallet.get_private_key(address, self._password) for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum-arg listaddresses | electrum-arg getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self):
"""Return the balance of your wallet. """
c, u, x = self.wallet.get_balance()
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
out = self.network.synchronous_get(('blockchain.address.get_balance', [address]))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getproof(self, address):
"""Get Merkle branch of an address in the UTXO set"""
p = self.network.synchronous_get(('blockchain.address.get_proof', [address]))
out = []
for i,s in p:
out.append(i)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.synchronous_get(('blockchain.transaction.get_merkle', [txid, int(height)]))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of electrum."""
from version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.keystore.get_master_private_key(self._password))
@command('wp')
def getseed(self):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(self._password)
return s.encode('utf8')
@command('wp')
def importprivkey(self, privkey):
"""Import a private key. """
try:
addr = self.wallet.import_key(privkey, self._password)
out = "Keypair imported: " + addr
except BaseException as e:
out = "Error: " + str(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.wallet.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise BaseException('cannot verify alias', x)
return out['address']
@command('nw')
def sweep(self, privkey, destination, tx_fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
tx_fee = satoshis(tx_fee)
privkeys = privkey if type(privkey) is list else [privkey]
self.nocheck = nocheck
dest = self._resolver(destination)
tx = self.wallet.sweep(privkeys, self.network, self.config, dest, tx_fee, imax)
return tx.as_dict() if tx else None
@command('wp')
def signmessage(self, address, message):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, self._password)
return base64.b64encode(sig)
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
return bitcoin.verify_message(address, sig, message)
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
amount = satoshis(amount)
final_outputs.append((TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
if rbf:
            tx.set_rbf(True)
if not unsigned:
self.wallet.sign_transaction(tx, self._password)
return tx
@command('wp')
def payto(self, destination, amount, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False):
"""Create a transaction. """
tx_fee = satoshis(tx_fee)
domain = [from_addr] if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, tx_fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False):
"""Create a multi-output transaction. """
tx_fee = satoshis(tx_fee)
domain = [from_addr] if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf)
return tx.as_dict()
@command('w')
def history(self):
"""Wallet history. Returns the transaction history of your wallet."""
balance = 0
out = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if timestamp:
date = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
date = "----"
label = self.wallet.get_label(tx_hash)
out.append({
'txid': tx_hash,
'timestamp': timestamp,
'date': date,
'label': label,
'value': float(value)/COIN if value is not None else None,
'height': height,
'confirmations': conf
})
return out
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a Argentum address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('w')
def listcontacts(self):
"""Show your list of contacts"""
return self.wallet.contacts
@command('w')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.wallet.contacts.resolve(key)
@command('w')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.wallet.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, show_labels=False, frozen=False, unused=False, funded=False, show_balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.get_addresses():
if frozen and not self.wallet.is_frozen(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if show_balance:
item += ", "+ format_satoshis(sum(self.wallet.get_addr_balance(addr)))
if show_labels:
item += ', ' + repr(self.wallet.labels.get(addr, ''))
out.append(item)
return out
@command('n')
def gettransaction(self, txid):
"""Retrieve a transaction. """
if self.wallet and txid in self.wallet.transactions:
tx = self.wallet.transactions[txid]
else:
raw = self.network.synchronous_get(('blockchain.transaction.get', [txid]))
if raw:
tx = Transaction(raw)
else:
raise BaseException("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message):
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
return bitcoin.encrypt_message(message, pubkey)
@command('wp')
def decrypt(self, pubkey, encrypted):
"""Decrypt a message encrypted with a public key."""
return self.wallet.decrypt_message(pubkey, encrypted, self._password)
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
out['amount (ARG)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise BaseException("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = filter(lambda x: x.get('status')==f, out)
return map(self._format_request, out)
@command('w')
def getunusedaddress(self,force=False):
"""Returns the first unused address."""
addr = self.wallet.get_unused_address()
if addr is None and force:
addr = self.wallet.create_new_address(False)
if addr:
return addr
else:
return False
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request."""
addr = self.wallet.get_unused_address()
if addr is None:
if force:
addr = self.wallet.create_new_address(False)
else:
return False
amount = satoshis(amount)
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('wp')
def signrequest(self, address):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise BaseException('No alias in your configuration')
alias_addr = self.wallet.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, self._password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in self.wallet.receive_requests.keys():
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address, URL):
"""Watch an address. Everytime the address changes, a http POST is sent to the URL."""
def callback(x):
import urllib2
headers = {'content-type':'application/json'}
data = {'address':address, 'status':x.get('result')}
try:
req = urllib2.Request(URL, json.dumps(data), headers)
response_stream = urllib2.urlopen(req)
util.print_error('Got Response for %s' % address)
except BaseException as e:
util.print_error(str(e))
self.network.send([('blockchain.address.subscribe', [address])], callback)
return True
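# Illustrative note (an addition, not from the original source): the callback
# above POSTs a JSON body shaped like
#     {"address": "<watched address>", "status": "<subscription result>"}
# to the supplied URL, with a 'content-type: application/json' header.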
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Argentum address, contact or alias',
'address': 'Argentum address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in ARG). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in ARG).',
'outputs': 'list of ["address", amount]',
}
command_options = {
'password': ("-W", "--password", "Password"),
'receiving': (None, "--receiving", "Show only receiving addresses"),
'change': (None, "--change", "Show only change addresses"),
'frozen': (None, "--frozen", "Show only frozen addresses"),
'unused': (None, "--unused", "Show only unused addresses"),
'funded': (None, "--funded", "Show only funded addresses"),
'show_balance':("-b", "--balance", "Show the balances of listed addresses"),
'show_labels': ("-l", "--labels", "Show the labels of listed addresses"),
'nocheck': (None, "--nocheck", "Do not verify aliases"),
'imax': (None, "--imax", "Maximum number of inputs"),
'tx_fee': ("-f", "--fee", "Transaction fee (in ARG)"),
'from_addr': ("-F", "--from", "Source address. If it isn't in the wallet, it will ask for the private key unless supplied in the format public_key:private_key. It's not saved in the wallet."),
'change_addr': ("-c", "--change", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "--nbits", "Number of bits of entropy"),
'entropy': (None, "--entropy", "Custom entropy"),
'language': ("-L", "--lang", "Default language for wordlist"),
'gap_limit': ("-G", "--gap", "Gap limit"),
'privkey': (None, "--privkey", "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "--unsigned", "Do not sign transaction"),
'rbf': (None, "--rbf", "Replace-by-fee transaction"),
'domain': ("-D", "--domain", "List of addresses"),
'memo': ("-m", "--memo", "Description of the request"),
'expiration': (None, "--expiration", "Time in seconds"),
'timeout': (None, "--timeout", "Timeout in seconds"),
'force': (None, "--force", "Create new address beyond gap limit, if no more addresses are available."),
'pending': (None, "--pending", "Show only pending requests."),
'expired': (None, "--expired", "Show only expired requests."),
'paid': (None, "--paid", "Show only paid requests."),
}
# don't use floats because of rounding errors
from transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'entropy': long,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'tx_fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of argentum: URIs. Example: \"(\'file:///var/www/\',\'https://electrum-arg.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of argentum: URIs. Example: \"(\'file:///var/www/\',\'https://electrum-arg.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=False, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
def add_global_options(parser):
group = parser.add_argument_group('global options')
group.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show debugging information")
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum-arg_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
group.add_argument("--segwit", action="store_true", dest="segwit", default=False, help="The Wizard will create Segwit seed phrases (Testnet only).")
group.add_argument("--nolnet", action="store_true", dest="nolnet", default=False, help="Use Nolnet")
def get_parser():
# create main parser
parser = argparse.ArgumentParser(
epilog="Run 'electrum help <command>' to see the help for a command")
add_global_options(parser)
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="argentum URI (or bip70 file)")
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
add_network_options(parser_gui)
add_global_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop', 'load_wallet', 'close_wallet'], nargs='?')
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
add_global_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
add_global_options(p)
if cmdname == 'restore':
p.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
#p.set_defaults(func=run_cmdline)
if cmd.requires_password:
p.add_argument("-W", "--password", dest="password", default=None, help="password")
for optname, default in zip(cmd.options, cmd.defaults):
a, b, help = command_options[optname]
action = "store_true" if type(default) is bool else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
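# A minimal driver sketch (hypothetical, not part of the original module): the
# real application wires get_parser() into its own entry point, so the call
# site below is an assumption for illustration only.  Note that get_parser()
# already invokes set_default_subparser('gui'), which inserts 'gui' into
# sys.argv when no subcommand is present.
if __name__ == '__main__':
    _parser = get_parser()
    _args = _parser.parse_args()
    print(_args.cmd)  # e.g. 'gui', 'daemon', 'listaddresses', ...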
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworksOperations(object):
"""VirtualNetworksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetwork"
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.VirtualNetwork"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetwork"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetwork')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.VirtualNetwork"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetwork"]
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.VirtualNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetwork"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetwork"]
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to update virtual network tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkListResult"]
"""Gets all virtual networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkListResult"]
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
def check_ip_address_availability(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
ip_address, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.IPAddressAvailabilityResult"
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPAddressAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.IPAddressAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPAddressAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.check_ip_address_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore
def list_usage(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkListUsageResult"]
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.VirtualNetworkListUsageResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
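# Hypothetical usage sketch (not part of the generated file): this operations
# class is normally reached through the versioned NetworkManagementClient,
# which constructs it and exposes it as ``client.virtual_networks``.  The
# credential type, resource names and address prefix below are illustrative
# assumptions only.
def _virtual_network_example():  # pragma: no cover - illustrative only
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network.v2019_07_01 import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.virtual_networks.begin_create_or_update(
        "my-resource-group",
        "my-vnet",
        {
            "location": "westus",
            "address_space": {"address_prefixes": ["10.0.0.0/16"]},
        },
    )
    return poller.result()  # LROPoller blocks here until provisioning completes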
|
|
from __future__ import absolute_import, print_function
import numpy as np
import warnings
def _bit_length_26(x):
if x == 0:
return 0
elif x == 1:
return 1
else:
return len(bin(x)) - 2
try:
from scipy.lib._version import NumpyVersion
except ImportError:
import re
string_types = basestring
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
can be >9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy.lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev-', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
class ResettableCache(dict):
"""
Dictionary whose elements may depend on one another.
If entry `B` depends on entry `A`, changing the value of entry `A` will
reset the value of entry `B` to a default (None); deleting entry `A` will
delete entry `B`. The connections between entries are stored in a
`_resetdict` private attribute.
Parameters
----------
reset : dictionary, optional
An optional dictionary associating a sequence of dependent entries with any
key of the object.
items : var, optional
Optional keyword arguments used to initialize the dictionary.
Examples
--------
>>> reset = dict(a=('b',), b=('c',))
>>> cache = resettable_cache(a=0, b=1, c=2, reset=reset)
>>> assert_equal(cache, dict(a=0, b=1, c=2))
>>> print("Try resetting a")
>>> cache['a'] = 1
>>> assert_equal(cache, dict(a=1, b=None, c=None))
>>> cache['c'] = 2
>>> assert_equal(cache, dict(a=1, b=None, c=2))
>>> cache['b'] = 0
>>> assert_equal(cache, dict(a=1, b=0, c=None))
>>> print("Try deleting b")
>>> del(cache['a'])
>>> assert_equal(cache, {})
"""
def __init__(self, reset=None, **items):
self._resetdict = reset or {}
dict.__init__(self, **items)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
# if hasattr needed for unpickling with protocol=2
if hasattr(self, '_resetdict'):
for mustreset in self._resetdict.get(key, []):
self[mustreset] = None
def __delitem__(self, key):
dict.__delitem__(self, key)
for mustreset in self._resetdict.get(key, []):
del(self[mustreset])
# def __getstate__(self):
# print('pickling wrapper', self.__dict__)
# return self.__dict__
#
# def __setstate__(self, dict_):
# print('unpickling wrapper', dict_)
# self.__dict__.update(dict_)
resettable_cache = ResettableCache
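# A small self-contained sketch (an addition, not from the original module)
# exercising the reset cascade described in the ResettableCache docstring.
def _resettable_cache_demo():  # pragma: no cover - illustrative only
    cache = resettable_cache(a=0, b=1, c=2, reset=dict(a=('b',), b=('c',)))
    cache['a'] = 1   # writing 'a' resets 'b', which in turn resets 'c'
    assert cache == dict(a=1, b=None, c=None)
    del cache['a']   # deleting 'a' cascades through 'b' and 'c'
    assert cache == {}
    return cache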
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target - 1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2 ** ((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2 ** _bit_length_26(quotient - 1)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
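# Illustrative values (an addition): _next_regular(7) == 8, _next_regular(1000)
# == 1000 (already 5-smooth: 2**3 * 5**3), and _next_regular(673) == 675
# (3**3 * 5**2); FFT input lengths are padded up to such sizes.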
if NumpyVersion(np.__version__) >= '1.7.1':
np_matrix_rank = np.linalg.matrix_rank
else:
def np_matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
class CacheWriteWarning(UserWarning):
pass
class CachedAttribute(object):
def __init__(self, func, cachename=None, resetlist=None):
self.fget = func
self.name = func.__name__
self.cachename = cachename or '_cache'
self.resetlist = resetlist or ()
def __get__(self, obj, type=None):
if obj is None:
return self.fget
# Get the cache or set a default one if needed
_cachename = self.cachename
_cache = getattr(obj, _cachename, None)
if _cache is None:
setattr(obj, _cachename, resettable_cache())
_cache = getattr(obj, _cachename)
# Get the name of the attribute to set and cache
name = self.name
_cachedval = _cache.get(name, None)
# print("[_cachedval=%s]" % _cachedval)
if _cachedval is None:
# Call the "fget" function
_cachedval = self.fget(obj)
# Set the attribute in obj
# print("Setting %s in cache to %s" % (name, _cachedval))
try:
_cache[name] = _cachedval
except KeyError:
setattr(_cache, name, _cachedval)
# Update the reset list if needed (and possible)
resetlist = self.resetlist
if resetlist:
try:
_cache._resetdict[name] = self.resetlist
except AttributeError:
pass
# else:
# print("Reading %s from cache (%s)" % (name, _cachedval))
return _cachedval
def __set__(self, obj, value):
errmsg = "The attribute '%s' cannot be overwritten" % self.name
warnings.warn(errmsg, CacheWriteWarning)
class _cache_readonly(object):
"""
Decorator for CachedAttribute
"""
def __init__(self, cachename=None, resetlist=None):
self.func = None
self.cachename = cachename
self.resetlist = resetlist or None
def __call__(self, func):
return CachedAttribute(func,
cachename=self.cachename,
resetlist=self.resetlist)
cache_readonly = _cache_readonly()
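# A hypothetical example (an addition, not from the original module) of how
# cache_readonly is applied: the first attribute access calls the wrapped
# method and stores the result in the instance's resettable ``_cache``; later
# accesses reuse that value, and assignment only emits a CacheWriteWarning.
class _CachedDemo(object):  # pragma: no cover - illustrative only
    @cache_readonly
    def answer(self):
        # stand-in for an expensive computation
        return 42

# demo = _CachedDemo()
# demo.answer      # computes once and stores it in demo._cache['answer']
# demo.answer      # served from the cache, no recomputation
# demo.answer = 0  # emits CacheWriteWarning and leaves the cached value intact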
|
|
#! /usr/bin/env python
# encoding: utf-8
from xbrl import XBRLParser
from xbrl import GAAP
from xbrl import GAAPSerializer
from xbrl import DEISerializer
from xbrl import XBRLParserException
import pytest
import sys
import os
import six
try:
import __pypy__
except ImportError:
__pypy__ = None
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
sys.path.insert(0, os.path.abspath('python-xbrl'))
def test_parse_empty_file():
xbrl_parser = XBRLParser()
file_to_parse = "tests/nothing.xml"
with pytest.raises(XBRLParserException):
xbrl_parser.parse(file_to_parse)
def test_open_file_handle():
xbrl_parser = XBRLParser()
file_to_parse = "tests/sam-20130629.xml"
try:
xbrl_parser.parse(file(file_to_parse))
except NameError:
pass
def test_parse_GAAP10Q_RRDonnelley():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/sam-20130629.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 98032.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 12107.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 5417.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 19715.0
assert result.data['liabilities_and_equity'] == 60263.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 65084.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 253536.0
assert result.data['net_income_loss'] == 19715.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 19715.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 19715.0
assert result.data['income_before_equity_investments'] == 31822.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 376766.0
assert result.data['gross_profit'] == 97132.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 138996.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
def test_parse_GAAP10K_RRDonnelley():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/sam-20131228.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 104377.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 0.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 9556.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 29120.0
assert result.data['liabilities_and_equity'] == 69900.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 0.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 302085.0
assert result.data['net_income_loss'] == 18079.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 0.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 0.0
assert result.data['income_before_equity_investments'] == 0.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 444075.0
assert result.data['gross_profit'] == 104628.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 164278.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10K_Webfilings():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/goog-20131231.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 3755.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 0.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 38034.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 0.0
assert result.data['liabilities_and_equity'] == 110920.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 975.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 0.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 87309.0
assert result.data['net_income_loss'] == 0.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 0.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 0.0
assert result.data['income_before_equity_investments'] == 0.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 110920.0
assert result.data['gross_profit'] == 0.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 72886.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10Q_Webfilings():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/goog-20140630.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 3683.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 913.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 11697.0
assert result.data['non_current_assets'] == 43703.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 3490.0
assert result.data['liabilities_and_equity'] == 121608.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 0.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 95749.0
assert result.data['net_income_loss'] == 3422.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 3579.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 3579.0
assert result.data['income_before_equity_investments'] == 4403.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 121608.0
assert result.data['gross_profit'] == 0.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 77905.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10Q_Rivet():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/c289-20140503.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 12535.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 14.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 708.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == -983.0
assert result.data['liabilities_and_equity'] == 13261.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 3497.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 726.0
assert result.data['net_income_loss'] == -983.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == -977.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == -977.0
assert result.data['income_before_equity_investments'] == -969.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 13261.0
assert result.data['gross_profit'] == 2687.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 10747.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10K_Rivet():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/rsh-20131231.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 234.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 42.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 583.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == -1914.0
assert result.data['liabilities_and_equity'] == 15912.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 4445.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 2064.0
assert result.data['net_income_loss'] == -1914.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == -1899.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == -1899.0
assert result.data['income_before_equity_investments'] == -1872.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 15912.0
assert result.data['gross_profit'] == 2784.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 13330.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10Q_QXInteractive():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/aaoi-20140630.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 5606.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 85.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 978.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 1730.0
assert result.data['liabilities_and_equity'] == 153524.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 9458.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 111781.0
assert result.data['net_income_loss'] == 1919.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 2058.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 2058.0
assert result.data['income_before_equity_investments'] == 0.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 153524.0
assert result.data['gross_profit'] == 11188.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 106114.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10K_ThomsonReuters():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/aaoi-20131231.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 39057.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 0.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 177.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == -297.0
assert result.data['liabilities_and_equity'] == 111057.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 6973.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 63077.0
assert result.data['net_income_loss'] == -520.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 0.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 0.0
assert result.data['income_before_equity_investments'] == 0.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 111057.0
assert result.data['gross_profit'] == 6676.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 77936.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10Q_Fujitsu():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/aaww-20140630.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 233079.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == -23815.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 414512.0
assert result.data['non_current_assets'] == 3568474.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 26657.0
assert result.data['liabilities_and_equity'] == 4100064.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 4870.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 0.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 1355551.0
assert result.data['net_income_loss'] == 0.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == -515.0
assert result.data['equity_attributable_interest'] == 0.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == -515.0
assert result.data['income_before_equity_investments'] == 0.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 4100064.0
assert result.data['gross_profit'] == 0.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 531590.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10K_Fujitsu():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/aaww-20131231.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 194292.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 0.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 3124306.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == 0.0
assert result.data['liabilities_and_equity'] == 3718259.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 4870.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 0.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 1317773.0
assert result.data['net_income_loss'] == 0.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 0.0
assert result.data['equity_attributable_interest'] == 4352.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 0.0
assert result.data['income_before_equity_investments'] == 0.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 3718259.0
assert result.data['gross_profit'] == 0.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 593953.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_income_loss_noncontrolling'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_GAAP10Q_Ez_XBRL():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/ggho-20140930.xml"
xbrl = xbrl_parser.parse(file_to_parse)
gaap_obj = xbrl_parser.parseGAAP(xbrl,
str(file_to_parse
.split("-")[1].split(".")[0][:4] +
file_to_parse.split("-")[1]
.split(".")[0][4:6] +
file_to_parse.split("-")[1]
.split(".")[0][6:8]),
"current")
serializer = GAAPSerializer()
result = serializer.dump(gaap_obj)
assert result.data['liabilities'] == 34273.0
assert result.data['net_cash_flows_financing_continuing'] == 0.0
assert result.data['revenue'] == 0.0
assert result.data['income_tax_expense_benefit'] == 127.0
assert result.data['income_from_equity_investments'] == 0.0
assert result.data['preferred_stock_dividends'] == 0.0
assert result.data['redeemable_noncontrolling_interest'] == 0.0
assert result.data['extraordary_items_gain_loss'] == 0.0
assert result.data['temporary_equity'] == 0.0
assert result.data['costs_and_expenses'] == 0.0
assert result.data['non_current_assets'] == 58615.0
assert result.data['net_cash_flows_discontinued'] == 0.0
assert result.data['income_loss'] == -7593.0
assert result.data['liabilities_and_equity'] == 79451.0
assert result.data['other_operating_income'] == 0.0
assert result.data['operating_income_loss'] == 0.0
assert result.data['net_income_parent'] == 0.0
assert result.data['equity'] == 0.0
assert result.data['net_cash_flows_operating_discontinued'] == 0.0
assert result.data['cost_of_revenue'] == 0.0
assert result.data['operating_expenses'] == 13026.0
assert result.data['noncurrent_liabilities'] == 0.0
assert result.data['current_liabilities'] == 0.0
assert result.data['net_cash_flows_investing'] == 0.0
assert result.data['stockholders_equity'] == 30543.0
assert result.data['net_income_loss'] == -8642.0
assert result.data['net_cash_flows_investing_continuing'] == 0.0
assert result.data['nonoperating_income_loss'] == 0.0
assert result.data['net_cash_flows_financing'] == 0.0
assert result.data['net_income_shareholders'] == 0.0
assert result.data['comprehensive_income'] == 0.0
assert result.data['equity_attributable_interest'] == 309.0
assert result.data['commitments_and_contingencies'] == 0.0
assert result.data['comprehensive_income_parent'] == 0.0
assert result.data['income_before_equity_investments'] == -8531.0
assert result.data['comprehensive_income_interest'] == 0.0
assert result.data['other_comprehensive_income'] == 0.0
assert result.data['equity_attributable_parent'] == 0.0
assert result.data['assets'] == 79451.0
assert result.data['gross_profit'] == 5433.0
assert result.data['net_cash_flows_operating_continuing'] == 0.0
assert result.data['current_assets'] == 20836.0
assert result.data['interest_and_debt_expense'] == 0.0
assert result.data['net_cash_flows_operating'] == 0.0
assert result.data['common_shares_outstanding'] == 0.0
assert result.data['common_shares_issued'] == 0.0
assert result.data['common_shares_authorized'] == 0.0
def test_parse_DEI10Q_RRDonnelley():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/sam-20130629.xml"
xbrl = xbrl_parser.parse(file_to_parse)
dei_obj = xbrl_parser.parseDEI(xbrl)
serializer = DEISerializer()
result = serializer.dump(dei_obj)
assert result.data['trading_symbol'] == "SAM"
assert result.data['company_name'] == "BOSTON BEER CO INC"
assert result.data['shares_outstanding'] == 4007355.0
assert result.data['public_float'] == 0.0
def test_parse_Custom10Q_RRDonnelley():
xbrl_parser = XBRLParser(0)
file_to_parse = "tests/sam-20130629.xml"
xbrl = xbrl_parser.parse(file_to_parse)
custom_obj = xbrl_parser.parseCustom(xbrl)
if six.PY3:
result = len(custom_obj())
assert result == 13
if six.PY2 and not __pypy__:
result = custom_obj()
assert result[0] == ('conversionofclassbcommonstocktoclassacommonstockshares', '100000')
assert result[1] == ('percentageofproductionvolumes', '0.90')
assert result[2] == ('sharebasedcompensationarrangementbysharebasedpaymentawardoptionstovestineachtranche', '5000')
assert result[3] == ('weightedaveragenumberofsharesoutstandingbasicincludingnonvestedparticipatingsecurities', '12866000')
assert result[4] == ('incrementalcommonsharesattributabletoconversionofcommonstock', '4007000')
assert result[5] == ('sharebasedcompensationarrangementbysharebasedpaymentawardinvestmentsharesweightedaveragegrantdat', '59.62')
assert result[6] == ('incomeallocatedtoequityinstrumentsotherthanoptionnonvested', '7000')
assert result[7] == ('netproceedsfromsaleofinvestmentshares', '531000')
assert result[8] == ('weightedaveragenumberofbasicsharesoutstandingequityinstrumentsotherthanoptionnonvested', '94000')
assert result[9] == ('sharebasedcompensationarrangementbysharebasedpaymentawardemployeeinvestmentsharespurchase', '12894')
assert result[10] == ('provisionforreductionofdoubtfulaccounts', '-28000')
assert result[11] == ('receiptofgovernmentgrantsforfacilitiesexpansion', '770000')
assert result[12] == ('netincomelossallocatedtoequityinstrumentsotherthanoptionnonvested', '-143000')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
manage
~~~~~~
Map Kibera Schools management script
Usage:
build everything: ./manage.py build all
build templates: ./manage.py build html
build statics: ./manage.py build static [type] [--no-filters]
where type is one of css or js
build map tile images: ./manage.py build tiles
clean up: ./manage.py clean
build for a target: ./manage.py build2 target
where target is one of staging or production
run a local preview: ./manage.py preview
:licence: BSD, see LICENSE
"""
from __future__ import print_function # python2 compat
if 'FileNotFoundError' not in dir(__builtins__):
# python2
FileNotFoundError = OSError
import os
import sys
import shutil
import json
def get_config(_cached={}):
if 'config' not in _cached:
try:
with open('config.json') as conf_file:
try:
config = json.load(conf_file)
except ValueError as e:
err_msg = e.args[0]
raise SystemExit('Error (config.json): {}'.format(err_msg))
except FileNotFoundError:
raise SystemExit('Error: config file (config.json) not found.')
_cached['config'] = config
return _cached['config']
def command(func, _func_cache={}):
"""Decorate functions to register them as commands."""
# register the command
func_name = func.__name__.lower()
if func_name in _func_cache:
raise Exception('Duplicate definition of command {}'.format(func_name))
_func_cache[func_name] = func
return func # leave the functions so they are still importable and stuff
@command
def help():
"""Get usage information about this script"""
print('\nUsage: {} [command]'.format(sys.argv[0]), end='\n\n')
print('Available commands:')
for name, func in command.__defaults__[0].items(): # from _func_cache
print(' * {:16s} {}'.format(name, func.__doc__ or ''))
raise SystemExit()
def build_html():
from flask_frozen import Freezer
from app import views
views.app.config['FREEZER_DESTINATION'] = '../build'
views.app.testing = True
freezer = Freezer(views.app, with_static_files=False)
freezer.register_generator(views.school_url_generator)
freezer.freeze()
def get_file_contents(dir, filenames):
"""Iterateable file contents by file"""
for filename in filenames:
file_path = os.path.join(dir, filename)
with open(file_path) as file_obj:
contents = file_obj.read()
yield contents
def apply_filters(filters, sources):
from app import static
filters = [getattr(static, f) for f in filters]
for filter_func in filters:
sources = filter_func(sources)
return sources
def save_sources(type_name, prefix, sources):
from hashlib import sha1
full_prefix = os.path.join('build', prefix)
try:
shutil.rmtree(full_prefix)
except FileNotFoundError:
pass
os.makedirs(full_prefix)
filenames = []
for source in sources:
try:
hashed = sha1(bytes(source, 'utf-8')).hexdigest()
except TypeError:
hashed = sha1(source).hexdigest() # python2
filename = '{}.{}'.format(hashed, type_name)
file_path = os.path.join(prefix, filename)
file_full_path = os.path.join('build', file_path)
with open(file_full_path, 'w') as file_obj:
file_obj.write(source)
filenames.append(file_path)
return filenames
def get_templates(base):
"""iterate through all template files"""
for root, dirs, files in os.walk(base):
for filename in files:
file_path = os.path.join(root, filename)
if os.path.splitext(filename)[1] == '.html':
with open(file_path) as file_obj:
original_contents = file_obj.read()
with open(file_path, 'w') as file_obj:
yield original_contents, file_obj
def template_inject(root, name, filenames=None):
import re
tag_marker = (r'<!-- tag\s+{name}\n'
r'\s*template=(?P<template>.*?\n)'
r'(\s*path=(?P<path>.*?)\n)?'
r'\s*-->\n?'.format(name=name))
tag_end = r'<!--\s+endtag\s+{name}\s+-->'.format(name=name)
tag_pattern = re.compile(tag_marker)
end_pattern = re.compile(tag_end)
for original_contents, file_obj in get_templates('build'):
tag_marker = re.search(tag_pattern, original_contents)
end_marker = re.search(end_pattern, original_contents)
if tag_marker is not None and end_marker is not None:
tag_info = tag_marker.groupdict()
tag_template = tag_info['template']
tags = ''
if filenames is None and tag_info['path'] is not None:
tags += tag_template.format(path=root + tag_info['path'])
else:
                assert filenames is not None, 'tag has no path= attribute and no filenames were provided'
for filename in filenames:
tags += tag_template.format(path=root + filename)
new_stuff = '{}{}{}'.format(original_contents[:tag_marker.end()],
tags, original_contents[end_marker.start():])
file_obj.write(new_stuff)
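# For reference, a sketch of the kind of template fragment template_inject()
# rewrites (the exact markup in the real templates may differ):
#
#     <!-- tag css
#          template=<link rel="stylesheet" href="{path}">
#     -->
#     <!-- endtag css -->
#
# Whatever sits between the two markers is replaced with one rendered
# `template` line per generated filename (or a single line built from the
# literal `path=` value when no filenames are supplied).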
def build_static_thing(type_name, conf, no_filter=False):
    print('building static: {}...'.format(type_name))
    thing_conf = conf[type_name]
    sources = get_file_contents(thing_conf['prefix'], thing_conf['files'])
    if not no_filter:  # honour the --no-filters flag documented in the usage text
        sources = apply_filters(thing_conf['filters'], sources)
    filenames = save_sources(type_name, thing_conf['prefix'], sources)
    template_inject('/', type_name, filenames)
def build_static(what, no_filter):
    conf = get_config()['static']
    if what in ('all', 'css'):
        build_static_thing('css', conf, no_filter)
    if what in ('all', 'js'):
        build_static_thing('js', conf, no_filter)
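# A hypothetical config.json shape consistent with how build_static_thing()
# reads it (the key names under "css"/"js" follow the code above; the paths,
# file lists, and filter names are illustrative assumptions):
#
#     {
#         "static": {
#             "css": {"prefix": "static/css", "files": ["main.css"], "filters": ["cssmin"]},
#             "js":  {"prefix": "static/js",  "files": ["map.js"],   "filters": ["jsmin"]}
#         }
#     }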
@command
def build(what, *args):
# ensure the build folder exists
if 'build' not in os.listdir('.'):
os.makedirs('build')
    if what not in ('all', 'html', 'static'):
raise SystemExit('no such build: {}'.format(what))
if what in ('all', 'html'):
build_html()
if what in ('all', 'static'):
static_args = list(args)
no_filter = '--no-filters' in static_args
if no_filter:
static_args.remove('--no-filters')
if len(static_args) == 0:
static = 'all'
elif len(static_args) == 1:
static = static_args[0]
if static not in ('css', 'js'):
raise SystemExit('unrecognized argument: {}'.format(static))
elif len(static_args) > 1:
raise SystemExit('too many arguments to static: {}'.format(args))
build_static(static, no_filter)
print('built {}.'.format(what))
@command
def clean():
try:
shutil.rmtree('build')
    except FileNotFoundError:  # aliased to OSError on Python 2 by the shim at the top
pass
print('cleaned.')
@command
def build2(target):
    """Build for a deployment target (staging or production). Currently a stub."""
    print('buildit yo')
if __name__ == '__main__':
import sys
# get the command or else help
try:
cmd, args = sys.argv[1].lower(), sys.argv[2:]
except IndexError:
cmd, args = 'help', []
# run the command
funcs = command.__defaults__[0] # from _func_cache
if cmd in funcs:
funcs[cmd](*args)
else:
print('Command "{}" not found :('.format(cmd))
help()
|
|
import os
import sys
import shutil
import string
import math
import mahotas
import PIL
import PIL.Image
import numpy as np
import scipy
import scipy.io
import cv2
import h5py
import lxml
import lxml.etree
import glob
import sqlite3
import colorsys
## Open existing volume and create a series of subvolumes
## (with the same ids / names etc)
#input_mojo_path = 'D:\\dev\\datasets\\NewPipelineResults2\\mojo'
#output_subvolume_path = 'D:\\dev\\datasets\\NewPipelineResults2_Subvolumes'
#min_slices_per_subvolume = 100
#overlap_slices = 1
input_mojo_path = 'D:\\dev\\datasets\\Cube2\\mojo'
output_subvolume_path = 'D:\\dev\\datasets\\Cube2_Subvolumes'
min_slices_per_subvolume = 100
overlap_slices = 1
#input_mojo_path = 'C:\\dev\\datasets\\ac3x75_compress\\mojo'
#output_subvolume_path = 'C:\\dev\\datasets\\ac3x75_compress_subvolumes'
#min_slices_per_subvolume = 25
#overlap_slices = 1
input_tile_images_path = input_mojo_path + '\\images\\tiles'
input_tile_images_volume_file = input_mojo_path + '\\images\\tiledVolumeDescription.xml'
input_ids_path = input_mojo_path + '\\ids'
input_tile_ids_path = input_ids_path + '\\tiles'
input_tile_ids_volume_file = input_ids_path + '\\tiledVolumeDescription.xml'
input_color_map_file = input_ids_path + '\\colorMap.hdf5'
input_segment_info_db_file = input_ids_path + '\\segmentInfo.db'
def mkdir_safe( dir_to_make ):
if not os.path.exists( dir_to_make ):
execute_string = 'mkdir ' + '"' + dir_to_make + '"'
print execute_string
print
os.system( execute_string )
## Open input volume xml
print 'Reading TiledVolumeDescription files'
with open( input_tile_images_volume_file, 'r' ) as file:
imageTiledVolumeDescription = lxml.etree.parse(file).getroot()
with open( input_tile_ids_volume_file, 'r' ) as file:
idTiledVolumeDescription = lxml.etree.parse(file).getroot()
## Open input volume database
print 'Reading segmentInfo file (sqlite) {0}.'.format(input_segment_info_db_file)
in_con = sqlite3.connect(input_segment_info_db_file)
cur = in_con.cursor()
# Get max segment id
cur.execute('SELECT MAX(id) FROM segmentInfo;')
id_max = cur.fetchone()[0]
segment_remap = np.arange(0, id_max + 1, dtype=np.uint32)
segment_confidence = np.zeros(id_max + 1, dtype=np.int8)
segment_names = (id_max + 1) * [ None ]
# Read in name / id table
cur.execute("SELECT id, name, confidence FROM segmentInfo;")
while True:
segment_info_row = cur.fetchone()
    if segment_info_row is None:
break
segment_names[segment_info_row[0]] = segment_info_row[1]
segment_confidence[segment_info_row[0]] = segment_info_row[2]
# Read in id remap table
cur.execute("CREATE TABLE IF NOT EXISTS relabelMap ( fromId int PRIMARY KEY, toId int);")
cur.execute("SELECT fromId, toId FROM relabelMap WHERE fromId != toId ORDER BY fromId;")
while True:
remap_row = cur.fetchone()
    if remap_row is None:
break
segment_remap[remap_row[0]] = remap_row[1]
in_con.close()
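# Illustrative example of the remap table read above: if relabelMap contains a
# row (fromId=7, toId=3), then segment_remap[7] == 3, so the per-segment size
# accumulation below attributes counts to the remapped id 3 rather than to 7.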
original_image_num_pixels_x = int ( idTiledVolumeDescription.xpath('@numVoxelsX')[0] )
original_image_num_pixels_y = int ( idTiledVolumeDescription.xpath('@numVoxelsY')[0] )
original_image_num_tiles_z = int ( idTiledVolumeDescription.xpath('@numTilesZ')[0] )
tile_num_pixels_x = int ( idTiledVolumeDescription.xpath('@numVoxelsPerTileX')[0] )
tile_num_pixels_y = int ( idTiledVolumeDescription.xpath('@numVoxelsPerTileY')[0] )
## Calculate subvolume sizes
if original_image_num_tiles_z < min_slices_per_subvolume:
min_slices_per_subvolume = original_image_num_tiles_z
n_subvolumes = int(math.floor(original_image_num_tiles_z / min_slices_per_subvolume))
subvolume_size = int(math.floor(original_image_num_tiles_z / n_subvolumes))
subvolume_start_indices = range(0, n_subvolumes * subvolume_size, subvolume_size)
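# Worked example of the arithmetic above (illustrative numbers): with
# original_image_num_tiles_z = 250 and min_slices_per_subvolume = 100,
# n_subvolumes = floor(250 / 100) = 2, subvolume_size = floor(250 / 2) = 125,
# and subvolume_start_indices = [0, 125]; the final subvolume always extends to
# slice original_image_num_tiles_z - 1.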
## Loop for each subvolume
for subvolume_i in [len(subvolume_start_indices) - 1]: #range(len(subvolume_start_indices)):
subvolume_first_z = subvolume_start_indices[subvolume_i]
if subvolume_i == len(subvolume_start_indices) - 1:
subvolume_last_z = original_image_num_tiles_z - 1
else:
subvolume_last_z = subvolume_start_indices[subvolume_i + 1] - 1 + overlap_slices
output_path = output_subvolume_path + '\\z={0:04d}-{1:04d}\\mojo'.format(subvolume_first_z, subvolume_last_z)
output_tile_images_path = output_path + '\\images\\tiles'
output_tile_images_volume_file = output_path + '\\images\\tiledVolumeDescription.xml'
output_ids_path = output_path + '\\ids'
output_tile_ids_path = output_ids_path + '\\tiles'
output_tile_ids_volume_file = output_ids_path + '\\tiledVolumeDescription.xml'
output_color_map_file = output_ids_path + '\\colorMap.hdf5'
output_segment_info_db_file = output_ids_path + '\\segmentInfo.db'
segment_sizes = np.zeros(id_max + 1, dtype=np.int64)
    id_tile_list = []
for tile_index_z in range(subvolume_last_z - subvolume_first_z + 1):
from_tile_index_z = subvolume_first_z + tile_index_z
## Copy tile images (measure segment sizes for w=0)
current_image_num_pixels_y = original_image_num_pixels_y
current_image_num_pixels_x = original_image_num_pixels_x
current_tile_data_space_y = tile_num_pixels_y
current_tile_data_space_x = tile_num_pixels_x
tile_index_w = 0
while current_image_num_pixels_y > tile_num_pixels_y / 2 or current_image_num_pixels_x > tile_num_pixels_x / 2:
from_tile_ids_path = input_tile_ids_path + '\\' + 'w=' + '%08d' % ( tile_index_w ) + '\\' + 'z=' + '%08d' % ( from_tile_index_z )
current_tile_ids_path = output_tile_ids_path + '\\' + 'w=' + '%08d' % ( tile_index_w ) + '\\' + 'z=' + '%08d' % ( tile_index_z )
mkdir_safe( current_tile_ids_path )
from_tile_images_path = input_tile_images_path + '\\' + 'w=' + '%08d' % ( tile_index_w ) + '\\' + 'z=' + '%08d' % ( from_tile_index_z )
current_tile_images_path = output_tile_images_path + '\\' + 'w=' + '%08d' % ( tile_index_w ) + '\\' + 'z=' + '%08d' % ( tile_index_z )
mkdir_safe( current_tile_images_path )
num_tiles_y = int( math.ceil( float( current_image_num_pixels_y ) / tile_num_pixels_y ) )
num_tiles_x = int( math.ceil( float( current_image_num_pixels_x ) / tile_num_pixels_x ) )
for tile_index_y in range( num_tiles_y ):
for tile_index_x in range( num_tiles_x ):
y = tile_index_y * tile_num_pixels_y
x = tile_index_x * tile_num_pixels_x
from_tile_ids_name = from_tile_ids_path + '\\' + 'y=' + '%08d' % ( tile_index_y ) + ',' + 'x=' + '%08d' % ( tile_index_x ) + '.' + idTiledVolumeDescription.xpath('@fileExtension')[0]
current_tile_ids_name = current_tile_ids_path + '\\' + 'y=' + '%08d' % ( tile_index_y ) + ',' + 'x=' + '%08d' % ( tile_index_x ) + '.' + idTiledVolumeDescription.xpath('@fileExtension')[0]
from_tile_images_name = from_tile_images_path + '\\' + 'y=' + '%08d' % ( tile_index_y ) + ',' + 'x=' + '%08d' % ( tile_index_x ) + '.' + imageTiledVolumeDescription.xpath('@fileExtension')[0]
current_tile_images_name = current_tile_images_path + '\\' + 'y=' + '%08d' % ( tile_index_y ) + ',' + 'x=' + '%08d' % ( tile_index_x ) + '.' + imageTiledVolumeDescription.xpath('@fileExtension')[0]
tile_hdf5 = h5py.File( from_tile_ids_name, 'r' )
tile_ids = tile_hdf5['IdMap'][:,:]
tile_hdf5.close()
unique_tile_ids = np.unique( tile_ids )
for unique_tile_id in unique_tile_ids:
                        id_tile_list.append( (unique_tile_id, tile_index_w, tile_index_z, tile_index_y, tile_index_x) )
if tile_index_w == 0:
current_image_counts = np.bincount( tile_ids.ravel() )
current_image_counts_ids = np.nonzero( current_image_counts )[0]
current_max = np.max( current_image_counts_ids )
if id_max < current_max:
print 'WARNING: Found id ({0}) greater than id_max ({1})!'.format(current_max, id_max)
                            id_max = current_max
segment_sizes.resize( id_max + 1 )
current_image_counts_ids = segment_remap[ current_image_counts_ids ]
segment_sizes[ current_image_counts_ids ] = segment_sizes[ current_image_counts_ids ] + np.uint32( current_image_counts [ current_image_counts_ids ] )
shutil.copyfile(from_tile_ids_name, current_tile_ids_name)
print current_tile_ids_name
shutil.copyfile(from_tile_images_name, current_tile_images_name)
print current_tile_images_name
current_image_num_pixels_y = current_image_num_pixels_y / 2
current_image_num_pixels_x = current_image_num_pixels_x / 2
current_tile_data_space_y = current_tile_data_space_y * 2
current_tile_data_space_x = current_tile_data_space_x * 2
tile_index_w = tile_index_w + 1
## Sort the tile list so that the same id appears together
id_tile_list = np.array( sorted( id_tile_list ), np.uint32 )
## Save subvolume xml and database files
print 'Copying colorMap file (hdf5)'
shutil.copyfile(input_color_map_file, output_color_map_file)
print 'Writing segmentInfo file (sqlite)'
if os.path.exists(output_segment_info_db_file):
os.remove(output_segment_info_db_file)
print "Deleted existing database file."
con = sqlite3.connect(output_segment_info_db_file)
cur = con.cursor()
cur.execute('PRAGMA main.cache_size=10000;')
cur.execute('PRAGMA main.locking_mode=EXCLUSIVE;')
cur.execute('PRAGMA main.synchronous=OFF;')
cur.execute('PRAGMA main.journal_mode=WAL;')
cur.execute('PRAGMA count_changes=OFF;')
cur.execute('PRAGMA main.temp_store=MEMORY;')
cur.execute('DROP TABLE IF EXISTS idTileIndex;')
cur.execute('CREATE TABLE idTileIndex (id int, w int, z int, y int, x int);')
cur.execute('CREATE INDEX I_idTileIndex ON idTileIndex (id);')
cur.execute('DROP TABLE IF EXISTS segmentInfo;')
cur.execute('CREATE TABLE segmentInfo (id int, name text, size int, confidence int);')
cur.execute('CREATE UNIQUE INDEX I_segmentInfo ON segmentInfo (id);')
cur.execute('DROP TABLE IF EXISTS relabelMap;')
cur.execute('CREATE TABLE relabelMap ( fromId int PRIMARY KEY, toId int);')
for entry_index in xrange(0, id_tile_list.shape[0]):
cur.execute("INSERT INTO idTileIndex VALUES({0}, {1}, {2}, {3}, {4});".format( *id_tile_list[entry_index, :] ))
    for segment_index in xrange( 1, id_max + 1 ):
if len( segment_sizes ) > segment_index and segment_sizes[ segment_index ] > 0:
## Add the segment info entry
if segment_index == 0:
segment_name = '__boundary__'
            elif segment_names[segment_index] is not None:
segment_name = segment_names[segment_index]
else:
print 'WARNING: Found segment id ({0}) with no name in database!'.format(segment_index)
segment_name = "segment{0}".format( segment_index )
cur.execute('INSERT INTO segmentInfo VALUES({0}, "{1}", {2}, {3});'.format( segment_index, segment_name, segment_sizes[ segment_index ], segment_confidence[ segment_index ] ))
## Add the segment remap entry
if len( segment_remap ) > segment_index and segment_remap[ segment_index ] != segment_index:
cur.execute('INSERT INTO relabelMap VALUES({0}, {1});'.format( segment_index, segment_remap[ segment_index ]))
con.commit()
con.close()
#Output TiledVolumeDescription xml files
print 'Writing TiledVolumeDescription files'
output_imageTiledVolumeDescription = lxml.etree.Element( "tiledVolumeDescription",
fileExtension = imageTiledVolumeDescription.xpath('@fileExtension')[0],
numTilesX = imageTiledVolumeDescription.xpath('@numTilesX')[0],
numTilesY = imageTiledVolumeDescription.xpath('@numTilesY')[0],
numTilesZ = str( tile_index_z ),
numTilesW = imageTiledVolumeDescription.xpath('@numTilesW')[0],
numVoxelsPerTileX = imageTiledVolumeDescription.xpath('@numVoxelsPerTileX')[0],
numVoxelsPerTileY = imageTiledVolumeDescription.xpath('@numVoxelsPerTileY')[0],
numVoxelsPerTileZ = imageTiledVolumeDescription.xpath('@numVoxelsPerTileZ')[0],
numVoxelsX = imageTiledVolumeDescription.xpath('@numVoxelsX')[0],
numVoxelsY = imageTiledVolumeDescription.xpath('@numVoxelsY')[0],
numVoxelsZ = str( tile_index_z ),
dxgiFormat = imageTiledVolumeDescription.xpath('@dxgiFormat')[0],
numBytesPerVoxel = imageTiledVolumeDescription.xpath('@numBytesPerVoxel')[0],
isSigned = imageTiledVolumeDescription.xpath('@isSigned')[0] )
with open( output_tile_images_volume_file, 'w' ) as file:
file.write( lxml.etree.tostring( output_imageTiledVolumeDescription, pretty_print = True ) )
output_idTiledVolumeDescription = lxml.etree.Element( "tiledVolumeDescription",
fileExtension = idTiledVolumeDescription.xpath('@fileExtension')[0],
numTilesX = idTiledVolumeDescription.xpath('@numTilesX')[0],
numTilesY = idTiledVolumeDescription.xpath('@numTilesY')[0],
numTilesZ = str( tile_index_z ),
numTilesW = idTiledVolumeDescription.xpath('@numTilesW')[0],
numVoxelsPerTileX = idTiledVolumeDescription.xpath('@numVoxelsPerTileX')[0],
numVoxelsPerTileY = idTiledVolumeDescription.xpath('@numVoxelsPerTileY')[0],
numVoxelsPerTileZ = idTiledVolumeDescription.xpath('@numVoxelsPerTileZ')[0],
numVoxelsX = idTiledVolumeDescription.xpath('@numVoxelsX')[0],
numVoxelsY = idTiledVolumeDescription.xpath('@numVoxelsY')[0],
numVoxelsZ = str( tile_index_z ),
dxgiFormat = idTiledVolumeDescription.xpath('@dxgiFormat')[0],
numBytesPerVoxel = idTiledVolumeDescription.xpath('@numBytesPerVoxel')[0],
isSigned = idTiledVolumeDescription.xpath('@isSigned')[0] )
with open( output_tile_ids_volume_file, 'w' ) as file:
file.write( lxml.etree.tostring( output_idTiledVolumeDescription, pretty_print = True ) )
print
print "Subvolume {0} of {1} created.".format( subvolume_i + 1, len(subvolume_start_indices))
print
|
|
"""
vmfmerge.py
By DKY
Version 0.1.2 BETA
VMF Merge Tool
"""
__version__ = '0.1.2 BETA'
import os
import sys
import copy
import shutil
from datetime import datetime
from argparse import ArgumentParser
from collections import OrderedDict
from vmf import VMF, InvalidVMF, load_vmfs, get_parent, compare_vmfs
from vmfdelta import (
DeltaMergeConflict,
merge_delta_lists, create_conflict_resolution_deltas,
)
def parse_args(argv):
parser = ArgumentParser(
description="VMF Merge Tool",
)
parser.add_argument(
'--version',
action='version',
version=__version__,
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Noisily display progress messages throughout the procedure.",
)
parser.add_argument(
'vmfs',
nargs='+',
metavar='vmf',
help="The name of a *.vmf file, or the path to a *.vmf file.",
)
parser.add_argument(
'-n', '--no-auto-parent',
action='store_true',
help=
"Do not try to automatically figure out which VMF is the parent "
"VMF. Instead, simply assume that the first VMF in the argument "
"list is the parent. (Can be dangerous-- Use with care!)"
,
)
parser.add_argument(
'-i', '--dump-individual',
action='store_true',
help=
"Instead of merging, output a list of individual per-file deltas "
"to stdout."
,
)
parser.add_argument(
'-p', '--dump-proposed',
action='store_true',
help=
"Instead of merging, output a list of all proposed merge deltas "
"to stdout."
,
)
parser.add_argument(
'-A', '--aggressive',
action='store_true',
help="Enable aggressive conflict resolution.",
)
return parser.parse_args(argv)
def do_merge(
parent, children,
dumpIndividual=False, dumpProposed=False,
aggressive=False, verbose=False,
noParentSideEffects=False, noChildSideEffects=False,
update_callback=lambda *args, **kwargs: None
):
""" Performs a merge of the given children's deltas into the parent.
If `dumpIndividual` is True, this prints and returns the individual deltas
for each child (in `{child : deltaList}` form), and does nothing else.
If `dumpProposed` is True, this prints and returns a list of the proposed
merged deltas, and does nothing else.
The `aggressive` flag is not currently implemented.
If `noParentSideEffects` is True, this leaves the given parent VMF
untouched, modifying a deep copy instead of the original.
If `noChildSideEffects` is True, this leaves the given child VMFs untouched,
modifying deep copies instead of the originals.
The `update_callback` will be called at each stage of the process with the
following arguments:
update_callback(message, progress, maxProgress)
... where `message` is a human-readable message describing the current
stage, `progress` is a 1-indexed integer representing the current stage
number, and `maxProgress` is the number of required stages in this merge.
If both `dumpIndividual` and `dumpProposed` are False, this function
returns a list of deltas that were found to conflict during the merge.
If there were no conflicted deltas found during the process, an empty list
is returned.
"""
class ProgressTracker:
# Not including the conflict resolution step.
NUM_MERGE_STEPS = 3
def __init__(self, children):
self.progress = 0
self.maxProgress = (
int(noParentSideEffects)
+ int(noChildSideEffects) * len(children)
+ len(children)
+ self.NUM_MERGE_STEPS
)
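            # e.g. with two children and both no-side-effect flags set:
            # 1 + 2 + 2 + 3 = 8 stages (one more is added later if conflicts
            # require a Manual Merge VisGroups pass).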
def update(
self,
message,
preIncrement=False, postIncrement=True,
finished=False,
):
if preIncrement:
self.progress += 1
print(message)
update_callback(
message,
progress=min(self.progress, self.maxProgress),
maxProgress=self.maxProgress,
finished=finished,
)
if postIncrement:
self.progress += 1
# We're gonna be modifying this soon.
children = children[:]
progressTracker = ProgressTracker(children)
# If we don't want side-effects on the parent VMF, we should deep-copy it.
if noParentSideEffects:
progressTracker.update("Preparing parent VMF for merge...")
parent = copy.deepcopy(parent)
# If we don't want side-effects on the child VMFs, we should deep-copy them.
if noChildSideEffects:
for i, child in enumerate(children):
progressTracker.update(
"Preparing {} for merge...".format(child.get_filename())
)
children[i] = copy.deepcopy(child)
# Generate lists of deltas for each child.
deltaListForChild = OrderedDict()
for i, child in enumerate(children):
progressTracker.update(
"Generating delta list for {}...".format(
os.path.basename(child.path)
)
)
deltas = compare_vmfs(parent, child)
deltaListForChild[child] = deltas
if dumpIndividual:
for child, deltas in deltaListForChild.items():
print("Deltas for {}:".format(child.path))
print('\n'.join(repr(delta) for delta in deltas))
print("")
return deltaListForChild
# Fix up all deltas so that they have references to their origin VMF.
for child, deltas in deltaListForChild.items():
for delta in deltas:
delta.originVMF = child
# Merge the delta lists into a single list of deltas, to be applied on top
# of the parent.
progressTracker.update("Merging deltas...")
deltaLists = list(deltaListForChild.values())
try:
mergedDeltas = merge_delta_lists(
deltaLists,
aggressive=aggressive,
verbose=verbose,
)
except DeltaMergeConflict as e:
print(str(e))
mergedDeltas = e.partialDeltas
print("")
print("Conflicted deltas:")
conflictedDeltas = e.conflictedDeltas
for delta in conflictedDeltas:
print("From {}:".format(delta.get_origin_filename()), repr(delta))
print("")
progressTracker.maxProgress += 1
progressTracker.update("Creating Manual Merge VisGroups...")
conflictResolutionDeltas = create_conflict_resolution_deltas(
parent, conflictedDeltas,
verbose=verbose,
)
if verbose:
print()
print("Conflict resolution deltas:")
print('\n'.join(repr(delta) for delta in conflictResolutionDeltas))
print()
mergedDeltas += conflictResolutionDeltas
else:
conflictedDeltas = []
if dumpProposed:
print("Merged deltas:")
print('\n'.join(repr(delta) for delta in mergedDeltas))
return mergedDeltas
# Apply the merged deltas to the parent.
progressTracker.update("Applying deltas...")
parent.apply_deltas(mergedDeltas, verbose=verbose)
# Write the mutated parent to the target VMF path.
progressTracker.update("Writing merged VMF...")
def get_merged_vmf_path(parentPath):
parentDir = os.path.dirname(parentPath)
parentFileName = os.path.basename(parentPath)
parentName, ext = os.path.splitext(parentFileName)
mergedFileName = parentName + '_merged' + ext
mergedFilePath = os.path.join(parentDir, mergedFileName)
# Make sure the output filename is unique...
i = 0
while os.path.exists(mergedFilePath):
mergedFileName = parentName + '_merged_' + str(i) + ext
mergedFilePath = os.path.join(parentDir, mergedFileName)
i += 1
return mergedFilePath
parent.write_path(get_merged_vmf_path(parent.path))
# parent.write_path('out.vmf')
# Done!
progressTracker.update("Done!", finished=True)
return conflictedDeltas
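# Illustrative usage sketch (not part of the CLI below): calling do_merge()
# directly with a progress callback. The file names are placeholders; the
# callback signature mirrors ProgressTracker.update() above.
#
#   vmfs = load_vmfs(['parent.vmf', 'child_a.vmf', 'child_b.vmf'])
#   parent = get_parent(vmfs)
#   children = [vmf for vmf in vmfs if vmf is not parent]
#
#   def report(message, progress, maxProgress, finished=False):
#       print("[{}/{}] {}".format(progress, maxProgress, message))
#
#   conflicted = do_merge(
#       parent, children,
#       noParentSideEffects=True,
#       update_callback=report,
#   )
#   if conflicted:
#       print("{} deltas need manual resolution".format(len(conflicted)))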
def main(argv):
args = parse_args(argv[1:])
vmfPaths = args.vmfs
verbose = args.verbose
aggressive = args.aggressive
autoParent = not args.no_auto_parent
dumpIndividual = args.dump_individual
dumpProposed = args.dump_proposed
if dumpProposed and dumpIndividual:
sys.stderr.write(
"ERROR: --dump-individual and --dump-proposed are mutually "
"exclusive!\n"
)
return 1
startTime = datetime.now()
# Load all VMFs.
print("Loading VMFs...")
try:
vmfs = load_vmfs(vmfPaths)
except InvalidVMF as e:
sys.stderr.write(
"ERROR: {} is invalid: {}\n".format(e.path, e.message)
)
return 1
# Determine the parent VMF.
if autoParent:
parent = get_parent(vmfs)
else:
parent = vmfs[0]
# Determine the child VMFs.
children = [vmf for vmf in vmfs if vmf is not parent]
# Go!
do_merge(
parent, children,
dumpIndividual, dumpProposed,
aggressive, verbose,
)
print("Total time: {}".format(datetime.now() - startTime))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
import os
import re
import mimetypes
from types import (
IntType,
LongType,
ListType,
StringType,
UnicodeType,
BooleanType,
)
import calendar
from datetime import (
date,
datetime,
timedelta,
)
from random import choice
import string
from string import (
    ascii_uppercase,
    ascii_lowercase,
    digits,
)
import locale
import pytz
import colander
from pyramid.threadlocal import get_current_registry
################
# Phone number #
################
MSISDN_ALLOW_CHARS = map(lambda x: str(x), range(10)) + ['+']
def get_msisdn(msisdn, country='+62'):
for ch in msisdn:
if ch not in MSISDN_ALLOW_CHARS:
return
try:
i = int(msisdn)
    except ValueError:
return
if not i:
return
if len(str(i)) < 7:
return
if re.compile(r'^\+').search(msisdn):
return msisdn
if re.compile(r'^0').search(msisdn):
return '%s%s' % (country, msisdn.lstrip('0'))
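# Illustrative results with the default country prefix '+62':
#   get_msisdn('08123456789')   -> '+628123456789'
#   get_msisdn('+628123456789') -> '+628123456789'
#   get_msisdn('0812-345')      -> None  (disallowed character)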
################
# Money format #
################
def should_int(value):
int_ = int(value)
if int_ == value:
return int_
return value
def thousand(value, float_count=None):
if float_count is None: # autodetection
if type(value) in (IntType, LongType):
float_count = 0
else:
float_count = 2
return locale.format('%%.%df' % float_count, value, True)
def money(value, float_count=None, currency=None):
if value < 0:
v = abs(value)
format_ = '(%s)'
else:
v = value
format_ = '%s'
if currency is None:
currency = locale.localeconv()['currency_symbol']
s = ' '.join([currency, thousand(v, float_count)])
return format_ % s
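# Illustrative output, assuming an Indonesian locale has been configured
# beforehand (e.g. locale.setlocale(locale.LC_ALL, 'id_ID.UTF-8')) so that
# grouping and the currency symbol are defined:
#   thousand(1500000)  -> '1.500.000'
#   money(1500000)     -> 'Rp 1.500.000'
#   money(-1500000)    -> '(Rp 1.500.000)'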
###########
# Pyramid #
###########
def get_settings():
return get_current_registry().settings
def get_timezone():
settings = get_settings()
return pytz.timezone(settings.timezone)
########
# Time #
########
one_second = timedelta(1.0/24/60/60)
DateType = type(date.today())
DateTimeType = type(datetime.now())
TimeZoneFile = '/etc/timezone'
if os.path.exists(TimeZoneFile):
DefaultTimeZone = open(TimeZoneFile).read().strip()
else:
DefaultTimeZone = 'Asia/Jakarta'
def as_timezone(tz_date):
localtz = get_timezone()
if not tz_date.tzinfo:
tz_date = create_datetime(tz_date.year, tz_date.month, tz_date.day,
tz_date.hour, tz_date.minute, tz_date.second,
tz_date.microsecond)
return tz_date.astimezone(localtz)
def create_datetime(year, month, day, hour=0, minute=0, second=0,
                    microsecond=0):
tz = get_timezone()
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo=tz)
def create_date(year, month, day):
return create_datetime(year, month, day)
def create_now():
tz = get_timezone()
return datetime.now(tz)
def date_from_str(value):
separator = None
value = value.split()[0] # dd-mm-yyyy HH:MM:SS
for s in ['-', '/']:
if value.find(s) > -1:
separator = s
break
if separator:
t = map(lambda x: int(x), value.split(separator))
y, m, d = t[2], t[1], t[0]
if d > 999: # yyyy-mm-dd
y, d = d, y
else:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:])
return date(y, m, d)
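# Illustrative conversions:
#   date_from_str('25-12-2015')          -> date(2015, 12, 25)
#   date_from_str('2015-12-25 10:30:00') -> date(2015, 12, 25)
#   date_from_str('20151225')            -> date(2015, 12, 25)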
def dmy(tgl):
return tgl.strftime('%d-%m-%Y')
def dmyhms(t):
return t.strftime('%d-%m-%Y %H:%M:%S')
def next_month(year, month):
if month == 12:
month = 1
year += 1
else:
month += 1
return year, month
def best_date(year, month, day):
try:
return date(year, month, day)
except ValueError:
last_day = calendar.monthrange(year, month)[1]
return date(year, month, last_day)
def next_month_day(year, month, day):
year, month = next_month(year, month)
return best_date(year, month, day)
##########
# String #
##########
def one_space(s):
s = s.strip()
while s.find(' ') > -1:
s = s.replace(' ', ' ')
return s
def to_str(v):
typ = type(v)
if typ == DateType:
return dmy(v)
if typ == DateTimeType:
return dmyhms(v)
if v == 0:
return '0'
if typ in [UnicodeType, StringType]:
return v.strip()
elif typ is BooleanType:
return v and '1' or '0'
return v and str(v) or ''
def dict_to_str(d):
r = {}
for key in d:
val = d[key]
r[key] = to_str(val)
return r
def split(s, c=4):
r = []
while s:
t = s[:c]
r.append(t)
s = s[c:]
return ' '.join(r)
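# e.g. split('1234567890') -> '1234 5678 90' (groups of 4 by default)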
########
# File #
########
# http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
def get_random_string():
return ''.join(choice(ascii_uppercase + ascii_lowercase + digits) \
for _ in range(6))
def get_ext(filename):
return os.path.splitext(filename)[-1]
def file_type(filename):
ctype, encoding = mimetypes.guess_type(filename)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
return ctype
class SaveFile(object):
def __init__(self, dir_path):
self.dir_path = dir_path
    # Keep the file extension unchanged and make the file name unique with a
    # random prefix.
def create_fullpath(self, ext=''):
while True:
filename = get_random_string() + ext
fullpath = os.path.join(self.dir_path, filename)
if not os.path.exists(fullpath):
return fullpath
    def save(self, content, filename=None):
        ext = get_ext(filename) if filename else ''
        fullpath = self.create_fullpath(ext)
f = open(fullpath, 'wb')
f.write(content)
f.close()
return fullpath
class Upload(SaveFile):
def save(self, request, name):
input_file = request.POST[name].file
ext = get_ext(request.POST[name].filename)
fullpath = self.create_fullpath(ext)
output_file = open(fullpath, 'wb')
input_file.seek(0)
while True:
data = input_file.read(2<<16)
if not data:
break
output_file.write(data)
output_file.close()
return fullpath
import xlrd
import io
import csv, codecs, cStringIO
from email.utils import parseaddr
def xls_reader(filename):
workbook = xlrd.open_workbook(filename)
worksheet = workbook.sheet_by_name('potongan')
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = -1
    rows = []
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
curr_cell = -1
txt = []
while curr_cell < num_cells:
curr_cell += 1
# Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
cell_type = worksheet.cell_type(curr_row, curr_cell)
cell_value = worksheet.cell_value(curr_row, curr_cell)
if cell_type==1 or cell_type==2:
try:
cell_value = str(cell_value)
except:
cell_value = '0'
else:
cell_value = clean(cell_value)
if curr_cell==0 and cell_value.strip()=="Tanggal":
curr_cell=num_cells
elif curr_cell==0 and cell_value.strip()=="":
curr_cell = num_cells
curr_row = num_rows
else:
txt.append(cell_value)
if txt:
            rows.append(txt)
    return rows
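# Illustrative usage (the file name is a placeholder; the workbook must
# contain a sheet named 'potongan'):
#   rows = xls_reader('potongan.xls')
#   for row in rows:
#       print row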
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
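# Illustrative usage of the Unicode CSV helpers (Python 2; file names are
# placeholders):
#
#   with open('input.csv', 'rb') as fin:
#       for row in UnicodeReader(fin, encoding='utf-8'):
#           pass  # row is a list of unicode strings
#
#   with open('output.csv', 'wb') as fout:
#       writer = UnicodeWriter(fout, encoding='utf-8')
#       writer.writerow([u'nama', u'kota'])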
class CSVRenderer(object):
def __init__(self, info):
pass
def __call__(self, value, system):
""" Returns a plain CSV-encoded string with content-type
``text/csv``. The content-type may be overridden by
setting ``request.response.content_type``."""
request = system.get('request')
if request is not None:
response = request.response
ct = response.content_type
if ct == response.default_content_type:
response.content_type = 'text/csv'
fout = io.BytesIO() #StringIO()
fcsv = csv.writer(fout, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL) #MINIMAL)
#fcsv = UnicodeWriter(fout, delimiter=',', quotechar=',', quoting=csv.QUOTE_MINIMAL)
#print value.get('header', [])
fcsv.writerow(value.get('header', []))
fcsv.writerows(value.get('rows', []))
return fout.getvalue()
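# A minimal sketch of wiring this renderer into a Pyramid app (the renderer
# name 'csv' and the view below are assumptions, not part of this module):
#
#   config.add_renderer('csv', CSVRenderer)
#
#   @view_config(route_name='report_csv', renderer='csv')
#   def report_csv(request):
#       return {'header': ['id', 'name'], 'rows': [[1, 'Alice']]}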
################
# Months #
################
"""BULANS = (
('01', 'Januari'),
('02', 'Februari'),
('03', 'Maret'),
('04', 'April'),
('05', 'Mei'),
('06', 'Juni'),
('07', 'Juli'),
('08', 'Agustus'),
('09', 'September'),
('10', 'Oktober'),
('11', 'November'),
('12', 'Desember'),
)
"""
BULANS = {
'01': 'Januari',
'02': 'Februari',
'03': 'Maret',
'04': 'April',
'05': 'Mei',
'06': 'Juni',
'07': 'Juli',
'08': 'Agustus',
'09': 'September',
'10': 'Oktober',
'11': 'November',
'12': 'Desember',
}
def get_months(request):
return BULANS
def email_validator(node, value):
name, email = parseaddr(value)
if not email or email.find('@') < 0:
raise colander.Invalid(node, 'Invalid email format')
def row2dict(row):
d = {}
for column in row.__table__.columns:
d[column.name] = str(getattr(row, column.name))
return d
def clean(s):
r = ''
for ch in s:
if ch not in string.printable:
ch = ''
r += ch
return r
|
|
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.forms.models import modelformset_factory
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils import simplejson
from django.views.generic import ListView, DetailView, View
from .forms import OrganizationUpdateForm, PersonUpdateForm
from .models import Person, Organization
from source.utils.json import render_json_to_response
USER_DEBUG = getattr(settings, 'USER_DEBUG', False)
class PersonList(ListView):
model = Person
def get_queryset(self):
queryset = Person.live_objects.exclude(show_in_lists=False).prefetch_related('organizations')
return queryset
class PersonDetail(DetailView):
model = Person
def get_queryset(self):
queryset = Person.live_objects.prefetch_related('personlink_set', 'organizations', 'code_set', 'article_set', 'article_authors')
return queryset
class PersonSearchJson(View):
def get_queryset(self):
queryset = Person.live_objects.exclude(show_in_lists=False)
return queryset
def get(self, request, *args, **kwargs):
people = self.get_queryset()
q = self.request.GET.get('q', None)
if 'q' in self.request.GET:
people = people.filter(Q(first_name__icontains = q) | Q(last_name__icontains = q))
people = people.values('first_name', 'last_name', 'email', 'twitter_username', 'github_username', 'id')
for person in list(people):
person['name'] = '%s %s' % (person['first_name'], person['last_name'])
return render_json_to_response(list(people))
class OrganizationList(ListView):
model = Organization
def get_queryset(self):
queryset = Organization.live_objects.exclude(show_in_lists=False).all()
return queryset
class OrganizationDetail(DetailView):
model = Organization
def get_queryset(self):
queryset = Organization.live_objects.prefetch_related('organizationlink_set')
return queryset
class PersonUpdate(View):
template_name = "people/person_update.html"
form_message = ''
def get_success_url(self):
return reverse('person_update')
def get_organization(self):
user = self.request.user
if user.is_authenticated() and user.is_active:
organization = get_object_or_404(Organization, is_live=True, email=user.email)
return organization
elif USER_DEBUG:
organization = get_object_or_404(Organization, is_live=True, slug='spokesman-review')
return organization
return None
def get_person(self, pk=None, organization=None, task=None):
user = self.request.user
if USER_DEBUG or (user.is_authenticated() and user.is_active):
if pk and organization:
# allow for 'add' task
if task == 'add':
person = get_object_or_404(Person, is_live=True, pk=pk)
else:
# ensure that Organization admin can modify this record
person = get_object_or_404(Person, is_live=True, pk=pk, organizations=organization)
else:
# or that the authenticated user *is* this person
person = get_object_or_404(Person, is_live=True, email=user.email)
return person
return None
def create_person(self, data, organization):
name = data['name']
# make sure we actually have been given a name
if name:
try:
first_name, last_name = name.split(' ', 1)
            except ValueError:
first_name, last_name = name, ''
person_kwargs = {
'first_name': first_name,
'last_name': last_name,
'slug': slugify('-'.join([first_name, last_name]))
}
i = 0
found = True
while found:
i += 1
try:
person = Person.objects.get(slug=person_kwargs['slug'])
person_kwargs['slug'] = slugify('-'.join([first_name, last_name, str(i)]))
except ObjectDoesNotExist:
person = Person(**person_kwargs)
person.save()
person.organizations.add(organization)
found = False
return person
return None
def process_form(self, person, data):
person_form = PersonUpdateForm(instance=person, data=data)
if person_form.is_valid():
person_form.save()
form_message = 'Saved!'
else:
error_message = ''
for field in person_form:
if field.errors:
add_label = field.label
add_errors = ', '.join([error for error in field.errors])
error_message += '%s: %s ' % (add_label, add_errors)
form_message = error_message
return form_message
def post(self, request, *args, **kwargs):
data = request.POST
form_message = ''
success_url = self.get_success_url()
if 'organization_task' in data:
success_url = reverse('organization_update')
self.template_name = "people/organization_update.html"
task = data['organization_task']
organization = self.get_organization()
if task == 'create':
person = self.create_person(data, organization)
form_message = 'Created'
success_url += '?new=%s' % person.pk
else:
person = self.get_person(data['person'], organization, task)
if task == 'update':
form_message = self.process_form(person, data)
elif task == 'remove':
person.organizations.remove(organization)
form_message = 'Removed'
elif task == 'add':
person.organizations.add(organization)
form_message = 'Added'
else:
person = self.get_person()
form_message = self.process_form(person, data)
if request.is_ajax():
result = {
'message': form_message,
'person': {
'name': person.name(),
'pk': person.pk,
'first_name': person.first_name,
'last_name': person.last_name,
'email': person.email,
'twitter_username': person.twitter_username,
'github_username': person.github_username
}
}
return render_json_to_response(result)
# if for some reason we're not hitting via ajax
messages.success(request, form_message)
return redirect(success_url)
class OrganizationUpdate(View):
template_name = "people/organization_update.html"
error_message = ""
def get_organization(self, user):
if user.is_authenticated() and user.is_active:
try:
organization = Organization.objects.get(is_live=True, email=user.email)
return organization
except Organization.DoesNotExist:
self.error_message = "No Organization account found that matches the email address used to log in."
except Organization.MultipleObjectsReturned:
self.error_message = "Uh-oh, somehow there are multiple Organization accounts attached to this email address. Please contact us for cleanup."
return None
def get(self, request, *args, **kwargs):
context = {}
user = request.user
if user.is_authenticated() and user.is_active:
organization = self.get_organization(user)
if organization:
organization_form = OrganizationUpdateForm(instance=organization)
context.update({
'user': request.user,
'organization': organization,
'organization_form': organization_form,
'default_job_listing_end_date': datetime.today().date() + timedelta(days=30)
})
else:
context.update({
'error_message': self.error_message
})
return render_to_response(self.template_name, context, context_instance=RequestContext(request))
def post(self, request, *args, **kwargs):
context = {}
user = request.user
organization = self.get_organization(user)
if organization:
organization_form = OrganizationUpdateForm(instance=organization, data=request.POST)
context.update({
'user': request.user,
'organization': organization,
'organization_form': organization_form,
})
if organization_form.is_valid():
organization_form.save()
if request.is_ajax():
result = {'success': 'True'}
return render_json_to_response(result)
# if for some reason we're not hitting via ajax
messages.success(request, 'Updates saved')
return render_to_response(self.template_name, context, context_instance=RequestContext(request))
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'depot_tools/bot_update',
'file',
'depot_tools/gclient',
'depot_tools/git',
'gsutil',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'recipe_engine/time',
'v8',
]
REPO = 'https://chromium.googlesource.com/v8/v8'
CLUSTERFUZZ = 'https://cluster-fuzz.appspot.com/testcase?key=%d'
SHOW_MAX_ISSUES = 10
CANDIDATE_REF = 'refs/heads/candidate'
LKGR_REF = 'refs/heads/lkgr'
ROLL_REF = 'refs/heads/roll'
STATUS_URL = 'https://v8-status.appspot.com'
SEC_TO_HOURS = 60 * 60
TIME_LIMIT_HOURS = 8
TIME_LIMIT_SEC = TIME_LIMIT_HOURS * SEC_TO_HOURS
def GetRef(api, repo, ref):
# Fetch ref from remote.
api.git(
'fetch', repo, '+%s:%s' % (ref, ref),
cwd=api.path['checkout'],
)
# Read ref locally.
step_result = api.git(
'show-ref', '-s', ref,
name='git show-ref %s' % ref,
cwd=api.path['checkout'],
stdout=api.raw_io.output(),
)
result = step_result.stdout.strip()
step_result.presentation.logs['ref'] = [result]
return result
def PushRef(api, repo, ref, hsh):
api.git(
'update-ref', ref, hsh,
cwd=api.path['checkout'],
)
api.git(
'push', repo, '%s:%s' % (ref, ref),
cwd=api.path['checkout'],
)
# Upload log for debugging.
ref_log_file_name = ref.replace('/', '_') + '.log'
ref_log_path = api.path['slave_build'].join(ref_log_file_name)
log = []
if api.path.exists(ref_log_path):
log.append(api.file.read(
'Read %s' % ref_log_file_name, ref_log_path, test_data=''))
log.append('%s %s' % (hsh, str(api.time.time())))
api.file.write('Write %s' % ref_log_file_name, ref_log_path, '\n'.join(log))
api.gsutil.upload(
ref_log_path,
'chromium-v8-auto-roll',
api.path.join('v8_release_process', ref_log_file_name),
)
def ReadTimeStamp(api, name):
return int(float(
api.file.read(
name,
api.path['slave_build'].join('timestamp.txt'),
).strip()))
def WriteTimeStamp(api, name, timestamp):
api.file.write(
name,
api.path['slave_build'].join('timestamp.txt'),
str(timestamp),
)
def LogStep(api, text):
api.step('log', ['echo', text])
def AgeLimitBailout(api, new_date, old_date):
age = (new_date - old_date) / SEC_TO_HOURS
LogStep(api, 'Current candidate is %dh old (limit: %dh).' %
(age, TIME_LIMIT_HOURS))
return age < TIME_LIMIT_HOURS
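# Example: with TIME_LIMIT_HOURS = 8, a candidate whose stored timestamp is
# 5h old gives age < 8, so this returns True and RunSteps() skips promoting
# the candidate/roll refs; once the candidate is 8h or older the refs are
# updated and the timestamp is reset.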
def GetLKGR(api):
step_result = api.python(
'get new lkgr',
api.path['build'].join('scripts', 'tools', 'runit.py'),
[api.path['build'].join('scripts', 'tools', 'pycurl.py'),
'%s/lkgr' % STATUS_URL],
stdout=api.raw_io.output(),
)
lkgr = step_result.stdout.strip()
step_result.presentation.logs['logs'] = [
'New candidate: %s (%s)' % (lkgr, str(api.time.time())),
]
return lkgr
def ClusterfuzzHasIssues(api):
step_test_data = lambda: api.json.test_api.output([])
step_result = api.python(
'check clusterfuzz',
api.path['checkout'].join(
'tools', 'release', 'check_clusterfuzz.py'),
['--key-file', api.path['slave_build'].join('.cf_key'),
'--results-file', api.json.output(add_json_log=False)],
# Note: Output is suppressed for security reasons.
stdout=api.raw_io.output('out'),
stderr=api.raw_io.output('err'),
step_test_data=step_test_data,
)
results = step_result.json.output
if results:
step_result.presentation.text = 'Found %s issues.' % len(results)
for result in results[:SHOW_MAX_ISSUES]:
step_result.presentation.links[str(result)] = CLUSTERFUZZ % int(result)
step_result.presentation.status = api.step.FAILURE
return True
return False
def RunSteps(api):
repo = api.properties.get('repo', REPO)
fail_on_exit = []
api.gclient.set_config('v8')
api.bot_update.ensure_checkout(force=True, no_shallow=True)
# Get current lkgr ref and update.
new_lkgr = GetLKGR(api)
current_lkgr = GetRef(api, repo, LKGR_REF)
if new_lkgr != current_lkgr:
PushRef(api, repo, LKGR_REF, new_lkgr)
else:
LogStep(api, 'There is no new lkgr.')
# Get current candidate and update roll ref.
current_candidate = GetRef(api, repo, CANDIDATE_REF)
current_roll = GetRef(api, repo, ROLL_REF)
try:
current_date = ReadTimeStamp(api, 'check timestamp')
except Exception:
# If anything goes wrong, the process restarts with a fresh timestamp.
current_date = api.time.time()
WriteTimeStamp(api, 'init timestamp', current_date)
fail_on_exit.append(
'Timestamp file was missing. Starting new candidate cycle.')
# Check for clusterfuzz problems before bailout to be more informative.
clusterfuzz_has_issues = ClusterfuzzHasIssues(api)
if clusterfuzz_has_issues:
fail_on_exit.append('Clusterfuzz had issues.')
new_date = api.time.time()
if not AgeLimitBailout(api, new_date, current_date):
if current_candidate != new_lkgr:
PushRef(api, repo, CANDIDATE_REF, new_lkgr)
WriteTimeStamp(api, 'update timestamp', api.time.time())
else:
LogStep(api, 'There is no new candidate.')
# Promote the successful candidate to the roll ref in order to get
# rolled. This is independent of a new lkgr. Every candidate that is
# more than 8h old is promoted.
if current_candidate != current_roll:
PushRef(api, repo, ROLL_REF, current_candidate)
if fail_on_exit:
raise api.step.StepFailure(' '.join(fail_on_exit))
def GenTests(api):
hsh_old = '74882b7a8e55268d1658f83efefa1c2585cee723'
hsh_recent = '0df953c9e12c1e3b0e37f2d4ef1ef8c319e095cb'
hsh_new = 'c1a7fd0c98a80c52fcf6763850d2ee1c41cfe8d6'
date_old = str(100.0 * SEC_TO_HOURS + 0.5)
date_recent = str(105.0 * SEC_TO_HOURS + 0.5)
date_new = str(110.0 * SEC_TO_HOURS + 0.5)
def Test(name, current_lkgr, current_date, new_lkgr, new_date,
current_roll=None):
current_roll = current_roll or current_lkgr
return (
api.test(name) +
api.properties.generic(mastername='client.v8.fyi',
buildername='Auto-roll - release process') +
api.override_step_data(
'get new lkgr',
api.raw_io.stream_output(new_lkgr, stream='stdout'),
) +
api.override_step_data(
'git show-ref %s' % LKGR_REF,
api.raw_io.stream_output(current_lkgr, stream='stdout'),
) +
api.override_step_data(
'git show-ref %s' % CANDIDATE_REF,
api.raw_io.stream_output(current_lkgr, stream='stdout'),
) +
api.override_step_data(
'git show-ref %s' % ROLL_REF,
api.raw_io.stream_output(current_roll, stream='stdout'),
) +
api.override_step_data(
'check timestamp',
api.raw_io.output(current_date),
) +
api.time.seed(int(float(new_date))) +
api.time.step(2) +
api.path.exists(api.path['slave_build'].join(
LKGR_REF.replace('/', '_') + '.log'))
)
yield Test(
'same_lkgr',
hsh_old,
date_old,
hsh_old,
date_new,
)
yield Test(
'recent_lkgr',
hsh_recent,
date_recent,
hsh_new,
date_new,
)
yield Test(
'update',
hsh_old,
date_old,
hsh_new,
date_new,
)
yield Test(
'update_roll_only',
hsh_recent,
date_old,
hsh_recent,
date_new,
current_roll=hsh_old,
)
yield Test(
'clusterfuzz_issues',
hsh_recent,
date_old,
hsh_recent,
date_new,
current_roll=hsh_old,
) + api.override_step_data('check clusterfuzz', api.json.output([1, 2]))
yield Test(
'new_lkgr_failed_timestamp',
hsh_recent,
date_recent,
hsh_new,
date_new,
) + api.override_step_data('check timestamp', retcode=1)
|
|
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
def __init__(self, *args):
super(LambdaBase, self).__init__(*args)
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def __init__(self, *args):
super(Lambda, self).__init__(*args)
self.lambda_func = identity
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def __init__(self, *args):
super(LambdaMap, self).__init__(*args)
self.lambda_func = identity
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def __init__(self, *args):
super(LambdaReduce, self).__init__(*args)
self.lambda_func = add
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
def identity(x): return x
def add(x, y): return x + y
resnext101_64x4d_features = nn.Sequential(#Sequential,
nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias = False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3), (2, 2), (1, 1)),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(64, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
nn.Sequential(#Sequential,
nn.Conv2d(64, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(256),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
nn.Sequential(#Sequential,
nn.Conv2d(256, 512, (1, 1), (2, 2), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(512),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (2, 2), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
nn.Sequential(#Sequential,
nn.Conv2d(512, 1024, (1, 1), (2, 2), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(1024),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
),
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048, 2048, (3, 3), (2, 2), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
nn.Sequential(#Sequential,
nn.Conv2d(1024, 2048, (1, 1), (2, 2), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048, 2048, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
nn.Sequential(#Sequential,
LambdaMap( #ConcatTable,
nn.Sequential(#Sequential,
nn.Sequential(#Sequential,
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048, 2048, (3, 3), (1, 1), (1, 1), 1, 64, bias = False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias = False),
nn.BatchNorm2d(2048),
),
Lambda(), #Identity,
),
LambdaReduce(), #CAddTable,
nn.ReLU(),
),
)
)
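# Illustrative wrapper (an assumption, not part of the converted graph above):
# attach the usual ImageNet-style head to the feature extractor. For 224x224
# inputs the features come out as (N, 2048, 7, 7).
class ResNeXt101_64x4d(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNeXt101_64x4d, self).__init__()
        self.features = resnext101_64x4d_features
        self.avgpool = nn.AvgPool2d((7, 7), (1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        x = self.features(x)       # (N, 2048, 7, 7)
        x = self.avgpool(x)        # (N, 2048, 1, 1)
        x = x.view(x.size(0), -1)  # flatten to (N, 2048)
        return self.fc(x)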
|
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Rt(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"type": "string", "description": "VPN extended community", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "rt"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Soo(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"type": "string", "description": "VPN extended community", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "soo"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Extcommunity(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "extcommunity"
self.DeviceProxy = ""
self.rt = {}
self.soo = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class Origin(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param egp: {"default": 0, "not-list": ["igp", "incomplete"], "type": "number", "description": "remote EGP", "format": "flag"}
:param incomplete: {"default": 0, "not-list": ["egp", "igp"], "type": "number", "description": "unknown heritage", "format": "flag"}
:param igp: {"default": 0, "not-list": ["egp", "incomplete"], "type": "number", "description": "local IGP", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "origin"
self.DeviceProxy = ""
self.egp = ""
self.incomplete = ""
self.igp = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AggregatorAs(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ip: {"type": "string", "description": "IP address of aggregator", "format": "ipv4-address"}
:param asn: {"description": "AS number", "minimum": 1, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "aggregator-as"
self.DeviceProxy = ""
self.ip = ""
self.asn = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Aggregator(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "aggregator"
self.DeviceProxy = ""
self.aggregator_as = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class Weight(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param weight_val: {"description": "Weight value", "minimum": 0, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "weight"
self.DeviceProxy = ""
self.weight_val = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Level(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"enum": ["level-1", "level-1-2", "level-2"], "type": "string", "description": "'level-1': Export into a level-1 area; 'level-1-2': Export into level-1 and level-2; 'level-2': Export into level-2 sub-domain; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "level"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class NextHop(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param address: {"type": "string", "description": "IP address of next hop", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "next-hop"
self.DeviceProxy = ""
self.address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Ip(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ip"
self.DeviceProxy = ""
self.next_hop = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class Metric(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"minLength": 1, "maxLength": 128, "type": "string", "description": "Metric value", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "metric"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AsPath(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param num: {"description": "AS number", "minimum": 1, "type": "number", "maximum": 4294967295, "format": "number"}
:param num2: {"description": "AS number", "minimum": 1, "type": "number", "maximum": 4294967295, "format": "number"}
:param prepend: {"type": "string", "description": "Prepend to the as-path (AS number)", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "as-path"
self.DeviceProxy = ""
self.num = ""
self.num2 = ""
self.prepend = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class CommList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param name: {"description": "Community-list name", "format": "string", "minLength": 1, "not-list": ["v-std", "v-exp"], "maxLength": 128, "type": "string"}
:param v_std: {"description": "Community-list number (standard)", "format": "number", "not-list": ["v-exp", "name"], "maximum": 99, "minimum": 1, "type": "number"}
:param v_exp_delete: {"default": 0, "type": "number", "description": "Delete matching communities", "format": "flag"}
:param v_exp: {"description": "Community-list number (expanded)", "format": "number", "not-list": ["v-std", "name"], "maximum": 199, "minimum": 100, "type": "number"}
:param name_delete: {"default": 0, "type": "number", "description": "Delete matching communities", "format": "flag"}
:param delete: {"default": 0, "type": "number", "description": "Delete matching communities", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "comm-list"
self.DeviceProxy = ""
self.name = ""
self.v_std = ""
self.v_exp_delete = ""
self.v_exp = ""
self.name_delete = ""
self.delete = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class LocalPreference(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param val: {"description": "Preference value", "minimum": 0, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "local-preference"
self.DeviceProxy = ""
self.val = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Tag(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"description": "Tag value", "minimum": 0, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "tag"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Local(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param address: {"type": "string", "description": "IPv6 address of next hop", "format": "ipv6-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "local"
self.DeviceProxy = ""
self.address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class NextHop1(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param address: {"type": "string", "description": "global address of next hop", "format": "ipv6-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "next-hop-1"
self.DeviceProxy = ""
self.local = {}
self.address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Ipv6(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ipv6"
self.DeviceProxy = ""
self.next_hop_1 = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class DampeningCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param dampening_max_supress: {"description": "Maximum duration to suppress a stable route(minutes)", "minimum": 1, "type": "number", "maximum": 255, "format": "number"}
:param dampening: {"default": 0, "type": "number", "description": "Enable route-flap dampening", "format": "flag"}
:param dampening_penalty: {"description": "Un-reachability Half-life time for the penalty(minutes)", "minimum": 1, "type": "number", "maximum": 45, "format": "number"}
:param dampening_half_time: {"description": "Reachability Half-life time for the penalty(minutes)", "minimum": 1, "type": "number", "maximum": 45, "format": "number"}
:param dampening_supress: {"description": "Value to start suppressing a route", "minimum": 1, "type": "number", "maximum": 20000, "format": "number"}
:param dampening_reuse: {"description": "Value to start reusing a route", "minimum": 1, "type": "number", "maximum": 20000, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "dampening-cfg"
self.DeviceProxy = ""
self.dampening_max_supress = ""
self.dampening = ""
self.dampening_penalty = ""
self.dampening_half_time = ""
self.dampening_supress = ""
self.dampening_reuse = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class OriginatorId(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param originator_ip: {"type": "string", "description": "IP address of originator", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "originator-id"
self.DeviceProxy = ""
self.originator_ip = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MetricType(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"enum": ["external", "internal", "type-1", "type-2"], "type": "string", "description": "'external': IS-IS external metric type; 'internal': IS-IS internal metric type; 'type-1': OSPF external type 1 metric; 'type-2': OSPF external type 2 metric; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "metric-type"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Set(A10BaseClass):
"""Class Description::
Set values in destination routing protocol.
Class set supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param atomic_aggregate: {"default": 0, "optional": true, "type": "number", "description": "BGP atomic aggregate attribute", "format": "flag"}
:param community: {"optional": true, "type": "string", "description": "BGP community attribute", "format": "string-rlx"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/route-map/{tag}+{action}+{sequence}/set`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "set"
self.a10_url="/axapi/v3/route-map/{tag}+{action}+{sequence}/set"
self.DeviceProxy = ""
self.extcommunity = {}
self.origin = {}
self.aggregator = {}
self.weight = {}
self.level = {}
self.ip = {}
self.metric = {}
self.as_path = {}
self.comm_list = {}
self.atomic_aggregate = ""
self.community = ""
self.local_preference = {}
self.tag = {}
self.ipv6 = {}
self.dampening_cfg = {}
self.originator_id = {}
self.metric_type = {}
self.uuid = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
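# Illustrative usage sketch (added for clarity; not part of the generated
# module). The keyword arguments map directly onto the attributes set in
# Set.__init__ above; `device_proxy` is a hypothetical, already-configured
# proxy object (see `common/device_proxy.py`). How the object is pushed to
# the device is handled by the A10BaseClass machinery and is not shown here.
#
#   set_obj = Set(community="100:1",
#                 atomic_aggregate=1,
#                 DeviceProxy=device_proxy)
#   # The object corresponds to the endpoint
#   #   /axapi/v3/route-map/{tag}+{action}+{sequence}/set
#   # where {tag}, {action} and {sequence} come from the parent route-map.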
|
|
import sys
import os
import json
from collections import OrderedDict
from PyQt5.QtWidgets import QDialog, QApplication, QGridLayout, QLabel, \
QSpinBox, QDoubleSpinBox, QCheckBox, QPushButton, QLineEdit, QSizePolicy, \
QMessageBox, QSpacerItem, QFileDialog, QComboBox
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from tierpsy.helper.params.tracker_param import TrackerParams, info_param, default_param, valid_options
from tierpsy.gui.HDF5VideoPlayer import LineEditDragDrop
from tierpsy import DFLT_FILTER_FILES
def save_params_json(json_file, param4file):
# save data into the json file
with open(json_file, 'w') as fid:
json.dump(param4file, fid, indent=4)
class ParamWidget():
def __init__(self, name, value=None, widget=None,
info_param=info_param, valid_options=valid_options):
self.name = name
if widget is not None:
self.widget = widget
else:
assert value is not None
self.widget = self._create(name, value)
        if isinstance(self.widget, (QDoubleSpinBox, QSpinBox)):
            # On some platforms (e.g. Windows 7) the spin-box range seems to be
            # stored as a C int, so keep the limits well inside that range.
            self.widget.setMinimum(int(-1e9))
            self.widget.setMaximum(int(1e9))
elif isinstance(self.widget, QComboBox):
if name in valid_options:
self.widget.addItems(valid_options[name])
elif name == 'filter_model_name':
self.widget.addItems([''] + DFLT_FILTER_FILES)
self.widget.setEditable(True)
if not isinstance(self.widget, QGridLayout):
self.widget.setToolTip(info_param[name])
else:
for n in range(self.widget.count()):
self.widget.itemAt(n).widget().setToolTip(info_param[name])
if value is not None:
self.write(value)
def _create(self, name, value):
value_type = type(value)
if name in valid_options or name == 'filter_model_name':
widget = QComboBox()
elif value_type is bool:
widget = QCheckBox(name)
elif value_type is int:
widget = QSpinBox()
elif value_type is float:
widget = QDoubleSpinBox()
elif value_type is str:
widget = QLineEdit(value)
        elif value_type is list or value_type is tuple:
            widget = QGridLayout()
            for icol, val in enumerate(value):
                # use an integer spin box only when the element itself is an int
                spinbox = QSpinBox() if isinstance(val, int) else QDoubleSpinBox()
                # keep the limits inside the 32-bit range accepted by QSpinBox
                spinbox.setMinimum(int(-1e9))
                spinbox.setMaximum(int(1e9))
                spinbox.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
                widget.addWidget(spinbox, 1, icol, 1, 2)
else:
raise ValueError('unknown type {}'.format(value_type))
return widget
def read(self):
if isinstance(self.widget, (QDoubleSpinBox, QSpinBox)):
return self.widget.value()
elif isinstance(self.widget, QCheckBox):
return self.widget.isChecked()
elif isinstance(self.widget, QLineEdit):
return self.widget.text()
elif isinstance(self.widget, QGridLayout):
return [self.widget.itemAt(ii).widget().value() for ii in range(self.widget.count())]
elif isinstance(self.widget, QComboBox):
return self.widget.currentText()
else:
raise ValueError('unknown type {}'.format(type(self.widget)))
def write(self, value):
if isinstance(self.widget, (QDoubleSpinBox, QSpinBox)):
self.widget.setValue(value)
elif isinstance(self.widget, QCheckBox):
self.widget.setChecked(value)
elif isinstance(self.widget, QLineEdit):
self.widget.setText(value)
elif isinstance(self.widget, QGridLayout):
for ii, val in enumerate(value):
self.widget.itemAt(ii).widget().setValue(val)
elif isinstance(self.widget, QComboBox):
index = self.widget.findText(value)
self.widget.setCurrentIndex(index)
else:
raise ValueError('unknown type {}'.format(type(self.widget)))
class ParamWidgetMapper():
'''
    Class used to read/write parameter values from/to the different input widgets.
    The corresponding widget must be an attribute of the central widget named in
    the form p_(argument_name).
'''
def __init__(self,
central_widget,
default_param=default_param,
info_param=info_param,
valid_options=valid_options
):
self.params_widgets = {}
self.default_param=default_param
for attr_name in dir(central_widget):
if attr_name.startswith('p_'):
param_name = attr_name[2:]
widget = getattr(central_widget, attr_name)
w = ParamWidget(param_name,
widget=widget,
value=default_param[param_name],
info_param=info_param,
valid_options=valid_options
)
self.params_widgets[param_name] = w
def __setitem__(self, param_name, value):
assert param_name in self.params_widgets
if value is None:
return None
else:
self.params_widgets[param_name].write(value)
def __getitem__(self, param_name):
w = self.params_widgets[param_name]
if w.widget.isEnabled():
return w.read()
else:
return self.default_param[param_name]
def __iter__(self):
self.remaining_names = list(self.params_widgets.keys())
return self
def __next__(self):
if len(self.remaining_names)==0:
raise StopIteration
return self.remaining_names.pop(0)
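# Illustrative sketch (added for clarity; not part of the original module):
# intended use of ParamWidgetMapper. It assumes a central widget that exposes
# one input widget per parameter as an attribute named p_<parameter name>;
# 'mask_min_area' below is only an example parameter name.
#
#   mapper = ParamWidgetMapper(central_widget)
#   mapper['mask_min_area'] = 50         # write the value into the widget
#   value = mapper['mask_min_area']      # read it back (returns the default
#                                        # if the widget is disabled)
#   for name in mapper:                  # iterate over all mapped parameters
#       print(name, mapper[name])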
class GetAllParameters(QDialog):
file_saved = pyqtSignal(str)
def __init__(self, param_file='', param_per_row=5):
super(GetAllParameters, self).__init__()
self.param_file = param_file
self.param_per_row = param_per_row
self.initUI()
self.updateParamFile(param_file)
self.pushbutton_save.clicked.connect(self.saveParamFile)
self.pushbutton_file.clicked.connect(self.getParamFile)
def closeEvent(self, event):
currentparams = self._readParams()
if self.lastreadparams != currentparams:
reply = QMessageBox.question(
self,
'Message',
                '''You have selected new parameters. Do you want to save them?''',
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
if not self.saveParamFile():
return
super(GetAllParameters, self).closeEvent(event)
def initUI(self):
grid = QGridLayout()
self.setLayout(grid)
self.widgetlabels = {}
for ii, (name, value) in enumerate(default_param.items()):
row = ii // self.param_per_row * 2
col = (ii % self.param_per_row)
w = ParamWidget(name, value=value)
self.widgetlabels[name] = w
if isinstance(w.widget, QCheckBox):
grid.addWidget(w.widget, row, col, 2, 1)
w.widget.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
else:
label = QLabel(name)
grid.addWidget(label, row, col, 1, 1)
label.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
if isinstance(w.widget, QGridLayout):
grid.addLayout(w.widget, row+1, col, 1, 1)
else:
grid.addWidget(w.widget, row+1, col, 1, 1)
w.widget.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
#assert all(x for x in self.widgetlabels)
spacer = QSpacerItem(
40,
20,
QSizePolicy.Preferred,
QSizePolicy.Preferred)
self.pushbutton_save = QPushButton('Save')
self.pushbutton_file = QPushButton('Select File')
self.lineEdit_file = QLineEdit(self.param_file)
last_row = len(default_param) // self.param_per_row * 2 + 3
last_col = max(self.param_per_row - 1, 3)
grid.addWidget(self.pushbutton_save, last_row, 0, 1, 1)
grid.addWidget(self.pushbutton_file, last_row, last_col, 1, 1)
grid.addWidget(self.lineEdit_file, last_row, 1, 1, last_col - 1)
grid.addItem(spacer, last_row - 1, 0, 1, 1)
LineEditDragDrop(
self.lineEdit_file,
self.updateParamFile,
os.path.isfile)
# used to find if anything was modified.
self.lastreadparams = self._readParams()
self.show()
self.setAttribute(Qt.WA_DeleteOnClose)
    # file dialog to select the parameters (json) file
def getParamFile(self):
json_file, _ = QFileDialog.getOpenFileName(
self, "Find parameters file", '', "JSON files (*.json);; All (*)")
if json_file:
self.updateParamFile(json_file)
def updateParamFile(self, json_file):
# set the widgets with the default parameters, in case the parameters are not given
# by the json file.
if os.path.exists(json_file):
try:
params = TrackerParams(json_file)
json_param = params.p_dict
except (OSError, UnicodeDecodeError, json.decoder.JSONDecodeError):
QMessageBox.critical(
self,
'Cannot read parameters file.',
"Cannot read parameters file. Try another file",
QMessageBox.Ok)
return
else:
json_param = {}
for param_name in json_param:
if param_name not in self.widgetlabels:
QMessageBox.critical(
self, '', "'%s' is not a valid variable. Please correct the parameters file" %
param_name, QMessageBox.Ok)
return
        # Set the parameters in the correct widgets. Any parameter not contained
        # in the json file will be kept at its default value.
for name in self.widgetlabels:
w = self.widgetlabels[name]
if name in json_param:
value = json_param[name]
else:
value = default_param[name]
w.write(value)
self.lineEdit_file.setText(json_file)
# used to find if anything was modified.
self.lastreadparams = self._readParams()
def _readParams(self):
# read all the values in the GUI
parameters = {}
for name in self.widgetlabels:
parameters[name] = self.widgetlabels[name].read()
return parameters
@pyqtSlot()
def saveParamFile(self):
json_file = self.lineEdit_file.text()
if not json_file:
QMessageBox.critical(
self,
'No parameter file name given.',
'No parameter file name given. Please select name using the "Parameters File" button',
QMessageBox.Ok)
return 0
if os.path.exists(json_file):
reply = QMessageBox.question(
self,
'Message',
'''The parameters file already exists. Do you want to overwrite it?''',
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.No:
return 0
# read all the values in the GUI
param4file = self._readParams()
self.lastreadparams = param4file
save_params_json(json_file, param4file)
self.file_saved.emit(json_file)
return 1
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = GetAllParameters()
sys.exit(app.exec_())
|
|
'''
:mod:`em_graph` -- External memory graph implementation
=======================================================
.. module: em_graph
:platform: Unix, Windows
:synopsis: External memory graph implementation
.. moduleauthor:: huku <huku@grhack.net>
About
-----
A simple external memory graph implementation built on top of external memory
dictionaries.
An :class:`EMGraph` is composed of four private ``EMDict`` instances:
* The first holds the graph's adjacency structure. That is, each vertex is a
  key that maps to a normal Python set holding the vertex's successors. Notice
  that we assume that each vertex's adjacency list can fit in main memory (and,
  in fact, this is an assumption made by all external memory algorithms).
* The second holds the graph's transpose, the inverse adjacency lists.
* The third maps each vertex to a normal Python dictionary, which, in turn,
maps attribute names to their values. You can use the relevant :class:`EMGraph`
API to set vertex attributes as shown below:
.. code-block:: python
graph.add_vertex_attribute(vertex, 'visited', True)
graph.add_vertex_attribute(vertex, 'visited', False)
graph.remove_vertex_attribute(vertex, 'visited')
* The fourth does a similar job, but maps graph edges to their attributes
instead. The relevant :class:`EMGraph` API allows you to set a weight value,
for example, to each edge as shown below:
.. code-block:: python
edge = (tail_vertex, head_vertex)
graph.add_edge_attribute(edge, 'weight', 0.4)
A vertex can be any object as long as it's ``cPickle`` friendly. To avoid strange
behavior, make sure your objects implement ``__eq__()`` and ``__hash__()``. People
already familiar with Python object serialization are aware of these complications;
knowing this in advance will save you many hours of debugging.
An example vertex object is shown in the following example:
.. code-block:: python
class VertexObject(object):
def __init__(self, vertex_id):
self.vertex_id = vertex_id
def __eq__(self, other):
return self.vertex_id == other.vertex_id
def __hash__(self):
return self.vertex_id
An edge is just a 2-tuple of vertex objects.
Classes
-------
'''
__author__ = 'huku <huku@grhack.net>'
import sys
import os
try:
import pyrsistence
except ImportError:
sys.exit('Pyrsistence not installed?')
class EMGraph(object):
'''
This class represents an external memory graph.
.. automethod:: __init__
.. automethod:: _add_attribute
.. automethod:: _remove_attribute
.. automethod:: _get_attribute
'''
def __init__(self, dirname):
'''
:param dirname: Directory where memory mapped files will be stored. The
directory is created if it does not exist.
'''
        # Create the container directory if it is not there.
        if not os.access(dirname, os.F_OK):
            os.makedirs(dirname, 0o750)
# Create new, or open existing external memory dictionaries. See the
# module's documentation for more information on each dictionary.
self._graph = pyrsistence.EMDict('%s/graph' % dirname)
self._transpose_graph = pyrsistence.EMDict('%s/transpose_graph' % dirname)
self._vertex_attributes = pyrsistence.EMDict('%s/vertex_attributes' % dirname)
self._edge_attributes = pyrsistence.EMDict('%s/edge_attributes' % dirname)
def __del__(self):
self.close()
def _add_attribute(self, attributes, subject, name, value):
'''
Template function used for implementing :func:`add_vertex_attribute()`
and :func:`add_edge_attribute()`, both defined below. Adds or updates
subject attribute *name* with value *value*. Previous value if any is
returned.
:param attributes: An external memory dictionary that maps subjects to
normal Python dictionaries, which, in turn, map attribute names to
their values.
:param subject: The subject whose attributes will be updated.
:param name: Name of attribute to add or update.
:param value: Value assigned to attribute *name*.
:returns: Previous attribute value, if any.
:rtype: ``object``
.. warning:: This is a private function, don't use it directly.
'''
prev_value = None
# Check if the subject has any attributes set.
if subject in attributes:
# Read dictionary of subject attributes and the current attribute
# value if one is set.
subject_attributes = attributes[subject]
prev_value = subject_attributes.get(name, None)
# Add or replace attribute value.
subject_attributes[name] = value
# Update subject's attributes in attributes container.
attributes[subject] = subject_attributes
return prev_value
def _remove_attribute(self, attributes, subject, name):
'''
Template function used for implementing :func:`remove_vertex_attribute()`
and :func:`remove_edge_attribute()`, both defined below. Removes subject
attribute *name*. Previous value, if any, is returned.
:param attributes: An external memory dictionary that maps subjects to
normal Python dictionaries, which, in turn, map attribute names to
their values.
:param subject: The subject whose attributes will be updated.
:param name: Name of attribute to remove.
:returns: Previous attribute value, if any.
:rtype: ``object``
.. warning:: This is a private function, don't use it directly.
'''
value = None
# Check if the subject has any attributes set.
if subject in attributes:
# Read dictionary of subject attributes and the current attribute
# value if one is set.
subject_attributes = attributes[subject]
value = subject_attributes.get(name, None)
# Delete given attribute.
del subject_attributes[name]
# Update subject's attributes in attribute container.
attributes[subject] = subject_attributes
return value
def _get_attribute(self, attributes, subject, name):
'''
A template function used for implementing :func:`get_vertex_attribute()`
and :func:`get_edge_attribute()`, both defined below. Returns the value
of subject attribute *name*.
:param attributes: An external memory dictionary that maps subjects to
normal Python dictionaries, which, in turn, map attribute names to
their values.
:param subject: The subject whose attributes will be looked up.
:param name: Name of attribute whose value to return.
:returns: Attribute value or ``None``.
:rtype: ``object``
.. warning:: This is a private function, don't use it directly.
'''
value = None
# Check if the subject has any attributes set.
if subject in attributes:
# Read dictionary of subject attributes and return current attribute
# value or `None'.
subject_attributes = attributes[subject]
value = subject_attributes.get(name, None)
return value
def add_vertex(self, vertex):
'''
Add a vertex in the graph.
:param vertex: The vertex to add in the graph.
'''
# Make sure we don't overwrite existing vertex.
if vertex not in self._graph:
self._graph[vertex] = set()
self._transpose_graph[vertex] = set()
self._vertex_attributes[vertex] = dict()
def remove_vertex(self, vertex):
'''
Remove a vertex as well as its incoming and outgoing edges from the
graph. If removal of the vertex generates orphan vertices, these can
later be removed by calling :func:`remove_orphan_vertices()` defined
below.
:param vertex: The vertex to remove from the graph.
'''
# Make sure vertex is in the graph.
if vertex in self._graph:
# Remove outgoing edges.
for successor in self._graph[vertex]:
# Remove vertex from successor's predecessors.
predecessors = self._transpose_graph[successor]
predecessors.discard(vertex)
self._transpose_graph[successor] = predecessors
# We are done with this variable, release some memory.
del predecessors
# Remove edge attributes.
del self._edge_attributes[(vertex, successor)]
# Remove incoming edges.
for predecessor in self._transpose_graph[vertex]:
# Remove vertex from predecessor's successors.
successors = self._graph[predecessor]
successors.discard(vertex)
self._graph[predecessor] = successors
# We are done with this variable, release some memory.
del successors
# Remove edge attributes.
del self._edge_attributes[(predecessor, vertex)]
# Now remove the vertex.
del self._graph[vertex]
del self._transpose_graph[vertex]
del self._vertex_attributes[vertex]
def remove_orphan_vertices(self):
'''
Remove orphan nodes from the graph (nodes that have neither incoming nor
outgoing edges).
'''
for vertex in self._graph.keys():
if len(self._graph[vertex]) == 0 and \
len(self._transpose_graph[vertex]) == 0:
del self._graph[vertex]
del self._transpose_graph[vertex]
del self._vertex_attributes[vertex]
def get_vertices(self):
'''
Return graph vertices.
:returns: Generator for all vertices in graph.
:rtype: ``generator``
'''
for vertex in self._graph.keys():
yield vertex
def add_vertex_attribute(self, vertex, name, value):
'''
Add vertex attribute. Previous value, if any, is returned.
:param vertex: The graph vertex whose attributes to update.
:param name: Attribute name to add or update.
:param value: Value to set the attribute to.
:returns: Previous attribute value, if any, or ``None``.
:rtype: ``object``
'''
return self._add_attribute(self._vertex_attributes, vertex, name, value)
def remove_vertex_attribute(self, vertex, name):
'''
Remove vertex attribute. Previous value, if any, is returned.
:param vertex: The graph vertex whose attribute to remove.
:param name: Attribute name to remove.
:returns: Previous attribute value, if any, or ``None``.
:rtype: ``object``
'''
return self._remove_attribute(self._vertex_attributes, vertex, name)
def get_vertex_attribute(self, vertex, name):
'''
Get value of vertex attribute.
:param vertex: The graph vertex whose attribute to retrieve.
:param name: Attribute name whose value to retrieve.
:returns: Attribute value or ``None``.
:rtype: ``object``
'''
return self._get_attribute(self._vertex_attributes, vertex, name)
def get_successors(self, vertex):
'''
Get set of immediate successors of vertex. We assume successor set can
fit in main memory.
:param vertex: The vertex whose successors to return.
:returns: Set of vertex successors.
:rtype: ``set``
'''
if vertex in self._graph:
vertices = self._graph[vertex]
else:
vertices = set()
return vertices
def get_predecessors(self, vertex):
'''
Get set of immediate predecessors of vertex. We assume predecessor set
can fit in main memory.
:param vertex: The vertex whose predecessors to return.
:returns: Set of vertex predecessors.
:rtype: ``set``
'''
if vertex in self._transpose_graph:
vertices = self._transpose_graph[vertex]
else:
vertices = set()
return vertices
def add_edge(self, edge):
'''
Add an edge in the graph.
:param edge: The graph edge to add.
'''
tail, head = edge
# Make sure vertices are there.
self.add_vertex(tail)
self.add_vertex(head)
# Read tail's successors.
successors = self._graph[tail]
# Update sets only if needed.
if head not in successors:
# Add head in tail's successors.
successors.add(head)
self._graph[tail] = successors
# We are done with this variable, release some memory.
del successors
# Add tail in head's predecessors.
predecessors = self._transpose_graph[head]
predecessors.add(tail)
self._transpose_graph[head] = predecessors
# We are done with this variable, release some memory.
del predecessors
# Initialize edge attributes to an empty dictionary.
self._edge_attributes[edge] = dict()
def remove_edge(self, edge):
'''
Remove an edge from the graph. If removal of the edge generates orphan
vertices, these can be removed by calling :func:`remove_orphan_vertices()`
defined above.
:param edge: The graph edge to remove.
'''
tail, head = edge
# Make sure vertices are there.
if tail in self._graph and head in self._graph:
# Read tail's successors.
successors = self._graph[tail]
# Make sure such an edge does exist.
if head in successors:
# Remove head from tail's successors.
successors.discard(head)
self._graph[tail] = successors
# We are done with this variable, release some memory.
del successors
# Remove tail from head's predecessors.
predecessors = self._transpose_graph[head]
predecessors.discard(tail)
self._transpose_graph[head] = predecessors
# We are done with this variable, release some memory.
del predecessors
# Delete edge attributes.
del self._edge_attributes[edge]
def get_edges(self):
'''
Return graph edges.
:returns: Generator for all edges in graph.
:rtype: ``generator``
'''
for vertex in self._graph.keys():
for successor in self._graph[vertex]:
yield (vertex, successor)
def add_edge_attribute(self, edge, name, value):
'''
Add edge attribute. Previous value, if any, is returned.
:param edge: The graph edge whose attributes to update.
:param name: Attribute name to add or update.
:param value: Value to set the attribute to.
:returns: Previous attribute value, if any, or ``None``.
:rtype: ``object``
'''
return self._add_attribute(self._edge_attributes, edge, name, value)
def remove_edge_attribute(self, edge, name):
'''
Remove edge attribute. Previous value, if any, is returned.
:param edge: The graph edge whose attribute to remove.
:param name: Attribute name to remove.
:returns: Previous attribute value, if any, or ``None``.
:rtype: ``object``
'''
return self._remove_attribute(self._edge_attributes, edge, name)
def get_edge_attribute(self, edge, name):
'''
Get value of edge attribute.
:param edge: The graph edge whose attribute to retrieve.
:param name: Attribute name whose value to retrieve.
:returns: Attribute value or ``None``.
:rtype: ``object``
'''
return self._get_attribute(self._edge_attributes, edge, name)
def close(self):
'''Finalize this :class:`EMGraph` instance.'''
self._graph.close()
self._transpose_graph.close()
self._vertex_attributes.close()
self._edge_attributes.close()
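# Minimal usage sketch (added for clarity; not part of the original module).
# It requires pyrsistence and writes memory-mapped files under the example
# directory given below.
if __name__ == '__main__':
    graph = EMGraph('/tmp/em_graph_example')
    graph.add_edge(('a', 'b'))
    graph.add_edge(('b', 'c'))
    graph.add_vertex_attribute('a', 'visited', True)
    graph.add_edge_attribute(('a', 'b'), 'weight', 0.4)
    print(graph.get_successors('a'))                       # successor set of 'a'
    print(graph.get_edge_attribute(('a', 'b'), 'weight'))  # 0.4
    graph.close()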
|
|
#!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for LUNode*
"""
from collections import defaultdict
import mock
from ganeti import compat
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti.cmdlib import node
from testsupport import *
import testutils
# pylint: disable=W0613
def _TcpPingFailSecondary(cfg, mock_fct, target, port, timeout=None,
live_port_needed=None, source=None):
# This will return True if target is in 192.0.2.0/24 (primary range)
# and False if not.
return "192.0.2." in target
class TestLUNodeAdd(CmdlibTestCase):
def setUp(self):
super(TestLUNodeAdd, self).setUp()
# One node for testing readding:
self.node_readd = self.cfg.AddNewNode()
self.op_readd = opcodes.OpNodeAdd(node_name=self.node_readd.name,
readd=True,
primary_ip=self.node_readd.primary_ip,
secondary_ip=self.node_readd.secondary_ip)
# One node for testing adding:
# don't add to configuration now!
self.node_add = objects.Node(name="node_add",
primary_ip="192.0.2.200",
secondary_ip="203.0.113.200")
self.op_add = opcodes.OpNodeAdd(node_name=self.node_add.name,
primary_ip=self.node_add.primary_ip,
secondary_ip=self.node_add.secondary_ip)
self.netutils_mod.TcpPing.return_value = True
self.mocked_dns_rpc = self.rpc_mod.DnsOnlyRunner.return_value
self.mocked_dns_rpc.call_version.return_value = \
self.RpcResultsBuilder(use_node_names=True) \
.AddSuccessfulNode(self.node_add, constants.CONFIG_VERSION) \
.AddSuccessfulNode(self.node_readd, constants.CONFIG_VERSION) \
.Build()
node_verify_result = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.node_add, {constants.NV_NODELIST: []})
# we can't know the node's UUID in advance, so use defaultdict here
self.rpc.call_node_verify.return_value = \
defaultdict(lambda: node_verify_result, {})
self.rpc.call_node_crypto_tokens.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.node_add,
[(constants.CRYPTO_TYPE_SSL_DIGEST, "IA:MA:FA:KE:DI:GE:ST")])
def testOvsNoLink(self):
ndparams = {
constants.ND_OVS: True,
constants.ND_OVS_NAME: "testswitch",
constants.ND_OVS_LINK: None,
}
op = self.CopyOpCode(self.op_add,
ndparams=ndparams)
self.ExecOpCode(op)
self.assertLogContainsRegex(
"No physical interface for OpenvSwitch was given."
" OpenvSwitch will not have an outside connection."
" This might not be what you want")
created_node = self.cfg.GetNodeInfoByName(op.node_name)
self.assertEqual(ndparams[constants.ND_OVS],
created_node.ndparams.get(constants.ND_OVS, None))
self.assertEqual(ndparams[constants.ND_OVS_NAME],
created_node.ndparams.get(constants.ND_OVS_NAME, None))
self.assertEqual(ndparams[constants.ND_OVS_LINK],
created_node.ndparams.get(constants.ND_OVS_LINK, None))
def testAddCandidateCert(self):
self.ExecOpCode(self.op_add)
created_node = self.cfg.GetNodeInfoByName(self.op_add.node_name)
cluster = self.cfg.GetClusterInfo()
self.assertTrue(created_node.uuid in cluster.candidate_certs)
def testReAddCandidateCert(self):
cluster = self.cfg.GetClusterInfo()
self.ExecOpCode(self.op_readd)
created_node = self.cfg.GetNodeInfoByName(self.op_readd.node_name)
self.assertTrue(created_node.uuid in cluster.candidate_certs)
def testAddNoCandidateCert(self):
op = self.CopyOpCode(self.op_add,
master_capable=False)
self.ExecOpCode(op)
created_node = self.cfg.GetNodeInfoByName(self.op_add.node_name)
cluster = self.cfg.GetClusterInfo()
self.assertFalse(created_node.uuid in cluster.candidate_certs)
def testWithoutOVS(self):
self.ExecOpCode(self.op_add)
created_node = self.cfg.GetNodeInfoByName(self.op_add.node_name)
self.assertEqual(None,
created_node.ndparams.get(constants.ND_OVS, None))
def testWithOVS(self):
ndparams = {
constants.ND_OVS: True,
constants.ND_OVS_LINK: "eth2",
}
op = self.CopyOpCode(self.op_add,
ndparams=ndparams)
self.ExecOpCode(op)
created_node = self.cfg.GetNodeInfoByName(op.node_name)
self.assertEqual(ndparams[constants.ND_OVS],
created_node.ndparams.get(constants.ND_OVS, None))
self.assertEqual(ndparams[constants.ND_OVS_LINK],
created_node.ndparams.get(constants.ND_OVS_LINK, None))
def testReaddingMaster(self):
op = opcodes.OpNodeAdd(node_name=self.cfg.GetMasterNodeName(),
readd=True)
self.ExecOpCodeExpectOpPrereqError(op, "Cannot readd the master node")
def testReaddNotVmCapableNode(self):
self.cfg.AddNewInstance(primary_node=self.node_readd)
self.netutils_mod.GetHostname.return_value = \
HostnameMock(self.node_readd.name, self.node_readd.primary_ip)
op = self.CopyOpCode(self.op_readd, vm_capable=False)
self.ExecOpCodeExpectOpPrereqError(op, "Node .* being re-added with"
" vm_capable flag set to false, but it"
" already holds instances")
def testReaddAndPassNodeGroup(self):
op = self.CopyOpCode(self.op_readd,group="groupname")
self.ExecOpCodeExpectOpPrereqError(op, "Cannot pass a node group when a"
" node is being readded")
def testPrimaryIPv6(self):
self.master.secondary_ip = self.master.primary_ip
op = self.CopyOpCode(self.op_add, primary_ip="2001:DB8::1",
secondary_ip=self.REMOVE)
self.ExecOpCode(op)
def testInvalidSecondaryIP(self):
op = self.CopyOpCode(self.op_add, secondary_ip="333.444.555.777")
self.ExecOpCodeExpectOpPrereqError(op, "Secondary IP .* needs to be a valid"
" IPv4 address")
def testNodeAlreadyInCluster(self):
op = self.CopyOpCode(self.op_readd, readd=False)
self.ExecOpCodeExpectOpPrereqError(op, "Node %s is already in the"
" configuration" % self.node_readd.name)
def testReaddNodeNotInConfiguration(self):
op = self.CopyOpCode(self.op_add, readd=True)
self.ExecOpCodeExpectOpPrereqError(op, "Node %s is not in the"
" configuration" % self.node_add.name)
def testPrimaryIpConflict(self):
    # In LUNodeAdd, DNS resolves the node name to an IP address, which is then
    # used to overwrite any given primary_ip value.
    # Thus we need to mock the DNS resolver here.
self.netutils_mod.GetHostname.return_value = \
HostnameMock(self.node_add.name, self.node_readd.primary_ip)
op = self.CopyOpCode(self.op_add)
self.ExecOpCodeExpectOpPrereqError(op, "New node ip address.* conflict with"
" existing node")
def testSecondaryIpConflict(self):
op = self.CopyOpCode(self.op_add, secondary_ip=self.node_readd.secondary_ip)
self.ExecOpCodeExpectOpPrereqError(op, "New node ip address.* conflict with"
" existing node")
def testReaddWithDifferentIP(self):
op = self.CopyOpCode(self.op_readd, primary_ip="192.0.2.100",
secondary_ip="230.0.113.100")
self.ExecOpCodeExpectOpPrereqError(op, "Readded node doesn't have the same"
" IP address configuration as before")
def testNodeHasSecondaryIpButNotMaster(self):
self.master.secondary_ip = self.master.primary_ip
self.ExecOpCodeExpectOpPrereqError(self.op_add, "The master has no"
" secondary ip but the new node has one")
def testMasterHasSecondaryIpButNotNode(self):
op = self.CopyOpCode(self.op_add, secondary_ip=None)
self.ExecOpCodeExpectOpPrereqError(op, "The master has a secondary ip but"
" the new node doesn't have one")
def testNodeNotReachableByPing(self):
self.netutils_mod.TcpPing.return_value = False
op = self.CopyOpCode(self.op_add)
self.ExecOpCodeExpectOpPrereqError(op, "Node not reachable by ping")
def testNodeNotReachableByPingOnSecondary(self):
self.netutils_mod.GetHostname.return_value = \
HostnameMock(self.node_add.name, self.node_add.primary_ip)
self.netutils_mod.TcpPing.side_effect = \
compat.partial(_TcpPingFailSecondary, self.cfg, self.netutils_mod.TcpPing)
op = self.CopyOpCode(self.op_add)
self.ExecOpCodeExpectOpPrereqError(op, "Node secondary ip not reachable by"
" TCP based ping to node daemon port")
def testCantGetVersion(self):
self.mocked_dns_rpc.call_version.return_value = \
self.RpcResultsBuilder(use_node_names=True) \
.AddErrorNode(self.node_add) \
.Build()
op = self.CopyOpCode(self.op_add)
self.ExecOpCodeExpectOpPrereqError(op, "Can't get version information from"
" node %s" % self.node_add.name)
class TestLUNodeSetParams(CmdlibTestCase):
def setUp(self):
super(TestLUNodeSetParams, self).setUp()
self.MockOut(node, 'netutils', self.netutils_mod)
node.netutils.TcpPing.return_value = True
self.node = self.cfg.AddNewNode(
primary_ip='192.168.168.191',
secondary_ip='192.168.168.192',
master_candidate=True, uuid='blue_bunny')
self.snode = self.cfg.AddNewNode(
primary_ip='192.168.168.193',
secondary_ip='192.168.168.194',
master_candidate=True, uuid='pink_bunny')
def testSetSecondaryIp(self):
self.instance = self.cfg.AddNewInstance(primary_node=self.node,
secondary_node=self.snode,
disk_template='drbd')
op = opcodes.OpNodeSetParams(node_name=self.node.name,
secondary_ip='254.254.254.254')
self.ExecOpCode(op)
self.assertEqual('254.254.254.254', self.node.secondary_ip)
self.assertEqual(sorted(self.wconfd.all_locks.items()), [
('cluster/BGL', 'shared'),
('instance/mock_inst_1.example.com', 'shared'),
('node-res/blue_bunny', 'exclusive'),
('node/blue_bunny', 'exclusive')])
def testSetSecondaryIpNoLock(self):
self.instance = self.cfg.AddNewInstance(primary_node=self.node,
secondary_node=self.snode,
disk_template='file')
op = opcodes.OpNodeSetParams(node_name=self.node.name,
secondary_ip='254.254.254.254')
self.ExecOpCode(op)
self.assertEqual('254.254.254.254', self.node.secondary_ip)
self.assertEqual(sorted(self.wconfd.all_locks.items()), [
('cluster/BGL', 'shared'),
('node-res/blue_bunny', 'exclusive'),
('node/blue_bunny', 'exclusive')])
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
# max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.7,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=partial(scaled_cost, loss_func=mdn_nll),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
1500: 5e-05
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter,
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
}
]
)
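# Note (added for clarity, not in the original script): the FeaturePoolLayer
# above pools over axis 1 (time) with ds=4, so the base network downsamples
# the sequence 4x; this presumably matches subsample_target=4 in source_dict
# (seq_length 512 -> 128 target timesteps). The 2x + 2x pooling variants in
# exp_c/f/g give the same overall 4x temporal reduction.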
def exp_a(name):
# 5 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
skip_probability=0.7
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
net.load_params(iteration=4000)
return net
def exp_b(name):
# 5 appliances and normal cost func
# avg valid cost = 1.6496223211
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
skip_probability=0.7
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
loss_function=lambda x, t: mdn_nll(x, t).mean(),
))
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def exp_c(name):
# 3 appliances and 3 layers with 2x2x pool
# avg valid cost = -0.2468771785
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
# 3 appliances and 3 layers with one 4x pool
# avg valid cost = -0.2574510872
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
# 3 appliances and 3 layers with one 4x pool after 1st layer
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def exp_f(name):
# 3 appliances and 3 layers with 2x2x pool and 1 component
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 1
}
]
net = Net(**net_dict_copy)
return net
def exp_g(name):
# 3 appliances and 3 layers with 2x2x pool and 3 components
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 3
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('bcdefg')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
from argparse import ArgumentParser
import os
from Bio import SeqIO
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_GENOMES, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.pipeline.run_info import ALLOWED_CONTIG_NAME_CHARS
from bcbio.galaxy import loc
from bcbio.log import logger
from fabric.api import *
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def gff3_to_gtf(gff3_file):
dialect = {'field separator': '; ',
'fmt': 'gtf',
'keyval separator': ' ',
'leading semicolon': False,
'multival separator': ',',
'quoted GFF2 values': True,
'order': ['gene_id', 'transcript_id'],
'repeated keys': False,
'trailing semicolon': True}
out_file = os.path.splitext(gff3_file)[0] + ".gtf"
if file_exists(out_file):
return out_file
logger.info("Converting %s to %s." %(gff3_file, out_file))
if _is_from_ncbi(gff3_file):
logger.info("NCBI format detected by the presence of the %s key."
% _is_from_ncbi(gff3_file))
_output_ncbi_gff3(gff3_file, out_file, dialect)
else:
_output_gff3(gff3_file, out_file, dialect)
return out_file
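# Example (illustrative; the filename is a placeholder):
#   gff3_to_gtf("annotation.gff3") writes and returns "annotation.gtf",
#   skipping the conversion when the output file already exists.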
def _output_gff3(gff3_file, out_file, dialect):
db = gffutils.create_db(gff3_file, ":memory:")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
transcript_id = feature["Parent"][0]
gene_id = db[transcript_id]["Parent"][0]
attr = {"transcript_id": transcript_id, "gene_id": gene_id}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print >> out_handle, feature
def _output_ncbi_gff3(gff3_file, out_file, dialect):
gene_key = "gene"
id_spec = {"gene": gene_key}
db = gffutils.create_db(gff3_file, ":memory:", id_spec=id_spec)
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
# Gnomon features are often missing a transcript id
# some malformed features are also missing the gene key
try:
transcript_id = feature["transcript_id"]
except KeyError:
try:
transcript_id = feature[gene_key]
except KeyError:
continue
gene_id = feature[gene_key]
try:
biotype = feature["gene_biotype"]
except KeyError:
biotype = "unknown"
attr = {"transcript_id": transcript_id, "gene_id": gene_id,
"gene_biotype": biotype}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print >> out_handle, feature
def _is_from_ncbi(gff3_file):
with open(gff3_file) as in_handle:
for line in tz.take(10000, in_handle):
if "Dbxref" in line:
return "Dbxref"
if "db_xref" in line:
return "db_xref"
return None
def _index_w_command(dir_name, command, ref_file, ext=None):
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
build_path = os.path.join(os.path.dirname(ref_file), os.pardir)
out_dir = os.path.join(build_path, dir_name)
index_path = os.path.join(out_dir, index_name)
if not env.safe_exists(out_dir):
env.safe_run("mkdir %s" % out_dir)
subprocess.check_call(command.format(ref_file=ref_file,
index_name=index_path), shell=True)
return index_path
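# Illustrative sketch (added for clarity; not part of the original script):
# _index_w_command expects a shell command template with {ref_file} and
# {index_name} placeholders, e.g. a hypothetical bowtie2 index build:
#
#   cmd = "bowtie2-build {ref_file} {index_name}"
#   index_path = _index_w_command("bowtie2", cmd, fasta_file)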
def setup_base_directories(genome_dir, name, build, gtf=None):
name_dir = os.path.join(genome_dir, name)
safe_makedir(name_dir)
build_dir = os.path.join(name_dir, build)
safe_makedir(build_dir)
seq_dir = os.path.join(build_dir, SEQ_DIR)
safe_makedir(seq_dir)
if gtf:
gtf_dir = os.path.join(build_dir, RNASEQ_DIR)
safe_makedir(gtf_dir)
return build_dir
def install_fasta_file(build_dir, fasta, build):
out_file = os.path.join(build_dir, SEQ_DIR, build + ".fa")
if not os.path.exists(out_file):
recs = SeqIO.parse(fasta, "fasta")
with open(out_file, "w") as out_handle:
SeqIO.write((_clean_rec_name(rec) for rec in recs), out_handle, "fasta")
return out_file
def _clean_rec_name(rec):
"""Clean illegal characters in input fasta file which cause problems downstream.
"""
out_id = []
for char in list(rec.id):
if char in ALLOWED_CONTIG_NAME_CHARS:
out_id.append(char)
else:
out_id.append("_")
rec.id = "".join(out_id)
rec.description = ""
return rec
def install_gtf_file(build_dir, gtf, build):
out_file = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
if not os.path.exists(out_file):
shutil.copyfile(gtf, out_file)
return out_file
def install_srna(species, gtf):
out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
safe_makedir(SRNASEQ_DIR)
if gtf:
if not os.path.exists(out_file):
shutil.copyfile(gtf, out_file)
try:
from seqcluster import install
except ImportError:
raise ImportError("install seqcluster first, please.")
with chdir(SRNASEQ_DIR):
hairpin, miRNA = install._install_mirbase()
cmd = ("cat %s | awk '{if ($0~/>%s/){name=$0; print name} else if ($0~/^>/){name=0};if (name!=0 && $0!~/^>/){print $0;}}' | sed 's/U/T/g' > hairpin.fa")
do.run(cmd % (hairpin, species), "set precursor.")
cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
do.run(cmd.format(**locals()), "set miRNA.")
shutil.rmtree("mirbase")
return out_file
def append_ercc(gtf_file, fasta_file):
ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
print append_fa_cmd.format(**locals())
subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
print append_gtf_cmd.format(**locals())
subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
class MyParser(ArgumentParser):
def error(self, message):
self.print_help()
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
print "\nCurrent genomes\n"
print open(loc.get_loc_file(galaxy_base, "samtools")).read()
sys.exit(0)
if __name__ == "__main__":
description = ("Set up a custom genome for bcbio-nextgen. This will "
"place the genome under name/build in the genomes "
"directory in your bcbio-nextgen installation.")
parser = MyParser(description=description)
parser.add_argument("-c", "--cores", default=1,
help="number of cores to use")
parser.add_argument("-f", "--fasta", required=True,
help="FASTA file of the genome.")
parser.add_argument("--gff3", default=False, action='store_true',
help="File is a GFF3 file.")
parser.add_argument("-g", "--gtf", default=None,
help="GTF file of the transcriptome")
parser.add_argument("-n", "--name", required=True,
help="Name of organism, for example Hsapiens.")
parser.add_argument("-b", "--build", required=True,
help="Build of genome, for example hg19.")
parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
default=["seq"], help="Space separated list of indexes to make")
parser.add_argument("--ercc", action='store_true', default=False,
help="Add ERCC spike-ins.")
parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
args = parser.parse_args()
# if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
# raise ValueError("--mirbase and --srna_gtf both need a value.")
env.hosts = ["localhost"]
env.cores = args.cores
os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
cbl = get_cloudbiolinux(REMOTES)
sys.path.insert(0, cbl["dir"])
genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
# monkey patch cloudbiolinux to use this indexing command instead
genomes = getattr(genomemod, 'genomes')
genomes._index_w_command = _index_w_command
fabmod = __import__("cloudbio", fromlist=["fabutils"])
fabutils = getattr(fabmod, 'fabutils')
fabutils.configure_runsudo(env)
genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
args.fasta = os.path.abspath(args.fasta)
args.gtf = os.path.abspath(args.gtf) if args.gtf else None
args.srna_gtf = os.path.abspath(args.srna_gtf) if args.srna_gtf else None
if args.gff3:
args.gtf = gff3_to_gtf(args.gtf)
# always make a sequence dictionary
if "seq" not in args.indexes:
args.indexes.append("seq")
env.system_install = genome_dir
prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
print "Creating directories using %s as the base." % (genome_dir)
build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
os.chdir(build_dir)
print "Genomes will be installed into %s." % (build_dir)
fasta_file = install_fasta_file(build_dir, args.fasta, args.build)
print "Installed genome as %s." % (fasta_file)
if args.gtf:
if "bowtie2" not in args.indexes:
args.indexes.append("bowtie2")
gtf_file = install_gtf_file(build_dir, args.gtf, args.build)
print "Installed GTF as %s." % (gtf_file)
if args.ercc:
print "Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file)
append_ercc(gtf_file, fasta_file)
indexed = {}
for index in args.indexes:
print "Creating the %s index." % (index)
index_fn = genomes.get_index_fn(index)
if not index_fn:
print "Do not know how to make the index %s, skipping." % (index)
continue
indexed[index] = index_fn(fasta_file)
indexed["samtools"] = fasta_file
if args.gtf:
"Preparing transcriptome."
with chdir(os.path.join(build_dir, os.pardir)):
cmd = ("{sys.executable} {prepare_tx} --cores {args.cores} --genome-dir {genome_dir} --gtf {gtf_file} {args.name} {args.build}")
subprocess.check_call(cmd.format(**locals()), shell=True)
if args.mirbase:
"Preparing smallRNA data."
with chdir(os.path.join(build_dir)):
install_srna(args.mirbase, args.srna_gtf)
base_dir = os.path.normpath(os.path.dirname(fasta_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
print "Dumping genome resources to %s." % resource_file
resource_dict = {"version": 1}
if args.gtf:
transcripts = ["rnaseq", "transcripts"]
mask = ["rnaseq", "transcripts_mask"]
index = ["rnaseq", "transcriptome_index", "tophat"]
dexseq = ["rnaseq", "dexseq"]
refflat = ["rnaseq", "refflat"]
rRNA_fa = ["rnaseq", "rRNA_fa"]
resource_dict = tz.update_in(resource_dict, transcripts,
lambda x: "../rnaseq/ref-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, mask,
lambda x: "../rnaseq/ref-transcripts-mask.gtf")
resource_dict = tz.update_in(resource_dict, index,
lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
resource_dict = tz.update_in(resource_dict, refflat,
lambda x: "../rnaseq/ref-transcripts.refFlat")
resource_dict = tz.update_in(resource_dict, dexseq,
lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
resource_dict = tz.update_in(resource_dict, rRNA_fa,
lambda x: "../rnaseq/rRNA.fa")
if args.mirbase:
srna_gtf = ["srnaseq", "srna_transcripts"]
srna_mirbase = ["srnaseq", "mirbase_hairpin"]
resource_dict = tz.update_in(resource_dict, srna_gtf,
lambda x: "../srnaseq/srna-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, srna_mirbase,
lambda x: "../srnaseq/hairpin.fa")
    # write out resource dictionary
with file_transaction(resource_file) as tx_resource_file:
with open(tx_resource_file, "w") as out_handle:
out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
print "Updating Galaxy .loc files."
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
for index, index_file in indexed.items():
loc.update_loc_file(galaxy_base, index, args.build, index_file)
|
|
"""Computational algebraic number field theory. """
from sympy import (S, Expr, I, Integer, Rational, Real, Symbol, Add, Mul,
sympify, Q, ask, Dummy)
from sympy.polys.polytools import Poly, sqf_norm, invert, factor_list, groebner
from sympy.polys.polyutils import basic_from_dict
from sympy.polys.polyclasses import ANP, DMP
from sympy.polys.polyerrors import (IsomorphismFailed, CoercionFailed,
NotAlgebraic)
from sympy.utilities import any, all, numbered_symbols, variations
from sympy.ntheory import sieve
from sympy.mpmath import pslq, mp
def minimal_polynomial(ex, x=None, **args):
"""Computes the minimal polynomial of an algebraic number. """
generator = numbered_symbols('a', cls=Dummy)
mapping, symbols, replace = {}, {}, []
ex = sympify(ex)
if x is not None:
x = sympify(x)
else:
x = Dummy('x')
def update_mapping(ex, exp, base=None):
a = generator.next()
symbols[ex] = a
if base is not None:
mapping[ex] = a**exp + base
else:
mapping[ex] = exp.as_basic(a)
return a
def bottom_up_scan(ex):
if ex.is_Atom:
if ex is S.ImaginaryUnit:
if ex not in mapping:
return update_mapping(ex, 2, 1)
else:
return symbols[ex]
elif ex.is_Rational and ex.q != 0:
return ex
elif ex.is_Add:
return Add(*[ bottom_up_scan(g) for g in ex.args ])
elif ex.is_Mul:
return Mul(*[ bottom_up_scan(g) for g in ex.args ])
elif ex.is_Pow:
if ex.exp.is_Rational:
if ex.exp < 0 and ex.base.is_Add:
coeff, terms = ex.base.as_coeff_add()
elt, _ = primitive_element(terms, polys=True)
alg = ex.base - coeff
inverse = invert(elt.gen + coeff, elt)
base = inverse.subs(elt.gen, alg).expand()
if ex.exp == -1:
return bottom_up_scan(base)
else:
ex = base**(-ex.exp)
if not ex.exp.is_Integer:
base, exp = (ex.base**ex.exp.p).expand(), Rational(1, ex.exp.q)
else:
base, exp = ex.base, ex.exp
base = bottom_up_scan(base)
expr = base**exp
if expr not in mapping:
return update_mapping(expr, 1/exp, -base)
else:
return symbols[expr]
elif ex.is_AlgebraicNumber:
if ex.root not in mapping:
return update_mapping(ex.root, ex.minpoly)
else:
return symbols[ex.root]
raise NotAlgebraic("%s doesn't seem to be an algebraic number" % ex)
polys = args.get('polys', False)
if ex.is_AlgebraicNumber:
if not polys:
return ex.minpoly.as_basic(x)
else:
return ex.minpoly.replace(x)
elif ex.is_Rational and ex.q != 0:
result = ex.q*x - ex.p
else:
F = [x - bottom_up_scan(ex)] + mapping.values()
G = groebner(F, symbols.values() + [x], order='lex')
_, factors = factor_list(G[-1])
if len(factors) == 1:
((result, _),) = factors
else:
for result, _ in factors:
if result.subs(x, ex).evalf(chop=True) == 0:
break
else: # pragma: no cover
raise NotImplementedError("multiple candidates for the minimal polynomial of %s" % ex)
if polys:
return Poly(result, x, field=True)
else:
return result
minpoly = minimal_polynomial
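# Usage sketch (doctest-style comment, not part of the original module):
#
#   >>> from sympy import sqrt, Symbol
#   >>> x = Symbol('x')
#   >>> minimal_polynomial(sqrt(2), x)
#   x**2 - 2
#   >>> minimal_polynomial(sqrt(2) + sqrt(3), x)
#   x**4 - 10*x**2 + 1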
def _coeffs_generator(n):
"""Generate coefficients for `primitive_element()`. """
for coeffs in variations([1,-1], n, repetition=True):
yield coeffs
def primitive_element(extension, x=None, **args):
"""Construct a common number field for all extensions. """
if not extension:
raise ValueError("can't compute primitive element for empty extension")
if x is not None:
x = sympify(x)
else:
x = Dummy('x')
if not args.get('ex', False):
extension = [ AlgebraicNumber(ext, gen=x) for ext in extension ]
g, coeffs = extension[0].minpoly, [1]
for ext in extension[1:]:
s, _, g = sqf_norm(g, x, extension=ext)
coeffs = [ s*c for c in coeffs ] + [1]
if not args.get('polys', False):
return g.as_basic(), coeffs
else:
return g, coeffs
generator = numbered_symbols('y', cls=Dummy)
F, Y = [], []
for ext in extension:
y = generator.next()
if ext.is_Poly:
if ext.is_univariate:
f = ext.as_basic(y)
else:
raise ValueError("expected minimal polynomial, got %s" % ext)
else:
f = minpoly(ext, y)
F.append(f)
Y.append(y)
coeffs_generator = args.get('coeffs', _coeffs_generator)
for coeffs in coeffs_generator(len(Y)):
f = x - sum([ c*y for c, y in zip(coeffs, Y)])
G = groebner(F + [f], Y + [x], order='lex')
H, g = G[:-1], Poly(G[-1], x, domain='QQ')
for i, (h, y) in enumerate(zip(H, Y)):
try:
H[i] = Poly(y - h, x, domain='QQ').all_coeffs()
except CoercionFailed: # pragma: no cover
break # G is not a triangular set
else:
break
else: # pragma: no cover
raise RuntimeError("run out of coefficient configurations")
_, g = g.ground_to_ring()
if not args.get('polys', False):
return g.as_basic(), coeffs, H
else:
return g, coeffs, H
primelt = primitive_element
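# Usage sketch (hedged, not part of the original module): primitive_element
# returns the minimal polynomial of a primitive element of the composite field
# together with the coefficients combining the given extensions into it, e.g.:
#
#   >>> from sympy import sqrt, Symbol
#   >>> x = Symbol('x')
#   >>> primitive_element([sqrt(2), sqrt(3)], x)
#   (x**4 - 10*x**2 + 1, [1, 1])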
def is_isomorphism_possible(a, b):
"""Returns `True` if there is a chance for isomorphism. """
n = a.minpoly.degree()
m = b.minpoly.degree()
if m % n != 0:
return False
if n == m:
return True
da = a.minpoly.discriminant()
db = b.minpoly.discriminant()
i, k, half = 1, m//n, db//2
while True:
p = sieve[i]
P = p**k
if P > half:
break
if ((da % p) % 2) and not (db % P):
return False
i += 1
return True
def field_isomorphism_pslq(a, b):
"""Construct field isomorphism using PSLQ algorithm. """
if not a.root.is_real or not b.root.is_real:
raise NotImplementedError("PSLQ doesn't support complex coefficients")
f = a.minpoly
g = b.minpoly.replace(f.gen)
n, m, prev = 100, b.minpoly.degree(), None
for i in xrange(1, 5):
A = a.root.evalf(n)
B = b.root.evalf(n)
basis = [1, B] + [ B**i for i in xrange(2, m) ] + [A]
dps, mp.dps = mp.dps, n
coeffs = pslq(basis, maxcoeff=int(1e10), maxsteps=1000)
mp.dps = dps
if coeffs is None:
break
if coeffs != prev:
prev = coeffs
else:
break
coeffs = [S(c)/coeffs[-1] for c in coeffs[:-1]]
while not coeffs[-1]:
coeffs.pop()
coeffs = list(reversed(coeffs))
h = Poly(coeffs, f.gen, domain='QQ')
if f.compose(h).rem(g).is_zero:
d, approx = len(coeffs)-1, 0
for i, coeff in enumerate(coeffs):
approx += coeff*B**(d-i)
if A*approx < 0:
return [ -c for c in coeffs ]
else:
return coeffs
elif f.compose(-h).rem(g).is_zero:
return [ -c for c in coeffs ]
else:
n *= 2
return None
def field_isomorphism_factor(a, b):
"""Construct field isomorphism via factorization. """
_, factors = factor_list(a.minpoly, extension=b)
for f, _ in factors:
if f.degree() == 1:
coeffs = f.rep.TC().to_sympy_list()
d, terms = len(coeffs)-1, []
for i, coeff in enumerate(coeffs):
terms.append(coeff*b.root**(d-i))
root = Add(*terms)
if (a.root - root).evalf(chop=True) == 0:
return coeffs
if (a.root + root).evalf(chop=True) == 0:
return [ -c for c in coeffs ]
else:
return None
def field_isomorphism(a, b, **args):
"""Construct an isomorphism between two number fields. """
a, b = sympify(a), sympify(b)
if not a.is_AlgebraicNumber:
a = AlgebraicNumber(a)
if not b.is_AlgebraicNumber:
b = AlgebraicNumber(b)
if a == b:
return a.coeffs()
n = a.minpoly.degree()
m = b.minpoly.degree()
if n == 1:
return [a.root]
if m % n != 0:
return None
if args.get('fast', True):
try:
result = field_isomorphism_pslq(a, b)
if result is not None:
return result
except NotImplementedError:
pass
return field_isomorphism_factor(a, b)
def to_number_field(extension, theta=None, **args):
"""Express `extension` in the field generated by `theta`. """
gen = args.get('gen')
if hasattr(extension, '__iter__'):
extension = list(extension)
else:
extension = [extension]
if len(extension) == 1 and type(extension[0]) is tuple:
return AlgebraicNumber(extension[0])
minpoly, coeffs = primitive_element(extension, gen, polys=True)
root = sum([ coeff*ext for coeff, ext in zip(coeffs, extension) ])
if theta is None:
return AlgebraicNumber((minpoly, root))
else:
theta = sympify(theta)
if not theta.is_AlgebraicNumber:
theta = AlgebraicNumber(theta, gen=gen)
coeffs = field_isomorphism(root, theta)
if coeffs is not None:
return AlgebraicNumber(theta, coeffs)
else:
raise IsomorphismFailed("%s is not in a subfield of %s" % (root, theta.root))
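# Usage sketch (hedged, not part of the original module):
#
#   >>> from sympy import sqrt
#   >>> to_number_field([sqrt(2), sqrt(3)])   # AlgebraicNumber whose root is
#   ...                                       # the primitive element sqrt(2) + sqrt(3)
#   >>> to_number_field(sqrt(2), sqrt(3))     # raises IsomorphismFailed, since
#   ...                                       # sqrt(2) does not lie in Q(sqrt(3))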
class AlgebraicNumber(Expr):
"""Class for representing algebraic numbers in SymPy. """
__slots__ = ['rep', 'root', 'alias', 'minpoly']
is_AlgebraicNumber = True
def __new__(cls, expr, coeffs=None, **args):
"""Construct a new algebraic number. """
expr = sympify(expr)
if type(expr) is tuple:
minpoly, root = expr
if not minpoly.is_Poly:
minpoly = Poly(minpoly)
elif expr.is_AlgebraicNumber:
minpoly, root = expr.minpoly, expr.root
else:
minpoly, root = minimal_polynomial(expr, args.get('gen'), polys=True), expr
dom = minpoly.get_domain()
if coeffs is not None:
if not isinstance(coeffs, ANP):
rep = DMP.from_sympy_list(sympify(coeffs), 0, dom)
else:
rep = DMP.from_list(coeffs.to_list(), 0, dom)
if rep.degree() >= minpoly.degree():
rep = rep.rem(minpoly.rep)
else:
rep = DMP.from_list([1, 0], 0, dom)
if ask(root, Q.negative):
rep = -rep
alias = args.get('alias')
if alias is not None:
if not isinstance(alias, Symbol):
alias = Symbol(alias)
obj = Expr.__new__(cls)
obj.rep = rep
obj.root = root
obj.alias = alias
obj.minpoly = minpoly
return obj
def __eq__(a, b):
if not b.is_AlgebraicNumber:
try:
b = to_number_field(b, a)
except (NotAlgebraic, IsomorphismFailed):
return False
return a.rep == b.rep and \
a.minpoly.all_coeffs() == b.minpoly.all_coeffs()
def __ne__(a, b):
if not b.is_AlgebraicNumber:
try:
b = to_number_field(b, a)
except (NotAlgebraic, IsomorphismFailed):
return True
return a.rep != b.rep or \
a.minpoly.all_coeffs() != b.minpoly.all_coeffs()
def __hash__(self):
return super(AlgebraicNumber, self).__hash__()
def _eval_evalf(self, prec):
return self.as_basic()._evalf(prec)
@property
def is_aliased(self):
"""Returns `True` if `alias` was set. """
return self.alias is not None
def as_poly(self, x=None):
"""Create a Poly instance from `self`. """
if x is not None:
return Poly(self.rep, x)
else:
if self.alias is not None:
return Poly(self.rep, self.alias)
else:
return Poly(self.rep, Dummy('x'))
def as_basic(self, x=None):
"""Create a Basic expression from `self`. """
return self.as_poly(x or self.root).as_basic().expand()
def coeffs(self):
"""Returns all SymPy coefficients of an algebraic number. """
return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ]
def native_coeffs(self):
"""Returns all native coefficients of an algebraic number. """
return self.rep.all_coeffs()
def to_algebraic_integer(self):
"""Convert `self` to an algebraic integer. """
f = self.minpoly
if f.LC() == 1:
return self
coeff = f.LC()**(f.degree()-1)
poly = f.compose(Poly(f.gen/f.LC()))
minpoly = poly*coeff
root = f.LC()*self.root
return AlgebraicNumber((minpoly, root), self.coeffs())
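# Usage sketch (hedged, not part of the original module):
#
#   >>> from sympy import sqrt
#   >>> a = AlgebraicNumber(sqrt(2))
#   >>> a.coeffs()       # [1, 0] -- the element is the field generator itself
#   >>> a.minpoly        # Poly of x**2 - 2 over QQ (with a dummy generator)
#   >>> a.is_aliased     # False, no alias was passed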
|
|
# Tests specific to the dask class
import os
from numpy.core.shape_base import block
import pytest
import numpy as np
from mock import patch
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.utils import data
try:
from distributed.utils_test import client, loop, cluster_fixture # noqa
DISTRIBUTED_INSTALLED = True
except ImportError:
DISTRIBUTED_INSTALLED = False
from spectral_cube import DaskSpectralCube, SpectralCube, DaskVaryingResolutionSpectralCube
from .test_casafuncs import make_casa_testimage
try:
import casatools
from casatools import image
CASA_INSTALLED = True
except ImportError:
try:
from taskinit import ia as image
CASA_INSTALLED = True
except ImportError:
CASA_INSTALLED = False
DATA = os.path.join(os.path.dirname(__file__), 'data')
class Array:
args = None
kwargs = None
def compute(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def test_scheduler(data_adv):
cube = DaskSpectralCube.read(data_adv)
fake_array = Array()
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'synchronous'}
with cube.use_dask_scheduler('threads'):
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'threads'}
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'synchronous'}
cube.use_dask_scheduler('threads')
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'threads'}
with cube.use_dask_scheduler('processes', num_workers=4):
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'processes', 'num_workers': 4}
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'threads'}
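# Usage sketch (hedged, mirrors the assertions above; the FITS path is
# hypothetical): the scheduler can be set temporarily via a context manager or
# persistently by calling use_dask_scheduler outside a `with` block:
#
#   cube = DaskSpectralCube.read('cube.fits')
#   with cube.use_dask_scheduler('threads', num_workers=4):
#       cube.moment0()                     # computed with the threaded scheduler
#   cube.use_dask_scheduler('threads')     # persists for later computations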
def test_save_to_tmp_dir(data_adv):
pytest.importorskip('zarr')
cube = DaskSpectralCube.read(data_adv)
cube_new = cube.sigma_clip_spectrally(3, save_to_tmp_dir=True)
# The following test won't necessarily always work in future since the name
# is not really guaranteed, but this is pragmatic enough for now
assert cube_new._data.name.startswith('from-zarr')
def test_rechunk(data_adv):
cube = DaskSpectralCube.read(data_adv)
assert cube._data.chunksize == (4, 3, 2)
cube_new = cube.rechunk(chunks=(1, 2, 3))
# note last element is 2 because the chunk size we asked for
# is larger than cube - this is fine and deliberate in this test
assert cube_new._data.chunksize == (1, 2, 2)
def test_statistics(data_adv):
cube = DaskSpectralCube.read(data_adv).rechunk(chunks=(1, 2, 3))
stats = cube.statistics()
assert_quantity_allclose(stats['npts'], 24)
assert_quantity_allclose(stats['mean'], 0.4941651776136591 * u.K)
assert_quantity_allclose(stats['sigma'], 0.3021908870982011 * u.K)
assert_quantity_allclose(stats['sum'], 11.85996426272782 * u.K)
assert_quantity_allclose(stats['sumsq'], 7.961125988022091 * u.K ** 2)
assert_quantity_allclose(stats['min'], 0.0363300285196364 * u.K)
assert_quantity_allclose(stats['max'], 0.9662900439556562 * u.K)
assert_quantity_allclose(stats['rms'], 0.5759458158839716 * u.K)
@pytest.mark.skipif(not CASA_INSTALLED, reason='Requires CASA to be installed')
def test_statistics_consistency_casa(data_adv, tmp_path):
# Similar to test_statistics but compares to CASA directly.
cube = DaskSpectralCube.read(data_adv)
stats = cube.statistics()
make_casa_testimage(data_adv, tmp_path / 'casa.image')
ia = casatools.image()
ia.open(str(tmp_path / 'casa.image'))
stats_casa = ia.statistics()
ia.close()
for key in stats:
if isinstance(stats[key], u.Quantity):
value = stats[key].value
else:
value = stats[key]
assert_allclose(value, stats_casa[key])
def test_apply_function_parallel_spectral_noncube(data_adv):
'''
Testing returning a non-SpectralCube object with a user-defined
function for spectral operations.
'''
chunk_size = (-1, 1, 2)
cube = DaskSpectralCube.read(data_adv).rechunk(chunks=chunk_size)
def sum_blocks_spectral(data_chunk):
return data_chunk.sum(0)
# Tell dask.map_blocks that we expect the zeroth axis to be (1,)
output_chunk_size = (1, 2)
test = cube.apply_function_parallel_spectral(sum_blocks_spectral,
return_new_cube=False,
accepts_chunks=True,
drop_axis=[0], # The output will no longer contain the spectral axis
chunks=output_chunk_size)
    # The shape of test should be cube.shape[1:] (the spectral axis is dropped)
assert test.shape == cube.shape[1:]
# Test we get the same output as the builtin sum
assert_allclose(test.compute(), cube.sum(axis=0).unitless_filled_data[:])
def test_apply_function_parallel_spectral_noncube_withblockinfo(data_adv):
'''
Test receiving block_info information from da.map_blocks so we can place
the chunk's location in the whole cube when needed.
https://docs.dask.org/en/latest/array-api.html#dask.array.map_blocks
'''
chunk_size = (-1, 1, 2)
cube = DaskSpectralCube.read(data_adv).rechunk(chunks=chunk_size)
sum_spectral_plane = cube.sum(axis=0).unitless_filled_data[:]
# Each value should be different. This is important to check the right positions being used
    # for the check in sum_blocks_spectral
assert np.unique(sum_spectral_plane).size == sum_spectral_plane.size
def sum_blocks_spectral(data_chunk, block_info=None, comparison_array=None):
chunk_sum = data_chunk.sum(0)
# When the block_info kwarg is defined, it should not be None
assert block_info is not None
# Check the block location compared to `comparison_array`
# Get the lower corner location in the whole cube.
loc = [block_range[0] for block_range in block_info[0]['array-location']]
# Should have 3 dimensions for the corner.
assert len(loc) == 3
# Slice comparison array to compare with this data chunk
thisslice = (slice(loc[1], loc[1] + chunk_sum.shape[0]),
slice(loc[2], loc[2] + chunk_sum.shape[1]),)
return chunk_sum == comparison_array[thisslice]
# Tell dask.map_blocks that we expect the zeroth axis to be (1,)
output_chunk_size = (1, 2)
test = cube.apply_function_parallel_spectral(sum_blocks_spectral,
return_new_cube=False,
accepts_chunks=True,
drop_axis=[0], # The output will no longer contain the spectral axis
chunks=output_chunk_size,
comparison_array=sum_spectral_plane) # Passed to `sum_blocks_spectral`
    # The shape of test should be cube.shape[1:] (the spectral axis is dropped)
assert test.shape == cube.shape[1:]
# Test all True
assert np.all(test.compute())
@pytest.mark.parametrize(('accepts_chunks'),
((True, False)))
def test_apply_function_parallel_shape(accepts_chunks):
# regression test for #772
def func(x, add=None):
if add is not None:
y = x + add
else:
raise ValueError("This test is supposed to have add=1")
return y
fn = data.get_pkg_data_filename('tests/data/example_cube.fits', 'spectral_cube')
cube = SpectralCube.read(fn, use_dask=True)
cube2 = SpectralCube.read(fn, use_dask=False)
# Check dask w/both threaded and unthreaded
rslt3 = cube.apply_function_parallel_spectral(func, add=1,
accepts_chunks=accepts_chunks)
with cube.use_dask_scheduler('threads', num_workers=4):
rslt = cube.apply_function_parallel_spectral(func, add=1,
accepts_chunks=accepts_chunks)
rslt2 = cube2.apply_function_parallel_spectral(func, add=1)
np.testing.assert_almost_equal(cube.filled_data[:].value,
cube2.filled_data[:].value)
np.testing.assert_almost_equal(rslt.filled_data[:].value,
rslt2.filled_data[:].value)
np.testing.assert_almost_equal(rslt.filled_data[:].value,
rslt3.filled_data[:].value)
@pytest.mark.parametrize('filename', ('data_adv', 'data_adv_beams',
'data_vda_beams', 'data_vda_beams_image'))
def test_cube_on_cube(filename, request):
if 'image' in filename and not CASA_INSTALLED:
pytest.skip('Requires CASA to be installed')
dataname = request.getfixturevalue(filename)
# regression test for #782
# the regression applies only to VaryingResolutionSpectralCubes
# since they are not SpectralCube subclasses
cube = DaskSpectralCube.read(dataname)
assert isinstance(cube, (DaskSpectralCube, DaskVaryingResolutionSpectralCube))
cube2 = SpectralCube.read(dataname, use_dask=False)
if 'image' not in filename:
# 'image' would be CASA and must be dask
assert not isinstance(cube2, (DaskSpectralCube, DaskVaryingResolutionSpectralCube))
with patch.object(cube, '_cube_on_cube_operation') as mock:
cube * cube
mock.assert_called_once()
with patch.object(cube, '_cube_on_cube_operation') as mock:
cube * cube2
mock.assert_called_once()
with patch.object(cube2, '_cube_on_cube_operation') as mock:
cube2 * cube
mock.assert_called_once()
del cube
del cube2
if DISTRIBUTED_INSTALLED:
def test_dask_distributed(client, tmpdir): # noqa
# Make sure that we can use dask distributed. This is a regression test for
# a bug caused by FilledArrayHandler not being serializable.
cube = DaskSpectralCube.read(os.path.join(DATA, 'basic.image'))
cube.use_dask_scheduler(client)
cube.sigma_clip_spectrally(2, save_to_tmp_dir=tmpdir.strpath)
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to parse .yaml files for an appengine app."""
import os
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.third_party.appengine.api import appinfo
from googlecloudsdk.third_party.appengine.api import appinfo_errors
from googlecloudsdk.third_party.appengine.api import appinfo_includes
from googlecloudsdk.third_party.appengine.api import croninfo
from googlecloudsdk.third_party.appengine.api import dispatchinfo
from googlecloudsdk.third_party.appengine.api import dosinfo
from googlecloudsdk.third_party.appengine.api import queueinfo
from googlecloudsdk.third_party.appengine.api import validation
from googlecloudsdk.third_party.appengine.api import yaml_errors
from googlecloudsdk.third_party.appengine.datastore import datastore_index
HINT_PROJECT = ('Project name should instead be specified either by '
'`gcloud config set project MY_PROJECT` or by setting the '
'`--project` flag on individual command executions.')
HINT_VERSION = ('Versions are generated automatically by default but can also '
'be manually specified by setting the `--version` flag on '
'individual command executions.')
class Error(exceptions.Error):
"""A base error for this module."""
pass
class AppConfigSetLoadError(Error):
"""An exception for when the set of configurations are not valid."""
def __init__(self):
"""Creates a new Error."""
super(AppConfigSetLoadError, self).__init__(
'Errors occurred while parsing the App Engine app configuration.')
class YamlParseError(Error):
"""An exception for when a specific yaml file is not well formed."""
def __init__(self, file_path, e):
"""Creates a new Error.
Args:
file_path: str, The full path of the file that failed to parse.
e: Exception, The exception that was originally raised.
"""
super(YamlParseError, self).__init__(
'An error occurred while parsing file: [{file_path}]\n{err}'
.format(file_path=file_path, err=e))
class YamlValidationError(Error):
"""An exception for when a specific yaml file has invalid info."""
pass
class AppConfigError(Error):
"""Errors in Application Config."""
class _YamlInfo(object):
"""A base class for holding some basic attributes of a parsed .yaml file."""
def __init__(self, file_path, parsed):
"""Creates a new _YamlInfo.
Args:
file_path: str, The full path the file that was parsed.
parsed: The parsed yaml data as one of the *_info objects.
"""
self.file = file_path
self.parsed = parsed
@staticmethod
def _ParseYaml(file_path, parser):
"""Parses the given file using the given parser.
Args:
file_path: str, The full path of the file to parse.
parser: str, The parser to use to parse this yaml file.
Returns:
The result of the parse.
"""
with open(file_path, 'r') as fp:
return parser(fp)
class ConfigYamlInfo(_YamlInfo):
"""A class for holding some basic attributes of a parsed config .yaml file."""
CRON = 'cron'
DISPATCH = 'dispatch'
DOS = 'dos'
INDEX = 'index'
QUEUE = 'queue'
CONFIG_YAML_PARSERS = {
CRON: croninfo.LoadSingleCron,
DISPATCH: dispatchinfo.LoadSingleDispatch,
DOS: dosinfo.LoadSingleDos,
INDEX: datastore_index.ParseIndexDefinitions,
QUEUE: queueinfo.LoadSingleQueue,
}
def __init__(self, file_path, config, parsed):
"""Creates a new ConfigYamlInfo.
Args:
file_path: str, The full path the file that was parsed.
config: str, The name of the config that was parsed (i.e. 'cron')
parsed: The parsed yaml data as one of the *_info objects.
"""
super(ConfigYamlInfo, self).__init__(file_path, parsed)
self.config = config
@staticmethod
def FromFile(file_path):
"""Parses the given config file.
Args:
file_path: str, The full path to the config file.
Raises:
YamlParseError: If the file is not valid.
Returns:
A ConfigYamlInfo object for the parsed file.
"""
(base, _) = os.path.splitext(os.path.basename(file_path))
parser = ConfigYamlInfo.CONFIG_YAML_PARSERS.get(base)
if not parser:
return None
try:
parsed = _YamlInfo._ParseYaml(file_path, parser)
if not parsed:
raise YamlParseError(file_path, 'The file is empty')
except (yaml_errors.Error, validation.Error) as e:
raise YamlParseError(file_path, e)
_CheckIllegalAttribute(
name='application',
yaml_info=parsed,
extractor_func=lambda yaml: yaml.application,
file_path=file_path,
msg=HINT_PROJECT)
return ConfigYamlInfo(file_path, config=base, parsed=parsed)
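# Usage sketch (hedged, not part of the original module): FromFile only
# recognises files whose basename matches one of CONFIG_YAML_PARSERS, e.g.:
#
#   ConfigYamlInfo.FromFile('/app/cron.yaml')   # ConfigYamlInfo with config == 'cron'
#   ConfigYamlInfo.FromFile('/app/app.yaml')    # None ('app' is not a config file)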
class ModuleYamlInfo(_YamlInfo):
"""A class for holding some basic attributes of a parsed module .yaml file."""
DEFAULT_MODULE_NAME = 'default'
def __init__(self, file_path, parsed):
"""Creates a new ModuleYamlInfo.
Args:
file_path: str, The full path the file that was parsed.
parsed: appinfo.AppInfoExternal, parsed Application Configuration.
"""
super(ModuleYamlInfo, self).__init__(file_path, parsed)
self.module = parsed.module
# All env: 2 apps are hermetic. All vm: false apps are not hermetic.
# vm: true apps are hermetic IFF they don't use static files.
if util.IsFlex(parsed.env):
self.is_hermetic = True
elif parsed.vm:
for urlmap in parsed.handlers:
if urlmap.static_dir or urlmap.static_files:
self.is_hermetic = False
break
else:
self.is_hermetic = True
else:
self.is_hermetic = False
self.is_vm = parsed.runtime == 'vm' or self.is_hermetic
self.runtime = (parsed.GetEffectiveRuntime()
if self.is_vm else parsed.runtime)
if self.is_vm:
self._UpdateManagedVMConfig()
@staticmethod
def FromFile(file_path):
"""Parses the given module file.
Args:
file_path: str, The full path to the module file.
Raises:
YamlParseError: If the file is not a valid Yaml-file.
YamlValidationError: If validation of parsed info fails.
Returns:
A ModuleYamlInfo object for the parsed file.
"""
try:
parsed = _YamlInfo._ParseYaml(file_path, appinfo_includes.Parse)
except (yaml_errors.Error, appinfo_errors.Error) as e:
raise YamlParseError(file_path, e)
if parsed.runtime == 'vm':
vm_runtime = parsed.GetEffectiveRuntime()
else:
vm_runtime = None
if parsed.runtime == 'python':
raise YamlValidationError(
'Module [{module}] uses unsupported Python 2.5 runtime. '
'Please use [runtime: python27] instead.'.format(
module=parsed.module))
elif parsed.runtime == 'python-compat':
raise YamlValidationError(
'"python-compat" is not a supported runtime.')
if util.IsFlex(parsed.env) and vm_runtime == 'python27':
raise YamlValidationError(
'The "python27" is not a valid runtime in env: 2. '
'Please use [python-compat] instead.')
if not parsed.module:
parsed.module = ModuleYamlInfo.DEFAULT_MODULE_NAME
_CheckIllegalAttribute(
name='application',
yaml_info=parsed,
extractor_func=lambda yaml: yaml.application,
file_path=file_path,
msg=HINT_PROJECT)
_CheckIllegalAttribute(
name='version',
yaml_info=parsed,
extractor_func=lambda yaml: yaml.version,
file_path=file_path,
msg=HINT_VERSION)
return ModuleYamlInfo(file_path, parsed)
def RequiresImage(self):
"""Returns True if we'll need to build a docker image."""
return self.is_vm
def _UpdateManagedVMConfig(self):
"""Overwrites vm_settings for Managed VMs modules.
Sets has_docker_image to be always True. Required for transition period
until all images in production are pushed via gcloud (and therefore all
builds happen locally in the SDK).
Also sets module_yaml_path which is needed for some runtimes.
Raises:
AppConfigError: if the function was called for the module which is not a
Managed VM module.
"""
if not self.is_vm:
raise AppConfigError('This is not a Managed VM module. vm != True')
if not self.parsed.vm_settings:
self.parsed.vm_settings = appinfo.VmSettings()
self.parsed.vm_settings['has_docker_image'] = True
self.parsed.vm_settings['module_yaml_path'] = os.path.basename(self.file)
def _CheckIllegalAttribute(name, yaml_info, extractor_func, file_path, msg=''):
"""Validates that an illegal attribute is not set.
Args:
name: str, The name of the attribute in the yaml files.
yaml_info: AppInfoExternal, The yaml to validate.
extractor_func: func(AppInfoExternal)->str, A function to extract the
value of the attribute from a _YamlInfo object.
file_path: str, The path of file from which yaml_info was parsed.
msg: str, Message to couple with the error
Raises:
YamlValidationError: If illegal attribute is set.
"""
attribute = extractor_func(yaml_info)
if attribute is not None:
# Disallow use of the given attribute.
raise YamlValidationError(
'The [{0}] field is specified in file [{1}]. This field is not used '
'by gcloud and must be removed. '.format(name, file_path) + msg)
class AppConfigSet(object):
"""Parses and holds information about the set of config files for an app."""
YAML_EXTS = ['.yaml', '.yml']
IGNORED_YAMLS = ['backends']
def __init__(self, files):
"""Creates a new AppConfigSet.
This will scan all files and directories in items, parse them, and
validate their contents.
Args:
files: str, The files to load into the config set.
Raises:
AppConfigSetLoadError: If validation fails on the given files.
      YamlParseError: If a file fails to parse.
"""
self.__config_yamls = {}
self.__module_yamls = {}
self.__error = False
for f in files:
if os.path.isfile(f):
try:
if not self.__LoadYamlFile(f):
self.__Error('File [%s] is not a valid deployable yaml file.', f)
except YamlValidationError as err:
self.__Error('{0}'.format(err))
elif os.path.isdir(f):
self.__Error('Directories are not supported [%s]. You must provide '
'explicit yaml files.', f)
else:
self.__Error(
'File [%s] not found.', f)
if self.__error:
raise AppConfigSetLoadError()
def __Error(self, *args, **kwargs):
log.error(*args, **kwargs)
self.__error = True
def Modules(self):
"""Gets the modules that were found.
Returns:
{str, ModuleYamlInfo}, A mapping of module name to definition.
"""
return dict(self.__module_yamls)
def HermeticModules(self):
"""Gets the hermetic modules that were found.
Returns:
{str, ModuleYamlInfo}, A mapping of module name to definition.
"""
return dict((key, mod) for (key, mod) in self.__module_yamls.iteritems()
if mod.is_hermetic)
def NonHermeticModules(self):
"""Gets the non-hermetic modules that were found.
Returns:
{str, ModuleYamlInfo}, A mapping of module name to definition.
"""
return dict((key, mod) for (key, mod) in self.__module_yamls.iteritems()
if not mod.is_hermetic)
def Configs(self):
"""Gets the configs that were found.
Returns:
{str, ConfigYamlInfo}, A mapping of config name to definition.
"""
return dict(self.__config_yamls)
def __IsInterestingFile(self, f):
"""Determines if the given file is something we should try to parse.
Args:
f: str, The full path to the file.
Returns:
True if the file is a module yaml or a config yaml.
"""
(base, ext) = os.path.splitext(os.path.basename(f))
if ext not in AppConfigSet.YAML_EXTS:
return False # Not a yaml file.
if base in AppConfigSet.IGNORED_YAMLS:
return False # Something we are explicitly not supporting.
return True
def __LoadYamlFile(self, file_path):
"""Loads a single yaml file into a configuration object.
Args:
file_path: str, The full path of the file to parse.
Raises:
YamlValidationError: If the info in the yaml file is invalid.
Returns:
True if the file was valid, False if it is not a valid module or config
file.
"""
file_path = os.path.abspath(file_path)
if not self.__IsInterestingFile(file_path):
return False
yaml = ConfigYamlInfo.FromFile(file_path)
if yaml:
existing_config = self.__config_yamls.get(yaml.config)
if existing_config:
self.__Error('Found multiple files for config [%s]: [%s, %s]',
yaml.config, self.__RelPath(yaml),
self.__RelPath(existing_config))
else:
self.__config_yamls[yaml.config] = yaml
else:
yaml = ModuleYamlInfo.FromFile(file_path)
existing_module = self.__module_yamls.get(yaml.module)
if existing_module:
self.__Error('Found multiple files declaring module [%s]: [%s, %s]',
yaml.module, self.__RelPath(yaml),
self.__RelPath(existing_module))
else:
self.__module_yamls[yaml.module] = yaml
return True
def __RelPath(self, yaml):
# We are going to display full file paths for now.
return yaml.file
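# Usage sketch (hedged, not part of the original module; the file names are
# hypothetical):
#
#   configs = AppConfigSet(['app.yaml', 'cron.yaml'])
#   configs.Modules()   # {'default': ModuleYamlInfo parsed from app.yaml}
#   configs.Configs()   # {'cron': ConfigYamlInfo parsed from cron.yaml}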
|
|
# testing/requirements.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
import sys
from . import exclusions
from .. import util
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or
self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def server_side_cursors(self):
"""Target dialect must support server side cursors."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_server_side_cursors
], "no server side cursors support")
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_option_reflection(self):
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def json_type(self):
"""target platform implements a native JSON type."""
return exclusions.closed()
@property
def json_array_indexes(self):
""""target platform supports numeric array indexes
within a JSON structure"""
return self.json_type
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including PostgreSQL don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections)
@property
def timing_intensive(self):
return exclusions.requires_tag("timing_intensive")
@property
def memory_intensive(self):
return exclusions.requires_tag("memory_intensive")
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage + python 3.3
"""
return exclusions.skip_if(
lambda config: util.py3k and config.options.has_coverage,
"Stability issues with coverage + py3k"
)
@property
def python2(self):
return exclusions.skip_if(
lambda: sys.version_info >= (3,),
"Python version 2.xx is required."
)
@property
def python3(self):
return exclusions.skip_if(
lambda: sys.version_info < (3,),
"Python version 3.xx is required."
)
@property
def cpython(self):
return exclusions.only_if(
lambda: util.cpython,
"cPython interpreter needed"
)
@property
def non_broken_pickle(self):
from sqlalchemy.util import pickle
return exclusions.only_if(
lambda: not util.pypy and pickle.__name__ == 'cPickle'
or sys.version_info >= (3, 2),
"Needs cPickle+cPython or newer Python 3 pickle"
)
@property
def predictable_gc(self):
"""target platform must remove all cycles unconditionally when
gc.collect() is called, as well as clean out unreferenced subclasses.
"""
return self.cpython
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as PostgreSQL notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled"
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
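# Usage sketch (hedged, not part of the original module): an external dialect's
# test suite subclasses SuiteRequirements and overrides individual rules, e.g.:
#
#   class MyDialectRequirements(SuiteRequirements):  # hypothetical dialect
#       @property
#       def returning(self):
#           return exclusions.closed()   # dialect has no RETURNING support
#
#       @property
#       def window_functions(self):
#           return exclusions.open()     # dialect does support window functions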
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close()d and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
try:
import simplejson as json
except ImportError:
import json
import mimetypes
import re
import time
from datetime import datetime
from urllib import unquote, quote
from hashlib import md5
from random import shuffle
from eventlet import sleep, GreenPile, Timeout
from eventlet.queue import Queue
from eventlet.timeout import Timeout
from swift.common.utils import ContextPool, normalize_timestamp, TRUE_VALUES, \
public
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation, \
CONTAINER_LISTING_LIMIT, MAX_FILE_SIZE
from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ListingIterNotFound, \
ListingIterNotAuthorized, ListingIterError
from swift.common.http import is_success, is_client_error, HTTP_CONTINUE, \
HTTP_CREATED, HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, \
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE
from swift.proxy.controllers.base import Controller, delay_denial
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, Response, \
HTTPClientDisconnect
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a segmented object in Swift.
If there's a failure that cuts the transfer short, the response's
    `status_int` will be updated (just for logging, since the original
status would have already been sent to the client).
:param controller: The ObjectController instance to work with.
:param container: The container the object segments are within.
:param listing: The listing of object segments to iterate over; this may
be an iterator or list that returns dicts with 'name' and
'bytes' keys.
:param response: The swob.Response this iterable is associated with, if
any (default: None)
"""
def __init__(self, controller, container, listing, response=None):
self.controller = controller
self.container = container
self.listing = iter(listing)
self.segment = 0
self.segment_dict = None
self.segment_peek = None
self.seek = 0
self.segment_iter = None
# See NOTE: swift_conn at top of file about this.
self.segment_iter_swift_conn = None
self.position = 0
self.response = response
if not self.response:
self.response = Response()
self.next_get_time = 0
def _load_next_segment(self):
"""
Loads the self.segment_iter with the next object segment's contents.
:raises: StopIteration when there are no more object segments.
"""
try:
self.segment += 1
self.segment_dict = self.segment_peek or self.listing.next()
self.segment_peek = None
partition, nodes = self.controller.app.object_ring.get_nodes(
self.controller.account_name, self.container,
self.segment_dict['name'])
path = '/%s/%s/%s' % (self.controller.account_name, self.container,
self.segment_dict['name'])
req = Request.blank(path)
if self.seek:
req.range = 'bytes=%s-' % self.seek
self.seek = 0
if self.segment > self.controller.app.rate_limit_after_segment:
sleep(max(self.next_get_time - time.time(), 0))
self.next_get_time = time.time() + \
1.0 / self.controller.app.rate_limit_segments_per_sec
shuffle(nodes)
resp = self.controller.GETorHEAD_base(req, _('Object'), partition,
self.controller.iter_nodes(partition, nodes,
self.controller.app.object_ring), path,
len(nodes))
if not is_success(resp.status_int):
raise Exception(_('Could not load object segment %(path)s:' \
' %(status)s') % {'path': path, 'status': resp.status_int})
self.segment_iter = resp.app_iter
# See NOTE: swift_conn at top of file about this.
self.segment_iter_swift_conn = getattr(resp, 'swift_conn', None)
except StopIteration:
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = HTTP_SERVICE_UNAVAILABLE
raise
def next(self):
return iter(self).next()
def __iter__(self):
""" Standard iterator function that returns the object's contents. """
try:
while True:
if not self.segment_iter:
self._load_next_segment()
while True:
with ChunkReadTimeout(self.controller.app.node_timeout):
try:
chunk = self.segment_iter.next()
break
except StopIteration:
self._load_next_segment()
self.position += len(chunk)
yield chunk
except StopIteration:
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = HTTP_SERVICE_UNAVAILABLE
raise
def app_iter_range(self, start, stop):
"""
Non-standard iterator function for use with Webob in serving Range
requests more quickly. This will skip over segments and do a range
request on the first segment to return data from, if needed.
:param start: The first byte (zero-based) to return. None for 0.
:param stop: The last byte (zero-based) to return. None for end.
"""
try:
if start:
self.segment_peek = self.listing.next()
while start >= self.position + self.segment_peek['bytes']:
self.segment += 1
self.position += self.segment_peek['bytes']
self.segment_peek = self.listing.next()
self.seek = start - self.position
else:
start = 0
if stop is not None:
length = stop - start
else:
length = None
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
# See NOTE: swift_conn at top of file about this.
if self.segment_iter_swift_conn:
try:
self.segment_iter_swift_conn.close()
except Exception:
pass
self.segment_iter_swift_conn = None
if self.segment_iter:
try:
while self.segment_iter.next():
pass
except Exception:
pass
self.segment_iter = None
except StopIteration:
raise
except (Exception, Timeout), err:
if not getattr(err, 'swift_logged', False):
self.controller.app.logger.exception(_('ERROR: While '
'processing manifest /%(acc)s/%(cont)s/%(obj)s'),
{'acc': self.controller.account_name,
'cont': self.controller.container_name,
'obj': self.controller.object_name})
err.swift_logged = True
self.response.status_int = HTTP_SERVICE_UNAVAILABLE
raise
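# Usage sketch: ObjectController.GETorHEAD below wires a SegmentedIterable in
# as the response body for manifest objects with many segments, roughly:
#
#   resp.app_iter = SegmentedIterable(self, lcontainer,
#                                     self._listing_iter(lcontainer, lprefix,
#                                                        req.environ),
#                                     resp)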
class ObjectController(Controller):
"""WSGI controller for object requests."""
server_type = 'Object'
def __init__(self, app, account_name, container_name, object_name,
**kwargs):
Controller.__init__(self, app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
self.object_name = unquote(object_name)
def _listing_iter(self, lcontainer, lprefix, env):
lpartition, lnodes = self.app.container_ring.get_nodes(
self.account_name, lcontainer)
marker = ''
while True:
lreq = Request.blank('i will be overridden by env', environ=env)
# Don't quote PATH_INFO, by WSGI spec
lreq.environ['PATH_INFO'] = \
'/%s/%s' % (self.account_name, lcontainer)
lreq.environ['REQUEST_METHOD'] = 'GET'
lreq.environ['QUERY_STRING'] = \
'format=json&prefix=%s&marker=%s' % (quote(lprefix),
quote(marker))
shuffle(lnodes)
lresp = self.GETorHEAD_base(lreq, _('Container'),
lpartition, lnodes, lreq.path_info,
len(lnodes))
if 'swift.authorize' in env:
lreq.acl = lresp.headers.get('x-container-read')
aresp = env['swift.authorize'](lreq)
if aresp:
raise ListingIterNotAuthorized(aresp)
if lresp.status_int == HTTP_NOT_FOUND:
raise ListingIterNotFound()
elif not is_success(lresp.status_int):
raise ListingIterError()
if not lresp.body:
break
sublisting = json.loads(lresp.body)
if not sublisting:
break
marker = sublisting[-1]['name']
for obj in sublisting:
yield obj
def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
container_info = self.container_info(self.account_name,
self.container_name)
req.acl = container_info['read_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
shuffle(nodes)
resp = self.GETorHEAD_base(req, _('Object'), partition,
self.iter_nodes(partition, nodes, self.app.object_ring),
req.path_info, len(nodes))
        # Whether we get a 416 Requested Range Not Satisfiable or not,
        # we should request the manifest because the size of the manifest
        # file can be non-zero. After checking the manifest, redo the range
        # request on the whole object.
if req.range:
req_range = req.range
req.range = None
resp2 = self.GETorHEAD_base(req, _('Object'), partition,
self.iter_nodes(partition,
nodes,
self.app.object_ring),
req.path_info, len(nodes))
if 'x-object-manifest' not in resp2.headers:
return resp
resp = resp2
req.range = str(req_range)
if 'x-object-manifest' in resp.headers:
lcontainer, lprefix = \
resp.headers['x-object-manifest'].split('/', 1)
lcontainer = unquote(lcontainer)
lprefix = unquote(lprefix)
try:
listing = list(self._listing_iter(lcontainer, lprefix,
req.environ))
except ListingIterNotFound:
return HTTPNotFound(request=req)
except ListingIterNotAuthorized, err:
return err.aresp
except ListingIterError:
return HTTPServerError(request=req)
if len(listing) > CONTAINER_LISTING_LIMIT:
resp = Response(headers=resp.headers, request=req,
conditional_response=True)
if req.method == 'HEAD':
# These shenanigans are because swob translates the HEAD
# request into a swob EmptyResponse for the body, which
# has a len, which eventlet translates as needing a
# content-length header added. So we call the original
# swob resp for the headers but return an empty iterator
# for the body.
def head_response(environ, start_response):
resp(environ, start_response)
return iter([])
head_response.status_int = resp.status_int
return head_response
else:
resp.app_iter = SegmentedIterable(self, lcontainer,
self._listing_iter(lcontainer, lprefix, req.environ),
resp)
else:
# For objects with a reasonable number of segments, we'll serve
# them with a set content-length and computed etag.
if listing:
content_length = sum(o['bytes'] for o in listing)
last_modified = max(o['last_modified'] for o in listing)
last_modified = datetime(*map(int, re.split('[^\d]',
last_modified)[:-1]))
etag = md5(
''.join(o['hash'] for o in listing)).hexdigest()
else:
content_length = 0
last_modified = resp.last_modified
etag = md5().hexdigest()
resp = Response(headers=resp.headers, request=req,
conditional_response=True)
resp.app_iter = SegmentedIterable(self, lcontainer, listing,
resp)
resp.content_length = content_length
resp.last_modified = last_modified
resp.etag = etag
resp.headers['accept-ranges'] = 'bytes'
return resp
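    # Summary of the manifest handling in GETorHEAD above (descriptive
    # comment): a response carrying 'x-object-manifest: <container>/<prefix>'
    # identifies a large-object manifest. Its segments are listed from
    # <container> and either streamed through SegmentedIterable driven by a
    # fresh listing iterator when there are more than CONTAINER_LISTING_LIMIT
    # of them (so the full listing is never held in memory), or served with a
    # precomputed content-length (sum of segment sizes) and etag (md5 of the
    # concatenated segment hashes) when the listing is small enough.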
@public
@delay_denial
def GET(self, req):
"""Handler for HTTP GET requests."""
return self.GETorHEAD(req)
@public
@delay_denial
def HEAD(self, req):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
@public
@delay_denial
def POST(self, req):
"""HTTP POST request handler."""
if 'x-delete-after' in req.headers:
try:
x_delete_after = int(req.headers['x-delete-after'])
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-After')
req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
if self.app.object_post_as_copy:
req.method = 'PUT'
req.path_info = '/%s/%s/%s' % (self.account_name,
self.container_name, self.object_name)
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
self.object_name))
req.headers['X-Fresh-Metadata'] = 'true'
req.environ['swift_versioned_copy'] = True
resp = self.PUT(req)
# Older editions returned 202 Accepted on object POSTs, so we'll
# convert any 201 Created responses to that for compatibility with
# picky clients.
if resp.status_int != HTTP_CREATED:
return resp
return HTTPAccepted(request=req)
else:
error_response = check_metadata(req, 'object')
if error_response:
return error_response
container_info = self.container_info(
self.account_name, self.container_name,
account_autocreate=self.app.account_autocreate)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
if 'x-delete-at' in req.headers:
try:
x_delete_at = int(req.headers['x-delete-at'])
if x_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past',
request=req, content_type='text/plain')
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-At')
delete_at_container = str(x_delete_at /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_part = delete_at_nodes = None
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
headers = []
for container in containers:
nheaders = dict(req.headers.iteritems())
nheaders['Connection'] = 'close'
nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
nheaders['X-Container-Partition'] = container_partition
nheaders['X-Container-Device'] = container['device']
if delete_at_nodes:
node = delete_at_nodes.pop(0)
nheaders['X-Delete-At-Host'] = '%(ip)s:%(port)s' % node
nheaders['X-Delete-At-Partition'] = delete_at_part
nheaders['X-Delete-At-Device'] = node['device']
headers.append(nheaders)
resp = self.make_requests(req, self.app.object_ring, partition,
'POST', req.path_info, headers)
return resp
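    # Note on the object_post_as_copy branch above (descriptive comment): the
    # POST is rewritten as a zero-length PUT with 'X-Copy-From' pointing at
    # the object itself and 'X-Fresh-Metadata: true', so the object is copied
    # onto itself with only the metadata from this request applied; the
    # resulting 201 Created is then reported to the client as 202 Accepted
    # for backwards compatibility.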
def _send_file(self, conn, path):
"""Method for a file PUT coro"""
while True:
chunk = conn.queue.get()
if not conn.failed:
try:
with ChunkWriteTimeout(self.app.node_timeout):
conn.send(chunk)
except (Exception, ChunkWriteTimeout):
conn.failed = True
self.exception_occurred(conn.node, _('Object'),
_('Trying to write to %s') % path)
conn.queue.task_done()
def _connect_put_node(self, nodes, part, path, headers,
logger_thread_locals):
"""Method for a file PUT connect"""
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'PUT', path, headers)
with Timeout(self.app.node_timeout):
resp = conn.getexpect()
if resp.status == HTTP_CONTINUE:
conn.node = node
return conn
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node)
except:
self.exception_occurred(node, _('Object'),
_('Expect: 100-continue on %s') % path)
@public
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
container_info = self.container_info(
self.account_name, self.container_name,
account_autocreate=self.app.account_autocreate)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
object_versions = container_info['versions']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
if 'x-delete-after' in req.headers:
try:
x_delete_after = int(req.headers['x-delete-after'])
except ValueError:
return HTTPBadRequest(request=req,
content_type='text/plain',
body='Non-integer X-Delete-After')
req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
if 'x-delete-at' in req.headers:
try:
x_delete_at = int(req.headers['x-delete-at'])
if x_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past',
request=req, content_type='text/plain')
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
body='Non-integer X-Delete-At')
delete_at_container = str(x_delete_at /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_part = delete_at_nodes = None
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and to check object versions
if 'x-timestamp' in req.headers or (object_versions and not
req.environ.get('swift_versioned_copy')):
hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
environ={'REQUEST_METHOD': 'HEAD'})
hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
hreq.path_info, len(nodes))
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req.headers['X-Timestamp'] = \
normalize_timestamp(float(req.headers['x-timestamp']))
if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
float(hresp.environ['swift_x_timestamp']) >= \
float(req.headers['x-timestamp']):
return HTTPAccepted(request=req)
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
else:
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
# Sometimes the 'content-type' header exists, but is set to None.
content_type_manually_set = True
if not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
content_type_manually_set = False
error_response = check_object_creation(req, self.object_name)
if error_response:
return error_response
if object_versions and not req.environ.get('swift_versioned_copy'):
is_manifest = 'x-object-manifest' in req.headers or \
'x-object-manifest' in hresp.headers
if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
# This is a version manifest and needs to be handled
# differently. First copy the existing data to a new object,
# then write the data from this request to the version manifest
# object.
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(self.object_name)
lprefix = prefix_len + self.object_name + '/'
ts_source = hresp.environ.get('swift_x_timestamp')
if ts_source is None:
ts_source = time.mktime(time.strptime(
hresp.headers['last-modified'],
'%a, %d %b %Y %H:%M:%S GMT'))
new_ts = normalize_timestamp(ts_source)
vers_obj_name = lprefix + new_ts
copy_headers = {
'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
copy_req = Request.blank(req.path_info, headers=copy_headers,
environ=copy_environ)
copy_resp = self.COPY(copy_req)
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
return HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
reader = req.environ['wsgi.input'].read
data_source = iter(lambda: reader(self.app.client_chunk_size), '')
source_header = req.headers.get('X-Copy-From')
source_resp = None
if source_header:
source_header = unquote(source_header)
acct = req.path_info.split('/', 2)[1]
if isinstance(acct, unicode):
acct = acct.encode('utf-8')
if not source_header.startswith('/'):
source_header = '/' + source_header
source_header = '/' + acct + source_header
try:
src_container_name, src_obj_name = \
source_header.split('/', 3)[2:]
except ValueError:
return HTTPPreconditionFailed(request=req,
                    body='X-Copy-From header must be of the form '
'<container name>/<object name>')
source_req = req.copy_get()
source_req.path_info = source_header
source_req.headers['X-Newest'] = 'true'
orig_obj_name = self.object_name
orig_container_name = self.container_name
self.object_name = src_obj_name
self.container_name = src_container_name
source_resp = self.GET(source_req)
if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
return source_resp
self.object_name = orig_obj_name
self.container_name = orig_container_name
new_req = Request.blank(req.path_info,
environ=req.environ, headers=req.headers)
data_source = source_resp.app_iter
new_req.content_length = source_resp.content_length
if new_req.content_length is None:
# This indicates a transfer-encoding: chunked source object,
# which currently only happens because there are more than
# CONTAINER_LISTING_LIMIT segments in a segmented object. In
# this case, we're going to refuse to do the server-side copy.
return HTTPRequestEntityTooLarge(request=req)
new_req.etag = source_resp.etag
# we no longer need the X-Copy-From header
del new_req.headers['X-Copy-From']
if not content_type_manually_set:
new_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
if new_req.headers.get('x-fresh-metadata', 'false').lower() \
not in TRUE_VALUES:
for k, v in source_resp.headers.items():
if k.lower().startswith('x-object-meta-'):
new_req.headers[k] = v
for k, v in req.headers.items():
if k.lower().startswith('x-object-meta-'):
new_req.headers[k] = v
req = new_req
node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
pile = GreenPile(len(nodes))
for container in containers:
nheaders = dict(req.headers.iteritems())
nheaders['Connection'] = 'close'
nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
nheaders['X-Container-Partition'] = container_partition
nheaders['X-Container-Device'] = container['device']
nheaders['Expect'] = '100-continue'
if delete_at_nodes:
node = delete_at_nodes.pop(0)
nheaders['X-Delete-At-Host'] = '%(ip)s:%(port)s' % node
nheaders['X-Delete-At-Partition'] = delete_at_part
nheaders['X-Delete-At-Device'] = node['device']
pile.spawn(self._connect_put_node, node_iter, partition,
req.path_info, nheaders, self.app.logger.thread_locals)
conns = [conn for conn in pile if conn]
if len(conns) <= len(nodes) / 2:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
return HTTPServiceUnavailable(request=req)
chunked = req.headers.get('transfer-encoding')
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if chunked:
[conn.queue.put('0\r\n\r\n') for conn in conns]
break
bytes_transferred += len(chunk)
if bytes_transferred > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk)
if chunked else chunk)
else:
conns.remove(conn)
if len(conns) <= len(nodes) / 2:
self.app.logger.error(_('Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections'),
{'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
return HTTPServiceUnavailable(request=req)
for conn in conns:
if conn.queue.unfinished_tasks:
conn.queue.join()
conns = [conn for conn in conns if not conn.failed]
except ChunkReadTimeout, err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
return HTTPRequestTimeout(request=req)
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
return HTTPClientDisconnect(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
return HTTPClientDisconnect(request=req)
statuses = []
reasons = []
bodies = []
etags = set()
for conn in conns:
try:
with Timeout(self.app.node_timeout):
response = conn.getresponse()
statuses.append(response.status)
reasons.append(response.reason)
bodies.append(response.read())
if response.status >= HTTP_INTERNAL_SERVER_ERROR:
self.error_occurred(conn.node,
_('ERROR %(status)d %(body)s From Object Server ' \
're: %(path)s') % {'status': response.status,
'body': bodies[-1][:1024], 'path': req.path})
elif is_success(response.status):
etags.add(response.getheader('etag').strip('"'))
except (Exception, Timeout):
self.exception_occurred(conn.node, _('Object'),
_('Trying to get final status of PUT to %s') % req.path)
if len(etags) > 1:
self.app.logger.error(
_('Object servers returned %s mismatched etags'), len(etags))
return HTTPServerError(request=req)
etag = len(etags) and etags.pop() or None
while len(statuses) < len(nodes):
statuses.append(HTTP_SERVICE_UNAVAILABLE)
reasons.append('')
bodies.append('')
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag)
if source_header:
resp.headers['X-Copied-From'] = quote(
source_header.split('/', 2)[2])
if 'last-modified' in source_resp.headers:
resp.headers['X-Copied-From-Last-Modified'] = \
source_resp.headers['last-modified']
for k, v in req.headers.items():
if k.lower().startswith('x-object-meta-'):
resp.headers[k] = v
resp.last_modified = float(req.headers['X-Timestamp'])
return resp
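    # Worked example of the quorum checks in PUT above (illustrative only):
    # with a 3-replica ring len(nodes) is 3, so len(nodes) / 2 is 1 and at
    # least 2 object-server connections must survive both the connect phase
    # and the send phase; dropping to a single connection triggers the 503
    # responses above.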
@public
@delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(self.account_name,
self.container_name)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
object_versions = container_info['versions']
if object_versions:
# this is a version manifest and needs to be handled differently
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(self.object_name)
lprefix = prefix_len + self.object_name + '/'
last_item = None
try:
for last_item in self._listing_iter(lcontainer, lprefix,
req.environ):
pass
except ListingIterNotFound:
# no worries, last_item is None
pass
except ListingIterNotAuthorized, err:
return err.aresp
except ListingIterError:
return HTTPServerError(request=req)
if last_item:
# there are older versions so copy the previous version to the
# current object and delete the previous version
orig_container = self.container_name
orig_obj = self.object_name
self.container_name = lcontainer
self.object_name = last_item['name']
copy_path = '/' + self.account_name + '/' + \
self.container_name + '/' + self.object_name
copy_headers = {'X-Newest': 'True',
'Destination': orig_container + '/' + orig_obj
}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
creq = Request.blank(copy_path, headers=copy_headers,
environ=copy_environ)
copy_resp = self.COPY(creq)
if is_client_error(copy_resp.status_int):
# some user error, maybe permissions
return HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
# reset these because the COPY changed them
self.container_name = lcontainer
self.object_name = last_item['name']
new_del_req = Request.blank(copy_path, environ=req.environ)
container_info = self.container_info(self.account_name,
self.container_name)
container_partition = container_info['partition']
containers = container_info['nodes']
new_del_req.acl = container_info['write_acl']
new_del_req.path_info = copy_path
req = new_del_req
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
partition, nodes = self.app.object_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req.headers['X-Timestamp'] = \
normalize_timestamp(float(req.headers['x-timestamp']))
except ValueError:
return HTTPBadRequest(request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
else:
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
headers = []
for container in containers:
nheaders = dict(req.headers.iteritems())
nheaders['Connection'] = 'close'
nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
nheaders['X-Container-Partition'] = container_partition
nheaders['X-Container-Device'] = container['device']
headers.append(nheaders)
resp = self.make_requests(req, self.app.object_ring,
partition, 'DELETE', req.path_info, headers)
return resp
@public
@delay_denial
def COPY(self, req):
"""HTTP COPY request handler."""
dest = req.headers.get('Destination')
if not dest:
return HTTPPreconditionFailed(request=req,
body='Destination header required')
dest = unquote(dest)
if not dest.startswith('/'):
dest = '/' + dest
try:
_junk, dest_container, dest_object = dest.split('/', 2)
except ValueError:
return HTTPPreconditionFailed(request=req,
body='Destination header must be of the form '
'<container name>/<object name>')
source = '/' + self.container_name + '/' + self.object_name
self.container_name = dest_container
self.object_name = dest_object
# re-write the existing request as a PUT instead of creating a new one
# since this one is already attached to the posthooklogger
req.method = 'PUT'
req.path_info = '/' + self.account_name + dest
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote(source)
del req.headers['Destination']
return self.PUT(req)
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""URL provides modules to download files via the network.
It can refer to HTTP and FTP files, which enables workflows to be distributed
without their associated data.
This package uses a local cache, inside the per-user VisTrails directory. This
way, files that haven't been changed do not need to be downloaded again. The
check is performed efficiently using HTTP headers.
"""
from __future__ import division
from datetime import datetime
import email.utils
import hashlib
import os
import re
import urllib
import urllib2
from vistrails.core.bundles.pyimport import py_import
from vistrails.core.configuration import get_vistrails_persistent_configuration
from vistrails.core import debug
import vistrails.core.modules.basic_modules
from vistrails.core.modules.basic_modules import PathObject
import vistrails.core.modules.module_registry
from vistrails.core.modules.vistrails_module import Module, ModuleError
from vistrails.core.system import current_dot_vistrails, strptime
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler
import vistrails.gui.repository
from vistrails.gui.utils import show_warning
from vistrails.core.repository.poster.encode import multipart_encode
from vistrails.core.repository.poster.streaminghttp import register_openers
from .identifiers import identifier
from .http_directory import download_directory
from .https_if_available import build_opener
package_directory = None
###############################################################################
class Downloader(object):
def __init__(self, url, module, insecure):
self.url = url
self.module = module
self.opener = build_opener(insecure=insecure)
def execute(self):
""" Tries to download a file from url.
Returns the path to the local file.
"""
self.local_filename = os.path.join(package_directory,
urllib.quote_plus(self.url))
# Before download
self.pre_download()
# Send request
try:
response = self.send_request()
except urllib2.URLError, e:
if self.is_in_local_cache:
debug.warning("A network error occurred. DownloadFile will "
"use a cached version of the file")
return self.local_filename
else:
raise ModuleError(
self.module,
"Network error: %s" % debug.format_exception(e))
if response is None:
return self.local_filename
# Read response headers
self.size_header = None
if not self.read_headers(response):
return self.local_filename
# Download
self.download(response)
# Post download
self.post_download(response)
return self.local_filename
def pre_download(self):
pass
def send_request(self):
return self.opener.open(self.url)
def read_headers(self, response):
return True
def download(self, response):
try:
dl_size = 0
CHUNKSIZE = 4096
f2 = open(self.local_filename, 'wb')
while True:
if self.size_header is not None:
self.module.logging.update_progress(
self.module,
dl_size * 1.0/self.size_header)
chunk = response.read(CHUNKSIZE)
if not chunk:
break
dl_size += len(chunk)
f2.write(chunk)
f2.close()
response.close()
except Exception, e:
try:
os.unlink(self.local_filename)
except OSError:
pass
raise ModuleError(
self.module,
"Error retrieving URL: %s" % debug.format_exception(e))
def post_download(self, response):
pass
@property
def is_in_local_cache(self):
return os.path.isfile(self.local_filename)
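# A minimal usage sketch of the Downloader template above (assumes
# initialize() has already set package_directory and that `module` is a
# VisTrails Module instance providing logging and error reporting):
#
#     dl = Downloader('http://example.org/data.csv', module, insecure=False)
#     local_path = dl.execute()
#
# execute() runs pre_download(), send_request(), read_headers(), download()
# and post_download() in that order; subclasses such as HTTPDownloader below
# override these hooks to add caching behaviour.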
class HTTPDownloader(Downloader):
def pre_download(self):
# Get ETag from disk
try:
with open(self.local_filename + '.etag') as etag_file:
self.etag = etag_file.read()
except IOError:
self.etag = None
def send_request(self):
try:
request = urllib2.Request(self.url)
if self.etag is not None:
request.add_header(
'If-None-Match',
self.etag)
try:
mtime = email.utils.formatdate(
os.path.getmtime(self.local_filename),
usegmt=True)
request.add_header(
'If-Modified-Since',
mtime)
except OSError:
pass
return self.opener.open(request)
except urllib2.HTTPError, e:
if e.code == 304:
# Not modified
return None
raise
def read_headers(self, response):
try:
self.mod_header = response.headers['last-modified']
except KeyError:
self.mod_header = None
try:
size_header = response.headers['content-length']
if not size_header:
raise ValueError
self.size_header = int(size_header)
except (KeyError, ValueError):
self.size_header = None
return True
def _is_outdated(self):
local_time = datetime.utcfromtimestamp(
os.path.getmtime(self.local_filename))
try:
remote_time = strptime(self.mod_header,
"%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
try:
remote_time = strptime(self.mod_header,
"%a, %d %B %Y %H:%M:%S %Z")
except ValueError:
# unable to parse last-modified header, download file again
debug.warning("Unable to parse Last-Modified header, "
"downloading file")
return True
return remote_time > local_time
def download(self, response):
if (not self.is_in_local_cache or
not self.mod_header or self._is_outdated()):
Downloader.download(self, response)
def post_download(self, response):
try:
etag = response.headers['ETag']
except KeyError:
pass
else:
with open(self.local_filename + '.etag', 'w') as etag_file:
                etag_file.write(etag)
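# How the HTTP caching above works (descriptive comment): pre_download()
# loads a previously saved '<local_filename>.etag'; send_request() then makes
# a conditional request with 'If-None-Match' and 'If-Modified-Since'. A 304
# reply makes send_request() return None, so execute() falls back to the
# cached file; on a 200 reply the body is re-downloaded only when the cache
# is missing or the Last-Modified header is newer than the local copy
# (_is_outdated()), and post_download() stores the new ETag.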
class SSHDownloader(object):
""" SSH downloader: downloads files via SCP, using paramiko and scp.
Recognized URL schemes are:
ssh://user[:password]@host[:port]/absolute/path
Examples:
ssh://john@vistrails.nyu.edu/home/john/example.txt
ssh://eve:my%20secret@google.com/tmp/test%20file.bin
Note that both password and path are url-encoded, that the path
is absolute, and that the username must be specified
scp://[user@]host:path
Examples:
scp://john@vistrails.nyu.edu:files/test.txt
scp://poly.edu:/tmp/test.bin
Note that nothing is url encoded, that the path can be relative
    (to the user's home directory) and that no password or port can
    be specified
"""
SSH_FORMAT = re.compile(
r'^'
'ssh://' # Protocol
'([A-Za-z0-9_/+.-]+)' # 1 Username
'(?::([^@]+))?' # 2 Password
'@([A-Za-z0-9_.-]+)' # 3 Hostname
'(?::([0-9]+))?' # 4 Port number
'(/.+)' # 5 Path (url-encoded!)
'$'
)
SCP_FORMAT = re.compile(
r'^'
'(?:scp://)?' # Protocol
'(?:([A-Za-z0-9_/+.-]+)@)?' # 1 Username
'([A-Za-z0-9_.-]+)' # 2 Hostname
':(.+)' # 3 Path (not url-encoded)
'$'
)
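    # Examples of URLs accepted by the two patterns above (illustrative
    # only; the group values are the strings captured by match().groups()):
    #   SSH_FORMAT: 'ssh://john:s3cret@host.example.org:2222/data/file.bin'
    #               -> ('john', 's3cret', 'host.example.org', '2222',
    #                   '/data/file.bin')
    #   SCP_FORMAT: 'scp://john@host.example.org:files/test.txt'
    #               -> ('john', 'host.example.org', 'files/test.txt')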
def __init__(self, url, module, insecure):
self.url = url
self.module = module
def execute(self):
# Parse URL
password = None
portnum = None
if self.url.startswith('ssh:'):
m = self.SSH_FORMAT.match(self.url)
if m is None:
raise ModuleError(self.module,
"SSH error: invalid URL %r" % self.url)
username, password, hostname, portnum, path = m.groups()
            if password is not None:
                # the password part of an ssh:// URL is optional
                password = urllib.unquote_plus(password)
path = urllib.unquote_plus(path)
elif self.url.startswith('scp:'):
m = self.SCP_FORMAT.match(self.url)
if m is None:
raise ModuleError(self.module,
"SSH error: invalid URL %r" % self.url)
username, hostname, path = m.groups()
else:
raise ModuleError(self.module, "SSHDownloader: Invalid URL")
if portnum is None:
portnum = 22
else:
portnum = int(portnum)
return self._open_ssh(username, password, hostname, portnum, path)
def _open_ssh(self, username, password, hostname, portnum, path):
paramiko = py_import('paramiko', {
'pip': 'paramiko',
'linux-debian': 'python-paramiko',
'linux-ubuntu': 'python-paramiko',
'linux-fedora': 'python-paramiko'})
scp = py_import('scp', {
'pip': 'scp'})
local_filename = os.path.join(package_directory,
urllib.quote_plus(self.url))
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
try:
ssh.connect(hostname, port=portnum,
username=username, password=password)
except paramiko.SSHException, e:
raise ModuleError(self.module, debug.format_exception(e))
client = scp.SCPClient(ssh.get_transport())
client.get(path, local_filename)
return local_filename
downloaders = {
'http': HTTPDownloader,
'https': HTTPDownloader,
'ssh': SSHDownloader,
'scp': SSHDownloader}
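# A minimal dispatch sketch (illustrative; this mirrors what
# DownloadFile.download below does): the URL scheme selects the downloader
# class, with the plain Downloader as fallback for anything urllib2 can open.
#
#     scheme = urllib2.splittype('https://example.org/data.csv')[0]
#     dl_class = downloaders.get(scheme, Downloader)  # -> HTTPDownloader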
class DownloadFile(Module):
""" Downloads file from URL.
    This module downloads a remote file. It tries to cache files on the local
    filesystem so as not to re-download unchanged files.
Recognized URL schemes are:
http://...
https://...
ftp://...
ssh://user[:password]@host[:port]/absolute/path
Examples:
ssh://john@vistrails.nyu.edu/home/john/example.txt
ssh://eve:my%20secret@google.com/tmp/test%20file.bin
Note that both password and path are url-encoded, that the path
is absolute, and that the username must be specified
scp://[user@]host:path
Examples:
scp://john@vistrails.nyu.edu:files/test.txt
scp://poly.edu:/tmp/test.bin
Note that nothing is url encoded, that the path can be relative
    (to the user's home directory) and that no password or port can
    be specified
"""
def compute(self):
self.check_input('url')
url = self.get_input('url')
insecure = self.get_input('insecure')
local_filename = self.download(url, insecure)
self.set_output('local_filename', local_filename)
result = PathObject(local_filename)
self.set_output('file', result)
def download(self, url, insecure):
""" Tries to download a file from url.
Returns the path to the local file.
"""
scheme = urllib2.splittype(url)[0]
DL = downloaders.get(scheme, Downloader)
return DL(url, self, insecure).execute()
class HTTPDirectory(Module):
"""Downloads a whole directory recursively from a URL
"""
def compute(self):
self.check_input('url')
url = self.get_input('url')
insecure = self.get_input('insecure')
local_path = self.download(url, insecure)
self.set_output('local_path', local_path)
local_dir = PathObject(local_path)
self.set_output('directory', local_dir)
def download(self, url, insecure):
local_path = self.interpreter.filePool.create_directory(
prefix='vt_http').name
download_directory(url, local_path, insecure)
return local_path
class RepoSync(Module):
""" VisTrails Server version of RepoSync modules. Customized to play
nicely with crowdlabs. Needs refactoring.
    RepoSync enables data to be synced with an online repository. The designated file
parameter will be uploaded to the repository on execution,
creating a new pipeline version that links to online repository data.
If the local file isn't available, then the online repository data is used.
"""
def __init__(self):
Module.__init__(self)
config = get_vistrails_persistent_configuration()
if config.check('webRepositoryURL'):
self.base_url = config.webRepositoryURL
else:
raise ModuleError(self,
("No webRepositoryURL value defined"
" in the Expert Configuration"))
# check if we are running in server mode
        # this affects how the compute method functions
if config.check('isInServerMode'):
self.is_server = bool(config.isInServerMode)
else:
self.is_server = False
# TODO: this '/' check should probably be done in core/configuration.py
if self.base_url[-1] == '/':
self.base_url = self.base_url[:-1]
    # used for invalidating the cache when the user isn't logged in to crowdLabs
# but wants to upload data
def invalidate_cache(self):
return False
def validate_cache(self):
return True
def _file_is_in_local_cache(self, local_filename):
return os.path.isfile(local_filename)
def checksum_lookup(self):
""" checks if the repository has the wanted data """
checksum_url = "%s/datasets/exists/%s/" % (self.base_url,
self.checksum)
self.on_server = False
try:
check_dataset_on_repo = urllib2.urlopen(url=checksum_url)
self.up_to_date = True if \
check_dataset_on_repo.read() == 'uptodate' else False
self.on_server = True
except urllib2.HTTPError:
self.up_to_date = True
def data_sync(self):
""" downloads/uploads/uses the local file depending on availability """
self.checksum_lookup()
# local file not on repository, so upload
if not self.on_server and os.path.isfile(self.in_file.name):
cookiejar = vistrails.gui.repository.QRepositoryDialog.cookiejar
if cookiejar:
register_openers(cookiejar=cookiejar)
params = {'dataset_file': open(self.in_file.name, 'rb'),
'name': self.in_file.name.split('/')[-1],
'origin': 'vistrails',
'checksum': self.checksum}
upload_url = "%s/datasets/upload/" % self.base_url
datagen, headers = multipart_encode(params)
request = urllib2.Request(upload_url, datagen, headers)
try:
result = urllib2.urlopen(request)
if result.code != 200:
show_warning("Upload Failure",
"Data failed to upload to repository")
# make temporarily uncachable
self.is_cacheable = self.invalidate_cache
else:
debug.warning("Push to repository was successful")
# make sure module caches
self.is_cacheable = self.validate_cache
except Exception, e:
show_warning("Upload Failure",
"Data failed to upload to repository")
# make temporarily uncachable
self.is_cacheable = self.invalidate_cache
debug.warning('RepoSync uploaded %s to the repository' % \
self.in_file.name)
else:
show_warning("Please login", ("You must be logged into the web"
" repository in order to upload "
"data. No data was synced"))
# make temporarily uncachable
self.is_cacheable = self.invalidate_cache
# use local data
self.set_output("file", self.in_file)
else:
# file on repository mirrors local file, so use local file
if self.up_to_date and os.path.isfile(self.in_file.name):
self.set_output("file", self.in_file)
else:
# local file not present or out of date, download or use cache
self.url = "%s/datasets/download/%s" % (self.base_url,
self.checksum)
local_filename = package_directory + '/' + \
urllib.quote_plus(self.url)
if not self._file_is_in_local_cache(local_filename):
# file not in cache, download.
try:
urllib.urlretrieve(self.url, local_filename)
except IOError, e:
raise ModuleError(self, ("Invalid URL: %s" % e))
out_file = PathObject(local_filename)
debug.warning('RepoSync is using repository data')
self.set_output("file", out_file)
def compute(self):
# if server, grab local file using checksum id
if self.is_server:
self.check_input('checksum')
self.checksum = self.get_input("checksum")
# get file path
path_url = "%s/datasets/path/%s/"%(self.base_url, self.checksum)
dataset_path_request = urllib2.urlopen(url=path_url)
dataset_path = dataset_path_request.read()
if os.path.isfile(dataset_path):
out_file = PathObject(dataset_path)
self.set_output("file", out_file)
else: # is client
self.check_input('file')
self.in_file = self.get_input("file")
if os.path.isfile(self.in_file.name):
# do size check
size = os.path.getsize(self.in_file.name)
if size > 26214400:
show_warning("File is too large",
"file is larger than 25MB, "
"unable to sync with web repository")
self.set_output("file", self.in_file)
else:
# compute checksum
                    # read in binary mode so the checksum reflects the bytes on disk
                    f = open(self.in_file.name, 'rb')
self.checksum = hashlib.sha1()
block = 1
while block:
block = f.read(128)
self.checksum.update(block)
f.close()
self.checksum = self.checksum.hexdigest()
# upload/download file
self.data_sync()
# set checksum param in module
if not self.has_input('checksum'):
self.change_parameter('checksum', [self.checksum])
else:
# local file not present
if self.has_input('checksum'):
self.checksum = self.get_input("checksum")
# download file
self.data_sync()
class URLEncode(Module):
def compute(self):
value = self.get_input('string')
self.set_output('encoded', urllib.quote_plus(value))
class URLDecode(Module):
def compute(self):
encoded = self.get_input('encoded')
self.set_output('string', urllib.unquote_plus(encoded))
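# Example of the round trip implemented by the two modules above
# (illustrative only):
#
#     urllib.quote_plus('a b&c')      # -> 'a+b%26c'
#     urllib.unquote_plus('a+b%26c')  # -> 'a b&c'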
def initialize(*args, **keywords):
reg = vistrails.core.modules.module_registry.get_module_registry()
basic = vistrails.core.modules.basic_modules
reg.add_module(DownloadFile)
reg.add_input_port(DownloadFile, "url", (basic.String, 'URL'))
reg.add_input_port(DownloadFile, 'insecure',
(basic.Boolean, "Allow invalid SSL certificates"),
optional=True, defaults="['False']")
reg.add_output_port(DownloadFile, "file",
(basic.File, 'local File object'))
reg.add_output_port(DownloadFile, "local_filename",
(basic.String, 'local filename'), optional=True)
reg.add_module(HTTPDirectory)
reg.add_input_port(HTTPDirectory, 'url', (basic.String, "URL"))
reg.add_input_port(HTTPDirectory, 'insecure',
(basic.Boolean, "Allow invalid SSL certificates"),
optional=True, defaults="['False']")
reg.add_output_port(HTTPDirectory, 'directory',
(basic.Directory, "local Directory object"))
reg.add_output_port(HTTPDirectory, 'local_path',
(basic.String, "local path"), optional=True)
reg.add_module(RepoSync)
reg.add_input_port(RepoSync, "file", (basic.File, 'File'))
reg.add_input_port(RepoSync, "checksum",
(basic.String, 'Checksum'), optional=True)
reg.add_output_port(RepoSync, "file", (basic.File,
'Repository Synced File object'))
reg.add_output_port(RepoSync, "checksum",
(basic.String, 'Checksum'), optional=True)
reg.add_module(URLEncode)
reg.add_input_port(URLEncode, "string", basic.String)
reg.add_output_port(URLEncode, "encoded", basic.String)
reg.add_module(URLDecode)
reg.add_input_port(URLDecode, "encoded", basic.String)
reg.add_output_port(URLDecode, "string", basic.String)
global package_directory
dotVistrails = current_dot_vistrails()
package_directory = os.path.join(dotVistrails, "HTTP")
if not os.path.isdir(package_directory):
try:
debug.log("Creating HTTP package directory: %s" % package_directory)
os.mkdir(package_directory)
except Exception, e:
raise RuntimeError("Failed to create cache directory: %s" %
package_directory, e)
def handle_module_upgrade_request(controller, module_id, pipeline):
module_remap = {
# HTTPFile was renamed DownloadFile
'HTTPFile': [
(None, '1.0.0', 'DownloadFile', {})
],
}
return UpgradeWorkflowHandler.remap_module(controller,
module_id,
pipeline,
module_remap)
###############################################################################
import unittest
class TestDownloadFile(unittest.TestCase):
@classmethod
def setUpClass(cls):
from vistrails.core.packagemanager import get_package_manager
from vistrails.core.modules.module_registry import MissingPackage
pm = get_package_manager()
try:
pm.get_package('org.vistrails.vistrails.http')
except MissingPackage:
pm.late_enable_package('URL')
def testIncorrectURL(self):
from vistrails.tests.utils import execute
self.assertTrue(execute([
('DownloadFile', identifier, [
('url', [('String', 'http://idbetthisdoesnotexistohrly')]),
]),
]))
def testIncorrectURL_2(self):
from vistrails.tests.utils import execute
self.assertTrue(execute([
('DownloadFile', identifier, [
('url', [('String', 'http://neitherodesthisohrly')]),
]),
]))
class TestHTTPDirectory(unittest.TestCase):
def test_download(self):
url = 'http://www.vistrails.org/testing/httpdirectory/test/'
import shutil
import tempfile
testdir = tempfile.mkdtemp(prefix='vt_test_http_')
try:
download_directory(url, testdir)
files = {}
def addfiles(dirpath):
td = os.path.join(testdir, dirpath)
for name in os.listdir(td):
filename = os.path.join(testdir, dirpath, name)
dn = os.path.join(dirpath, name)
if os.path.isdir(filename):
addfiles(os.path.join(dirpath, name))
else:
with open(filename, 'rb') as f:
files[dn.replace(os.sep, '/')] = f.read()
addfiles('')
self.assertEqual(len(files), 4)
del files['f.html']
self.assertEqual(files, {
'a': 'aa\n',
'bb': 'bb\n',
'cc/d': 'dd\n',
})
finally:
shutil.rmtree(testdir)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.template}
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyObject
from twisted.internet.defer import succeed, gatherResults
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from twisted.trial.util import suppress as SUPPRESS
from twisted.web.template import (
Element, TagLoader, renderer, tags, XMLFile, XMLString)
from twisted.web.iweb import ITemplateLoader
from twisted.web.error import (FlattenerError, MissingTemplateLoader,
MissingRenderMethod)
from twisted.web.template import renderElement
from twisted.web._element import UnexposedMethodError
from twisted.web.test._util import FlattenTestCase
from twisted.web.test.test_web import DummyRequest
from twisted.web.server import NOT_DONE_YET
from twisted.python.compat import NativeStringIO as StringIO
_xmlFileSuppress = SUPPRESS(category=DeprecationWarning,
message="Passing filenames or file objects to XMLFile is "
"deprecated since Twisted 12.1. Pass a FilePath instead.")
class TagFactoryTests(TestCase):
"""
Tests for L{_TagFactory} through the publicly-exposed L{tags} object.
"""
def test_lookupTag(self):
"""
HTML tags can be retrieved through C{tags}.
"""
tag = tags.a
self.assertEqual(tag.tagName, "a")
def test_lookupHTML5Tag(self):
"""
Twisted supports the latest and greatest HTML tags from the HTML5
specification.
"""
tag = tags.video
self.assertEqual(tag.tagName, "video")
def test_lookupTransparentTag(self):
"""
To support transparent inclusion in templates, there is a special tag,
the transparent tag, which has no name of its own but is accessed
through the "transparent" attribute.
"""
tag = tags.transparent
self.assertEqual(tag.tagName, "")
def test_lookupInvalidTag(self):
"""
Invalid tags which are not part of HTML cause AttributeErrors when
accessed through C{tags}.
"""
self.assertRaises(AttributeError, getattr, tags, "invalid")
def test_lookupXMP(self):
"""
As a special case, the <xmp> tag is simply not available through
C{tags} or any other part of the templating machinery.
"""
self.assertRaises(AttributeError, getattr, tags, "xmp")
class ElementTests(TestCase):
"""
Tests for the awesome new L{Element} class.
"""
def test_missingTemplateLoader(self):
"""
L{Element.render} raises L{MissingTemplateLoader} if the C{loader}
attribute is L{None}.
"""
element = Element()
err = self.assertRaises(MissingTemplateLoader, element.render, None)
self.assertIdentical(err.element, element)
def test_missingTemplateLoaderRepr(self):
"""
A L{MissingTemplateLoader} instance can be repr()'d without error.
"""
class PrettyReprElement(Element):
def __repr__(self):
return 'Pretty Repr Element'
self.assertIn('Pretty Repr Element',
repr(MissingTemplateLoader(PrettyReprElement())))
def test_missingRendererMethod(self):
"""
When called with the name which is not associated with a render method,
L{Element.lookupRenderMethod} raises L{MissingRenderMethod}.
"""
element = Element()
err = self.assertRaises(
MissingRenderMethod, element.lookupRenderMethod, "foo")
self.assertIdentical(err.element, element)
self.assertEqual(err.renderName, "foo")
def test_missingRenderMethodRepr(self):
"""
A L{MissingRenderMethod} instance can be repr()'d without error.
"""
class PrettyReprElement(Element):
def __repr__(self):
return 'Pretty Repr Element'
s = repr(MissingRenderMethod(PrettyReprElement(),
'expectedMethod'))
self.assertIn('Pretty Repr Element', s)
self.assertIn('expectedMethod', s)
def test_definedRenderer(self):
"""
When called with the name of a defined render method,
L{Element.lookupRenderMethod} returns that render method.
"""
class ElementWithRenderMethod(Element):
@renderer
def foo(self, request, tag):
return "bar"
foo = ElementWithRenderMethod().lookupRenderMethod("foo")
self.assertEqual(foo(None, None), "bar")
def test_render(self):
"""
L{Element.render} loads a document from the C{loader} attribute and
returns it.
"""
class TemplateLoader(object):
def load(self):
return "result"
class StubElement(Element):
loader = TemplateLoader()
element = StubElement()
self.assertEqual(element.render(None), "result")
def test_misuseRenderer(self):
"""
If the L{renderer} decorator is called without any arguments, it will
raise a comprehensible exception.
"""
te = self.assertRaises(TypeError, renderer)
self.assertEqual(str(te),
"expose() takes at least 1 argument (0 given)")
def test_renderGetDirectlyError(self):
"""
Called directly, without a default, L{renderer.get} raises
L{UnexposedMethodError} when it cannot find a renderer.
"""
self.assertRaises(UnexposedMethodError, renderer.get, None,
"notARenderer")
class XMLFileReprTests(TestCase):
"""
Tests for L{twisted.web.template.XMLFile}'s C{__repr__}.
"""
def test_filePath(self):
"""
An L{XMLFile} with a L{FilePath} returns a useful repr().
"""
path = FilePath("/tmp/fake.xml")
self.assertEqual('<XMLFile of %r>' % (path,), repr(XMLFile(path)))
def test_filename(self):
"""
An L{XMLFile} with a filename returns a useful repr().
"""
fname = "/tmp/fake.xml"
self.assertEqual('<XMLFile of %r>' % (fname,), repr(XMLFile(fname)))
test_filename.suppress = [_xmlFileSuppress]
def test_file(self):
"""
An L{XMLFile} with a file object returns a useful repr().
"""
fobj = StringIO("not xml")
self.assertEqual('<XMLFile of %r>' % (fobj,), repr(XMLFile(fobj)))
test_file.suppress = [_xmlFileSuppress]
class XMLLoaderTestsMixin(object):
"""
@ivar templateString: Simple template to use to exercise the loaders.
@ivar deprecatedUse: C{True} if this use of L{XMLFile} is deprecated and
should emit a C{DeprecationWarning}.
"""
loaderFactory = None
templateString = '<p>Hello, world.</p>'
def test_load(self):
"""
Verify that the loader returns a tag with the correct children.
"""
loader = self.loaderFactory()
tag, = loader.load()
warnings = self.flushWarnings(offendingFunctions=[self.loaderFactory])
if self.deprecatedUse:
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Passing filenames or file objects to XMLFile is "
"deprecated since Twisted 12.1. Pass a FilePath instead.")
else:
self.assertEqual(len(warnings), 0)
self.assertEqual(tag.tagName, 'p')
self.assertEqual(tag.children, [u'Hello, world.'])
def test_loadTwice(self):
"""
        If L{load()} can be called on a loader twice, the result should be the
same.
"""
loader = self.loaderFactory()
tags1 = loader.load()
tags2 = loader.load()
self.assertEqual(tags1, tags2)
test_loadTwice.suppress = [_xmlFileSuppress]
class XMLStringLoaderTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLString}
"""
deprecatedUse = False
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with C{self.templateString}.
"""
return XMLString(self.templateString)
class XMLFileWithFilePathTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLFile}'s L{FilePath} support.
"""
deprecatedUse = False
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with a L{FilePath} pointing to a
file that contains C{self.templateString}.
"""
fp = FilePath(self.mktemp())
fp.setContent(self.templateString.encode("utf8"))
return XMLFile(fp)
class XMLFileWithFileTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLFile}'s deprecated file object support.
"""
deprecatedUse = True
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with a file object that contains
C{self.templateString}.
"""
return XMLFile(StringIO(self.templateString))
class XMLFileWithFilenameTests(TestCase, XMLLoaderTestsMixin):
"""
Tests for L{twisted.web.template.XMLFile}'s deprecated filename support.
"""
deprecatedUse = True
def loaderFactory(self):
"""
@return: an L{XMLString} constructed with a filename that points to a
file containing C{self.templateString}.
"""
fp = FilePath(self.mktemp())
fp.setContent(self.templateString.encode('utf8'))
return XMLFile(fp.path)
class FlattenIntegrationTests(FlattenTestCase):
"""
Tests for integration between L{Element} and
L{twisted.web._flatten.flatten}.
"""
def test_roundTrip(self):
"""
Given a series of parsable XML strings, verify that
L{twisted.web._flatten.flatten} will flatten the L{Element} back to the
input when sent on a round trip.
"""
fragments = [
b"<p>Hello, world.</p>",
b"<p><!-- hello, world --></p>",
b"<p><![CDATA[Hello, world.]]></p>",
b'<test1 xmlns:test2="urn:test2">'
b'<test2:test3></test2:test3></test1>',
b'<test1 xmlns="urn:test2"><test3></test3></test1>',
b'<p>\xe2\x98\x83</p>',
]
deferreds = [
self.assertFlattensTo(Element(loader=XMLString(xml)), xml)
for xml in fragments]
return gatherResults(deferreds)
def test_entityConversion(self):
"""
When flattening an HTML entity, it should flatten out to the utf-8
representation if possible.
"""
element = Element(loader=XMLString('<p>☃</p>'))
return self.assertFlattensTo(element, b'<p>\xe2\x98\x83</p>')
def test_missingTemplateLoader(self):
"""
Rendering an Element without a loader attribute raises the appropriate
exception.
"""
return self.assertFlatteningRaises(Element(), MissingTemplateLoader)
def test_missingRenderMethod(self):
"""
Flattening an L{Element} with a C{loader} which has a tag with a render
directive fails with L{FlattenerError} if there is no available render
method to satisfy that directive.
"""
element = Element(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="unknownMethod" />
"""))
return self.assertFlatteningRaises(element, MissingRenderMethod)
def test_transparentRendering(self):
"""
A C{transparent} element should be eliminated from the DOM and rendered as
only its children.
"""
element = Element(loader=XMLString(
'<t:transparent '
'xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'Hello, world.'
'</t:transparent>'
))
return self.assertFlattensTo(element, b"Hello, world.")
def test_attrRendering(self):
"""
        An Element with an attr tag renders the value of its attr tag as an
attribute of its containing tag.
"""
element = Element(loader=XMLString(
'<a xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'<t:attr name="href">http://example.com</t:attr>'
'Hello, world.'
'</a>'
))
return self.assertFlattensTo(element,
b'<a href="http://example.com">Hello, world.</a>')
def test_errorToplevelAttr(self):
"""
A template with a toplevel C{attr} tag will not load; it will raise
L{AssertionError} if you try.
"""
self.assertRaises(
AssertionError,
XMLString,
"""<t:attr
xmlns:t='http://twistedmatrix.com/ns/twisted.web.template/0.1'
name='something'
>hello</t:attr>
""")
def test_errorUnnamedAttr(self):
"""
A template with an C{attr} tag with no C{name} attribute will not load;
it will raise L{AssertionError} if you try.
"""
self.assertRaises(
AssertionError,
XMLString,
"""<html><t:attr
xmlns:t='http://twistedmatrix.com/ns/twisted.web.template/0.1'
>hello</t:attr></html>""")
def test_lenientPrefixBehavior(self):
"""
If the parser sees a prefix it doesn't recognize on an attribute, it
will pass it on through to serialization.
"""
theInput = (
'<hello:world hello:sample="testing" '
'xmlns:hello="http://made-up.example.com/ns/not-real">'
'This is a made-up tag.</hello:world>')
element = Element(loader=XMLString(theInput))
self.assertFlattensTo(element, theInput.encode('utf8'))
def test_deferredRendering(self):
"""
An Element with a render method which returns a Deferred will render
correctly.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return succeed("Hello, world.")
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod">
Goodbye, world.
</p>
"""))
return self.assertFlattensTo(element, b"Hello, world.")
def test_loaderClassAttribute(self):
"""
If there is a non-None loader attribute on the class of an Element
instance but none on the instance itself, the class attribute is used.
"""
class SubElement(Element):
loader = XMLString("<p>Hello, world.</p>")
return self.assertFlattensTo(SubElement(), b"<p>Hello, world.</p>")
def test_directiveRendering(self):
"""
An Element with a valid render directive has that directive invoked and
the result added to the output.
"""
renders = []
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
renders.append((self, request))
return tag("Hello, world.")
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod" />
"""))
return self.assertFlattensTo(element, b"<p>Hello, world.</p>")
def test_directiveRenderingOmittingTag(self):
"""
An Element with a render method which omits the containing tag
successfully removes that tag from the output.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return "Hello, world."
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod">
Goodbye, world.
</p>
"""))
return self.assertFlattensTo(element, b"Hello, world.")
def test_elementContainingStaticElement(self):
"""
An Element which is returned by the render method of another Element is
rendered properly.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return tag(Element(
loader=XMLString("<em>Hello, world.</em>")))
element = RenderfulElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="renderMethod" />
"""))
return self.assertFlattensTo(element, b"<p><em>Hello, world.</em></p>")
def test_elementUsingSlots(self):
"""
An Element which is returned by the render method of another Element is
rendered properly.
"""
class RenderfulElement(Element):
@renderer
def renderMethod(self, request, tag):
return tag.fillSlots(test2='world.')
element = RenderfulElement(loader=XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"'
' t:render="renderMethod">'
'<t:slot name="test1" default="Hello, " />'
'<t:slot name="test2" />'
'</p>'
))
return self.assertFlattensTo(element, b"<p>Hello, world.</p>")
def test_elementContainingDynamicElement(self):
"""
Directives in the document factory of an Element returned from a render
method of another Element are satisfied from the correct object: the
"inner" Element.
"""
class OuterElement(Element):
@renderer
def outerMethod(self, request, tag):
return tag(InnerElement(loader=XMLString("""
<t:ignored
xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="innerMethod" />
""")))
class InnerElement(Element):
@renderer
def innerMethod(self, request, tag):
return "Hello, world."
element = OuterElement(loader=XMLString("""
<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="outerMethod" />
"""))
return self.assertFlattensTo(element, b"<p>Hello, world.</p>")
def test_sameLoaderTwice(self):
"""
Rendering the output of a loader, or even the same element, should
return different output each time.
"""
sharedLoader = XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'<t:transparent t:render="classCounter" /> '
'<t:transparent t:render="instanceCounter" />'
'</p>')
class DestructiveElement(Element):
count = 0
instanceCount = 0
loader = sharedLoader
@renderer
def classCounter(self, request, tag):
DestructiveElement.count += 1
return tag(str(DestructiveElement.count))
@renderer
def instanceCounter(self, request, tag):
self.instanceCount += 1
return tag(str(self.instanceCount))
e1 = DestructiveElement()
e2 = DestructiveElement()
self.assertFlattensImmediately(e1, b"<p>1 1</p>")
self.assertFlattensImmediately(e1, b"<p>2 2</p>")
self.assertFlattensImmediately(e2, b"<p>3 1</p>")
class TagLoaderTests(FlattenTestCase):
"""
Tests for L{TagLoader}.
"""
def setUp(self):
self.loader = TagLoader(tags.i('test'))
def test_interface(self):
"""
An instance of L{TagLoader} provides L{ITemplateLoader}.
"""
self.assertTrue(verifyObject(ITemplateLoader, self.loader))
def test_loadsList(self):
"""
L{TagLoader.load} returns a list, per L{ITemplateLoader}.
"""
self.assertIsInstance(self.loader.load(), list)
def test_flatten(self):
"""
L{TagLoader} can be used in an L{Element}, and flattens as the tag used
to construct the L{TagLoader} would flatten.
"""
e = Element(self.loader)
self.assertFlattensImmediately(e, b'<i>test</i>')
class TestElement(Element):
"""
An L{Element} that can be rendered successfully.
"""
loader = XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'Hello, world.'
'</p>')
class TestFailureElement(Element):
"""
An L{Element} that can be used in place of L{FailureElement} to verify
that L{renderElement} can render failures properly.
"""
loader = XMLString(
'<p xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">'
'I failed.'
'</p>')
def __init__(self, failure, loader=None):
self.failure = failure
class FailingElement(Element):
"""
An element that raises an exception when rendered.
"""
def render(self, request):
a = 42
b = 0
return a // b
class FakeSite(object):
"""
A minimal L{Site} object that we can use to test displayTracebacks
"""
displayTracebacks = False
class RenderElementTests(TestCase):
"""
Test L{renderElement}
"""
def setUp(self):
"""
Set up a common L{DummyRequest} and L{FakeSite}.
"""
self.request = DummyRequest([""])
self.request.site = FakeSite()
def test_simpleRender(self):
"""
L{renderElement} returns NOT_DONE_YET and eventually
writes the rendered L{Element} to the request before finishing the
request.
"""
element = TestElement()
d = self.request.notifyFinish()
def check(_):
self.assertEqual(
b"".join(self.request.written),
b"<!DOCTYPE html>\n"
b"<p>Hello, world.</p>")
self.assertTrue(self.request.finished)
d.addCallback(check)
self.assertIdentical(NOT_DONE_YET, renderElement(self.request, element))
return d
def test_simpleFailure(self):
"""
L{renderElement} handles failures by writing a minimal
error message to the request and finishing it.
"""
element = FailingElement()
d = self.request.notifyFinish()
def check(_):
flushed = self.flushLoggedErrors(FlattenerError)
self.assertEqual(len(flushed), 1)
self.assertEqual(
b"".join(self.request.written),
(b'<!DOCTYPE html>\n'
b'<div style="font-size:800%;'
b'background-color:#FFF;'
b'color:#F00'
b'">An error occurred while rendering the response.</div>'))
self.assertTrue(self.request.finished)
d.addCallback(check)
self.assertIdentical(NOT_DONE_YET, renderElement(self.request, element))
return d
def test_simpleFailureWithTraceback(self):
"""
L{renderElement} will render a traceback when rendering of
the element fails and our site is configured to display tracebacks.
"""
self.request.site.displayTracebacks = True
element = FailingElement()
d = self.request.notifyFinish()
def check(_):
flushed = self.flushLoggedErrors(FlattenerError)
self.assertEqual(len(flushed), 1)
self.assertEqual(
b"".join(self.request.written),
b"<!DOCTYPE html>\n<p>I failed.</p>")
self.assertTrue(self.request.finished)
d.addCallback(check)
renderElement(self.request, element, _failElement=TestFailureElement)
return d
def test_nonDefaultDoctype(self):
"""
L{renderElement} will write the doctype string specified by the
doctype keyword argument.
"""
element = TestElement()
d = self.request.notifyFinish()
def check(_):
self.assertEqual(
b"".join(self.request.written),
(b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
b'<p>Hello, world.</p>'))
d.addCallback(check)
renderElement(
self.request,
element,
doctype=(
b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'))
return d
def test_noneDoctype(self):
"""
L{renderElement} will not write out a doctype if the doctype keyword
argument is L{None}.
"""
element = TestElement()
d = self.request.notifyFinish()
def check(_):
self.assertEqual(
b"".join(self.request.written),
b'<p>Hello, world.</p>')
d.addCallback(check)
renderElement(self.request, element, doctype=None)
return d
|
|
from __future__ import unicode_literals
from future.utils import native_str
from django.contrib import admin
from django.contrib.auth import logout
from django.contrib.messages import error
from django.contrib.redirects.models import Redirect
from django.core.exceptions import MiddlewareNotUsed
from django.core.urlresolvers import reverse, resolve
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseGone)
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import Template, RequestContext
from django.utils.cache import get_max_age
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from mezzanine.conf import settings
from mezzanine.core.models import SitePermission
from mezzanine.core.management.commands.createdb import (DEFAULT_USERNAME,
DEFAULT_PASSWORD)
from mezzanine.utils.cache import (cache_key_prefix, nevercache_token,
cache_get, cache_set, cache_installed)
from mezzanine.utils.device import templates_for_device
from mezzanine.utils.sites import current_site_id, templates_for_host
from mezzanine.utils.urls import next_url
_deprecated = {
"AdminLoginInterfaceSelector": "AdminLoginInterfaceSelectorMiddleware",
"DeviceAwareUpdateCacheMiddleware": "UpdateCacheMiddleware",
"DeviceAwareFetchFromCacheMiddleware": "FetchFromCacheMiddleware",
}
class _Deprecated(object):
def __init__(self, *args, **kwargs):
from warnings import warn
msg = "mezzanine.core.middleware.%s is deprecated." % self.old
if self.new:
msg += (" Please change the MIDDLEWARE_CLASSES setting to use "
"mezzanine.core.middleware.%s" % self.new)
warn(msg)
for old, new in _deprecated.items():
globals()[old] = type(native_str(old),
(_Deprecated,),
{"old": old, "new": new})
class AdminLoginInterfaceSelectorMiddleware(object):
"""
Checks for a POST from the admin login view and if authentication is
successful and the "site" interface is selected, redirect to the site.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
login_type = request.POST.get("mezzanine_login_interface")
if login_type and not request.user.is_authenticated():
response = view_func(request, *view_args, **view_kwargs)
if request.user.is_authenticated():
if login_type == "admin":
next = next_url(request) or request.get_full_path()
username = request.user.get_username()
if (username == DEFAULT_USERNAME and
request.user.check_password(DEFAULT_PASSWORD)):
error(request, mark_safe(_(
"Your account is using the default password, "
"please <a href='%s'>change it</a> immediately.")
% reverse("user_change_password",
args=(request.user.id,))))
else:
next = next_url(request) or "/"
return HttpResponseRedirect(next)
else:
return response
return None
class SitePermissionMiddleware(object):
"""
    Marks the current user with a ``has_site_permission`` attribute, which
    is used in place of ``user.is_staff`` to achieve per-site staff
    access.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
has_site_permission = False
if request.user.is_superuser:
has_site_permission = True
elif request.user.is_staff:
lookup = {"user": request.user, "sites": current_site_id()}
try:
SitePermission.objects.get(**lookup)
except SitePermission.DoesNotExist:
admin_index = reverse("admin:index")
if request.path.startswith(admin_index):
logout(request)
view_func = admin.site.login
extra_context = {"no_site_permission": True}
return view_func(request, extra_context=extra_context)
else:
has_site_permission = True
request.user.has_site_permission = has_site_permission
class TemplateForDeviceMiddleware(object):
"""
Inserts device-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
if not isinstance(response.template_name, Template):
templates = templates_for_device(request,
response.template_name)
response.template_name = templates
return response
class TemplateForHostMiddleware(object):
"""
Inserts host-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
if not isinstance(response.template_name, Template):
response.template_name = templates_for_host(
response.template_name)
return response
class UpdateCacheMiddleware(object):
"""
Response phase for Mezzanine's cache middleware. Handles caching
the response, and then performing the second phase of rendering,
for content enclosed by the ``nevercache`` tag.
"""
def process_response(self, request, response):
# Caching is only applicable for text-based, non-streaming
# responses. We also skip it for non-200 statuses during
# development, so that stack traces are correctly rendered.
is_text = response.get("content-type", "").startswith("text")
valid_status = response.status_code == 200
streaming = getattr(response, "streaming", False)
if not is_text or streaming or (settings.DEBUG and not valid_status):
return response
# Cache the response if all the required conditions are met.
# Response must be marked for updating by the
# ``FetchFromCacheMiddleware`` having a cache get miss, the
# user must not be authenticated, the HTTP status must be OK
# and the response mustn't include an expiry age, indicating it
# shouldn't be cached.
marked_for_update = getattr(request, "_update_cache", False)
anon = hasattr(request, "user") and not request.user.is_authenticated()
timeout = get_max_age(response)
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
if anon and valid_status and marked_for_update and timeout:
cache_key = cache_key_prefix(request) + request.get_full_path()
_cache_set = lambda r: cache_set(cache_key, r.content, timeout)
if callable(getattr(response, "render", None)):
response.add_post_render_callback(_cache_set)
else:
_cache_set(response)
# Second phase rendering for non-cached template code and
# content. Split on the delimiter the ``nevercache`` tag
# wrapped its contents in, and render only the content
# enclosed by it, to avoid possible template code injection.
token = nevercache_token()
try:
token = token.encode('utf-8')
except AttributeError:
pass
parts = response.content.split(token)
# Restore csrf token from cookie - check the response
# first as it may be being set for the first time.
csrf_token = None
try:
csrf_token = response.cookies[settings.CSRF_COOKIE_NAME].value
except KeyError:
try:
csrf_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
pass
if csrf_token:
request.META["CSRF_COOKIE"] = csrf_token
context = RequestContext(request)
for i, part in enumerate(parts):
if i % 2:
part = Template(part).render(context).encode("utf-8")
parts[i] = part
response.content = b"".join(parts)
response["Content-Length"] = len(response.content)
if hasattr(request, '_messages'):
# Required to clear out user messages.
request._messages.update(response)
return response
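# Illustrative sketch (not part of the middleware): ``nevercache`` wraps its
# contents in the token produced by ``nevercache_token()``, so splitting the
# response body yields alternating parts. With a hypothetical token
# ``b"<!--NC-->"`` and body
#
#     b"<html><!--NC-->{% csrf_token %}<!--NC--></html>"
#
# ``parts`` would be ``[b"<html>", b"{% csrf_token %}", b"</html>"]``: the
# even-indexed parts are cached HTML left untouched, while the odd-indexed
# parts are template code re-rendered against the current request above.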
class FetchFromCacheMiddleware(object):
"""
Request phase for Mezzanine cache middleware. Return a response
    from cache if found, otherwise mark the request for updating
the cache in ``UpdateCacheMiddleware``.
"""
def process_request(self, request):
if (cache_installed() and request.method == "GET" and
not request.user.is_authenticated()):
cache_key = cache_key_prefix(request) + request.get_full_path()
response = cache_get(cache_key)
# We need to force a csrf token here, as new sessions
            # won't receive one on their first request, with cache
# middleware running.
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
if csrf_mw_name in settings.MIDDLEWARE_CLASSES:
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_view(request, lambda x: None, None, None)
get_token(request)
if response is None:
request._update_cache = True
else:
return HttpResponse(response)
class SSLRedirectMiddleware(object):
"""
Handles redirections required for SSL when ``SSL_ENABLED`` is ``True``.
If ``SSL_FORCE_HOST`` is ``True``, and is not the current host,
redirect to it.
    Also ensure URLs defined by ``SSL_FORCE_URL_PREFIXES`` are redirected
to HTTPS, and redirect all other URLs to HTTP if on HTTPS.
"""
def process_request(self, request):
force_host = settings.SSL_FORCE_HOST
response = None
if force_host and request.get_host().split(":")[0] != force_host:
url = "http://%s%s" % (force_host, request.get_full_path())
response = HttpResponsePermanentRedirect(url)
elif settings.SSL_ENABLED and not settings.DEV_SERVER:
url = "%s%s" % (request.get_host(), request.get_full_path())
if request.path.startswith(settings.SSL_FORCE_URL_PREFIXES):
if not request.is_secure():
response = HttpResponseRedirect("https://%s" % url)
elif request.is_secure() and settings.SSL_FORCED_PREFIXES_ONLY:
response = HttpResponseRedirect("http://%s" % url)
if response and request.method == "POST":
if resolve(request.get_full_path()).url_name == "fb_do_upload":
# The handler for the flash file uploader in filebrowser
# doesn't have access to the http headers Django will use
# to determine whether the request is secure or not, so
# in this case we don't attempt a redirect - note that
# when /admin is restricted to SSL using Mezzanine's SSL
# setup, the flash uploader will post over SSL, so
                # someone would need to explicitly go out of their way to
# trigger this.
return
# Tell the client they need to re-POST.
response.status_code = 307
return response
class RedirectFallbackMiddleware(object):
"""
Port of Django's ``RedirectFallbackMiddleware`` that uses
Mezzanine's approach for determining the current site.
"""
def __init__(self):
if "django.contrib.redirects" not in settings.INSTALLED_APPS:
raise MiddlewareNotUsed
def process_response(self, request, response):
if response.status_code == 404:
lookup = {
"site_id": current_site_id(),
"old_path": request.get_full_path(),
}
try:
redirect = Redirect.objects.get(**lookup)
except Redirect.DoesNotExist:
pass
else:
if not redirect.new_path:
response = HttpResponseGone()
else:
response = HttpResponsePermanentRedirect(redirect.new_path)
return response
|
|
# Copyright 2016 ZTE Corporation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Network trunk and subports action implementations"""
import logging
from osc_lib.cli import format_columns
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from neutronclient._i18n import _
from neutronclient.osc import utils as nc_osc_utils
from neutronclient.osc.v2 import utils as v2_utils
LOG = logging.getLogger(__name__)
TRUNK = 'trunk'
TRUNKS = 'trunks'
SUB_PORTS = 'sub_ports'
class CreateNetworkTrunk(command.ShowOne):
"""Create a network trunk for a given project"""
def get_parser(self, prog_name):
parser = super(CreateNetworkTrunk, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<name>',
help=_("Name of the trunk to create")
)
parser.add_argument(
'--description',
metavar='<description>',
help=_("A description of the trunk")
)
parser.add_argument(
'--parent-port',
metavar='<parent-port>',
required=True,
help=_("Parent port belonging to this trunk (name or ID)")
)
parser.add_argument(
'--subport',
metavar='<port=,segmentation-type=,segmentation-id=>',
action=parseractions.MultiKeyValueAction, dest='add_subports',
optional_keys=['segmentation-id', 'segmentation-type'],
required_keys=['port'],
help=_("Subport to add. Subport is of form "
"\'port=<name or ID>,segmentation-type=,segmentation-ID=\' "
"(--subport) option can be repeated")
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=True,
help=_("Enable trunk (default)")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable trunk")
)
nc_osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_attrs_for_trunk(self.app.client_manager,
parsed_args)
body = {TRUNK: attrs}
obj = client.create_trunk(body)
columns = _get_columns(obj[TRUNK])
data = osc_utils.get_dict_properties(obj[TRUNK], columns,
formatters=_formatters)
return columns, data
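# Illustrative CLI usage (hypothetical names; the exact command name depends
# on how this class is registered as an entry point), mapping onto the parser
# arguments defined above:
#
#     openstack network trunk create trunk0 \
#         --parent-port parent-port0 \
#         --subport port=child-port0,segmentation-type=vlan,segmentation-id=100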
class DeleteNetworkTrunk(command.Command):
"""Delete a given network trunk"""
def get_parser(self, prog_name):
parser = super(DeleteNetworkTrunk, self).get_parser(prog_name)
parser.add_argument(
'trunk',
metavar="<trunk>",
nargs="+",
help=_("Trunk(s) to delete (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for trunk in parsed_args.trunk:
try:
trunk_id = _get_id(client, trunk, TRUNK)
client.delete_trunk(trunk_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete trunk with name "
"or ID '%(trunk)s': %(e)s"),
{'trunk': trunk, 'e': e})
if result > 0:
total = len(parsed_args.trunk)
msg = (_("%(result)s of %(total)s trunks failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListNetworkTrunk(command.Lister):
"""List all network trunks"""
def get_parser(self, prog_name):
parser = super(ListNetworkTrunk, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
data = client.list_trunks()
headers = (
'ID',
'Name',
'Parent Port',
'Description'
)
columns = (
'id',
'name',
'port_id',
'description'
)
if parsed_args.long:
headers += (
'Status',
'State',
'Created At',
'Updated At',
)
columns += (
'status',
'admin_state_up',
'created_at',
'updated_at'
)
return (headers,
(osc_utils.get_dict_properties(
s, columns,
formatters=_formatters,
) for s in data[TRUNKS]))
class SetNetworkTrunk(command.Command):
"""Set network trunk properties"""
def get_parser(self, prog_name):
parser = super(SetNetworkTrunk, self).get_parser(prog_name)
parser.add_argument(
'trunk',
metavar="<trunk>",
help=_("Trunk to modify (name or ID)")
)
parser.add_argument(
'--name',
metavar="<name>",
help=_("Set trunk name")
)
parser.add_argument(
'--description',
metavar='<description>',
help=_("A description of the trunk")
)
parser.add_argument(
'--subport',
metavar='<port=,segmentation-type=,segmentation-id=>',
action=parseractions.MultiKeyValueAction, dest='set_subports',
optional_keys=['segmentation-id', 'segmentation-type'],
required_keys=['port'],
            help=_("Subport to add. Subport is of form "
                   "\'port=<name or ID>,segmentation-type=,segmentation-ID=\' "
                   "(--subport) option can be repeated")
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
help=_("Enable trunk")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable trunk")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
trunk_id = _get_id(client, parsed_args.trunk, TRUNK)
attrs = _get_attrs_for_trunk(self.app.client_manager, parsed_args)
body = {TRUNK: attrs}
try:
client.update_trunk(trunk_id, body)
except Exception as e:
msg = (_("Failed to set trunk '%(t)s': %(e)s")
% {'t': parsed_args.trunk, 'e': e})
raise exceptions.CommandError(msg)
if parsed_args.set_subports:
subport_attrs = _get_attrs_for_subports(self.app.client_manager,
parsed_args)
try:
client.trunk_add_subports(trunk_id, subport_attrs)
except Exception as e:
msg = (_("Failed to add subports to trunk '%(t)s': %(e)s")
% {'t': parsed_args.trunk, 'e': e})
raise exceptions.CommandError(msg)
class ShowNetworkTrunk(command.ShowOne):
"""Show information of a given network trunk"""
def get_parser(self, prog_name):
parser = super(ShowNetworkTrunk, self).get_parser(prog_name)
parser.add_argument(
'trunk',
metavar="<trunk>",
help=_("Trunk to display (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
trunk_id = _get_id(client, parsed_args.trunk, TRUNK)
obj = client.show_trunk(trunk_id)
columns = _get_columns(obj[TRUNK])
data = osc_utils.get_dict_properties(obj[TRUNK], columns,
formatters=_formatters)
return columns, data
class ListNetworkSubport(command.Lister):
"""List all subports for a given network trunk"""
def get_parser(self, prog_name):
parser = super(ListNetworkSubport, self).get_parser(prog_name)
parser.add_argument(
'--trunk',
required=True,
metavar="<trunk>",
help=_("List subports belonging to this trunk (name or ID)")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
trunk_id = _get_id(client, parsed_args.trunk, TRUNK)
data = client.trunk_get_subports(trunk_id)
headers = ('Port', 'Segmentation Type', 'Segmentation ID')
columns = ('port_id', 'segmentation_type', 'segmentation_id')
return (headers,
(osc_utils.get_dict_properties(
s, columns,
) for s in data[SUB_PORTS]))
class UnsetNetworkTrunk(command.Command):
"""Unset subports from a given network trunk"""
def get_parser(self, prog_name):
parser = super(UnsetNetworkTrunk, self).get_parser(prog_name)
parser.add_argument(
'trunk',
metavar="<trunk>",
help=_("Unset subports from this trunk (name or ID)")
)
parser.add_argument(
'--subport',
metavar="<subport>",
required=True,
action='append', dest='unset_subports',
help=_("Subport to delete (name or ID of the port) "
"(--subport) option can be repeated")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_attrs_for_subports(self.app.client_manager, parsed_args)
trunk_id = _get_id(client, parsed_args.trunk, TRUNK)
client.trunk_remove_subports(trunk_id, attrs)
_formatters = {
'admin_state_up': v2_utils.AdminStateColumn,
'sub_ports': format_columns.ListDictColumn,
}
def _get_columns(item):
return tuple(sorted(list(item.keys())))
def _get_attrs_for_trunk(client_manager, parsed_args):
attrs = {}
if parsed_args.name is not None:
attrs['name'] = str(parsed_args.name)
if parsed_args.description is not None:
attrs['description'] = str(parsed_args.description)
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
attrs['admin_state_up'] = False
if 'parent_port' in parsed_args and parsed_args.parent_port is not None:
port_id = _get_id(client_manager.neutronclient,
parsed_args.parent_port, 'port')
attrs['port_id'] = port_id
if 'add_subports' in parsed_args and parsed_args.add_subports is not None:
attrs[SUB_PORTS] = _format_subports(client_manager,
parsed_args.add_subports)
# "trunk set" command doesn't support setting project.
if 'project' in parsed_args and parsed_args.project is not None:
identity_client = client_manager.identity
project_id = nc_osc_utils.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['tenant_id'] = project_id
return attrs
def _format_subports(client_manager, subports):
attrs = []
for subport in subports:
subport_attrs = {}
if subport.get('port'):
port_id = _get_id(client_manager.neutronclient,
subport['port'], 'port')
subport_attrs['port_id'] = port_id
if subport.get('segmentation-id'):
try:
subport_attrs['segmentation_id'] = int(
subport['segmentation-id'])
except ValueError:
msg = (_("Segmentation-id '%s' is not an integer") %
subport['segmentation-id'])
raise exceptions.CommandError(msg)
if subport.get('segmentation-type'):
subport_attrs['segmentation_type'] = subport['segmentation-type']
attrs.append(subport_attrs)
return attrs
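# Illustrative sketch (hypothetical values): each parsed --subport option is a
# dict keyed by the CLI option names, which _format_subports() translates into
# the API body format, resolving the port name or ID via _get_id() and
# converting the segmentation ID to an integer:
#
#     [{'port': 'child-port0', 'segmentation-type': 'vlan',
#       'segmentation-id': '100'}]
#     -> [{'port_id': '<resolved port UUID>', 'segmentation_type': 'vlan',
#          'segmentation_id': 100}]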
def _get_attrs_for_subports(client_manager, parsed_args):
attrs = {}
if 'set_subports' in parsed_args and parsed_args.set_subports is not None:
attrs[SUB_PORTS] = _format_subports(client_manager,
parsed_args.set_subports)
if ('unset_subports' in parsed_args and
parsed_args.unset_subports is not None):
subports_list = []
for subport in parsed_args.unset_subports:
port_id = _get_id(client_manager.neutronclient,
subport, 'port')
subports_list.append({'port_id': port_id})
attrs[SUB_PORTS] = subports_list
return attrs
def _get_id(client, id_or_name, resource):
return client.find_resource(resource, str(id_or_name))['id']
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Command to upload benchmark test results to a cloud datastore.
This uploader script is typically run periodically as a cron job. It locates,
in a specified data directory, files that contain benchmark test results. The
results are written by the "run_and_gather_logs.py" script using the JSON-format
serialization of the "TestResults" protobuf message (core/util/test_log.proto).
For each file, the uploader reads the "TestResults" data, transforms it into
the schema used in the datastore (see below), and uploads it to the datastore.
After processing a file, the uploader moves it to a specified archive directory
for safe-keeping.
The uploader uses file-level exclusive locking (non-blocking flock) which allows
multiple instances of this script to run concurrently if desired, splitting the
task among them, each one processing and archiving different files.
The "TestResults" object contains test metadata and multiple benchmark entries.
The datastore schema splits this information into two Kinds (like tables), one
holding the test metadata in a single "Test" Entity (like rows), and one holding
each related benchmark entry in a separate "Entry" Entity. Datastore creates a
unique ID (retrieval key) for each Entity, and this ID is always returned along
with the data when an Entity is fetched.
* Test:
- test: unique name of this test (string)
- start: start time of this test run (datetime)
- info: JSON-encoded test metadata (string, not indexed)
* Entry:
- test: unique name of this test (string)
- entry: unique name of this benchmark entry within this test (string)
- start: start time of this test run (datetime)
- timing: average time (usec) per iteration of this test/entry run (float)
- info: JSON-encoded entry metadata (string, not indexed)
A few composite indexes are created (upload_test_benchmarks_index.yaml) for fast
retrieval of benchmark data and reduced I/O to the client without adding a lot
of indexing and storage burden:
* Test: (test, start) is indexed to fetch recent start times for a given test.
* Entry: (test, entry, start, timing) is indexed to use projection and only
fetch the recent (start, timing) data for a given test/entry benchmark.
Example retrieval GQL statements:
* Get the recent start times for a given test:
SELECT start FROM Test WHERE test = <test-name> AND
start >= <recent-datetime> LIMIT <count>
* Get the recent timings for a given benchmark:
SELECT start, timing FROM Entry WHERE test = <test-name> AND
entry = <entry-name> AND start >= <recent-datetime> LIMIT <count>
* Get all test names uniquified (e.g. display a list of available tests):
SELECT DISTINCT ON (test) test FROM Test
* For a given test (from the list above), get all its entry names. The list of
entry names can be extracted from the test "info" metadata for a given test
name and start time (e.g. pick the latest start time for that test).
SELECT * FROM Test WHERE test = <test-name> AND start = <latest-datetime>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import fcntl
import json
import os
import shutil
from six import text_type
from google.cloud import datastore
def is_real_file(dirpath, fname):
fpath = os.path.join(dirpath, fname)
return os.path.isfile(fpath) and not os.path.islink(fpath)
def get_mtime(dirpath, fname):
fpath = os.path.join(dirpath, fname)
return os.stat(fpath).st_mtime
def list_files_by_mtime(dirpath):
"""Return a list of files in the directory, sorted in increasing "mtime".
  Return a list of files in the given directory, sorted from oldest to newest
according to their modification times. Only return actual files, skipping
directories, symbolic links, pipes, etc.
Args:
dirpath: directory pathname
Returns:
A list of file names relative to the given directory path.
"""
files = [f for f in os.listdir(dirpath) if is_real_file(dirpath, f)]
return sorted(files, key=lambda f: get_mtime(dirpath, f))
# Note: The file locking code uses flock() instead of lockf() because benchmark
# files are only opened for reading (not writing) and we still want exclusive
# locks on them. This imposes the limitation that the data directory must be
# local, not NFS-mounted.
def lock(fd):
fcntl.flock(fd, fcntl.LOCK_EX)
def unlock(fd):
fcntl.flock(fd, fcntl.LOCK_UN)
def trylock(fd):
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
except Exception: # pylint: disable=broad-except
return False
def upload_benchmark_data(client, data):
"""Parse benchmark data and use the client to upload it to the datastore.
Parse the given benchmark data from the serialized JSON-format used to write
the test results file. Create the different datastore Entities from that data
and upload them to the datastore in a batch using the client connection.
Args:
client: datastore client connection
data: JSON-encoded benchmark data
"""
test_result = json.loads(data)
test_name = text_type(test_result["name"])
start_time = datetime.datetime.utcfromtimestamp(
float(test_result["startTime"]))
batch = []
# Create the Test Entity containing all the test information as a
# non-indexed JSON blob.
t_key = client.key("Test")
t_val = datastore.Entity(t_key, exclude_from_indexes=["info"])
t_val.update({
"test": test_name,
"start": start_time,
"info": text_type(data)
})
batch.append(t_val)
# Create one Entry Entity for each benchmark entry. The wall-clock timing is
# the attribute to be fetched and displayed. The full entry information is
# also stored as a non-indexed JSON blob.
for ent in test_result["entries"].get("entry", []):
ent_name = text_type(ent["name"])
e_key = client.key("Entry")
e_val = datastore.Entity(e_key, exclude_from_indexes=["info"])
e_val.update({
"test": test_name,
"start": start_time,
"entry": ent_name,
"timing": ent["wallTime"],
"info": text_type(json.dumps(ent))
})
batch.append(e_val)
# Put the whole batch of Entities in the datastore.
client.put_multi(batch)
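# Illustrative sketch (not part of the original uploader): a minimal JSON
# payload of the shape upload_benchmark_data() expects. The field names follow
# the accesses above ("name", "startTime", "entries"/"entry", "wallTime"); the
# values are hypothetical.
_EXAMPLE_TEST_RESULT_JSON = """
{
  "name": "//tensorflow/examples:sample_benchmark",
  "startTime": 1500000000.0,
  "entries": {
    "entry": [
      {"name": "BM_Example/64", "wallTime": 12.5}
    ]
  }
}
"""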
def upload_benchmark_files(opts):
"""Find benchmark files, process them, and upload their data to the datastore.
Locate benchmark files in the data directory, process them, and upload their
data to the datastore. After processing each file, move it to the archive
directory for safe-keeping. Each file is locked for processing, which allows
multiple uploader instances to run concurrently if needed, each one handling
different benchmark files, skipping those already locked by another.
Args:
opts: command line options object
Note: To use locking, the file is first opened, then its descriptor is used to
lock and read it. The lock is released when the file is closed. Do not open
that same file a 2nd time while the lock is already held, because when that
2nd file descriptor is closed, the lock will be released prematurely.
"""
client = datastore.Client()
for fname in list_files_by_mtime(opts.datadir):
fpath = os.path.join(opts.datadir, fname)
try:
with open(fpath, "r") as fd:
if trylock(fd):
upload_benchmark_data(client, fd.read())
shutil.move(fpath, os.path.join(opts.archivedir, fname))
# unlock(fd) -- When "with open()" closes fd, the lock is released.
except Exception as e: # pylint: disable=broad-except
print("Cannot process '%s', skipping. Error: %s" % (fpath, e))
def parse_cmd_line():
"""Parse command line options.
Returns:
The parsed arguments object.
"""
desc = "Upload benchmark results to datastore."
opts = [
("-a", "--archivedir", str, None, True,
"Directory where benchmark files are archived."),
("-d", "--datadir", str, None, True,
"Directory of benchmark files to upload."),
]
parser = argparse.ArgumentParser(description=desc)
for opt in opts:
parser.add_argument(opt[0], opt[1], type=opt[2], default=opt[3],
required=opt[4], help=opt[5])
return parser.parse_args()
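# Illustrative invocation (paths and script name are hypothetical); both
# options defined above are required, and main() additionally expects
# datastore credentials via GOOGLE_APPLICATION_CREDENTIALS:
#
#     GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json \
#     python upload_test_benchmarks.py -d /data/benchmarks -a /data/archive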
def main():
options = parse_cmd_line()
# Check that credentials are specified to access the datastore.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"):
raise ValueError("GOOGLE_APPLICATION_CREDENTIALS env. var. is not set.")
upload_benchmark_files(options)
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Subscriptions."""
import datetime
from google.cloud.exceptions import NotFound
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud.pubsub.snapshot import Snapshot
from google.cloud.pubsub._helpers import topic_name_from_path
from google.cloud.pubsub.iam import Policy
from google.cloud.pubsub.message import Message
class Subscription(object):
"""Subscriptions receive messages published to their topics.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions
:type name: str
:param name: the name of the subscription.
:type topic: :class:`google.cloud.pubsub.topic.Topic`
:param topic:
(Optional) the topic to which the subscription belongs; if ``None``,
the subscription's topic has been deleted.
:type ack_deadline: int
:param ack_deadline: the deadline (in seconds) by which messages pulled
from the back-end must be acknowledged.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end. If
not set, the application must pull messages.
:type retain_acked_messages: bool
:param retain_acked_messages:
(Optional) Whether to retain acked messages. If set, acked messages
are retained in the subscription's backlog for a duration indicated
by `message_retention_duration`.
:type message_retention_duration: :class:`datetime.timedelta`
:param message_retention_duration:
        (Optional) How long to retain messages in the subscription's
        backlog. If ``retain_acked_messages`` is set, this also applies to
        acked messages. If unset, defaults to 7 days.
:type client: :class:`~google.cloud.pubsub.client.Client`
:param client:
(Optional) The client to use. If not passed, falls back to the
``client`` stored on the topic.
"""
_DELETED_TOPIC_PATH = '_deleted-topic_'
"""Value of ``projects.subscriptions.topic`` when topic has been deleted.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#Subscription.FIELDS.topic
"""
def __init__(self, name, topic=None, ack_deadline=None, push_endpoint=None,
retain_acked_messages=None, message_retention_duration=None,
client=None):
if client is None and topic is None:
raise TypeError("Pass only one of 'topic' or 'client'.")
if client is not None and topic is not None:
raise TypeError("Pass only one of 'topic' or 'client'.")
self.name = name
self.topic = topic
self._client = client or topic._client
self._project = self._client.project
self.ack_deadline = ack_deadline
self.push_endpoint = push_endpoint
self.retain_acked_messages = retain_acked_messages
self.message_retention_duration = message_retention_duration
@classmethod
    def from_api_repr(cls, resource, client, topics=None):
        """Factory: construct a subscription given its API representation
:type resource: dict
:param resource: topic resource representation returned from the API.
:type client: :class:`google.cloud.pubsub.client.Client`
:param client: Client which holds credentials and project
configuration for a topic.
:type topics: dict
:param topics:
(Optional) A mapping of topic names -> topics. If not passed, the
subscription will have a newly-created topic.
:rtype: :class:`google.cloud.pubsub.subscription.Subscription`
:returns: Subscription parsed from ``resource``.
"""
if topics is None:
topics = {}
topic_path = resource['topic']
if topic_path == cls._DELETED_TOPIC_PATH:
topic = None
else:
topic = topics.get(topic_path)
if topic is None:
# NOTE: This duplicates behavior from Topic.from_api_repr to
# avoid an import cycle.
topic_name = topic_name_from_path(topic_path, client.project)
topic = topics[topic_path] = client.topic(topic_name)
_, _, _, name = resource['name'].split('/')
ack_deadline = resource.get('ackDeadlineSeconds')
push_config = resource.get('pushConfig', {})
push_endpoint = push_config.get('pushEndpoint')
retain_acked_messages = resource.get('retainAckedMessages')
resource_duration = resource.get('duration', {})
message_retention_duration = datetime.timedelta(
seconds=resource_duration.get('seconds', 0),
microseconds=resource_duration.get('nanos', 0) / 1000)
if topic is None:
return cls(name, ack_deadline=ack_deadline,
push_endpoint=push_endpoint,
retain_acked_messages=retain_acked_messages,
message_retention_duration=message_retention_duration,
client=client)
return cls(name, topic=topic, ack_deadline=ack_deadline,
push_endpoint=push_endpoint,
retain_acked_messages=retain_acked_messages,
message_retention_duration=message_retention_duration)
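    # Illustrative sketch (hypothetical values): a minimal API resource
    # accepted by from_api_repr() above. Key names follow the accesses in that
    # method; real responses carry additional fields.
    #
    #     {'name': 'projects/my-project/subscriptions/my-sub',
    #      'topic': 'projects/my-project/topics/my-topic',
    #      'ackDeadlineSeconds': 10,
    #      'pushConfig': {'pushEndpoint': 'https://example.com/push'}}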
@property
def project(self):
"""Project bound to the subscription."""
return self._client.project
@property
def full_name(self):
"""Fully-qualified name used in subscription APIs"""
return 'projects/%s/subscriptions/%s' % (self.project, self.name)
@property
def path(self):
"""URL path for the subscription's APIs"""
return '/%s' % (self.full_name,)
def auto_ack(self, return_immediately=False, max_messages=1, client=None):
""":class:`AutoAck` factory
:type return_immediately: bool
:param return_immediately: passed through to :meth:`Subscription.pull`
:type max_messages: int
:param max_messages: passed through to :meth:`Subscription.pull`
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: passed through to :meth:`Subscription.pull` and
:meth:`Subscription.acknowledge`.
:rtype: :class:`AutoAck`
:returns: the instance created for the given ``ack_id`` and ``message``
"""
return AutoAck(self, return_immediately, max_messages, client)
def _require_client(self, client):
"""Check client or verify over-ride.
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the topic of the
current subscription.
:rtype: :class:`google.cloud.pubsub.client.Client`
:returns: The client passed in or the currently bound client.
"""
if client is None:
client = self._client
return client
def create(self, client=None):
"""API call: create the subscription via a PUT request
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/create
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_create]
:end-before: [END subscription_create]
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
api = client.subscriber_api
api.subscription_create(
self.full_name, self.topic.full_name,
ack_deadline=self.ack_deadline, push_endpoint=self.push_endpoint,
retain_acked_messages=self.retain_acked_messages,
message_retention_duration=self.message_retention_duration)
def exists(self, client=None):
"""API call: test existence of the subscription via a GET request
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_exists]
:end-before: [END subscription_exists]
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
:rtype: bool
:returns: Boolean indicating existence of the subscription.
"""
client = self._require_client(client)
api = client.subscriber_api
try:
api.subscription_get(self.full_name)
except NotFound:
return False
else:
return True
def reload(self, client=None):
"""API call: sync local subscription configuration via a GET request
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get
:attr:`ack_deadline` and :attr:`push_endpoint` might never have
been set locally, or might have been updated by another client. This
method fetches their values from the server.
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_reload]
:end-before: [END subscription_reload]
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
api = client.subscriber_api
data = api.subscription_get(self.full_name)
self.ack_deadline = data.get('ackDeadlineSeconds')
push_config = data.get('pushConfig', {})
self.push_endpoint = push_config.get('pushEndpoint')
def delete(self, client=None):
"""API call: delete the subscription via a DELETE request.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/delete
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_delete]
:end-before: [END subscription_delete]
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
api = client.subscriber_api
api.subscription_delete(self.full_name)
def modify_push_configuration(self, push_endpoint, client=None):
"""API call: update the push endpoint for the subscription.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_push_pull]
:end-before: [END subscription_push_pull]
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_pull_push]
:end-before: [END subscription_pull_push]
:type push_endpoint: str
:param push_endpoint: URL to which messages will be pushed by the
back-end. If None, the application must pull
messages.
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
api = client.subscriber_api
api.subscription_modify_push_config(self.full_name, push_endpoint)
self.push_endpoint = push_endpoint
def pull(self, return_immediately=False, max_messages=1, client=None):
"""API call: retrieve messages for the subscription.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_pull]
:end-before: [END subscription_pull]
:type return_immediately: bool
:param return_immediately: if True, the back-end returns even if no
messages are available; if False, the API
call blocks until one or more messages are
available.
:type max_messages: int
:param max_messages: the maximum number of messages to return.
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
:rtype: list of (ack_id, message) tuples
:returns: sequence of tuples: ``ack_id`` is the ID to be used in a
subsequent call to :meth:`acknowledge`, and ``message``
is an instance of
:class:`~google.cloud.pubsub.message.Message`.
"""
client = self._require_client(client)
api = client.subscriber_api
response = api.subscription_pull(
self.full_name, return_immediately, max_messages)
return [(info['ackId'], Message.from_api_repr(info['message']))
for info in response]
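    # Illustrative usage sketch (hypothetical names), assuming an existing
    # subscription; pulled messages are processed and then acknowledged via
    # the two API calls defined on this class:
    #
    #     pulled = subscription.pull(return_immediately=True, max_messages=10)
    #     for ack_id, message in pulled:
    #         handle(message.data)  # application-specific processing
    #     subscription.acknowledge([ack_id for ack_id, _ in pulled])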
def acknowledge(self, ack_ids, client=None):
"""API call: acknowledge retrieved messages for the subscription.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/acknowledge
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_acknowledge]
:end-before: [END subscription_acknowledge]
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
api = client.subscriber_api
api.subscription_acknowledge(self.full_name, ack_ids)
def modify_ack_deadline(self, ack_ids, ack_deadline, client=None):
"""API call: update acknowledgement deadline for a retrieved message.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyAckDeadline
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being updated
:type ack_deadline: int
:param ack_deadline: new deadline for the message, in seconds
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
"""
client = self._require_client(client)
api = client.subscriber_api
api.subscription_modify_ack_deadline(
self.full_name, ack_ids, ack_deadline)
def snapshot(self, name, client=None):
"""Creates a snapshot of this subscription.
:type name: str
        :param name: the name of the snapshot
:rtype: :class:`Snapshot`
:returns: The snapshot created with the passed in arguments.
"""
return Snapshot(name, subscription=self)
def seek_snapshot(self, snapshot, client=None):
"""API call: seek a subscription to a given snapshot
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/seek
:type snapshot: :class:`Snapshot`
:param snapshot: The snapshot to seek to.
"""
client = self._require_client(client)
api = client.subscriber_api
api.subscription_seek(self.full_name, snapshot=snapshot.full_name)
def seek_timestamp(self, timestamp, client=None):
"""API call: seek a subscription to a given point in time
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/seek
        :type timestamp: :class:`datetime.datetime`
        :param timestamp: The time to seek to.
"""
client = self._require_client(client)
timestamp = _datetime_to_rfc3339(timestamp)
api = client.subscriber_api
api.subscription_seek(self.full_name, time=timestamp)
def get_iam_policy(self, client=None):
"""Fetch the IAM policy for the subscription.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/getIamPolicy
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_get_iam_policy]
:end-before: [END subscription_get_iam_policy]
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
:rtype: :class:`google.cloud.pubsub.iam.Policy`
:returns: policy created from the resource returned by the
``getIamPolicy`` API request.
"""
client = self._require_client(client)
api = client.iam_policy_api
resp = api.get_iam_policy(self.full_name)
return Policy.from_api_repr(resp)
def set_iam_policy(self, policy, client=None):
"""Update the IAM policy for the subscription.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/setIamPolicy
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_set_iam_policy]
:end-before: [END subscription_set_iam_policy]
:type policy: :class:`google.cloud.pubsub.iam.Policy`
:param policy: the new policy, typically fetched via
:meth:`get_iam_policy` and updated in place.
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
:rtype: :class:`google.cloud.pubsub.iam.Policy`
:returns: updated policy created from the resource returned by the
``setIamPolicy`` API request.
"""
client = self._require_client(client)
api = client.iam_policy_api
resource = policy.to_api_repr()
resp = api.set_iam_policy(self.full_name, resource)
return Policy.from_api_repr(resp)
def check_iam_permissions(self, permissions, client=None):
"""Verify permissions allowed for the current user.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/testIamPermissions
Example:
.. literalinclude:: pubsub_snippets.py
:start-after: [START subscription_check_iam_permissions]
:end-before: [END subscription_check_iam_permissions]
:type permissions: list of string
:param permissions: list of permissions to be tested
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current subscription's topic.
:rtype: sequence of string
:returns: subset of ``permissions`` allowed by current IAM policy.
"""
client = self._require_client(client)
api = client.iam_policy_api
return api.test_iam_permissions(
self.full_name, list(permissions))
class AutoAck(dict):
"""Wrapper for :meth:`Subscription.pull` results.
Mapping, tracks messages still-to-be-acknowledged.
When used as a context manager, acknowledges all messages still in the
mapping on `__exit__`. When processing the pulled messages, application
code MUST delete messages from the :class:`AutoAck` mapping which are not
successfully processed, e.g.:
    .. code-block:: python
with AutoAck(subscription) as ack: # calls ``subscription.pull``
for ack_id, message in ack.items():
try:
                   do_something_with(message)
except:
del ack[ack_id]
:type subscription: :class:`Subscription`
:param subscription: subscription to be pulled.
:type return_immediately: bool
:param return_immediately: passed through to :meth:`Subscription.pull`
:type max_messages: int
:param max_messages: passed through to :meth:`Subscription.pull`
:type client: :class:`~google.cloud.pubsub.client.Client` or
``NoneType``
:param client: passed through to :meth:`Subscription.pull` and
:meth:`Subscription.acknowledge`.
"""
def __init__(self, subscription,
return_immediately=False, max_messages=1, client=None):
super(AutoAck, self).__init__()
self._subscription = subscription
self._return_immediately = return_immediately
self._max_messages = max_messages
self._client = client
def __enter__(self):
items = self._subscription.pull(
self._return_immediately, self._max_messages, self._client)
self.update(items)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self:
self._subscription.acknowledge(list(self), self._client)
|
|
import pytest
import pytz
from django import forms
from django.db import models
from timezone_field import TimeZoneField, TimeZoneFormField
common_tz_names = tuple(tz for tz in pytz.common_timezones)
@pytest.fixture
def common_tz_objects(tz_func):
yield tuple(tz_func(tz) for tz in pytz.common_timezones)
class _ZIChoicesDisplayForm(forms.Form):
limited_tzs = [
"Asia/Tokyo",
"Asia/Dubai",
"America/Argentina/Buenos_Aires",
"Africa/Nairobi",
]
limited_choices = [(tz, tz) for tz in limited_tzs]
tz_none = TimeZoneFormField(use_pytz=False)
tz_standard = TimeZoneFormField(choices_display="STANDARD", use_pytz=False)
tz_with_gmt_offset = TimeZoneFormField(choices_display="WITH_GMT_OFFSET", use_pytz=False)
tz_limited_none = TimeZoneFormField(choices=limited_choices, use_pytz=False)
tz_limited_standard = TimeZoneFormField(
choices=limited_choices,
choices_display="STANDARD",
use_pytz=False,
)
tz_limited_with_gmt_offset = TimeZoneFormField(
choices=limited_choices,
choices_display="WITH_GMT_OFFSET",
use_pytz=False,
)
class _TZChoicesDisplayForm(forms.Form):
limited_tzs = [
"Asia/Tokyo",
"Asia/Dubai",
"America/Argentina/Buenos_Aires",
"Africa/Nairobi",
]
limited_choices = [(tz, tz) for tz in limited_tzs]
tz_none = TimeZoneFormField(use_pytz=True)
tz_standard = TimeZoneFormField(choices_display="STANDARD", use_pytz=True)
tz_with_gmt_offset = TimeZoneFormField(choices_display="WITH_GMT_OFFSET", use_pytz=True)
tz_limited_none = TimeZoneFormField(choices=limited_choices, use_pytz=True)
tz_limited_standard = TimeZoneFormField(
choices=limited_choices,
choices_display="STANDARD",
use_pytz=True,
)
tz_limited_with_gmt_offset = TimeZoneFormField(
choices=limited_choices,
choices_display="WITH_GMT_OFFSET",
use_pytz=True,
)
class _ZIChoicesDisplayModel(models.Model):
limited_tzs = [
"Asia/Tokyo",
"Asia/Dubai",
"America/Argentina/Buenos_Aires",
"Africa/Nairobi",
]
limited_choices = [(tz, tz) for tz in limited_tzs]
tz_none = TimeZoneField(use_pytz=False)
tz_standard = TimeZoneField(choices_display="STANDARD", use_pytz=False)
tz_with_gmt_offset = TimeZoneField(choices_display="WITH_GMT_OFFSET", use_pytz=False)
tz_limited_none = TimeZoneField(choices=limited_choices, use_pytz=False)
tz_limited_standard = TimeZoneField(
choices=limited_choices,
choices_display="STANDARD",
use_pytz=False,
)
tz_limited_with_gmt_offset = TimeZoneField(
choices=limited_choices,
choices_display="WITH_GMT_OFFSET",
use_pytz=False,
)
class _TZChoicesDisplayModel(models.Model):
limited_tzs = [
"Asia/Tokyo",
"Asia/Dubai",
"America/Argentina/Buenos_Aires",
"Africa/Nairobi",
]
limited_choices = [(tz, tz) for tz in limited_tzs]
tz_none = TimeZoneField(use_pytz=True)
tz_standard = TimeZoneField(choices_display="STANDARD", use_pytz=True)
tz_with_gmt_offset = TimeZoneField(choices_display="WITH_GMT_OFFSET", use_pytz=True)
tz_limited_none = TimeZoneField(choices=limited_choices, use_pytz=True)
tz_limited_standard = TimeZoneField(
choices=limited_choices,
choices_display="STANDARD",
use_pytz=True,
)
tz_limited_with_gmt_offset = TimeZoneField(
choices=limited_choices,
choices_display="WITH_GMT_OFFSET",
use_pytz=True,
)
@pytest.fixture
def ChoicesDisplayForm(use_pytz):
yield _TZChoicesDisplayForm if use_pytz else _ZIChoicesDisplayForm
@pytest.fixture
def ChoicesDisplayModel(use_pytz):
yield _TZChoicesDisplayModel if use_pytz else _ZIChoicesDisplayModel
@pytest.fixture
def ChoicesDisplayModelForm(ChoicesDisplayModel):
class _ChoicesDisplayModelForm(forms.ModelForm):
class Meta:
model = ChoicesDisplayModel
fields = "__all__"
yield _ChoicesDisplayModelForm
def test_db_field_invalid_choices_display(use_pytz):
with pytest.raises(ValueError):
TimeZoneField(choices_display="invalid", use_pytz=use_pytz)
def test_form_field_invalid_choices_display(use_pytz):
with pytest.raises(ValueError):
TimeZoneFormField(choices_display="invalid", use_pytz=use_pytz)
def test_form_field_none(ChoicesDisplayForm):
form = ChoicesDisplayForm()
values, displays = zip(*form.fields["tz_none"].choices)
assert values == common_tz_names
assert displays[values.index("America/Los_Angeles")] == "America/Los Angeles"
assert displays[values.index("Asia/Kolkata")] == "Asia/Kolkata"
def test_form_field_standard(ChoicesDisplayForm):
form = ChoicesDisplayForm()
assert form.fields["tz_standard"].choices == form.fields["tz_none"].choices
def test_form_field_with_gmt_offset(ChoicesDisplayForm):
form = ChoicesDisplayForm()
values, displays = zip(*form.fields["tz_with_gmt_offset"].choices)
assert values != common_tz_names
assert sorted(values) == sorted(common_tz_names)
assert displays[values.index("America/Argentina/Buenos_Aires")] == "GMT-03:00 America/Argentina/Buenos Aires"
assert displays[values.index("Europe/Moscow")] == "GMT+03:00 Europe/Moscow"
def test_form_field_limited_none(ChoicesDisplayForm):
form = ChoicesDisplayForm()
assert form.fields["tz_limited_none"].choices == [
("Asia/Tokyo", "Asia/Tokyo"),
("Asia/Dubai", "Asia/Dubai"),
("America/Argentina/Buenos_Aires", "America/Argentina/Buenos_Aires"),
("Africa/Nairobi", "Africa/Nairobi"),
]
def test_form_field_limited_standard(ChoicesDisplayForm):
form = ChoicesDisplayForm()
assert form.fields["tz_limited_standard"].choices == [
("Asia/Tokyo", "Asia/Tokyo"),
("Asia/Dubai", "Asia/Dubai"),
("America/Argentina/Buenos_Aires", "America/Argentina/Buenos Aires"),
("Africa/Nairobi", "Africa/Nairobi"),
]
def test_form_field_limited_with_gmt_offset(ChoicesDisplayForm):
form = ChoicesDisplayForm()
assert form.fields["tz_limited_with_gmt_offset"].choices == [
("America/Argentina/Buenos_Aires", "GMT-03:00 America/Argentina/Buenos Aires"),
("Africa/Nairobi", "GMT+03:00 Africa/Nairobi"),
("Asia/Dubai", "GMT+04:00 Asia/Dubai"),
("Asia/Tokyo", "GMT+09:00 Asia/Tokyo"),
]
def test_model_form_field_none(ChoicesDisplayModelForm, tz_func, common_tz_objects):
form = ChoicesDisplayModelForm()
values, displays = zip(*form.fields["tz_none"].choices)
assert values == ("",) + common_tz_objects
assert displays[values.index(tz_func("America/Los_Angeles"))] == "America/Los Angeles"
assert displays[values.index(tz_func("Asia/Kolkata"))] == "Asia/Kolkata"
def test_model_form_field_standard(ChoicesDisplayModelForm):
form = ChoicesDisplayModelForm()
assert form.fields["tz_standard"].choices == form.fields["tz_none"].choices
def test_model_form_field_with_gmt_offset(ChoicesDisplayModelForm, tz_func, common_tz_objects):
form = ChoicesDisplayModelForm()
values, displays = zip(*form.fields["tz_with_gmt_offset"].choices)
assert values != common_tz_objects
assert sorted(str(v) for v in values) == sorted([""] + [str(tz) for tz in common_tz_objects])
assert (
displays[values.index(tz_func("America/Argentina/Buenos_Aires"))] == "GMT-03:00 America/Argentina/Buenos Aires"
)
assert displays[values.index(tz_func("Europe/Moscow"))] == "GMT+03:00 Europe/Moscow"
def test_model_form_field_limited_none(ChoicesDisplayModelForm, tz_func):
form = ChoicesDisplayModelForm()
assert form.fields["tz_limited_none"].choices == [
("", "---------"),
(tz_func("Asia/Tokyo"), "Asia/Tokyo"),
(tz_func("Asia/Dubai"), "Asia/Dubai"),
(tz_func("America/Argentina/Buenos_Aires"), "America/Argentina/Buenos_Aires"),
(tz_func("Africa/Nairobi"), "Africa/Nairobi"),
]
def test_model_form_field_limited_standard(ChoicesDisplayModelForm, tz_func):
form = ChoicesDisplayModelForm()
assert form.fields["tz_limited_standard"].choices == [
("", "---------"),
(tz_func("Asia/Tokyo"), "Asia/Tokyo"),
(tz_func("Asia/Dubai"), "Asia/Dubai"),
(tz_func("America/Argentina/Buenos_Aires"), "America/Argentina/Buenos Aires"),
(tz_func("Africa/Nairobi"), "Africa/Nairobi"),
]
def test_model_form_field_limited_with_gmt_offset(ChoicesDisplayModelForm, tz_func):
form = ChoicesDisplayModelForm()
assert form.fields["tz_limited_with_gmt_offset"].choices == [
("", "---------"),
(
tz_func("America/Argentina/Buenos_Aires"),
"GMT-03:00 America/Argentina/Buenos Aires",
),
(tz_func("Africa/Nairobi"), "GMT+03:00 Africa/Nairobi"),
(tz_func("Asia/Dubai"), "GMT+04:00 Asia/Dubai"),
(tz_func("Asia/Tokyo"), "GMT+09:00 Asia/Tokyo"),
]
|
|
import nanosim
import numpy as np
from itertools import repeat, izip, chain
import scipy.ndimage
from scipy.interpolate import griddata
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MultipleLocator
from sets import Set
from collections import namedtuple
from utilities import *
import numexpr as ne
smoothing_factor = 0.01 # Smoothing width (gaussian std. dev.) as a fraction of the time-domain
m = 1000 # Number of points in the smoothed line
y_padding_percentage = .3 # Fraction of blank space around the y-range of the data
plt = nanosim.plt
ncolours = 12
colourmap_positions = np.linspace(0, 1.0, int(np.ceil(ncolours * 1.5))) # Generate (1.5 * ncolours) evenly-spaced colourmap positions
colourmap_positions = np.array([p for i, p in enumerate(colourmap_positions) if ((i+1) % 3) != 0]) # Reduce to (ncolours) positions by dropping every third
# Dropping every third colour ensures that there are pairs of similar colours and that those pairs are distinct from other pairs
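# e.g. with ncolours = 12: of the 18 generated positions, indices 2, 5, 8, ... are dropped, leaving six pairs of neighbouring (similar) colours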
line_colours = list(plt.get_cmap('jet')(colourmap_positions))
image_colours = ('Blues', 'Reds', 'RdPu')
SweepData = namedtuple('SweepData', 'forward, backward')
ComponentData = namedtuple('ComponentData', 'means, time_means, vars_p, vars_m, label')
def voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V, I, Q):
'''
    Recursively (for each dimension) set voltage values, then simulate and grab the data.
'''
if len(inputs) == 0:
# Clear logs
nanosim.reset_logs()
# Run sim
nanosim.simulate(steptime, logging = all_probes)
# Extract data from component logs
for k, p in enumerate(v_probes):
V[k] = p.voltages
for k, p in enumerate(i_probes):
I[k] = p.current
for k, p in enumerate(q_probes):
Q[k] = p.charges
else:
input_component = inputs[0]
v_range = v_ranges[0]
inputs = inputs[1:]
v_ranges = v_ranges[1:]
for j, v in enumerate(v_range):
input_component.set_V(v)
voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V[:, j], I[:, j], Q[:, j])
def vars_and_means(raw_data, steptime, probes, data_array_shape, flip = False):
'''
Calculate vars and means over both time and repetitions from raw sim data.
'''
if raw_data.size == 0: return [], [], [], []
# The mean/variance values over time for each component, at each voltage level combination, and at each repetition
time_means = np.zeros([len(probes)] + data_array_shape)
time_vars_plus = np.zeros([len(probes)] + data_array_shape)
time_vars_minus = np.zeros([len(probes)] + data_array_shape)
# Index order for these arrays: component, v1, (v2), repetition
time_vars = time_vars_plus, time_vars_minus
# Fill array of means by averaging over time
it = np.nditer(raw_data, flags=['multi_index', 'refs_ok'])
while not it.finished:
        events = it[0][()] # Numpy's nditer yields a 0-d array, not the actual object, so this strange getitem call is required to extract it
times, values = zip(*events)
# Weight each value by the period of time until the next measurement
times += (steptime,) # End time
times = np.diff(times)
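        # e.g. events (t=0.0, v=2) and (t=0.4, v=5) with steptime = 1.0 give
        # durations (0.4, 0.6), so the time-weighted mean below is
        # (0.4*2 + 0.6*5) / 1.0 = 3.8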
mean = ne.evaluate('sum(times * values)') / steptime
time_means[it.multi_index] = mean # Assign mean to array
residuals = values - mean # Residual at each event
plus_indicies, minus_indicies = (residuals > 0), (residuals < 0)
residuals = ne.evaluate('(times * (residuals ** 2)) / steptime') # Scaled (Note: square *before* scaling)
residuals = residuals[plus_indicies], residuals[minus_indicies] # +, - residuals
# Assign variance to arrays
time_vars_plus[it.multi_index] = np.sum(residuals[0])
time_vars_minus[it.multi_index] = np.sum(residuals[1])
it.iternext()
# The mean values over repetition for each component, at each voltage level combination
means = np.mean(time_means, axis = -1)
# Index order for these arrays: component, v1, (v2)
# The variance values over repetition for each component, at each voltage level combination
extended_means = means.reshape(means.shape + (1,)) # Extend with a single index in the repetitions-dimension - allows numpy to broadcast properly in the next step
repetition_residuals = time_means - extended_means # Residual at each repetition
    # Split into positive and negative residuals
res_plus, res_minus = np.zeros(repetition_residuals.shape), np.zeros(repetition_residuals.shape)
plus_indicies, minus_indicies = (repetition_residuals > 0), (repetition_residuals < 0)
res_plus[plus_indicies] = repetition_residuals[plus_indicies]
res_minus[minus_indicies] = repetition_residuals[minus_indicies]
repetition_residuals = res_plus**2, res_minus**2
# Add mean variance over time, to mean variance between repetitions, then take the sqrt to get the std. dev.
variances_plus, variances_minus = [np.sqrt(np.mean(t_vars, axis = -1) + np.mean(rep_vars, axis = -1)) for t_vars, rep_vars in zip(time_vars, repetition_residuals)]
# Index order for these arrays: component, v1, (v2)
# Undo backwards-voltage sweep ordering
if flip:
if len(means.shape) == 2: # For dim-1 plots
means = means[:, ::-1]
time_means = time_means[:, ::-1, :]
variances_plus = variances_plus[:, ::-1]
variances_minus = variances_minus[:, ::-1]
elif len(means.shape) == 3: # For dim-2 plots
means = means[:, ::-1, ::-1]
time_means = time_means[:, ::-1, ::-1, :]
variances_plus = variances_plus[:, ::-1, ::-1]
variances_minus = variances_minus[:, ::-1, ::-1]
means = np.transpose(means, (0, 2, 1))
time_means = np.transpose(time_means, (0, 2, 1, 3))
variances_plus = np.transpose(variances_plus, (0, 2, 1))
variances_minus = np.transpose(variances_minus, (0, 2, 1))
return means, time_means, variances_plus, variances_minus
def presmooth_current(I, steptime):
'''
Calculate vars and means for currents over repetitions from raw sim data.
Currents are different to voltages and charges, since they are all instantaneous events; measuring current variance over time has no meaning*.
* Technically you could estimate variance in frequency of events over time, and probably do some Poisson distribution stuff to characterise the
time-variance, but for now, I think it's probably fine the way it is.
'''
if I.size == 0: return [], []
it = np.nditer(I, flags=['multi_index', 'refs_ok'])
while not it.finished:
        events = it[0][()] # Numpy's nditer yields a 0-d array, not the actual object, so this strange getitem call is required to extract it
times, currents = zip(*events)
times, currents = spread_signal_spikes(times, currents, x_range = (0, steptime), m = m)
        smoothing_window = int(m * 0.4) # Smoothing radius for softening variance computation
times = smooth1D(times, k = smoothing_window)
currents = smooth1D(currents, k = smoothing_window)
I[it.multi_index] = zip(times, currents)
it.iternext()
def characterise(runtime, inputs, input_ranges, steps = 200, v_probes = [], i_probes = [], q_probes = [], v_diffs = [], repetitions = 10, raw_data = True, mean_data = True, variance = True, show = True, fig = False, axs = False, colours = None):
'''
    Characterise a circuit over one or two independent input dimensions.
runtime: time, in seconds that each simulation is to be run for
inputs: list of reservoirs to voltage-sweep with
input_ranges: a 2-tuple for each input that has min and max voltages to sweep over
steps: number of voltage-steps
v_probes: components to be monitored for voltage-levels
i_probes: components to be monitored for current
q_probes: components to be monitored for charge-levels
v_diffs: pairs (tuples) of components whose relative voltage is to be monitored (the plotted value will be: pair[0].V - pair[1].V)
    variance: calculate and display variances in behaviour (for stability analysis)
repetitions: run the sim this many times and average the results
raw_data: plot the raw data-points (for dim-1 plots only)
mean_data: plot the mean data-points (for dim-1 plots only)
    show: call pyplot.show() at the end. Set this to False if you want to modify the graph externally before showing.
fig, axs: provide fig and axs objects for the plot to work with (if you want external control over the graph). Both must be provided if either is, and axs MUST have one subplot axis for each of voltage, current and charge if they are to be monitored.
colours: change the default list of colours for plotting.
inputs and input_ranges must be at most length two.
    At least one of v_probes, i_probes, q_probes or v_diffs should be filled or there will just be an empty plot.
'''
dim = len(inputs) # 2 control voltages, or 1? (only dim 1 and dim 2 are supported)
steptime = runtime/float(steps)
if not colours:
if dim == 1:
colours = line_colours
elif dim == 2:
colours = image_colours
    dont_plot = [] # Components that need to be monitored for v_diff, but whose voltage shouldn't be independently plotted.
for pair in v_diffs:
for c in pair:
if c not in v_probes:
dont_plot.append(c)
v_probes += dont_plot # Add the not-for-plotting nodes to the monitoring-list
dont_plot = Set(dont_plot) # For quick membership tests
v_probe_indicies = dict(zip(v_probes, range(len(v_probes)))) # For quick-lookup of data when plotting v_diff pairs
all_probes = v_probes + i_probes + q_probes
all_probe_lists = (v_probes, i_probes, q_probes)
if (fig is False) or (axs is False): # If fig and axs aren't provided as args
ncolumns = 1
if dim == 1:
nplots = len(nonempty(v_probes, i_probes, q_probes))
elif dim == 2:
nplots = len(all_probes) - len(dont_plot)
if variance: ncolumns = 2
fig, axs = plt.subplots(nplots, ncolumns, figsize=(14, 10)) # Plot size
# The data for each component will be stored in these lists in the same order as the components are listed in the v/i/q_probes arguments
data_array_shape = [steps] * dim + [repetitions] # voltages (* voltages) * repetitions
V = np.empty([len(v_probes)] + data_array_shape, dtype = 'object')
I = np.empty([len(i_probes)] + data_array_shape, dtype = 'object')
Q = np.empty([len(q_probes)] + data_array_shape, dtype = 'object')
# Index order for these arrays: component, v1, (v2,) repetition
# Each element of these arrays will be a list of logged time-value tuples
all_data = (V, I, Q)
print 'Calculating...',
# Main computation for forward sweep
for j in xrange(repetitions):
nanosim.reset_sim() # Set time to 0 and clear logs on all components
v_ranges = [np.linspace(r[0], r[1], steps) for r in input_ranges]
# Step over all voltage combinations, simulate, then log the results.
voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V[..., j], I[..., j], Q[..., j])
print '%.1f%%' % (50 * float(j+1)/repetitions)
v_components, i_components, q_components = [], [], []
all_component_lists = (v_components, i_components, q_components)
presmooth_current(I, steptime)
# Temporarily sort forward-data
for data, probes, component_list in zip(all_data, all_probe_lists, all_component_lists):
means, time_means, vars_p, vars_m = vars_and_means(data, steptime, probes, data_array_shape)
for i, probe in enumerate(probes): component_list.append(ComponentData(means[i], time_means[i], vars_p[i], vars_m[i], probe.label))
inputs.reverse() # Reverse component order
input_ranges = [tuple(reversed(r)) for r in reversed(input_ranges)] # Reverse both range and component order
# Main computation for backward sweep
for j in xrange(repetitions):
nanosim.reset_sim() # Set time to 0 and clear logs on all components
v_ranges = [np.linspace(r[0], r[1], steps) for r in input_ranges]
# Step over all voltage combinations, simulate, then log the results.
voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V[..., j], I[..., j], Q[..., j])
print '%.1f%%' % (50 + 50 * float(j+1)/repetitions)
# Undo reversal
inputs.reverse()
input_ranges = [tuple(reversed(r)) for r in reversed(input_ranges)]
presmooth_current(I, steptime)
for data, probes, component_list in zip(all_data, all_probe_lists, all_component_lists):
means, time_means, vars_p, vars_m = vars_and_means(data, steptime, probes, data_array_shape, flip = True)
for i, probe in enumerate(probes):
sweep_mean = SweepData(component_list[i].means, means[i])
sweep_time_mean = SweepData(component_list[i].time_means, time_means[i])
sweep_vars_p = SweepData(component_list[i].vars_p, vars_p[i])
sweep_vars_m = SweepData(component_list[i].vars_m, vars_m[i])
component_list[i] = ComponentData(sweep_mean, sweep_time_mean, sweep_vars_p, sweep_vars_m, probe.label)
# Calculate voltage-difference data
for pair in v_diffs:
i, j = v_probe_indicies[pair[0]], v_probe_indicies[pair[1]]
means = SweepData((v_components[i].means.forward - v_components[j].means.forward), (v_components[i].means.backward - v_components[j].means.backward))
time_means = SweepData((v_components[i].time_means.forward - v_components[j].time_means.forward), (v_components[i].time_means.backward - v_components[j].time_means.backward))
vars_p = SweepData((v_components[i].vars_p.forward - v_components[j].vars_p.forward), (v_components[i].vars_p.backward - v_components[j].vars_p.backward))
vars_m = SweepData((v_components[i].vars_m.forward - v_components[j].vars_m.forward), (v_components[i].vars_m.backward - v_components[j].vars_m.backward))
v_components.append(ComponentData(means, time_means, vars_p, vars_m, 'V[%s - %s]' % (pair[0].label, pair[1].label)))
# Remove v_probes that shouldn't be plotted on their own (ie. that were only recorded for voltage-difference calculations)
v_components = [data for i, data in enumerate(v_components[:len(v_probes)]) if v_probes[i] not in dont_plot] + v_components[len(v_probes):]
# Now for the actual plotting. The logic is totally different for 1d or 2d data, so this big if-statement does the branching
if dim == 1:
v_range = input_ranges[0]
X = np.linspace(v_range[0], v_range[1], steps) # Input voltage data
        smoothing_window = int(steps * smoothing_factor) # Smoothing width in array indices
for ax, components, axlabel in zip(axs, all_component_lists, ('Voltage (V)', 'Current Avg. (e/s)', 'Charge (e)')): # For axis/plot
# Labels
ax.set_title('%s - Voltage Performance' % axlabel.split()[0])
ax.set_xlabel('Probe Voltage (V)')
ax.set_ylabel(axlabel)
for component, colourpair in zip(components, pairwise(colours)): # For each component
for means, time_means, vars_p, vars_m, colour, direction in zip(component.means, component.time_means, component.vars_p, component.vars_m, colourpair, ('->', '<-')): # For forwards and backwards datasets
# Raw Data
if raw_data:
raw_data_alpha = max(1.0/repetitions, 0.01)
for i in range(repetitions):
ax.scatter(X, time_means[..., i], marker = '.', color = colour, alpha = raw_data_alpha)
# Smoothed Mean Curve
ax.plot(smooth1D(X, k = smoothing_window), smooth1D(means, k = smoothing_window), color = colour, alpha = 0.7, lw = 2, label = '%s (%s sweep)' % (component.label, direction))
# Means
if mean_data:
ax.scatter(X, means, color = colour, alpha = 0.4)
# Variances
if variance:
ax.fill_between(X, means - vars_m, means + vars_p, facecolor = colour, alpha=0.05)
# Legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
# Bounds
ax.set_xlim(v_range[0], v_range[1])
all_data = sum(([c.means.forward, c.means.backward] for c in components), [])
# Check that there is any data
if any(len(data) > 0 for data in all_data): ymin, ymax = min(chain(*all_data)), max(chain(*all_data))
else: ymin, ymax = 0, 0
            ypadding = y_padding_percentage * (ymax - ymin)
ax.set_ylim(ymin - ypadding, ymax + ypadding)
# For title
dVdt = '%.1g' % ((v_range[1] - v_range[0])/float(runtime))
if dim == 2:
x_range = input_ranges[0]
y_range = input_ranges[1]
X = np.linspace(x_range[0], x_range[1], steps)
Y = np.linspace(y_range[0], y_range[1], steps)
#X_GRID, Y_GRID = np.meshgrid(X, Y) # Input voltage grid
# Smoothing Params
order = 3
zlevel = 3
smoothing_window = smoothing_factor * steps
#X_GRID = zoom(X_GRID, zlevel, order = order)
#Y_GRID = zoom(Y_GRID, zlevel, order = order)
#Z_GRID = zoom(Z_GRID, zlevel, order = order)
ax_counter = 0
for components, axlabel in zip(all_component_lists, ('Voltage (V)', 'Current Avg. (e/s)', 'Charge (e)')): # For quantity (V, I, Q)
for ax, component in zip(axs[ax_counter:], components): # For each component/plot
ax_counter += 1
if variance:
var_ax = ax[1]
ax = ax[0]
var_ax.set_title(axlabel.split()[0] + ' Variance ' + axlabel.split()[-1])
var_ax.set_xlabel('%s Voltage (V)' % inputs[0].label)
var_ax.set_ylabel('%s Voltage (V)' % inputs[1].label)
# Labels
ax.set_title('%s %s' % (component.label, axlabel))
ax.set_xlabel('%s Voltage (V)' % inputs[0].label)
ax.set_ylabel('%s Voltage (V)' % inputs[1].label)
imgs = []
for means, colourmap, alpha in zip(component.means, colours, (1, 0.5)): # For forwards and backwards datasets
# Smooth
Z_GRID = gaussian_filter(means, smoothing_window)
Z_GRID = zoom(Z_GRID, zlevel, order = order)
# Plot
img = ax.imshow(Z_GRID.T, origin = 'lower', extent = x_range + y_range, aspect = 'auto', alpha = alpha)
img.set_cmap(colourmap)
imgs.append(img)
# Colourbar
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size="5%", pad = 0.1)
cbar = plt.colorbar(imgs[0], cax = cax)
cbar.set_label('Forward sweep')
cbar.set_ticks([])
cax = divider.append_axes('right', size="5%", pad = 0.3)
cbar = plt.colorbar(imgs[1], cax = cax)
cbar.set_label('Backward sweep')
ax.set_xlim(x_range)
ax.set_ylim(y_range)
# Variances
if variance:
variances = (sum(component.vars_p) + sum(component.vars_m)) / 2
# Smooth
variances = gaussian_filter(variances, smoothing_window)
variances = zoom(variances, zlevel, order = order)
# Plot
img = var_ax.imshow(variances.T, origin = 'lower', extent = x_range + y_range, aspect = 'auto', alpha = 1.0)
img.set_cmap(colours[2])
# Colourbar
divider = make_axes_locatable(var_ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(img, cax = cax)
cbar.set_label('%s Variance %s' % (axlabel.split()[0], axlabel.split()[-1]))
var_ax.set_xlim(x_range)
var_ax.set_ylim(y_range)
# For title
dVdt = '(%.1g, %.1g)' % ((x_range[1] - x_range[0])/float(runtime), (y_range[1] - y_range[0])/float(runtime))
# Title
frequency = (1/float(runtime))
frequency = '%.1f %sHz' % prefix(frequency)
fig.text(.5, .93,
'Characterisation Under Stepped (Quasi-DC), %s Bi-Directional\n Voltage Sweeps (mean dV/dt = %s V/s), T=%.1fK, Averaged Over %d repetitions' % (frequency, dVdt, nanosim.T, repetitions),
horizontalalignment='center', fontsize=17)
plt.tight_layout(rect = [0, 0, 1, .93])
print 'done!'
if show: plt.show()
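# Illustrative `characterise` call (hypothetical component names; the input
# reservoirs and probe nodes would be built with nanosim elsewhere):
#   characterise(runtime = 1.0, inputs = [v_gate], input_ranges = [(-0.1, 0.1)],
#                steps = 100, v_probes = [island], i_probes = [drain], repetitions = 5)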
def dynamic_analysis(runtime, v_probes = [], i_probes = [], q_probes = [], v_diffs = [], repetitions = 1, raw_data = True, show = True, fig = False, axs = False, colours = line_colours):
'''
Analyse a circuit over time.
runtime: time, in seconds that each simulation is to be run for
v_probes: components to be monitored for voltage-levels
i_probes: components to be monitored for current
q_probes: components to be monitored for charge-levels
v_diffs: pairs (tuples) of components whose relative voltage is to be monitored (the plotted value will be: pair[0].V - pair[1].V)
repetitions: run the sim this many times and average the results
raw_data: plot the raw data-points?
    show: call pyplot.show() at the end. Set this to False if you want to modify the graph externally before showing.
fig, axs: provide fig and axs objects for the plot to work with (if you want external control over the graph). Both must be provided if either is, and axs MUST have one subplot axis for each of voltage, current and charge if they are to be monitored.
colours: change the default list of colours for plotting.
    At least one of v_probes, i_probes, q_probes or v_diffs should be filled or there will just be an empty plot.
'''
    dont_plot = [] # Components that need to be monitored for v_diff, but whose voltage shouldn't be independently plotted.
for pair in v_diffs:
for c in pair:
if c not in v_probes:
dont_plot.append(c)
v_probes += dont_plot # Add the not-for-plotting nodes to the monitoring-list
dont_plot = Set(dont_plot) # For quick membership tests
v_probe_indicies = dict(zip(v_probes, range(len(v_probes)))) # For quick-lookup of data when plotting v_diff pairs
if (fig is False) or (axs is False): # If fig and axs aren't provided as args
fig, axs = plt.subplots(len(nonempty(v_probes, i_probes, q_probes)), figsize=(14, 10)) # Plot size
# The data for each component will be stored in these lists in the same order as the components are listed in the v/i/q_probes arguments
V = [[] for p in v_probes]
I = [[] for p in i_probes]
Q = [[] for p in q_probes]
all_probes = v_probes + i_probes + q_probes
all_data = (V, I, Q)
print 'Calculating...',
# Main computation
for j in xrange(repetitions):
nanosim.reset_logs() # Clear logs
nanosim.simulate(runtime, logging = all_probes)
# Extract data from component logs
for i, p in enumerate(v_probes):
V[i] += p.voltages
for i, p in enumerate(i_probes):
I[i] += p.current
for i, p in enumerate(q_probes):
Q[i] += p.charges
print '%.1f%%' % (100 * float(j + 1)/repetitions)
# Current is different to voltages because it's made up of delta-functions (which can't be easily interpolated).
# Pre-smooth the delta functions into square-functions so that standard smoothing will work properly
for i in range(len(I)):
t_presmoothed, I_presmoothed = spread_signal_spikes(*zip(*I[i]), x_range = (0, runtime), m = m)
I_presmoothed /= float(repetitions)
I[i] = zip(t_presmoothed, I_presmoothed) # Replace the stored data with this smoothed version
    labels = (l for l, x in zip(('Voltage (V)', 'Current (e/s)', 'Charge (e)'), all_data) if bool(x)) # Labels for each graph (used for both title and y-axis)
# Plot all the data (except voltage-diffs)
    smoothing_window = int(m * smoothing_factor) # Smoothing width in array indices
for ax, data in zip(axs, nonempty(*all_data)): # Loop though V, I and Q
for component_data, colour, component in zip(data, colours[::2], all_probes): # Loop though monitored components
if component in dont_plot: continue # Skip voltage-diff only components
component_data.sort(key = lambda x: x[0]) # Sort data by time
t, y = zip(*component_data)
# Plot raw data
if raw_data: ax.scatter(t, y, color = colour, alpha = 0.4)
# Interpolate data into evenly-spaced samples
x_smooth = np.linspace(0, runtime, m)
y_smooth = np.interp(x_smooth, t, y)
# Gaussian-smooth the interpolated data
x_smooth = smooth1D(x_smooth, k = smoothing_window) # The edges of the data will be auto-trimmed to the region of convolution-validity, so the x data needs to be smoothed and trimmed to match the y-data
y_smooth = smooth1D(y_smooth, k = smoothing_window)
# Plot the smoothed curve
a = ax.plot(x_smooth, y_smooth, color = colour, alpha = 0.7, lw = 2, label = component.label)
# Plot voltage-diffs
ax = axs[0] # V axis
already_plotted = len(v_probes) - len(dont_plot)
for colour, pair in zip(colours[2*already_plotted::2], v_diffs):
i, j = v_probe_indicies[pair[0]], v_probe_indicies[pair[1]]
data1, data2 = V[i], V[j]
# Sort data by time
data1.sort(key = lambda x: x[0])
data2.sort(key = lambda x: x[0])
t1, y1 = zip(*data1)
t2, y2 = zip(*data2)
# Interpolate data into evenly-spaced samples
x_smooth = np.linspace(0, runtime, m)
y1_smooth = np.interp(x_smooth, t1, y1)
y2_smooth = np.interp(x_smooth, t2, y2)
        # Calculate the voltage-difference values
y_smooth = y1_smooth - y2_smooth
# Gaussian-smooth the interpolated data
x_smooth = smooth1D(x_smooth, k = smoothing_window) # The edges of the data will be auto-trimmed to the region of convolution-validity, so the x data needs to be smoothed and trimmed to match the y-data
y_smooth = smooth1D(y_smooth, k = smoothing_window)
# Plot the smoothed curve
a = ax.plot(x_smooth, y_smooth, color = colour, alpha = 0.7, lw = 2, label = 'V[%s - %s]' % (pair[0].label, pair[1].label))
# Finalise the plots with labels, legends and bounds
for ax, label in zip(axs, labels):
# Legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
# Labels
ax.set_xlabel('Time (s)')
ax.set_ylabel(label)
ax.set_title('%s vs Time' % label.split()[0])
# Bounds
ax.set_xlim(0, runtime) # Ensure that all the plots align properly
print 'done!'
# Title
fig.text(.52, .955, 'Dynamic Circuit Analysis', horizontalalignment='center', fontsize=17)
plt.tight_layout(rect = [0, 0, 1, .94])
if show: plt.show()
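# Illustrative `dynamic_analysis` call (hypothetical probe names, built with nanosim elsewhere):
#   dynamic_analysis(runtime = 1.0, v_probes = [island], i_probes = [drain],
#                    v_diffs = [(source, drain)], repetitions = 3)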
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import os
import pickle
import re
import tempfile
import time
import urllib
import uuid
from xml.dom import minidom
import glance.client
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
from nova.virt import images
from nova.virt.xenapi import HelperBase
from nova.virt.xenapi.volume_utils import StorageError
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
FLAGS = flags.FLAGS
flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
flags.DEFINE_integer('block_device_creation_timeout', 10,
'time to wait for a block device to be created')
flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024,
'maximum size in bytes of kernel or ramdisk images')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'
class ImageType:
"""
Enumeration class for distinguishing different image types
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
DISK_VHD = 3
class VMHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
@classmethod
def create_vm(cls, session, instance, kernel, ramdisk,
use_pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
VM reference.
the use_pv_kernel flag indicates whether the guest is HVM or PV
There are 3 scenarios:
1. Using paravirtualization, kernel passed in
2. Using paravirtualization, kernel within the image
3. Using hardware virtualization
"""
inst_type_id = instance.instance_type_id
instance_type = instance_types.get_instance_type(inst_type_id)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
'actions_after_crash': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_shutdown': 'destroy',
'affinity': '',
'blocked_operations': {},
'ha_always_run': False,
'ha_restart_priority': '',
'HVM_boot_params': {},
'HVM_boot_policy': '',
'is_a_template': False,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_target': mem,
'name_description': '',
'name_label': instance.name,
            'other_config': {'allowvssprovider': False},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': 'true', 'timeoffset': '0'},
'PV_args': '',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '',
'PV_legacy_args': '',
'PV_ramdisk': '',
'recommendations': '',
'tags': [],
'user_version': '0',
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': {},
'xenstore_data': {}}
        # Complete the VM configuration record according to the image type:
        # non-raw with PV kernel, raw with PV kernel, or raw in HVM mode
if use_pv_kernel:
rec['platform']['nx'] = 'false'
if instance.kernel_id:
# 1. Kernel explicitly passed in, use that
rec['PV_args'] = 'root=/dev/xvda1'
rec['PV_kernel'] = kernel
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
rec['PV_args'] = 'clocksource=jiffies'
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
rec['platform']['nx'] = 'true'
rec['HVM_boot_params'] = {'order': 'dc'}
rec['HVM_boot_policy'] = 'BIOS order'
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
instance_name = instance.name
LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref
@classmethod
def ensure_free_mem(cls, session, instance):
inst_type_id = instance.instance_type_id
instance_type = instance_types.get_instance_type(inst_type_id)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
host_free_mem = long(session.get_xenapi().host.
compute_free_memory(host))
return host_free_mem >= mem
@classmethod
def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
"""Create a VBD record. Returns a Deferred that gives the new
VBD reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
def find_vbd_by_number(cls, session, vm_ref, number):
"""Get the VBD reference from the device number"""
vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref)
if vbd_rec['userdevice'] == str(number):
return vbd_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@classmethod
def unplug_vbd(cls, session, vbd_ref):
"""Unplug VBD from VM"""
try:
vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
if exc.details[0] != 'DEVICE_ALREADY_DETACHED':
raise StorageError(_('Unable to unplug VBD %s') % vbd_ref)
@classmethod
def destroy_vbd(cls, session, vbd_ref):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
def create_vif(cls, session, vm_ref, network_ref, mac_address,
dev, rxtx_cap=0):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
vif_rec['device'] = str(dev)
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else ''
vif_rec['qos_algorithm_params'] = \
{"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {}
LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
' network %(network_ref)s.') % locals())
return vif_ref
@classmethod
def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
"""Create a VDI record and returns its reference."""
vdi_ref = session.get_xenapi().VDI.create(
{'name_label': name_label,
'name_description': '',
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': {},
'sm_config': {},
'tags': []})
LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.')
% locals())
return vdi_ref
@classmethod
def get_vdi_for_vm_safely(cls, session, vm_ref):
vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
if vdi_refs is None:
raise Exception(_("No VDIs found for VM %s") % vm_ref)
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
raise Exception(
_("Unexpected number of VDIs (%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, vdi_rec
@classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
"""Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
% locals())
vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
template_vm_ref = session.wait_for_task(task, instance_id)
template_vdi_rec = cls.get_vdi_for_vm_safely(session,
template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
' VM %(vm_ref)s.') % locals())
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
template_vdi_uuids = {'image': parent_uuid,
'snap': template_vdi_uuid}
return template_vm_ref, template_vdi_uuids
@classmethod
def get_sr(cls, session, sr_label='slices'):
"""Finds the SR named by the given name label and returns
the UUID"""
return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
@classmethod
def get_sr_path(cls, session):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
sr_ref = safe_find_sr(session)
sr_rec = session.get_xenapi().SR.get_record(sr_ref)
sr_uuid = sr_rec["uuid"]
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
def upload_image(cls, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
# NOTE(sirp): Currently we only support uploading images as VHD, there
# is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
os_type = instance.os_type or FLAGS.default_os_type
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
'sr_path': cls.get_sr_path(session),
'os_type': os_type}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(task, instance.id)
@classmethod
def fetch_image(cls, session, instance_id, image, user, project,
image_type):
"""
image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
glance_port = 'port for glance services'
"""
access = AuthManager().get_access_key(user, project)
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
access, user.secret,
image_type)
@classmethod
def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
image_type):
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
sr_ref = safe_find_sr(session)
# NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
# have the `uuid` module. To work around this, we generate the uuids
# here (under Python 2.6+) and pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
params = {'image_id': image,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
'uuid_stack': uuid_stack,
'sr_path': cls.get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
vdi_uuid = session.wait_for_task(task, instance_id)
cls.scan_sr(session, instance_id, sr_ref)
# Set the name-label to ease debugging
vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
name_label = get_name_label_for_image(image)
session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
% locals())
return vdi_uuid
@classmethod
def _fetch_image_glance_disk(cls, session, instance_id, image, access,
image_type):
"""Fetch the image from Glance
NOTE:
Unlike _fetch_image_glance_vhd, this method does not use the Glance
plugin; instead, it streams the disks through domU to the VDI
directly.
"""
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
sr_ref = safe_find_sr(session)
client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif image_type == ImageType.KERNEL_RAMDISK and \
vdi_size > FLAGS.max_kernel_ramdisk_size:
max_size = FLAGS.max_kernel_ramdisk_size
raise exception.Error(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") % locals())
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
with_vdi_attached_here(session, vdi_ref, False,
lambda dev:
_stream_disk(dev, image_type,
virtual_size, image_file))
if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
fn = "copy_kernel_vdi"
args = {}
args['vdi-ref'] = vdi_ref
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
session.get_xenapi().VDI.destroy(vdi_ref)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
return filename
else:
return session.get_xenapi().VDI.get_uuid(vdi_ref)
@classmethod
def determine_disk_image_type(cls, instance):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
1. If we're using Glance, we can use the image_type field to
determine the image_type
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
def log_disk_format(image_type):
pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
disk_format = pretty_format[image_type]
image_id = instance.image_id
instance_id = instance.id
LOG.debug(_("Detected %(disk_format)s format for image "
"%(image_id)s, instance %(instance_id)s") % locals())
def determine_from_glance():
glance_disk_format2nova_type = {
'ami': ImageType.DISK,
'aki': ImageType.KERNEL_RAMDISK,
'ari': ImageType.KERNEL_RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
meta = client.get_image_meta(instance.image_id)
disk_format = meta['disk_format']
try:
return glance_disk_format2nova_type[disk_format]
except KeyError:
raise exception.InvalidDiskFormat(disk_format=disk_format)
def determine_from_instance():
if instance.kernel_id:
return ImageType.DISK
else:
return ImageType.DISK_RAW
# FIXME(sirp): can we unify the ImageService and xenapi_image_service
# abstractions?
if FLAGS.xenapi_image_service == 'glance':
image_type = determine_from_glance()
else:
image_type = determine_from_instance()
log_disk_format(image_type)
return image_type
@classmethod
def _fetch_image_glance(cls, session, instance_id, image, access,
image_type):
if image_type == ImageType.DISK_VHD:
return cls._fetch_image_glance_vhd(
session, instance_id, image, access, image_type)
else:
return cls._fetch_image_glance_disk(
session, instance_id, image, access, image_type)
@classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
if image_type == ImageType.KERNEL_RAMDISK:
fn = 'get_kernel'
else:
fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
os_type):
"""
Determine whether the VM will use a paravirtualized kernel or if it
will use hardware virtualization.
1. Objectstore (any image type):
We use plugin to figure out whether the VDI uses PV
2. Glance (VHD): then we use `os_type`, raise if not set
3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
available
4. Glance (DISK): pv is assumed
"""
if FLAGS.xenapi_image_service == 'glance':
# 2, 3, 4: Glance
return cls._determine_is_pv_glance(
session, vdi_ref, disk_image_type, os_type)
else:
            # 1. Objectstore
return cls._determine_is_pv_objectstore(session, instance_id,
vdi_ref)
@classmethod
def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
elif pv_str.lower() == 'false':
pv = False
LOG.debug(_("PV Kernel in VDI:%s"), pv)
return pv
@classmethod
def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
os_type):
"""
For a Glance image, determine if we need paravirtualization.
The relevant scenarios are:
2. Glance (VHD): then we use `os_type`, raise if not set
3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
available
4. Glance (DISK): pv is assumed
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
if disk_image_type == ImageType.DISK_VHD:
# 2. VHD
if os_type == 'windows':
is_pv = False
else:
is_pv = True
elif disk_image_type == ImageType.DISK_RAW:
# 3. RAW
is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
elif disk_image_type == ImageType.DISK:
# 4. Disk
is_pv = True
else:
raise exception.Error(_("Unknown image format %(disk_image_type)s")
% locals())
return is_pv
@classmethod
def lookup(cls, session, name_label):
"""Look the instance i up, and returns it if available"""
vm_refs = session.get_xenapi().VM.get_by_name_label(name_label)
n = len(vm_refs)
if n == 0:
return None
elif n > 1:
raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
@classmethod
def lookup_vm_vdis(cls, session, vm_ref):
"""Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
vdi_refs = []
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
# Test valid VDI
record = session.get_xenapi().VDI.get_record(vdi_ref)
LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
else:
vdi_refs.append(vdi_ref)
if len(vdi_refs) > 0:
return vdi_refs
else:
return None
@classmethod
def preconfigure_instance(cls, session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
        # As mounting the image VDI is expensive, we only want to do it once,
# if at all, so determine whether it's required first, and then do
# everything
mount_required = False
key, net = _prepare_injectables(instance, network_info)
mount_required = key or net
if not mount_required:
return
with_vdi_attached_here(session, vdi_ref, False,
lambda dev: _mounted_processing(dev, key, net))
@classmethod
def lookup_kernel_ramdisk(cls, session, vm):
vm_rec = session.get_xenapi().VM.get_record(vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
else:
return (None, None)
@classmethod
def compile_info(cls, record):
"""Fill record with VM status information"""
LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"),
record['power_state'])
LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"),
XENAPI_POWER_STATE[record['power_state']])
return {'state': XENAPI_POWER_STATE[record['power_state']],
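                # XenAPI reports memory sizes in bytes; shifting right by 10
                # converts them to KiB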
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
'num_cpu': record['VCPUs_max'],
'cpu_time': 0}
@classmethod
def compile_diagnostics(cls, session, record):
"""Compile VM diagnostics data"""
try:
host = session.get_xenapi_host()
host_ip = session.get_xenapi().host.get_record(host)["address"]
except (cls.XenAPI.Failure, KeyError) as e:
return {"Unable to retrieve diagnostics": e}
try:
diags = {}
xml = get_rrd(host_ip, record["uuid"])
if xml:
rrd = minidom.parseString(xml)
for i, node in enumerate(rrd.firstChild.childNodes):
# We don't want all of the extra garbage
if i >= 3 and i <= 11:
ref = node.childNodes
# Name and Value
if len(ref) > 6:
diags[ref[0].firstChild.data] = \
ref[6].firstChild.data
return diags
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
@classmethod
def scan_sr(cls, session, instance_id=None, sr_ref=None):
"""Scans the SR specified by sr_ref"""
if sr_ref:
LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(task, instance_id)
@classmethod
def scan_default_sr(cls, session):
"""Looks for the system default SR and triggers a re-scan"""
#FIXME(sirp/mdietz): refactor scan_default_sr in there
sr_ref = cls.get_sr(session)
session.call_xenapi('SR.scan', sr_ref)
def get_rrd(host, vm_uuid):
"""Return the VM RRD XML as a string"""
try:
xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
FLAGS.xenapi_connection_username,
FLAGS.xenapi_connection_password,
host,
vm_uuid))
return xml.read()
except IOError:
return None
#TODO(sirp): This code comes from XS5.6 pluginlib.py, we should refactor to
# use that implementation
def get_vhd_parent(session, vdi_rec):
"""
Returns the VHD parent of the given VDI record, as a (ref, rec) pair.
Returns None if we're at the root of the tree.
"""
if 'vhd-parent' in vdi_rec['sm_config']:
parent_uuid = vdi_rec['sm_config']['vhd-parent']
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
vdi_uuid = vdi_rec['uuid']
LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals())
return parent_ref, parent_rec
else:
return None
def get_vhd_parent_uuid(session, vdi_ref):
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
ret = get_vhd_parent(session, vdi_rec)
if ret:
parent_ref, parent_rec = ret
return parent_rec["uuid"]
else:
return None
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
Before coalesce:
* original_parent_vhd
* parent_vhd
snapshot
    After coalesce:
* parent_vhd
snapshot
"""
max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
attempts = {'counter': 0}
def _poll_vhds():
attempts['counter'] += 1
if attempts['counter'] > max_attempts:
counter = attempts['counter']
msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
" %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
VMHelper.scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
" %(original_parent_uuid)s, waiting for coalesce...")
% locals())
else:
# Breakout of the loop (normally) and return the parent_uuid
raise utils.LoopingCallDone(parent_uuid)
loop = utils.LoopingCall(_poll_vhds)
loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
parent_uuid = loop.wait()
return parent_uuid
def get_vdi_for_vm_safely(session, vm_ref):
vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
if vdi_refs is None:
raise Exception(_("No VDIs found for VM %s") % vm_ref)
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, vdi_rec
def safe_find_sr(session):
"""Same as find_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = find_sr(session)
if sr_ref is None:
raise exception.StorageRepositoryNotFound()
return sr_ref
def find_sr(session):
"""Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
sr_refs = session.get_xenapi().SR.get_all()
for sr_ref in sr_refs:
sr_rec = session.get_xenapi().SR.get_record(sr_ref)
if not ('i18n-key' in sr_rec['other_config'] and
sr_rec['other_config']['i18n-key'] == 'local-storage'):
continue
for pbd_ref in sr_rec['PBDs']:
pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
if pbd_rec['host'] == host:
return sr_ref
return None
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
fixed in future versions:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
should_remap = FLAGS.xenapi_remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix
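    # e.g. with a remap prefix of 'sd', a plugged device 'xvdb' is reported as 'sdb'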
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
def _wait_for_device(dev):
"""Wait for device node to appear"""
for i in xrange(0, FLAGS.block_device_creation_timeout):
if os.path.exists('/dev/%s' % dev):
return
time.sleep(1)
raise StorageError(_('Timeout waiting for device %s to be created') % dev)
def with_vdi_attached_here(session, vdi_ref, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
vbd_rec['VM'] = this_vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = 'autodetect'
vbd_rec['bootable'] = False
vbd_rec['mode'] = read_only and 'RO' or 'RW'
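    # old-style conditional expression: 'RO' when read_only is true, else 'RW'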
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref)
vbd_ref = session.get_xenapi().VBD.create(vbd_rec)
LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref)
try:
LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
session.get_xenapi().VBD.plug(vbd_ref)
LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
orig_dev = session.get_xenapi().VBD.get_device(vbd_ref)
LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals())
dev = remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s') % locals())
if dev != 'autodetect':
# NOTE(johannes): Unit tests will end up with a device called
# 'autodetect' which obviously won't exist. It's not ideal,
# but the alternatives were much messier
_wait_for_device(dev)
return f(dev)
finally:
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
vbd_unplug_with_retry(session, vbd_ref)
ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref)
LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
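
# Typical usage sketch (hypothetical call site): attach a VDI to the VM this
# code runs in, hand the device name to a callback, and always clean up the
# VBD afterwards, e.g.
#   with_vdi_attached_here(session, vdi_ref, False,
#                          lambda dev: _mounted_processing(dev, key, net))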


def vbd_unplug_with_retry(session, vbd_ref):
    """Call VBD.unplug on the given VBD, with a retry if we get
    DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
    seeing the device still in use, even when all processes using the device
    should be dead."""
    # FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
    while True:
        try:
            session.get_xenapi().VBD.unplug(vbd_ref)
            LOG.debug(_('VBD.unplug successful first time.'))
            return
        except VMHelper.XenAPI.Failure, e:
            if (len(e.details) > 0 and
                e.details[0] == 'DEVICE_DETACH_REJECTED'):
                LOG.debug(_('VBD.unplug rejected: retrying...'))
                time.sleep(1)
                LOG.debug(_('Not sleeping anymore!'))
            elif (len(e.details) > 0 and
                  e.details[0] == 'DEVICE_ALREADY_DETACHED'):
                LOG.debug(_('VBD.unplug successful eventually.'))
                return
            else:
                LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
                          e)
                return


def ignore_failure(func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except VMHelper.XenAPI.Failure, e:
        LOG.error(_('Ignoring XenAPI.Failure %s'), e)
        return None


def get_this_vm_uuid():
    with file('/sys/hypervisor/uuid') as f:
        return f.readline().strip()
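
# NOTE: under Xen, /sys/hypervisor/uuid exposes the UUID of the running
# domain, so the lookup below resolves to the VM this code is executing in.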


def get_this_vm_ref(session):
    return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())


def _is_vdi_pv(dev):
    LOG.debug(_("Running pygrub against %s"), dev)
    output = os.popen('pygrub -qn /dev/%s' % dev)
    for line in output.readlines():
        # try to find kernel string
        m = re.search('(?<=kernel:)/.*(?:>)', line)
        if m and m.group(0).find('xen') != -1:
            LOG.debug(_("Found Xen kernel %s") % m.group(0))
            return True
    LOG.debug(_("No Xen kernel found. Booting HVM."))
    return False
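
# The regex above matches text of the form 'kernel:/<path>...>' in pygrub's
# output and treats the image as paravirtualised when the matched path
# contains 'xen' (e.g. a '-xen' flavoured kernel name); anything else falls
# through to an HVM boot.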


def _stream_disk(dev, image_type, virtual_size, image_file):
    offset = 0
    if image_type == ImageType.DISK:
        offset = MBR_SIZE_BYTES
        _write_partition(virtual_size, dev)

    with open('/dev/%s' % dev, 'wb') as f:
        f.seek(offset)
        for chunk in image_file:
            f.write(chunk)


def _write_partition(virtual_size, dev):
    dest = '/dev/%s' % dev
    primary_first = MBR_SIZE_SECTORS
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1

    LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
                ' to %(dest)s...') % locals())

    def execute(*cmd, **kwargs):
        return utils.execute(*cmd, **kwargs)

    execute('sudo', 'parted', '--script', dest, 'mklabel', 'msdos')
    execute('sudo', 'parted', '--script', dest, 'mkpart', 'primary',
            '%ds' % primary_first,
            '%ds' % primary_last)

    LOG.debug(_('Writing partition table %s done.'), dest)
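
# Worked example (hypothetical values, assuming MBR_SIZE_SECTORS = 63 and
# SECTOR_SIZE = 512): for virtual_size = 1073741824 (1 GiB) the primary
# partition spans sectors 63 .. 63 + 2097152 - 1 = 2097214.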


def get_name_label_for_image(image):
    # TODO(sirp): This should eventually be the URI for the Glance image
    return _('Glance image %s') % image


def _mount_filesystem(dev_path, dir):
    """mounts the device specified by dev_path in dir"""
    try:
        out, err = utils.execute('sudo', 'mount',
                                 '-t', 'ext2,ext3',
                                 dev_path, dir)
    except exception.ProcessExecutionError as e:
        err = str(e)
    return err
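
# NOTE: utils.execute returns (stdout, stderr), so on a successful mount err
# is the (normally empty) stderr string and the caller's "if not err" check
# passes; on failure err carries the ProcessExecutionError text instead.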


def _find_guest_agent(base_dir, agent_rel_path):
    """
    tries to locate a guest agent at the path
    specified by agent_rel_path
    """
    agent_path = os.path.join(base_dir, agent_rel_path)
    if os.path.isfile(agent_path):
        # The presence of the guest agent
        # file indicates that this instance can
        # reconfigure the network from xenstore data,
        # so manipulation of files in /etc is not
        # required
        LOG.info(_('XenServer tools installed in this '
                   'image are capable of network injection. '
                   'Networking files will not be '
                   'manipulated'))
        return True
    xe_daemon_filename = os.path.join(base_dir,
                                      'usr', 'sbin', 'xe-daemon')
    if os.path.isfile(xe_daemon_filename):
        LOG.info(_('XenServer tools are present '
                   'in this image but are not capable '
                   'of network injection'))
    else:
        LOG.info(_('XenServer tools are not '
                   'installed in this image'))
    return False


def _mounted_processing(device, key, net):
    """Callback which runs with the image VDI attached"""
    dev_path = '/dev/' + device + '1'  # NB: Partition 1 hardcoded
    tmpdir = tempfile.mkdtemp()
    try:
        # Mount only Linux filesystems, to avoid disturbing NTFS images
        err = _mount_filesystem(dev_path, tmpdir)
        if not err:
            try:
                # This try block ensures that the umount occurs
                if not _find_guest_agent(tmpdir, FLAGS.xenapi_agent_path):
                    LOG.info(_('Manipulating interface files '
                               'directly'))
                    disk.inject_data_into_fs(tmpdir, key, net,
                                             utils.execute)
            finally:
                utils.execute('sudo', 'umount', dev_path)
        else:
            LOG.info(_('Failed to mount filesystem (expected for '
                       'non-linux instances): %s') % err)
    finally:
        # remove temporary directory
        os.rmdir(tmpdir)
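
# NOTE: mounting the hardcoded first partition is consistent with
# _write_partition() above, which lays down a single primary partition
# starting right after the MBR.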


def _prepare_injectables(inst, networks_info):
    """
    prepares the ssh key and the network configuration file to be
    injected into the disk image
    """
    # do the import here - Cheetah.Template will be loaded
    # only if injection is performed
    from Cheetah import Template as t
    template = t.Template
    template_data = open(FLAGS.injected_network_template).read()

    key = str(inst['key_data'])
    net = None
    if networks_info:
        ifc_num = -1
        interfaces_info = []
        have_injected_networks = False
        for (network_ref, info) in networks_info:
            ifc_num += 1
            if not network_ref['injected']:
                continue

            have_injected_networks = True
            ip_v4 = ip_v6 = None
            if 'ips' in info and len(info['ips']) > 0:
                ip_v4 = info['ips'][0]
            if 'ip6s' in info and len(info['ip6s']) > 0:
                ip_v6 = info['ip6s'][0]

            # default to an empty string so 'dns' is always defined below,
            # even when the network has no DNS servers configured
            dns = ''
            if len(info['dns']) > 0:
                dns = info['dns'][0]

            interface_info = {'name': 'eth%d' % ifc_num,
                              'address': ip_v4 and ip_v4['ip'] or '',
                              'netmask': ip_v4 and ip_v4['netmask'] or '',
                              'gateway': info['gateway'],
                              'broadcast': info['broadcast'],
                              'dns': dns,
                              'address_v6': ip_v6 and ip_v6['ip'] or '',
                              'netmask_v6': ip_v6 and ip_v6['netmask'] or '',
                              'gateway_v6': ip_v6 and info['gateway6'] or '',
                              'use_ipv6': FLAGS.use_ipv6}
            interfaces_info.append(interface_info)

        if have_injected_networks:
            net = str(template(template_data,
                               searchList=[{'interfaces': interfaces_info,
                                            'use_ipv6': FLAGS.use_ipv6}]))
    return key, net
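
# Expected shape of networks_info, inferred from the field accesses above
# (values purely illustrative):
#   networks_info = [(network_ref, info), ...]
#   network_ref['injected'] -> bool
#   info = {'ips': [{'ip': '10.0.0.3', 'netmask': '255.255.255.0'}],
#           'ip6s': [{'ip': 'fe80::3', 'netmask': '64'}],
#           'dns': ['8.8.8.8'],
#           'gateway': '10.0.0.1',
#           'gateway6': 'fe80::1',
#           'broadcast': '10.0.0.255'}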