input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>flask_generic_views/__init__.py
# -*- coding: utf-8 -*-
"""
flask.ext.generic_views
~~~~~~~~~~~~~~~~~~~~~~~
Flask-GenericViews (FGV) provides view abstraction for common CRUD
operations. Each CRUD operation has its own view for example:
- show details of an object (ShowView)
- create an object (CreateView)
- update an object (UpdateView)
- delete an object (DeleteView)
- list objects (ListView)
Similar to Django Generic Views FGV is built very modular.
Flask-GenericViews also provides means for plugging the standard API
routes for the views (see ModelRouter for more info). FGV is built on
top of Flask, SQLAlchemy, WTForms.
:copyright: (c) 2012 <NAME>.
:license: BSD, see LICENSE for more details.
"""
from copy import copy
from datetime import datetime, date, time
from decimal import Decimal
from flask import (render_template, request, redirect, url_for, flash,
current_app, Blueprint)
from inflection import underscore, humanize
from sqlalchemy import types
from wtforms.ext.sqlalchemy.orm import model_form
from .core import BaseView, TemplateView
from .exceptions import ImproperlyConfigured
try:
__version__ = __import__('pkg_resources')\
.get_distribution('flask_generic_views').version
except Exception:
__version__ = 'unknown'
# Maps base SQLAlchemy column types to native Python types.  Lookups go
# through get_native_type(), which uses isinstance(), so subclasses of
# these SQLAlchemy types are matched as well.
# NOTE(review): `unicode` exists only on Python 2; this table (and hence
# the module) will not import under Python 3 as written — confirm the
# supported interpreter before porting.
TYPE_MAP = {
    types.BigInteger: int,
    types.SmallInteger: int,
    types.Integer: int,
    types.DateTime: datetime,
    types.Date: date,
    types.Time: time,
    types.Text: str,
    types.Unicode: unicode,
    types.UnicodeText: unicode,
    types.Float: float,
    types.Numeric: Decimal,
    types.Boolean: bool
}
def get_native_type(sqlalchemy_type):
    """Return the native Python type for *sqlalchemy_type*.

    Matching is done with isinstance(), so column types that merely
    extend the basic SQLAlchemy types are understood too.  Returns
    ``None`` when no mapping exists.

    Bug fixed: the original loop contained a stray ``break`` that ended
    iteration after the first TYPE_MAP entry, so most types were never
    checked at all.

    NOTE(review): when a type matches several TYPE_MAP entries, dict
    iteration order decides which native type wins — confirm no entry
    subclasses another in a way that matters.
    """
    for base_type, native_type in TYPE_MAP.items():
        if isinstance(sqlalchemy_type, base_type):
            return native_type
    return None
class ModelMixin(object):
    """
    Base class for all views interacting with models

    :param model_class: SQLAlchemy Model class
    :param query: the query to be used for fetching the object
    :param pk_param: name of the primary key URL parameter
    """
    model_class = None
    query = None
    pk_param = 'id'

    def get_model(self):
        """Return the configured model class.

        Raises ImproperlyConfigured with a descriptive message instead
        of the original bare, message-less ``Exception`` (consistent
        with the exception class this module already imports).
        """
        if not self.model_class:
            raise ImproperlyConfigured(
                "%s requires a definition of 'model_class'"
                % self.__class__.__name__)
        return self.model_class

    @property
    def db(self):
        # Flask-SQLAlchemy registers itself under this extension key.
        return current_app.extensions['sqlalchemy'].db

    def get_query(self):
        """
        Returns the query associated with this view

        If no query was given, tries to use the query class of the model
        """
        if self.query:
            return self.query
        return self.model_class.query

    def get_object(self, **kwargs):
        """Fetch the object whose primary key arrived as URL kwarg ``pk_param``."""
        pk = kwargs[self.pk_param]
        return self.get_query().get_or_404(pk)
class TemplateMixin(object):
    """
    Generic template mixin

    :param template: name of the template to be rendered on html request
    :param context: dict (or callable returning a dict) of context
        arguments passed to the template
    """
    template = None
    # Class-level default shared by every instance; get_context() must
    # therefore never mutate it in place.
    context = {}

    def render_template(self, **kwargs):
        context = self.get_context(**kwargs)
        return render_template(self.get_template(), **context)

    def get_template(self):
        """Return the configured template name, raising a descriptive
        ImproperlyConfigured (not a bare Exception) when unset."""
        if not self.template:
            raise ImproperlyConfigured(
                "%s requires a definition of 'template'"
                % self.__class__.__name__)
        return self.template

    def get_context(self, **kwargs):
        """
        Returns the context variables

        Bug fixed: the original called ``self.context.update(kwargs)``
        directly, permanently polluting the shared class-level ``context``
        dict (every later request on any view saw earlier kwargs).  A
        copy is taken before merging.
        """
        if callable(self.context):
            context = dict(self.context())
        else:
            context = dict(self.context)
        context.update(kwargs)
        return context
class ModelView(BaseView, ModelMixin, TemplateMixin):
    """Base class for model-backed template views.

    Fills the ``%(resource)s`` placeholder in the configured template
    path with the underscored model class name (e.g. ``DeviceType`` ->
    ``device_type``).
    """

    def get_template(self):
        template_path = TemplateMixin.get_template(self)
        resource_name = underscore(self.model_class.__name__)
        return template_path % {'resource': resource_name}
class ShowView(ModelView):
    """Render a single object with the resource's ``show.html`` template.

    Register it like::

        app.add_url_rule('/users/<int:id>',
                         view_func=ShowUserView.as_view('show', model_class=User))

    A GET to ``/users/3`` then renders ``user/show.html`` for user 3, or
    aborts with 404 when no such row exists.

    :param model_class: SQLAlchemy Model class
    :param template: name of the template to be rendered on html request
    :param context: dict of extra context arguments passed to the template

    NOTE(review): the original docs describe a ``template_object_name``
    option, but the object is always passed to the template as ``item``.
    """
    template = '%(resource)s/show.html'

    def dispatch_request(self, *args, **kwargs):
        """Look up the object addressed by the URL and render it."""
        requested = self.get_object(**kwargs)
        return self.render_template(item=requested)
class FormMixin(object):
    """
    Generic form handling mixin.

    :param form_class: form class to be used for request params validation
    :param success_url: endpoint to be redirected to on success
        (the original docs called this ``success_redirect``; the actual
        attribute is ``success_url``)
    :param success_message: message to be flashed on success
    :param failure_message: message to be flashed on failure
    """
    form_class = None
    failure_message = ''
    success_message = ''
    success_url = None

    def flash(self, message, *args, **kwargs):
        """
        Flashes given message with arguments

        No-op when the message is empty, so views may leave the
        success/failure messages unset.
        """
        if message:
            flash(message, *args, **kwargs)

    def is_submitted(self):
        # Any configured non-GET method counts as a form submission.
        return request.method in set(self.methods).difference(['GET'])

    def validate_on_submit(self, form):
        # True only for submitted requests whose form data validates.
        return self.is_submitted() and form.validate()

    def save(self, form, object):
        """
        Validate the submitted form and persist *object*; flash the
        success or failure message and return True/False accordingly.
        (Redirecting is the caller's job — the original docstring's
        claim that this method redirects was inaccurate.)

        NOTE(review): relies on ``self.db`` and the ``get_*_message``
        hooks being provided by a sibling mixin/view class — confirm
        concrete views always mix those in.
        """
        if self.validate_on_submit(form):
            form.populate_obj(object)
            self.db.session.commit()
            self.flash(self.get_success_message(), 'success')
            return True
        else:
            self.flash(self.get_failure_message(), 'failure')
            return False
class FormView(BaseView, FormMixin):
    """Stand-alone form view driven by an explicitly supplied form class.

    NOTE(review): dispatch_request() calls ``get_object()`` and
    ``render_template()``, which neither BaseView nor FormMixin visibly
    define in this module — subclasses appear expected to provide them;
    confirm against ``.core``.
    """

    def get_form(self, obj=None):
        """
        Returns the form associated with this view

        Raises ImproperlyConfigured with a descriptive message (instead
        of the original bare ``Exception``) when no form_class is set.
        """
        if not self.form_class:
            raise ImproperlyConfigured(
                "%s requires a definition of 'form_class'"
                % self.__class__.__name__)
        return self.form_class(request.form, obj=obj)

    def get_success_redirect(self):
        """
        Returns the url to redirect to on successful request

        TODO: make this support absolute urls also
        """
        return self.success_url

    def get_success_message(self):
        """
        Returns the formatted success message (if any)
        """
        return self.success_message

    def get_failure_message(self):
        """
        Returns the formatted failure message (if any)
        """
        return self.failure_message

    def dispatch_request(self, *args, **kwargs):
        """Bind the form, save, then redirect on success or re-render."""
        item = self.get_object(**kwargs)
        form = self.get_form(obj=item)
        if self.save(form, item):
            return redirect(url_for(self.get_success_redirect(), id=item.id))
        return self.render_template(item=item, form=form)
class ModelFormView(ModelView, FormMixin):
    """Combines ModelView rendering with FormMixin form handling."""

    def get_form(self, obj=None):
        """Return a form bound to the request data.

        When no explicit form_class is configured, a form class is
        generated on the fly with WTForms' ``model_form`` helper.
        """
        if self.form_class:
            return self.form_class(request.form, obj=obj)
        generated_class = model_form(self.model_class, db_session=self.db.session)
        return generated_class(request.form, obj=obj)

    def get_success_redirect(self):
        """Return the endpoint to redirect to on success.

        TODO: make this support absolute urls also
        """
        resource = underscore(self.model_class.__name__)
        return self.success_url % {'resource': resource}

    def get_success_message(self):
        """Return the formatted success message (if any)."""
        return self.success_message % {'model': humanize(self.model_class.__name__)}

    def get_failure_message(self):
        """Return the formatted failure message (if any)."""
        return self.failure_message % {'model': humanize(self.model_class.__name__)}

    def dispatch_request(self, *args, **kwargs):
        """Fetch the object, bind the form, save, then redirect or re-render."""
        item = self.get_object(**kwargs)
        form = self.get_form(obj=item)
        if not self.save(form, item):
            return self.render_template(item=item, form=form)
        return redirect(url_for(self.get_success_redirect(), id=item.id))
class CreateFormView(ModelFormView):
    """
    Generic create form view

    Template name:
        If template isn't specified, this view uses
        ``<resource>/create.html``, where ``<resource>`` is the model's
        name underscored (StaffMember -> staff_member).

    Template context:
        In addition to the configured context, the template receives
        ``form``, the form instance for editing the object.
    """
    template = '%(resource)s/create.html'
    success_message = '%(model)s created!'
    success_url = '%(resource)s.show'
    methods = ['GET', 'POST']

    def get_object(self, **kwargs):
        """Create (rather than fetch) the instance the form will populate.

        Bug fixed: accepts ``**kwargs`` so it stays call-compatible with
        ModelFormView.dispatch_request(), which forwards URL parameters;
        the original zero-argument override raised TypeError whenever
        the route carried any parameter.
        """
        object = self.model_class()
        self.db.session.add(object)
        return object
class UpdateFormView(ModelFormView):
    """
    Generic update form view

    Template name:
        If template isn't specified, this view uses
        ``<resource>/edit.html``, where ``<resource>`` is the model's
        name underscored (StaffMember -> staff_member).

    Template context:
        In addition to the configured context, the template receives
        ``form``, the form instance for editing the object.
    """
    # %(resource)s is filled in by ModelView.get_template().
    template = '%(resource)s/edit.html'
    # %(model)s is filled in by ModelFormView.get_success_message().
    success_message = '%(model)s updated!'
    # Endpoint pattern resolved by ModelFormView.get_success_redirect().
    success_url = '%(resource)s.show'
    methods = ['GET', 'POST', 'PUT', 'PATCH']
class CreateView(ModelFormView):
    """
    Creates a model object

    By default on html request redirects to resource.show and creates a
    simple success message
    """
    methods = ['POST']
    success_message = '%(model)s created!'
    success_url = '%(resource)s.show'

    def get_object(self, **kwargs):
        """Create a new, session-added instance for the form to populate.

        Bug fixed: accepts ``**kwargs`` for call-compatibility with
        ModelFormView.dispatch_request(), which forwards URL parameters;
        the original zero-argument override raised TypeError on any
        parameterized route.
        """
        object = self.model_class()
        self.db.session.add(object)
        return object

    def dispatch_request(self, *args, **kwargs):
        item = self.get_object()
        form = self.get_form(obj=item)
        self.save(form, item)
        # NOTE(review): redirects even when save() fails; in that case the
        # session was never committed and item.id may still be None —
        # confirm the intended failure behaviour before relying on it.
        return redirect(url_for(self.get_success_redirect(), id=item.id))
class UpdateView(ModelFormView):
    """API-style update of a model object.

    Redirects to ``resource.show`` and flashes a simple success message
    by default.
    """
    methods = ['PUT', 'PATCH']
    success_message = '%(model)s updated!'
    success_url = '%(resource)s.show'

    def dispatch_request(self, *args, **kwargs):
        """Populate the addressed object from the form and redirect."""
        item = self.get_object(**kwargs)
        self.save(self.get_form(obj=item), item)
        target = url_for(self.get_success_redirect(), id=item.id)
        return redirect(target)
class DeleteView(ModelFormView):
    """Delete a model object.

    Redirects to ``resource.index`` and flashes a simple success message
    by default.
    """
    methods = ['DELETE', 'POST']
    success_message = '%(model)s deleted.'
    success_url = '%(resource)s.index'

    def delete(self, item):
        """Perform the actual deletion; subclasses may override this."""
        self.db.session.delete(item)

    def dispatch_request(self, *args, **kwargs):
        """Delete the addressed object, commit, flash, and redirect."""
        target = self.get_object(**kwargs)
        self.delete(target)
        self.db.session.commit()
        self.flash(self.get_success_message(), 'success')
        return redirect(url_for(self.get_success_redirect()))
class ListView(ModelView):
"""
Views several items as a list
:param query the query to be used for fetching the items, by default
this is model.query (= all records for given model)
"""
template = '%(resource)s/index.html'
def __init__(self,
query_field_names=None,
columns=None,
*args, **kwargs):
ModelView.__init__(self, *args, **kwargs)
if | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import tensorflow as tf
import layer_utils
import match_utils
class SentenceMatchModelGraph(object):
"""
Create Natural Language Sentence Matching Models.
-- sentence-sentence pairs
-- question-answer pairs
-- premise-hypothesis pairs
"""
def __init__(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, options=None, global_step=None):
    """Build the combined sentence-matching graph.

    Each algorithm named in ``options.using_algo`` contributes one match
    representation; they are concatenated and fed to the prediction layer.

    Improvement: the original body repeated the same five-line pattern
    eleven times (scope, build, append, accumulate dim, print).  It is
    now table-driven; order, scope names, builder arguments and printed
    labels are unchanged.

    NOTE(review): membership is tested with ``in`` exactly as before, so
    if ``using_algo`` is a plain string, e.g. 'bimpm_char', the
    substring 'bimpm' also matches and both graphs are built — confirm
    that is intended.
    """
    self.options = options
    self.create_placeholders()
    self.create_embedding(num_classes, word_vocab, char_vocab, is_training, global_step)
    # (key in options.using_algo, variable scope, builder, log label)
    sub_models = [
        ('feat', 'feat', self.create_features, 'Using Features'),
        ('bimpm', 'bimpm', self.create_bimpm_model_graph, 'Using BIMPM'),
        ('bimpm_char', 'bimpm_char', self.create_bimpm_char_model_graph, 'Using BIMPM CHAR'),
        ('mpcnn', 'mpcnn', self.create_mpcnn_model_graph, 'Using MPCNN'),
        ('mpcnn_char', 'mpcnn_char', self.create_mpcnn_char_model_graph, 'Using MPCNN CHAR'),
        ('siameseLSTM', 'siameseLSTM', self.create_siameseLSTM_model_graph, 'Using SiameseLSTM'),
        ('siameseCNN', 'siameseCNN', self.create_SiameseCNN_model_graph, 'Using SiameseCNN'),
        ('MatchPyramid', 'MatchPyramid', self.create_MatchPyramid_model_graph, 'Using MatchPyramid'),
        ('esim', 'esim', self.create_esim_model_graph, 'Using ESIM'),
        ('DecAtt', 'DecAtt', self.create_DecAtt_model_graph, 'Using DecAtt'),
        ('imodel', 'imodel', self.create_my_model_graph, 'Using MyModel'),
    ]
    match_representations = []
    match_dims = 0
    for algo_key, scope_name, builder, label in sub_models:
        if algo_key not in options.using_algo:
            continue
        with tf.variable_scope(scope_name):
            match_representation, match_dim = builder(
                num_classes, word_vocab, char_vocab, is_training,
                global_step=global_step)
        match_representations.append(match_representation)
        match_dims += match_dim
        print(label)
    match_representations = tf.concat(axis=1, values=match_representations)
    self.prediction_layer(num_classes, match_representations, match_dims, is_training, global_step)
def create_placeholders(self):
    """Define the input placeholders later fed by create_feed_dict()."""
    self.truth = tf.placeholder(tf.int32, [None]) # [batch_size]
    self.question_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
    self.passage_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
    self.in_question_words = tf.placeholder(tf.int32, [None, None]) # [batch_size, question_len]
    self.in_passage_words = tf.placeholder(tf.int32, [None, None]) # [batch_size, passage_len]
    self.in_question_passage_features = tf.placeholder(tf.float32, [None, None]) # [batch_size, features_dim]
    # Character-level inputs exist only when the model is configured to
    # use them.
    if self.options.with_char:
        self.question_char_lengths = tf.placeholder(tf.int32, [None,None]) # [batch_size, question_len]
        self.passage_char_lengths = tf.placeholder(tf.int32, [None,None]) # [batch_size, passage_len]
        self.in_question_chars = tf.placeholder(tf.int32, [None, None, None]) # [batch_size, question_len, q_char_len]
        self.in_passage_chars = tf.placeholder(tf.int32, [None, None, None]) # [batch_size, passage_len, p_char_len]
        self.question_sent_char_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
        self.passage_sent_char_lengths = tf.placeholder(tf.int32, [None]) # [batch_size]
        self.in_question_sent_chars = tf.placeholder(tf.int32, [None, None]) # [batch_size, question_len]
        self.in_passage_sent_chars = tf.placeholder(tf.int32, [None, None]) # [batch_size, passage_len]
def create_feed_dict(self, cur_batch, is_training=False):
    """Map one batch's arrays onto the graph placeholders.

    ``is_training`` is accepted for signature compatibility but is not
    consulted here.
    """
    feed_dict = {
        self.truth: cur_batch.label_truth,
        self.question_lengths: cur_batch.question_lengths,
        self.passage_lengths: cur_batch.passage_lengths,
        self.in_question_words: cur_batch.in_question_words,
        self.in_passage_words: cur_batch.in_passage_words,
        self.in_question_passage_features: cur_batch.in_question_passage_features,
    }
    if self.options.with_char:
        feed_dict.update({
            self.question_char_lengths: cur_batch.question_char_lengths,
            self.passage_char_lengths: cur_batch.passage_char_lengths,
            self.in_question_chars: cur_batch.in_question_chars,
            self.in_passage_chars: cur_batch.in_passage_chars,
            self.question_sent_char_lengths: cur_batch.question_sent_char_lengths,
            self.passage_sent_char_lengths: cur_batch.passage_sent_char_lengths,
            self.in_question_sent_chars: cur_batch.in_question_sent_chars,
            self.in_passage_sent_chars: cur_batch.in_passage_sent_chars,
        })
    return feed_dict
# ==================================================== Embedding =================================================
def create_embedding(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
    """Create word/char embedding variables initialised via placeholders.

    The embedding matrix is pinned to CPU when the vectors are fixed
    (non-trainable), otherwise it lives on GPU.  ``num_classes``,
    ``is_training`` and ``global_step`` are unused here; they are kept
    for signature symmetry with the other create_* builders.
    """
    options = self.options
    if word_vocab is not None:
        word_vec_trainable = True
        cur_device = '/gpu:0'
        if options.fix_word_vec:
            word_vec_trainable = False
            cur_device = '/cpu:0'
        with tf.device(cur_device):
            # Initialised from a placeholder so the (potentially large)
            # pretrained matrix is fed at variable-init time rather than
            # baked into the graph definition.
            self.w_embedding = tf.placeholder(tf.float32, shape=word_vocab.word_vecs.shape)
            self.word_embedding = tf.get_variable("word_embedding", trainable=word_vec_trainable,
                                                  initializer=self.w_embedding, dtype=tf.float32) # tf.constant(word_vocab.word_vecs)
    # with tf.device('/gpu:0'):
    #     self.w_embedding_trainable = tf.placeholder(tf.float32, shape=word_vocab.word_vecs.shape)
    #     self.word_embedding_trainable = tf.get_variable("word_embedding_trainable", trainable=True,
    #                                       initializer=self.w_embedding_trainable, dtype=tf.float32) # tf.constant(word_vocab.word_vecs)
    # tf.truncated_normal([tf.shape(self.w_embedding)[0], options.word_emb_dim])
    if options.with_char and char_vocab is not None:
        char_vec_trainable = True
        cur_device = '/gpu:0'
        if options.fix_char_vec:
            char_vec_trainable = False
            cur_device = '/cpu:0'
        with tf.device(cur_device):
            self.c_embedding = tf.placeholder(tf.float32, shape=char_vocab.word_vecs.shape)
            self.char_embedding = tf.get_variable("char_embedding", trainable=char_vec_trainable,
                                                  initializer=self.c_embedding, dtype=tf.float32)
# ==================================================== BiMPM =====================================================
def create_bimpm_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
    """Build the word-level BiMPM matching sub-graph.

    Returns ``(match_representation, match_dim)`` from
    match_utils.bilateral_match_func.  ``num_classes`` and
    ``global_step`` are unused here (the projection layer is commented
    out below); they are kept for builder-signature symmetry.
    """
    options = self.options
    # ======word representation layer======
    in_question_repres = []
    in_passage_repres = []
    input_dim = 0
    if word_vocab is not None:
        in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
        in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
        in_question_repres.append(in_question_word_repres)
        in_passage_repres.append(in_passage_word_repres)
        input_shape = tf.shape(self.in_question_words)
        batch_size = input_shape[0]
        question_len = input_shape[1]
        input_shape = tf.shape(self.in_passage_words)
        passage_len = input_shape[1]
        input_dim += word_vocab.word_dim
    if options.with_char and char_vocab is not None:
        input_shape = tf.shape(self.in_question_chars)
        batch_size = input_shape[0]
        question_len = input_shape[1]
        q_char_len = input_shape[2]
        input_shape = tf.shape(self.in_passage_chars)
        passage_len = input_shape[1]
        p_char_len = input_shape[2]
        char_dim = char_vocab.word_dim
        in_question_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_question_chars) # [batch_size, question_len, q_char_len, char_dim]
        # Fold the word dimension into the batch so the char LSTM runs
        # over one word's characters per "sequence".
        in_question_char_repres = tf.reshape(in_question_char_repres, shape=[-1, q_char_len, char_dim])
        question_char_lengths = tf.reshape(self.question_char_lengths, [-1])
        # (sic: variable name typo "quesiton" kept from the original)
        quesiton_char_mask = tf.sequence_mask(question_char_lengths, q_char_len, dtype=tf.float32) # [batch_size*question_len, q_char_len]
        in_question_char_repres = tf.multiply(in_question_char_repres, tf.expand_dims(quesiton_char_mask, axis=-1))
        in_passage_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_passage_chars) # [batch_size, passage_len, p_char_len, char_dim]
        in_passage_char_repres = tf.reshape(in_passage_char_repres, shape=[-1, p_char_len, char_dim])
        passage_char_lengths = tf.reshape(self.passage_char_lengths, [-1])
        passage_char_mask = tf.sequence_mask(passage_char_lengths, p_char_len, dtype=tf.float32) # [batch_size*passage_len, p_char_len]
        in_passage_char_repres = tf.multiply(in_passage_char_repres, tf.expand_dims(passage_char_mask, axis=-1))
        (question_char_outputs_fw, question_char_outputs_bw, _) = layer_utils.my_lstm_layer(in_question_char_repres, options.char_lstm_dim,
                input_lengths=question_char_lengths,scope_name="char_lstm", reuse=False,
                is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
        if options.lstm_out_type == 'mean':
            question_char_outputs_fw = layer_utils.collect_mean_step_of_lstm(question_char_outputs_fw)
            question_char_outputs_bw = layer_utils.collect_mean_step_of_lstm(question_char_outputs_bw)
        elif options.lstm_out_type == 'end':
            question_char_outputs_fw = layer_utils.collect_final_step_of_lstm(question_char_outputs_fw, question_char_lengths - 1)
            # Backward pass: its "final" state is at time step 0.
            question_char_outputs_bw = question_char_outputs_bw[:, 0, :]
        question_char_outputs = tf.concat(axis=1, values=[question_char_outputs_fw, question_char_outputs_bw])
        question_char_outputs = tf.reshape(question_char_outputs, [batch_size, question_len, 2*options.char_lstm_dim]) # [batch_size, question_len, 2*options.char_lstm_dim]
        # Same char LSTM (reuse=True) applied to the passage side.
        (passage_char_outputs_fw, passage_char_outputs_bw, _) = layer_utils.my_lstm_layer(in_passage_char_repres, options.char_lstm_dim,
                input_lengths=passage_char_lengths, scope_name="char_lstm", reuse=True,
                is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
        if options.lstm_out_type == 'mean':
            passage_char_outputs_fw = layer_utils.collect_mean_step_of_lstm(passage_char_outputs_fw)
            passage_char_outputs_bw = layer_utils.collect_mean_step_of_lstm(passage_char_outputs_bw)
        elif options.lstm_out_type == 'end':
            passage_char_outputs_fw = layer_utils.collect_final_step_of_lstm(passage_char_outputs_fw, passage_char_lengths - 1)
            passage_char_outputs_bw = passage_char_outputs_bw[:, 0, :]
        passage_char_outputs = tf.concat(axis=1, values=[passage_char_outputs_fw, passage_char_outputs_bw])
        passage_char_outputs = tf.reshape(passage_char_outputs, [batch_size, passage_len, 2*options.char_lstm_dim]) # [batch_size, passage_len, 2*options.char_lstm_dim]
        in_question_repres.append(question_char_outputs)
        in_passage_repres.append(passage_char_outputs)
        input_dim += 2*options.char_lstm_dim
    in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
    in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
    # ======rcnn context layer======
    if options.with_rcnn:
        # NOTE(review): the RCNN layer is built from options.word_emb_dim
        # while input_dim may already include char-LSTM dims — confirm
        # the option dims line up when with_char and with_rcnn combine.
        in_question_repres = layer_utils.my_rcnn_layer(
            in_question_repres, options.word_emb_dim, options.word_context_dim, options.fc_dim,
            input_lengths=self.question_lengths, scope_name="word_rcnn", reuse=False,
            is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
        in_passage_repres = layer_utils.my_rcnn_layer(
            in_passage_repres, options.word_emb_dim, options.word_context_dim, options.fc_dim,
            input_lengths=self.passage_lengths, scope_name="word_rcnn", reuse=True,
            is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
        input_dim += 2 * options.word_context_dim
    if is_training:
        in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
        in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
    mask = tf.sequence_mask(self.passage_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
    question_mask = tf.sequence_mask(self.question_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
    # ======Highway layer======
    if options.with_highway:
        with tf.variable_scope("input_highway"):
            in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
            # Share the highway weights between question and passage.
            tf.get_variable_scope().reuse_variables()
            in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
    # in_question_repres = tf.multiply(in_question_repres, tf.expand_dims(question_mask, axis=-1))
    # in_passage_repres = tf.multiply(in_passage_repres, tf.expand_dims(mask, axis=-1))
    # ========Bilateral Matching=====
    (match_representation, match_dim) = match_utils.bilateral_match_func(in_question_repres, in_passage_repres,
            self.question_lengths, self.passage_lengths, question_mask, mask, input_dim, is_training, options=options)
    # ========Projection layer=====
    # (output_representation, output_dim) = layer_utils.projection_layer2(match_representation, match_dim, num_classes, activation_func=tf.nn.relu)
    return match_representation, match_dim
# ==================================================== BiMPM CHAR=================================================
def create_bimpm_char_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
    """Build the character-level BiMPM matching sub-graph.

    Like create_bimpm_model_graph but operates on sentence-level char
    sequences (``in_*_sent_chars``).  Returns
    ``(match_representation, match_dim)``.
    """
    options = self.options
    # ======word representation layer======
    in_question_repres = []
    in_passage_repres = []
    input_dim = 0
    if char_vocab is not None:
        in_question_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_question_sent_chars) # [batch_size, question_len, char_dim]
        in_passage_char_repres = tf.nn.embedding_lookup(self.char_embedding, self.in_passage_sent_chars) # [batch_size, passage_len, char_dim]
        in_question_repres.append(in_question_char_repres)
        in_passage_repres.append(in_passage_char_repres)
        input_shape = tf.shape(self.in_question_sent_chars)
        batch_size = input_shape[0]
        question_len = input_shape[1]
        input_shape = tf.shape(self.in_passage_sent_chars)
        passage_len = input_shape[1]
        input_dim += char_vocab.word_dim
    in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
    in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
    # ======rcnn context layer======
    # NOTE(review): `and False` permanently disables this branch (and the
    # input_dim increment inside it) — presumably left in while the
    # char-level RCNN was being tried out; confirm before removing.
    if options.with_rcnn and False:
        in_question_repres = layer_utils.my_rcnn_layer(
            in_question_repres, options.char_emb_dim, options.char_context_dim, options.fc_dim,
            input_lengths=self.question_sent_char_lengths, scope_name="char_rcnn", reuse=False,
            is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
        in_passage_repres = layer_utils.my_rcnn_layer(
            in_passage_repres, options.char_emb_dim, options.char_context_dim, options.fc_dim,
            input_lengths=self.passage_sent_char_lengths, scope_name="char_rcnn", reuse=True,
            is_training=is_training, dropout_rate=options.dropout_rate, use_cudnn=options.use_cudnn)
        input_dim += 2 * options.char_context_dim
    if is_training:
        in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
        in_passage_repres = tf.nn.dropout(in_passage_repres, (1 - options.dropout_rate))
    mask = tf.sequence_mask(self.passage_sent_char_lengths, passage_len, dtype=tf.float32) # [batch_size, passage_len]
    question_mask = tf.sequence_mask(self.question_sent_char_lengths, question_len, dtype=tf.float32) # [batch_size, question_len]
    # ======Highway layer======
    if options.with_highway:
        with tf.variable_scope("input_highway"):
            in_question_repres = match_utils.multi_highway_layer(in_question_repres, input_dim, options.highway_layer_num)
            # Share the highway weights between question and passage.
            tf.get_variable_scope().reuse_variables()
            in_passage_repres = match_utils.multi_highway_layer(in_passage_repres, input_dim, options.highway_layer_num)
    # in_question_repres = tf.multiply(in_question_repres, tf.expand_dims(question_mask, axis=-1))
    # in_passage_repres = tf.multiply(in_passage_repres, tf.expand_dims(mask, axis=-1))
    # ========Bilateral Matching=====
    (match_representation, match_dim) = match_utils.bilateral_match_func(in_question_repres, in_passage_repres,
            self.question_sent_char_lengths, self.passage_sent_char_lengths, question_mask, mask, input_dim, is_training, options=options)
    # ========Projection layer=====
    # (output_representation, output_dim) = layer_utils.projection_layer2(match_representation, match_dim, num_classes, activation_func=tf.nn.relu)
    return match_representation, match_dim
# ==================================================== MPCNN =====================================================
def create_mpcnn_model_graph(self, num_classes, word_vocab=None, char_vocab=None, is_training=True, global_step=None):
"""
"""
options = self.options
# ======word representation layer======
in_question_repres = []
in_passage_repres = []
input_dim = 0
if word_vocab is not None:
in_question_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_question_words) # [batch_size, question_len, word_dim]
in_passage_word_repres = tf.nn.embedding_lookup(self.word_embedding, self.in_passage_words) # [batch_size, passage_len, word_dim]
in_question_repres.append(in_question_word_repres)
in_passage_repres.append(in_passage_word_repres)
input_shape = tf.shape(self.in_question_words)
batch_size = input_shape[0]
question_len = input_shape[1]
input_shape = tf.shape(self.in_passage_words)
passage_len = input_shape[1]
input_dim += word_vocab.word_dim
in_question_repres = tf.concat(axis=2, values=in_question_repres) # [batch_size, question_len, dim]
in_passage_repres = tf.concat(axis=2, values=in_passage_repres) # [batch_size, passage_len, dim]
if is_training:
in_question_repres = tf.nn.dropout(in_question_repres, (1 - options.dropout_rate))
in_passage_repres = tf.nn.dropout(in_passage_repres, | |
indicate an unsupported platform. The user can still
# set up the host using the headless setup flow, where we can at least display
# a warning. See https://gitlab.gnome.org/GNOME/gdm/-/issues/580 for details
# of the bug and fix.
if display_manager_is_gdm():
return False;
# The session chooser expects a Debian-style Xsession script.
return os.path.isfile(DEBIAN_XSESSION_PATH);
class Config:
    """JSON-backed key/value configuration store.

    Tracks a dirty flag so save() only rewrites the file when the data
    actually changed since the last load/save.
    """

    def __init__(self, path):
        self.path = path
        self.data = {}
        self.changed = False

    def load(self):
        """Loads the config from file.

        Raises:
          IOError: Error reading data
          ValueError: Error parsing JSON
        """
        # `with` fixes a handle leak: the original left the file open if
        # json.load() raised.
        with open(self.path, 'r') as settings_file:
            self.data = json.load(settings_file)
        self.changed = False

    def save(self):
        """Saves the config to file.

        Raises:
          IOError: Error writing data
          TypeError: Error serialising JSON
        """
        if not self.changed:
            return
        # Restrict the file to the current user (umask 066 -> mode 0600)
        # while it is being created.
        old_umask = os.umask(0o066)
        try:
            # `with` ensures the handle is closed even if the write fails.
            with open(self.path, 'w') as settings_file:
                settings_file.write(json.dumps(self.data, indent=2))
            self.changed = False
        finally:
            os.umask(old_umask)

    def save_and_log_errors(self):
        """Calls self.save(), trapping and logging any errors."""
        try:
            self.save()
        except (IOError, TypeError) as e:
            # Lazy %-formatting: the message is only built if it is logged.
            logging.error("Failed to save config: %s", e)

    def get(self, key):
        return self.data.get(key)

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value
        self.changed = True

    def clear(self):
        self.data = {}
        self.changed = True
class Authentication:
    """Manage authentication tokens for Chromoting/xmpp"""

    def __init__(self):
        # Note: Initial values are never used.
        self.login = None
        self.oauth_refresh_token = None

    def copy_from(self, config):
        """Loads the config and returns false if the config is invalid."""
        ok = True
        try:
            # Field-by-field copy; a missing key aborts with ok=False.
            self.login = config["xmpp_login"]
            self.oauth_refresh_token = config["oauth_refresh_token"]
        except KeyError:
            ok = False
        return ok

    def copy_to(self, config):
        # Write both tokens back into the config store.
        for key, value in (("xmpp_login", self.login),
                           ("oauth_refresh_token", self.oauth_refresh_token)):
            config[key] = value
class Host:
    """This manages the configuration for a host."""

    def __init__(self):
        # Note: Initial values are never used.
        self.host_id = None
        self.host_name = None
        self.host_secret_hash = None
        self.private_key = None

    def copy_from(self, config):
        """Loads host state; False when required keys or the host id are missing."""
        ok = True
        try:
            # host_id and host_secret_hash are optional lookups; host_name
            # and private_key are required and may raise KeyError.
            self.host_id = config.get("host_id")
            self.host_name = config["host_name"]
            self.host_secret_hash = config.get("host_secret_hash")
            self.private_key = config["private_key"]
        except KeyError:
            ok = False
        return ok and bool(self.host_id)

    def copy_to(self, config):
        # host_id is only written when present.
        if self.host_id:
            config["host_id"] = self.host_id
        for key, value in (("host_name", self.host_name),
                           ("host_secret_hash", self.host_secret_hash),
                           ("private_key", self.private_key)):
            config[key] = value
class SessionOutputFilterThread(threading.Thread):
    """Reads session log from a pipe and logs the output with the provided prefix
    for amount of time defined by time_limit, or indefinitely if time_limit is
    None."""

    def __init__(self, stream, prefix, time_limit):
        # stream: binary file-like object (pipe) producing session output.
        # prefix: string prepended to each forwarded line.
        # time_limit: seconds of output to forward; None/0 means no limit.
        threading.Thread.__init__(self)
        self.stream = stream
        # Daemon thread: must not keep the process alive on shutdown.
        self.daemon = True
        self.prefix = prefix
        self.time_limit = time_limit

    def run(self):
        started_time = time.time()
        is_logging = True
        while True:
            try:
                line = self.stream.readline();
            except IOError as e:
                print("IOError when reading session output: ", e)
                return

            if line == b"":
                # EOF reached. Just stop the thread.
                return

            if not is_logging:
                # Keep draining the pipe (so the writer doesn't block) but
                # drop the data once suppression kicked in.
                continue

            if self.time_limit and time.time() - started_time >= self.time_limit:
                # Announce suppression once; the current line is dropped too.
                is_logging = False
                print("Suppressing rest of the session output.", flush=True)
            else:
                # Pass stream bytes through as is instead of decoding and encoding.
                sys.stdout.buffer.write(self.prefix.encode(sys.stdout.encoding) + line);
                sys.stdout.flush()
class Desktop:
"""Manage a single virtual desktop"""
def __init__(self, sizes):
    # sizes: list of (width, height) tuples the virtual desktop should offer.
    self.x_proc = None            # X server process (Xvfb or Xorg), see _launch_*
    self.pre_session_proc = None
    self.session_proc = None
    self.host_proc = None
    self.child_env = None         # env dict for children, built by _init_child_env
    self.sizes = sizes
    self.xorg_conf = None         # path of the generated Xorg config (Xorg mode)
    self.pulseaudio_pipe = None   # audio FIFO path, set by _setup_pulseaudio
    self.server_supports_exact_resize = False
    self.server_supports_randr = False
    self.randr_add_sizes = False
    self.host_ready = False
    self.ssh_auth_sockname = None # SSH auth socket path, set by _setup_gnubby
    # Enforce the single-instance invariant: only one Desktop per process.
    global g_desktop
    assert(g_desktop is None)
    g_desktop = self
@staticmethod
def get_unused_display_number():
    """Return a candidate display number for which there is currently no
    X Server lock file"""
    candidate = FIRST_X_DISPLAY_NUMBER
    # Probe successive display numbers until one has no X lock file.
    while True:
        if not os.path.exists(X_LOCK_FILE_TEMPLATE % candidate):
            return candidate
        candidate += 1
def _init_child_env(self):
    """Builds the environment dict used to launch child processes."""
    env = dict(os.environ)

    # Force GDK to use the X11 backend, as otherwise parts of the host that
    # use GTK can end up connecting to an active Wayland display instead of
    # the CRD X11 session.
    env["GDK_BACKEND"] = "x11"

    # Ensure that the software-rendering GL drivers are loaded by the desktop
    # session, instead of any hardware GL drivers installed on the system.
    arch = platform.machine()
    mesa_dirs = [
        "/usr/lib/mesa-diverted/%s-linux-gnu" % arch,
        "/usr/lib/%s-linux-gnu/mesa" % arch,
        "/usr/lib/%s-linux-gnu/dri" % arch,
        "/usr/lib/%s-linux-gnu/gallium-pipe" % arch,
    ]
    library_path = ":".join(mesa_dirs)
    # Preserve any pre-existing LD_LIBRARY_PATH entries after ours.
    if "LD_LIBRARY_PATH" in env:
        library_path = library_path + ":" + env["LD_LIBRARY_PATH"]
    env["LD_LIBRARY_PATH"] = library_path

    self.child_env = env
def _setup_pulseaudio(self):
    """Configures a private pulseaudio instance whose sink writes to a FIFO.

    On success, sets self.pulseaudio_pipe and the PULSE_* variables in
    self.child_env and returns True. On failure, logs and returns False.
    """
    self.pulseaudio_pipe = None

    # pulseaudio uses UNIX sockets for communication. Length of UNIX socket
    # name is limited to 108 characters, so audio will not work properly if
    # the path is too long. To workaround this problem we use only first 10
    # symbols of the host hash.
    pulse_path = os.path.join(CONFIG_DIR,
                              "pulseaudio#%s" % g_host_hash[0:10])
    if len(pulse_path) + len("/native") >= 108:
        logging.error("Audio will not be enabled because pulseaudio UNIX " +
                      "socket path is too long.")
        return False

    sink_name = "chrome_remote_desktop_session"
    pipe_name = os.path.join(pulse_path, "fifo_output")

    try:
        if not os.path.exists(pulse_path):
            os.mkdir(pulse_path)
    except IOError as e:
        logging.error("Failed to create pulseaudio pipe: " + str(e))
        return False

    try:
        # 'with' guarantees both files are closed even if a write raises
        # (the original leaked the handles on error).
        with open(os.path.join(pulse_path, "daemon.conf"), "w") as pulse_config:
            pulse_config.write("default-sample-format = s16le\n")
            pulse_config.write("default-sample-rate = 48000\n")
            pulse_config.write("default-sample-channels = 2\n")
        with open(os.path.join(pulse_path, "default.pa"), "w") as pulse_script:
            pulse_script.write("load-module module-native-protocol-unix\n")
            pulse_script.write(
                ("load-module module-pipe-sink sink_name=%s file=\"%s\" " +
                 "rate=48000 channels=2 format=s16le\n") %
                (sink_name, pipe_name))
    except IOError as e:
        logging.error("Failed to write pulseaudio config: " + str(e))
        return False

    # Point every pulseaudio lookup path at our private directory.
    self.child_env["PULSE_CONFIG_PATH"] = pulse_path
    self.child_env["PULSE_RUNTIME_PATH"] = pulse_path
    self.child_env["PULSE_STATE_PATH"] = pulse_path
    self.child_env["PULSE_SINK"] = sink_name
    self.pulseaudio_pipe = pipe_name

    return True
def _setup_gnubby(self):
    """Computes the per-user SSH auth socket path and stores it on self."""
    user = os.environ["USER"]
    self.ssh_auth_sockname = "/tmp/chromoting.%s.ssh_auth_sock" % user
def _x_env(self):
    """Returns the child environment without TMPDIR.

    Certain values of TMPDIR can break the X server (crbug.com/672684), so
    make sure it isn't set in the environment used to start the server.
    """
    env = self.child_env
    if "TMPDIR" not in env:
        # Nothing to strip; hand back the shared dict unchanged.
        return env
    # Return a copy with TMPDIR removed rather than mutating child_env.
    sanitized = dict(env)
    sanitized.pop("TMPDIR")
    return sanitized
def check_x_responding(self):
    """Checks if the X server is responding to connections."""
    # xdpyinfo exits 0 only when it can connect to the display configured in
    # child_env; its stdout is discarded.
    exit_code = subprocess.call("xdpyinfo", env=self.child_env,
                                stdout=subprocess.DEVNULL)
    return exit_code == 0
def _wait_for_x(self):
    """Polls until the X server accepts connections; raises after 20 tries."""
    attempts_left = 20
    while attempts_left > 0:
        if self.check_x_responding():
            logging.info("X server is active.")
            return
        attempts_left -= 1
        # Half-second back-off between probes (~10s total).
        time.sleep(0.5)
    raise Exception("Could not connect to X server.")
def _launch_xvfb(self, display, x_auth_file, extra_x_args):
    # Size the virtual framebuffer to the largest requested resolution.
    max_width = max([width for width, height in self.sizes])
    max_height = max([height for width, height in self.sizes])

    logging.info("Starting Xvfb on display :%d" % display)
    # 24-bit colour depth, single screen.
    screen_option = "%dx%dx24" % (max_width, max_height)
    self.x_proc = subprocess.Popen(
        ["Xvfb", ":%d" % display,
         "-auth", x_auth_file,
         "-nolisten", "tcp",
         "-noreset",
         "-screen", "0", screen_option
        ] + extra_x_args, env=self._x_env())
    if not self.x_proc.pid:
        raise Exception("Could not start Xvfb.")

    self._wait_for_x()

    # Probe for RandR support by running xrandr against the new server.
    exit_code = subprocess.call("xrandr", env=self.child_env,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
    if exit_code == 0:
        # RandR is supported
        self.server_supports_exact_resize = True
        self.server_supports_randr = True
        self.randr_add_sizes = True
def _launch_xorg(self, display, x_auth_file, extra_x_args):
    # Write a generated Xorg config into a temp file that must outlive this
    # call (delete=False); its path is recorded in self.xorg_conf.
    with tempfile.NamedTemporaryFile(
        prefix="chrome_remote_desktop_",
        suffix=".conf", delete=False) as config_file:
        config_file.write(gen_xorg_config(self.sizes).encode())

    # We can't support exact resize with the current Xorg dummy driver.
    self.server_supports_exact_resize = False
    # But dummy does support RandR 1.0.
    self.server_supports_randr = True
    self.xorg_conf = config_file.name

    logging.info("Starting Xorg on display :%d" % display)
    # We use the child environment so the Xorg server picks up the Mesa libGL
    # instead of any proprietary versions that may be installed, thanks to
    # LD_LIBRARY_PATH.
    # Note: This prevents any environment variable the user has set from
    # affecting the Xorg server.
    self.x_proc = subprocess.Popen(
        ["Xorg", ":%d" % display,
         "-auth", x_auth_file,
         "-nolisten", "tcp",
         "-noreset",
         # Disable logging to a file and instead bump up the stderr verbosity
         # so the equivalent information gets logged in our main log file.
         "-logfile", "/dev/null",
         "-verbose", "3",
         "-config", config_file.name
        ] + extra_x_args, env=self._x_env())
    if not self.x_proc.pid:
        raise Exception("Could not start Xorg.")
    self._wait_for_x()
def _launch_x_server(self, extra_x_args):
x_auth_file = os.path.expanduser("~/.Xauthority")
self.child_env["XAUTHORITY"] = x_auth_file
display = self.get_unused_display_number()
# Run "xauth add" with |child_env| so that it modifies the same XAUTHORITY
# file which will be used for the X session.
exit_code = subprocess.call("xauth add :%d . `mcookie`" % display,
env=self.child_env, shell=True)
if exit_code != 0:
raise Exception("xauth failed with code %d" % exit_code)
# Disable the Composite extension iff the X session is the default
# Unity-2D, since it uses Metacity which fails to generate DAMAGE
# notifications correctly. See crbug.com/166468.
x_session = choose_x_session()
if (len(x_session) == 2 and
x_session[1] == "/usr/bin/gnome-session --session=ubuntu-2d"):
extra_x_args.extend(["-extension", "Composite"])
self.child_env["DISPLAY"] = ":%d" % display
self.child_env["CHROME_REMOTE_DESKTOP_SESSION"] = "1"
# Use a separate profile for any instances of Chrome that are started in
# the virtual session. Chrome doesn't support sharing a profile between
# multiple DISPLAYs, but Chrome Sync allows for a reasonable compromise.
#
# M61 introduced CHROME_CONFIG_HOME, which allows specifying a different
# config | |
& e0, SimTK::Vec< 2 >::E const & e1, SimTK::Vec< 2 >::E const & e2, SimTK::Vec< 2 >::E const & e3, SimTK::Vec< 2 >::E const & e4, SimTK::Vec< 2 >::E const & e5, SimTK::Vec< 2 >::E const & e6) -> Vec2
Parameters
----------
e0: SimTK::Vec< 2 >::E const &
e1: SimTK::Vec< 2 >::E const &
e2: SimTK::Vec< 2 >::E const &
e3: SimTK::Vec< 2 >::E const &
e4: SimTK::Vec< 2 >::E const &
e5: SimTK::Vec< 2 >::E const &
e6: SimTK::Vec< 2 >::E const &
__init__(SimTK::Vec<(2)> self, SimTK::Vec< 2 >::E const & e0, SimTK::Vec< 2 >::E const & e1, SimTK::Vec< 2 >::E const & e2, SimTK::Vec< 2 >::E const & e3, SimTK::Vec< 2 >::E const & e4, SimTK::Vec< 2 >::E const & e5, SimTK::Vec< 2 >::E const & e6, SimTK::Vec< 2 >::E const & e7) -> Vec2
Parameters
----------
e0: SimTK::Vec< 2 >::E const &
e1: SimTK::Vec< 2 >::E const &
e2: SimTK::Vec< 2 >::E const &
e3: SimTK::Vec< 2 >::E const &
e4: SimTK::Vec< 2 >::E const &
e5: SimTK::Vec< 2 >::E const &
e6: SimTK::Vec< 2 >::E const &
e7: SimTK::Vec< 2 >::E const &
__init__(SimTK::Vec<(2)> self, SimTK::Vec< 2 >::E const & e0, SimTK::Vec< 2 >::E const & e1, SimTK::Vec< 2 >::E const & e2, SimTK::Vec< 2 >::E const & e3, SimTK::Vec< 2 >::E const & e4, SimTK::Vec< 2 >::E const & e5, SimTK::Vec< 2 >::E const & e6, SimTK::Vec< 2 >::E const & e7, SimTK::Vec< 2 >::E const & e8) -> Vec2
Parameters
----------
e0: SimTK::Vec< 2 >::E const &
e1: SimTK::Vec< 2 >::E const &
e2: SimTK::Vec< 2 >::E const &
e3: SimTK::Vec< 2 >::E const &
e4: SimTK::Vec< 2 >::E const &
e5: SimTK::Vec< 2 >::E const &
e6: SimTK::Vec< 2 >::E const &
e7: SimTK::Vec< 2 >::E const &
e8: SimTK::Vec< 2 >::E const &
"""
this = _simbody.new_Vec2(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def setToNaN(self):
    """
    setToNaN(Vec2 self)

    Parameters
    ----------
    self: SimTK::Vec< 2 > *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_setToNaN(self)
def setToZero(self):
    """
    setToZero(Vec2 self)

    Parameters
    ----------
    self: SimTK::Vec< 2 > *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_setToZero(self)
def isNaN(self):
    """
    isNaN(Vec2 self) -> bool

    Parameters
    ----------
    self: SimTK::Vec< 2 > const *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_isNaN(self)
def isInf(self):
    """
    isInf(Vec2 self) -> bool

    Parameters
    ----------
    self: SimTK::Vec< 2 > const *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_isInf(self)
def isFinite(self):
    """
    isFinite(Vec2 self) -> bool

    Parameters
    ----------
    self: SimTK::Vec< 2 > const *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_isFinite(self)
def getDefaultTolerance():
    """getDefaultTolerance() -> double"""
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_getDefaultTolerance()

# SWIG emits the def without a self parameter; expose it as a static method.
getDefaultTolerance = staticmethod(getDefaultTolerance)
def isNumericallyEqual(self, *args):
    """
    isNumericallyEqual(Vec2 self, double const & e, double tol) -> bool

    Parameters
    ----------
    e: double const &
    tol: double

    isNumericallyEqual(Vec2 self, double const & e) -> bool

    Parameters
    ----------
    e: double const &
    """
    # SWIG overload dispatcher: the argument count selects the C++ overload.
    return _simbody.Vec2_isNumericallyEqual(self, *args)
def toString(self):
    """
    toString(Vec2 self) -> std::string

    Parameters
    ----------
    self: SimTK::Vec< 2 > const *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_toString(self)
def set(self, i, value):
    """
    set(Vec2 self, int i, SimTK::Vec< 2 >::E const & value)

    Parameters
    ----------
    i: int
    value: SimTK::Vec< 2 >::E const &
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_set(self, i, value)
def get(self, i):
    """
    get(Vec2 self, int i) -> SimTK::Vec< 2 >::E const &

    Parameters
    ----------
    i: int
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_get(self, i)
def __str__(self):
    """
    __str__(Vec2 self) -> std::string

    Parameters
    ----------
    self: SimTK::Vec< 2 > const *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2___str__(self)
def __len__(self):
    """
    __len__(Vec2 self) -> int

    Parameters
    ----------
    self: SimTK::Vec< 2 > const *
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2___len__(self)
def scalarEq(self, ee):
    """
    scalarEq(Vec2 self, double const & ee) -> Vec2

    Parameters
    ----------
    ee: double const &
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_scalarEq(self, ee)
def scalarPlusEq(self, ee):
    """
    scalarPlusEq(Vec2 self, double const & ee) -> Vec2

    Parameters
    ----------
    ee: double const &
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_scalarPlusEq(self, ee)
def scalarMinusEq(self, ee):
    """
    scalarMinusEq(Vec2 self, double const & ee) -> Vec2

    Parameters
    ----------
    ee: double const &
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_scalarMinusEq(self, ee)
def scalarTimesEq(self, ee):
    """
    scalarTimesEq(Vec2 self, double const & ee) -> Vec2

    Parameters
    ----------
    ee: double const &
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_scalarTimesEq(self, ee)
def scalarDivideEq(self, ee):
    """
    scalarDivideEq(Vec2 self, double const & ee) -> Vec2

    Parameters
    ----------
    ee: double const &
    """
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec2_scalarDivideEq(self, ee)
# SWIG housekeeping: bind the native destructor and register the proxy class
# with the extension module so C++ return values wrap into Vec2 instances.
__swig_destroy__ = _simbody.delete_Vec2
__del__ = lambda self: None
Vec2_swigregister = _simbody.Vec2_swigregister
Vec2_swigregister(Vec2)
def Vec2_size():
    """Vec2_size() -> int"""
    # Module-level alias of the static Vec2.size wrapper (SWIG-generated).
    return _simbody.Vec2_size()
def Vec2_nrow():
    """Vec2_nrow() -> int"""
    # Module-level alias of the static Vec2.nrow wrapper (SWIG-generated).
    return _simbody.Vec2_nrow()
def Vec2_ncol():
    """Vec2_ncol() -> int"""
    # Module-level alias of the static Vec2.ncol wrapper (SWIG-generated).
    return _simbody.Vec2_ncol()
def Vec2_getDefaultTolerance():
    """Vec2_getDefaultTolerance() -> double"""
    # Module-level alias of the static Vec2.getDefaultTolerance wrapper.
    return _simbody.Vec2_getDefaultTolerance()
class Vec3(_object):
"""Proxy of C++ SimTK::Vec<(3)> class."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Vec3, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Vec3, name)
__repr__ = _swig_repr
def size():
    """size() -> int"""
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec3_size()

# SWIG emits the def without a self parameter; expose it as a static method.
size = staticmethod(size)
def nrow():
    """nrow() -> int"""
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec3_nrow()

# SWIG emits the def without a self parameter; expose it as a static method.
nrow = staticmethod(nrow)
def ncol():
    """ncol() -> int"""
    # SWIG wrapper: delegates to the native Simbody binding.
    return _simbody.Vec3_ncol()

# SWIG emits the def without a self parameter; expose it as a static method.
ncol = staticmethod(ncol)
def __init__(self, *args):
"""
__init__(SimTK::Vec<(3)> self) -> Vec3
__init__(SimTK::Vec<(3)> self, Vec3 src) -> Vec3
Parameters
----------
src: SimTK::Vec< 3 > const &
__init__(SimTK::Vec<(3)> self, double const & e) -> Vec3
Parameters
----------
e: double const &
__init__(SimTK::Vec<(3)> self, int i) -> Vec3
Parameters
----------
i: int
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, SimTK::Vec< 3 >::E const & e2) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
e2: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, SimTK::Vec< 3 >::E const & e2, SimTK::Vec< 3 >::E const & e3) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
e2: SimTK::Vec< 3 >::E const &
e3: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, SimTK::Vec< 3 >::E const & e2, SimTK::Vec< 3 >::E const & e3, SimTK::Vec< 3 >::E const & e4) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
e2: SimTK::Vec< 3 >::E const &
e3: SimTK::Vec< 3 >::E const &
e4: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, SimTK::Vec< 3 >::E const & e2, SimTK::Vec< 3 >::E const & e3, SimTK::Vec< 3 >::E const & e4, SimTK::Vec< 3 >::E const & e5) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
e2: SimTK::Vec< 3 >::E const &
e3: SimTK::Vec< 3 >::E const &
e4: SimTK::Vec< 3 >::E const &
e5: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, SimTK::Vec< 3 >::E const & e2, SimTK::Vec< 3 >::E const & e3, SimTK::Vec< 3 >::E const & e4, SimTK::Vec< 3 >::E const & e5, SimTK::Vec< 3 >::E const & e6) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
e2: SimTK::Vec< 3 >::E const &
e3: SimTK::Vec< 3 >::E const &
e4: SimTK::Vec< 3 >::E const &
e5: SimTK::Vec< 3 >::E const &
e6: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, SimTK::Vec< 3 >::E const & e2, SimTK::Vec< 3 >::E const & e3, SimTK::Vec< 3 >::E const & e4, SimTK::Vec< 3 >::E const & e5, SimTK::Vec< 3 >::E const & e6, SimTK::Vec< 3 >::E const & e7) -> Vec3
Parameters
----------
e0: SimTK::Vec< 3 >::E const &
e1: SimTK::Vec< 3 >::E const &
e2: SimTK::Vec< 3 >::E const &
e3: SimTK::Vec< 3 >::E const &
e4: SimTK::Vec< 3 >::E const &
e5: SimTK::Vec< 3 >::E const &
e6: SimTK::Vec< 3 >::E const &
e7: SimTK::Vec< 3 >::E const &
__init__(SimTK::Vec<(3)> self, SimTK::Vec< 3 >::E const & e0, SimTK::Vec< 3 >::E const & e1, | |
<reponame>Natsurii/nicabot-monkee
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Discord service status.
Ported from Nekozilla V1
"""
import asyncio
import datetime
import re
import typing
import aiohttp
import neko3.cog
from neko3 import embeds
from neko3 import neko_commands
from neko3 import pagination
# Base URL and version of the Discord statuspage.io API.
endpoint_base = "https://status.discordapp.com/api"
api_version = "v2"

# Max fields per page on short pages
max_fields = 4
class ListMix(list):
    """List whose ``+=`` operator appends its operand as a single element.

    Quicker than replacing a bunch of internal calls. I know this is
    inefficient anyway.
    """

    def __iadd__(self, item):
        # Unlike list.__iadd__ (which extends), append the operand itself.
        list.append(self, item)
        return self
def get_endpoint(page_name):
    """Produces the endpoint URL."""
    # Joins base URL, API version, and the requested page name.
    return "/".join((endpoint_base, api_version, page_name))
def get_impact_color(impact, is_global=False):
    """Maps a statuspage impact level (case-insensitive) to an RGB int.

    "none" is green only for the global indicator; unknown levels map to 0.
    """
    palette = {
        "none": 0x00FF00 if is_global else 0x0,
        "minor": 0xFF0000,
        "major": 0xFFA500,
        "critical": 0xFF0000,
    }
    return palette.get(impact.lower(), 0x0)
def find_highest_impact(entries):
    """Returns the title-cased most severe impact found in *entries*.

    Falls back to the lower-case string "none" when no entry matches;
    callers lower-case the value again, so the case difference is benign.
    """
    # NOTE: removed a leftover debug print(entries) from the original.
    # Severity order, most severe first.
    for state in ("critical", "major", "minor", "none"):
        for entry in entries:
            if entry["impact"].lower() == state:
                return state.title()
    return "none"
def make_incident_update_body(recent_update):
    """Formats one incident update dict as a markdown fragment.

    Keys use spaces ("updated at", "created at") as produced by
    get_incidents' underscore-to-space conversion.
    """
    updated_at = recent_update.get("updated at")
    created_at = recent_update.get("created at")
    body = recent_update.get("body")

    ru_message = "\n_**" + recent_update.get("status").title() + "**_"

    # Prefer the update time; fall back to the creation time.
    if updated_at:
        ru_message += f" - Last updated at {friendly_date(updated_at)}"
    elif created_at:
        ru_message += f" - Created at {friendly_date(created_at)}"

    ru_message += f"\n{body}\n"
    # The original returned "\n".join(ru_message.split("\n")) — a no-op
    # round-trip, dropped here.
    return ru_message
def make_incident_body(incident):
    """Renders one incident dict into a markdown description block.

    Keys are space-separated ("created at", etc.) as produced by
    get_incidents' underscore-to-space conversion.
    """
    created = friendly_date(incident["created at"])
    updated = incident.get("updated at")
    updated = friendly_date(updated) if updated else "N/A"
    monitoring = incident.get("monitoring at")
    monitoring = friendly_date(monitoring) if monitoring else "N/A"
    # Fall back to the main status page when no shortlink was provided.
    url = incident.get("shortlink", "https://status.discordapp.com")

    # Comma-separated affected component names, or "nothing" when empty.
    affects = ", ".join(component["name"] for component in incident.get("components")) or "nothing"

    recent_updates = incident.get("updates")
    ru_message = ""
    if recent_updates:
        ru_message = f"Updates:\n"
        for recent_update in recent_updates:
            ru_message += make_incident_update_body(recent_update)

    return (
        f"_**[{incident['name']}]({url})**_\n\n"
        f"Affects: `{affects}`\n"
        f"Status: `{incident['status']}`\n"
        f"Created: `{created}`\n"
        f"Updated: `{updated}`\n"
        f"Monitoring: `{monitoring}`\n"
        f"{ru_message if recent_updates else 'No updates yet.'}"
    )
def parse_timestamp(timestamp):
    """
    Discord use a timestamp that is not compatible with Python by
    default, which is kind of annoying.

    Expected format: YYYY-mm-ddTHH:MM:SS.sss(sss)?[+-]hh:mm

    :param timestamp: timestamp to parse.
    :return: datetime object in UTC, or None when timestamp is None.
    """
    if timestamp is None:
        return None

    # Strip the separators strptime can't cope with: colons, periods, 'T'.
    stripped = re.sub(r"[:.T]", "", timestamp, flags=re.I)

    # Split off the numeric UTC offset, which follows the last '+' or '-'.
    # (Date hyphens are still present at this point, hence splitting from
    # the right.)
    if "+" in stripped:
        date_part, _, offset_digits = stripped.rpartition("+")
        offset = "+" + offset_digits
    else:
        date_part, _, offset_digits = stripped.rpartition("-")
        offset = "-" + offset_digits

    # Now drop the date hyphens and zero-pad the fraction to microseconds.
    date_part = date_part.replace("-", "")
    date_part = date_part.ljust(len("YYYYmmddHHMMSSssssss"), "0")

    parsed = datetime.datetime.strptime(date_part + offset, "%Y%m%d%H%M%S%f%z")
    return parsed.astimezone(datetime.timezone.utc)
def friendly_date(value: datetime.datetime):
    """Creates a friendly date format for the given datetime ("N/A" for None)."""
    return "N/A" if value is None else value.strftime("%d %B %Y at %H:%M %Z")
class DiscordServiceStatusCog(neko3.cog.CogBase):
"""
Holds the service status command.
"""
@neko_commands.command(name="discord", aliases=["discordstatus"], brief="Check if Discord is down (again)")
async def discord_status_command(self, ctx):
    """
    Gets a list of all Discord systems, and their service
    status.
    """
    # NOTE(review): source indentation was flattened; the whole body is
    # assumed to run inside the typing() context — confirm against upstream.
    async with ctx.message.channel.typing():
        # Fetch the four statuspage endpoints concurrently...
        stat_res, comp_res, inc_res, sms_res = await asyncio.gather(
            self._get(get_endpoint("summary.json")),
            self._get(get_endpoint("components.json")),
            self._get(get_endpoint("incidents.json")),
            self._get(get_endpoint("scheduled-maintenances.json")),
        )

        # ...then parse each response into friendly dicts, also concurrently.
        status, components, incidents, sms = await asyncio.gather(
            self.get_status(stat_res),
            self.get_components(comp_res),
            self.get_incidents(inc_res),
            self.get_scheduled_maintenances(sms_res),
        )

        footer_text = status["indicator"]

        # Embed factory invoked by the paginator for each generated page.
        @pagination.embed_generator(max_chars=1100)
        def factory(_, page, __):
            # Colour by the worst unresolved incident, else overall status.
            if not incidents["unresolved"]:
                color = status["color"]
            else:
                color = get_impact_color(find_highest_impact(incidents["unresolved"]))
            e = embeds.Embed(
                colour=color, title="API Status for discordapp.com", description=page, url=status["url"]
            )
            if footer_text != "None":
                e.set_footer(text=footer_text[:2000])
            return e

        nav = pagination.EmbedNavigatorFactory(factory=factory, max_lines=25)

        # Make the front page, if needed.
        headline = status["indicator"]
        if str(headline) != "None":
            nav.add_block(f"**{headline}**\n")

        nav.add_block(f'{status["description"]}\n\n' f'Last updated: {friendly_date(status["updated_at"])}.')
        nav.add_page_break()

        # Highlight the most recent unresolved incident on its own page.
        if incidents["unresolved"]:
            first = incidents["unresolved"][0]
            name = first["name"]
            body = make_incident_body(first)
            nav.add_block(f"\n**{name}**\n{body}\n")
            nav.add_page_break()

        """
        PAGE 3
        ======

        Incidents.
        """
        if incidents["unresolved"]:
            nav.add_block("**__UNRESOLVED INCIDENTS__**\n")

            incident = incidents["unresolved"][0]
            name = f'**{incident["name"]}**'
            desc = make_incident_body(incident)
            nav.add_block(name + "\n" + desc.strip())

            # Up to two further unresolved incidents.
            for incident in incidents["unresolved"][1:3]:
                body = make_incident_body(incident)
                name = incident["name"]
                body = name + "\n" + body
                nav.add_block(body.strip())
            nav.add_page_break()

        nav.add_block("**__RESOLVED INCIDENTS__**\n")
        # Add six most recent.
        for incident in incidents["resolved"][:6]:
            body = make_incident_body(incident)
            nav.add_block(body)
            nav.add_line()
        nav.add_page_break()

        nav.add_block("**__PRIMARY COMPONENTS__**\n")
        for i, component in enumerate(components["showcase"], start=1):
            # Start a new page every max_fields components.
            if i and not (i % max_fields):
                nav.add_page_break()

            title = component.pop("name")
            desc = []
            for k, v in component.items():
                line = f"**{k}**: "
                if isinstance(v, datetime.datetime):
                    line += friendly_date(v)
                else:
                    line += str(v)
                desc.append(line)
            desc = "\n".join(desc)
            nav.add_block(f"**{title}**\n{desc}\n")
        nav.add_page_break()

        """
        PAGE 5
        ======

        Non showcase components
        """
        nav.add_block("**__OTHER COMPONENTS__**\n")
        for i, component in enumerate(components["rest"], start=1):
            if i and not (i % max_fields):
                nav.add_page_break()
            title = component.pop("name")
            desc = []
            for k, v in component.items():
                if k == "components":
                    continue
                line = f"{k}: "
                if isinstance(v, datetime.datetime):
                    line += friendly_date(v)
                else:
                    line += str(v)
                desc.append(line)
            nav.add_block(f"\n**{title}**\n" + "\n".join(desc))

        nav.start(ctx)
@classmethod
async def _get(cls, *args, **kwargs):
    # Fetches a URL and returns the decoded JSON payload, raising for
    # non-2xx responses. A fresh ClientSession per request is wasteful but
    # simple; the command only issues four requests per invocation.
    async with aiohttp.ClientSession() as session:
        async with session.get(*args, **kwargs) as resp:
            resp.raise_for_status()
            return await resp.json()
@staticmethod
async def get_status(res) -> typing.Dict[str, typing.Any]:
    """
    Gets the short overall status of Discord.

    :param res: the http response.
    :return: a map of:
        description - str, None
        color - int
        indicator - str
        updated_at - datetime
        url - str
    """
    page = res["page"]
    state = res["status"]
    return {
        "description": state["description"],
        "color": get_impact_color(state["indicator"], True),
        "indicator": state["indicator"].title(),
        "updated_at": parse_timestamp(page["updated_at"]),
        "url": page["url"],
    }
@staticmethod
async def get_components(res, hide_un_degraded=True) -> typing.Dict[str, typing.List]:
    """
    Gets the status of individual components of Discord.

    :param res: the http response.
    :param hide_un_degraded: defaults to true. If true, we respect the
        API's intent to hide any component marked true under
        "only_show_if_degraded" unless the component is actually
        degraded.
    :return: a dict containing two lists: 'showcase' and 'rest'.
        Both lists contain components, with fields:
            status - str
            name - str
            created_at - datetime
            updated_at - datetime
            description - str, None
    """
    # Anything that is not set to "showcase" belongs in the
    # rest list instead.
    showcase_result = []
    rest_result = []

    components: list = res["components"]
    for component in components:
        comp_dict = {}
        for k, v in component.items():
            # Skip these keys.
            if k in ("id", "page_id", "position", "group", "only_show_if_degraded", "showcase", "group_id"):
                continue
            elif v is None:
                continue

            friendly_key = k.replace("_", " ")

            # If a date/time
            if k in ("created_at", "updated_at"):
                comp_dict[friendly_key] = parse_timestamp(v)
            elif k == "status":
                # This is always formatted with underscores (enum really)
                comp_dict[friendly_key] = v.replace("_", " ")
            else:
                comp_dict[friendly_key] = v

        # Components flagged only_show_if_degraded are hidden unless they
        # are actually degraded.
        # BUG FIX: the original computed
        #     should_hide = not show_always and is_degraded
        # which hid exactly the degraded components it was supposed to show
        # (the docstring above states the opposite intent).
        if hide_un_degraded and component["only_show_if_degraded"]:
            is_degraded = component["status"] != "operational"
            if not is_degraded:
                continue

        if component["showcase"]:
            showcase_result.append(comp_dict)
        else:
            rest_result.append(comp_dict)

    return {"showcase": showcase_result, "rest": rest_result}
@classmethod
async def get_incidents(cls, res) -> typing.Dict[str, typing.List]:
    """
    Gets a dict containing two keys: 'resolved' and 'unresolved'.

    These contain incidents and incident updates.

    Due to the quantity of information this returns, we only get the
    first 5, resolved. All unresolved are returned.

    :param res: the http response.
    """
    max_resolved = 5
    res = res["incidents"]

    unresolved = []
    resolved = []

    for inc in res:
        # Bucket by lifecycle state; resolved incidents past the cap are
        # dropped entirely.
        if inc["status"] in ("investigating", "identified", "monitoring"):
            target = unresolved
        elif len(resolved) < max_resolved:
            target = resolved
        else:
            continue

        incident = {}
        for k, v in inc.items():
            if k in ("id", "page_id") or v is None:
                continue

            # Keys are exposed with spaces instead of underscores;
            # make_incident_body relies on this convention.
            friendly_key = k.replace("_", " ")

            if k in ("updated_at", "created_at", "monitoring_at"):
                incident[friendly_key] = parse_timestamp(v)
            elif k == "incident_updates":
                incident["updates"] = cls.__parse_incident_updates(v)
            elif k in ("impact", "status"):
                incident[friendly_key] = v.replace("_", " ")
            else:
                incident[friendly_key] = v
        target.append(incident)

    return {"resolved": resolved, "unresolved": unresolved}
@staticmethod
def __parse_incident_updates(v):
# Parse incident updates.
updates = []
if v is None:
return updates
for up in v:
update = {}
for up_k, up_v in up.items():
up_f_k = up_k.replace("_", | |
"""
Basic visualization of neurite morphologies using matplotlib.
Usage is restricted to morphologies in the SWC format with the three-point soma `standard <http://neuromorpho.org/neuroMorpho/SomaFormat.html>`_
"""
import sys,time
import os, sys
from matplotlib.cm import get_cmap
from Crypto.Protocol.AllOrNothing import isInt
sys.setrecursionlimit(10000)
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.animation as animation
import pylab as pl
from matplotlib import collections as mc
from PIL import Image
from numpy.linalg import inv
from McNeuron import Neuron
from McNeuron import Node
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
from pylab import plot,subplot,axis,stem,show,figure, Normalize
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import pylab as pl
import matplotlib
from matplotlib import collections as mc
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
def get_2d_image(path, size, dpi, background, show_width):
    """Renders the morphology in the SWC file at *path* to a 2-D gray image.

    Depth (z) is normalised to [0, background] and used as the gray level
    of soma circles and neurite segments.

    path: SWC morphology file to load.
    size: figure size in inches (a 1-inch margin is added).
    dpi: figure resolution; half of it is cropped from each border.
    background: scale factor for the normalised depth values.
    show_width: when True, segment line widths follow node diameters.
    Returns a 2-D uint8 array (single colour channel, borders cropped).
    """
    # BUG FIX: the module imports Neuron directly ("from McNeuron import
    # Neuron"); the original "McNeuron.Neuron(...)" raised NameError.
    neu = Neuron(file_format='swc without attributes', input_file=path)
    depth = neu.location[2, :]
    p = neu.location[0:2, :]
    widths = 5 * neu.diameter
    widths[0:3] = 0
    # Normalise depth into [0, background] for use as a gray level.
    m = min(depth)
    M = max(depth)
    depth = background * ((depth - m) / (M - m))
    colors = []
    lines = []
    patches = []
    # Soma nodes are drawn as circles shaded by depth.
    for i in range(neu.n_soma):
        x1 = neu.location[0, i]
        y1 = neu.location[1, i]
        r = 1 * neu.diameter[i]
        circle = Circle((x1, y1), r, color=str(depth[i]), ec='none', fc='none')
        patches.append(circle)
    pa = PatchCollection(patches, cmap=matplotlib.cm.gray)
    pa.set_array(depth[0] * np.zeros(neu.n_soma))
    # Each segment runs from node i to its parent, shaded by depth.
    for i in range(len(neu.nodes_list)):
        colors.append(str(depth[i]))
        j = neu.parent_index[i]
        lines.append([(p[0, i], p[1, i]), (p[0, j], p[1, j])])
    if show_width:
        lc = mc.LineCollection(lines, colors=colors, linewidths=widths)
    else:
        lc = mc.LineCollection(lines, colors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(lc)
    ax.add_collection(pa)
    fig.set_size_inches([size + 1, size + 1])
    fig.set_dpi(dpi)
    plt.axis('off')
    plt.xlim((min(p[0, :]), max(p[0, :])))
    plt.ylim((min(p[1, :]), max(p[1, :])))
    plt.draw()
    # BUG FIX: np.fromstring on raw bytes is deprecated/removed; use
    # np.frombuffer instead.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # BUG FIX: integer division — dpi/2 is a float under Python 3 and is not
    # a valid slice index.
    border = dpi // 2
    return np.squeeze(data[border:-border, border:-border, 0])
def projection_on_plane(neuron, normal_vec = np.array([0,0,1]), distance = 10, resolution = np.array([256,256]), gap = 3.0):
    """Project the neuron's nodes on a plane and rasterize them.

    Each non-root node is projected onto the plane spanned by the z and y
    unit vectors; the pixel at the projected position records the node's
    coordinate along the remaining (x) axis.

    Parameters
    ----------
    neuron : Neuron
        Needs ``location``, ``nodes_list`` (each node exposing ``parent``).
    normal_vec : numpy.ndarray
        Unused; kept for interface compatibility.
    distance : int
        Unused; kept for interface compatibility.
    resolution : numpy.ndarray of shape [2]
        Output image size in pixels.
    gap : float
        World-space extent of one pixel.

    Returns
    -------
    numpy.ndarray
        Array of shape ``resolution`` holding the projected depth values.

    Notes
    -----
    Bug fixed: the original passed two separate vectors to
    ``project_points`` (whose signature takes a single [2,3] array) and
    called a per-node helper ``project_point`` that is not defined
    anywhere in the file; pixel indices were also left as floats.
    """
    image = np.zeros(resolution)
    shift = resolution[0]/2
    # Plane spanned by the z and y axes; depth measured along x.
    normal_vectors = np.array([[0, 0, 1], [0, 1, 0]])
    coords = project_points(neuron.location, normal_vectors)
    dis = depth_points(neuron.location, np.array([1, 0, 0]))
    for i, n in enumerate(neuron.nodes_list):
        if n.parent is not None:   # skip the root node
            pix1 = int(np.floor(coords[0, i]/gap) + shift)
            pix2 = int(np.floor(coords[1, i]/gap) + shift)
            if 0 <= pix1 < resolution[0] and 0 <= pix2 < resolution[1]:
                image[pix1, pix2] = dis[i]
    return image
def project_points(location, normal_vectors):
    """Project 3-D points onto the plane spanned by two orthogonal vectors.

    Parameters
    ----------
    location : array of shape [3, n_nodes]
        Cartesian coordinates of the points.
    normal_vectors : array of shape [2, 3]
        Two mutually orthogonal vectors spanning the plane (one per row).

    Returns
    -------
    array of shape [2, n_nodes]
        Planar coordinates of every point.
    """
    # Each row of `normal_vectors` dotted with each column of `location`
    # yields one planar coordinate per point.
    return np.dot(normal_vectors, location)
def depth_points(location, orthogonal_vector):
    """Depth of 3-D points along the axis orthogonal to a plane.

    Parameters
    ----------
    location : array of shape [3, n_nodes]
        Cartesian coordinates of the points.
    orthogonal_vector : array of shape [3]
        Vector orthogonal to the projection plane.

    Returns
    -------
    array of shape [n_nodes]
        Signed distance of every point along ``orthogonal_vector``.
    """
    return np.dot(orthogonal_vector, location)
def make_image(neuron, A, scale_depth, index_neuron):
    """Render a grayscale projection of *neuron* using the basis *A*.

    Rows 0-1 of ``A`` span the projection plane; row 2 gives the depth
    axis.  The figure is saved as ``neuron<i>resample<j>angle<k>.png``
    (1-based indices taken from *index_neuron*) and read back as an array.

    Parameters
    ----------
    A : array of shape [3, 3]
        Basis used for the projection and the depth axis.
    scale_depth : float in [0, 1]
        Scale of the normalized depth used as gray level.
    index_neuron : sequence of three ints
        (neuron, resample, angle) indices used to build the file name.

    Returns
    -------
    numpy.ndarray
        Red channel of the saved image as a 2-D int32 array.
    """
    normal_vectors = A[0:2,:]
    orthogonal_vector = A[2,:]
    depth = depth_points(neuron.location, orthogonal_vector)
    p = project_points(neuron.location, normal_vectors)
    m = min(depth)
    M = max(depth)
    depth = scale_depth * ((depth - m)/(M-m))   # normalize into [0, scale_depth]
    colors = []
    lines = []
    # One node-to-parent segment per node; RGBA gray level encodes depth.
    for i in range(len(neuron.nodes_list)):
        colors.append((depth[i],depth[i],depth[i],1))
        j = neuron.parent_index[i]
        lines.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
    lc = mc.LineCollection(lines, colors=colors, linewidths=2)
    fig, ax = pl.subplots()
    ax.add_collection(lc)
    pl.axis('off')
    pl.xlim((min(p[0,:]),max(p[0,:])))
    pl.ylim((min(p[1,:]),max(p[1,:])))
    # File name encodes 1-based neuron / resample / angle indices.
    Name = "neuron" + str(index_neuron[0]+1) + "resample" + str(index_neuron[1]+1) + "angle" + str(index_neuron[2]+1) + ".png"
    fig.savefig(Name,figsize=(6, 6), dpi=80)
    img = Image.open(Name)
    img.load()
    data = np.asarray( img, dtype="int32" )
    data = data[:,:,0]   # keep only the first (red) channel
    return data
def random_unitary_basis(kappa):
    """Draw a random 3-D rotation matrix as a product of six independent
    single-axis rotations (three for A, three for B).

    Parameters
    ----------
    kappa : float
        Concentration parameter of the von Mises distribution each
        rotation angle is drawn from.

    Returns
    -------
    numpy.ndarray of shape [3, 3]
        The product ``A . B`` of the two composed rotations.

    Notes
    -----
    Bug fixed: the original reassigned ``Ay1``/``Az1`` instead of
    creating independent ``Ay2``/``Az2`` and never used ``Ax2``, so B
    was built from (a subset of) the same factors as A.
    """
    ax1 = random_2d_rotation_in_3d('x', kappa)
    ay1 = random_2d_rotation_in_3d('y', kappa)
    az1 = random_2d_rotation_in_3d('z', kappa)
    ax2 = random_2d_rotation_in_3d('x', kappa)
    ay2 = random_2d_rotation_in_3d('y', kappa)
    az2 = random_2d_rotation_in_3d('z', kappa)
    A = np.dot(np.dot(ax1, ay1), az1)
    B = np.dot(np.dot(az2, ay2), ax2)
    return np.dot(A, B)
def random_2d_rotation_in_3d(axis, kappa):
    """Random rotation about a single coordinate axis.

    Parameters
    ----------
    axis : str
        One of 'x', 'y' or 'z'; the axis left fixed by the rotation.
    kappa : float
        Concentration of the von Mises distribution the angle is drawn from.

    Returns
    -------
    numpy.ndarray of shape [3, 3]
        Rotation matrix about the requested axis.

    Raises
    ------
    ValueError
        If *axis* is not 'x', 'y' or 'z' (the original silently returned
        None via identity comparison `axis is 'z'`, which is unreliable).
    """
    # Scalar draw (the original drew a size-1 array and assigned it into
    # scalar matrix entries, which only works via implicit conversion).
    theta = np.random.vonmises(0.0, kappa)
    c = np.cos(theta)
    s = np.sin(theta)
    A = np.eye(3)
    if axis == 'z':
        A[0, 0] = c
        A[1, 0] = s
        A[0, 1] = -s
        A[1, 1] = c
    elif axis == 'y':
        A[0, 0] = c
        A[2, 0] = s
        A[0, 2] = -s
        A[2, 2] = c
    elif axis == 'x':
        A[1, 1] = c
        A[2, 1] = s
        A[1, 2] = -s
        A[2, 2] = c
    else:
        raise ValueError("axis must be one of 'x', 'y', 'z', got %r" % (axis,))
    return A
def make_six_matrix(A):
    """Return the six row permutations of the 3x3 matrix *A*.

    Each permutation re-orders which rows act as the two projection
    vectors and the depth axis, giving six viewing directions.
    """
    row_orders = ([0, 1, 2], [0, 2, 1], [1, 2, 0],
                  [1, 0, 2], [2, 0, 1], [2, 1, 0])
    return [A[order, :] for order in row_orders]
def make_six_images(neuron, scale_depth, neuron_index, kappa):
    """Render the six axis-aligned views of *neuron* as grayscale images.

    The basis is currently the identity (a random basis drawn with
    *kappa* is disabled), so the six images are the six row permutations
    of the standard axes.  Returns a list of six 2-D arrays.
    """
    # A = random_unitary_basis(kappa)  # disabled: fixed identity basis
    basis = np.eye(3)
    images = []
    for view, view_matrix in enumerate(make_six_matrix(basis)):
        images.append(
            make_image(neuron, view_matrix, scale_depth,
                       np.append(neuron_index, view)))
    return images
def generate_data(path, scale_depth, n_camrea, kappa):
"""
input
-----
path : list
list of all the pathes of swc. each element of the list should be a string.
scale_depth : float in the interval [0,1]
a value to differentiate between the background and gray level in the image.
n_camera : int
number of different angles to set the six images. For each angle, six images will be generated (up,down and four sides)
kappa : float
The width of the distribution that the angles come from. Large value for kappa results in the angles close to x aixs
kappa = 1 is equvalent to the random angle.
output
------
Data : list of length
"""
Data = []
for i in range(len(path)):
print path[i]
neuron = Neuron(file_format = 'swc without attributes', input_file=path[i])
if(len(neuron.nodes_list) != 0):
for j in range(n_camrea):
D = np.asarray(make_six_images(neuron,scale_depth,np.array([i,j]), kappa))
Data.append(D)
return Data
def get_all_path(directory):
    """Recursively collect the paths of all SWC files under *directory*.

    Parameters
    ----------
    directory : str
        Root directory to walk.

    Returns
    -------
    list of str
        Paths (rooted at *directory*) of every file whose name ends in
        'swc'.

    Notes
    -----
    Bug fixed: the original built paths as
    ``directory + root.replace(directory, "") + os.sep + fileName``;
    ``str.replace`` removes *every* occurrence of the directory string,
    corrupting paths in which that substring appears more than once.
    ``os.path.join(root, fileName)`` is the equivalent, correct form.
    """
    swc_paths = []
    for root, dirs, files in os.walk(directory):
        for file_name in files:
            if file_name.endswith('swc'):
                swc_paths.append(os.path.join(root, file_name))
    return swc_paths
def plot_2d(neuron, show_depth, line_width):
    """Quick 2-D line plot of a neuron (xy-projection of its segments).

    Parameters
    ----------
    show_depth : bool
        When True, gray level of each segment encodes normalized depth;
        otherwise all segments are black.
    line_width : float
        Width of the plotted segments.
    """
    # NOTE(review): depth is taken from row 0 (the x axis) here, while the
    # other plotting helpers in this file use row 2 (z) -- confirm intended.
    depth = neuron.location[0,:]
    m = min(depth)
    M = max(depth)
    depth = ((depth - m)/(M-m))   # normalize into [0, 1]
    p = neuron.location[0:2,:]
    colors = []
    lines = []
    # One node-to-parent segment per node; RGBA gray level from depth.
    for i in range(len(neuron.nodes_list)):
        colors.append((depth[i],depth[i],depth[i],1))
        j = neuron.parent_index[i]
        lines.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
    if(show_depth == False):
        lc = mc.LineCollection(lines, colors='k', linewidths=line_width)
    else:
        lc = mc.LineCollection(lines, colors=colors, linewidths=line_width)
    fig, ax = pl.subplots()
    ax.add_collection(lc)
    pl.axis('off')
    pl.xlim((min(p[0,:]),max(p[0,:])))
    pl.ylim((min(p[1,:]),max(p[1,:])))
def plot_dendrograph(neuron):
    # TODO: dendrograph plotting is not implemented; this stub only
    # prints a constant (Python 2 print statement).
    print 1
def plot_2D(neuron,
            background = 1,
            show_width = False,
            show_depth = False,
            size = 5,
            dpi = 80,
            line_width = 1,
            show_soma = False,
            give_image = False,
            red_after = False,
            node_red = 0,
            translation = (0,0),
            scale_on = False,
            scale = (1,1),
            save = []):
    """Plot the xy-projection of a neuron, or return drawable collections.

    Parameters
    ----------
    neuron : Neuron
        Needs ``location``, ``diameter``, ``parent_index``, ``nodes_list``,
        ``n_soma`` and -- when *red_after* is set -- ``connection``.
    background : float
        Scale of the normalized depth used as gray level.
    show_width : bool
        Scale line widths by node diameters.
    show_depth : bool
        Encode depth (z) as the gray level of each segment.
    size, dpi : int
        Figure size in inches (plus one) and resolution.
    line_width : float
        Base line width.
    show_soma : bool
        Also draw the soma circles.
    give_image : bool
        Return collections and axis limits instead of drawing a figure.
    red_after : bool
        With *give_image*: split segments by whether the node is reached
        through *node_red* (non-NaN entry in ``neuron.connection``) and
        return the reached part in red.
    node_red : int
        Column of ``neuron.connection`` used for the red/black split.
    translation : tuple
        Offset added to all plotted x/y coordinates.
    scale_on : bool
        Rescale x/y into [0, scale[0]] x [0, scale[1]] first.
    scale : tuple
        Target ranges for the rescaling.
    save : str or list
        When non-empty, path used to save the figure in EPS format.

    Returns
    -------
    tuple or None
        With *give_image*: ``(lc, xlim, ylim)``, or
        ``(lc1, lc2, xlim, ylim)`` when *red_after* is set; otherwise the
        figure is drawn and nothing is returned.
    """
    depth = neuron.location[2,:]
    p = neuron.location[0:2,:]
    # NOTE(review): p is a view, so this rescales neuron.location in place.
    if scale_on:
        p[0,:] = scale[0] * (p[0,:]-min(p[0,:]))/(max(p[0,:]) - min(p[0,:]) )
        p[1,:] = scale[1] * (p[1,:]-min(p[1,:]))/(max(p[1,:]) - min(p[1,:]) )
    widths= neuron.diameter
    #widths[0:3] = 0
    m = min(depth)
    M = max(depth)
    depth = background * ((depth - m)/(M-m))   # normalize into [0, background]
    colors = []
    lines = []
    patches = []
    # Soma nodes drawn as circles with depth-encoded gray level.
    for i in range(neuron.n_soma):
        x1 = neuron.location[0,i] + translation[0]
        y1 = neuron.location[1,i] + translation[1]
        r = widths[i]
        circle = Circle((x1, y1), r, color = str(depth[i]), ec = 'none',fc = 'none')
        patches.append(circle)
    pa = PatchCollection(patches, cmap=matplotlib.cm.gray)
    pa.set_array(depth[0]*np.zeros(neuron.n_soma))
    # One node-to-parent segment per node, shifted by `translation`.
    for i in range(len(neuron.nodes_list)):
        colors.append(str(depth[i]))
        j = neuron.parent_index[i]
        lines.append([(p[0,i] + translation[0],p[1,i] + translation[1]),(p[0,j] + translation[0],p[1,j] + translation[1])])
    # Style the segments according to the depth/width flags.
    if(show_width):
        if(show_depth):
            lc = mc.LineCollection(lines, colors=colors, linewidths = line_width*widths)
        else:
            lc = mc.LineCollection(lines, linewidths = line_width*widths)
    else:
        if(show_depth):
            lc = mc.LineCollection(lines, colors=colors, linewidths = line_width)
        else:
            lc = mc.LineCollection(lines, linewidths = line_width, color = 'k')
    if(give_image):
        if(red_after):
            # Split segments into those connected through node_red (red,
            # double width) and the rest (black, base width).
            line1 = []
            line2 = []
            (I1,) = np.where(~np.isnan(neuron.connection[:,node_red]))
            (I2,) = np.where(np.isnan(neuron.connection[:,node_red]))
            for i in I1:
                j = neuron.parent_index[i]
                line1.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
            lc1 = mc.LineCollection(line1, linewidths = 2*line_width, color = 'r')
            for i in I2:
                j = neuron.parent_index[i]
                line2.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
            lc2 = mc.LineCollection(line2, linewidths = line_width, color = 'k')
            return (lc1, lc2, (min(p[0,:]),max(p[0,:])), (min(p[1,:]),max(p[1,:])))
        else:
            return (lc, (min(p[0,:]),max(p[0,:])), (min(p[1,:]),max(p[1,:])))
    else:
        fig, ax = plt.subplots()
        ax.add_collection(lc)
        if(show_soma):
            ax.add_collection(pa)
        fig.set_size_inches([size + 1, size + 1])
        fig.set_dpi(dpi)
        plt.axis('off')
        plt.xlim((min(p[0,:]),max(p[0,:])))
        plt.ylim((min(p[1,:]),max(p[1,:])))
        plt.draw()
        if(len(save)!=0):
            plt.savefig(save, format = "eps")
# def plot_2D(neuron, background = 1, show_width = False, show_depth = False , size = 5, dpi = 80, line_width = 1):
# depth = neuron.location[2,:]
# p = neuron.location[0:2,:]
# widths= neuron.diameter
# m = min(depth)
# M = max(depth)
# depth = background * ((depth - m)/(M-m))
# colors = []
# lines = []
# patches = []
#
# for i in range(neuron.n_soma):
# x1 = neuron.location[0,i]
# y1 = neuron.location[1,i]
# r = neuron.diameter[i]
# circle = Circle((x1, y1), r, color = str(depth[i]), ec = 'none',fc = 'none')
# patches.append(circle)
#
# pa = PatchCollection(patches, cmap=matplotlib.cm.gray)
# pa.set_array(depth[0]*np.zeros(neuron.n_soma))
#
# for i in range(len(neuron.nodes_list)):
# | |
class LenStringArray(AbstractTemplate):
# def generic(self, args, kws):
# if not kws and len(args)==1 and args[0]==string_array_type:
# return signature(types.intp, *args)
# XXX: should these be exposed?
# make_attribute_wrapper(StringArrayType, 'num_items', 'num_items')
# make_attribute_wrapper(StringArrayType, 'num_total_chars', 'num_total_chars')
# make_attribute_wrapper(StringArrayType, 'offsets', 'offsets')
# make_attribute_wrapper(StringArrayType, 'data', 'data')
# make_attribute_wrapper(StringArrayPayloadType, 'offsets', 'offsets')
# make_attribute_wrapper(StringArrayPayloadType, 'data', 'data')
# XXX can't use this with overload_method
@infer_getattr
class StrArrayAttribute(AttributeTemplate):
    """Typing template for attribute/method access on string arrays."""
    key = StringArrayType
    def resolve_size(self, ctflags):
        # `.size` types as a native-width integer.
        return types.intp
    @bound_function("str_arr.copy")
    def resolve_copy(self, ary, args, kws):
        # `.copy()` returns a new string array.
        return signature(string_array_type, *args)
@lower_builtin("str_arr.copy", string_array_type)
def str_arr_copy_impl(context, builder, sig, args):
    # Lower `str_arr.copy()` by compiling the pure-Python copy_impl.
    return context.compile_internal(builder, copy_impl, sig, args)
def copy_impl(arr):
    """Return a freshly allocated copy of the string array *arr*."""
    num_items = len(arr)
    total_chars = np.int64(num_total_chars(arr))
    out_arr = pre_alloc_string_array(num_items, total_chars)
    copy_str_arr_slice(out_arr, arr, num_items)
    return out_arr
# @overload_method(StringArrayType, 'copy')
# def string_array_copy(arr_t):
# return copy_impl
# @overload_attribute(string_array_type, 'size')
# def string_array_attr_size(arr_t):
# return get_str_arr_size
# def get_str_arr_size(arr): # pragma: no cover
# return len(arr)
# @infer_global(get_str_arr_size)
# class StrArrSizeInfer(AbstractTemplate):
# def generic(self, args, kws):
# assert not kws
# assert len(args) == 1 and args[0] == string_array_type
# return signature(types.intp, *args)
# @lower_builtin(get_str_arr_size, string_array_type)
# def str_arr_size_impl(context, builder, sig, args):
@lower_getattr(string_array_type, 'size')
def str_arr_size_impl(context, builder, typ, val):
    # Lower the `.size` attribute: read num_items from the struct and
    # return it as a borrowed intp value (no new reference created).
    string_array = context.make_helper(builder, string_array_type, val)
    attrval = string_array.num_items
    attrty = types.intp
    return impl_ret_borrowed(context, builder, attrty, attrval)
# @lower_builtin(StringArray, types.Type, types.Type)
# def impl_string_array(context, builder, sig, args):
# typ = sig.return_type
# offsets, data = args
# string_array = cgutils.create_struct_proxy(typ)(context, builder)
# string_array.offsets = offsets
# string_array.data = data
# return string_array._getvalue()
@overload(len)
def str_arr_len_overload(str_arr):
    # len(str_arr) delegates to the `.size` attribute (number of items).
    if is_str_arr_typ(str_arr):
        def str_arr_len(str_arr):
            return str_arr.size
        return str_arr_len
# Register the C helper functions exported by hstr_ext with llvmlite so
# the lowered LLVM code can resolve them by name.
ll.add_symbol('get_str_len', hstr_ext.get_str_len)
ll.add_symbol('allocate_string_array', hstr_ext.allocate_string_array)
ll.add_symbol('setitem_string_array', hstr_ext.setitem_string_array)
ll.add_symbol('getitem_string_array', hstr_ext.getitem_string_array)
ll.add_symbol('getitem_string_array_std', hstr_ext.getitem_string_array_std)
ll.add_symbol('is_na', hstr_ext.is_na)
ll.add_symbol('string_array_from_sequence', hstr_ext.string_array_from_sequence)
ll.add_symbol('np_array_from_string_array', hstr_ext.np_array_from_string_array)
ll.add_symbol('print_int', hstr_ext.print_int)
ll.add_symbol('convert_len_arr_to_offset', hstr_ext.convert_len_arr_to_offset)
ll.add_symbol('set_string_array_range', hstr_ext.set_string_array_range)
ll.add_symbol('str_arr_to_int64', hstr_ext.str_arr_to_int64)
ll.add_symbol('str_arr_to_float64', hstr_ext.str_arr_to_float64)
ll.add_symbol('dtor_string_array', hstr_ext.dtor_string_array)
ll.add_symbol('c_glob', hstr_ext.c_glob)
ll.add_symbol('decode_utf8', hstr_ext.decode_utf8)
ll.add_symbol('get_utf8_size', hstr_ext.get_utf8_size)
# Typed wrappers so nopython-mode code can call the C helpers directly.
convert_len_arr_to_offset = types.ExternalFunction("convert_len_arr_to_offset", types.void(types.voidptr, types.intp))
setitem_string_array = types.ExternalFunction("setitem_string_array",
                                              types.void(types.voidptr, types.voidptr, types.intp, string_type,
                                                         types.intp))
_get_utf8_size = types.ExternalFunction("get_utf8_size",
                                        types.intp(types.voidptr, types.intp, types.int32))
def construct_string_array(context, builder):
    """Creates meminfo and sets dtor.

    Allocates an NRT meminfo block sized for the string-array payload
    struct and registers the C destructor ``dtor_string_array`` so the
    underlying buffers are released when the refcount drops to zero.

    Returns
    -------
    (meminfo, meminfo_data_ptr)
        The meminfo and its data pointer, already cast to a pointer to
        the payload struct type.  The payload itself is NOT initialized.
    """
    alloc_type = context.get_data_type(str_arr_payload_type)
    alloc_size = context.get_abi_sizeof(alloc_type)
    llvoidptr = context.get_value_type(types.voidptr)
    llsize = context.get_value_type(types.uintp)
    # Destructor signature: void(void* data, size_t size, void* info).
    dtor_ftype = lir.FunctionType(lir.VoidType(),
                                  [llvoidptr, llsize, llvoidptr])
    dtor_fn = builder.module.get_or_insert_function(
        dtor_ftype, name="dtor_string_array")
    meminfo = context.nrt.meminfo_alloc_dtor(
        builder,
        context.get_constant(types.uintp, alloc_size),
        dtor_fn,
    )
    meminfo_data_ptr = context.nrt.meminfo_data(builder, meminfo)
    meminfo_data_ptr = builder.bitcast(meminfo_data_ptr,
                                       alloc_type.as_pointer())
    # Nullify all data
    # builder.store( cgutils.get_null_value(alloc_type),
    #               meminfo_data_ptr)
    return meminfo, meminfo_data_ptr
# TODO: overload of constructor doesn't work
# @overload(StringArray)
# def string_array_const(in_list=None):
# if in_list is None:
# return lambda: pre_alloc_string_array(0, 0)
# def str_arr_from_list(in_list):
# n_strs = len(in_list)
# total_chars = 0
# # TODO: use vector to avoid two passes?
# # get total number of chars
# for s in in_list:
# total_chars += len(s)
# A = pre_alloc_string_array(n_strs, total_chars)
# for i in range(n_strs):
# A[i] = in_list[i]
# return A
# return str_arr_from_list
# used in pd.DataFrame() and pd.Series() to convert list of strings
@lower_builtin(StringArray)
@lower_builtin(StringArray, types.List)
@lower_builtin(StringArray, types.UniTuple)
@lower_builtin(StringArray, types.Tuple)
def impl_string_array_single(context, builder, sig, args):
    """Lower ``StringArray(seq)`` construction from a list/tuple of
    strings; with no argument an empty string array is produced.

    NOTE(review): ``args`` holds lowered LLVM values, not numba types, so
    the isinstance checks and asserts below can never trigger, and for
    the zero-argument registration ``args[0]`` would raise IndexError
    before the ``if not sig.args`` guard runs.  This probably should use
    ``sig.args[0]`` inside an ``if sig.args:`` block -- confirm before
    changing.
    """
    arg = args[0]
    if isinstance(arg, (types.UniTuple, types.List)):
        assert (arg.dtype == string_type
                or (isinstance(arg.dtype, types.Optional) and arg.dtype.type == string_type))
    # FIXME: doesn't work for Tuple with None values
    if isinstance(arg, types.Tuple):
        for i in arg:
            assert i.dtype == string_type or i.dtype == types.StringLiteral
    if not sig.args:  # return empty string array if no args
        res = context.compile_internal(
            builder, lambda: pre_alloc_string_array(0, 0), sig, args)
        return res
    def str_arr_from_sequence(in_list):
        # Two passes: first tally total characters (treating None as NA),
        # then allocate once and fill, finally applying the NA mask.
        n_strs = len(in_list)
        total_chars = 0
        # TODO: use vector to avoid two passes?
        # get total number of chars
        nan_mask = np.zeros(n_strs, dtype=np.bool_)
        for i in numba.prange(n_strs):
            s = in_list[i]
            if s is None:
                nan_mask[i] = True
            else:
                total_chars += get_utf8_size(s)
        A = pre_alloc_string_array(n_strs, total_chars)
        for i in np.arange(n_strs):
            A[i] = '' if nan_mask[i] else in_list[i]
        str_arr_set_na_by_mask(A, nan_mask)
        return A
    res = context.compile_internal(builder, str_arr_from_sequence, sig, args)
    return res
# @lower_builtin(StringArray)
# @lower_builtin(StringArray, types.List)
# def impl_string_array_single(context, builder, sig, args):
# typ = sig.return_type
# zero = context.get_constant(types.intp, 0)
# meminfo, meminfo_data_ptr = construct_string_array(context, builder)
# str_arr_payload = cgutils.create_struct_proxy(str_arr_payload_type)(context, builder)
# if not sig.args: # return empty string array if no args
# # XXX alloc empty arrays for dtor to safely delete?
# builder.store(str_arr_payload._getvalue(), meminfo_data_ptr)
# string_array = context.make_helper(builder, typ)
# string_array.meminfo = meminfo
# string_array.num_items = zero
# string_array.num_total_chars = zero
# ret = string_array._getvalue()
# #context.nrt.decref(builder, ty, ret)
# return impl_ret_new_ref(context, builder, typ, ret)
# string_list = ListInstance(context, builder, sig.args[0], args[0])
# # get total size of string buffer
# fnty = lir.FunctionType(lir.IntType(64),
# [lir.IntType(8).as_pointer()])
# fn_len = builder.module.get_or_insert_function(fnty, name="get_str_len")
# total_size = cgutils.alloca_once_value(builder, zero)
# # loop through all strings and get length
# with cgutils.for_range(builder, string_list.size) as loop:
# str_value = string_list.getitem(loop.index)
# str_len = builder.call(fn_len, [str_value])
# builder.store(builder.add(builder.load(total_size), str_len), total_size)
# # allocate string array
# fnty = lir.FunctionType(lir.VoidType(),
# [lir.IntType(32).as_pointer().as_pointer(),
# lir.IntType(8).as_pointer().as_pointer(),
# lir.IntType(8).as_pointer().as_pointer(),
# lir.IntType(64),
# lir.IntType(64)])
# fn_alloc = builder.module.get_or_insert_function(fnty,
# name="allocate_string_array")
# builder.call(fn_alloc, [str_arr_payload._get_ptr_by_name('offsets'),
# str_arr_payload._get_ptr_by_name('data'),
# str_arr_payload._get_ptr_by_name('null_bitmap'),
# string_list.size, builder.load(total_size)])
# # set string array values
# fnty = lir.FunctionType(lir.VoidType(),
# [lir.IntType(32).as_pointer(),
# lir.IntType(8).as_pointer(),
# lir.IntType(8).as_pointer(),
# lir.IntType(64)])
# fn_setitem = builder.module.get_or_insert_function(fnty,
# name="setitem_string_array")
# with cgutils.for_range(builder, string_list.size) as loop:
# str_value = string_list.getitem(loop.index)
# builder.call(fn_setitem, [str_arr_payload.offsets, str_arr_payload.data,
# str_value, loop.index])
# builder.store(str_arr_payload._getvalue(), meminfo_data_ptr)
# string_array = context.make_helper(builder, typ)
# string_array.num_items = string_list.size
# string_array.num_total_chars = builder.load(total_size)
# #cgutils.printf(builder, "str %d %d\n", string_array.num_items, string_array.num_total_chars)
# string_array.offsets = str_arr_payload.offsets
# string_array.data = str_arr_payload.data
# string_array.null_bitmap = str_arr_payload.null_bitmap
# string_array.meminfo = meminfo
# ret = string_array._getvalue()
# #context.nrt.decref(builder, ty, ret)
# return impl_ret_new_ref(context, builder, typ, ret)
@intrinsic
def pre_alloc_string_array(typingctx, num_strs_typ, num_total_chars_typ=None):
    """Intrinsic: allocate a string array with capacity for ``num_strs``
    strings totalling ``num_total_chars`` characters.

    The offsets/data/null-bitmap buffers are allocated by the C helper
    ``allocate_string_array`` and owned by an NRT meminfo carrying the
    string-array destructor; string contents are left uninitialized.
    """
    assert isinstance(num_strs_typ, types.Integer) and isinstance(num_total_chars_typ, types.Integer)
    def codegen(context, builder, sig, args):
        num_strs, num_total_chars = args
        meminfo, meminfo_data_ptr = construct_string_array(context, builder)
        str_arr_payload = cgutils.create_struct_proxy(str_arr_payload_type)(context, builder)
        # allocate string array
        fnty = lir.FunctionType(lir.VoidType(),
                                [lir.IntType(32).as_pointer().as_pointer(),
                                 lir.IntType(8).as_pointer().as_pointer(),
                                 lir.IntType(8).as_pointer().as_pointer(),
                                 lir.IntType(64),
                                 lir.IntType(64)])
        fn_alloc = builder.module.get_or_insert_function(fnty,
                                                         name="allocate_string_array")
        # The C helper writes the freshly allocated buffer pointers back
        # through the payload's field addresses.
        builder.call(fn_alloc, [str_arr_payload._get_ptr_by_name('offsets'),
                                str_arr_payload._get_ptr_by_name('data'),
                                str_arr_payload._get_ptr_by_name('null_bitmap'),
                                num_strs,
                                num_total_chars])
        # Persist the payload behind the meminfo, then mirror its fields
        # in the user-facing struct.
        builder.store(str_arr_payload._getvalue(), meminfo_data_ptr)
        string_array = context.make_helper(builder, string_array_type)
        string_array.num_items = num_strs
        string_array.num_total_chars = num_total_chars
        string_array.offsets = str_arr_payload.offsets
        string_array.data = str_arr_payload.data
        string_array.null_bitmap = str_arr_payload.null_bitmap
        string_array.meminfo = meminfo
        ret = string_array._getvalue()
        # context.nrt.decref(builder, ty, ret)
        return impl_ret_new_ref(context, builder, string_array_type, ret)
    return string_array_type(types.intp, types.intp), codegen
@intrinsic
def set_string_array_range(typingctx, out_typ, in_typ, curr_str_typ, curr_chars_typ=None):
    """Intrinsic: copy the strings of ``in_arr`` into ``out_arr`` starting
    at string index ``curr_str`` and character offset ``curr_chars``
    (delegates to the C helper ``set_string_array_range``)."""
    assert is_str_arr_typ(out_typ) and is_str_arr_typ(in_typ)
    assert curr_str_typ == types.intp and curr_chars_typ == types.intp
    def codegen(context, builder, sig, args):
        out_arr, in_arr, curr_str_ind, curr_chars_ind = args
        # get input/output struct
        out_string_array = context.make_helper(builder, string_array_type, out_arr)
        in_string_array = context.make_helper(builder, string_array_type, in_arr)
        # void set_string_array_range(out_offsets, out_data, in_offsets,
        #     in_data, start_str, start_chars, num_items, num_total_chars)
        fnty = lir.FunctionType(lir.VoidType(),
                                [lir.IntType(32).as_pointer(),
                                 lir.IntType(8).as_pointer(),
                                 lir.IntType(32).as_pointer(),
                                 lir.IntType(8).as_pointer(),
                                 lir.IntType(64),
                                 lir.IntType(64),
                                 lir.IntType(64),
                                 lir.IntType(64), ])
        fn_alloc = builder.module.get_or_insert_function(fnty,
                                                         name="set_string_array_range")
        builder.call(fn_alloc, [out_string_array.offsets,
                                out_string_array.data,
                                in_string_array.offsets,
                                in_string_array.data,
                                curr_str_ind,
                                curr_chars_ind,
                                in_string_array.num_items,
                                in_string_array.num_total_chars,
                                ])
        return context.get_dummy_value()
    return types.void(string_array_type, string_array_type, types.intp, types.intp), codegen
# box series calls this too
@box(StringArrayType)
def box_str_arr(typ, val, c):
    """Box a native string array into a NumPy array object built by the C
    helper ``np_array_from_string_array``; consumes one reference to the
    native value.
    """
    string_array = c.context.make_helper(c.builder, string_array_type, val)
    # PyObject* np_array_from_string_array(num_items, offsets, data, null_bitmap)
    fnty = lir.FunctionType(c.context.get_argument_type(types.pyobject),  # lir.IntType(8).as_pointer(),
                            [lir.IntType(64),
                             lir.IntType(32).as_pointer(),
                             lir.IntType(8).as_pointer(),
                             lir.IntType(8).as_pointer(),
                             ])
    fn_get = c.builder.module.get_or_insert_function(fnty, name="np_array_from_string_array")
    arr = c.builder.call(fn_get, [string_array.num_items, string_array.offsets,
                                  string_array.data, string_array.null_bitmap])
    # TODO: double check refcounting here
    c.context.nrt.decref(c.builder, typ, val)
    return arr  # c.builder.load(arr)
@intrinsic
def str_arr_is_na(typingctx, str_arr_typ, ind_typ=None):
    # None default to make IntelliSense happy
    """Intrinsic: True when bit ``ind`` of the null bitmap is cleared
    (a cleared bit marks NA in this bitmap layout)."""
    assert is_str_arr_typ(str_arr_typ)
    def codegen(context, builder, sig, args):
        in_str_arr, ind = args
        string_array = context.make_helper(builder, string_array_type, in_str_arr)
        # (null_bitmap[i / 8] & kBitmask[i % 8]) == 0;
        byte_ind = builder.lshr(ind, lir.Constant(lir.IntType(64), 3))   # ind // 8
        bit_ind = builder.urem(ind, lir.Constant(lir.IntType(64), 8))    # ind % 8
        byte = builder.load(builder.gep(string_array.null_bitmap, [byte_ind], inbounds=True))
        # Stack-allocated lookup table of the eight single-bit masks.
        ll_typ_mask = lir.ArrayType(lir.IntType(8), 8)
        mask_tup = cgutils.alloca_once_value(builder, lir.Constant(ll_typ_mask, (1, 2, 4, 8, 16, 32, 64, 128)))
        mask = builder.load(builder.gep(mask_tup, [lir.Constant(lir.IntType(64), 0), bit_ind], inbounds=True))
        return builder.icmp_unsigned('==', builder.and_(byte, mask), lir.Constant(lir.IntType(8), 0))
    return types.bool_(string_array_type, types.intp), codegen
@intrinsic
def str_arr_set_na(typingctx, str_arr_typ, ind_typ=None):
    # None default to make IntelliSense happy
    """Intrinsic: mark entry ``ind`` as NA by *clearing* its bit in the
    null bitmap (a cleared bit denotes NA, matching str_arr_is_na)."""
    assert is_str_arr_typ(str_arr_typ)
    def codegen(context, builder, sig, args):
        in_str_arr, ind = args
        string_array = context.make_helper(builder, string_array_type, in_str_arr)
        # bits[i / 8] |= kBitmask[i % 8];
        # NOTE(review): the C++-style comment above describes *setting*
        # the bit, but the code below clears it (AND with the inverted
        # mask) -- NA is encoded as a 0 bit here.
        byte_ind = builder.lshr(ind, lir.Constant(lir.IntType(64), 3))   # ind // 8
        bit_ind = builder.urem(ind, lir.Constant(lir.IntType(64), 8))    # ind % 8
        byte_ptr = builder.gep(string_array.null_bitmap, [byte_ind], inbounds=True)
        byte = builder.load(byte_ptr)
        ll_typ_mask = lir.ArrayType(lir.IntType(8), 8)
        mask_tup = cgutils.alloca_once_value(builder, lir.Constant(ll_typ_mask, (1, 2, 4, 8, 16, 32, 64, 128)))
        mask = builder.load(builder.gep(mask_tup, [lir.Constant(lir.IntType(64), 0), bit_ind], inbounds=True))
        # flip all bits of mask e.g. 11111101
        mask = builder.xor(mask, lir.Constant(lir.IntType(8), -1))
        # unset masked bit
        builder.store(builder.and_(byte, mask), byte_ptr)
        return context.get_dummy_value()
    return types.void(string_array_type, types.intp), codegen
@intrinsic
def set_null_bits(typingctx, str_arr_typ=None):
    """Intrinsic: initialize the whole null bitmap to 0xFF, i.e. mark
    every entry as non-NA.

    NOTE(review): despite the name, this marks entries *valid*, not null
    (the memset value is -1 == 0xFF, all bits set).
    """
    assert is_str_arr_typ(str_arr_typ)
    def codegen(context, builder, sig, args):
        in_str_arr, = args
        string_array = context.make_helper(builder, string_array_type, in_str_arr)
        # n_bytes = (num_strings+sizeof(uint8_t)-1)/sizeof(uint8_t);
        # Round the bit count up to whole bytes: (num_items + 7) // 8.
        n_bytes = builder.udiv(
            builder.add(
                string_array.num_items, lir.Constant(
                    lir.IntType(64), 7)), lir.Constant(
                lir.IntType(64), 8))
        cgutils.memset(builder, string_array.null_bitmap, n_bytes, -1)
        return context.get_dummy_value()
    return types.none(string_array_type), codegen
# XXX: setitem works only if value is same size as the previous value
@lower_builtin(operator.setitem, StringArrayType, types.Integer, string_type)
def setitem_str_arr(context, builder, sig, args):
    """Lower ``str_arr[ind] = val`` via the C helper
    ``setitem_string_array`` (offsets/data buffers mutated in place)."""
    arr, ind, val = args
    # Unpack the unicode value into its struct fields (data/length/kind/...).
    uni_str = cgutils.create_struct_proxy(string_type)(
        context, builder, value=val)
    string_array = context.make_helper(builder, string_array_type, arr)
    # void setitem_string_array(offsets, data, num_total_chars,
    #     str_data, str_length, str_kind, str_is_ascii, index)
    fnty = lir.FunctionType(lir.VoidType(),
                            [lir.IntType(32).as_pointer(),
                             lir.IntType(8).as_pointer(),
                             lir.IntType(64),
                             lir.IntType(8).as_pointer(),
                             lir.IntType(64),
                             lir.IntType(32),
                             lir.IntType(32),
                             lir.IntType(64)])
    fn_setitem = builder.module.get_or_insert_function(
        fnty, name="setitem_string_array")
    builder.call(fn_setitem, [string_array.offsets, string_array.data,
                              string_array.num_total_chars,
                              uni_str.data, uni_str.length, uni_str.kind,
                              uni_str.is_ascii, ind])
    return context.get_dummy_value()
@numba.njit(no_cpython_wrapper=True)
def get_utf8_size(s):
    """Number of UTF-8 bytes needed to encode the unicode string *s*."""
    # ASCII strings encode one byte per character; otherwise defer to
    # the C helper, which inspects the string's data/length/kind.
    if s._is_ascii != 1:
        return _get_utf8_size(s._data, s._length, s._kind)
    return len(s)
@intrinsic
def setitem_str_arr_ptr(typingctx, str_arr_t, ind_t, ptr_t, len_t=None):
def codegen(context, builder, sig, args):
arr, ind, ptr, length = args
string_array = context.make_helper(builder, string_array_type, arr)
fnty = lir.FunctionType(lir.VoidType(),
[lir.IntType(32).as_pointer(),
lir.IntType(8).as_pointer(),
lir.IntType(64),
lir.IntType(8).as_pointer(),
lir.IntType(64),
lir.IntType(32),
lir.IntType(32),
lir.IntType(64)])
fn_setitem = builder.module.get_or_insert_function(
fnty, name="setitem_string_array")
# kind doesn't matter since input is ASCII
kind = context.get_constant(types.int32, -1)
is_ascii = context.get_constant(types.int32, 1)
builder.call(fn_setitem, [string_array.offsets, string_array.data,
| |
to match input from function
# The second piece of information this script returns is the number of
# genes with >m mutations
for j in range(max_muts_per_gene_to_track):
expectedNumberOfGenesWithNmutations[i,j] = sum( sim_mutspergene >= j+1 ) # +1 due to 0-based
return [expectedNumberGenesMultipleMutations, expectedNumberOfGenesWithNmutations ]
def codon_composition_table(allbases, allcodons):
    """Build a table of base counts (columns) for each codon (rows).

    Entry [i, j] is the number of times base ``allbases[j]`` occurs in
    codon ``allcodons[i]``.
    """
    table = np.zeros((len(allcodons), len(allbases)), dtype=int)
    for row, codon in enumerate(allcodons):
        table[row] = [codon.count(base) for base in allbases]
    return table
def codon_mutation_table(allmuts, allcodons, codon_all_dict):
    """Table of probabilities that each mutation is nonsynonymous.

    Entry [i, j] is the probability that mutation ``allmuts[j]`` changes
    the amino acid encoded by codon ``allcodons[i]`` (NaN when the
    mutation's source base does not occur in the codon).
    """
    table = np.zeros((len(allcodons), len(allmuts)))
    for row, codon in enumerate(allcodons):
        table[row] = [prob_nonsyn_codon_mutation(codon, mut, codon_all_dict)
                      for mut in allmuts]
    return table
def prob_nonsyn_codon_mutation(codon, mut, codon_all_dict):
    """Probability that mutation *mut* is nonsynonymous on *codon*.

    *mut* is a two-character string, e.g. 'AT' meaning A -> T.  The
    probability is the fraction of occurrences of ``mut[0]`` in the codon
    whose substitution by ``mut[1]`` changes the encoded amino acid.
    Returns NaN when ``mut[0]`` does not occur in the codon at all.
    """
    original_aa = codon_all_dict[codon]
    # Positions of the codon that carry the mutation's source base.
    positions = [pos for pos, base in enumerate(codon) if base == mut[0]]
    if not positions:
        return float('nan')
    nonsyn = 0
    for pos in positions:
        mutated_codon = codon[:pos] + mut[1] + codon[pos + 1:]
        if codon_all_dict[mutated_codon] != original_aa:
            nonsyn += 1
    return nonsyn / len(positions)
def codons_in_genome(annotation_genes, allcodons):
    """Probability of each codon over all CDS/gene records of the genome.

    Parameters
    ----------
    annotation_genes : list of pandas.DataFrame
        Per-contig annotation tables with 'type', 'sequence' and
        'codon_start' columns.
    allcodons : array-like of str
        Codons to tally; defines the order of the returned values.

    Returns
    -------
    numpy.ndarray
        Codon probabilities (summing to 1), ordered like *allcodons*.
    """
    annotations = pd.concat(annotation_genes, sort=False)
    coding = annotations.loc[annotations['type'].isin(['CDS', 'gene'])]
    counts = np.zeros(len(allcodons), dtype=int)
    for _, record in coding.iterrows():
        seq = str(record['sequence'])
        # NOTE(review): codon_start is used as a 0-based codon offset here;
        # GFF codon_start/frame is 1-based -- confirm upstream normalization.
        offset = record['codon_start'] * 3
        tally = collections.Counter(seq[k:k + 3] for k in range(offset, len(seq), 3))
        for idx, codon in enumerate(allcodons):
            counts[idx] += tally[codon]
    return counts / counts.sum()
def mutation_probability(params_dict, annotation_genes):
    """Estimate the genome-wide probability that a substitution is nonsynonymous.

    Combines (a) the mutational spectrum (observed, or uniform when none is
    supplied), (b) codon usage on the reference genome, (c) the base
    composition of each codon, and (d) the per-codon probability that a given
    base change is nonsynonymous. Based on arolyn's matlab script (Feb 2019).

    Parameters
    ----------
    params_dict : dict
        Must contain 'substitution_spectrum': path to a pickled 12-element
        spectrum ordered like ``allmuts`` below, or None for a uniform one.
    annotation_genes
        Gene annotations, passed through to ``codons_in_genome`` to tally
        codon usage on the reference genome.

    Returns
    -------
    float
        Probability that a random mutation (under the spectrum) is
        nonsynonymous.
    """
    # Alphabet and the 12 directed substitutions ('AT' denotes A -> T).
    allbases = np.array(['A', 'T', 'G', 'C'])
    allmuts = np.array(['AT', 'AG', 'AC', 'TA', 'TG', 'TC',
                        'GA', 'GT', 'GC', 'CA', 'CT', 'CG'])
    # All 64 codons: the 61 coding codons plus the stop codons.
    standard_codon_table = CodonTable.unambiguous_dna_by_id[1]
    allcodons = np.array(list(standard_codon_table.forward_table.keys()), dtype=object)
    allcodons = np.append(allcodons, np.array(standard_codon_table.stop_codons, dtype=object))
    # codon -> amino acid, with '*' standing in for stop codons.
    codon_all_dict = {c: standard_codon_table.forward_table.get(c, "*") for c in allcodons}
    # Rows (64) = codons; columns (4) = count of A/T/G/C in each codon.
    codoncompositiontable = codon_composition_table(allbases, allcodons)
    # Rows (64) = codons; columns (12) = mutations;
    # entry = P(nonsynonymous | mutation, codon), nan where the mutation's
    # source base does not occur in the codon.
    codonnonsyntable = codon_mutation_table(allmuts, allcodons, codon_all_dict)
    # Mutational spectrum ordered like allmuts; strand-agnostic, so the
    # 12 entries collapse into 6 complementary pairs:
    # AT/TA, AC/TG, AG/TC, GC/CG, GT/CA, GA/CT.
    if params_dict['substitution_spectrum'] is None:
        # Uniform fallback; replace with a time-stratified observed spectrum
        # based on all samples when available.
        mutationalspectrum = [1 / 12] * 12
    else:
        with open(params_dict['substitution_spectrum'], 'rb') as afile:
            mutationalspectrum = pickle.load(afile)
        print('Observed substitution spectrum loaded')
    # Codon usage on the reference genome, ordered like allcodons.
    codondistribution = codons_in_genome(annotation_genes, allcodons)
    # Accumulate P(nonsynonymous) over all possible mutations, weighting by
    # mutation spectrum, codon abundance, and base composition of codons.
    probnonsyn = 0
    for i, mut in enumerate(allmuts):
        prob_base_base = mutationalspectrum[i]  # P(this substitution occurs)
        base = mut[0]  # source base that gets mutated, e.g. 'A' in 'AT'
        baseindex = np.where(allbases == base)
        # Number of occurrences of the source base in each codon.
        basecodonoccurrences = codoncompositiontable[:, baseindex].flatten()
        # Codons that actually contain the source base can undergo this mutation.
        basecodonoccurrences_bool = basecodonoccurrences > 0
        # P(mutation lands on a codon) ~ base count in codon * codon abundance.
        basecountincodon = basecodonoccurrences[basecodonoccurrences_bool]
        probcodonongenome = codondistribution[basecodonoccurrences_bool]
        probmutoncodon = basecountincodon * probcodonongenome
        # Renormalize so the probabilities over relevant codons sum to 1.
        probmutoncodon = probmutoncodon / sum(probmutoncodon)
        # P(nonsynonymous) at each relevant codon for this mutation.
        probmutnonsynoncodon = codonnonsyntable[:, i][basecodonoccurrences_bool]
        # Contribution of this mutation to the overall probability.
        probnonsyn += prob_base_base * sum(probmutoncodon * probmutnonsynoncodon)
    print('Probability of nonsynonymous mutation across genome: ' + str(probnonsyn))
    return probnonsyn  # Prob for N occurring
def parallel_evo_module(goodpos2use,contig_positions,annotation_mutations, annotation_genes, params_dict,plot=True):
''' Module to calculate parallel evolution
# Test excess of genes with multiple mutations (thresholds for candidates defined in parameters dict )
# Test excess of NonSyn (dNdS) in candidate genes compared to expectation given reference genome
# NOTE: dNdS uses simple substitution model (equal P per mut), which should be updated based on observations '''
print('Parallel evolution inference.')
# Find mutations that are adjacent to each other.
# True for any mutation that is followed by an adjacent mutation, False if not
# keep trailing bases of adjacent sets (2+)
chr_pos_gp = contig_positions[goodpos2use,]
bool_adjacent_mut = np.full( chr_pos_gp.shape[0] , False, dtype=bool) #mutated_genes_pos = []; # keep track of the positions of each event in the table
for i in range(chr_pos_gp.shape[0]):
chr = chr_pos_gp[i,0]
pos = chr_pos_gp[i,1]
if (i+1) <= (chr_pos_gp.shape[0]-1): # (chr_pos_gp.shape[0]-1) bcs shape[0] not 0-based
if chr == chr_pos_gp[i+1,0] and (pos+1 == chr_pos_gp[i+1,1]):
bool_adjacent_mut[i] = True
# get info for candidate genes
mutated_genes = np.zeros(0,dtype=int); # keep track of gene number
mutated_genes_tally = np.zeros(0,dtype=int); # keep track of how many SNPs there are on this gene
mutated_genes_lengths = np.zeros(0,dtype=int); # keep track of the length of this gene
locustags_all = np.zeros(0,dtype=object) # record the locustag
orthologtags_all = np.zeros(0,dtype=object) # record the orthologtags
for i, row in annotation_mutations.iterrows():
if bool_adjacent_mut[i] == False: # ignore leading adjacent mutations
gene_num_global = row['gene_num_global']
if gene_num_global != 0.5: # non-genic 0.5
if gene_num_global in mutated_genes:
mutated_genes_tally[-1] = mutated_genes_tally[-1] + 1
else:
mutated_genes = np.append( mutated_genes , gene_num_global )
mutated_genes_tally = np.append( mutated_genes_tally , 1 )
mutated_genes_lengths = np.append( mutated_genes_lengths , (row['loc2']-row['loc1']) )
locustags_all = np.append(locustags_all , row['locustag'])
orthologtags_all = np.append(orthologtags_all , row['orthologtag'])
mutated_genes_tally_perGeneLen = mutated_genes_tally/mutated_genes_lengths;
#% Define candidates for selection
mutation_number_threshold = params_dict['Min_num_mutations_cand']; # minimum number of mutations per gene
mutation_density_threshold = params_dict['Min_mutation_density_cand']; # minimum number of mutations per 1000 bp
mutated_genes_of_interest = ( mutated_genes_tally >= mutation_number_threshold) & (mutated_genes_tally_perGeneLen >= mutation_density_threshold | |
test_post_ldp_nr(self, rnd_img):
"""
POST a resource with binary payload and verify checksums.
"""
rnd_img['content'].seek(0)
resp = self.client.post('/ldp/', data=rnd_img['content'],
headers={
'slug': 'ldpnr04',
'Content-Type': 'image/png',
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert resp.status_code == 201
resp = self.client.get(
'/ldp/ldpnr04', headers={'accept' : 'image/png'})
assert resp.status_code == 200
assert sha1(resp.data).hexdigest() == rnd_img['hash']
def test_post_slug(self):
"""
Verify that a POST with slug results in the expected URI only if the
resource does not exist already.
"""
slug01_resp = self.client.post('/ldp', headers={'slug' : 'slug01'})
assert slug01_resp.status_code == 201
assert slug01_resp.headers['location'] == \
g.webroot + '/slug01'
slug02_resp = self.client.post('/ldp', headers={'slug' : 'slug01'})
assert slug02_resp.status_code == 201
assert slug02_resp.headers['location'] != \
g.webroot + '/slug01'
def test_post_404(self):
"""
Verify that a POST to a non-existing parent results in a 404.
"""
assert self.client.post('/ldp/{}'.format(uuid4()))\
.status_code == 404
def test_post_409(self, rnd_img):
"""
Verify that you cannot POST to a binary resource.
"""
rnd_img['content'].seek(0)
self.client.put('/ldp/post_409', data=rnd_img['content'], headers={
'Content-Disposition' : 'attachment; filename={}'.format(
rnd_img['filename'])})
assert self.client.post('/ldp/post_409').status_code == 409
    def test_patch_root(self):
        """
        Test patching the root node.

        Applies a SPARQL update inserting dc:title "Hello" to the repository
        root, then checks the triple is present in the returned graph.
        """
        path = '/ldp/'
        # Initial GET of the root before patching — presumably ensures the
        # root resource exists; TODO confirm whether this is required.
        self.client.get(path)
        uri = g.webroot + '/'
        with open('tests/data/sparql_update/simple_insert.sparql') as data:
            resp = self.client.patch(path,
                    data=data,
                    headers={'content-type' : 'application/sparql-update'})
        assert resp.status_code == 204
        # Re-fetch and verify the inserted triple is in the stored graph.
        resp = self.client.get(path)
        gr = Graph().parse(data=resp.data, format='text/turtle')
        assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Hello') ]
def test_patch(self):
"""
Test patching a resource.
"""
path = '/ldp/test_patch01'
self.client.put(path, content_type='text/turtle')
uri = g.webroot + '/test_patch01'
with open('tests/data/sparql_update/simple_insert.sparql') as data:
resp = self.client.patch(path,
data=data,
headers={'content-type' : 'application/sparql-update'})
assert resp.status_code == 204
resp = self.client.get(path)
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Hello') ]
self.client.patch(path,
data=open('tests/data/sparql_update/delete+insert+where.sparql'),
headers={'content-type' : 'application/sparql-update'})
resp = self.client.get(path)
gr = Graph().parse(data=resp.data, format='text/turtle')
assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Ciao') ]
    def test_patch_no_single_subject(self):
        """
        Test patching a resource violating the single-subject rule.

        Three updates are attempted: one with a foreign subject, one with the
        resource's own absolute URI, and one with a fragment URI. All are
        currently expected to succeed (204) — see the note below.
        """
        path = '/ldp/test_patch_ssr'
        self.client.put(path, content_type='text/turtle')
        uri = g.webroot + '/test_patch_ssr'
        # Update whose subject is unrelated to the target resource.
        nossr_qry = 'INSERT { <http://bogus.org> a <urn:ns:A> . } WHERE {}'
        # Update using the resource's own absolute URI as subject.
        abs_qry = 'INSERT {{ <{}> a <urn:ns:A> . }} WHERE {{}}'.format(uri)
        # Update using a fragment of the resource URI as subject.
        frag_qry = 'INSERT {{ <{}#frag> a <urn:ns:A> . }} WHERE {{}}'\
                .format(uri)
        # @TODO Leave commented until a decision is made about SSR.
        # NOTE(review): all three currently assert 204; if single-subject
        # enforcement is enabled later, the nossr_qry case should fail.
        assert self.client.patch(
                path, data=nossr_qry,
                headers={'content-type': 'application/sparql-update'}
                ).status_code == 204
        assert self.client.patch(
                path, data=abs_qry,
                headers={'content-type': 'application/sparql-update'}
                ).status_code == 204
        assert self.client.patch(
                path, data=frag_qry,
                headers={'content-type': 'application/sparql-update'}
                ).status_code == 204
    def test_patch_ldp_nr_metadata(self):
        """
        Test patching a LDP-NR metadata resource from the fcr:metadata URI.

        Inserts dc:title "Hello", then replaces it with "Ciao" via a
        DELETE+INSERT+WHERE update, checking the metadata graph each time.
        """
        # NOTE(review): relies on /ldp/ldpnr01 having been created by an
        # earlier test in this class — confirm test ordering.
        path = '/ldp/ldpnr01'
        with open('tests/data/sparql_update/simple_insert.sparql') as data:
            self.client.patch(path + '/fcr:metadata',
                    data=data,
                    headers={'content-type' : 'application/sparql-update'})
        resp = self.client.get(path + '/fcr:metadata')
        assert resp.status_code == 200
        uri = g.webroot + '/ldpnr01'
        gr = Graph().parse(data=resp.data, format='text/turtle')
        assert gr[URIRef(uri) : nsc['dc'].title : Literal('Hello')]
        # Second pass: swap the title via DELETE+INSERT+WHERE.
        with open(
                'tests/data/sparql_update/delete+insert+where.sparql') as data:
            patch_resp = self.client.patch(path + '/fcr:metadata',
                    data=data,
                    headers={'content-type' : 'application/sparql-update'})
        assert patch_resp.status_code == 204
        resp = self.client.get(path + '/fcr:metadata')
        assert resp.status_code == 200
        gr = Graph().parse(data=resp.data, format='text/turtle')
        assert gr[ URIRef(uri) : nsc['dc'].title : Literal('Ciao') ]
def test_patch_ldpnr(self):
"""
Verify that a direct PATCH to a LDP-NR results in a 415.
"""
with open(
'tests/data/sparql_update/delete+insert+where.sparql') as data:
patch_resp = self.client.patch('/ldp/ldpnr01',
data=data,
headers={'content-type': 'application/sparql-update'})
assert patch_resp.status_code == 415
    def test_patch_invalid_mimetype(self, rnd_img):
        """
        Verify that a PATCH using anything other than an
        `application/sparql-update` MIME type results in an error.

        Both an LDP-NR metadata resource and an LDP-RS are exercised;
        each must answer 415 Unsupported Media Type.
        """
        self.client.put(
            '/ldp/test_patch_invalid_mimetype', content_type='text/turtle')
        rnd_img['content'].seek(0)
        # PATCH binary data against an LDP-NR metadata endpoint.
        ldpnr_resp = self.client.patch('/ldp/ldpnr01/fcr:metadata',
                data=rnd_img,
                headers={'content-type' : 'image/jpeg'})
        # PATCH plain text against an LDP-RS.
        ldprs_resp = self.client.patch('/ldp/test_patch_invalid_mimetype',
                data=b'Hello, I\'m not a SPARQL update.',
                headers={'content-type' : 'text/plain'})
        assert ldprs_resp.status_code == ldpnr_resp.status_code == 415
def test_patch_srv_mgd_pred(self, rnd_img):
"""
Verify that adding or removing a server-managed predicate fails.
"""
uid = '/test_patch_sm_pred'
path = f'/ldp{uid}'
self.client.put(path, content_type='text/turtle')
self.client.put(path + '/child1', content_type='text/turtle')
uri = g.webroot + uid
ins_qry1 = f'INSERT {{ <> <{nsc["ldp"].contains}> <http://bogus.com/ext1> . }} WHERE {{}}'
ins_qry2 = (
f'INSERT {{ <> <{nsc["fcrepo"].created}>'
f'"2019-04-01T05:57:36.899033+00:00"^^<{nsc["xsd"].dateTime}> . }}'
'WHERE {}'
)
# The following won't change the graph so it does not raise an error.
ins_qry3 = f'INSERT {{ <> a <{nsc["ldp"].Container}> . }} WHERE {{}}'
del_qry1 = (
f'DELETE {{ <> <{nsc["ldp"].contains}> ?o . }} '
f'WHERE {{ <> <{nsc["ldp"].contains}> ?o . }}'
)
del_qry2 = f'DELETE {{ <> a <{nsc["ldp"].Container}> . }} WHERE {{}}'
# No-op as ins_qry3
del_qry3 = (
f'DELETE {{ <> a <{nsc["ldp"].DirectContainer}> .}} '
'WHERE {}'
)
assert self.client.patch(
path, data=ins_qry1,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=ins_qry1,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=ins_qry2,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=ins_qry2,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=ins_qry3,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
assert self.client.patch(
path, data=del_qry1,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=del_qry1,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=del_qry2,
headers={'content-type': 'application/sparql-update'}
).status_code == 412
assert self.client.patch(
path, data=ins_qry2,
headers={
'content-type': 'application/sparql-update',
'prefer': 'handling=lenient',
}
).status_code == 204
assert self.client.patch(
path, data=del_qry3,
headers={'content-type': 'application/sparql-update'}
).status_code == 204
def test_delete(self):
"""
Test delete response codes.
"""
self.client.put('/ldp/test_delete01')
delete_resp = self.client.delete('/ldp/test_delete01')
assert delete_resp.status_code == 204
bogus_delete_resp = self.client.delete('/ldp/test_delete101')
assert bogus_delete_resp.status_code == 404
    def test_tombstone(self):
        """
        Test tombstone behaviors.

        A deleted resource answers 410 Gone with a Link header pointing at
        its tombstone; the tombstone itself rejects GET and PUT (405),
        accepts DELETE (204), after which the resource is a plain 404.
        For POST on a tombstone, check `test_resurrection`.
        """
        # NOTE(review): depends on test_delete having deleted
        # /ldp/test_delete01 earlier — test-order dependent.
        tstone_resp = self.client.get('/ldp/test_delete01')
        assert tstone_resp.status_code == 410
        assert tstone_resp.headers['Link'] == \
                '<{}/test_delete01/fcr:tombstone>; rel="hasTombstone"'\
                .format(g.webroot)
        tstone_path = '/ldp/test_delete01/fcr:tombstone'
        assert self.client.get(tstone_path).status_code == 405
        assert self.client.put(tstone_path).status_code == 405
        assert self.client.delete(tstone_path).status_code == 204
        # Once the tombstone is removed the resource is simply not found.
        assert self.client.get('/ldp/test_delete01').status_code == 404
    def test_delete_recursive(self):
        """
        Test response codes for resources deleted recursively and their
        tombstones.

        Deleting a parent must also delete all descendants; descendants
        answer 410 like the parent but carry no tombstone Link header.
        """
        child_suffixes = ('a', 'a/b', 'a/b/c', 'a1', 'a1/b1')
        self.client.put('/ldp/test_delete_recursive01')
        for cs in child_suffixes:
            self.client.put('/ldp/test_delete_recursive01/{}'.format(cs))
        assert self.client.delete(
                '/ldp/test_delete_recursive01').status_code == 204
        tstone_resp = self.client.get('/ldp/test_delete_recursive01')
        assert tstone_resp.status_code == 410
        assert tstone_resp.headers['Link'] == \
                '<{}/test_delete_recursive01/fcr:tombstone>; rel="hasTombstone"'\
                .format(g.webroot)
        # Children share the parent's 410 status but expose no tombstone.
        for cs in child_suffixes:
            child_tstone_resp = self.client.get(
                    '/ldp/test_delete_recursive01/{}'.format(cs))
            assert child_tstone_resp.status_code == tstone_resp.status_code
            assert 'Link' not in child_tstone_resp.headers.keys()
    def test_put_fragments(self):
        """
        Test the correct handling of fragment URIs on PUT and GET.

        A Turtle document containing fragment-URI subjects is PUT, then the
        stored graph is checked for a triple whose subject is a fragment of
        the resource URI.
        """
        with open('tests/data/fragments.ttl', 'rb') as f:
            self.client.put(
                '/ldp/test_fragment01', content_type='text/turtle', data=f)
        rsp = self.client.get('/ldp/test_fragment01')
        gr = Graph().parse(data=rsp.data, format='text/turtle')
        assert gr[
            URIRef(g.webroot + '/test_fragment01#hash1')
            : URIRef('http://ex.org/p2') : URIRef('http://ex.org/o2')]
    def test_patch_fragments(self):
        """
        Test the correct handling of fragment URIs on PATCH.

        A SPARQL update inserts a triple with a fragment-URI subject; a
        second update deletes it. The stored graph is checked after each.
        """
        self.client.put('/ldp/test_fragment_patch', content_type='text/turtle')
        with open('tests/data/fragments_insert.sparql', 'rb') as f:
            self.client.patch(
                '/ldp/test_fragment_patch',
                content_type='application/sparql-update', data=f)
        ins_rsp = self.client.get('/ldp/test_fragment_patch')
        ins_gr = Graph().parse(data=ins_rsp.data, format='text/turtle')
        assert ins_gr[
            URIRef(g.webroot + '/test_fragment_patch#hash1234')
            : URIRef('http://ex.org/p3') : URIRef('http://ex.org/o3')]
        # Delete the fragment triple again and verify it is gone.
        with open('tests/data/fragments_delete.sparql', 'rb') as f:
            self.client.patch(
                '/ldp/test_fragment_patch',
                headers={
                    'Content-Type' : 'application/sparql-update',
                },
                data=f
            )
        del_rsp = self.client.get('/ldp/test_fragment_patch')
        del_gr = Graph().parse(data=del_rsp.data, format='text/turtle')
        assert not del_gr[
            URIRef(g.webroot + '/test_fragment_patch#hash1234')
            : URIRef('http://ex.org/p3') : URIRef('http://ex.org/o3')]
@pytest.mark.usefixtures('client_class')
@pytest.mark.usefixtures('db')
class TestMimeType:
    """
    Test ``Accept`` headers and input & output formats.
    """
    def test_accept(self):
        """
        Verify content negotiation for the response serialization.

        `accept_list` is a set of (Accept header value, expected response
        MIME type) pairs; unknown types fall back to text/turtle, and
        q-values pick the highest-ranked supported type.
        """
        accept_list = {
            ('', 'text/turtle'),
            ('text/turtle', 'text/turtle'),
            ('application/rdf+xml', 'application/rdf+xml'),
            ('application/n-triples', 'application/n-triples'),
            ('application/bogus', 'text/turtle'),
            (
                'application/rdf+xml;q=0.5,application/n-triples;q=0.7',
                'application/n-triples'),
            (
                'application/rdf+xml;q=0.5,application/bogus;q=0.7',
                'application/rdf+xml'),
            ('application/rdf+xml;q=0.5,text/n3;q=0.7', 'text/n3'),
            (
                'application/rdf+xml;q=0.5,application/ld+json;q=0.7',
                'application/ld+json'),
        }
        for mimetype, fmt in accept_list:
            rsp = self.client.get('/ldp', headers={'Accept': mimetype})
            assert rsp.mimetype == fmt
            # The body must parse in the negotiated format and describe
            # the repository root.
            gr = Graph(identifier=g.webroot + '/').parse(
                    data=rsp.data, format=fmt)
            assert nsc['fcrepo'].RepositoryRoot in set(gr.objects())
    def test_provided_rdf(self):
        """
        Test several input RDF serialization formats.

        The same single-triple graph is PUT in each format; the stored
        resource must contain the triple regardless of input format.
        """
        self.client.get('/ldp')
        gr = Graph()
        gr.add((
            URIRef(g.webroot + '/test_mimetype'),
            nsc['dcterms'].title, Literal('Test MIME type.')))
        test_list = {
            'application/n-triples',
            'application/rdf+xml',
            'text/n3',
            'text/turtle',
            'application/ld+json',
        }
        for mimetype in test_list:
            rdf_data = gr.serialize(format=mimetype)
            self.client.put(
                '/ldp/test_mimetype', data=rdf_data, content_type=mimetype)
            rsp = self.client.get('/ldp/test_mimetype')
            rsp_gr = Graph(identifier=g.webroot + '/test_mimetype').parse(
                    data=rsp.data, format='text/turtle')
            assert (
                URIRef(g.webroot + '/test_mimetype'),
                nsc['dcterms'].title, Literal('Test MIME type.')) in rsp_gr
@pytest.mark.usefixtures('client_class')
class TestDigestHeaders:
"""
Test Digest and ETag headers.
"""
def test_etag_digest(self):
"""
Verify ETag and Digest headers on creation.
The headers must correspond to the checksum of the binary content.
"""
uid = '/test_etag1'
path = '/ldp' + uid
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content)
put_rsp = self.client.put(
path, data=content, content_type='text/plain')
assert content_cksum.hexdigest() in \
put_rsp.headers.get('etag').split(',')
assert put_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum.digest()).decode()
get_rsp = self.client.get(path)
assert content_cksum.hexdigest() in \
put_rsp.headers.get('etag').split(',')
assert get_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum.digest()).decode()
def test_etag_ident(self):
"""
Verify that two resources with the same content yield identical ETags.
"""
path1 = f'/ldp/{uuid4()}'
path2 = f'/ldp/{uuid4()}'
content = uuid4().bytes
content_cksum = hashlib.new(digest_algo, content)
self.client.put(path1, data=content, content_type='text/plain')
self.client.put(path2, data=content, content_type='text/plain')
get_rsp1 = self.client.get(path1)
get_rsp2 = self.client.get(path2)
assert get_rsp1.headers.get('etag') == get_rsp2.headers.get('etag')
assert get_rsp1.headers.get('digest') == get_rsp2.headers.get('digest')
def test_etag_diff(self):
"""
Verify that two resources with different content yield | |
m.b264 <= 1)
# Auto-generated Pyomo constraint statements (c494-c634). Each has the form
# b_i - b_j + b_k <= 1 over binary variables; do not edit by hand —
# regenerate from the source model instead.
m.c494 = Constraint(expr= m.b39 - m.b40 + m.b265 <= 1)
m.c495 = Constraint(expr= m.b39 - m.b41 + m.b266 <= 1)
m.c496 = Constraint(expr= m.b39 - m.b42 + m.b267 <= 1)
m.c497 = Constraint(expr= m.b39 - m.b43 + m.b268 <= 1)
m.c498 = Constraint(expr= m.b39 - m.b44 + m.b269 <= 1)
m.c499 = Constraint(expr= m.b39 - m.b45 + m.b270 <= 1)
m.c500 = Constraint(expr= m.b39 - m.b46 + m.b271 <= 1)
m.c501 = Constraint(expr= m.b39 - m.b47 + m.b272 <= 1)
m.c502 = Constraint(expr= m.b40 - m.b41 + m.b273 <= 1)
m.c503 = Constraint(expr= m.b40 - m.b42 + m.b274 <= 1)
m.c504 = Constraint(expr= m.b40 - m.b43 + m.b275 <= 1)
m.c505 = Constraint(expr= m.b40 - m.b44 + m.b276 <= 1)
m.c506 = Constraint(expr= m.b40 - m.b45 + m.b277 <= 1)
m.c507 = Constraint(expr= m.b40 - m.b46 + m.b278 <= 1)
m.c508 = Constraint(expr= m.b40 - m.b47 + m.b279 <= 1)
m.c509 = Constraint(expr= m.b41 - m.b42 + m.b280 <= 1)
m.c510 = Constraint(expr= m.b41 - m.b43 + m.b281 <= 1)
m.c511 = Constraint(expr= m.b41 - m.b44 + m.b282 <= 1)
m.c512 = Constraint(expr= m.b41 - m.b45 + m.b283 <= 1)
m.c513 = Constraint(expr= m.b41 - m.b46 + m.b284 <= 1)
m.c514 = Constraint(expr= m.b41 - m.b47 + m.b285 <= 1)
m.c515 = Constraint(expr= m.b42 - m.b43 + m.b286 <= 1)
m.c516 = Constraint(expr= m.b42 - m.b44 + m.b287 <= 1)
m.c517 = Constraint(expr= m.b42 - m.b45 + m.b288 <= 1)
m.c518 = Constraint(expr= m.b42 - m.b46 + m.b289 <= 1)
m.c519 = Constraint(expr= m.b42 - m.b47 + m.b290 <= 1)
m.c520 = Constraint(expr= m.b43 - m.b44 + m.b291 <= 1)
m.c521 = Constraint(expr= m.b43 - m.b45 + m.b292 <= 1)
m.c522 = Constraint(expr= m.b43 - m.b46 + m.b293 <= 1)
m.c523 = Constraint(expr= m.b43 - m.b47 + m.b294 <= 1)
m.c524 = Constraint(expr= m.b44 - m.b45 + m.b295 <= 1)
m.c525 = Constraint(expr= m.b44 - m.b46 + m.b296 <= 1)
m.c526 = Constraint(expr= m.b44 - m.b47 + m.b297 <= 1)
m.c527 = Constraint(expr= m.b45 - m.b46 + m.b298 <= 1)
m.c528 = Constraint(expr= m.b45 - m.b47 + m.b299 <= 1)
m.c529 = Constraint(expr= m.b46 - m.b47 + m.b300 <= 1)
m.c530 = Constraint(expr= m.b48 - m.b49 + m.b70 <= 1)
m.c531 = Constraint(expr= m.b48 - m.b50 + m.b71 <= 1)
m.c532 = Constraint(expr= m.b48 - m.b51 + m.b72 <= 1)
m.c533 = Constraint(expr= m.b48 - m.b52 + m.b73 <= 1)
m.c534 = Constraint(expr= m.b48 - m.b53 + m.b74 <= 1)
m.c535 = Constraint(expr= m.b48 - m.b54 + m.b75 <= 1)
m.c536 = Constraint(expr= m.b48 - m.b55 + m.b76 <= 1)
m.c537 = Constraint(expr= m.b48 - m.b56 + m.b77 <= 1)
m.c538 = Constraint(expr= m.b48 - m.b57 + m.b78 <= 1)
m.c539 = Constraint(expr= m.b48 - m.b58 + m.b79 <= 1)
m.c540 = Constraint(expr= m.b48 - m.b59 + m.b80 <= 1)
m.c541 = Constraint(expr= m.b48 - m.b60 + m.b81 <= 1)
m.c542 = Constraint(expr= m.b48 - m.b61 + m.b82 <= 1)
m.c543 = Constraint(expr= m.b48 - m.b62 + m.b83 <= 1)
m.c544 = Constraint(expr= m.b48 - m.b63 + m.b84 <= 1)
m.c545 = Constraint(expr= m.b48 - m.b64 + m.b85 <= 1)
m.c546 = Constraint(expr= m.b48 - m.b65 + m.b86 <= 1)
m.c547 = Constraint(expr= m.b48 - m.b66 + m.b87 <= 1)
m.c548 = Constraint(expr= m.b48 - m.b67 + m.b88 <= 1)
m.c549 = Constraint(expr= m.b48 - m.b68 + m.b89 <= 1)
m.c550 = Constraint(expr= m.b48 - m.b69 + m.b90 <= 1)
m.c551 = Constraint(expr= m.b49 - m.b50 + m.b91 <= 1)
m.c552 = Constraint(expr= m.b49 - m.b51 + m.b92 <= 1)
m.c553 = Constraint(expr= m.b49 - m.b52 + m.b93 <= 1)
m.c554 = Constraint(expr= m.b49 - m.b53 + m.b94 <= 1)
m.c555 = Constraint(expr= m.b49 - m.b54 + m.b95 <= 1)
m.c556 = Constraint(expr= m.b49 - m.b55 + m.b96 <= 1)
m.c557 = Constraint(expr= m.b49 - m.b56 + m.b97 <= 1)
m.c558 = Constraint(expr= m.b49 - m.b57 + m.b98 <= 1)
m.c559 = Constraint(expr= m.b49 - m.b58 + m.b99 <= 1)
m.c560 = Constraint(expr= m.b49 - m.b59 + m.b100 <= 1)
m.c561 = Constraint(expr= m.b49 - m.b60 + m.b101 <= 1)
m.c562 = Constraint(expr= m.b49 - m.b61 + m.b102 <= 1)
m.c563 = Constraint(expr= m.b49 - m.b62 + m.b103 <= 1)
m.c564 = Constraint(expr= m.b49 - m.b63 + m.b104 <= 1)
m.c565 = Constraint(expr= m.b49 - m.b64 + m.b105 <= 1)
m.c566 = Constraint(expr= m.b49 - m.b65 + m.b106 <= 1)
m.c567 = Constraint(expr= m.b49 - m.b66 + m.b107 <= 1)
m.c568 = Constraint(expr= m.b49 - m.b67 + m.b108 <= 1)
m.c569 = Constraint(expr= m.b49 - m.b68 + m.b109 <= 1)
m.c570 = Constraint(expr= m.b49 - m.b69 + m.b110 <= 1)
m.c571 = Constraint(expr= m.b50 - m.b51 + m.b111 <= 1)
m.c572 = Constraint(expr= m.b50 - m.b52 + m.b112 <= 1)
m.c573 = Constraint(expr= m.b50 - m.b53 + m.b113 <= 1)
m.c574 = Constraint(expr= m.b50 - m.b54 + m.b114 <= 1)
m.c575 = Constraint(expr= m.b50 - m.b55 + m.b115 <= 1)
m.c576 = Constraint(expr= m.b50 - m.b56 + m.b116 <= 1)
m.c577 = Constraint(expr= m.b50 - m.b57 + m.b117 <= 1)
m.c578 = Constraint(expr= m.b50 - m.b58 + m.b118 <= 1)
m.c579 = Constraint(expr= m.b50 - m.b59 + m.b119 <= 1)
m.c580 = Constraint(expr= m.b50 - m.b60 + m.b120 <= 1)
m.c581 = Constraint(expr= m.b50 - m.b61 + m.b121 <= 1)
m.c582 = Constraint(expr= m.b50 - m.b62 + m.b122 <= 1)
m.c583 = Constraint(expr= m.b50 - m.b63 + m.b123 <= 1)
m.c584 = Constraint(expr= m.b50 - m.b64 + m.b124 <= 1)
m.c585 = Constraint(expr= m.b50 - m.b65 + m.b125 <= 1)
m.c586 = Constraint(expr= m.b50 - m.b66 + m.b126 <= 1)
m.c587 = Constraint(expr= m.b50 - m.b67 + m.b127 <= 1)
m.c588 = Constraint(expr= m.b50 - m.b68 + m.b128 <= 1)
m.c589 = Constraint(expr= m.b50 - m.b69 + m.b129 <= 1)
m.c590 = Constraint(expr= m.b51 - m.b52 + m.b130 <= 1)
m.c591 = Constraint(expr= m.b51 - m.b53 + m.b131 <= 1)
m.c592 = Constraint(expr= m.b51 - m.b54 + m.b132 <= 1)
m.c593 = Constraint(expr= m.b51 - m.b55 + m.b133 <= 1)
m.c594 = Constraint(expr= m.b51 - m.b56 + m.b134 <= 1)
m.c595 = Constraint(expr= m.b51 - m.b57 + m.b135 <= 1)
m.c596 = Constraint(expr= m.b51 - m.b58 + m.b136 <= 1)
m.c597 = Constraint(expr= m.b51 - m.b59 + m.b137 <= 1)
m.c598 = Constraint(expr= m.b51 - m.b60 + m.b138 <= 1)
m.c599 = Constraint(expr= m.b51 - m.b61 + m.b139 <= 1)
m.c600 = Constraint(expr= m.b51 - m.b62 + m.b140 <= 1)
m.c601 = Constraint(expr= m.b51 - m.b63 + m.b141 <= 1)
m.c602 = Constraint(expr= m.b51 - m.b64 + m.b142 <= 1)
m.c603 = Constraint(expr= m.b51 - m.b65 + m.b143 <= 1)
m.c604 = Constraint(expr= m.b51 - m.b66 + m.b144 <= 1)
m.c605 = Constraint(expr= m.b51 - m.b67 + m.b145 <= 1)
m.c606 = Constraint(expr= m.b51 - m.b68 + m.b146 <= 1)
m.c607 = Constraint(expr= m.b51 - m.b69 + m.b147 <= 1)
m.c608 = Constraint(expr= m.b52 - m.b53 + m.b148 <= 1)
m.c609 = Constraint(expr= m.b52 - m.b54 + m.b149 <= 1)
m.c610 = Constraint(expr= m.b52 - m.b55 + m.b150 <= 1)
m.c611 = Constraint(expr= m.b52 - m.b56 + m.b151 <= 1)
m.c612 = Constraint(expr= m.b52 - m.b57 + m.b152 <= 1)
m.c613 = Constraint(expr= m.b52 - m.b58 + m.b153 <= 1)
m.c614 = Constraint(expr= m.b52 - m.b59 + m.b154 <= 1)
m.c615 = Constraint(expr= m.b52 - m.b60 + m.b155 <= 1)
m.c616 = Constraint(expr= m.b52 - m.b61 + m.b156 <= 1)
m.c617 = Constraint(expr= m.b52 - m.b62 + m.b157 <= 1)
m.c618 = Constraint(expr= m.b52 - m.b63 + m.b158 <= 1)
m.c619 = Constraint(expr= m.b52 - m.b64 + m.b159 <= 1)
m.c620 = Constraint(expr= m.b52 - m.b65 + m.b160 <= 1)
m.c621 = Constraint(expr= m.b52 - m.b66 + m.b161 <= 1)
m.c622 = Constraint(expr= m.b52 - m.b67 + m.b162 <= 1)
m.c623 = Constraint(expr= m.b52 - m.b68 + m.b163 <= 1)
m.c624 = Constraint(expr= m.b52 - m.b69 + m.b164 <= 1)
m.c625 = Constraint(expr= m.b53 - m.b54 + m.b165 <= 1)
m.c626 = Constraint(expr= m.b53 - m.b55 + m.b166 <= 1)
m.c627 = Constraint(expr= m.b53 - m.b56 + m.b167 <= 1)
m.c628 = Constraint(expr= m.b53 - m.b57 + m.b168 <= 1)
m.c629 = Constraint(expr= m.b53 - m.b58 + m.b169 <= 1)
m.c630 = Constraint(expr= m.b53 - m.b59 + m.b170 <= 1)
m.c631 = Constraint(expr= m.b53 - m.b60 + m.b171 <= 1)
m.c632 = Constraint(expr= m.b53 - m.b61 + m.b172 <= 1)
m.c633 = Constraint(expr= m.b53 - m.b62 + m.b173 <= 1)
m.c634 = Constraint(expr= m.b53 - m.b63 + m.b174 <= 1)
m.c635 = Constraint(expr= m.b53 - m.b64 + m.b175 <= | |
<reponame>mt-software-de/wodoo<filename>wodoo/lib_module.py
import sys
import threading
import json
import base64
import subprocess
import inquirer
from git import Repo
import traceback
from datetime import datetime
import shutil
import os
import tempfile
import click
from .tools import get_hash
from .tools import get_directory_hash
from .tools import sync_folder
from .tools import __dcrun
from .tools import __cmd_interactive
from .tools import __get_installed_modules
from . import cli, pass_config, Commands
from .lib_clickhelpers import AliasedGroup
from .tools import _execute_sql
from .tools import get_services
from .tools import __try_to_set_owner
from .tools import measure_time
from pathlib import Path
class UpdateException(Exception): pass
@cli.group(cls=AliasedGroup)
@pass_config
def odoo_module(config):
    # Click command group bundling the odoo-module subcommands below.
    # The body is intentionally empty; subcommands attach via decorators.
    pass
@odoo_module.command(name='abort-upgrade')
@pass_config
def abort_upgrade(config):
    """Reset module states left over from an interrupted upgrade.

    Modules stuck in 'to upgrade' revert to 'installed' and modules stuck
    in 'to install' revert to 'uninstalled', directly in the database.
    """
    click.echo("Aborting upgrade...")
    SQL = """
        UPDATE ir_module_module SET state = 'installed' WHERE state = 'to upgrade';
        UPDATE ir_module_module SET state = 'uninstalled' WHERE state = 'to install';
    """
    _execute_sql(config.get_odoo_conn(), SQL)
def _get_default_modules_to_update():
    """Collect the default set of module names needing an update.

    Combines customs modules flagged 'to_update', uninstalled modules that
    other modules depend on, and installed modules that are outdated.
    May contain duplicates — presumably deduplicated downstream; confirm.
    """
    from .module_tools import Modules, DBModules
    mods = Modules()
    module = mods.get_customs_modules('to_update')
    module += DBModules.get_uninstalled_modules_where_others_depend_on()
    module += DBModules.get_outdated_installed_modules(mods)
    return module
@odoo_module.command(name='update-module-file')
@click.argument('module', nargs=-1, required=True)
def update_module_file(module):
    """Rewrite the manifest file of each given module.

    ``module`` is the tuple of module names collected by click (nargs=-1);
    the CLI parameter name must remain ``module``.
    """
    from .module_tools import Module
    # Use a distinct loop variable instead of shadowing the `module` tuple.
    for module_name in module:
        Module.get_by_name(module_name).update_module_file()
@odoo_module.command(name='run-tests')
@pass_config
@click.pass_context
def run_tests(ctx, config):
    """Run the unit tests listed in the MANIFEST's 'tests' section.

    With -f (force) the database is reset before running and the containers
    are shut down afterwards; without it, tests run against the existing db.
    Failed tests are re-run once with debug logging, and the command exits
    non-zero if any test failed.
    """
    started = datetime.now()
    if not config.devmode and not config.force:
        click.secho("Devmode required to run unit tests. Database will be destroyed.", fg='red')
        sys.exit(-1)
    if not config.force:
        click.secho("Please provide parameter -f - database will be dropped. Otherwise tests are run against existing db. \n\nodoo -f run-tests", fg='yellow')
    from .odoo_config import MANIFEST
    tests = MANIFEST().get('tests', [])
    if not tests:
        click.secho("No test files found!")
        return
    if config.force:
        # Fresh database and code update before testing.
        Commands.invoke(ctx, 'wait_for_container_postgres', missing_ok=True)
        Commands.invoke(ctx, 'reset-db')
        Commands.invoke(ctx, 'update', "", tests=False, no_dangling_check=True)
    from .module_tools import Module
    from .odoo_config import customs_dir
    success, failed = [], []
    for module in tests:
        module = Module.get_by_name(module)
        # Only python test files directly under the module's tests/ folder.
        testfiles = list(module.get_all_files_of_module())
        testfiles = [x for x in testfiles if str(x).startswith("tests/")]
        testfiles = [x for x in testfiles if str(x).endswith('.py')]
        testfiles = [x for x in testfiles if x.name != '__init__.py']
        testfiles = [x for x in testfiles if x.name.startswith("test_")]
        # identify test files and run them, otherwise tests of dependent modules are run
        for file in sorted(testfiles):
            mfpath = module.manifest_path.parent
            file = mfpath.relative_to(customs_dir()) / file
            if config.use_docker:
                params = ['odoo', '/odoolib/unit_test.py', f'{file}']
                click.secho(f"Running test: {file}", fg='yellow', bold=True)
                res = __dcrun(params + ['--log-level=error', '--not-interactive'], returncode=True)
                if res:
                    failed.append(file)
                    # Re-run interactively with debug logging for diagnosis.
                    click.secho(f"Failed, running again with debug on: {file}", fg='red', bold=True)
                    res = __cmd_interactive(*(['run', '--rm'] + params + ['--log-level=debug']))
                else:
                    success.append(file)
    elapsed = datetime.now() - started
    click.secho(f"Time: {elapsed}", fg='yellow')
    # in force-mode shut down
    if config.force:
        Commands.invoke(ctx, 'down', volumes=True)
    if failed:
        click.secho("Tests failed: ", fg='red')
        for mod in failed:
            click.secho(str(mod), fg='red')
        sys.exit(-1)
    else:
        for mod in success:
            click.secho(str(mod), fg='green')
        click.secho("Tests OK", fg='green')
@odoo_module.command(name='download-openupgrade')
@pass_config
@click.option('--version', help="Destination Version", required=True)
@click.pass_context
def download_openupgrade(ctx, config, version):
    """Clone the OCA OpenUpgrade branch for ``version`` into the customs dir.

    Before Odoo 14.0 OpenUpgrade replaced the odoo core ('odoo' folder);
    from 14.0 on it is synced as a separate 'openupgrade' folder.
    """
    from .odoo_config import customs_dir
    # mkdtemp creates the directory securely; tempfile.mktemp (used before)
    # is deprecated and race-prone.
    dir_openupgrade = Path(tempfile.mkdtemp())
    try:
        subprocess.check_call([
            'git', 'clone', '--depth', '1', '--branch', version,
            'https://github.com/OCA/OpenUpgrade',
            dir_openupgrade / 'openupgrade'])
        if float(version) < 14.0:
            destination_path = 'odoo'
        else:
            destination_path = 'openupgrade'
        sync_folder(
            dir_openupgrade / 'openupgrade',
            customs_dir() / destination_path,
            excludes=['.git'],
        )
    finally:
        # Clean the temp clone up even if the clone or sync fails.
        shutil.rmtree(dir_openupgrade)
def _get_outdated_versioned_modules_of_deptree(modules):
    """
    Walks the dependency tree of the given modules and compares each
    dependency's manifest version with the version recorded in the database.
    Yields dependencies whose manifest version is newer than the db version
    (i.e. an update is required). This usually happens after an update of
    the Odoo core.
    """
    from .module_tools import Modules, DBModules, Module
    from .odoo_config import MANIFEST
    mods = Modules()
    for module in modules:
        if module == 'base':
            continue
        for dep in mods.get_module_flat_dependency_tree(Module.get_by_name(module)):
            meta_info = DBModules.get_meta_data(dep.name)
            if not meta_info:
                continue
            version = meta_info['version']
            if not version:
                continue
            try:
                version = tuple([int(x) for x in version.split(".")])
            except Exception:
                click.secho(f"Broken version name in module {meta_info}: {version}", fg='red')
                sys.exit(-1)
            # NOTE(review): `dep` already looks like a Module object
            # (it has .name above); passing it to get_by_name works only if
            # that accepts Module instances too — confirm.
            new_version = Module.get_by_name(dep).manifest_dict.get('version')
            if not new_version:
                continue
            new_version = tuple([int(x) for x in new_version.split('.')])
            if len(new_version) == 2:
                # add odoo version in front
                new_version = tuple([int(x) for x in str(MANIFEST()['version']).split('.')] + list(new_version))
            if new_version > version:
                yield dep
@odoo_module.command()
@click.argument('migration-file', required=True)
@click.argument('mode', required=True)
@click.option('--allow-serie', is_flag=True)
@click.option('--force-version')
@pass_config
@click.pass_context
def marabunta(ctx, config, migration_file, mode, allow_serie, force_version):
    """Run marabunta in the odoo container against the configured database."""
    click.secho("""
 _.._.-..-._
 .-' .' /\\ \\`._
 / / .' `-.\\ `.
 :_.' .. : _.../\\
 | ;___ .-' //\\\\.
 \\ _..._ / `/\\ // \\\\\\
 `-.___.-' /\\ //\\\\ \\\\:
 | //\\V/ :\\\\ \\\\
 \\ \\\\/ \\\\ /\\\\
 `.____.\\\\ \\\\ .' \\\\
 // /\\\\---\\\\-' \\\\
 fsc // // \\\\ \\\\ \\\\
    """, fg='red')
    banner = "================================="
    click.secho(banner, fg='yellow')
    click.secho("MARABUNTA", fg='yellow')
    click.secho(banner, fg='yellow')
    # marabunta command line, mirrored from the container's connection config
    marabunta_args = [
        '--migration-file', '/opt/src/' + migration_file,
        '--database', config.dbname,
        '--db-user', config.db_user,
        '--db-password', config.db_pwd,
        '--db-port', config.db_port,
        '--db-host', config.db_host,
        '--mode', mode,
    ]
    if allow_serie:
        marabunta_args.append("--allow-serie")
    if force_version:
        marabunta_args.extend(["--force-version", force_version])
    return __cmd_interactive(
        *(['run', 'odoo', '/usr/local/bin/marabunta'] + marabunta_args))
@odoo_module.command(help=(
    "If menu items are missing, then recomputing the parent store "
    "can help"
))
@pass_config
@click.pass_context
def recompute_parent_store(ctx, config):
    """Recompute the parent_store (nested-set) columns of every registry model.

    Fix: the two help-string literals concatenated without a separating
    space ("parent storecan help").
    """
    # only meaningful when odoo runs inside docker (lib_shell needs it)
    if not config.use_docker:
        return
    from .lib_control_with_docker import shell as lib_shell
    click.secho("Recomputing parent store...", fg='blue')
    # executed inside the odoo shell; models missing from the registry
    # raise KeyError and are skipped
    lib_shell((
        "for model in self.env['ir.model'].search([]):\n"
        "    try:\n"
        "        obj = self.env[model.model]\n"
        "    except KeyError: pass\n"
        "    else:\n"
        "        obj._parent_store_compute()\n"
        "        env.cr.commit()\n"
    ))
    click.secho("Recompute parent store done.", fg='green')
@odoo_module.command(help=(
    "As the name says: if db was transferred, web-icons are restored"
    " on missing assets"
))
@pass_config
@click.pass_context
def restore_web_icons(ctx, config):
    """Recompute web_icon_data for every menu entry that defines a web_icon."""
    # nothing to do outside a docker setup (lib_shell requires it)
    if not config.use_docker:
        return
    from .lib_control_with_docker import shell as lib_shell
    click.secho("Restoring web icons...", fg='blue')
    lib_shell((
        "for x in self.env['ir.ui.menu'].search([]):\n"
        "    if not x.web_icon: continue\n"
        "    x.web_icon_data = x._compute_web_icon_data(x.web_icon)\n"
        "    env.cr.commit(\n".replace("commit(\n", "commit()\n")
    ))
    click.secho("Restored web icons.", fg='green')
@odoo_module.command()
@click.argument('module', nargs=-1, required=False)
@click.option('--since-git-sha', '-i', default=None, is_flag=False, help="Extracts modules changed since this git sha and updates them")
@click.option('--installed-modules', '-i', default=False, is_flag=True, help="Updates only installed modules")
@click.option('--dangling-modules', '-d', default=False, is_flag=True, help="Updates only dangling modules")
@click.option('--no-update-module-list', '-n', default=False, is_flag=True, help="Does not install/update module list module")
@click.option('--non-interactive', '-I', default=True, is_flag=True, help="Not interactive")
@click.option('--check-install-state', default=True, is_flag=True, help="Check for dangling modules afterwards")
@click.option('--no-restart', default=False, is_flag=True, help="If set, no machines are restarted afterwards")
@click.option('--no-dangling-check', default=False, is_flag=True, help="Not checking for dangling modules")
@click.option('--tests', default=False, is_flag=True, help="Runs tests")
@click.option('--i18n', default=False, is_flag=True, help="Overwrite Translations")
@click.option('--no-install-server-wide-first', default=False, is_flag=True)
@click.option('--no-extra-addons-paths', is_flag=True)
@click.option('-c', '--config-file', default='config_update', help="Specify config file to use, for example config_update")
@click.option('--server-wide-modules')
@click.option('--additional-addons-paths')
@click.option('--uninstall', is_flag=True, help="Executes just uninstallation of modules.")
@pass_config
@click.pass_context
def update(
ctx, config, module,
since_git_sha, dangling_modules, installed_modules,
non_interactive, no_update_module_list, no_install_server_wide_first,
no_extra_addons_paths, no_dangling_check=False, check_install_state=True,
no_restart=True, i18n=False, tests=False,
config_file=False, server_wide_modules=False, additional_addons_paths=False,
uninstall=False
):
"""
Just custom modules are updated, never the base modules (e.g. prohibits adding old stock-locations)
Minimal downtime;
To update all (custom) modules set "all" here
Sample call migration 14.0:
odoo update --no-dangling-check --config-file=config_migration --server-wide-modules=web,openupgrade_framework --additional-addons-paths=openupgrade base
"""
param_module = module
click.secho((
"Started with parameters: \n"
f"no_dangling_check: {no_dangling_check}\n",
f"modules: {module}\n"
))
click.secho("""
_ _ _
| | | | | |
___ __| | ___ ___ _ _ _ __ __| | __ _| |_ ___
/ _ \\ / _` |/ _ \\ / _ \\ | | | | '_ \\ / _` |/ _` | __/ _ \\
| (_) | (_| | (_) | (_) | | |_| | |_) | (_| | (_| | || __/
\\___/ \\__,_|\\___/ \\___/ \\__,_| .__/ \\__,_|\\__,_|\\__\\___|
| |
|_|
""", fg='green')
from .module_tools import Modules, DBModules, Module
# ctx.invoke(module_link)
if config.run_postgres:
Commands.invoke(ctx, 'up', machines=['postgres'], daemon=True)
Commands.invoke(ctx, 'wait_for_container_postgres', missing_ok=True)
def _perform_install(module):
if since_git_sha and module:
raise Exception("Conflict: since-git-sha and modules")
if since_git_sha:
module = list(_get_changed_modules(since_git_sha))
# filter modules to defined ones in MANIFEST
click.secho(f"Following modules change since last sha: {' '.join(module)}")
from .odoo_config import MANIFEST
module = list(filter(lambda x: x in MANIFEST()['install'], module))
click.secho(f"Following modules change since last sha (filtered to manifest): {' '.join(module)}")
if not module:
click.secho("No module update required - exiting.")
return
else:
module = list(filter(lambda x: x, sum(map(
lambda x: x.split(','), module), []))) # '1,2 3' --> ['1', '2', '3']
if not module and not since_git_sha:
module = _get_default_modules_to_update()
outdated_modules = list(map(
lambda x: x.name, set(
_get_outdated_versioned_modules_of_deptree(module))))
if not no_restart:
if config.use_docker:
Commands.invoke(
ctx, 'kill', machines=get_services(config, 'odoo_base'))
if config.run_redis:
Commands.invoke(
ctx, 'up', machines=['redis'], daemon=True)
if config.run_postgres:
Commands.invoke(
ctx, 'up', machines=['postgres'], daemon=True)
Commands.invoke(ctx, 'wait_for_container_postgres')
if not no_dangling_check:
if any(x[1] == 'uninstallable' for x in DBModules.get_dangling_modules()):
for x in DBModules.get_dangling_modules():
click.echo("{}: {}".format(*x[:2]))
if non_interactive or input((
"Uninstallable modules found - "
"shall I set them to 'uninstalled'? [y/N]"
)).lower() == 'y':
_execute_sql(config.get_odoo_conn(), (
"update ir_module_module set state = "
"'uninstalled' where state = 'uninstallable';"
))
if DBModules.get_dangling_modules() and not dangling_modules:
if not no_dangling_check:
Commands.invoke(
ctx, 'show_install_state', suppress_error=True)
input("Abort old upgrade and continue? (Ctrl+c to break)")
ctx.invoke(abort_upgrade)
if installed_modules:
module += __get_installed_modules(config)
if dangling_modules:
module += [x[0] for x in DBModules.get_dangling_modules()]
module = list(filter(bool, module))
if not module:
raise Exception("no modules to update")
click.echo("Run module update")
if config.odoo_update_start_notification_touch_file_in_container:
Path(
config.odoo_update_start_notification_touch_file_in_container).write_text(
datetime.now().strftime("%Y-%m-%d %H:%M:%S")
)
def _technically_update(modules):
try:
modules = list(map(
lambda x: x.name if isinstance(x, Module) else x, modules))
params = [','.join(modules)]
if no_extra_addons_paths:
params += ['--no-extra-addons-paths']
if non_interactive:
params += ['--non-interactive']
if no_install_server_wide_first:
| |
from JumpScale import j
import JumpScale.baselib.remote
import sys
# import importlib
import imp
try:
import ujson as json
except:
import json
import JumpScale.baselib.redis
import copy
import time
import JumpScale.baselib.webdis
from fabric.api import hide
import time
redis=j.clients.redis.getRedisClient("127.0.0.1", 9999)
class ScriptRun():
    """Result record of one jumpscript execution on one node.

    Serialized (via __dict__) into the redis hash 'admin:scriptruns'.
    """

    def __init__(self):
        # runid groups all script runs belonging to one admin session
        self.runid = j.admin.runid
        self.epoch = int(time.time())
        self.error = ""
        self.result = ""
        self.state = "OK"
        self.out = ""
        self.extraArgs = ""
        self.gridname = ""
        self.nodename = ""

    def __str__(self):
        # the record is plain data; its dict is the most useful rendering
        return str(self.__dict__)

    __repr__ = __str__
class JNode():
    """
    One managed node of a grid, addressed as '<gridname>__<name>'.

    Wraps a cuisine/fabric ssh connection (self.cuapi) and records the
    outcome of remote actions in a ScriptRun (self.currentScriptRun).
    Instances are serialized to redis by Admin.setNode().

    NOTE: legacy Python 2 code (`<>`, `print` statements, `except X,e`).
    """
    def __init__(self):
        self.actionsDone={}       # jsname -> epoch of last successful run
        self.lastcheck=0          # epoch of last successful contact, 0 = never/error
        self.gridname=""
        self.name=""
        self.ip=""
        self.host=""
        self.enabled=True
        self.remark=""
        self.roles=[]
        # NOTE(review): '<PASSWORD>' is a redaction placeholder left by a
        # credentials scrubber - not valid code as it stands
        self.passwd=<PASSWORD>
        self._basepath=j.dirs.replaceTxtDirVars(j.application.config.get("admin.basepath"))
        self.cuapi=None           # cuisine ssh api, injected by Admin
        self.args=None            # parsed command line args, injected by Admin
        self.currentScriptRun=None
    def getScriptRun(self):
        """Lazily create and return the ScriptRun for this node."""
        if self.currentScriptRun==None:
            self.currentScriptRun=ScriptRun()
            self.currentScriptRun.gridname=self.gridname
            self.currentScriptRun.nodename=self.name
        return self.currentScriptRun
    def executeCmds(self,cmds,die=True,insandbox=False):
        """Run each non-empty, non-comment line of *cmds* over ssh.

        NOTE(review): *out* is a local copy of scriptRun.out (strings are
        immutable), so the collected output is never written back to the
        script run.
        """
        scriptRun=self.getScriptRun()
        out=scriptRun.out
        for line in cmds.split("\n"):
            if line.strip()<>"" and line[0]<>"#":
                self.log("execcmd",line)
                if insandbox:
                    # run inside the jsbox python sandbox
                    line2="source /opt/jsbox/activate;%s"%line
                else:
                    line2=line
                try:
                    out+="%s\n"%self.cuapi.run(line2)
                except BaseException,e:
                    if die:
                        self.raiseError("execcmd","error execute:%s"%line,e)
    def killProcess(self,filterstr,die=True):
        """kill -9 every remote pid whose `ps ax` line contains *filterstr*."""
        found=self.getPids(filterstr)
        for item in found:
            self.log("killprocess","kill:%s"%item)
            try:
                self.cuapi.run("kill -9 %s"%item)
            except Exception,e:
                if die:
                    self.raiseError("killprocess","kill:%s"%item,e)
    def getPids(self,filterstr,die=True):
        """Return pids of remote processes whose `ps ax` line contains *filterstr*.

        NOTE(review): if `ps ax` fails and die is False, *out* stays
        unbound and the loop below raises NameError.
        """
        self.log("getpids","")
        with hide('output'):
            try:
                out=self.cuapi.run("ps ax")
            except Exception,e:
                if die:
                    self.raiseError("getpids","ps ax",e)
        found=[]
        for line in out.split("\n"):
            if line.strip()<>"":
                if line.find(filterstr)<>-1:
                    line=line.strip()
                    # first column of `ps ax` is the pid
                    found.append(int(line.split(" ")[0]))
        return found
    def jpackageStop(self,name,filterstr,die=True):
        """Stop jpackage *name*, then kill -9 any survivors matching *filterstr*."""
        self.log("jpackagestop","%s (%s)"%(name,filterstr))
        try:
            self.cuapi.run("source /opt/jsbox/activate;jpackage stop -n %s"%name)
        except Exception,e:
            if die:
                self.raiseError("jpackagestop","%s"%name,e)
        found=self.getPids(filterstr)
        if len(found)>0:
            for item in found:
                try:
                    self.cuapi.run("kill -9 %s"%item)
                except:
                    pass
    def jpackageStart(self,name,filterstr,nrtimes=1,retry=1):
        """Start jpackage *name* until *nrtimes* matching processes are running.

        NOTE(review): *die* is referenced below but is not a parameter of
        this method -> NameError if the start command raises.
        """
        found=self.getPids(filterstr)
        self.log("jpackagestart","%s (%s)"%(name,filterstr))
        for i in range(retry):
            if len(found)==nrtimes:
                return
            scriptRun=self.getScriptRun()
            try:
                self.cuapi.run("source /opt/jsbox/activate;jpackage start -n %s"%name)
            except Exception,e:
                if die:
                    self.raiseError("jpackagestart","%s"%name,e)
            time.sleep(1)
            found=self.getPids(filterstr)
        if len(found)<nrtimes:
            self.raiseError("jpackagestart","could not jpackageStart %s"%name)
    def serviceStop(self,name,filterstr):
        """`sudo stop` service *name*, kill -9 leftovers, error if any survive."""
        self.log("servicestop","%s (%s)"%(name,filterstr))
        try:
            self.cuapi.run("sudo stop %s"%name)
        except:
            pass
        found=self.getPids(filterstr)
        scriptRun=self.getScriptRun()
        if len(found)>0:
            for item in found:
                try:
                    self.cuapi.run("kill -9 %s"%item)
                except:
                    pass
        # re-check after the kill pass
        found=self.getPids(filterstr)
        if len(found)>0:
            self.raiseError("servicestop","could not serviceStop %s"%name)
    def serviceStart(self,name,filterstr,die=True):
        """`sudo start` service *name* unless a matching process already runs."""
        self.log("servicestart","%s (%s)"%(name,filterstr))
        found=self.getPids(filterstr)
        if len(found)==0:
            try:
                self.cuapi.run("sudo start %s"%name)
            except:
                pass
            found=self.getPids(filterstr)
        if len(found)==0 and die:
            self.raiseError("servicestart","could not serviceStart %s"%name)
    def serviceReStart(self,name,filterstr):
        """Stop then start the service."""
        self.serviceStop(name,filterstr)
        self.serviceStart(name,filterstr)
    def raiseError(self,action,msg,e=None):
        """Record *msg* on the script run, persist the node and abort.

        NOTE(review): when *e* is given the original *msg* is replaced by
        the formatted stack; setNode is called twice; the error text is
        appended both raw and line-formatted.
        """
        scriptRun=self.getScriptRun()
        scriptRun.state="ERROR"
        if e<>None:
            msg="Stack:\n%s\nError:\n%s\n"%(j.errorconditionhandler.parsePythonErrorObject(e),e)
            scriptRun.state="ERROR"
            scriptRun.error+=msg
        for line in msg.split("\n"):
            toadd="%-10s: %s\n" % (action,line)
            scriptRun.error+=toadd
            print "**ERROR** %-10s:%s"%(self.name,toadd)
        self.lastcheck=0
        j.admin.setNode(self)
        j.admin.setNode(self)
        raise RuntimeError("**ERROR**")
    def log(self,action,msg):
        """Print '<name>:<action>: <line>' for every line of *msg*.

        NOTE(review): *out* is accumulated but never returned or stored.
        """
        out=""
        for line in msg.split("\n"):
            toadd="%-10s: %s\n" % (action,line)
            print "%-10s:%s"%(self.name,toadd)
            out+=toadd
    def setpasswd(self,passwd):
        """Set the root password on the node via an expect shell login.

        NOTE(review): '<PASSWORD>' below is a redaction placeholder; the
        original argument expression was lost.
        """
        #this will make sure new password is set
        self.log("setpasswd","")
        cl=j.tools.expect.new("sh")
        if self.args.seedpasswd=="":
            self.args.seedpasswd=self.findpasswd()
        try:
            cl.login(remote=self.name,passwd=<PASSWORD>,seedpasswd=None)
        except Exception,e:
            self.raiseError("setpasswd","Could not set root passwd.")
    def findpasswd(self):
        """Try each known superadmin password until one logs in.

        NOTE(review): raiseError always raises, so the `continue` is
        unreachable and only the first password is ever tried; on success
        the literal "unknown" is returned instead of the found password
        (self.passwd is set correctly, though).
        """
        self.log("findpasswd","find passwd for superadmin")
        cl=j.tools.expect.new("sh")
        for passwd in j.admin.rootpasswds:
            try:
                pass
                cl.login(remote=self.name,passwd=<PASSWORD>,seedpasswd=None)
            except Exception,e:
                self.raiseError("findpasswd","could not login using:%s"%passwd,e)
                continue
            self.passwd=passwd
            j.admin.setNode(self)
            return "unknown"
    def check(self):
        """Placeholder health check (the timestamp result is discarded)."""
        j.base.time.getTimeEpoch()
    def connectSSH(self):
        """Alias for _connectCuapi()."""
        return self._connectCuapi()
    def _connectCuapi(self):
        """Resolve self.ip (with a ping check), connect cuapi and install
        the password into the fabric environment."""
        if self.ip == "":
            if j.system.net.pingMachine(self.args.remote,1):
                self.ip=self.args.remote
            else:
                j.events.opserror_critical("Could not ping node:'%s'"% self.args.remote)
        self.cuapi.connect(self.ip)
        if self.args.passwd<>"":
            # setpasswd()
            j.remote.cuisine.fabric.env["password"]=self.args.passwd
        elif self.passwd<>None and self.passwd<>"unknown":
            # setpasswd()
            # NOTE(review): '<PASSWORD>' is a redaction placeholder
            j.remote.cuisine.fabric.env["password"]=<PASSWORD>
        # else:
        #     self.findpasswd()
        return self.cuapi
    def uploadFromCfgDir(self,ttype,dest,additionalArgs={}):
        """Render the template dir cfgs/<cfgname>/<ttype> (variable
        substitution) into a temp dir and upload its files to *dest*.

        NOTE(review): mutable default additionalArgs={} is shared between
        calls (the injected 'hostname' key persists).
        """
        dest=j.dirs.replaceTxtDirVars(dest)
        cfgdir=j.system.fs.joinPaths(self._basepath, "cfgs/%s/%s"%(j.admin.args.cfgname,ttype))
        additionalArgs["hostname"]=self.name
        cuapi=self.cuapi
        if j.system.fs.exists(path=cfgdir):
            self.log("uploadcfg","upload from %s to %s"%(ttype,dest))
            tmpcfgdir=j.system.fs.getTmpDirPath()
            j.system.fs.copyDirTree(cfgdir,tmpcfgdir)
            j.dirs.replaceFilesDirVars(tmpcfgdir)
            j.application.config.applyOnDir(tmpcfgdir,additionalArgs=additionalArgs)
            items=j.system.fs.listFilesInDir(tmpcfgdir,True)
            done=[]
            for item in items:
                partpath=j.system.fs.pathRemoveDirPart(item,tmpcfgdir)
                partpathdir=j.system.fs.getDirName(partpath).rstrip("/")
                if partpathdir not in done:
                    # create each remote directory only once
                    cuapi.dir_ensure("%s/%s"%(dest,partpathdir), True)
                    done.append(partpathdir)
                try:
                    cuapi.file_upload("%s/%s"%(dest,partpath),item)#,True,True)
                except Exception,e:
                    j.system.fs.removeDirTree(tmpcfgdir)
                    self.raiseError("uploadcfg","could not upload file %s to %s"%(ttype,dest))
            j.system.fs.removeDirTree(tmpcfgdir)
    def upload(self,source,dest):
        """Upload *source* to *dest* on the node.

        NOTE(review): unfinished stub - the loop below references names
        that are never defined here (items, cfgdir, done, cuapi); see the
        commented @todo.
        """
        args=j.admin.args
        if not j.system.fs.exists(path=source):
            self.raiseError("upload","could not find path:%s"%source)
        self.log("upload","upload %s to %s"%(source,dest))
        # from IPython import embed
        # print "DEBUG NOW implement upload in Admin" #@todo
        # embed()
        for item in items:
            partpath=j.system.fs.pathRemoveDirPart(item,cfgdir)
            partpathdir=j.system.fs.getDirName(partpath).rstrip("/")
            if partpathdir not in done:
                print cuapi.dir_ensure("%s/%s"%(dest,partpathdir), True)
                done.append(partpathdir)
            cuapi.file_upload("%s/%s"%(dest,partpath),item)#,True,True)
    def __repr__(self):
        # one-line table row: grid, name, roles, ip, host, enabled
        roles=",".join(self.roles)
        return ("%-10s %-10s %-50s %-15s %-10s %s"%(self.gridname,self.name,roles,self.ip,self.host,self.enabled))
    __str__=__repr__
class AdminFactory:
    """Factory wrapper - the jumpscale entry point for building Admin objects."""

    def get(self, args, failWhenNotExist=False):
        """Return an Admin bound to the parsed command line *args*."""
        return Admin(args, failWhenNotExist)
class Admin():
    def __init__(self,args,failWhenNotExist=False):
        """Build the admin context: resolve target host keys from the
        command line, open redis, load jumpscripts and node definitions.

        Host keys have the form '<gridname>__<name>'.
        """
        self.args=args
        self._basepath=j.dirs.replaceTxtDirVars(j.application.config.get("admin.basepath"))
        self.hostKeys=[]
        self.gridNameAliases={}
        # purely local actions need no ssh target resolution
        if args.action==None or (not args.action in ["createidentity","applyconfiglocal"]):
            if args.local:
                args.remote="127.0.0.1"
                args.passwd=""
            # else:
            #     if args.remote =="":
            #         args.remote=j.console.askString("Ip address of remote")
            #create ssh connection
            self.cuapi = j.remote.cuisine.api
            if args.g:
                # -g: target all active nodes (optionally filtered by roles)
                roles = list()
                if args.roles:
                    roles = args.roles.split(",")
                #@todo change to use hostkeys (reem)
                raise RuntimeError("not implemented")
                # NOTE(review): everything below in this branch is dead
                # code - the raise above always fires
                nodes = self._getActiveNodes()
                hosts = [node['name'] for node in nodes]
                for node in nodes:
                    for role in roles:
                        if role not in node['roles']:
                            hosts.remove(node['name'])
                            break
                self.hostKeys = hosts
                self.hostKeys.sort()
            elif args.remote=="" and args.gridname<>"":
                # NOTE(review): the loop variable gridname is unused -
                # getHostNamesKeys is always called with args.gridname
                for gridname in args.gridname.split(","):
                    for hostKey in self.getHostNamesKeys(args.gridname):
                        self.hostKeys.append(hostKey)
            else:
                for gridname in args.gridname.split(","):
                    self.hostKeys+=["%s__%s"%(gridname,item) for item in args.remote.split(",")]
            self.hostKeys.sort()
        # if hosts<>[]:
        #     if failWhenNotExist==False:
        #         for host in hosts:
        #             #check
        #             if j.system.net.tcpPortConnectionTest("m3pub",22):
        #                 self.cuapi.fabric.api.env["hosts"].append(host)
        #                 self.hostKeys.append(host)
        #     else:
        #         self.hostKeys=hosts
        #         self.cuapi.fabric.api.env["hosts"]=hosts
        # else:
        #     self.cuapi.connect(args.remote)
        self.sysadminPasswd=""
        self.js={}    # jsname -> action callable, filled by loadJumpscripts()
        # DO NOT USE CREDIS IN THIS CONTEXT, NOT THREAD SAFE
        self.redis = j.clients.redis.getRedisClient("127.0.0.1", 9999)
        # self.nodes={}
        self.hrd= j.core.hrd.getHRD(self._getPath("cfg/","superadmin.hrd"))
        self.rootpasswds=self.hrd.getList("superadmin.passwds")
        self.loadJumpscripts()
        self.loadNodes()
        if self.args.runid<>"":
            self.runid=self.args.runid
        else:
            # allocate a fresh run id from redis
            self.runid=self.redis.incr("admin:scriptrunid")
        if self.args.__dict__.has_key("reset") and self.args.reset:
            self.deleteScriptRunInfo()
        #
        # self.config2gridmaster() #this should not be done every time
def reset(self):
#clear redis
self.redis.delete("admin:nodes")
self.redis.delete("admin:scriptruns")
def _getActiveNodes(self):
import JumpScale.grid.osis
oscl = j.core.osis.getClientByInstance('main')
ncl = j.core.osis.getClientForCategory(oscl, 'system', 'node')
return ncl.simpleSearch({'active': True})
def _getPath(self,sub,file=""):
path= "%s/%s"%(self._basepath,sub)
path=path.replace("\\","/")
path=path.replace("//","/")
if path[-1]<>"/":
path+="/"
if file<>"":
path+=file
return path
def raiseError(self,action,msg,e=None):
#@todo make better
raise RuntimeError("%s;%s"%(action,msg))
    def getNode(self,gridname="",name=""):
        """Resolve a JNode by grid and node name.

        With no arguments: a fresh unconfigured node. With only *name*:
        the name must be a pingable address. Otherwise the node is loaded
        from the redis hash 'admin:nodes' and its ssh connection opened.
        """
        name=name.lower()
        gridname=gridname.lower()
        if gridname=="" and name=="":
            node=JNode()
            node.cuapi=self.cuapi
            node.currentScriptRun=None
            node.getScriptRun()
            node.args=self.args
            return node
        if gridname=="":
            # address-only lookup: verify the machine answers ping
            if j.system.net.pingMachine(name.strip("/").strip(),1):
                node=JNode()
                node.ip=name
                # NOTE(review): sets 'hostname' although JNode defines
                # 'host' - looks like a typo, verify consumers
                node.hostname=name
                node.args=self.args
                node.cuapi=self.cuapi
                node.currentScriptRun=None
                node.getScriptRun()
                return node
            else:
                raise RuntimeError("Could not find node:'%s'"%name)
        if self.redis.hexists("admin:nodes","%s:%s"%(gridname,name))==False:
            raise RuntimeError("could not find node: '%s/%s'"%(gridname,name))
            # node=JNode()
            # node.ip=name
            # node.host=name
        else:
            data=self.redis.hget("admin:nodes","%s:%s"%(gridname,name))
            node=JNode()
            try:
                node.__dict__.update(json.loads(data))
            except Exception,e:
                raise RuntimeError("could not decode node: '%s/%s'"%(gridname,name))
                # node=JNode()
                # self.setNode(node)
            node.args=self.args
            node.gridname=gridname
            node.name=name
            node.cuapi=self.cuapi
            node.currentScriptRun=None
            node._connectCuapi()
            return node
def setNode(self,node):
node2=copy.copy(node.__dict__)
for key in node2.keys():
if key[0]=="_":
node2.pop(key)
node2.pop("cuapi")
node2.pop("args")
node2.pop("currentScriptRun")
self.redis.hset("admin:nodes","%s:%s"%(node.gridname,node.name),json.dumps(node2))
sr=node.currentScriptRun
if sr<>None:
self.redis.hset("admin:scriptruns","%s:%s:%s"%(node.gridname,node.name,sr.runid),json.dumps(sr.__dict__))
    def executeForNode(self,node,jsname,once=True,sshtest=True,**kwargs):
        """
        Run jumpscript *jsname* on *node*, record the outcome on its
        ScriptRun and persist the node; return node.

        With once=True the script is skipped when any previous run for the
        same grid/node finished with state OK.
        """
        sr=node.currentScriptRun
        jsname=jsname.lower()
        now= j.base.time.getTimeEpoch()
        do=True
        if once:
            for item in self.getScriptRunInfo():
                if item.state=="OK" and item.nodename==node.name and item.gridname==node.gridname:
                    do=False
        # if self.args.force:
        #     do=True
        if do:
            print "* tcp check ssh"
            if not j.admin.js.has_key(jsname):
                self.raiseError("executejs","cannot find js:%s"%jsname)
            if sshtest and not j.system.net.waitConnectionTest(node.ip,22, self.args.timeout):
                self.raiseError("executejs","jscript:%s,COULD NOT check port (ssh)"%jsname)
                return
            try:
                # jumpscripts are plain callables loaded by loadJumpscripts()
                sr.result=j.admin.js[jsname](node=node,**kwargs)
                node.actionsDone[jsname]=now
                node.lastcheck=now
            except BaseException,e:
                msg="error in execution of %s.Stack:\n%s\nError:\n%s\n"%(jsname,j.errorconditionhandler.parsePythonErrorObject(e),e)
                sr.state="ERROR"
                sr.error+=msg
                print
                print msg
                # a failed run no longer counts as done
                if node.actionsDone.has_key(jsname):
                    node.actionsDone.pop(jsname)
            self.setNode(node)
        else:
            print("No need to execute %s on %s/%s"%(jsname,node.gridname,node.name))
        return node
def execute(self,jsname,once=True,reset=False,**kwargs):
res=[]
for host in self.hostKeys:
gridname, _, name = host.partition('__')
node=self.getNode(gridname,name)
self.executeForNode(node,jsname,once,**kwargs)
def loadJumpscripts(self):
# print "load jumpscripts ",
sys.path.append(self._getPath("jumpscripts"))
cmds=j.system.fs.listFilesInDir(self._getPath("jumpscripts"), recursive=True, filter="*.py")
cmds.sort()
for item in cmds:
name=j.system.fs.getBaseName(item).replace(".py","")
if name[0]<>"_":
name=name.lower()
# print "load:%s"%name
# module = importlib.import_module('jscripts.%s' % name)
module=imp.load_source('jscript_%s' % name, item)
self.js[name]= getattr(module, "action")
def getWebDis(self,enable=True):
webdis=None
if enable and j.application.config.exists("grid.watchdog.secret"):
if j.application.config.exists("grid_master_ip") and j.system.net.tcpPortConnectionTest(j.application.config.get("grid_master_ip"),7779):
webdis=j.clients.webdis.get(j.application.config.get("grid_master_ip"),7779)
return webdis
    def loadNodes(self,webdis=False,pprint =False):
        """
        load nodes from config files

        Reads every apps/admin/cfg/*.cfg grid file, registers each host as a
        JNode in redis (via setNode), optionally mirrors the nodes to the
        grid master's webdis and/or pretty-prints them per grid.
        """
        webdis=self.getWebDis(webdis)
        for configpath in j.system.fs.listFilesInDir("%s/apps/admin/cfg"%j.dirs.baseDir,filter="*.cfg"):
            gridname=j.system.fs.getBaseName(configpath).lower().strip()
            if gridname =="active.cfg":
                continue
            gridname=gridname[:-4]    # strip '.cfg'
            if webdis<>None:
                key="%s:admin:nodes:%s"%(j.application.config.get("grid_watchdog_secret"),gridname)
                webdis.delete(key)
            nodes = list()
            config = j.config.getConfig(configpath[:-4])
            self.gridNameAliases[gridname.lower()]=[]
            # optional [main] section declares comma-separated grid aliases
            if config.has_key("main"):
                for alias in config["main"].get("alias","").split(","):
                    if alias.lower() not in self.gridNameAliases[gridname.lower()]:
                        self.gridNameAliases[gridname.lower()].append(alias.lower())
            for name, host in config.iteritems():
                node=JNode()
                node.gridname=gridname
                node.name = name
                node.remark = host.get('remark')
                node.ip = host.get('ip')
                node.host = host.get('host')
                node.roles = host.get('roles', '').split(',')
                # NOTE(review): this looks inverted - enabled='1' yields
                # False; confirm the intended semantics before relying on it
                node.enabled = False if host.get('enabled', '1') == '1' else True
                self.setNode(node)
                nodes.append(node)
                if webdis<>None:
                    webdis.hset(key,node.name,json.dumps(node.__dict__))
            if pprint:
                line = "Grid %s" % gridname
                print line
                print "=" * len(line)
                print ""
                for node in sorted(nodes, key=lambda x: x.name):
                    print node
                print ''
    def config2gridmaster(self):
        """Push node definitions and jumpscript sources to the grid master's
        webdis so that remote agents can fetch them."""
        webdis=self.getWebDis()
        if webdis==None:
            raise RuntimeError("cannot connect to webdis, is gridmaster running webdis?")
        self.loadNodes(webdis=True,pprint=False)
        sys.path.append(self._getPath("jumpscripts"))
        cmds=j.system.fs.listFilesInDir(self._getPath("jumpscripts"), recursive=True, filter="*.py")
        cmds.sort()
        def getcode(path):
            # return the file content from the 'def action(' line to the end
            state="start"
            code=""
            for line in j.system.fs.fileGetContents(path).split("\n"):
                if line.find("def action(")<>-1:
                    state="found"
                if state=="found":
                    code+="%s\n"%line
            return code
        key="%s:admin:jscripts"%(j.application.config.get("grid_watchdog_secret"))
        webdis.delete(key)
        for item in cmds:
            name=j.system.fs.getBaseName(item).replace(".py","")
            if name[0]<>"_":
                obj={}
                name=name.lower()
                # print "load:%s"%name
                module=imp.load_source('jscript_%s' % name, item)
                obj["descr"]= getattr(module, "descr","")
                # NOTE(review): 'version' is assigned twice; the second
                # assignment (default "1.0") wins
                obj["version"]= getattr(module, "version","")
                obj["organization"]= getattr(module, "organization","unknown")
                obj["version"]= getattr(module, "version","1.0")
                obj["code"]=getcode(item)
                webdis.hset(key,name,json.dumps(obj))
                # ret=json.loads(self.webdis.hget(key,name))
        # print "OK"
    def sshfs(self,gridname,name):
        """Mount the node's working directories locally via sshfs under
        /mnt/<grid>_<name>_* (jsbox/jsbox_data for regular nodes,
        code/jumpscale for the 'admin' node)."""
        node=self.getNode(gridname,name)
        if name<>"admin":
            path="/mnt/%s_%s_jsbox"%(node.gridname,node.name)
            j.system.fs.createDir(path)
            cmd="sshfs %s:/opt/jsbox /mnt/%s_%s_jsbox"%(node.ip,node.gridname,node.name)
            print cmd
            j.system.process.executeWithoutPipe(cmd)
            path="/mnt/%s_%s_jsboxdata"%(node.gridname,node.name)
            j.system.fs.createDir(path)
            # NOTE(review): this prints the PREVIOUS command - the print
            # should come after the cmd assignment on the next line
            print cmd
            cmd="sshfs %s:/opt/jsbox_data /mnt/%s_%s_jsboxdata"%(node.ip,node.gridname,node.name)
            j.system.process.executeWithoutPipe(cmd)
        else:
            path="/mnt/%s_%s_code"%(node.gridname,node.name)
            j.system.fs.createDir(path)
            cmd="sshfs %s:/opt/code /mnt/%s_%s_code"%(node.ip,node.gridname,node.name)
            print cmd
            j.system.process.executeWithoutPipe(cmd)
            path="/mnt/%s_%s_jumpscale"%(node.gridname,node.name)
            j.system.fs.createDir(path)
            cmd="sshfs %s:/opt/jumpscale /mnt/%s_%s_jumpscale"%(node.ip,node.gridname,node.name)
            print cmd
            j.system.process.executeWithoutPipe(cmd)
def sshfsumount(self,gridname="",name=""):
rc,mount=j.system.process.execute("mount")
def getMntPath(mntpath):
for line in mount.split("\n"):
if line.find("sshfs")<>-1 and line.find(mntpath+" ")<>-1:
return line.split(" ")[0]
return None
def getMntPaths():
res=[]
for line in mount.split("\n"):
if line.find("sshfs")<>-1:
line=line.replace(" "," ")
line=line.replace(" "," ")
res.append(line.split(" ")[2])
return res
def do(mntpath):
mntpath2=getMntPath(mntpath)
if mntpath2==None:
return None
cmd="umount %s"%(mntpath2)
rc,out=j.system.process.execute(cmd,False)
if rc>0:
if out.find("device is busy")<>-1:
res=[]
print "MOUNTPOINT %s IS BUSY WILL TRY | |
'2HG2', 'HG22', 'HG22', 'HG22', 'HG22', 'HG22'],
['HG23', '3HG2', 'HG23', 'HG23', 'HG23', 'HG23', 'HG23'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O'], ['OG1', 'OG1', 'OG1', 'OG1', 'OG1', 'OG1', 'OG1']],
"TRP": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HE1', 'HE1', 'HNE1', 'HE1', 'HE1', 'HE1', 'HE1'],
['HE3', 'HE3', 'HE3', 'HE3', 'HE3', 'HE3', 'HE3'], ['HZ2', 'HZ2', 'HZ2', 'HZ2', 'HZ2', 'HZ2', 'HZ2'],
['HZ3', 'HZ3', 'HZ3', 'HZ3', 'HZ3', 'HZ3', 'HZ3'], ['HH2', 'HH2', 'HH2', 'HH2', 'HH2', 'HH2', 'HH2'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'], ['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'],
['CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2'], ['CE3', 'CE3', 'CE3', 'CE3', 'CE3', 'CE3', 'CE3'],
['CZ2', 'CZ2', 'CZ2', 'CZ2', 'CZ2', 'CZ2', 'CZ2'], ['CZ3', 'CZ3', 'CZ3', 'CZ3', 'CZ3', 'CZ3', 'CZ3'],
['CH2', 'CH2', 'CH2', 'CH2', 'CH2', 'CH2', 'CH2'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['NE1', 'NE1', 'NE1', 'NE1', 'NE1', 'NE1', 'NE1'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"TYR": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2'],
['HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'], ['HE2', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2'],
['HH', 'HH', 'HOH', 'HH', 'HH', 'HH', 'HH'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'],
['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'], ['CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1'],
['CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2'], ['CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['OH', 'OH', 'OH', 'OH', 'OH', 'OH', 'OH']],
"VAL": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB', 'HB', 'HB', 'HB3', 'HB', 'HB', 'HB'], ['HG11', '1HG1', 'HG11', 'HG11', 'HG11', 'HG11', 'HG11'],
['HG12', '2HG1', 'HG12', 'HG12', 'HG12', 'HG12', 'HG12'],
['HG13', '3HG1', 'HG13', 'HG13', 'HG13', 'HG13', 'HG13'],
['HG21', '1HG2', 'HG21', 'HG21', 'HG21', 'HG21', 'HG21'],
['HG22', '2HG2', 'HG22', 'HG22', 'HG22', 'HG22', 'HG22'],
['HG23', '3HG2', 'HG23', 'HG23', 'HG23', 'HG23', 'HG23'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG1', 'CG1', 'CG1', 'CG1', 'CG1', 'CG1', 'CG1'], ['CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"HOH": [['1HW', 'H1', 'H1 1'], ['2HW', 'H2', 'H1 2'], ['OW', 'O', 'O1']],
"ACE": [['CA', 'CH3'], ['HH31', 'HA1', 'H', '1H', 'H1'], ['HH32', 'HA2', '2H', 'H2'], ['HH33', 'HA3', '3H', 'H3'],
['C', 'C'], ['O', 'O']],
"NMA": [['HH31', '1HA', 'HA1'], ['HH32', '2HA', 'HA2'], ['HH33', '3HA', 'HA3'], ['CA', 'C'], ['H', 'H'], ['N']],
}
# Metal ions recognised by the toolkit (element symbols, upper case).
supported_metals = ["MN", "MG", "ZN", "CA", "CU", "FE", "NI", "CO", "PB"]

# geometry name -> [[ideal ligand angles in degrees], coordination number]
coordination_geometries = {
    'octahedric': [[90, 180], 6],
    'tetrahedric': [[109.5], 4],
}
default_mutations_maps = {
'ALA-ARG': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-ALA': ['disappear', 1, 0], 'ALA-ARG': ['appear', 0, 1]},
],
'ALA-ASH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['OD1', 'OD2', 'HD2'],
{'ALA-ASH': ['appear', 0, 1], 'ASH-ALA': ['disappear', 1, 0]},
],
'ALA-ASN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['OD1', 'ND2', 'HD21', 'HD22'],
{'ASN-ALA': ['disappear', 1, 0], 'ALA-ASN': ['appear', 0, 1]},
],
'ALA-ASP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['OD1', 'OD2'],
{'ALA-ASP': ['appear', 0, 1], 'ASP-ALA': ['disappear', 1, 0]},
],
'ALA-CYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'SG']],
['HG'],
{'CYS-ALA': ['disappear', 1, 0], 'ALA-CYS': ['appear', 0, 1]},
],
'ALA-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'OE1', 'OE2', 'HE2'],
{'GLH-ALA': ['disappear', 1, 0], 'ALA-GLH': ['appear', 0, 1]},
],
'ALA-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'ALA-GLN': ['appear', 0, 1], 'GLN-ALA': ['disappear', 1, 0]},
],
'ALA-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'OE1', 'OE2'],
{'ALA-GLU': ['appear', 0, 1], 'GLU-ALA': ['disappear', 1, 0]},
],
'ALA-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB1', 'HB2', 'HB3'),
{'GLY-ALA': ['appear', 1, 0], 'ALA-GLY': ['disappear', 0, 1]},
],
'ALA-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['ND1', 'CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1'],
{'HID-ALA': ['disappear', 1, 0], 'ALA-HID': ['appear', 0, 1]},
],
'ALA-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['ND1', 'CD2', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'ALA-HIE': ['appear', 0, 1], 'HIE-ALA': ['disappear', 1, 0]},
],
'ALA-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['ND1', 'CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'HIP-ALA': ['disappear', 1, 0], 'ALA-HIP': ['appear', 0, 1]},
],
'ALA-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB1', 'HB'], ['HB2', 'CG1'], ['HB3', 'CG2']],
['HG12', 'HG13', 'CD1', 'HG21', 'HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'ILE-ALA': ['disappear', 1, 0], 'ALA-ILE': ['appear', 0, 1]},
],
'ALA-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG', 'CD1', 'CD2', 'HD11', 'HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'ALA-LEU': ['appear', 0, 1], 'LEU-ALA': ['disappear', 1, 0]},
],
'ALA-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-ALA': ['disappear', 1, 0], 'ALA-LYN': ['appear', 0, 1]},
],
'ALA-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'ALA-LYS': ['appear', 0, 1], 'LYS-ALA': ['disappear', 1, 0]},
],
'ALA-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'SD', 'CE', 'HE1', 'HE2', 'HE3'],
{'MET-ALA': ['disappear', 1, 0], 'ALA-MET': ['appear', 0, 1]},
],
'ALA-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['CD1', 'CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'ALA-PHE': ['appear', 0, 1], 'PHE-ALA': ['disappear', 1, 0]},
],
'ALA-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG'], ['H', 'CD']],
['HG2', 'HG3', 'HD2', 'HD3'],
{'ALA-PRO': ['appear', 0, 1], 'PRO-ALA': ['disappear', 1, 0]},
],
'ALA-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'OG']],
['HG'],
{'ALA-SER': ['appear', 0, 1], 'SER-ALA': ['disappear', 1, 0]},
],
'ALA-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB1', 'HB'], ['HB2', 'OG1'], ['HB3', 'CG2']],
['HG1', 'HG21', 'HG22', 'HG23'],
{'ALA-THR': ['appear', 0, 1], 'THR-ALA': ['disappear', 1, 0]},
],
'ALA-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['CD1', 'CD2', 'HD1', 'NE1', 'CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'ALA-TRP': ['appear', 0, 1], 'TRP-ALA': ['disappear', 1, 0]},
],
'ALA-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['CD1', 'CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'ALA-TYR': ['appear', 0, 1], 'TYR-ALA': ['disappear', 1, 0]},
],
'ALA-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB1', 'HB'], ['HB2', | |
#!/usr/bin/env python3
"""
This module provides IPMI utilities
"""
import argparse
import json
import os
import subprocess
import sys
import traceback
import sqlalchemy
from dateutil.parser import parse
from datamodel import DataModel
from pxlogging import PxLogger
from pxjson import PxJSON
class PxIPMI(object):
"""
IPMI configuration
"""
#STATE = "STATE"
#STATUS = "STATUS"
#MESSAGE = "MESSAGE"
#INTERNAL = "INTERNAL"
#routes
#IPMI_INFO = "IPMI_INFO"
# JSON keys
#CONNECTIONTYPE = 'CONNECTIONTYPE'
#IPV4 = 'IPV4'
#NETMASK = 'NETMASK'
#GATEWAY = 'GATEWAY'
#MAC = 'MAC'
#VLAN = 'VLAN'
# JSON values
#CONNECTIONNONE = 'none'
#CONNECTIONSTATIC = 'static'
#CONNECTIONDHCP = 'dhcp'
#CONNECTIONBIOS = 'bios'
#CONNECTIONUNKNOWN = 'unknown'
#ipmitool keys
IPMI_SOURCE = 'IP Address Source'
IPMI_IP = 'IP Address'
IPMI_NETMASK = 'Subnet Mask'
IPMI_GATEWAY = 'Default Gateway IP'
IPMI_MAC = 'MAC Address'
IPMI_VLAN = '802.1q VLAN ID'
#ipmitool values
IPMI_SOURCE_NONE = 'none'
IPMI_SOURCE_STATIC = 'Static Address'
IPMI_SOURCE_DHCP = 'DHCP Address'
IPMI_SOURCE_BIOS = 'bios'
def __init__(self):
# Logger info
logger_name = 'com.parseclabs.ipmi'
log_pathname = '/px/log/ipmi.log'
PxLogger.setloggerinfo(logger_name, log_pathname)
self.logger = PxLogger.getlogger()
self.ipmitool = '/usr/bin/ipmitool'
self.ipmisupported = os.path.exists('/dev/ipmi0')
def parseoutput(self, output):
"""
parse output from impi lan print int json format
"""
reply = None
internalreply = None
status = 1
if output is None:
return status, reply, internalreply
try:
tmpreply = {}
for line in output.splitlines():
pair = line.split(':', 1)
length = len(pair)
key = None
value = None
if length > 0:
key = pair[0].strip()
if length > 1:
value = pair[1].strip()
if key == self.IPMI_SOURCE:
tmpreply[PxJSON.CONNECTIONTYPE] = PxJSON.CONNECTIONUNKNOWN
if value == self.IPMI_SOURCE_NONE:
tmpreply[PxJSON.CONNECTIONTYPE] = PxJSON.CONNECTIONNONE
elif value == self.IPMI_SOURCE_STATIC:
tmpreply[PxJSON.CONNECTIONTYPE] = PxJSON.CONNECTIONSTATIC
elif value == self.IPMI_SOURCE_DHCP:
tmpreply[PxJSON.CONNECTIONTYPE] = PxJSON.CONNECTIONDHCP
elif value == self.IPMI_SOURCE_BIOS:
tmpreply[PxJSON.CONNECTIONTYPE] = PxJSON.CONNECTIONBIOS
elif key == self.IPMI_IP:
tmpreply[PxJSON.IPV4] = value
elif key == self.IPMI_NETMASK:
tmpreply[PxJSON.NETMASK] = value
elif key == self.IPMI_GATEWAY:
tmpreply[PxJSON.GATEWAY] = value
elif key == self.IPMI_MAC:
tmpreply[PxJSON.MAC] = value
elif key == self.IPMI_VLAN:
tmpreply[PxJSON.VLAN] = value
status = 0
if tmpreply:
reply = tmpreply
except Exception as ex:
self.logger.error(traceback.format_exc())
internalreply = {"ex": str(ex)}
return status, reply, internalreply
def querysystem(self):
"""
Fetch current IPMI status from system
:param : None
:returns: status, reply
:raises: None
"""
status = 1
obj = PxJSON("Unable to obtain IPMI information")
condition = True
while condition:
condition = False
if not self.ipmisupported:
# VM guest?
self.logger.info("IPMI is not supported. Unable to fetch values from system.")
reply = {}
reply[PxJSON.CONNECTIONTYPE] = None
reply[PxJSON.IPV4] = None
reply[PxJSON.NETMASK] = None
reply[PxJSON.GATEWAY] = None
reply[PxJSON.MAC] = None
reply[PxJSON.VLAN] = None
obj.setroute(PxJSON.IPMI_INFO, reply)
obj.setsuccess()
status = 0
break
args = [self.ipmitool, 'lan', 'print']
status, obj, output = self.runcmd(args, obj)
if status == 0:
status, reply, internalreply = self.parseoutput(output)
if status == 0:
status = 1
if reply is None:
self.logger.error("reply={0}".format(reply))
elif len(reply) != 6: # this is a count of how many json elements we expect
self.logger.error("len(reply)={0}, {1}".format(len(reply), reply))
else:
obj.setroute(PxJSON.IPMI_INFO, reply)
obj.setsuccess()
status = 0
elif internalreply:
obj.internal(internalreply)
self.logger.info("status={0} json={1}".format(status, obj.getjsonpretty()))
return status, obj.getjson()
def runcmd(self, args, obj):
"""
Executes command. Updates PxJSON obj with any errors.
:param args: list of command arguments
:param obj: PxJSON obj
:returns: status, PxJSON obj, output
:raises: None
"""
status = 0
output = None
if args is None or obj is None or not isinstance(args, list):
self.logger.error("args={0} obj={1} argsislist={2}".format(args, obj, isinstance(args, list)))
status = 1
return status, obj, output
try:
self.logger.info("cmd={0}".format(args))
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
if output is not None:
output = output.decode()
self.logger.info("output={0}".format(output))
except subprocess.CalledProcessError as ex:
self.logger.error("cmd={0} status={1} output={2}".format(ex.cmd, ex.returncode, ex.output))
obj.internal({"status_code": ex.returncode, "text": str(ex.output)})
self.logger.error(traceback.format_exc())
status = ex.returncode
except Exception as ex:
self.logger.error(traceback.format_exc())
obj.internal({"ex": str(ex)})
status = 1
return status, obj, output
def checkattribute(self, obj, attribute, attributestring):
"""
Check if IPMI attribute is None or matches current system status
:param systemreply: JSON of current system status
:param attribute: IPMI attribute to change
:param attributeString: IPMI attribute string
:returns: False attribute is not None and does not match current
system status otherwise returns True
:raises: None
"""
if (
attribute is None or
PxJSON.IPMI_INFO not in (obj or {}) or
attribute not in (obj[PxJSON.IPMI_INFO] or {}) or
attributestring == obj[PxJSON.IPMI_INFO][attribute] or
attributestring == ''
):
return False
self.logger.info("Changing '{0}' from '{1}' to '{2}'".format(attribute, obj[PxJSON.IPMI_INFO][attribute], attributestring))
return True
def testip(self, ip):
"""
test an IP if it exists
:param ip - ip address
:returns: status
:raises: None
"""
obj = PxJSON("IP address already exists")
status, obj, _ = self.runcmd(['/usr/bin/ping', '-qc', '3', ip], obj)
if status == 0:
return True, obj
return False, obj
def changesystem(self, jsonstr):
"""
Apply system IPMI configuration
:param jsonstr - JSON formatted string
:returns: status, PxJSON
:raises: None
Any parameter set to None is not applied to the system.
"""
status = 1
obj = PxJSON("Unable to set IPMI configuration")
status, jsondict = self.validateipmijson(self.loadjson(jsonstr, obj))
obj.setroute(PxJSON.IPMI_INFO, jsondict[PxJSON.IPMI_INFO])
if status == 0:
status, obj = self.changesystem2(obj)
return status, obj.getjson()
def changesystem2(self, obj):
"""
Apply system IPMI configuration
:param obj: PxJSON object
:returns: status, PxJSON
:raises: None
Any parameter set to None is not applied to the system.
"""
status = 1
obj.setfailure("Unable to set IPMI configuration")
new_connectiontype = obj.getroute(PxJSON.IPMI_INFO)[PxJSON.CONNECTIONTYPE][PxJSON.VALUE]
new_ipaddress = obj.getroute(PxJSON.IPMI_INFO)[PxJSON.IPV4][PxJSON.VALUE]
new_netmask = obj.getroute(PxJSON.IPMI_INFO)[PxJSON.NETMASK][PxJSON.VALUE]
new_gateway = obj.getroute(PxJSON.IPMI_INFO)[PxJSON.GATEWAY][PxJSON.VALUE]
new_vlan = obj.getroute(PxJSON.IPMI_INFO)[PxJSON.VLAN][PxJSON.VALUE]
condition = True
while condition:
if not self.ipmisupported:
# VM guest?
self.logger.info("IPMI is not supported. Not applying changes.")
obj.setsuccess()
status = 0
break
systemstatus, systemjson = self.querysystem()
systemobj = PxJSON("")
systemobj.setjson(json.loads(systemjson))
if systemstatus != 0 or not systemobj.issuccess():
obj.internal(systemobj.getinternal())
break
self.logger.info("systemreply={0}".format(systemobj.getjson()))
if self.checkattribute(systemobj.getjsondict(), PxJSON.CONNECTIONTYPE, new_connectiontype):
self.logger.info("Setting={}".format(new_connectiontype))
args = [self.ipmitool, 'lan', 'set', '1', 'ipsrc', new_connectiontype]
status, obj, _ = self.runcmd(args, obj)
if status != 0:
break
if new_ipaddress is not None and PxJSON.CONNECTIONTYPE != PxJSON.CONNECTIONDHCP:
if self.checkattribute(systemobj.getjsondict(), PxJSON.IPV4, new_ipaddress):
args = [self.ipmitool, 'lan', 'set', '1', 'ipaddr', new_ipaddress]
status, obj, _ = self.runcmd(args, obj)
if status != 0:
break
if new_netmask is not None and PxJSON.CONNECTIONTYPE != PxJSON.CONNECTIONDHCP:
if self.checkattribute(systemobj.getjsondict(), PxJSON.NETMASK, new_netmask):
args = [self.ipmitool, 'lan', 'set', '1', 'netmask', new_netmask]
status, obj, _ = self.runcmd(args, obj)
if status != 0:
break
if new_gateway is not None and PxJSON.CONNECTIONTYPE != PxJSON.CONNECTIONDHCP:
if self.checkattribute(systemobj.getjsondict(), PxJSON.GATEWAY, new_gateway):
args = [self.ipmitool, 'lan', 'set', '1', 'defgw', 'ipaddr', new_gateway]
status, obj, _ = self.runcmd(args, obj)
if status != 0:
break
if new_vlan is not None:
if self.checkattribute(systemobj.getjsondict(), PxJSON.VLAN, new_vlan):
self.logger.info("Setting={}".format(new_vlan))
if new_vlan == "0":
args = [self.ipmitool, 'lan', 'set', '1', 'vlan', 'id', 'off']
else:
args = [self.ipmitool, 'lan', 'set', '1', 'vlan', 'id', new_vlan]
status, obj, _ = self.runcmd(args, obj)
if status != 0:
break
obj.setsuccess()
status = 0
condition = False
self.logger.info("status={0} json={1}".format(status, obj.getjsonpretty()))
return status, obj
def getconfiguration(self):
"""
Fetch IPMI configuration from database
:param: none
:returns: status, reply
:raises: None
"""
status = 1
obj = PxJSON("Unable to obtain IPMI configuration")
try:
table_name = 'systemsetups'
res = DataModel().ExecuteRawQueryStatement("SELECT ipmi_connection_type, ipmi_address, ipmi_netmask, ipmi_gateway, ipmi_vlan from {0}".format(table_name))
reply = {}
for row in res:
self.logger.info(row)
reply[PxJSON.CONNECTIONTYPE] = row['ipmi_connection_type']
reply[PxJSON.IPV4] = row['ipmi_address']
reply[PxJSON.NETMASK] = row['ipmi_netmask']
reply[PxJSON.GATEWAY] = row['ipmi_gateway']
reply[PxJSON.VLAN] = row['ipmi_vlan'] if row['ipmi_vlan'] != 0 else 'undefined'
obj.setroute(PxJSON.IPMI_INFO, reply)
obj.setsuccess()
status = 0
except sqlalchemy.exc.OperationalError as ex:
self.logger.error(traceback.format_exc())
obj.internal({"exception": str(ex)})
except Exception as ex:
self.logger.error(traceback.format_exc())
obj.internal({"exception": str(ex)})
self.logger.info("status={0} json={1}".format(status, obj.getjsonpretty()))
return status, obj.getjson()
def loadjson(self, jsonstr, obj):
""" load JSON from string """
jsondict = None
try:
jsondict = json.loads(jsonstr)
except TypeError as ex:
self.logger.error(traceback.format_exc())
obj.internal({"exception": str(ex)})
except Exception as ex:
self.logger.error(traceback.format_exc())
obj.internal({"exception": str(ex)})
self.logger.info("jsondict={0}".format(jsondict))
return jsondict
def validateipmijson(self, data):
"""
Validate IPMI information
:param data - dictionary with IPMI information
:returns: number_of_errors, dictionary
:raises: None
"""
err_count = 0
reply = {PxJSON.IPMI_INFO: {}}
ipmi_keys = {PxJSON.CONNECTIONTYPE: [PxJSON.CONNECTIONSTATIC, PxJSON.CONNECTIONDHCP], PxJSON.IPV4: [], PxJSON.NETMASK: [], PxJSON.GATEWAY: [], PxJSON.VLAN: []}
option_keys = {PxJSON.IGNOREUNUSEDKEYS: [PxJSON.TRUE, PxJSON.FALSE]}
if PxJSON.IPMI_INFO not in (data or {}):
reply[PxJSON.IPMI_INFO] = {PxJSON.VALUE: None, PxJSON.STATUS: PxJSON.MISSING_KEY}
err_count += 1
ignore_unused_keys = False
if PxJSON.OPTIONS in (data or {}) and PxJSON.IGNOREUNUSEDKEYS in (data[PxJSON.OPTIONS] or {}):
if data[PxJSON.OPTIONS][PxJSON.IGNOREUNUSEDKEYS] == PxJSON.TRUE:
ignore_unused_keys = True
for key in (data or {}):
if key == PxJSON.IPMI_INFO:
if data[key] is None:
reply[key] = {PxJSON.VALUE: data[key], PxJSON.STATUS: PxJSON.MISSING_VALUE}
err_count += 1
elif not data[key] or not isinstance(data[key], dict):
reply[key] = {PxJSON.VALUE: data[key], PxJSON.STATUS: | |
<reponame>dmquinones/qiskit-terra
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,anomalous-backslash-in-string,missing-docstring
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import patches
from matplotlib import pyplot as plt
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization import exceptions
from qiskit.visualization import qcstyle
from qiskit.visualization import interpolation
from qiskit.visualization.qcstyle import OPStylePulse, OPStyleSched
from qiskit.pulse.channels import (DriveChannel, ControlChannel, MeasureChannel,
AcquireChannel, SnapshotChannel)
from qiskit.pulse import (SamplePulse, FrameChange, PersistentValue, Snapshot, Acquire,
PulseError)
logger = logging.getLogger(__name__)
# A (register, index) pair identifying one bit within a register.
Register = collections.namedtuple('Register', 'reg index')
# Gate box width and height, in plot units.
WID = 0.65
HIG = 0.65
# Base figure scale factor, multiplied by the user-supplied scale.
DEFAULT_SCALE = 4.3
# Matplotlib z-order values: higher numbers are drawn on top.
PORDER_GATE = 5
PORDER_LINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
class Anchor:
    """Track horizontal slot occupancy for one register row of the plot.

    When ``fold`` is positive the circuit wraps onto a new row of wires
    every ``fold`` columns; ``fold <= 0`` disables folding.
    """

    def __init__(self, reg_num, yind, fold):
        self.__yind = yind          # y index of this register's wire
        self.__fold = fold          # columns per row; <= 0 means no folding
        self.__reg_num = reg_num    # total number of register lines
        self.__gate_placed = []     # sorted list of occupied column indexes
        self.gate_anchor = 0

    def plot_coord(self, index, gate_width):
        """Return the (x, y) plot coordinate for a gate at ``index``.

        A gate that would straddle a fold boundary is pushed to the
        start of the next row.
        """
        if self.__fold > 0:
            h_pos = index % self.__fold + 1
            # check folding: push past the boundary if the gate won't fit
            if h_pos + (gate_width - 1) > self.__fold:
                index += self.__fold - (h_pos - 1)
            x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
            y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
        else:
            # BUG FIX: the modulo above used to run unconditionally, so
            # fold == 0 raised ZeroDivisionError; with folding disabled
            # the coordinate is a straight horizontal offset.
            x_pos = index + 1 + 0.5 * (gate_width - 1)
            y_pos = self.__yind
        # index could have been updated, so need to store it
        self.gate_anchor = index
        return x_pos, y_pos

    def is_locatable(self, index, gate_width):
        """Return True when columns [index, index + gate_width) are free."""
        hold = [index + i for i in range(gate_width)]
        for p in hold:
            if p in self.__gate_placed:
                return False
        return True

    def set_index(self, index, gate_width):
        """Mark columns [index, index + gate_width) as occupied."""
        if self.__fold > 0:
            h_pos = index % self.__fold + 1
            if h_pos + (gate_width - 1) > self.__fold:
                _index = index + self.__fold - (h_pos - 1)
            else:
                _index = index
        else:
            # BUG FIX: as in plot_coord, fold == 0 previously raised
            # ZeroDivisionError here; without folding the index is used as-is.
            _index = index
        for ii in range(gate_width):
            if _index + ii not in self.__gate_placed:
                self.__gate_placed.append(_index + ii)
        self.__gate_placed.sort()

    def get_index(self):
        """Return the first free column after the last placed gate."""
        if self.__gate_placed:
            return self.__gate_placed[-1] + 1
        return 0
class MatplotlibDrawer:
    def __init__(self, qregs, cregs, ops,
                 scale=1.0, style=None, plot_barriers=True,
                 reverse_bits=False):
        """Create a drawer for the given registers and instruction list.

        :param qregs: quantum bits as (register, index) pairs
        :param cregs: classical bits as (register, index) pairs
        :param ops: circuit instructions to draw
        :param scale: multiplier applied to DEFAULT_SCALE
        :param style: dict of style overrides, or path to a JSON style file
        :param plot_barriers: whether barriers are drawn
        :param reverse_bits: whether bit order is reversed in the output
        :raises ImportError: if matplotlib is not installed
        """
        if not HAS_MATPLOTLIB:
            raise ImportError('The class MatplotlibDrawer needs matplotlib. '
                              'Run "pip install matplotlib" before.')
        self._ast = None
        self._scale = DEFAULT_SCALE * scale
        self._creg = []
        self._qreg = []
        self._registers(cregs, qregs)
        self._ops = ops
        # per-bit layout info, populated while drawing
        self._qreg_dict = collections.OrderedDict()
        self._creg_dict = collections.OrderedDict()
        # running layout extents, updated as gates are placed
        self._cond = {
            'n_lines': 0,
            'xmax': 0,
            'ymax': 0,
        }
        self._style = qcstyle.QCStyle()
        self.plot_barriers = plot_barriers
        self.reverse_bits = reverse_bits
        if style:
            # style may be a dict of overrides or a path to a JSON file
            if isinstance(style, dict):
                self._style.set_style(style)
            elif isinstance(style, str):
                with open(style, 'r') as infile:
                    dic = json.load(infile)
                self._style.set_style(dic)
        self.figure = plt.figure()
        self.figure.patch.set_facecolor(color=self._style.bg)
        self.ax = self.figure.add_subplot(111)
        # the circuit is drawn with equal aspect and no axis decoration
        self.ax.axis('off')
        self.ax.set_aspect('equal')
        self.ax.tick_params(labelbottom=False, labeltop=False,
                            labelleft=False, labelright=False)
def _registers(self, creg, qreg):
self._creg = []
for r in creg:
self._creg.append(Register(reg=r[0], index=r[1]))
self._qreg = []
for r in qreg:
self._qreg.append(Register(reg=r[0], index=r[1]))
    @property
    def ast(self):
        """The stored AST; None until populated."""
        return self._ast
    def _custom_multiqubit_gate(self, xy, fc=None, wide=True, text=None,
                                subtext=None):
        """Draw one box spanning several qubit rows, numbering each input.

        :param xy: list of (x, y) anchor coordinates, one per qubit
        :param fc: face color override (defaults to the style gate color)
        :param wide: widen the box to fit the label text
        :param text: main label drawn inside the box
        :param subtext: secondary label drawn under the main one
        """
        xpos = min([x[0] for x in xy])
        ypos = min([y[1] for y in xy])
        ypos_max = max([y[1] for y in xy])
        if wide:
            # scale the width with the longest label (~8 chars per unit)
            if subtext:
                boxes_length = round(max([len(text), len(subtext)]) / 8) or 1
            else:
                boxes_length = round(len(text) / 8) or 1
            wid = WID * 2.8 * boxes_length
        else:
            wid = WID
        if fc:
            _fc = fc
        else:
            _fc = self._style.gc
        # NOTE(review): assumes row y coordinates are non-positive (rows
        # grow downward), so abs() yields the span — verify against caller.
        qubit_span = abs(ypos) - abs(ypos_max) + 1
        height = HIG + (qubit_span - 1)
        box = patches.Rectangle(
            xy=(xpos - 0.5 * wid, ypos - .5 * HIG),
            width=wid, height=height, fc=_fc, ec=self._style.lc,
            linewidth=1.5, zorder=PORDER_GATE)
        self.ax.add_patch(box)
        # Annotate inputs: number each connected qubit along the left edge.
        for bit, y in enumerate([x[1] for x in xy]):
            self.ax.text(xpos - 0.45 * wid, y, str(bit), ha='left', va='center',
                         fontsize=self._style.fs, color=self._style.gt,
                         clip_on=True, zorder=PORDER_TEXT)
        if text:
            disp_text = text
            if subtext:
                # stack the main label above the subtext inside the box
                self.ax.text(xpos, ypos + 0.15 * height, disp_text, ha='center',
                             va='center', fontsize=self._style.fs,
                             color=self._style.gt, clip_on=True,
                             zorder=PORDER_TEXT)
                self.ax.text(xpos, ypos - 0.3 * height, subtext, ha='center',
                             va='center', fontsize=self._style.sfs,
                             color=self._style.sc, clip_on=True,
                             zorder=PORDER_TEXT)
            else:
                # center the single label across the full qubit span
                self.ax.text(xpos, ypos + .5 * (qubit_span - 1), disp_text,
                             ha='center',
                             va='center',
                             fontsize=self._style.fs,
                             color=self._style.gt,
                             clip_on=True,
                             zorder=PORDER_TEXT)
    def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):
        """Draw a single-qubit gate box at (x, y) with optional labels.

        :param xy: (x, y) center of the gate on its wire
        :param fc: face color override; otherwise taken from the style's
            per-gate color table (dispcol) or the default gate color
        :param wide: widen the box to fit long labels
        :param text: gate label, looked up in the style tables
        :param subtext: secondary label drawn under the main one
        """
        xpos, ypos = xy
        if wide:
            if subtext:
                wid = WID * 2.8
            else:
                # scale the width with label length (~10 chars per unit)
                boxes_wide = round(len(text) / 10) or 1
                wid = WID * 2.8 * boxes_wide
        else:
            wid = WID
        if fc:
            _fc = fc
        elif text and text in self._style.dispcol:
            _fc = self._style.dispcol[text]
        else:
            _fc = self._style.gc
        box = patches.Rectangle(
            xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,
            fc=_fc, ec=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
        self.ax.add_patch(box)
        if text:
            # NOTE(review): membership is tested against dispcol but the
            # label is fetched from disptex — this assumes the two tables
            # share keys; a key present only in dispcol would raise
            # KeyError. Confirm against qcstyle.QCStyle.
            if text in self._style.dispcol:
                disp_text = "${}$".format(self._style.disptex[text])
            else:
                disp_text = text
            if subtext:
                # stack the main label above the subtext inside the box
                self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',
                             va='center', fontsize=self._style.fs,
                             color=self._style.gt, clip_on=True,
                             zorder=PORDER_TEXT)
                self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
                             va='center', fontsize=self._style.sfs,
                             color=self._style.sc, clip_on=True,
                             zorder=PORDER_TEXT)
            else:
                self.ax.text(xpos, ypos, disp_text, ha='center', va='center',
                             fontsize=self._style.fs,
                             color=self._style.gt,
                             clip_on=True,
                             zorder=PORDER_TEXT)
def _subtext(self, xy, text):
xpos, ypos = xy
self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _sidetext(self, xy, text):
xpos, ypos = xy
# 0.15 = the initial gap, each char means it needs to move
# another 0.0375 over
xp = xpos + 0.15 + (0.0375 * len(text))
self.ax.text(xp, ypos+HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _line(self, xy0, xy1, lc=None, ls=None):
x0, y0 = xy0
x1, y1 = xy1
if lc is None:
linecolor = self._style.lc
else:
linecolor = lc
if ls is None:
linestyle = 'solid'
else:
linestyle = ls
if linestyle == 'doublet':
theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
dx = 0.05 * WID * np.cos(theta)
dy = 0.05 * WID * np.sin(theta)
self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
color=linecolor,
linewidth=1.0,
linestyle='solid',
zorder=PORDER_LINE)
self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
color=linecolor,
linewidth=1.0,
linestyle='solid',
zorder=PORDER_LINE)
else:
self.ax.plot([x0, x1], [y0, y1],
color=linecolor,
linewidth=1.0,
linestyle=linestyle,
zorder=PORDER_LINE)
    def _measure(self, qxy, cxy, cid):
        """Draw a measurement: a gauge symbol on the qubit wire plus an
        arrow down to the classical wire.

        :param qxy: (x, y) position on the quantum wire
        :param cxy: (x, y) position on the classical wire
        :param cid: classical bit index, printed when wires are bundled
        """
        qx, qy = qxy
        cx, cy = cxy
        self._gate(qxy, fc=self._style.dispcol['meas'])
        # add measure symbol: a half-circle dial with a needle
        arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
                          height=HIG * 0.7, theta1=0, theta2=180, fill=False,
                          ec=self._style.lc, linewidth=1.5,
                          zorder=PORDER_GATE)
        self.ax.add_patch(arc)
        self.ax.plot([qx, qx + 0.35 * WID],
                     [qy - 0.15 * HIG, qy + 0.20 * HIG],
                     color=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
        # arrow from the gate down to the classical wire
        self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,
                   ls=self._style.cline)
        arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
                                     (cx + 0.20 * WID, cy + 0.35 * WID),
                                     (cx, cy)),
                                    fc=self._style.cc,
                                    ec=None)
        self.ax.add_artist(arrowhead)
        # target: label the classical bit index when wires are bundled
        if self._style.bundle:
            self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
                         fontsize=0.8 * self._style.fs,
                         color=self._style.tc,
                         clip_on=True,
                         zorder=PORDER_TEXT)
def _conds(self, xy, istrue=False):
xpos, ypos = xy
if istrue:
_fc = self._style.lc
else:
_fc = self._style.gc
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _ctrl_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=self._style.lc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _tgt_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=self._style.dispcol['target'],
ec=self._style.lc, linewidth=1.5,
zorder=PORDER_GATE)
self.ax.add_patch(box)
# add '+' symbol
self.ax.plot([xpos, xpos], [ypos - 0.35 * HIG, ypos + 0.35 * HIG],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
self.ax.plot([xpos - 0.35 * HIG, xpos + 0.35 * HIG], [ypos, ypos],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
def _swap(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
def _barrier(self, config, anc):
xys = config['coord']
group = config['group']
y_reg = []
for qreg in self._qreg_dict.values():
if qreg['group'] in group:
y_reg.append(qreg['y'])
x0 = xys[0][0]
box_y0 = min(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) - 0.5
box_y1 = max(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) + 0.5
box = patches.Rectangle(xy=(x0 - | |
<gh_stars>1-10
#Code from https://github.com/zhangyaqi1989/Gcode-Reader
from enum import Enum
import os.path
import pprint
import sys
import numpy as np
import pandas as pd
class LayerError(Exception):
    """Raised when a requested layer number is outside the valid range."""
class GcodeType(Enum):
    """Supported flavors of G-code input."""

    FDM_REGULAR = 1
    FDM_STRATASYS = 2
    LPBF = 3

    @classmethod
    def has_value(cls, value):
        """Return True when `value` matches one of the member values."""
        return value in (member.value for member in cls)
class GcodeReader:
    """Parse a G-code file into per-layer line segments.

    Supports regular FDM, Stratasys FDM, and LPBF flavors.  After
    construction, ``segs`` is an (n_segs, 5) array of rows
    ``(x0, y0, x1, y1, z)`` and ``seg_index_bars`` holds the segment
    index boundaries of each layer: the ith layer (1-based) spans
    indexes ``[seg_index_bars[i-1], seg_index_bars[i])``.
    """

    def __init__(self, filename, filetype=GcodeType.FDM_REGULAR):
        """Read and parse `filename` according to `filetype`."""
        if not os.path.exists(filename):
            print("{} does not exist!".format(filename))
            sys.exit(1)
        self.filename = filename
        self.filetype = filetype
        self.n_segs = 0  # number of line segments
        self.segs = None  # list of line segments [(x0, y0, x1, y1, z)]
        self.n_layers = 0  # number of layers
        # seg_index_bars and subpath_index_bars have the same format
        # e.g. ith layer has segment indexes [seg_index_bars[i-1],
        # seg_index_bars[i])
        self.seg_index_bars = []
        self.subpath_index_bars = []
        self.summary = None
        self.lengths = None
        self.subpaths = None
        self.xyzlimits = None
        self.elements = None
        self.elements_index_bars = []
        # read file to populate variables
        self._read()

    def mesh(self, max_length):
        """Subdivide every segment into elements no longer than max_length."""
        self.elements = []
        self.elements_index_bars = []
        bar = 0
        n_eles = 0
        for i, (x0, y0, x1, y1, z) in enumerate(self.segs):
            if i == self.seg_index_bars[bar]:
                bar += 1
                self.elements_index_bars.append(n_eles)
            length = np.hypot(x0 - x1, y0 - y1)
            # BUG FIX: a zero-length segment used to give n_slices == 0,
            # causing a ZeroDivisionError below; clamp to at least 1.
            n_slices = max(int(np.ceil(length / max_length)), 1)
            n_eles += n_slices
            dx = (x1 - x0) / n_slices
            dy = (y1 - y0) / n_slices
            for _ in range(n_slices - 1):
                self.elements.append((x0, y0, x0 + dx, y0 + dy, z))
                x0, y0 = x0 + dx, y0 + dy
            self.elements.append((x0, y0, x1, y1, z))
        self.elements_index_bars.append(n_eles)
        print("Meshing finished, {:d} elements generated".
              format(len(self.elements)))

    # NOTE: large commented-out plotting helpers (plot_mesh_layer,
    # plot_mesh) were removed here; see the upstream Gcode-Reader
    # repository (zhangyaqi1989/Gcode-Reader) for the originals.

    def _read(self):
        """
        read the file and populate self.segs, self.n_segs and
        self.seg_index_bars
        """
        if self.filetype == GcodeType.FDM_REGULAR:
            self._read_fdm_regular()
        elif self.filetype == GcodeType.FDM_STRATASYS:
            self._read_fdm_stratasys()
        elif self.filetype == GcodeType.LPBF:
            self._read_lpbf()
        else:
            print("file type is not supported")
            sys.exit(1)
        self.xyzlimits = self._compute_xyzlimits(self.segs)

    def _compute_xyzlimits(self, seg_list):
        """Return (xmin, xmax, ymin, ymax, zmin, zmax) over all segments."""
        xmin, xmax = float('inf'), -float('inf')
        ymin, ymax = float('inf'), -float('inf')
        zmin, zmax = float('inf'), -float('inf')
        for x0, y0, x1, y1, z in seg_list:
            xmin = min(xmin, x0, x1)
            ymin = min(ymin, y0, y1)
            zmin = min(zmin, z)
            xmax = max(xmax, x0, x1)
            ymax = max(ymax, y0, y1)
            zmax = max(zmax, z)
        return (xmin, xmax, ymin, ymax, zmin, zmax)

    def _read_lpbf(self):
        """Read an LPBF G-code file (N-prefixed lines; laser power in L)."""
        with open(self.filename) as infile:
            # read nonempty lines
            lines = (line.strip() for line in infile.readlines()
                     if line.strip())
            # only keep lines that start with 'N'
            lines = (line for line in lines if line.startswith('N'))
            self.segs = []
            self.powers = []
            temp = -float('inf')
            # current values of the N, G, X, Y, Z, F, L words
            ngxyzfl = [temp, temp, temp, temp, temp, temp, temp]
            d = dict(zip(['N', 'G', 'X', 'Y', 'Z', 'F', 'L'], range(7)))
            seg_count = 0
            for line in lines:
                old_ngxyzfl = ngxyzfl[:]
                tokens = line.split()
                for token in tokens:
                    ngxyzfl[d[token[0]]] = float(token[1:])
                # a Z increase marks the start of a new layer
                if ngxyzfl[d['Z']] > old_ngxyzfl[d['Z']]:
                    self.n_layers += 1
                    self.seg_index_bars.append(seg_count)
                # G1 move in the XY plane with positive feed -> lased segment
                if (ngxyzfl[1] == 1 and ngxyzfl[2:4] != old_ngxyzfl[2:4]
                        and ngxyzfl[4] == old_ngxyzfl[4]
                        and ngxyzfl[5] > 0):
                    x0, y0, z = old_ngxyzfl[2:5]
                    x1, y1 = ngxyzfl[2:4]
                    self.segs.append((x0, y0, x1, y1, z))
                    self.powers.append(ngxyzfl[-1])
                    seg_count += 1
        self.n_segs = len(self.segs)
        self.segs = np.array(self.segs)
        self.seg_index_bars.append(self.n_segs)
        assert(len(self.seg_index_bars) - self.n_layers == 1)

    def _read_fdm_regular(self):
        """Read a regular FDM G-code file (G1 moves; E for extrusion)."""
        with open(self.filename) as infile:
            # read nonempty lines
            lines = (line.strip() for line in infile.readlines()
                     if line.strip())
            # only keep lines that start with 'G1'
            lines = (line for line in lines if line.startswith('G1'))
            self.segs = []
            temp = -float('inf')
            # current values of the G, X, Y, Z, E, F words
            gxyzef = [temp, temp, temp, temp, temp, temp]
            d = dict(zip(['G', 'X', 'Y', 'Z', 'E', 'F'], range(6)))
            seg_count = 0
            for line in lines:
                # strip trailing ';' comments
                line = line.split(';')[0]
                if line == "":
                    continue
                old_gxyzef = gxyzef[:]
                for token in line.split():
                    if token[1:] == "":
                        continue
                    try:
                        gxyzef[d[token[0]]] = float(token[1:])
                    except (ValueError, KeyError):
                        # BUG FIX: was a bare `except:` which also swallowed
                        # SystemExit/KeyboardInterrupt; ValueError covers a
                        # malformed number, KeyError an unknown word letter
                        print("invalid token: " + token[1:] + ";")
                        sys.exit(1)
                if gxyzef[3] > old_gxyzef[3]:  # z value rose: new layer
                    self.n_layers += 1
                    self.seg_index_bars.append(seg_count)
                # XY move at constant Z with extrusion -> printed segment
                if (gxyzef[0] == 1 and gxyzef[1:3] != old_gxyzef[1:3]
                        and gxyzef[3] == old_gxyzef[3]
                        and gxyzef[4] > old_gxyzef[4]):
                    x0, y0, z = old_gxyzef[1:4]
                    x1, y1 = gxyzef[1:3]
                    self.segs.append((x0, y0, x1, y1, z))
                    seg_count += 1
        self.n_segs = len(self.segs)
        self.segs = np.array(self.segs)
        if not self.seg_index_bars[-1] == len(self.segs):
            self.seg_index_bars.append(self.n_segs)
        else:
            # the last layer-start bar had no segments after it
            self.n_layers -= 1
        assert(len(self.seg_index_bars) - self.n_layers == 1)

    def _read_fdm_stratasys(self):
        """Read a Stratasys FDM G-code file."""
        self.areas = []
        self.is_supports = []
        self.styles = []
        self.deltTs = []
        self.segs = []
        temp = -float('inf')
        # x, y, z, area, deltaT, is_support, style
        xyzATPS = [temp, temp, temp, temp, temp, False, '']
        seg_count = 0
        with open(self.filename, 'r') as in_file:
            lines = in_file.readlines()
        # True when the next data line starts a new subpath (no segment
        # should be emitted for it).
        # BUG FIX: this flag was initialised as `is_start` (a dead name),
        # leaving `start` potentially unbound on first use.
        start = True
        for line in lines:
            if line.startswith('#'):  # comment line
                continue
            if not line.strip():  # an empty line separates subpaths
                start = True
                continue
            old_xyzATPS = xyzATPS[:]
            tokens = line.split()
            xyzATPS[:5] = [float(token) for token in tokens[:5]]
            xyzATPS[5] = bool(tokens[5])
            xyzATPS[6] = tokens[6]
            if xyzATPS[2] != old_xyzATPS[2]:  # z value changed: new layer
                self.seg_index_bars.append(seg_count)
                self.n_layers += 1
            elif not start:
                # make sure is_support and style do not change mid-subpath
                assert(xyzATPS[5:] == old_xyzATPS[5:])
                x0, y0 = old_xyzATPS[:2]
                x1, y1, z = xyzATPS[:3]
                self.segs.append((x0, y0, x1, y1, z))
                seg_count += 1
                self.areas.append(xyzATPS[3])
                self.deltTs.append(xyzATPS[4])
                self.is_supports.append(xyzATPS[5])
                self.styles.append(xyzATPS[6])
            start = False
        self.n_segs = len(self.segs)
        self.segs = np.array(self.segs)
        self.seg_index_bars.append(self.n_segs)

    def _compute_subpaths(self):
        """Group consecutive connected segments into subpaths.

        A subpath is a tuple (xs, ys, zs) of coordinate lists; results
        are cached in self.subpaths and self.subpath_index_bars.
        """
        if not self.subpaths:
            self.subpaths = []
            self.subpath_index_bars = [0]
            x0, y0, x1, y1, z = self.segs[0, :]
            xs, ys, zs = [x0, x1], [y0, y1], [z, z]
            for x0, y0, x1, y1, z in self.segs[1:, :]:
                # a break in the chain (or a layer change) ends the subpath
                if x0 != xs[-1] or y0 != ys[-1] or z != zs[-1]:
                    self.subpaths.append((xs, ys, zs))
                    if z != zs[-1]:
                        # layer changed: record a subpath index bar
                        self.subpath_index_bars.append(len(self.subpaths))
                    xs, ys, zs = [x0, x1], [y0, y1], [z, z]
                else:
                    xs.append(x1)
                    ys.append(y1)
                    zs.append(z)
            if len(xs) != 0:
                self.subpaths.append((xs, ys, zs))
                self.subpath_index_bars.append(len(self.subpaths))

    # NOTE: commented-out plotting helpers (plot, plot_layers, plot_layer)
    # were removed here; see the upstream Gcode-Reader repository for them.
| |
import time, pygame, os, sys, sqlite3
from random import randint
from pygame.constants import MOUSEBUTTONDOWN, QUIT
# Initialize every pygame module that requires initialization.
pygame.init()
# Open the database connection and create the cursor:
banco_de_dados = sqlite3.connect('nome_pontuacao.db')
cursor = banco_de_dados.cursor() # object used to run statements against the database
# Intended to hold the first 10 records of the database.
# NOTE(review): no SELECT is executed before fetchmany(), so this cannot
# return stored rows — verify the intended query is run first.
itens1 = cursor.fetchmany(10)
# Create the window
janela = pygame.display.set_mode((500, 550))
clock = pygame.time.Clock()
# Temporarily stores the player's name:
nome = ""
# Accumulates the sum of the times of all maps (the score)
pontos = 0
# Resolve asset directories relative to this file's location.
diretorio_principal = os.path.dirname(__file__)
diretorio_imagens = os.path.join(diretorio_principal, 'imagens')
diretorio_sons = os.path.join(diretorio_principal, 'sons')
# Set the window title and icon
pygame.display.set_caption('Maze and Cannons')
icone = pygame.image.load(os.path.join(diretorio_imagens, 'lab.png')) # image must live in the program's directory
pygame.display.set_icon(icone)
# Load sounds
# NOTE(review): the value bound to musicatema is only the load call's
# return and is never used again — confirm it can be dropped.
musicatema = pygame.mixer.music.load(os.path.join(diretorio_sons, 'enter.mp3'))
pygame.mixer.music.set_volume(0.08)
pygame.mixer.music.play(-1)
somtiro = pygame.mixer.Sound(os.path.join(diretorio_sons, 'tiro1.mp3'))
somtiro.set_volume(0.1)
somDano = pygame.mixer.Sound(os.path.join(diretorio_sons, 'dano.mp3'))
somGameOver = pygame.mixer.Sound(os.path.join(diretorio_sons, 'somGameOver.mp3'))
somClick = pygame.mixer.Sound(os.path.join(diretorio_sons, 'click.mp3'))
somClick.set_volume(0.1)
somChegada = pygame.mixer.Sound(os.path.join(diretorio_sons, 'somchegada.mp3'))
somChegada.set_volume(0.1)
# Color definitions (RGB)
preto = (0,0,0)
branco = (255,255,255)
verdelimao = (15, 255, 149)
vermelho = (255, 49, 46)
azul = (72, 172, 240)
rosa = (252, 24, 152)
amarelo = (252,252,4)
fonte = pygame.font.Font("Gamer.ttf",40)
espessura = 9
# Window dimensions (same values passed to set_mode above)
largura_janela = 500
altura_janela = 550
class Botão:
    """Two-layer push button drawn directly on the main window.

    The face rectangle sits on top of a darker shadow rectangle; the face
    turns red while hovered and blue otherwise, and ``pressed`` tracks the
    left mouse button while the pointer is over the face.
    """

    def __init__(self, texto, largura_botão, altura_botão, posição):
        self.pressed = False
        dimensões = (largura_botão, altura_botão)
        # Face rectangle (drawn last, holds the label).
        self.top_rect = pygame.Rect(posição, dimensões)
        self.top_color = '#475F77'
        # Shadow rectangle drawn underneath the face.
        self.bottom_rect = pygame.Rect(posição, dimensões)
        self.bottom_color = '#354B5E'
        # Pre-rendered label, centred on the face.
        self.text_surf = fonte.render(texto, True, branco)
        self.text_rect = self.text_surf.get_rect(center=self.top_rect.center)

    def desenhar_botão(self):
        """Draw shadow, face and label, then refresh the hover/press state."""
        self.text_rect.center = self.top_rect.center
        self.bottom_rect.midtop = self.top_rect.midtop
        pygame.draw.rect(janela, self.bottom_color, self.bottom_rect, border_radius=12)
        pygame.draw.rect(janela, self.top_color, self.top_rect, border_radius=15)
        janela.blit(self.text_surf, self.text_rect)
        self.checar_click()

    def checar_click(self):
        """Update hover colour and the ``pressed`` flag from the mouse state."""
        if self.top_rect.collidepoint(pygame.mouse.get_pos()):
            self.top_color = vermelho
            if pygame.mouse.get_pressed()[0]:
                self.pressed = True
            elif self.pressed:
                # Button released while still hovering: clear the latch.
                self.pressed = False
        else:
            self.top_color = azul
# Ranking (high-score) screen:
def tela_rank(largura_janela,altura_janela):
    """Show the top-10 high-score table until the player leaves it.

    Renders the rows from the module-level ``itens1`` (one DB row per entry,
    score at index 1, name at index 2) and the 'Sair' button, looping at 60
    FPS until the window is closed or the button area is clicked.

    :param largura_janela: window width in pixels
    :param altura_janela: window height in pixels
    :returns: the pygame display surface used for the ranking screen
    """
    janela_rank = pygame.display.set_mode((largura_janela, altura_janela))
    fonte_ranking = pygame.font.Font('Gamer.ttf', 80)
    fonte_ranking2 = pygame.font.Font('Gamer.ttf', 40)
    # One colour per table row, 1st place first (same palette as before).
    cores = (branco, vermelho, amarelo, amarelo, verdelimao,
             azul, azul, vermelho, branco, rosa)
    rotulos = ("1ST", "2ND", "3RD", "4TH", "5TH",
               "6TH", "7TH", "8TH", "9TH", "10TH")
    # Pre-render every static text surface once instead of on every frame.
    cabecalhos = [
        (fonte_ranking.render("HIGH SCORES", True, vermelho), (90, 0)),
        (fonte_ranking2.render("RANK", True, amarelo), (70, 100)),
        (fonte_ranking2.render("SCORE", True, amarelo), (210, 100)),
        (fonte_ranking2.render("NAME", True, amarelo), (350, 100)),
    ]
    linhas = []
    for i, (rotulo, cor) in enumerate(zip(rotulos, cores)):
        linha_y = 140 + 30 * i  # rows start at y=140, 30 px apart
        linhas.append((fonte_ranking2.render(rotulo, True, cor), (75, linha_y)))
        linhas.append((fonte_ranking2.render(str(itens1[i][1]), True, cor), (240, linha_y)))
        linhas.append((fonte_ranking2.render(itens1[i][2], True, cor), (360, linha_y)))
    sair = False
    while sair == False:
        for event in pygame.event.get():
            if event.type == QUIT:
                sair = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                # Click landed inside the 'Sair' button rectangle.
                if 170 < x < 330 and 510 < y < 540:
                    sair = True
        # The button redraws itself (and its hover colour) every frame.
        botão_sair.desenhar_botão()
        for superficie, posicao in cabecalhos + linhas:
            janela_rank.blit(superficie, posicao)
        pygame.display.flip() # refresh the screen
        clock.tick(60)
    return janela_rank
# Instantiate the exit ('Sair') button used on the ranking screen.
botão_sair = Botão("Sair",160,30,(170,510))
def criarCanhão(imagem ,x, y):
    """Blit a cannon sprite onto the main window at (x, y)."""
    destino = (x, y)
    janela.blit(imagem, destino)
def criarCoracao(imagem, x, y):
    """Blit a heart (life) icon onto the main window at (x, y)."""
    destino = (x, y)
    janela.blit(imagem, destino)
def desenharMapa(mapa, x, y):
    """Blit a maze/map background onto the main window at (x, y)."""
    destino = (x, y)
    janela.blit(mapa, destino)
def drawBarreirasLimitadoras(cor):
    """Draw the four boundary walls of the play field in the given colour."""
    # (start, end) points for each wall segment.
    segmentos = (
        ([16, 20], [484, 20]),    # top horizontal bar
        ([20, 20], [20, 480]),    # left vertical bar
        ([480, 20], [480, 480]),  # right vertical bar
        ([16, 480], [484, 480]),  # bottom horizontal bar
    )
    for inicio, fim in segmentos:
        pygame.draw.line(janela, cor, inicio, fim, espessura)
def drawCoracaoHud():
    """Draw the five heart icons of the life HUD along the bottom bar."""
    coracoes = (
        (coracaoImagem1, coracao1X, coracao1Y),
        (coracaoImagem2, coracao2X, coracao2Y),
        (coracaoImagem3, coracao3X, coracao3Y),
        (coracaoImagem4, coracao4X, coracao4Y),
        (coracaoImagem5, coracao5X, coracao5Y),
    )
    for imagem, x, y in coracoes:
        criarCoracao(imagem, x, y)
# Loading the in-game font (replaces the smaller menu font defined above).
fonte = pygame.font.Font('Gamer.ttf', 55)
# Heart (life) images — five copies drawn 36 px apart along the bottom HUD.
coracaoImagem1 = pygame.image.load(os.path.join(diretorio_imagens, 'coracao.png'))
coracaoImagem1 = pygame.transform.scale(coracaoImagem1, (36, 36))
coracao1X = 10
coracao1Y = 500
coracaoImagem2 = pygame.image.load(os.path.join(diretorio_imagens, 'coracao.png'))
coracaoImagem2 = pygame.transform.scale(coracaoImagem2, (36, 36))
coracao2X = 46
coracao2Y = 500
coracaoImagem3 = pygame.image.load(os.path.join(diretorio_imagens, 'coracao.png'))
coracaoImagem3 = pygame.transform.scale(coracaoImagem3, (36, 36))
coracao3X = 82
coracao3Y = 500
coracaoImagem4 = pygame.image.load(os.path.join(diretorio_imagens, 'coracao.png'))
coracaoImagem4 = pygame.transform.scale(coracaoImagem4, (36, 36))
coracao4X = 118
coracao4Y = 500
coracaoImagem5 = pygame.image.load(os.path.join(diretorio_imagens, 'coracao.png'))
coracaoImagem5 = pygame.transform.scale(coracaoImagem5, (36, 36))
coracao5X = 154
coracao5Y = 500
#----------------------------------------------------------------------------------------------
# Cannon images (the first two are mirrored horizontally).
canhaoImagem = pygame.image.load(os.path.join(diretorio_imagens, 'canhao.png'))
canhaoImagem = pygame.transform.scale(canhaoImagem, (24, 24))
canhaoImagem = pygame.transform.flip(canhaoImagem, True, False)
canhaoImagem2 = pygame.image.load(os.path.join(diretorio_imagens, 'canhao.png'))
canhaoImagem2 = pygame.transform.scale(canhaoImagem2, (24, 24))
canhaoImagem2 = pygame.transform.flip(canhaoImagem2, True, False)
canhaoImagem3 = pygame.image.load(os.path.join(diretorio_imagens, 'canhao.png'))
canhaoImagem3 = pygame.transform.scale(canhaoImagem3, (24, 24))
# Cannon 1 position
canhao1X = 475
canhao1Y = 100
# Cannon 1 bullet: starts at the cannon muzzle, with random firing delay.
bala1X = canhao1X
bala1Y = canhao1Y + 8
bala1MudançaX = 0
bala1MudançaY = 0
tempoBala1 = randint(1,3)
auxTempoBala1 = -tempoBala1
# Cannon 2 position
canhao2X = 475
canhao2Y = 225
# Cannon 2 bullet
bala2X = canhao2X
bala2Y = canhao2Y + 8
bala2MudançaX = 0
bala2MudançaY = 0
tempoBala2 = randint(1,3)
auxTempoBala2 = -tempoBala2
# Cannon 3 position
canhao3X = 475
canhao3Y = 350
# Cannon 3 bullet
bala3X = canhao3X
bala3Y = canhao3Y + 8
bala3MudançaX = 0
bala3MudançaY = 0
tempoBala3 = randint(1,3)
auxTempoBala3 = -tempoBala3
# Speed of the cannon bullets (pixels per frame)
velocidadeBala = 9
#-----------------------------------------------------------------------------------------------
# Map 1 image
mapa1 = pygame.image.load(os.path.join(diretorio_imagens, 'mapa.png'))
mapa1 = pygame.transform.scale(mapa1, (510, 510))
# Map 2 image
mapa2 = pygame.image.load(os.path.join(diretorio_imagens, 'mapa2.png'))
mapa2 = pygame.transform.scale(mapa2, (520, 460))
# Map 3 image
mapa3 = pygame.image.load(os.path.join(diretorio_imagens, 'mapa3.png'))
mapa3 = pygame.transform.scale(mapa3, (465, 465))
# Game-over screen image
gameoverImagem = pygame.image.load(os.path.join(diretorio_imagens, 'Game_Over.png'))
# Goal (finish line) position
chegadaX = 438
chegadaY = 27
# Player image
jogadorImagem = pygame.image.load(os.path.join(diretorio_imagens, 'jogador.png'))
jogadorImagem = pygame.transform.scale(jogadorImagem, (14, 14))
rectJogador = jogadorImagem.get_rect()
# Player position (438/27 would spawn next to the goal; 25/25 is the default)
jogadorX = 25 #438 #25 (default)
jogadorY = 25 #27 #25 (default)
jogadorMudançaX = 0
jogadorMudançaY = 0
# Player speed (pixels per frame)
velocidade = 2
# Life counter
vida = 5
# Thickness of the boundary walls
espessura = 9
# Variable that stores the elapsed time on map 1
tempoMapa1 = 0
# Logo splash screen --------------------------------------------------------------------------------
# NOTE(review): the fade loops below never pump the event queue, so the
# window may be flagged unresponsive by the OS during the intro — consider
# calling pygame.event.pump() inside the loops; confirm on target platforms.
pretoImagem = pygame.image.load(os.path.join(diretorio_imagens, 'preto.png'))
pretoImagem = pygame.transform.scale(pretoImagem, (500,550))
pretoImagem.set_alpha(0)
janela.fill(preto)
logoImagem = pygame.image.load(os.path.join(diretorio_imagens, 'logofinal1.png')).convert()
logoImagem.set_alpha(0)
for i in range(255): #fade-in of the logo
    pygame.time.delay(20)
    logoImagem.set_alpha(i)
    janela.blit(logoImagem, (0,0))
    pygame.display.flip()
for i in range(255): #fade-out (black image drawn with increasing alpha)
    pygame.time.delay(3)
    pretoImagem.set_alpha(i)
    janela.blit(pretoImagem, (0,0))
    pygame.display.flip()
informacaoImagem = pygame.image.load(os.path.join(diretorio_imagens, 'whiletrue.png'))
informacaoImagem.set_alpha(0)
for k in range(255): #fade-in of the second (info) image
    pygame.time.delay(30)
    informacaoImagem.set_alpha(k)
    janela.blit(informacaoImagem, (0,0))
    pygame.display.flip()
pygame.display.flip()
pygame.time.delay(100)
#Game menu ---------------------------------------------------------------------------------------
def menu (janela,wallpaper):
    """Run the title/menu screens.

    ``wallpaper`` selects the current screen image (``wallpaperN.png``):
    screen 3 is the name-entry screen where the player types a 3-letter
    name; the other values are navigated with SPACE/UP/DOWN (exact screen
    contents depend on the wallpaper images — confirm in assets).
    Returns the confirmed name (also stored in the global ``nome``).
    """
    pygame.font.init()
    fonte_base = pygame.font.SysFont("Arial",110)
    strJogador = ''
    # Countdown of typeable characters: starts at 4 and typing is allowed
    # while > 1, which limits the name to exactly 3 letters.
    ContadorLetras = 4
    global nome
    def wallpaper_changer (num_wallpaper):
        # Load the background image for the requested menu screen.
        fundo = pygame.image.load(os.path.join(diretorio_imagens, 'wallpaper{}.png'.format(num_wallpaper)))
        return fundo
    while janela != 0:
        janela.blit (wallpaper_changer(wallpaper),(0,0))
        pygame.time.delay(200)
        for event in pygame.event.get ():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            # Name entry: only handled on screen 3 while letters remain.
            if event.type == pygame.KEYDOWN and wallpaper == 3 and ContadorLetras > 0:
                strJogador = strJogador.upper()
                somClick.play()
                if len(strJogador) == 0 and event.key == pygame.K_BACKSPACE:
                    strJogador = ''
                elif event.key == pygame.K_BACKSPACE:
                    strJogador = strJogador[:-1]
                    ContadorLetras += 1
                # NOTE(review): because this branch precedes the SPACE check,
                # pressing SPACE with fewer than 3 letters typed appends a
                # space character to the name — confirm this is intended.
                elif ContadorLetras > 1:
                    strJogador += event.unicode
                    ContadorLetras -= 1
                    strJogador = strJogador.upper()
                # Reachable only once 3 letters are typed (ContadorLetras == 1).
                elif event.key == pygame.K_SPACE and len(strJogador) == 3:
                    nome = strJogador # store the player's name globally
                    return strJogador
        text_surface = fonte_base.render(strJogador,True,(255,255,255))
        # Screen navigation via the currently held key.
        comando = pygame.key.get_pressed()
        if wallpaper == 0 and comando[pygame.K_SPACE] : wallpaper = 1; somClick.play()
        elif wallpaper == 1 and comando[pygame.K_UP] : wallpaper = 3; somClick.play()
        elif wallpaper == 1 and comando[pygame.K_SPACE] : wallpaper = 1; somClick.play()
        elif wallpaper == 1 and comando[pygame.K_DOWN] : wallpaper = 2; somClick.play()
        elif wallpaper == 2 and comando[pygame.K_SPACE] : wallpaper = 1; somClick.play()
        elif wallpaper == 3 : janela.blit(text_surface,(160,185))
        pygame.display.update()
# Run the menu (starting on the title screen) until a name is confirmed.
menu(janela,0)
#-----------------------------------------------------------------------------------------
janela.fill(preto) # paint the background colour
# Swap the menu theme for the level-1 soundtrack.
pygame.mixer.music.stop()
musicatema = pygame.mixer.music.load(os.path.join(diretorio_sons, 'Map1Song.mp3'))
pygame.mixer.music.set_volume(0.08)
pygame.mixer.music.play(-1)
# 'Level 1' splash image shown before the game starts
NomeNivel1 = pygame.image.load(os.path.join(diretorio_imagens, 'nivel1.png'))
janela.blit(NomeNivel1, (0,0))
pygame.display.flip()
time.sleep(4.5)
#-----------------------------------------------------------------------------------------------------
listaX = []
listaY = []
janela.fill(azul) # repaint the window
desenharMapa(mapa1, -5, -5)
for | |
# <reponame>qmonnet/nic-firmware
#!/usr/bin/env python3
##
## Copyright (c) 2017-2018, Netronome Systems, Inc. All rights reserved.
## SPDX-License-Identifier: BSD-2-Clause
"""
This scripts generates troubleshoot init directives in an uc file
"""
import math
import sys
import argparse
class nfp_dump_type :
    """Numeric codes for the TLV entry types written into the dump spec.

    The codes mirror the type strings accepted in the input file
    ('cpp', 'xpb', 'me_csr', 'me_ind_csr', 'rtsym', 'hwinfo', 'fwname',
    'hwinfo_field').
    """
    type_cpp_csr = 0          # CPP-bus CSR dump
    type_xpb_csr = 1          # XPB-bus CSR dump
    type_me_csr = 2           # microengine CSR
    type_indirect_me_csr = 3  # indirectly addressed ME CSR
    type_rtsym = 4            # run-time symbol name
    type_hwinfo = 5           # full hardware-info dump
    type_fwname = 6           # firmware name
    type_hwinfo_field = 7     # single hardware-info field
    type_unset = 0xff         # sentinel: no @type directive parsed yet
def init_directive(symbol, offset, value):
    """Return a '.init' assembler directive storing *value* byte-swapped.

    *value* is an expression string; the emitted expression swaps its four
    bytes (big <-> little endian) before the word is written at
    ``symbol+offset``. The literal below is whitespace-significant for the
    generated .uc file and is therefore kept verbatim.
    """
    swap = "(((" + value + " & 0xff) << 24) \\\n | ((" + value + " & 0xff00) << 8) \\\n | ((" + value + " & 0xff0000) >> 8) \\\n | ((" + value + " & 0xff000000) >> 24))"
    return f".init {symbol}+{offset} {swap}\n"
def generate(input_file, output_path, prefix, filename):
#Dictionary of comments
comments = {}
comments["tat"] = ";tgt << 24 | act << 16 | tok << 8 | isld\n"
comments["CSR offset"] = ";CSR offset\n"
comments["dump length"] = ";dump length\n"
comments["tlv length"] = ";tlv length\n"
comments["level"] = ";level\n"
comments["level length"] = ";level length\n"
comments["value"] = ";value\n"
comments["string index"] = ";string index\n"
comments["CSR width"] = ";CSR width\n"
comments[nfp_dump_type.type_xpb_csr] = ";type: xpb\n"
comments[nfp_dump_type.type_cpp_csr] = ";type: cpp\n"
comments[nfp_dump_type.type_me_csr] = ";type: mecsr\n"
comments[nfp_dump_type.type_indirect_me_csr] = ";type: indirect mecsr\n"
comments[nfp_dump_type.type_rtsym] = ";type: rtsym\n"
comments[nfp_dump_type.type_hwinfo] = ";type: Hardware Info\n"
comments[nfp_dump_type.type_fwname] = ";type: Firmware Name\n"
comments[nfp_dump_type.type_hwinfo_field] = ";type: Hardware Info Field\n"
symbol = str(prefix)
symbol_name = "_" + symbol
#Output to be written to file
output_lines = ["#ifndef " + symbol.upper() + "_UC\n", \
"#define " + symbol.upper() + "_UC\n",\
";This file is automatically generated\n",\
";Do not edit\n",\
"; Format of data in NFP memory:\n", \
"; Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n", \
"; -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0\n", \
"; Word +---------------+---------------+---------------+---------------+\n", \
"; 0 | Level |\n", \
"; +---------------+---------------+---------------+---------------+\n", \
"; 1 | Level length in bytes, without \"level\" and \"level length\" |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; 2 | Type ( E.g. 0 = CPP, 1 = XPB, 2 = ME CSR, etc... |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; 3 | Length (not including Type and Length) |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; 4 | Value |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; ......\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Type |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Length |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Value |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; .....\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Level |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Level length |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Type |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Length |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; . | Value |\n", \
"; +-------------------------------+-------------------------------+\n", \
"; (etc...)\n", \
"\n"]
alloc_mem_idx = len(output_lines)
#Dictionary of types
types = {}
types["xpb"] = nfp_dump_type.type_xpb_csr
types["cpp"] = nfp_dump_type.type_cpp_csr
types["me_csr"] = nfp_dump_type.type_me_csr
types["me_ind_csr"] = nfp_dump_type.type_indirect_me_csr
types["rtsym"] = nfp_dump_type.type_rtsym
types["hwinfo"] = nfp_dump_type.type_hwinfo
types["fwname"] = nfp_dump_type.type_fwname
types["hwinfo_field"] = nfp_dump_type.type_hwinfo_field
prev_top = ""
size_of_csr_struct = 12
levels = []
rtsyms = []
csrs = []
csr = []
type = nfp_dump_type.type_unset
level = -1
lvl_len_offset = 0
lvl_idx = -1
lvl_len = 0
level_ended = True
tat_define = ""
offset = 0
version = ""
string_index = 0
max_csr_str_len = 0
with open(input_file, 'r') as in_file_handle:
for line in in_file_handle:
#strip leading and trailing white space
line = line.rstrip()
line = line.lstrip()
#Ignore empty lines
if line == "":
continue
#Ignore comments
if line.startswith("#") :
continue
#Parse metadata
if line.startswith("@") :
meta_str = line.split(":")
if meta_str[0] == "@type" :
if level == -1:
raise ValueError('Level not set')
type = types[meta_str[1]]
elif meta_str[0] == "@level" :
if not level_ended :
raise ValueError("Previous level not ended")
level = int(meta_str[1])
lvl_len = 0
lvl_len_offset = offset
offset += 8
lvl_idx = len(output_lines)
level_ended = False
elif meta_str[0] == "@level_end" :
if level_ended :
raise ValueError("Found @level_end without @level")
#This level is done, write the length
output_lines.insert(lvl_idx, comments["level"])
lvl_idx += 1
output_lines.insert(lvl_idx, init_directive(symbol_name, lvl_len_offset, str(level)))
lvl_len_offset += 4
lvl_idx += 1
output_lines.insert(lvl_idx, comments["level length"])
lvl_idx += 1
output_lines.insert(lvl_idx, init_directive(symbol_name, lvl_len_offset, str(lvl_len)))
lvl_len_offset += 4
level_ended = True
elif meta_str[0] == "@version" :
version = meta_str[1]
else :
raise ValueError('Unknown Metadata')
continue
if type != nfp_dump_type.type_unset :
if type == nfp_dump_type.type_rtsym or type == nfp_dump_type.type_hwinfo_field :
if level == -1:
raise ValueError('Level not set')
output_lines.append(comments[type])
output_lines.append(init_directive(symbol_name, offset, str(type)))
offset += 4
lvl_len += 4
str_len = len(line) + (4 - len(line) % 4)
if str_len > 128:
raise ValueError("RT symbol \""+ line +"\" name length exceeds 127 bytes")
output_lines.append(comments["tlv length"])
output_lines.append(init_directive(symbol_name, offset, str(str_len)))
offset += 4
lvl_len += 4
output_lines.append(comments["value"])
output_lines.append(";" + line + "\n")
init_str = ""
count = 1
for c in list(line) :
init_str += str(format(ord(c), 'x'))
if count % 4 == 0:
init_str = "0x" + init_str
output_lines.append(init_directive(symbol_name, offset, init_str))
lvl_len += 4
offset += 4
init_str = ""
count +=1
for i in range(count, (str_len + 1)) :
init_str += "00"
init_str = "0x" + init_str
output_lines.append(init_directive(symbol_name, offset, init_str))
offset += 4
lvl_len += 4
output_lines.append("\n")
elif type == nfp_dump_type.type_hwinfo or type == nfp_dump_type.type_fwname:
if level == -1:
raise ValueError('Level not set')
output_lines.append(comments[type])
output_lines.append(init_directive(symbol_name, offset, str(type)))
offset += 4
lvl_len += 4
output_lines.append(comments["tlv length"])
output_lines.append(init_directive(symbol_name, offset, str(0)))
offset += 4
lvl_len += 4
output_lines.append("\n")
else :
if level == -1:
raise ValueError('Level not set')
csr = line.split()
start_csr = csr[0].split(".")
start_top = start_csr[0] + "." + start_csr[1]
from_csr = csr[0]
if len(csr) < 2 :
to_csr = csr[0]
else :
to_csr = csr[1]
if start_top != prev_top :
output_lines.append("\n")
if tat_define != "":
output_lines.append("#undef "+ tat_define + "\n")
output_lines.append("\n")
prev_top = start_top
tat_define = "__" + start_csr[0].split(":")[1] + "_" + start_csr[1]
top_value = "((csr_cpp_tgt(\"" + start_top + "\") << 24) \\\n" + \
" | (csr_cpp_rdact(\"" + start_top + "\") << 16) \\\n" + \
" | (csr_cpp_rdtok(\"" + start_top + "\") << 8) \\\n" + \
" | csr_isld(\"" + start_top + "\"))"
output_lines.append("#define "+ tat_define + " " + top_value + "\n")
output_lines.append("\n")
output_lines.append(comments[type])
output_lines.append(init_directive(symbol_name, offset, str(type)))
offset += 4
lvl_len += 4
size_of_csr_struct = 0
csr_len_idx = len(output_lines)
csr_len_offset = offset
offset += 4
lvl_len += 4
output_lines.append(comments["tat"])
output_lines.append(init_directive(symbol_name, offset, tat_define))
offset += 4
lvl_len += 4
size_of_csr_struct += 4
output_lines.append(comments["CSR offset"])
output_lines.append(init_directive(symbol_name, offset, "csr_offset(\"" + from_csr + "\")"))
offset += 4
lvl_len += 4
size_of_csr_struct += 4
output_lines.append(comments["dump length"])
length_str = "(csr_offset(\"" + to_csr + "\") - csr_offset(\"" + from_csr + "\") + csr_width(\"" + from_csr + "\")/8)"
output_lines.append(init_directive(symbol_name, offset, length_str))
offset += 4
lvl_len += 4
size_of_csr_struct += 4
output_lines.append(comments["CSR width"])
output_lines.append(init_directive(symbol_name, offset, "csr_width(\"" + from_csr + "\")"))
offset += 4
lvl_len += 4
size_of_csr_struct += 4
# output_lines.append(comments["string index"])
# output_lines.append(init_directive(symbol_name, offset, str(string_index)))
# string_index += 1
# csrs.append(from_csr)
# if len(from_csr) > max_csr_str_len:
# max_csr_str_len = len(from_csr)
# offset += 4
# lvl_len += 4
# size_of_csr_struct += 4
output_lines.insert(csr_len_idx, comments["tlv length"])
csr_len_idx += 1
output_lines.insert(csr_len_idx, init_directive(symbol_name, csr_len_offset, str(size_of_csr_struct)))
else :
raise ValueError('Unknown Type')
offset = offset + (8 - offset % 8)
output_lines.insert(alloc_mem_idx, ".alloc_mem " + symbol_name + " emem global " + str(offset) + " 256\n")
alloc_mem_idx += 1
if version :
output_lines.insert(alloc_mem_idx, ".alloc_mem " + symbol_name + "_version emem global 4 256\n")
alloc_mem_idx += 1
output_lines.insert(alloc_mem_idx, init_directive(symbol_name + "_version", 0, str(version)))
alloc_mem_idx +=1
output_lines.insert(alloc_mem_idx, "\n")
alloc_mem_idx += 1
output_lines.append("#endif\n")
asm_file_name = output_path + "/" + filename + "_init.uc"
out_file_handle = open(asm_file_name, "w")
out_file_handle.writelines(output_lines)
out_file_handle.close
print("Generated", asm_file_name)
c_file_name = output_path + "/" + filename + ".c"
out_file_handle = open(c_file_name, "w")
out_file_handle.writelines("")
out_file_handle.writelines("__export | |
# <filename>data/data_nci.py
import os
import numpy as np
import logging
import gc
import h5py
from skimage import transform
import utils
import config.system_paths as sys_config
import pydicom as dicom
import nrrd
import subprocess
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# Maximum number of data points that can be in memory at any time
# NOTE(review): not referenced in the visible portion of this file —
# presumably consumed by the HDF5 write loop further down; confirm.
MAX_WRITE_BUFFER = 5
# ===============================================================
# ===============================================================
def test_train_val_split(patient_id,
sub_dataset,
cv_fold_number):
if cv_fold_number == 1:
if sub_dataset == 'RUNMC':
if patient_id < 16: # 26
return 'train'
elif patient_id < 21: # 31
return 'validation'
else:
return 'test'
if sub_dataset == 'BMC':
if patient_id < 45: # 74
return 'train'
elif patient_id < 59: # 83
return 'validation'
else:
return 'test'
# ===============================================================
# ===============================================================
def count_slices(image_folder,
                 folder_base,
                 sub_dataset,
                 cv_fold_number):
    """Count the DICOM slices per split for time-point-01 scans.

    Walks every patient folder named ``<folder_base>-01-...`` under
    *image_folder* and tallies the '.dcm' files into the split chosen by
    ``test_train_val_split``. Time points -02 and -03 are ignored (as in
    the original code).
    """
    num_slices = {'train': 0,
                  'test': 0,
                  'validation': 0}
    prefix = folder_base + '-01'
    for entry in os.listdir(image_folder):
        if not entry.startswith(prefix):
            continue
        patient_id = int(entry.split('-')[-1])
        if patient_id > 82:  # labels exist only for these patients
            continue
        for _, _, filenames in os.walk(os.path.join(image_folder, entry)):
            # Tally the DICOM files in this directory.
            n_dicom = sum(1 for name in filenames
                          if name.lower().endswith('.dcm'))
            if n_dicom:
                split = test_train_val_split(patient_id,
                                             sub_dataset,
                                             cv_fold_number)
                num_slices[split] += n_dicom
    return num_slices
# ===============================================================
# ===============================================================
def get_patient_folders(image_folder,
                        folder_base,
                        sub_dataset,
                        cv_fold_number):
    """Group time-point-01 patient folders by dataset split.

    Returns a dict with keys 'train', 'test' and 'validation', each holding
    the full paths of the matching patient folders. Time points -02 and
    -03 are ignored (as in the original code).
    """
    folder_list = {'train': [],
                   'test': [],
                   'validation': []}
    prefix = folder_base + '-01'
    for entry in os.listdir(image_folder):
        if not entry.startswith(prefix):
            continue
        patient_id = int(entry.split('-')[-1])
        if patient_id <= 82:  # labels exist only for these patients
            split = test_train_val_split(patient_id, sub_dataset, cv_fold_number)
            folder_list[split].append(os.path.join(image_folder, entry))
    return folder_list
# ===============================================================
# ===============================================================
def prepare_data(input_folder,
                 preprocessing_folder,
                 output_file,
                 size,
                 target_resolution,
                 sub_dataset,
                 cv_fold_num):
    """Preprocess one prostate MR sub-dataset into a single hdf5 file.

    For every patient the DICOM series is read, saved as NIFTI, N4
    bias-field corrected, intensity normalised, and the nrrd label volume
    is loaded. Each axial slice is rescaled to `target_resolution`,
    cropped/padded to `size` x `size` and written into train/test/validation
    datasets of `output_file`, together with per-volume metadata
    (original shape, pixel sizes, patient names).

    Args:
        input_folder: Root folder containing the raw image/label folders.
        preprocessing_folder: Folder for intermediate NIFTI files.
        output_file: Path of the hdf5 file that will be created.
        size: Target in-plane image size (slices become size x size).
        target_resolution: Target in-plane pixel size.
        sub_dataset: 'RUNMC' or 'BMC'.
        cv_fold_num: Cross-validation fold used for the train/val split.
    """
    # =======================
    # sub-dataset specific input locations and folder naming scheme
    # =======================
    if sub_dataset == 'RUNMC':
        image_folder = input_folder + 'Prostate-3T/'
        label_folder = input_folder + 'NCI_ISBI_Challenge-Prostate3T_Training_Segmentations/'
        folder_base = 'Prostate3T'
    elif sub_dataset == 'BMC':
        image_folder = input_folder + 'PROSTATE-DIAGNOSIS/Images/'
        label_folder = input_folder + 'PROSTATE-DIAGNOSIS/Labels/'
        folder_base = 'ProstateDx'
    # =======================
    # open the output file for writing (overwrites an existing file)
    # =======================
    hdf5_file = h5py.File(output_file, "w")
    # =======================
    # =======================
    logging.info('Counting files and parsing meta data...')
    folder_list = get_patient_folders(image_folder,
                                      folder_base,
                                      sub_dataset,
                                      cv_fold_num)
    num_slices = count_slices(image_folder,
                              folder_base,
                              sub_dataset,
                              cv_fold_num)
    nx = size
    ny = size
    n_test = num_slices['test']
    n_train = num_slices['train']
    n_val = num_slices['validation']
    # =======================
    # Create datasets for images and masks
    # =======================
    data = {}
    for tt, num_points in zip(['test', 'train', 'validation'], [n_test, n_train, n_val]):
        if num_points > 0:
            data['images_%s' % tt] = hdf5_file.create_dataset("images_%s" % tt, list((size,size)) + [num_points], dtype=np.float32)
            data['labels_%s' % tt] = hdf5_file.create_dataset("labels_%s" % tt, list((size,size)) + [num_points], dtype=np.uint8)
    # Per-split buffers for slices and per-volume metadata.
    lbl_list = {'test': [], 'train': [], 'validation': []}
    img_list = {'test': [], 'train': [], 'validation': []}
    nx_list = {'test': [], 'train': [], 'validation': []}
    ny_list = {'test': [], 'train': [], 'validation': []}
    nz_list = {'test': [], 'train': [], 'validation': []}
    px_list = {'test': [], 'train': [], 'validation': []}
    py_list = {'test': [], 'train': [], 'validation': []}
    pz_list = {'test': [], 'train': [], 'validation': []}
    pat_names_list = {'test': [], 'train': [], 'validation': []}
    # =======================
    # =======================
    logging.info('Parsing image files')
    for train_test in ['test', 'train', 'validation']:
        write_buffer = 0
        counter_from = 0
        patient_counter = 0
        for folder in folder_list[train_test]:
            patient_counter += 1
            logging.info('================================')
            logging.info('Doing: %s' % folder)
            patname = folder_base + '-' + str(folder.split('-')[-2]) + '-' + str(folder.split('-')[-1])
            pat_names_list[train_test].append(patname)
            # Make a list of all dicom files in this folder
            listFilesDCM = [] # create an empty list
            for dirName, subdirList, fileList in os.walk(folder):
                for filename in fileList:
                    if ".dcm" in filename.lower(): # check whether the file's DICOM
                        listFilesDCM.append(os.path.join(dirName, filename))
            # Get a reference dicom file and extract info such as number of rows, columns, and slices (along the Z axis)
            RefDs = dicom.read_file(listFilesDCM[0])
            ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(listFilesDCM))
            pixel_size = (float(RefDs.PixelSpacing[0]), float(RefDs.PixelSpacing[1]), float(RefDs.SliceThickness))
            px_list[train_test].append(float(RefDs.PixelSpacing[0]))
            py_list[train_test].append(float(RefDs.PixelSpacing[1]))
            pz_list[train_test].append(float(RefDs.SliceThickness))
            print('PixelDims')
            print(ConstPixelDims)
            print('PixelSpacing')
            print(pixel_size)
            # The array is sized based on 'ConstPixelDims'
            img = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)
            # loop through all the DICOM files
            for filenameDCM in listFilesDCM:
                # read the file
                ds = dicom.read_file(filenameDCM)
                # ======
                # store the raw image data
                # img[:, :, listFilesDCM.index(filenameDCM)] = ds.pixel_array
                # index number field is not set correctly!
                # instead instance number is the slice number.
                # ======
                img[:, :, ds.InstanceNumber - 1] = ds.pixel_array
            # ================================
            # save as nifti, this sets the affine transformation as an identity matrix
            # ================================
            nifti_img_path = preprocessing_folder + 'Individual_NIFTI/' + patname
            if not os.path.exists(nifti_img_path):
                utils.makefolder(nifti_img_path)
            utils.save_nii(img_path = nifti_img_path + '_img.nii.gz', data = img, affine = np.eye(4))
            # ================================
            # do bias field correction
            # ================================
            input_img = nifti_img_path + '_img.nii.gz'
            output_img = nifti_img_path + '_img_n4.nii.gz'
            # If bias corrected image does not exist, do it now
            if os.path.isfile(output_img):
                img = utils.load_nii(img_path = output_img)[0]
            else:
                # NOTE(review): machine-specific absolute path to the N4 binary.
                subprocess.call(["/itet-stor/arismu/bmicdatasets_bmicnas01/Sharing/N4_th", input_img, output_img])
                img = utils.load_nii(img_path = output_img)[0]
            # ================================
            # normalize the image
            # ================================
            img = utils.normalise_image(img, norm_type='div_by_max')
            # ================================
            # read the labels
            # ================================
            print(folder.split('/')[-1])
            lbl_path = os.path.join(label_folder, folder.split('/')[-1] + '.nrrd')
            lbl, options = nrrd.read(lbl_path)
            # fix swap axis
            lbl = np.swapaxes(lbl, 0, 1)
            # ================================
            # https://wiki.cancerimagingarchive.net/display/Public/NCI-ISBI+2013+Challenge+-+Automated+Segmentation+of+Prostate+Structures
            # A competitor reported an issue with case ProstateDx-01-0055, which has a dimension mismatch.
            # The segmentation has dimensions 400x400x23 whereas the DICOM image series have dimensions of 400x400x34.
            # We checked the case and indeed the dimensions seem to not correspond on Z (23 vs 34); however, the labels are properly spatially placed.
            # We don't currently see a problem with using the case.
            # ================================
            if patname == 'ProstateDx-01-0055':
                # Zero-pad the label volume along Z so it matches the image.
                lbl_tmp = np.zeros(shape = img.shape, dtype = lbl.dtype)
                lbl_tmp[:, :, :lbl.shape[2]] = lbl
                lbl = lbl_tmp
            # ================================
            # save as nifti, this sets the affine transformation as an identity matrix
            # ================================
            utils.save_nii(img_path = nifti_img_path + '_lbl.nii.gz', data = lbl, affine = np.eye(4))
            nx_list[train_test].append(lbl.shape[0])
            ny_list[train_test].append(lbl.shape[1])
            nz_list[train_test].append(lbl.shape[2])
            print('lbl.shape')
            print(lbl.shape)
            print('img.shape')
            print(img.shape)
            ### PROCESSING LOOP FOR SLICE-BY-SLICE 2D DATA ###################
            scale_vector = [pixel_size[0] / target_resolution,
                            pixel_size[1] / target_resolution]
            for zz in range(img.shape[2]):
                slice_img = np.squeeze(img[:, :, zz])
                slice_rescaled = transform.rescale(slice_img,
                                                   scale_vector,
                                                   order=1,
                                                   preserve_range=True,
                                                   multichannel=False,
                                                   mode = 'constant')
                slice_lbl = np.squeeze(lbl[:, :, zz])
                lbl_rescaled = transform.rescale(slice_lbl,
                                                 scale_vector,
                                                 order=0,
                                                 preserve_range=True,
                                                 multichannel=False,
                                                 mode='constant')
                slice_cropped = utils.crop_or_pad_slice_to_size(slice_rescaled, nx, ny)
                lbl_cropped = utils.crop_or_pad_slice_to_size(lbl_rescaled, nx, ny)
                img_list[train_test].append(slice_cropped)
                lbl_list[train_test].append(lbl_cropped)
                write_buffer += 1
                # Writing needs to happen inside the loop over the slices
                if write_buffer >= MAX_WRITE_BUFFER:
                    counter_to = counter_from + write_buffer
                    _write_range_to_hdf5(data, train_test, img_list, lbl_list, counter_from, counter_to)
                    _release_tmp_memory(img_list, lbl_list, train_test)
                    # reset stuff for next iteration
                    counter_from = counter_to
                    write_buffer = 0
        logging.info('Writing remaining data')
        counter_to = counter_from + write_buffer
        _write_range_to_hdf5(data, train_test, img_list, lbl_list, counter_from, counter_to)
        _release_tmp_memory(img_list, lbl_list, train_test)
    # Write the small datasets
    for tt in ['test', 'train', 'validation']:
        hdf5_file.create_dataset('nx_%s' % tt, data=np.asarray(nx_list[tt], dtype=np.uint16))
        hdf5_file.create_dataset('ny_%s' % tt, data=np.asarray(ny_list[tt], dtype=np.uint16))
        hdf5_file.create_dataset('nz_%s' % tt, data=np.asarray(nz_list[tt], dtype=np.uint16))
        hdf5_file.create_dataset('px_%s' % tt, data=np.asarray(px_list[tt], dtype=np.float32))
        hdf5_file.create_dataset('py_%s' % tt, data=np.asarray(py_list[tt], dtype=np.float32))
        hdf5_file.create_dataset('pz_%s' % tt, data=np.asarray(pz_list[tt], dtype=np.float32))
        hdf5_file.create_dataset('patnames_%s' % tt, data=np.asarray(pat_names_list[tt], dtype="S20"))
    # After test train loop:
    logging.info('Test train loop done')
    hdf5_file.close()
# ===============================================================
# Helper function to write a range of data to the hdf5 datasets
# ===============================================================
def _write_range_to_hdf5(hdf5_data,
train_test,
img_list,
lbl_list,
counter_from,
counter_to):
logging.info('Writing data from %d to %d' % (counter_from, counter_to))
img_arr = np.asarray(img_list[train_test], dtype=np.float32)
lbl_arr = np.asarray(lbl_list[train_test], dtype=np.uint8)
img_arr = np.swapaxes(img_arr, 0, 2)
lbl_arr = np.swapaxes(lbl_arr, 0, 2)
hdf5_data['images_%s' % train_test][..., counter_from:counter_to] = img_arr
hdf5_data['labels_%s' % train_test][..., counter_from:counter_to] = lbl_arr
# ===============================================================
# Helper function to reset the tmp lists and free the memory
# ===============================================================
def _release_tmp_memory(img_list, lbl_list, train_test):
img_list[train_test].clear()
lbl_list[train_test].clear()
gc.collect()
# ===============================================================
# ===============================================================
def load_and_maybe_process_data(input_folder,
preprocessing_folder,
size,
target_resolution,
force_overwrite=False,
sub_dataset = 'BMC', # RUNMC / BMC
cv_fold_num = 1):
#size_str = '_'.join([str(i) for i in size])
#res_str = '_'.join([str(i) for i in target_resolution])
data_file_name = 'data_2d_size_%s_res_%s_cv_fold_%d_%s.hdf5' % (size, target_resolution, cv_fold_num, sub_dataset)
data_file_path = os.path.join(preprocessing_folder, data_file_name)
utils.makefolder(preprocessing_folder)
if not os.path.exists(data_file_path) or force_overwrite:
logging.info('This configuration of mode, size and target resolution has not yet been preprocessed')
logging.info('Preprocessing now!')
prepare_data(input_folder,
preprocessing_folder,
data_file_path,
size,
target_resolution,
sub_dataset,
cv_fold_num)
else:
logging.info('Already preprocessed this configuration. Loading | |
Returns:
Process: The desired process.
Raises:
ValueError: Unknown process key.
"""
if key not in self._processes:
raise ValueError("Unknown process key.")
return self._processes[key]
def get_processes(self, only_alive: bool = False) -> Dict[str, Process]:
"""Get all managed processes or only the alive ones as dictionary with the process key as dict key.
An individual process can be retrieved by key via `get_process()`.
Args:
only_alive: True, if only alive processes shall be returned instead of all. Defaults to False.
Returns:
Dict[str, Process]: Dictionary with process keys as dict keys and the respective processes as dict values.
"""
if only_alive:
ret_processes = {}
for key, process in self._processes.items():
if process.is_alive():
ret_processes.update({key: process})
return ret_processes
else:
return self._processes
def stop_process(self, key: str) -> None:
"""Stop a process by its key.
Args:
key: The key identifying the process.
Raises:
ValueError: Unknown process key.
"""
if key not in self._processes:
raise ValueError("Unknown process key.")
self._processes[key].terminate()
self.log.debug(f"Process with key {key} stopped in Runtime {self.host}")
    def has_free_port(self, port: int) -> bool:
        """Checks if the port is available on the runtime.

        Args:
            port: The port which will be checked.

        Returns:
            bool: True if port is free, else False.
        """
        self.log.debug(f"Checking if port {str(port)} is free on Runtime {self.host}")
        with self._fabric_connection as cxn:
            # Remote one-liner: socket.connect_ex returns a non-zero error code
            # when nothing is listening on the port, so 'free' is only printed
            # for an available port; an occupied port produces no output.
            cmd_str = (
                "python -c \"import socket;print('free') "
                "if socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('localhost', "
                + str(port)
                + ')) else None"'
            )
            res = cxn.run(cmd_str, hide=True)
        # Any captured stdout (the printed 'free') means the port is available.
        return True if res.stdout else False
def print_info(self) -> None:
"""Print the Runtime info formatted as table."""
info = self.info
print(
"\u001b[1mInformation of `"
+ self.class_name
+ "` "
+ self.host
+ ":\u001b[0m"
)
for key, value in info.items():
if key == "memory":
display_value = str(self.memory_in_mb) + " mb"
elif isinstance(value, list):
display_value = ""
for gpu in value:
display_value = "{}".format(gpu)
else:
display_value = value
print("{:<8} {:<8}".format(key, display_value))
def check_filter(
self,
gpu_required: bool = False,
min_memory: Optional[int] = None,
min_cpu_cores: Optional[int] = None,
installed_executables: Union[str, List[str], None] = None,
filter_commands: Union[str, List[str], None] = None,
) -> bool:
"""Checks the `Runtime` object for certain filter criteria.
Args:
gpu_required: True, if gpu availability is required. Defaults to False.
min_memory: The minimal amount of memory in MB. Defaults to None, i.e. not restricted.
min_cpu_cores: The minimum number of cpu cores required. Defaults to None, i.e. not restricted.
installed_executables: Possibility to check if an executable is installed. E.g. if the executable `ping` is
installed.
filter_commands: Shell commands that can be used for generic filtering. See examples. A filter command must
echo true to be evaluated to True, everything else will be interpreted as False. Defaults
to None.
Returns:
bool: True, if all filters were successfully checked otherwise False.
Examples:
```python
# Check if the `Runtime` has a specific executable installed
# such as `ping` the network administration software utility.
check_passed = runtime.check_filter(installed_executables='ping')
# Check if a variable `WORKSPACE_VERSION` is set on the `Runtime`
filter_str = '[ ! -z "$WORKSPACE_VERSION" ] && echo "true" || echo "false"'
check_passed = runtime.check_filter(filer_commands=filter_str)
```
"""
self.log.debug(f"Start executing check_filter() for Runtime {self.host}")
all_filters_checked = True
if gpu_required and not self.gpus:
self.log.debug(f"Runtime {self.host} does not have GPUs.")
all_filters_checked = False
if min_memory and self.memory_in_mb < min_memory:
self.log.debug(
f"Runtime {self.host} has only {str(self.memory_in_mb)} mb instead of {str(min_memory)} as required."
)
all_filters_checked = False
if min_cpu_cores and self.cpu_cores < min_cpu_cores:
self.log.debug(
f"Runtime {self.host} has only {str(self.cpu_cores)} instead of {str(min_cpu_cores)} as required."
)
all_filters_checked = False
if installed_executables:
for executable_name in _utils.create_list_from_parameter_value(
installed_executables
):
if not self._has_executable_installed(str(executable_name)):
self.log.debug(
f"Runtime {self.host} does not have executable {str(executable_name)} installed."
)
all_filters_checked = False
if filter_commands:
for filter_command in _utils.create_list_from_parameter_value(
filter_commands
):
if not self._filter_command_checked(str(filter_command)):
self.log.debug(
f"Filter filter_commands could not be checked successfully on Runtime"
f" {self.host}."
)
all_filters_checked = False
return all_filters_checked
def create_tempdir(self) -> str:
"""Create a temporary directory and return its name/path.
Returns:
str: The name/path of the directory.
"""
with self._fabric_connection as cxn:
cmd_str = 'python -c "import tempfile; print(tempfile.mkdtemp())"'
res = cxn.run(cmd_str, hide=True)
path = res.stdout.split("\n")[0]
if not path:
path = res.stdout
self.log.debug(f"Temporary directory {path} created on Runtime {self.host}")
return path
def create_dir(self, path: str) -> None:
"""Create a directory. All folders in the path will be created if not existing.
Args:
path: The full path of the directory to be created.
Raises:
PathCreationError: If the path could not be created successfully.
"""
try:
with self._fabric_connection as cxn:
cmd_str = "mkdir -p " + path
res = cxn.run(cmd_str, hide=True)
if res.stderr:
raise PathCreationError(path, self.host)
else:
self.log.debug(f"Directory {path} created on Runtime {self.host}")
except Exception:
raise PathCreationError(path, self.host)
def delete_dir(self, path: str) -> bool:
"""Delete a directory recursively. If at least one contained file could not be removed then False is returned.
Args:
path: The full path of the directory to be deleted.
Returns:
bool: True if the directory could be deleted successfully.
"""
from invoke.exceptions import ThreadException
with self._fabric_connection as cxn:
cmd_str = f"rm -r {path} 2> /dev/null"
try:
res = cxn.run(cmd_str, warn=True)
except ThreadException:
self.log.warning(
f"ThreadException occured when deleting the directory {path}"
)
return True
if res.ok:
self.log.debug(f"Directory {path} deleted from Runtime {self.host}")
return True
else:
self.log.debug(
f"Directory {path} may not be deleted on Runtime {self.host}"
)
return False
def join(self) -> None:
"""Blocks until `RuntimeTasks` which were started via the `runtime.execute_task()` method terminated."""
self.log.info(
f"Joining all processes executing a RuntimeTask that were started via the Runtime {self.host}"
)
for task in self._tasks:
task.join()
def cleanup(self) -> None:
"""Release all acquired resources and terminate all processes."""
self.log.info(f"Start cleanup of Runtime {self.host}.")
for key, process in self._processes.items():
process.terminate()
process.join()
if process.is_alive():
self.log.warning(f"Process with key {key} could not be terminated")
else:
self.log.debug(f"Process with key {key} terminated")
if self._working_dir_is_temp and self._working_dir:
success = self.delete_dir(self._working_dir)
if success:
self.log.debug(
f"Temporary directory {self.working_dir} of Runtime {self.host} removed."
)
self._working_dir = None
self._working_dir_is_temp = False
else:
self.log.warning(
f"Temporary directory {self.working_dir} of Runtime {self.host} could not be"
f" removed."
)
for task in self._tasks:
task.cleanup()
def echo(self, msg: str) -> str:
"""Convenient method for echoing a string on the `Runtime` and returning the result."""
cxn = self._fabric_connection
with cxn.cd(self.working_dir):
return cxn.run(f"echo {msg}", env=self._env_variables, hide=True).stdout
# - Private methods -#
def _create_process_key_for_port_exposure(
self, direction: str, local_port: int, runtime_port: int
) -> str:
"""Create a process key for processes exposing ports, i.e. keeping ssh tunnels open.
This key will act as an identifier for internally generated processes.
Args:
direction (str): [description]
local_port (int): [description]
runtime_port (int): [description]
Raises:
ValueError: If direction has an invalid value.
Returns:
str: Generated key.
"""
if not local_port:
local_port = runtime_port
if not runtime_port:
runtime_port = local_port
delimiter = self._PROCESS_KEY_DELIMITER
if direction == self._PORT_FROM_RUNTIME:
return (
self.host
+ delimiter
+ self._PORT_FROM_RUNTIME
+ delimiter
+ str(local_port)
+ delimiter
+ str(runtime_port)
)
elif direction == self._PORT_TO_RUNTIME:
return (
self.host
+ delimiter
+ self._PORT_TO_RUNTIME
+ delimiter
+ str(runtime_port)
+ delimiter
+ str(local_port)
)
else:
raise ValueError(
direction + " is not a supported runtime process prefix type"
)
def _create_process_key_for_task_execution(self, task: RuntimeTask) -> str:
"""Generate keys used to identify subprocesses.
Create a process key for processes started to execute a `RuntimeTasks` asynchronously
This key will act as an identifier for internally generated processes.
Args:
task (RuntimeTask): The task that will be scanned for processes.
Returns:
str: Generated key.
"""
return (
self.host
+ self._PROCESS_KEY_DELIMITER
+ self._TASK_PROCESS_KEY_PREFIX
+ self._PROCESS_KEY_DELIMITER
+ str(task.name)
)
@classmethod
def _create_executable_installed_shell_cmd(cls, executable: str) -> str:
return "hash " + executable + ' 2>/dev/null && echo "true" || echo ""'
def _has_executable_installed(self, executable_name: str) -> bool:
"""Checks if an executable is installed on the runtime."""
shell_cmd = self._create_executable_installed_shell_cmd(executable_name)
return self._filter_command_checked(shell_cmd)
def _filter_command_checked(self, shell_cmd: str) -> bool:
task = RuntimeTask("_filter_command_checked")
task.run_command(shell_cmd)
self.execute_task(task, execute_async=False)
# Check the last log entry for the string true
result = str(task.execution_log[len(task.execution_log) - 1])
return True if result.lower() == "true" else False
@property
def _fabric_connection(self) -> Connection:
"""Get a new fabric connection to the runtime.
Note: We set the `fabric.Connection` parameter `inline_ssh_env=True`.
Raises:
ValueError: If user or port values | |
self.ax1 = gs.figure.add_subplot(gs[:, 0:-1])
self.ax2 = gs.figure.add_subplot(gs[:, -1])
# self.ax2.get_yaxis().set_visible(False)
# self.ax1 = self.figure.add_subplot(111)
self.figure.subplots_adjust(left=0.05,
bottom=0.12,
right=0.95,
top=0.95,
wspace=None,
hspace=0.6)
self.filters = {'Notch': 'none',
'Bandpass': 'none',
}
self.notchs = ('none', '50 Hz', '60 Hz')
self.bandpass = ('none', 'delta', 'theta', 'alpha', 'beta',
'0.01-20 Hz',
'5-45 Hz', '3-30 Hz', '4-40 Hz', '2-45 Hz', '1-50 Hz',
'7-13 Hz', '15-50 Hz', '1-100 Hz', '5-50 Hz')
self.add_radios('Notch', self.notchs, callback=self.set_filters,
area='top', stretch=0)
self.add_radios('Bandpass', self.bandpass, callback=self.set_filters,
area='top', stretch=0)
self.scale = self.add_spin('Scale', 150, suffix='uv', min_=0,
max_=1000, step=50, callback=self.fit, area='top',
stretch=0)
# ----------------------------------------------------------------------
    @wait_for_it
    def fit(self):
        """Apply the selected filters and redraw spectrum (ax1) and traces (ax2).

        Takes the original EEG from the pipeline input, runs the active
        notch/bandpass filters over it, plots the filtered channels stacked
        vertically (offset by the 'Scale' spin value) and a per-channel Welch
        power spectrum, then propagates the filtered signal downstream.
        """
        eeg = self.pipeline_input.original_eeg
        timestamp = self.pipeline_input.timestamp
        # Apply every configured filter that is not set to 'none'.
        # NOTE(review): the sample rate is hard-coded to 1000 Hz here — confirm
        # it matches the input's header sample rate.
        for f in self.filters:
            if self.filters[f] != 'none':
                eeg = self.filters[f](eeg, fs=1000, axis=1)
        self.ax1.clear()
        self.ax2.clear()
        t = np.linspace(0, eeg.shape[1], eeg.shape[1], endpoint=True) / 1000
        channels = eeg.shape[0]
        # threshold = max(eeg.max(axis=1) - eeg.min(axis=1)).round()
        # threshold = max(eeg.std(axis=1)).round()
        # Vertical offset between stacked channel traces (user-controlled).
        threshold = self.scale.value()
        # eeg_d = decimate(eeg, 15, axis=1)
        # timestamp = np.linspace(
        #     0, t[-1], eeg_d.shape[1], endpoint=True)
        for i, ch in enumerate(eeg):
            self.ax2.plot(t, ch + (threshold * i))
        self.ax1.set_xlabel('Frequency [$Hz$]')
        self.ax1.set_ylabel('Amplitude')
        self.ax2.set_xlabel('Time [$s$]')
        self.ax2.set_yticks([threshold * i for i in range(channels)])
        self.ax2.set_yticklabels(
            self.pipeline_input.header['channels'].values())
        self.ax2.set_ylim(-threshold, threshold * channels)
        # self.output_signal = eeg
        # Median-averaged Welch periodogram per channel.
        w, spectrum = welch(eeg, fs=1000, axis=1,
                            nperseg=1024, noverlap=256, average='median')
        # spectrum = decimate(spectrum, 15, axis=1)
        # w = np.linspace(0, w[-1], spectrum.shape[1])
        for i, ch in enumerate(spectrum):
            self.ax1.fill_between(w, 0, ch, alpha=0.2, color=f'C{i}')
            self.ax1.plot(w, ch, linewidth=2, color=f'C{i}')
        self.ax1.set_xscale('log')
        self.ax1.set_xlim(0, w[-1])
        self.ax2.set_xlim(0, t[-1])
        self.ax1.grid(True, axis='y')
        self.ax2.grid(True, axis='x')
        self.draw()
        # Hand the filtered signal to the next pipeline stage.
        self.pipeline_tunned = True
        self._pipeline_output = self.pipeline_input
        self._pipeline_output.eeg = eeg.copy()
        self._pipeline_propagate()
# ----------------------------------------------------------------------
    def set_filters(self, group_name, filter_):
        """Resolve the selected radio option to a filter callable and refit.

        Args:
            group_name: Radio group name, either 'Notch' or 'Bandpass'.
            filter_: The selected option label, e.g. 'none', '50 Hz' or '1-50 Hz'.
        """
        if filter_ == 'none':
            self.filters[group_name] = filter_
        else:
            if group_name == 'Notch':
                # '50 Hz' -> flt.notch50, '60 Hz' -> flt.notch60
                filter_ = getattr(flt, f'notch{filter_.replace(" Hz", "")}')
            elif group_name == 'Bandpass':
                if filter_ in self.bandpass[1:5]:
                    # Named EEG bands (delta/theta/alpha/beta) exist verbatim in flt.
                    filter_ = getattr(flt, f'{filter_}')
                else:
                    # e.g. '5-45 Hz' -> flt.band545, '0.01-20 Hz' -> flt.band00120
                    filter_ = getattr(
                        flt, f'band{filter_.replace(" Hz", "").replace("-", "").replace(".", "")}')
            self.filters[group_name] = filter_
        self.fit()
# # ----------------------------------------------------------------------
# @property
# def output(self):
# """"""
# if hasattr(self, 'output_signal'):
# return self.output_signal
########################################################################
class LoadDatabase(TimelockSeries):
    """Pipeline entry widget: load a recorded database and plot the raw EEG."""
    # ----------------------------------------------------------------------
    def __init__(self, height=700, *args, **kwargs):
        """Constructor"""
        super().__init__(height, *args, **kwargs)
        self.title = 'Raw EEG signal'
        # Create grid plot: main traces on top, overview strip below.
        gs = self.figure.add_gridspec(4, 4)
        self.ax1 = gs.figure.add_subplot(gs[0:-1, :])
        self.ax2 = gs.figure.add_subplot(gs[-1, :])
        self.ax2.get_yaxis().set_visible(False)
        self.figure.subplots_adjust(left=0.05,
                                    bottom=0.12,
                                    right=0.95,
                                    top=0.8,
                                    wspace=None,
                                    hspace=0.6)
        self.add_button('Load database',
                        callback=self.load_database, area='top', stretch=0)
        self.add_spacer(area='top')
        # Window-width options; the combobox is repopulated in fit() with the
        # subset that fits the loaded recording's duration.
        self.set_window_width_options(['500 milliseconds'])
        self.window_options = ['500 milliseconds',
                               '1 second',
                               '5 second',
                               '15 second',
                               '30 second',
                               '1 minute',
                               '5 minute',
                               '10 minute',
                               '30 minute',
                               '1 hour']
        self.database_description = self.add_textarea(
            area='right', stretch=0)
    # ----------------------------------------------------------------------
    def load_database(self):
        """Ask the user for a database file, compile the filters and plot it."""
        self.datafile = Dialogs.load_database()
        # Set input manually
        self.pipeline_input = self.datafile
        flt.compile_filters(
            FS=self.pipeline_input.header['sample_rate'], N=2, Q=3)
        self.fit()
    # ----------------------------------------------------------------------
    @wait_for_it
    def fit(self):
        """Decimate and plot the loaded EEG; pass the datafile down the pipeline."""
        datafile = self.pipeline_input
        header = datafile.header
        eeg = datafile.eeg
        # NOTE(review): bare attribute access — presumably forces the lazy
        # 'aux' data to be read before the file is closed; confirm.
        datafile.aux
        timestamp = datafile.timestamp
        self.database_description.setText(datafile.description)
        # Downsample by 15 for display only.
        eeg = decimate(eeg, 15, axis=1)
        timestamp = np.linspace(
            0, timestamp[0][-1], eeg.shape[1], endpoint=True) / 1000
        eeg = eeg / 1000
        # Offer only the window widths shorter than the recording.
        options = [self._get_seconds_from_human(
            w) for w in self.window_options]
        l = len([o for o in options if o < timestamp[-1]])
        self.combobox.clear()
        self.combobox.addItems(self.window_options[:l])
        self.set_data(timestamp, eeg,
                      labels=list(header['channels'].values()),
                      ylabel='Millivolt [$mv$]',
                      xlabel='Time [$s$]')
        datafile.close()
        self.pipeline_tunned = True
        self.pipeline_output = datafile
########################################################################
class EpochsVisualization(TimelockWidget):
    """Widget to build epochs around selected markers and compare evoked responses.

    `fit()` creates the parameter widgets (epoch window, averaging method,
    rejection thresholds, marker and channel selectors); `get_epochs()` cuts
    the epochs, drops bad ones and plots the per-marker evoked averages.
    The resulting epochs object is placed on the pipeline output.
    """
    # ----------------------------------------------------------------------
    def __init__(self, height=700, *args, **kwargs):
        """Constructor"""
        super().__init__(height, *args, **kwargs)
        self.title = 'Visualize epochs'
        self.ax1 = self.figure.add_subplot(111)
        self.pipeline_tunned = True
    # ----------------------------------------------------------------------
    def fit(self):
        """(Re)build the parameter widgets from the current pipeline input."""
        self.clear_widgets()
        markers = sorted(list(self.pipeline_input.markers.keys()))
        channels = list(self.pipeline_input.header['channels'].values())
        # Epoch window in seconds relative to each marker.
        self.tmin = self.add_spin('tmin', 0, suffix='s', min_=-99,
                                  max_=99, callback=self.get_epochs, area='top', stretch=0)
        self.tmax = self.add_spin(
            'tmax', 1, suffix='s', min_=-99, max_=99, callback=self.get_epochs, area='top', stretch=0)
        self.method = self.add_combobox(label='Method', items=[
            'mean', 'median'], callback=self.get_epochs, area='top', stretch=0)
        self.add_spacer(area='top', fixed=50)
        # Peak-to-peak rejection (too large) and flatness (too small) thresholds.
        self.reject = self.add_spin('Reject', 200, suffix='vpp', min_=0,
                                    max_=500, step=10, callback=self.get_epochs, area='top', stretch=0)
        self.flat = self.add_spin('Flat', 10, suffix='vpp', min_=0, max_=500,
                                  step=10, callback=self.get_epochs, area='top', stretch=0)
        self.add_spacer(area='top')
        self.checkbox = self.add_checkbox(
            'Markers', markers, callback=self.get_epochs, area='bottom', stretch=1)
        self.add_spacer(area='bottom')
        self.channels = self.add_channels(
            'Channels', channels, callback=self.get_epochs, area='right', stretch=1)
        self.add_spacer(area='right')
    # ----------------------------------------------------------------------
    @wait_for_it
    def get_epochs(self, *args, **kwargs):
        """Cut epochs for the selected markers/channels and plot the evokeds."""
        self.figure.clear()
        self.ax1 = self.figure.add_subplot(111)
        markers = sorted([ch.text()
                          for ch in self.checkbox if ch.isChecked()])
        channels = sorted([ch.text()
                           for ch in self.channels if ch.isChecked()])
        # Nothing to plot until at least one marker and channel are selected
        # and the thresholds are consistent.
        if not markers:
            return
        if not channels:
            return
        if self.reject.value() < self.flat.value():
            return
        epochs = self.pipeline_input.epochs(
            tmin=self.tmin.value(), tmax=self.tmax.value(), markers=markers)
        reject = {'eeg': self.reject.value()}
        flat = {'eeg': self.flat.value()}
        epochs.drop_bad(reject, flat)
        evokeds = {}
        for mk in markers:
            erp = epochs[mk].average(
                method=self.method.currentText(), picks=channels)
            evokeds[mk] = erp
        try:
            mne.viz.plot_compare_evokeds(evokeds, axes=self.ax1, cmap=(
                'Class', 'cool'), show=False, show_sensors=False, invert_y=True, styles={}, split_legend=False, legend='upper center')
        except Exception:
            # Best-effort plotting: MNE may fail e.g. when every epoch was
            # dropped. The previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only catch Exception here.
            pass
        self.draw()
        self.pipeline_output = epochs
########################################################################
class AmplitudeAnalysis(TimelockWidget):
    """Widget showing the cross-channel amplitude envelope (min/mean/max) of the EEG."""
    # ----------------------------------------------------------------------
    def __init__(self, height, *args, **kwargs):
        """Constructor"""
        super().__init__(height, *args, **kwargs)
        self.title = 'Amplitude analysis'
        self.ax1 = self.figure.add_subplot(111)
        self.pipeline_tunned = True
        self.figure.subplots_adjust(left=0.05,
                                    bottom=0.12,
                                    right=0.95,
                                    top=0.95)
    # ----------------------------------------------------------------------
    @wait_for_it
    def fit(self):
        """Plot the cross-channel amplitude envelope with Vpp reference lines."""
        datafile = self.pipeline_input
        # Time axis scaled by /1000/60 from the raw timestamps.
        # NOTE(review): the x axis label below says seconds — verify the units.
        t = datafile.timestamp[0] / 1000 / 60
        eeg = datafile.eeg
        # Remove the DC offset of every channel.
        eeg = eeg - eeg.mean(axis=1)[:, np.newaxis]
        # Envelope across channels per sample.
        mx = eeg.max(axis=0)
        mn = eeg.min(axis=0)
        m = eeg.mean(axis=0)
        self.ax1.clear()
        # dc = int(self.decimate.currentText())
        dc = 1000
        mxd = decimate(mx, dc, n=2)
        mnd = decimate(mn, dc, n=2)
        md = decimate(m, dc, n=2)
        td = decimate(t, dc, n=2)
        self.ax1.fill_between(td, mnd, mxd, color='k',
                              alpha=0.3, linewidth=0)
        self.ax1.plot(td, md, color='C0')
        # Reference peak-to-peak amplitudes drawn as dashed guide lines.
        vpps = [100, 150, 200, 300, 500, 0]
        for i, vpp in enumerate(vpps):
            self.ax1.hlines(
                vpp / 2, 0, td[-1], linestyle='--', color=pyplot.cm.tab10(i))
            if vpp:
                self.ax1.hlines(-vpp / 2, 0,
                                td[-1], linestyle='--', color=pyplot.cm.tab10(i))
        self.ax1.set_xlim(0, td[-1])
        self.ax1.set_ylim(2 * mn.mean(), 2 * mx.mean())
        ticks = sorted(vpps + [-v for v in vpps])
        self.ax1.set_yticks([v / 2 for v in ticks])
        self.ax1.set_yticklabels([f'{abs(v)} vpp' for v in ticks])
        self.ax1.grid(True, axis='x')
        self.ax1.set_ylabel('Voltage [uv]')
        self.ax1.set_xlabel('Time [$s$]')
        self.draw()
        self.pipeline_output = self.pipeline_input
########################################################################
class AddMarkers(TimelockSeries):
""""""
# ----------------------------------------------------------------------
    def __init__(self, height, *args, **kwargs):
        """Constructor"""
        super().__init__(height, *args, **kwargs)
        self.title = 'Add new markers'
        # Create grid plot: stacked traces on top, overview strip below.
        gs = self.figure.add_gridspec(4, 1)
        self.ax1 = gs.figure.add_subplot(gs[0:-1, :])
        self.ax2 = gs.figure.add_subplot(gs[-1, :])
        self.ax2.get_yaxis().set_visible(False)
        self.figure.subplots_adjust(left=0.05,
                                    bottom=0.12,
                                    right=0.95,
                                    top=0.95,
                                    wspace=None,
                                    hspace=0.6)
        # Selectable time-window widths for the viewer.
        self.set_window_width_options(
            ['500 milliseconds',
             '1 second',
             '5 second',
             '15 second',
             '30 second',
             '1 minute',
             '5 minute',
             '10 minute',
             '30 minute',
             '1 hour'])
        # Editable combobox with the marker label plus a button to place it.
        self.markers = self.add_combobox('Marker', [], callback=None, editable=True,
                                         area='bottom2', stretch=3)
        self.add_button('Add marker', callback=self.add_marker,
                        area='bottom2', stretch=0)
        self.add_spacer(area='bottom2', stretch=10)
        # self.database_description = self.add_textarea(
        #     area='right', stretch=0)
        self.pipeline_tunned = True
# ----------------------------------------------------------------------
def add_marker(self):
""""""
q = np.mean(self.ax1.get_xlim())
self.ax1.vlines(q, * self.ax1.get_ylim(),
linestyle='--', color='red', linewidth=5, zorder=99)
self.ax2.vlines(q, * self.ax2.get_ylim(),
linestyle='--', color='red', linewidth=3, zorder=99)
markers = self._pipeline_output.markers
markers.setdefault(self.markers.currentText(), []).append(q)
self._pipeline_output.markers = markers
self._pipeline_propagate()
self.draw()
# ----------------------------------------------------------------------
    @wait_for_it
    def fit(self):
        """Plot the decimated EEG with a fixed channel offset and a centre cursor."""
        datafile = self.pipeline_input
        # Always offer the special BAD/BLINK labels plus any existing markers.
        markers = ['BAD', 'BLINK']
        markers += sorted(list(datafile.markers.keys()))
        self.markers.clear()
        self.markers.addItems(markers)
        header = datafile.header
        eeg = datafile.eeg
        timestamp = datafile.timestamp
        # Downsample by 15 for display only.
        eeg = decimate(eeg, 15, axis=1)
        timestamp = np.linspace(
            0, timestamp[0][-1], eeg.shape[1], endpoint=True) / 1000
        # eeg = eeg / 1000
        # Fixed vertical offset between stacked channel traces.
        self.threshold = 150
        channels = eeg.shape[0]
        self.set_data(timestamp, eeg,
                      labels=list(header['channels'].values()),
                      ylabel='Millivolt [$mv$]',
                      xlabel='Time [$s$]',
                      legend=False,
                      )
        self.ax1.set_yticks([self.threshold * i for i in range(channels)])
        self.ax1.set_yticklabels(
            self.pipeline_input.header['channels'].values())
        self.ax1.set_ylim(-self.threshold, self.threshold * channels)
        self.ax2.set_ylim(-self.threshold, self.threshold * channels)
        # Cursor marking the centre of the visible window.
        self.vlines = self.ax1.vlines(np.mean(self.ax1.get_xlim()),
                                      * self.ax1.get_ylim(), linestyle='--', color='red', linewidth=2, zorder=99)
        self.draw()
        datafile.close()
        self.pipeline_tunned = True
        self.pipeline_output = self.pipeline_input
# ----------------------------------------------------------------------
def set_data(self, timestamp, eeg, labels, ylabel='', xlabel='', legend=True):
""""""
self.ax1.clear()
self.ax2.clear()
for i, ch in enumerate(eeg):
self.ax1.plot(timestamp, ch + self.threshold *
i, label=labels[i])
self.ax2.plot(timestamp, ch + self.threshold * i, alpha=0.5)
self.ax1.grid(True, axis='x')
if legend:
self.ax1.legend(loc='upper center', ncol=8,
bbox_to_anchor=(0.5, 1.4), **LEGEND_KWARGS)
self.ax1.set_xlim(0, self.window_value)
self.ax2.grid(True, axis='x')
self.ax2.set_xlim(0, timestamp[-1])
self.ax2.fill_between([0, self.window_value], *self.ax1.get_ylim(),
color=self.fill_color, alpha=self.fill_opacity, label='AREA')
self.scroll.setMaximum((timestamp[-1] - self.window_value) * 1000)
self.scroll.setMinimum(0)
self.ax1.set_ylabel(ylabel)
self.ax2.set_xlabel(xlabel)
# ----------------------------------------------------------------------
def move_plot(self, value):
""""""
self.ax1.set_xlim(value / 1000, (value / 1000 + self.window_value))
for area in [i for i, c in enumerate(self.ax2.collections) if c.get_label() == 'AREA'][::-1]:
self.ax2.collections.pop(area)
self.ax2.fill_between([value / 1000, (value / 1000 + self.window_value)],
* self.ax1.get_ylim(), color=self.fill_color,
alpha=self.fill_opacity, label='AREA')
segments = self.vlines.get_segments()
segments[0][:, 0] = [np.mean(self.ax1.get_xlim())] * 2
self.vlines.set_segments(segments)
self.draw()
# ----------------------------------------------------------------------
def change_window(self):
""""""
self.window_value = self._get_seconds_from_human(
self.combobox.currentText())
eeg = self.pipeline_output.eeg
timestamp = self.pipeline_output.timestamp
timestamp = np.linspace(
0, timestamp[0][-1], eeg.shape[1], endpoint=True) / 1000
self.scroll.setMaximum((timestamp[-1] - self.window_value) * 1000)
self.scroll.setMinimum(0)
self.scroll.setPageStep(self.window_value * 1000)
self.ax1.set_xlim(self.scroll.value() / 1000,
(self.scroll.value() / 1000 + self.window_value))
self.draw()
# ########################################################################
# class ConditionalCreateMarkers(ta.TimelockWidget):
# """"""
# # ----------------------------------------------------------------------
# def __init__(self, height, *args, **kwargs):
# """Constructor"""
# super().__init__(height=0, *args, **kwargs)
# self.title = 'Create markers conditionally'
# self.layout = QtWidgets.QVBoxLayout()
# widget = QtWidgets.QWidget()
# widget.setLayout(self.layout)
# getattr(self.widget, 'topLayout').addWidget(widget)
# getattr(self, 'top_stretch').append(1)
# self.add_button('Add row', callback=self.add_row,
# area='bottom', stretch=0)
# self.add_spacer(area='bottom', fixed=None, stretch=1)
# self.new_markers = {}
# # ----------------------------------------------------------------------
# @wait_for_it
# def fit(self):
# | |
progress_callback,
current_step,
total_steps,
"Pre-processing image",
wrapper,
)
wrapper.store_image(
wrapper.current_image, "pre_processed_image", force_store=True
)
self._last_wrapper_luid = wrapper.luid
return use_last_result, current_step
def build_target_tools(
self, tools: Union[None, list], wrapper, use_last_result: bool
):
if tools is None:
tools = self.get_operators(
constraints=dict(
kind=(
ToolFamily.PRE_PROCESSING,
ToolFamily.THRESHOLD,
ToolFamily.MASK_CLEANUP,
),
enabled=True,
)
)
for tool in tools:
if tool["tool"].get_value_of("tool_target") not in [None, "", "none"]:
ret, use_last_result = self.process_tool(
tool_dict=tool, wrapper=wrapper, use_last_result=use_last_result
)
if ret is not None:
wrapper.data_output[tool["tool"].result_name] = ret
return use_last_result
def build_rois(
self,
wrapper,
tools: Union[None, list],
use_last_result: bool,
progress_callback=None,
current_step: int = -1,
total_steps: int = -1,
target_raw_image=True,
target_pp_image=True,
):
if target_raw_image and target_pp_image:
kinds = (ToolFamily.ROI_RAW_IMAGE_STR, ToolFamily.ROI_PP_IMAGE_STR)
elif target_raw_image:
kinds = (ToolFamily.ROI_RAW_IMAGE_STR,)
elif target_pp_image:
kinds = (ToolFamily.ROI_PP_IMAGE_STR,)
else:
kinds = ()
if tools is None:
tools = [op for op in self.get_operators(dict(kind=kinds, enabled=True))]
for tool in tools:
use_last_result = self.is_use_last_result(
tool_dict=tool, wrapper=wrapper, previous_state=use_last_result
)
if use_last_result:
last_result = tool.get("last_result", None)
else:
last_result = None
if use_last_result and last_result is not None:
wrapper.add_roi(new_roi=last_result)
else:
func = getattr(tool["tool"], "generate_roi", None)
if callable(func):
roi = func(wrapper=wrapper)
if roi is not None:
wrapper.add_roi(new_roi=roi)
tool["last_result"] = roi
else:
wrapper.error_list.add_error(
f'Unable to extract ROI from "{tool.name}"', target_logger=logger
)
if progress_callback is not None and total_steps > 0:
current_step = self.add_progress(
progress_callback, current_step, total_steps, "Building ROIs", None
)
return use_last_result, current_step
def process_tool(self, tool_dict: dict, wrapper: BaseImageProcessor, use_last_result):
use_last_result = self.is_use_last_result(
tool_dict=tool_dict, wrapper=wrapper, previous_state=use_last_result
)
if use_last_result:
last_result = tool_dict.get("last_result", None)
else:
last_result = None
self._last_wrapper_luid = wrapper.luid
tool_kind = tool_dict["kind"]
if use_last_result and (last_result is not None):
if isinstance(tool_dict["last_result"], np.ndarray):
try:
wrapper.store_image(
image=tool_dict["last_result"],
text=f'cached_image_from_{tool_dict["tool"].name}',
)
except Exception as e:
wrapper.error_list.add_error(
f"Unable to store cached image because: {repr(e)}",
target_logger=logger,
)
ret = tool_dict["last_result"]
else:
tool = tool_dict["tool"].copy()
if tool.process_wrapper(wrapper=wrapper):
if (
tool_dict["kind"]
in [ToolFamily.FEATURE_EXTRACTION, ToolFamily.IMAGE_GENERATOR]
) and (hasattr(tool, "data_dict")):
ret = tool.data_dict
else:
ret = tool.result
tool_dict["last_result"] = ret
else:
self._last_wrapper_luid = ""
return None, False
if tool_kind == ToolFamily.EXPOSURE_FIXING:
return ret, use_last_result
if tool_kind == ToolFamily.PRE_PROCESSING:
return ret, use_last_result
elif tool_kind == ToolFamily.THRESHOLD:
return ret, use_last_result
elif tool_kind in [ToolFamily.ROI_RAW_IMAGE_STR, ToolFamily.ROI_PP_IMAGE_STR]:
raise AttributeError("ROI tools should never be fed to process_tool")
elif tool_kind == ToolFamily.MASK_CLEANUP:
return ret, use_last_result
elif tool_kind in [ToolFamily.FEATURE_EXTRACTION, ToolFamily.IMAGE_GENERATOR]:
return ret, use_last_result
else:
self._last_wrapper_luid = ""
raise AttributeError("Unknown tool kind")
def add_progress(self, progress_callback, current_step, total_steps, msg, wrapper):
if progress_callback is not None:
progress_callback(current_step, total_steps, msg, wrapper)
return current_step + 1
def process_image(self, progress_callback=None, **kwargs):
res = False
wrapper = None
self.last_error.clear()
try:
save_mask = False
for tool in (
op["tool"]
for op in self.get_operators(
constraints=dict(
kind=(
ToolFamily.EXPOSURE_FIXING,
ToolFamily.PRE_PROCESSING,
ToolFamily.THRESHOLD,
ToolFamily.ROI_RAW_IMAGE_STR,
ToolFamily.ROI_PP_IMAGE_STR,
ToolFamily.MASK_CLEANUP,
ToolFamily.FEATURE_EXTRACTION,
ToolFamily.IMAGE_GENERATOR,
),
enabled=True,
)
)
):
if tool.has_param("path") and self.image_output_path:
tool.set_value_of(key="path", value=self.image_output_path)
save_mask = save_mask or tool.needs_previous_mask
tools_ = self.group_tools(tool_only=False, conditions=dict(enabled=True))
total_steps = self.get_operators_count(
constraints=dict(
kind=(
ToolFamily.EXPOSURE_FIXING,
ToolFamily.PRE_PROCESSING,
ToolFamily.THRESHOLD,
ToolFamily.ROI_RAW_IMAGE_STR,
ToolFamily.ROI_PP_IMAGE_STR,
ToolFamily.MASK_CLEANUP,
ToolFamily.FEATURE_EXTRACTION,
ToolFamily.IMAGE_GENERATOR,
),
enabled=True,
)
)
total_steps += 4
current_step = 0
# Build wrapper
current_step = self.add_progress(
progress_callback, current_step, total_steps, "Building wrapper", None
)
wrapper = kwargs.get("wrapper", None)
if wrapper is None:
file_path = kwargs.get("file_path", None)
if not file_path:
# Leave if no source
res = False
logger.error("Missing source image")
return False
wrapper = BaseImageProcessor(file_path)
wrapper.lock = True
if self._target_data_base:
wrapper.target_database = self._target_data_base
wrapper.store_image(
image=wrapper.current_image, text="true_source_image", force_store=True
)
# Pre process image
use_last_result, current_step = self.pre_process_image(
wrapper=wrapper,
use_last_result=True,
progress_callback=progress_callback,
current_step=current_step,
total_steps=total_steps,
)
# Build coarse mask
if len(tools_[ToolFamily.THRESHOLD]) > 0:
mask_list = []
mask_names = []
masks_failed_cpt = 0
for i, tool in enumerate(tools_[ToolFamily.THRESHOLD]):
target = tool["tool"].get_value_of("tool_target")
if target not in [None, "", "none"]:
continue
mask, use_last_result = self.process_tool(
tool_dict=tool, wrapper=wrapper, use_last_result=use_last_result
)
if mask is not None:
mask_list.append(mask)
mask_names.append(tool["tool"].short_desc())
else:
self.last_error.add_error(
new_error_text=f'Failed to process {tool["tool"].name}',
new_error_kind="pipeline_process_error",
target_logger=logger,
)
masks_failed_cpt += 1
current_step = self.add_progress(
progress_callback,
current_step,
total_steps,
f"Building coarse masks, failed {masks_failed_cpt}"
if masks_failed_cpt != 0
else "Building coarse masks",
wrapper if mask is not None else None,
)
for img_data, img_name in zip(mask_list, mask_names):
wrapper.store_image(img_data, img_name)
func = getattr(wrapper, self.merge_method, None)
if func:
wrapper.mask = func([mask for mask in mask_list if mask is not None])
wrapper.store_image(image=wrapper.mask, text="coarse_mask")
else:
logger.error("Unable to merge coarse masks")
self.last_error.add_error(
new_error_text="Unable to merge coarse masks",
new_error_kind="pipeline_process_error",
target_logger=logger,
)
res = False
return
current_step = self.add_progress(
progress_callback,
current_step,
total_steps,
"Merged coarse masks",
wrapper if func is not None else None,
)
# ROIs to be applied after mask merging
handled_rois = ["keep", "delete", "erode", "dilate", "open", "close"]
rois_list = [
roi
for roi in wrapper.rois_list
if roi.tag in handled_rois
and not (roi.target and roi.target != "none")
]
wrapper.store_image(
wrapper.retrieve_stored_image("mask_on_exp_fixed_bw_roi"),
text="used_rois",
)
wrapper.mask = wrapper.apply_roi_list(
img=wrapper.mask, rois=rois_list, print_dbg=self.display_images
)
current_step = self.add_progress(
progress_callback, current_step, total_steps, "Applied ROIs", wrapper
)
# Clean mask
if len(tools_[ToolFamily.MASK_CLEANUP]) > 0:
res = True
for tool in tools_[ToolFamily.MASK_CLEANUP]:
tmp_mask, use_last_result = self.process_tool(
tool_dict=tool,
wrapper=wrapper,
use_last_result=use_last_result,
)
if tmp_mask is None:
res = False
self.last_error.add_error(
new_error_text=f'Failed to process {tool["tool"].name}',
new_error_kind="pipeline_process_error",
target_logger=logger,
)
else:
wrapper.mask = tmp_mask
res = res and True
current_step = self.add_progress(
progress_callback,
current_step,
total_steps,
"Cleaning mask",
wrapper,
)
else:
res = True
wrapper.store_image(image=wrapper.mask, text="clean_mask")
# Check that the mask is where it belongs
if res:
enforcers_list = wrapper.get_rois({"enforce"})
if len(enforcers_list) > 0:
for i, enforcer in enumerate(enforcers_list):
mask = wrapper.mask.copy()
mask = enforcer.keep(mask)
partial_ok = np.count_nonzero(mask) > 0
res = partial_ok and res
if partial_ok:
roi_img = np.dstack(
(np.zeros_like(mask), mask, np.zeros_like(mask))
)
else:
roi_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), mask)
)
background_img = cv2.bitwise_and(
wrapper.mask, wrapper.mask, mask=255 - mask
)
img = cv2.bitwise_or(
roi_img,
np.dstack(
(background_img, background_img, background_img)
),
)
enforcer.draw_to(img, line_width=4)
wrapper.store_image(img, f"enforcer_{i}_{enforcer.name}")
wrapper.store_image(
image=wrapper.draw_rois(
img=wrapper.retrieve_stored_image("mask_on_exp_fixed_bw"),
rois=enforcers_list,
),
text="enforcer_rois",
)
fifth_image = "enforcer_rois"
else:
wrapper.store_image(
image=wrapper.retrieve_stored_image("exp_fixed_pseudo_on_bw"),
text="exp_fixed_pseudo_on_bw",
)
fifth_image = "exp_fixed_pseudo_on_bw"
if res and wrapper.mask is not None:
wrapper.store_image(
wrapper.retrieve_stored_image("mask_on_exp_fixed_bw"),
text="mask_on_bw",
)
current_step = self.add_progress(
progress_callback,
current_step,
total_steps,
"Checked mask enforcers",
wrapper,
)
else:
handled_rois = ["keep", "delete"]
rois_list = [
roi
for roi in wrapper.rois_list
if roi.tag in handled_rois
and not (roi.target and roi.target != "none")
]
wrapper.store_image(
image=wrapper.draw_rois(
img=wrapper.retrieve_stored_image("exposure_fixed"),
rois=rois_list,
),
text="used_rois",
)
wrapper.current_image = wrapper.apply_roi_list(
img=wrapper.current_image,
rois=rois_list,
print_dbg=self.display_images,
)
current_step = self.add_progress(
progress_callback, current_step, total_steps, "Applied ROIs", wrapper
)
res = True
# Prepare data holder
if res and (
(
not self.threshold_only
and len(tools_[ToolFamily.FEATURE_EXTRACTION]) > 0
)
or (len(tools_[ToolFamily.IMAGE_GENERATOR]) > 0)
):
wrapper.csv_data_holder = AbstractCsvWriter()
if save_mask and self.image_output_path and wrapper.mask is not None:
force_directories(os.path.join(self.image_output_path, "masks"))
cv2.imwrite(
filename=os.path.join(
self.image_output_path, "masks", wrapper.file_name
),
img=wrapper.mask,
)
# Extract features
if (
res
and not self.threshold_only
and len(tools_[ToolFamily.FEATURE_EXTRACTION]) > 0
):
wrapper.current_image = wrapper.retrieve_stored_image("exposure_fixed")
for tool in tools_[ToolFamily.FEATURE_EXTRACTION]:
current_data, use_last_result = self.process_tool(
tool_dict=tool, wrapper=wrapper, use_last_result=use_last_result
)
if isinstance(current_data, dict):
wrapper.csv_data_holder.data_list.update(current_data)
else:
self.last_error.add_error(
new_error_text=f'{tool["tool"].name} failed to extract features',
new_error_kind="pipeline_process_error",
target_logger=logger,
)
current_step = self.add_progress(
progress_callback,
current_step,
total_steps,
"Extracting features",
wrapper,
)
res = len(wrapper.csv_data_holder.data_list) > 0
# Generate images
if res and len(tools_[ToolFamily.IMAGE_GENERATOR]) > 0:
for tool in tools_[ToolFamily.IMAGE_GENERATOR]:
current_data, use_last_result = self.process_tool(
tool_dict=tool,
wrapper=wrapper,
use_last_result=use_last_result,
)
if isinstance(current_data, dict):
wrapper.csv_data_holder.data_list.update(current_data)
else:
self.last_error.add_error(
new_error_text=f'{tool["tool"].name} failed to generate images',
new_error_kind="pipeline_process_error",
target_logger=logger,
)
current_step = self.add_progress(
progress_callback,
current_step,
total_steps,
"Copying images",
wrapper,
)
res = len(wrapper.csv_data_holder.data_list) > 0
# Set last image to be displayed
if self.last_image:
last_image = wrapper.retrieve_stored_image(self.last_image)
if last_image is not None:
wrapper.store_image(image=last_image, text="last_" + self.last_image)
if self.build_mosaic:
old_mosaic = wrapper.store_mosaic
wrapper.store_mosaic = "result"
if wrapper.mask is not None:
wrapper.mosaic_data = np.array(
[
line.split(",")
for line in self.mosaic_items.replace(" ", "").split("\n")
]
)
else:
wrapper.mosaic_data = np.array(
["source", "exposure_fixed", "current_image"]
)
wrapper.print_mosaic(padding=4)
wrapper.store_mosaic = old_mosaic
else:
res = True
current_step = self.add_progress(
progress_callback, total_steps, total_steps, "Done", wrapper
)
except Exception as e:
logger.error(f'Unexpected failure: "{repr(e)}"')
res = False
else:
pass
finally:
wrapper.lock = False
return res
@staticmethod
def code_imports():
# External libraries
import_lst = list(
map(
lambda x: f"import {x}",
["argparse", "csv", "cv2", "numpy as np", "os", "sys"],
)
)
# Add paths
import_lst.extend(
[
"",
"abspath = os.path.abspath(__file__)",
"fld_name = os.path.dirname(abspath)",
"sys.path.insert(0, fld_name)",
"sys.path.insert(0, os.path.dirname(fld_name))",
"",
]
)
# IPSO Phen libraries
import_lst.extend(
[
"from ipso_phen.ipapi.base.ip_abstract import BaseImageProcessor",
"from ipso_phen.ipapi.base.ipt_functional import call_ipt, call_ipt_func",
"from ipso_phen.ipapi.tools.csv_writer import AbstractCsvWriter",
]
)
return | |
# Repository: Amoza-Theodore/Baidu2020
import os
import shutil
import v4l2capture
from ctypes import *
import struct
import array
from fcntl import ioctl
import cv2
import numpy as np
import time
from sys import argv
import getopt
import sys
import select
import termios
import tty
import threading
import paddlemobile as pm
from paddlelite import *
import codecs
import multiprocessing
import math
import functools
from PIL import Image
from PIL import ImageFile
from PIL import ImageDraw
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Car:
    def __init__(self):
        """Wire up the whole car: config, folders, camera, predictors and the
        serial link to the chassis controller."""
        # load config
        self.config()
        # create folders (prediction images and collected-data tree)
        if not os.path.exists(self.img_save_path):
            os.makedirs(self.img_save_path)
        if not os.path.exists(self.data_collect_path):
            os.makedirs(self.data_collect_path)
        img_collect_path = os.path.join(self.data_collect_path, 'img')
        if not os.path.exists(img_collect_path):
            os.makedirs(img_collect_path)
        # Initialize the camera: MJPG at 424x240 with a single capture buffer
        camera = "/dev/video2"
        video = v4l2capture.Video_device(camera)
        video.set_format(424, 240, fourcc='MJPG')
        video.create_buffers(1)
        video.queue_all_buffers()
        video.start()
        self.video = video
        # Initialize the predictors: two steering-angle models plus detector
        self.angle_predictor = self.load_angle_model()
        self.angle_right_predictor = self.load_angle_model_right()
        self.label_predictor = self.load_label_model()
        # Initialize the lower machine: load the chassis driver .so and open
        # the serial link at 38400 baud.
        path = os.path.split(os.path.realpath(__file__))[0] + "/.."
        lib_path = path + "/lib" + "/libart_driver.so"
        so = cdll.LoadLibrary
        self.lib = so(lib_path)
        car = "/dev/ttyUSB0"
        self.lib.art_racecar_init(38400, car.encode("utf-8"))
def clean_predict_img(self):
filepath = self.img_save_path
if os.path.exists(filepath):
shutil.rmtree(filepath)
os.makedirs(filepath)
def clean_data_collect(self):
filepath = self.data_collect_path
if os.path.exists(filepath):
shutil.rmtree(filepath)
os.makedirs(filepath)
os.makedirs(filepath + '/img')
    def getvalue(self):
        """Query the joystick device for its axis and button maps via ioctl.

        Returns ``(axis_map, axis_states, button_map, button_states)`` where
        the state dicts are initialised to 0.0 / 0 for every mapped name.

        NOTE(review): relies on ``self.jsdev``, ``self.axis_names`` and
        ``self.button_names`` being set elsewhere (not visible in this file
        section) — confirm the joystick setup path before calling.
        """
        axis_states = {}
        button_states = {}
        axis_map = []
        button_map = []
        # JSIOCGNAME: device name; the buffer length is encoded in the request.
        buf = array.array('u', str(['\0'] * 5))
        ioctl(self.jsdev, 0x80006a13 + (0x10000 * len(buf)), buf)
        # NOTE(review): array.tostring() was removed in Python 3.9; switch to
        # tobytes() if this script is ever run on a newer interpreter.
        js_name = buf.tostring()
        # get number of axes and buttons
        buf = array.array('B', [0])
        ioctl(self.jsdev, 0x80016a11, buf) # JSIOCGAXES
        num_axes = buf[0]
        buf = array.array('B', [0])
        ioctl(self.jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
        num_buttons = buf[0]
        # Get the axis map
        buf = array.array('B', [0] * 0x40)
        ioctl(self.jsdev, 0x80406a32, buf) # JSIOCGAXMAP
        for axis in buf[:num_axes]:
            axis_name = self.axis_names.get(axis, 'unknow(0x%02x)' % axis)
            axis_map.append(axis_name)
            axis_states[axis_name] = 0.0
        # Get the button map.
        buf = array.array('H', [0] * 200)
        ioctl(self.jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
        for btn in buf[:num_buttons]:
            btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
            button_map.append(btn_name)
            button_states[btn_name] = 0
        return axis_map, axis_states, button_map, button_states
    def config(self):
        """Load tunable settings: model paths, output folders, driving speed,
        label table and the runtime state flags.

        All values are plain attributes meant to be edited by hand.
        NOTE(review): 'contiune_angle' and 'init_vels' look misspelled but are
        kept as-is since other code may reference these attribute names.
        """
        # load config, modifiable
        # Where angle_model is stored
        self.angle_model_path = '../model/angle_model/model_infer'
        self.angle_model_right_path = '../model/angle_model_right/model_infer'
        # Where label_model is stored
        self.label_model_path = '../model/freeze_model'
        # Where images are saved
        self.img_save_path = '../predict_img'
        # Where the collected data is saved
        self.data_collect_path = '../data_collect'
        # The speed at which the car runs
        self.init_vels = 1600
        # avoiding para
        self.contiune_angle = 1500
        self.count = 0
        # The corresponding serial number and label
        # self.label_dict = {
        #     0: 'green_light',
        #     1: 'limit',
        #     2: 'limit_end',
        #     3: 'outer',
        #     4: 'red_light',
        #     5: 'stop',
        #     6: 'straight',
        #     7: 'turn_left'
        # }
        self.label_dict = {
            # 0: 'guide'
            0: 'avoiding',
            1: 'limit',
            2: 'limit_end',
            3: 'outer',#unused
            4: 'overtake',
            5: 'red_line',#used to be stop
            6: 'stop',#unused
            7: 'back',
            8: 'straight',#used to be side_walk
            9: 'turn_left',
        }
        # The sequence number required to save the image
        self.ImgInd = 0
        # Flags for marker detection (toggled by user_cmd as signs are seen)
        self.stop_flag = False
        self.run_flag = True
        self.limit_flag = False
        self.turn_left_flag = False
        self.P_flag = False
        self.PR_flag = False
        self.back_flag = False
        self.label = 'none'
        self.overtake_flag = True
        self.right_flag = False
    def dataset(self, video):
        """Grab one frame from the v4l2 camera and build the model inputs.

        :param video: a started ``v4l2capture.Video_device``
        :return: ``(img_label, img_angle, img_save, mask)`` — PIL image for
            the detector, normalised (1, 128, 128, 3) float array for the
            angle model, a colour-converted copy for saving, and the
            yellow-range HSV mask.
        """
        # HSV range isolating the yellow lane line.
        lower_hsv = np.array([26, 85, 75])
        upper_hsv = np.array([34, 255, 255])
        # Block until the camera has a frame ready, then decode the MJPG data.
        select.select((video,), (), ())
        image_data = video.read_and_queue()
        frame = cv2.imdecode(np.frombuffer(
            image_data, dtype=np.uint8), cv2.IMREAD_COLOR)
        # NOTE(review): cv2.imdecode yields BGR, yet the conversions below are
        # tagged RGB2BGR / BGR2HSV — confirm the intended channel order.
        img_save = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lowerb=lower_hsv, upperb=upper_hsv)
        # Angle-model input: 128x128, 3-channel copy of the mask in [0, 1].
        img_angle = Image.fromarray(mask)
        img_angle = img_angle.resize((128, 128), Image.ANTIALIAS)
        img_angle = np.array(img_angle).astype(np.float32)
        img_angle = cv2.cvtColor(img_angle, cv2.COLOR_GRAY2BGR)
        img_angle = img_angle / 255.0
        img_angle = np.expand_dims(img_angle, axis=0)
        img_label = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        img_label = Image.fromarray(img_label)
        return img_label, img_angle, img_save, mask
def load_angle_model(self):
valid_places = (
Place(TargetType.kFPGA, PrecisionType.kFP16, DataLayoutType.kNHWC),
Place(TargetType.kHost, PrecisionType.kFloat),
Place(TargetType.kARM, PrecisionType.kFloat),
)
config = CxxConfig()
model_dir = self.angle_model_path
config.set_model_file(model_dir + "/model")
config.set_param_file(model_dir + "/params")
config.set_valid_places(valid_places)
predictor = CreatePaddlePredictor(config)
return predictor
def load_angle_model_right(self):
valid_places = (
Place(TargetType.kFPGA, PrecisionType.kFP16, DataLayoutType.kNHWC),
Place(TargetType.kHost, PrecisionType.kFloat),
Place(TargetType.kARM, PrecisionType.kFloat),
)
config = CxxConfig()
model_dir_right = self.angle_model_right_path
config.set_model_file(model_dir_right + "/model")
config.set_param_file(model_dir_right + "/params")
config.set_valid_places(valid_places)
predictor = CreatePaddlePredictor(config)
return predictor
def load_label_model(self):
model_dir = self.label_model_path
pm_config = pm.PaddleMobileConfig()
pm_config.precision = pm.PaddleMobileConfig.Precision.FP32
pm_config.device = pm.PaddleMobileConfig.Device.kFPGA
pm_config.model_dir = model_dir
pm_config.thread_num = 4
label_predictor = pm.CreatePaddlePredictor(pm_config)
return label_predictor
def tensor_deal(self, origin):
tensor_img = origin.resize((256, 256), Image.BILINEAR)
if tensor_img.mode != 'RGB':
tensor_img = tensor_img.convert('RGB')
tensor_img = np.array(tensor_img).astype(
'float32').transpose((2, 0, 1))
tensor_img -= 127.5
tensor_img *= 0.007843
tensor_img = tensor_img[np.newaxis, :]
tensor = pm.PaddleTensor()
tensor.dtype = pm.PaddleDType.FLOAT32
tensor.shape = (1, 3, 256, 256)
tensor.data = pm.PaddleBuf(tensor_img)
paddle_data_feeds = [tensor]
return paddle_data_feeds
def angle_predict(self, predictor, image):
tmp = np.zeros((1, 128, 128, 3))
img = image
i = predictor.get_input(0)
i.resize((1, 3, 128, 128))
tmp[0, 0:img.shape[1], 0:img.shape[2] + 0, 0:img.shape[3]] = img
tmp = tmp.reshape(1, 3, 128, 128)
frame = cv2.imdecode(np.frombuffer(
img, dtype=np.uint8), cv2.IMREAD_COLOR)
i.set_data(tmp)
predictor.run()
out = predictor.get_output(0)
score = out.data()[0][0]
return score
def get_img_para(self, label_outputs):
# If the score > 0.6 then the object is detected successfully
mask = label_outputs[:, 1] > 0.5 if len(label_outputs.shape) > 1 else 0
if np.sum(mask) > 0:
detect = True
labels = label_outputs[mask, 0].astype('int32')
scores = label_outputs[mask, 1].astype('float32')
boxes = label_outputs[mask, 2:].astype('float32')
# No objects were detected
else:
detect = False
labels = None
scores = None
boxes = None
return detect, labels, scores, boxes
def img_save(self, img, detect, boxes, labels, scores):
img = Image.fromarray(img)
# # Detect object, draw a rectangle around the picture
# if detect == True:
# # draw = ImageDraw.Draw(img)
# for box, label, score in zip(boxes, labels, scores):
# xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]
# xmin, xmax = (int(x / 608 * 320) for x in [xmin, xmax])
# ymin, ymax = (int(y / 608 * 240) for y in [ymin, ymax])
#
# draw.rectangle((xmin, ymin, xmax, ymax), None, 'red')
# box_str = str(xmin) + ' ' + str(ymin) + ' ' + str(xmax) + ' ' + str(ymax)
# draw.text((xmin, ymin), self.label_dict[int(label)] + ' ' + str(score) + '\n' + box_str, (255, 255, 0))
# save image
output_path = os.path.join(
self.img_save_path, str(self.ImgInd) + '.jpg')
img.save(output_path)
self.ImgInd += 1
def user_cmd(self, detect, label_ids, scores, boxes, vel, angle, a):
# identify
if detect:
for label_id, box in zip(label_ids, boxes):
# deal box
xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]
xmin, xmax = (int(x / 608 * 320) for x in [xmin, xmax])
ymin, ymax = (int(y / 608 * 240) for y in [ymin, ymax])
center_y = int((ymin + ymax) / 2)
label = self.label_dict[label_id]
print('label: ' + label)
# if center_y > 160:
# print('label = ' + label)
if label == 'stop':
if center_y > 60:
print('label = ' + label)
self.lib.send_cmd(1500, 1500)
self.run_flag = False
if label == 'red_light':
if center_y > 35:
print('label =' + label)
self.lib.send_cmd(1500, 1500)
self.stop_flag = True
if label == 'green_light':
if center_y > 25:
print('label = ' + label)
self.stop_flag = False
if label == 'limit':
if center_y > 120:
print('label = ' + label)
self.limit_flag = True
if label == 'limit_end':
# if 120<center_y <200:
# self.lib.send_cmd(15,2100)
if center_y > 200:
print(center_y)
print('label = ' + label)
self.lib.send_cmd(1528, 1760)
time.sleep(0.4)
self.lib.send_cmd(1528, 1550)
time.sleep(0.1)
self.limit_flag = False
self.P_flag = True
# self.lib.send_cmd(vel, 2000)
if label == 'turn_left':
if center_y > 150:
print('label = ' + label)
# self.std_time = time.time()
self.turn_left_flag = True
self.P_flag = False
self.PR_flag = False
if label == 'straight':
if center_y > 160:
print('label = ' + label)
self.PR_flag = True
self.PT_flag = True
pass
# operation
if self.stop_flag:
self.lib.send_cmd(1500, 1500)
return
if self.turn_left_flag:
time.sleep(0.6)
self.lib.send_cmd(vel, 2250)
time.sleep(0.3)
self.turn_left_flag = False
# nowtime = time.time()
# if nowtime - self.std_time > 1.2:
# self.lib.send_cmd(vel, angle)
# self.turn_left_flag = False
# return
# if nowtime - self.std_time < 0.68:
# self.lib.send_cmd(vel, angle)
# return
# self.lib.send_cmd(vel, 2100)
return
if self.limit_flag:
# Test: unfinished
print('limited speed')
# angle = int(-2174 * a * a + 3805 * a + 141.3)
# angle = int(-2083 * a * a + 3695* a +151.4)
# angle = int(-1768* a * a +3398 * a + 177.9)
angle = int(-1792 * a * a + 3413 * a + 176.7 + 2 )
print(angle)
self.lib.send_cmd(1528, angle)
return
if self.P_flag & self.PR_flag:
vel = 1600
if self.PT_flag:
self.lib.send_cmd(1600, 915)
time.sleep(0.1)
self.PT_flag = False
# | |
self.type,
self.resource_user_provided_identifiers,
self.inventory_entry,
)
)
class InventoryEntryCreatedMessagePayload(MessagePayload):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.InventoryEntryCreatedMessagePayloadSchema`."
    #: :class:`commercetools.types.InventoryEntry` `(Named` ``inventoryEntry`` `in Commercetools)`
    inventory_entry: "InventoryEntry"

    def __init__(
        self, *, type: str = None, inventory_entry: "InventoryEntry" = None
    ) -> None:
        # ``type`` is accepted for schema symmetry but always forced below.
        self.inventory_entry = inventory_entry
        super().__init__(type="InventoryEntryCreated")

    def __repr__(self) -> str:
        return (
            f"InventoryEntryCreatedMessagePayload(type={self.type!r}, "
            f"inventory_entry={self.inventory_entry!r})"
        )
class InventoryEntryDeletedMessage(Message):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.InventoryEntryDeletedMessageSchema`."
    #: :class:`str`
    sku: str
    #: :class:`commercetools.types.ChannelReference` `(Named` ``supplyChannel`` `in Commercetools)`
    supply_channel: "ChannelReference"

    def __init__(
        self,
        *,
        id: str = None,
        version: int = None,
        created_at: datetime.datetime = None,
        last_modified_at: datetime.datetime = None,
        last_modified_by: typing.Optional["LastModifiedBy"] = None,
        created_by: typing.Optional["CreatedBy"] = None,
        sequence_number: int = None,
        resource: "Reference" = None,
        resource_version: int = None,
        type: str = None,
        resource_user_provided_identifiers: typing.Optional[
            "UserProvidedIdentifiers"
        ] = None,
        sku: str = None,
        supply_channel: "ChannelReference" = None
    ) -> None:
        # Payload-specific fields first, then the shared Message fields; the
        # ``type`` argument is ignored in favour of the fixed message type.
        self.sku = sku
        self.supply_channel = supply_channel
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
            last_modified_by=last_modified_by,
            created_by=created_by,
            sequence_number=sequence_number,
            resource=resource,
            resource_version=resource_version,
            type="InventoryEntryDeleted",
            resource_user_provided_identifiers=resource_user_provided_identifiers,
        )

    def __repr__(self) -> str:
        return (
            f"InventoryEntryDeletedMessage(id={self.id!r}, version={self.version!r}, "
            f"created_at={self.created_at!r}, last_modified_at={self.last_modified_at!r}, "
            f"last_modified_by={self.last_modified_by!r}, created_by={self.created_by!r}, "
            f"sequence_number={self.sequence_number!r}, resource={self.resource!r}, "
            f"resource_version={self.resource_version!r}, type={self.type!r}, "
            f"resource_user_provided_identifiers={self.resource_user_provided_identifiers!r}, "
            f"sku={self.sku!r}, supply_channel={self.supply_channel!r})"
        )
class InventoryEntryDeletedMessagePayload(MessagePayload):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.InventoryEntryDeletedMessagePayloadSchema`."
    #: :class:`str`
    sku: str
    #: :class:`commercetools.types.ChannelReference` `(Named` ``supplyChannel`` `in Commercetools)`
    supply_channel: "ChannelReference"

    def __init__(
        self,
        *,
        type: str = None,
        sku: str = None,
        supply_channel: "ChannelReference" = None
    ) -> None:
        # ``type`` is accepted for schema symmetry but always forced below.
        self.sku = sku
        self.supply_channel = supply_channel
        super().__init__(type="InventoryEntryDeleted")

    def __repr__(self) -> str:
        return (
            f"InventoryEntryDeletedMessagePayload(type={self.type!r}, "
            f"sku={self.sku!r}, supply_channel={self.supply_channel!r})"
        )
class InventoryEntryQuantitySetMessage(Message):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.InventoryEntryQuantitySetMessageSchema`."
    #: :class:`int` `(Named` ``oldQuantityOnStock`` `in Commercetools)`
    old_quantity_on_stock: int
    #: :class:`int` `(Named` ``newQuantityOnStock`` `in Commercetools)`
    new_quantity_on_stock: int
    #: :class:`int` `(Named` ``oldAvailableQuantity`` `in Commercetools)`
    old_available_quantity: int
    #: :class:`int` `(Named` ``newAvailableQuantity`` `in Commercetools)`
    new_available_quantity: int

    def __init__(
        self,
        *,
        id: str = None,
        version: int = None,
        created_at: datetime.datetime = None,
        last_modified_at: datetime.datetime = None,
        last_modified_by: typing.Optional["LastModifiedBy"] = None,
        created_by: typing.Optional["CreatedBy"] = None,
        sequence_number: int = None,
        resource: "Reference" = None,
        resource_version: int = None,
        type: str = None,
        resource_user_provided_identifiers: typing.Optional[
            "UserProvidedIdentifiers"
        ] = None,
        old_quantity_on_stock: int = None,
        new_quantity_on_stock: int = None,
        old_available_quantity: int = None,
        new_available_quantity: int = None
    ) -> None:
        # Payload-specific fields first, then the shared Message fields; the
        # ``type`` argument is ignored in favour of the fixed message type.
        self.old_quantity_on_stock = old_quantity_on_stock
        self.new_quantity_on_stock = new_quantity_on_stock
        self.old_available_quantity = old_available_quantity
        self.new_available_quantity = new_available_quantity
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
            last_modified_by=last_modified_by,
            created_by=created_by,
            sequence_number=sequence_number,
            resource=resource,
            resource_version=resource_version,
            type="InventoryEntryQuantitySet",
            resource_user_provided_identifiers=resource_user_provided_identifiers,
        )

    def __repr__(self) -> str:
        return (
            f"InventoryEntryQuantitySetMessage(id={self.id!r}, version={self.version!r}, "
            f"created_at={self.created_at!r}, last_modified_at={self.last_modified_at!r}, "
            f"last_modified_by={self.last_modified_by!r}, created_by={self.created_by!r}, "
            f"sequence_number={self.sequence_number!r}, resource={self.resource!r}, "
            f"resource_version={self.resource_version!r}, type={self.type!r}, "
            f"resource_user_provided_identifiers={self.resource_user_provided_identifiers!r}, "
            f"old_quantity_on_stock={self.old_quantity_on_stock!r}, "
            f"new_quantity_on_stock={self.new_quantity_on_stock!r}, "
            f"old_available_quantity={self.old_available_quantity!r}, "
            f"new_available_quantity={self.new_available_quantity!r})"
        )
class InventoryEntryQuantitySetMessagePayload(MessagePayload):
    """Payload of an ``InventoryEntryQuantitySet`` message.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.InventoryEntryQuantitySetMessagePayloadSchema`.
    """

    #: :class:`int` `(Named` ``oldQuantityOnStock`` `in Commercetools)`
    old_quantity_on_stock: int
    #: :class:`int` `(Named` ``newQuantityOnStock`` `in Commercetools)`
    new_quantity_on_stock: int
    #: :class:`int` `(Named` ``oldAvailableQuantity`` `in Commercetools)`
    old_available_quantity: int
    #: :class:`int` `(Named` ``newAvailableQuantity`` `in Commercetools)`
    new_available_quantity: int

    def __init__(
        self,
        *,
        type: str = None,
        old_quantity_on_stock: int = None,
        new_quantity_on_stock: int = None,
        old_available_quantity: int = None,
        new_available_quantity: int = None
    ) -> None:
        # Store the payload-specific fields, then let the base class pin the
        # discriminator; the incoming ``type`` argument is not forwarded.
        (
            self.old_quantity_on_stock,
            self.new_quantity_on_stock,
            self.old_available_quantity,
            self.new_available_quantity,
        ) = (
            old_quantity_on_stock,
            new_quantity_on_stock,
            old_available_quantity,
            new_available_quantity,
        )
        super().__init__(type="InventoryEntryQuantitySet")

    def __repr__(self) -> str:
        template = (
            "InventoryEntryQuantitySetMessagePayload(type=%r, old_quantity_on_stock=%r, new_quantity_on_stock=%r, old_available_quantity=%r, new_available_quantity=%r)"
        )
        return template % (
            self.type,
            self.old_quantity_on_stock,
            self.new_quantity_on_stock,
            self.old_available_quantity,
            self.new_available_quantity,
        )
class LineItemStateTransitionMessage(Message):
    """Message recording a line item's transition from one state to another.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.LineItemStateTransitionMessageSchema`.
    """

    #: :class:`str` `(Named` ``lineItemId`` `in Commercetools)`
    line_item_id: str
    #: :class:`datetime.datetime` `(Named` ``transitionDate`` `in Commercetools)`
    transition_date: datetime.datetime
    #: :class:`int`
    quantity: int
    #: :class:`commercetools.types.StateReference` `(Named` ``fromState`` `in Commercetools)`
    from_state: "StateReference"
    #: :class:`commercetools.types.StateReference` `(Named` ``toState`` `in Commercetools)`
    to_state: "StateReference"

    def __init__(
        self,
        *,
        id: str = None,
        version: int = None,
        created_at: datetime.datetime = None,
        last_modified_at: datetime.datetime = None,
        last_modified_by: typing.Optional["LastModifiedBy"] = None,
        created_by: typing.Optional["CreatedBy"] = None,
        sequence_number: int = None,
        resource: "Reference" = None,
        resource_version: int = None,
        type: str = None,
        resource_user_provided_identifiers: typing.Optional[
            "UserProvidedIdentifiers"
        ] = None,
        line_item_id: str = None,
        transition_date: datetime.datetime = None,
        quantity: int = None,
        from_state: "StateReference" = None,
        to_state: "StateReference" = None
    ) -> None:
        # Message-specific fields are stored first; shared message
        # bookkeeping is delegated to the base class with a fixed
        # discriminator (the incoming ``type`` argument is not forwarded).
        (
            self.line_item_id,
            self.transition_date,
            self.quantity,
            self.from_state,
            self.to_state,
        ) = (
            line_item_id,
            transition_date,
            quantity,
            from_state,
            to_state,
        )
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
            last_modified_by=last_modified_by,
            created_by=created_by,
            sequence_number=sequence_number,
            resource=resource,
            resource_version=resource_version,
            type="LineItemStateTransition",
            resource_user_provided_identifiers=resource_user_provided_identifiers,
        )

    def __repr__(self) -> str:
        template = (
            "LineItemStateTransitionMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, line_item_id=%r, transition_date=%r, quantity=%r, from_state=%r, to_state=%r)"
        )
        return template % (
            self.id,
            self.version,
            self.created_at,
            self.last_modified_at,
            self.last_modified_by,
            self.created_by,
            self.sequence_number,
            self.resource,
            self.resource_version,
            self.type,
            self.resource_user_provided_identifiers,
            self.line_item_id,
            self.transition_date,
            self.quantity,
            self.from_state,
            self.to_state,
        )
class LineItemStateTransitionMessagePayload(MessagePayload):
    """Payload of a ``LineItemStateTransition`` message.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.LineItemStateTransitionMessagePayloadSchema`.
    """

    #: :class:`str` `(Named` ``lineItemId`` `in Commercetools)`
    line_item_id: str
    #: :class:`datetime.datetime` `(Named` ``transitionDate`` `in Commercetools)`
    transition_date: datetime.datetime
    #: :class:`int`
    quantity: int
    #: :class:`commercetools.types.StateReference` `(Named` ``fromState`` `in Commercetools)`
    from_state: "StateReference"
    #: :class:`commercetools.types.StateReference` `(Named` ``toState`` `in Commercetools)`
    to_state: "StateReference"

    def __init__(
        self,
        *,
        type: str = None,
        line_item_id: str = None,
        transition_date: datetime.datetime = None,
        quantity: int = None,
        from_state: "StateReference" = None,
        to_state: "StateReference" = None
    ) -> None:
        # Store the payload-specific fields, then let the base class pin the
        # discriminator; the incoming ``type`` argument is not forwarded.
        (
            self.line_item_id,
            self.transition_date,
            self.quantity,
            self.from_state,
            self.to_state,
        ) = (
            line_item_id,
            transition_date,
            quantity,
            from_state,
            to_state,
        )
        super().__init__(type="LineItemStateTransition")

    def __repr__(self) -> str:
        template = (
            "LineItemStateTransitionMessagePayload(type=%r, line_item_id=%r, transition_date=%r, quantity=%r, from_state=%r, to_state=%r)"
        )
        return template % (
            self.type,
            self.line_item_id,
            self.transition_date,
            self.quantity,
            self.from_state,
            self.to_state,
        )
class OrderBillingAddressSetMessage(Message):
    """Message recording that an order's billing address was set.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.OrderBillingAddressSetMessageSchema`.
    """

    #: Optional :class:`commercetools.types.Address`
    address: typing.Optional["Address"]
    #: Optional :class:`commercetools.types.Address` `(Named` ``oldAddress`` `in Commercetools)`
    old_address: typing.Optional["Address"]

    def __init__(
        self,
        *,
        id: str = None,
        version: int = None,
        created_at: datetime.datetime = None,
        last_modified_at: datetime.datetime = None,
        last_modified_by: typing.Optional["LastModifiedBy"] = None,
        created_by: typing.Optional["CreatedBy"] = None,
        sequence_number: int = None,
        resource: "Reference" = None,
        resource_version: int = None,
        type: str = None,
        resource_user_provided_identifiers: typing.Optional[
            "UserProvidedIdentifiers"
        ] = None,
        address: typing.Optional["Address"] = None,
        old_address: typing.Optional["Address"] = None
    ) -> None:
        # Message-specific fields are stored first; shared message
        # bookkeeping is delegated to the base class with a fixed
        # discriminator (the incoming ``type`` argument is not forwarded).
        self.address, self.old_address = address, old_address
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
            last_modified_by=last_modified_by,
            created_by=created_by,
            sequence_number=sequence_number,
            resource=resource,
            resource_version=resource_version,
            type="OrderBillingAddressSet",
            resource_user_provided_identifiers=resource_user_provided_identifiers,
        )

    def __repr__(self) -> str:
        template = (
            "OrderBillingAddressSetMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, address=%r, old_address=%r)"
        )
        return template % (
            self.id,
            self.version,
            self.created_at,
            self.last_modified_at,
            self.last_modified_by,
            self.created_by,
            self.sequence_number,
            self.resource,
            self.resource_version,
            self.type,
            self.resource_user_provided_identifiers,
            self.address,
            self.old_address,
        )
class OrderBillingAddressSetMessagePayload(MessagePayload):
    """Payload of an ``OrderBillingAddressSet`` message.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.OrderBillingAddressSetMessagePayloadSchema`.
    """

    #: Optional :class:`commercetools.types.Address`
    address: typing.Optional["Address"]
    #: Optional :class:`commercetools.types.Address` `(Named` ``oldAddress`` `in Commercetools)`
    old_address: typing.Optional["Address"]

    def __init__(
        self,
        *,
        type: str = None,
        address: typing.Optional["Address"] = None,
        old_address: typing.Optional["Address"] = None
    ) -> None:
        # Store the payload-specific fields, then let the base class pin the
        # discriminator; the incoming ``type`` argument is not forwarded.
        self.address, self.old_address = address, old_address
        super().__init__(type="OrderBillingAddressSet")

    def __repr__(self) -> str:
        template = "OrderBillingAddressSetMessagePayload(type=%r, address=%r, old_address=%r)"
        return template % (self.type, self.address, self.old_address)
class OrderCreatedMessage(Message):
    """Message recording the creation of an order.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.OrderCreatedMessageSchema`.
    """

    #: :class:`commercetools.types.Order`
    order: "Order"

    def __init__(
        self,
        *,
        id: str = None,
        version: int = None,
        created_at: datetime.datetime = None,
        last_modified_at: datetime.datetime = None,
        last_modified_by: typing.Optional["LastModifiedBy"] = None,
        created_by: typing.Optional["CreatedBy"] = None,
        sequence_number: int = None,
        resource: "Reference" = None,
        resource_version: int = None,
        type: str = None,
        resource_user_provided_identifiers: typing.Optional[
            "UserProvidedIdentifiers"
        ] = None,
        order: "Order" = None
    ) -> None:
        # The created order itself is stored first; shared message
        # bookkeeping is delegated to the base class with a fixed
        # discriminator (the incoming ``type`` argument is not forwarded).
        self.order = order
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
            last_modified_by=last_modified_by,
            created_by=created_by,
            sequence_number=sequence_number,
            resource=resource,
            resource_version=resource_version,
            type="OrderCreated",
            resource_user_provided_identifiers=resource_user_provided_identifiers,
        )

    def __repr__(self) -> str:
        template = (
            "OrderCreatedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, order=%r)"
        )
        return template % (
            self.id,
            self.version,
            self.created_at,
            self.last_modified_at,
            self.last_modified_by,
            self.created_by,
            self.sequence_number,
            self.resource,
            self.resource_version,
            self.type,
            self.resource_user_provided_identifiers,
            self.order,
        )
class OrderCreatedMessagePayload(MessagePayload):
    """Payload of an ``OrderCreated`` message.

    Corresponding marshmallow schema is
    :class:`commercetools.schemas.OrderCreatedMessagePayloadSchema`.
    """

    #: :class:`commercetools.types.Order`
    order: "Order"

    def __init__(self, *, type: str = None, order: "Order" = None) -> None:
        # Store the order, then let the base class pin the discriminator;
        # the incoming ``type`` argument is not forwarded.
        self.order = order
        super().__init__(type="OrderCreated")

    def __repr__(self) -> str:
        template = "OrderCreatedMessagePayload(type=%r, order=%r)"
        return template % (self.type, self.order)
class OrderCustomLineItemDiscountSetMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.OrderCustomLineItemDiscountSetMessageSchema`."
#: :class:`str` `(Named` ``customLineItemId`` `in Commercetools)`
custom_line_item_id: str
#: List of :class:`commercetools.types.DiscountedLineItemPriceForQuantity` `(Named` ``discountedPricePerQuantity`` `in Commercetools)`
discounted_price_per_quantity: typing.List["DiscountedLineItemPriceForQuantity"]
#: Optional :class:`commercetools.types.TaxedItemPrice` `(Named` ``taxedPrice`` `in Commercetools)`
taxed_price: typing.Optional["TaxedItemPrice"]
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
custom_line_item_id: str = None,
discounted_price_per_quantity: typing.List[
"DiscountedLineItemPriceForQuantity"
] = None,
taxed_price: typing.Optional["TaxedItemPrice"] = None
) -> None:
self.custom_line_item_id = custom_line_item_id
self.discounted_price_per_quantity = discounted_price_per_quantity
self.taxed_price = taxed_price
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="OrderCustomLineItemDiscountSet",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
| |
(int,)):
#
# Q: should this be a warning? A user _might_ be trying
# to use an integer as a key. But in practice that's not
# likely.
#
raise PluginError("Access of the n-th extension point is disallowed. This is not well-defined, since ExtensionPoints are stored as unordered sets.")
return self.extensions(all=all, key=key)
def service(self, key=None, all=False):
"""
Return the unique service that matches the interface of this
extension point. An exception occurs if no service matches the
specified key, or if multiple services match.
"""
ans = ExtensionPoint.__call__(self, key=key, all=all)
if len(ans) == 1:
#
# There is a single service, so return it.
#
return ans.pop()
elif len(ans) == 0:
return None
else:
raise PluginError("The ExtensionPoint does not have a unique service! %d services are defined for interface %s. (key=%s)" % (len(ans), self.interface. __name__, str(key)))
def __len__(self):
"""
Return the number of services that match the interface of this
extension point.
"""
return len(self.extensions())
def extensions(self, all=False, key=None):
"""
Return a set of services that match the interface of this
extension point. This tacitly filters out disabled extension points.
"""
ans = set()
for env in self.env:
ans.update(env.active_services(self.interface, all=all, key=key))
return sorted(ans, key=lambda x: x.id)
def __repr__(self):
"""Return a textual representation of the extension point."""
env_str = ""
for env in self.env:
env_str += " env=%s" % env.name
return '<ExtensionPoint %s%s>' % (self.interface.__name__,env_str)
"""
The environment for the components in the PCA.
This class has the following attributes that a user may use:
* name - A string that identifies this environment. By default a unique integer id is used to define the name "env<id>"
* namespace - A name that defines the relationship of this environment to other environments
* registry - A map from interfaces to registered services that match each interface
* services - The set of all services (Plugin instances) that have been registered in this environment
* singleton_services - Singleton services, which can only be registered once in each environment
* enabled - A cache that denotes whether a service has been enabled.
The namespace of Environment instances is dynamically generated by extending the namespace of the current environment. However, the environment namespace can be explicitly declared in the constructor.
"""
class PluginEnvironment(object):
    """A namespace that manages the registration and activation of plugin
    services.

    Notable attributes:

    * name - A string identifying this environment
    * plugin_registry - A map of plugins registered by name
    * services - The set of all Plugin instances registered here
    * singleton_services - Services that may only be registered once
    * _cache - Memoized results of :meth:`active_services` lookups
    """

    def __init__(self, name=None, bootstrap=False):
        # The registry of plugins, by name
        self.plugin_registry = {}
        if name is None:
            # Generate a unique default name of the form "env<id>"
            self.name = "env" + str(PluginGlobals.next_id())
        else:
            self.name = name
        if self.name in PluginGlobals.env_registry:
            raise PluginError("The Environment %r already exists!" % self.name)
        PluginGlobals.env_registry[self.name] = self
        self.singleton_services = {}
        self.services = set()
        if not bootstrap:
            # Loader extension points cannot be created while bootstrapping,
            # because the loader interfaces are defined after the bootstrap
            # environments are constructed.
            self.loaders = ExtensionPoint(IPluginLoader)
            self.loader_paths = ExtensionPoint(IPluginLoadPath)
        self.log = logger_factory(self.name)
        if __debug__:
            self.log.debug("Creating PluginEnvironment %r" % self.name)
        self.level = []
        self._cache = {}

    def __del__(self):
        #
        # Don't delete the two standard environments.
        #
        if self.name == 'pca' or self.name == '<default>':
            return
        #
        # If the PluginGlobals.clear() method is being called, then
        # don't try to remove data from the environment registry. It
        # has already been deleted!
        #
        if not PluginGlobals.clearing:
            if self.name in PluginGlobals.env_registry:
                del PluginGlobals.env_registry[self.name]

    def __contains__(self, cls):
        """
        Return whether the given service is in the set of services.
        """
        return cls in self.services

    def active_services(self, cls, all=False, key=None):
        """
        Return the services that have been activated for a specific interface class.
        """
        if isinstance(cls, Plugin):
            id = cls.__class__
        else:
            id = cls
        # Lookups are memoized per (interface, all, key) triple; the cache is
        # invalidated by clear_cache() whenever the service set changes.
        cache_key = (id, all, key)
        try:
            return self._cache[cache_key]
        except KeyError:
            if not issubclass(id, Interface):
                raise PluginError("PluginEnvironment[x] expects "+str(id)+" to be an Interface class")
            strkey = str(key)
            tmp = [x for x in self.services if id in x.__interfaces__ and (all or x.enabled()) and (key is None or x._p_name == strkey)]
            self._cache[cache_key] = tmp
            return tmp

    def activate(self, service):
        """
        This adds the service to this environment.
        """
        self.log.info("Adding service %s to environment %s" % (service._p_name, self.name))
        self.services.add(service)
        self.clear_cache()

    def deactivate(self, service):
        """
        This removes the service from this environment.
        """
        self.log.info("Removing service %s from environment %s" % (service._p_name, self.name))
        if service in self.services:
            self.services.remove(service)
        self.clear_cache()

    def __repr__(self):
        return self.pprint()

    def pprint(self, show_ids=True):
        """
        Provides a detailed summary of this environment
        """
        s = ""
        s += " Services for Environment %r\n" % self.name
        flag = True
        # Sort services by their string representation for stable output.
        tmp = {}
        for service in self.services:
            tmp[str(service)] = service
        keys = list(tmp.keys())
        keys.sort()
        for key in keys:
            flag = False
            s += " "+key
            if show_ids:
                # Annotate with the id; "-" marks disabled services and "*"
                # marks singletons.
                s += " ("
                if not tmp[key].enabled():
                    s += "-" #pragma:nocover
                s += str(tmp[key].id)
                if tmp[key].__class__ in self.singleton_services:
                    s += "*"
                s += ")\n"
            else:
                s += "\n"
        if flag:
            s += " None\n"
        return s

    def load_services(self, path=None, auto_disable=False, name_re=True):
        """Load services from IPluginLoader extension points"""
        #
        # Construct the search path
        #
        search_path = []
        if path is not None:
            if isinstance(path, str):
                search_path.append(path)
            elif isinstance(path, (list, tuple)):
                search_path += path
            else:
                raise PluginError("Unknown type of path argument: "+str(type(path)))
        for item in self.loader_paths:
            search_path += item.get_load_path()
        self.log.info("Loading services to environment %s from search path %s" % (self.name, search_path))
        #
        # Compile the enable expression
        #
        if isinstance(auto_disable, bool):
            if auto_disable:
                # Match everything: disable all loaded plugins
                disable_p = re.compile("")
            else:
                # Match nothing: leave all loaded plugins enabled
                disable_p = re.compile("^$")
        elif isinstance(auto_disable, str):
            disable_p = re.compile(auto_disable)
        else:
            # BUG FIX: this message previously said "auth_disable", which is
            # not the name of any parameter of this method.
            raise PluginError("Unknown type of auto_disable argument: "+str(type(auto_disable)))
        #
        # Compile the name expression
        #
        if isinstance(name_re, bool):
            if name_re:
                name_p = re.compile("")
            else: #pragma:nocover
                raise PluginError("It doesn't make sense to specify name_re=False")
        elif isinstance(name_re, str):
            name_p = re.compile(name_re)
        else:
            raise PluginError("Unknown type of name_re argument: "+str(type(name_re)))
        for loader in self.loaders:
            loader.load(self, search_path, disable_p, name_p)
        self.clear_cache()

    def clear_cache(self):
        """ Clear the cache of active services """
        self._cache = {}
#
# Reset the plugins environment when this module is first loaded.
#
PluginGlobals.clear(bootstrap=True)
# All interfaces/plugins defined below are registered in the "pca" environment.
PluginGlobals.push_env("pca")
class IPluginLoader(Interface):
    """An interface for loading plugins."""

    def load(self, env, path, disable_re, name_re):
        """Load plugins found on the specified path.

        If `disable_re` is not None, then it is interpreted as a regular
        expression. If this expression matches the path of a plugin, then
        that plugin is disabled. Otherwise, the plugin is enabled by default.
        """
class IPluginLoadPath(Interface):
    """An interface for contributing paths that are searched for plugins."""

    def get_load_path(self):
        """Returns a list of paths that are searched for plugins"""
class IIgnorePluginWhenLoading(Interface):
    """Interface used by Plugin loaders to identify Plugins that should
    be ignored"""

    def ignore(self, name):
        """Returns true if a loader should ignore a plugin during loading"""
# Attach loader extension points to the two standard environments —
# presumably these could not be created in their constructors because the
# environments were built in bootstrap mode, before the loader interfaces
# above were defined (TODO confirm against PluginGlobals.clear()).
PluginGlobals.env("<default>").loaders = ExtensionPoint(IPluginLoader)
PluginGlobals.env("<default>").loader_paths = ExtensionPoint(IPluginLoadPath)
PluginGlobals.env("pca").loaders = ExtensionPoint(IPluginLoader)
PluginGlobals.env("pca").loader_paths = ExtensionPoint(IPluginLoadPath)
class PluginMeta(type):
"""Meta class for the Plugin class. This meta class
takes care of service and extension point registration. This class
also instantiates singleton plugins.
"""
def __new__(cls, name, bases, d):
"""Find all interfaces that need to be registered."""
#
# Avoid cycling in the Python logic by hard-coding the behavior
# for the Plugin and SingletonPlugin classes.
#
if name == "Plugin":
d['__singleton__'] = False
return type.__new__(cls, name, bases, d)
if name == "SingletonPlugin":
d['__singleton__'] = True
return type.__new__(cls, name, bases, d)
if name == "ManagedSingletonPlugin":
#
# This is a derived class of SingletonPlugin for which
# we do not need to build an instance
#
d['__singleton__'] = True
return type.__new__(cls, name, bases, d)
#
# Check if plugin has already been registered
#
if len(d.get('_implements', [])) == 0 and name in PluginGlobals.env().plugin_registry:
raise PluginError("Plugin class %r does not implement an interface, and it has already been defined in environment '%r'." % (str(name), PluginGlobals.env().name))
#
# Capture the environment namespace that this plugin is declared in
#
d['__plugin_namespace__'] = PluginGlobals.env().name
#
# Find all interfaces that this plugin will support
#
__interfaces__ = {}
for interface in d.get('_implements', {}):
__interfaces__.setdefault(interface,[]).extend( d['_implements'][interface] )
for base in [base for base in bases if hasattr(base, '__interfaces__')]:
for interface in base.__interfaces__:
__interfaces__.setdefault(interface,[]).extend( base.__interfaces__[interface] )
d['__interfaces__'] = __interfaces__
#
# Create a boolean, which indicates whether this is
# a singleton class.
#
if True in [issubclass(x, SingletonPlugin) for x in bases]:
d['__singleton__'] = True
else:
d['__singleton__'] = False
#
# Add interfaces to the list of base classes if they are
# declared inherited.
#
flag=False
bases = list(bases)
for interface in d.get('_inherited_interfaces', set()):
if interface not in bases:
bases.append(interface)
flag=True
if flag:
cls=MergedPluginMeta
#
# Create new class
| |
<filename>hyperparameter_hunter/key_handler.py<gh_stars>0
"""This module handles the creation of `cross_experiment_key`\s and `hyperparameter_key`\s for
:class:`hyperparameter_hunter.environment.Environment`, and :class:`hyperparameter_hunter.experiments.BaseExperiment`,
respectively. It also handles the treatment of complex-typed inputs and their storage in the 'KeyAttributeLookup' subdirectory.
The descendants of :class:`hyperparameter_hunter.key_handler.KeyMaker` defined herein are each responsible for the generation and
saving of their keys, as well as determining whether such a key already exists
Related
-------
:mod:`hyperparameter_hunter.environment`
This module uses :class:`hyperparameter_hunter.key_handler.CrossExperimentKeyMaker` to set
:attr:`hyperparameter_hunter.environment.Environment.cross_experiment_key`
:mod:`hyperparameter_hunter.experiments`
This module uses :class:`hyperparameter_hunter.key_handler.HyperparameterKeyMaker` to set
:attr:`hyperparameter_hunter.experiments.BaseExperiment.hyperparameter_key`"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.exception_handler import EnvironmentInvalidError, EnvironmentInactiveError
from hyperparameter_hunter.library_helpers.keras_helper import keras_callback_to_dict, parameterize_compiled_keras_model
from hyperparameter_hunter.library_helpers.keras_optimization_helper import initialize_dummy_model
from hyperparameter_hunter.sentinels import Sentinel
from hyperparameter_hunter.settings import G
from hyperparameter_hunter.utils.file_utils import write_json, read_json, add_to_json
from hyperparameter_hunter.utils.boltons_utils import remap
##################################################
# Import Miscellaneous Assets
##################################################
from abc import ABCMeta, abstractmethod
import base64
from copy import deepcopy
import dill
from functools import partial
import hashlib
from inspect import getsourcelines, isfunction, isclass, getsource
from os import listdir
import os.path
import pandas as pd
import re
import shelve
##################################################
# Import Learning Assets
##################################################
try:
from keras.callbacks import Callback as BaseKerasCallback
except ModuleNotFoundError:
class BaseKerasCallback():
placeholder_attribute = """
Hello, there! I am a `placeholder_attribute` for `BaseKerasCallback` if attempting to import `Keras` raised a
`ModuleNotFoundError`. You might be wondering what I'm doing here. I'm special because no normal/sane person would make a
class, or an attribute just like me! That means that if anyone checks to see if something is an instance of yours truly,
hopefully it won't be! :) Nice to meet you! &*%#))(%#(*&@*HIOV0(#*W*Q()UFIJW_Q)_#R*(*(T{_E_QWO_))T+VMS"W)|GO{>A?C<A/woe0
"""
##################################################
# KeyMaker Base Class:
##################################################
class KeyMaker(metaclass=ABCMeta):
def __init__(self, parameters, **kwargs):
"""Base class to handle making key hashes and checking for their existence. Additionally, this class handles saving
entries for complex-typed parameters, along with their hashes to ensure experiments are reproducible
Parameters
----------
parameters: Dict
All the parameters to be included when creating the key hash. Keys should correspond to parameter names, and values
should be the values of the corresponding keys
**kwargs: Dict
Additional arguments
Attributes
----------
parameters: Dict
A deep copy of the given `parameters` input
key: Str, or None
If a key has been generated for `parameters`, it is saved here. Else, None
exists: Boolean
If `key` is not None, and was found to already exist in `tested_keys_dir`, `exists` = True. Else, False
key_attribute_lookup_dir: Str
The directory in which complex-typed parameter entries will be saved
tested_keys_dir: Str, or None
The directory is which `key` will be saved if it does not already contain `key`"""
self.parameters = deepcopy(parameters)
self.key = None
self.exists = False
self.key_attribute_lookup_dir = G.Env.result_paths['key_attribute_lookup']
self.tested_keys_dir = G.Env.result_paths['tested_keys']
self.validate_environment()
self.handle_complex_types()
self.make_key()
self.does_key_exist()
def __repr__(self):
return F'{self.__class__.__name__}(key={self.key!r})'
def __str__(self):
return F'{self.key!s}'
def __eq__(self, other):
return self.key == other
def __ne__(self, other):
"""A KeyMaker instance will always return True for a non-equality check if its key has not been set (is None)"""
return (self.key is None) or (self.key != other)
##################################################
# Core Methods
##################################################
def validate_environment(self):
"""Check that the currently active Environment is suitable"""
if G.Env is None:
raise EnvironmentInactiveError('')
if not all([hasattr(G.Env, _) for _ in ['result_paths', 'cross_experiment_key']]):
raise EnvironmentInvalidError('')
try:
# Ensure :attr:`tested_keys_dir` exists before calling :meth:`does_key_exist`, so "None" paths won't be checked
if os.path.exists(self.tested_keys_dir) is False:
# TypeError may also be raised if :func:`os.path.exists` receives invalid input
raise TypeError
except TypeError: # Key-making blacklisted
if self.tested_keys_dir is None:
return
os.makedirs(self.tested_keys_dir)
def handle_complex_types(self):
"""Locate complex types in :attr:`parameters`, create hashes for them, add lookup entries linking their original values
to their hashes, then update their values in :attr:`parameters` to their hashes to facilitate Description saving"""
if self.tested_keys_dir is None: # Key-making blacklisted
return
dataframe_hashes = {}
def visit(path, key, value):
"""Check whether a parameter is of a complex type. If not, return it unchanged. Otherwise, 1) create a hash for its
value; 2) save a complex type lookup entry linking `key`, `value`, and the hash for `value`; and 3) return the hashed
value with `key`, instead of the original complex-typed `value`
Parameters
----------
path: Tuple
The path of keys that leads to `key`
key: Str
The parameter name
value: *
The value of the parameter `key`
Returns
-------
Tuple of (`key`, value), in which value is either unchanged or a hash for the original `value`"""
if isinstance(value, BaseKerasCallback):
return (key, keras_callback_to_dict(value))
if isinstance(value, Sentinel):
return (key, value.sentinel)
elif callable(value) or isinstance(value, pd.DataFrame):
hashed_value = make_hash_sha256(value)
if isinstance(value, pd.DataFrame):
dataframe_hashes.setdefault(hashed_value, []).append(key)
try:
self.add_complex_type_lookup_entry(path, key, value, hashed_value)
except FileNotFoundError:
os.makedirs(self.key_attribute_lookup_dir, exist_ok=False)
self.add_complex_type_lookup_entry(path, key, value, hashed_value)
return (key, hashed_value)
return (key, value)
self.parameters = remap(self.parameters, visit=visit)
#################### Check for Identical DataFrames ####################
for df_hash, df_names in dataframe_hashes.items():
if len(df_names) > 1:
G.warn(
F'The dataframes: {df_names} have an identical hash: {df_hash!s}. This implies the dataframes are ' +
'identical, which is probably unintentional. If left alone, scores may be misleading!'
)
def add_complex_type_lookup_entry(self, path, key, value, hashed_value):
"""Add lookup entry in `key_attribute_lookup_dir` for a complex-typed parameter, linking the parameter `key`, its
`value`, and its `hashed_value`
Parameters
----------
path: Tuple
The path of keys that leads to `key`
key: Str
The parameter name
value: *
The value of the parameter `key`
hashed_value: Str
The hash produced for `value`"""
# TODO: Combine `path` and `key` to produce actual filepaths
shelve_params = ['model_initializer', 'cross_validation_type']
if isclass(value) or (key in shelve_params):
with shelve.open(os.path.join(self.key_attribute_lookup_dir, F'{key}'), flag='c') as shelf:
# NOTE: When reading from shelve file, DO NOT add the ".db" file extension
shelf[hashed_value] = value
elif isinstance(value, pd.DataFrame):
os.makedirs(os.path.join(self.key_attribute_lookup_dir, key), exist_ok=True)
value.to_csv(os.path.join(self.key_attribute_lookup_dir, key, F'{hashed_value}.csv'), index=False)
else: # Possible types: partial, function, *other
add_to_json(
file_path=os.path.join(self.key_attribute_lookup_dir, F'{key}.json'),
data_to_add=getsource(value), key=hashed_value, condition=lambda _: hashed_value not in _.keys(), default={},
)
def make_key(self):
"""Set :attr:`key` to an sha256 hash for :attr:`parameters`"""
self.key = make_hash_sha256(self._filter_parameters_to_hash(deepcopy(self.parameters)))
@staticmethod
def _filter_parameters_to_hash(parameters):
    """Produce a filtered version of `parameters` that omits values which should be
    ignored during hashing. The base implementation performs no filtering and returns
    the input unchanged

    Parameters
    ----------
    parameters: dict
        The full dictionary of initial parameters to be filtered

    Returns
    -------
    parameters: dict
        The filtered version of the given `parameters`"""
    # No values are ignored by default
    return parameters
##################################################
# Abstract Methods
##################################################
@property
@abstractmethod
def key_type(self) -> str:
    """A string in ['hyperparameter', 'cross_experiment'] denoting which type of key is being processed"""
    # Abstract: concrete KeyMaker subclasses define this as a class attribute
    raise NotImplementedError()
@abstractmethod
def does_key_exist(self) -> bool:
    """Check if the key hash already exists among previously saved keys in the contents of :attr:`tested_keys_dir`"""
    # Abstract: implementations are expected to set :attr:`exists` and return it
    raise NotImplementedError()
@abstractmethod
def save_key(self) -> None:
    """Save the key hash and the parameters used to make it to :attr:`tested_keys_dir`"""
    # Abstract: implementations persist the key; see CrossExperimentKeyMaker.save_key
    raise NotImplementedError()
class CrossExperimentKeyMaker(KeyMaker):
    """KeyMaker specialized for cross-experiment keys, which determine when experiments were
    executed under sufficiently similar conditions to permit proper comparison. Two separate
    instances of :class:`environment.Environment` should produce identical
    `cross_experiment_key` s if their arguments are the same (or close enough)"""

    key_type = 'cross_experiment'

    def __init__(self, parameters, **kwargs):
        """
        Parameters
        ----------
        parameters: Dict
            All the parameters to be included when creating the key hash. Keys should
            correspond to parameter names, and values should be the values of the
            corresponding keys
        **kwargs: Dict
            Additional arguments supplied to :meth:`key_handler.KeyMaker.__init__`"""
        super().__init__(parameters, **kwargs)

    def does_key_exist(self):
        """Check if a file corresponding to this cross_experiment_key already exists

        Returns
        -------
        Boolean"""
        # Compare against saved key filenames with their extensions stripped
        saved_keys = {os.path.splitext(name)[0] for name in listdir(self.tested_keys_dir)}
        self.exists = self.key in saved_keys
        return self.exists

    def save_key(self):
        """Create a new file for this cross_experiment_key if :attr:`exists` is False"""
        if self.exists:
            G.log(F'{self.key_type}_key "{self.key}" already exists - Skipped saving')
            return
        write_json(F'{self.tested_keys_dir}/{self.key}.json', {})
        self.exists = True
        G.log(F'Saved {self.key_type}_key: "{self.key}"')
class HyperparameterKeyMaker(KeyMaker):
key_type = 'hyperparameter'
def __init__(self, parameters, cross_experiment_key, **kwargs):
"""A KeyMaker class dedicated to creating hyperparameter keys, which determine when experiments were executed using
identical hyperparameters. Two separate instances of :class:`experiments.CrossValidationExperiment` should produce
identical `hyperparameter_key` s if their hyperparameters are the same (or close enough)
Parameters
----------
parameters: Dict
All the parameters to be included when creating the key hash. Keys should correspond to parameter names, and values
should be the values of the corresponding keys
cross_experiment_key: Str
The key produced by the active Environment via :class:`key_handler.CrossExperimentKeyMaker`, used for determining
when a hyperparameter key has already been tested under the same cross-experiment parameters
**kwargs: Dict
Additional arguments supplied to :meth:`key_handler.KeyMaker.__init__`"""
self.cross_experiment_key = cross_experiment_key
if hasattr(G.Env, 'current_task') and G.Env.current_task and G.Env.current_task.module_name == 'keras':
parameters = deepcopy(parameters)
#################### Initialize and Parameterize Dummy Model ####################
temp_model = initialize_dummy_model(
parameters['model_initializer'], | |
in
# self.input[first_useable: i] can be copied directly to self.processed_input
if not np.any(np.isnan(first_useable)):
self.processed_input[first_useable_i:i, 0:3] = self.input[first_useable_i: i, 0:3]
first_useable = float('nan')
# If last_useable has already been assigned, the current value is somewhere within a row of
# several NaN. Thus, nothing is done.
else:
# If the current value is not NaN, check if last_useable has been assigned
# If last_useable has NOT been assigned, the current value is somewhere within a row of
# actual values. Nothing needs to be done
if not np.any(np.isnan(last_useable)):
# If last_useable has in fact been assigned, the current value is the first actual value
# after some number of NaN values. Therefore, next_useable is assigned as current value.
next_useable = self.input[i, 0:3]
# The values between last_useable and next_useable are then interpolated
# Incremental values are identified
steps = i - last_useable_i
increment = (next_useable - last_useable) / steps
increment_array = np.linspace(increment, increment * (steps - 1), steps - 1)
# Currently encapsulated NaN-values are set equal to last_useable
self.processed_input[last_useable_i + 1:i, 0:3] = last_useable
# Increments are added
# TODO: Solve this try/except
try:
self.processed_input[last_useable_i + 1:i, 0:3] += increment_array
except ValueError:
print(f'Couldn\'t insert the interpolated gyro array into processed_input:\n'
f'Steps: {steps}, last_usable_i: {last_useable_i}, increment_array: {increment_array}')
# Finally, both last_useable and next_useable are set to NaN again
last_useable = float('nan')
# If first_useable is not set to any value, this is the first non-NaN value in some row of
# non-NaN values.
if np.any(np.isnan(first_useable)):
first_useable = self.input[i, 0:3]
first_useable_i = i
# When the entire row is checked for NaN, there may still be the case that the last values are
# NaN values. In this case we cannot interpolate anything and instead we perform a simple
# extrapolation, by copying last_useable into each of the values.
if not np.any(np.isnan(last_useable)):
self.processed_input[last_useable_i + 1:end, 0:3] = last_useable
else:
# If last_useable is not set, this bursts ends with non-NaN values. These can be inserted into
# self.processed_input
self.processed_input[first_useable_i:end, 0:3] = self.input[first_useable_i:end, 0:3]
# Finally, after having copied/inter/extrapolated input to processed_input, the data is adjusted according
# to input bias
self.processed_input[start:end, 0:2] -= self.acc_bias_final[0:2]
self.processed_input[start:end, 0:3] *= self.gravitational_constant
def set_processed_gyro_input_nan(self, start: int, end: int):
    """Copy the gyro columns (3:5) of ``self.input[start:end]`` into ``self.processed_input``,
    linearly interpolating across interior runs of NaN rows, extrapolating (by repetition)
    across a trailing NaN run, and finally subtracting :attr:`gyro_bias_final`.

    Parameters
    ----------
    start: int
        First row index (inclusive) of the burst to process
    end: int
        Last row index (exclusive) of the burst to process"""
    # TODO: Consider a variable that updates for each data row in this method. The value increases by some metric
    #       for each non-NaN value, and decreases for each NaN-value. If the variable stays below a certain value
    #       for some number of rows, these rows are discarded.
    # Method uses assumption 1
    # first_useable / last_useable track the boundaries of the current run of
    # valid (non-NaN) rows; NaN means "not currently assigned"
    first_useable = float('nan')
    first_useable_i = 0
    last_useable = float('nan')
    last_useable_i = 0
    for i in range(start, end):
        # Check if the current value is NaN
        if np.any(np.isnan(self.input[i, 3:5])):
            # Check if last_useable has been assigned
            if np.any(np.isnan(last_useable)):
                # If last_useable has not been assigned, the current value is the first NaN value of
                # some number of NaN values. Therefore, assign last_useable as the previous value
                last_useable = self.input[i - 1, 3:5]
                last_useable_i = i - 1
                # If first_useable has also been assigned, all values in
                # self.input[first_useable: i] can be copied directly to self.processed_input
                if not np.any(np.isnan(first_useable)):
                    self.processed_input[first_useable_i:i, 3:5] = self.input[first_useable_i: i, 3:5]
                    first_useable = float('nan')
                # Taking into account that self.input[i-1] might also be a NaN value (in case this is the beginning
                # of a burst and the last value of the previous burst was NaN), a generic set of values may need
                # to be used
                if np.any(np.isnan(last_useable)):
                    last_useable = np.array([0.0, 0.0])  # Generic set of [gyroX, gyroY]
            # If last_useable has already been assigned, the current value is somewhere within a row of
            # several NaN. Thus, nothing is done.
        else:
            # If the current value is not NaN, check if last_useable has been assigned
            # If last_useable has NOT been assigned, the current value is somewhere within a row of
            # actual values. Nothing needs to be done
            if not np.any(np.isnan(last_useable)):
                # If last_useable has in fact been assigned, the current value is the first actual value
                # after some number of NaN values. Therefore, next_useable is assigned as current value.
                next_useable = self.input[i, 3:5]
                # The values between last_useable and next_useable are then interpolated
                # Incremental values are identified
                steps = i - last_useable_i
                increment = (next_useable - last_useable) / steps
                increment_array = np.linspace(increment, increment * (steps - 1), steps - 1)
                # Currently encapsulated NaN-values are set equal to last_useable
                self.processed_input[last_useable_i + 1:i, 3:5] = last_useable
                # Increments are added
                # TODO: Solve this try/except
                try:
                    self.processed_input[last_useable_i + 1:i, 3:5] += increment_array
                except ValueError:
                    print(f'Couldn\'t insert the interpolated gyro array into processed_input:\n'
                          f'Steps: {steps}, last_usable_i: {last_useable_i}, increment_array: {increment_array}')
                # Finally, both last_useable and next_useable are set to NaN again
                last_useable = float('nan')
            # If first_useable is not set to any value, this is the first non-NaN value in some row of
            # non-NaN values.
            if np.any(np.isnan(first_useable)):
                first_useable = self.input[i, 3:5]
                first_useable_i = i
    # When the entire row is checked for NaN, there may still be the case that the last values are
    # NaN values. In this case we cannot interpolate anything and instead we perform a simple
    # extrapolation, by copying last_useable into each of the values.
    if not np.any(np.isnan(last_useable)):
        self.processed_input[last_useable_i + 1:end, 3:5] = last_useable
    else:
        # If last_useable is not set, this bursts ends with non-NaN values. These can be inserted into
        # self.processed_input
        self.processed_input[first_useable_i:end, 3:5] = self.input[first_useable_i:end, 3:5]
    # Finally, after having copied/inter/extrapolated input to processed_input, the data is adjusted according
    # to input bias
    self.processed_input[start:end, 3:5] -= self.gyro_bias_final
def update_gyro_bias(self, row_no: int):
    """Refresh the gyroscope bias estimate once enough new samples have arrived
    since the previous update.

    :param row_no: First index of the current burst.
    """
    # Too soon since the last update: nothing to do
    if row_no - self.last_gyro_bias_update < self.points_between_gyro_bias_update:
        return
    if self.dev_mode:
        self.n_bias_updates[0] += 1
    # Recompute the sliding-window mean, then fold it into the adaptive estimate
    self.update_sliding_window_gyro_bias(row_no=row_no)
    self.update_adaptive_gyro_bias()
    if self.dev_mode and row_no > self.n_points_for_gyro_mean:
        # Dev-mode bookkeeping: backfill the bias history since the last update
        self.gyro_bias_array[self.last_gyro_bias_update: row_no] = self.gyro_bias_final
    self.last_gyro_bias_update = row_no
def update_sliding_window_gyro_bias(self, row_no: int):
    """Compute the mean of the last ``n_points_for_gyro_mean`` gyro samples (columns 3:5)
    ending at `row_no`, handling the case where the window wraps around the circular
    input buffer, and store it in :attr:`gyro_bias_sliding_window`.

    :param row_no: End row (exclusive) of the averaging window.
    """
    # Check whether the entire data request can be indexed within [0, row_no]
    if self.historic_data_is_contiguous(request_size=self.n_points_for_gyro_mean,
                                        end_row_of_data=row_no):
        # Contiguous case: a single NaN-ignoring mean over the window
        self.gyro_bias_sliding_window = np.nanmean(self.input[row_no - self.n_points_for_gyro_mean:
                                                              row_no, 3:5],
                                                   axis=0)
    else:
        # If data is split between the end and the start of the buffer,
        # indices are generated to slice these parts
        last_indices, first_indices = self.patched_buffer_indices(request_size=self.n_points_for_gyro_mean,
                                                                  current_row=row_no)
        # The number of NaN-rows (if any) in the part of the buffer being used is required when finding the mean
        # of two separate parts of the buffer.
        # NOTE(review): NaN rows are counted from column 0 (an accelerometer column) while the
        # sums below cover gyro columns 3:5 — this assumes NaN occurs on whole rows ("assumption 1"?);
        # confirm that a row can never be NaN in column 0 but valid in columns 3:5 (or vice versa)
        n_nan = np.count_nonzero(np.isnan(self.input[last_indices[0]:last_indices[1], 0])) \
            + np.count_nonzero(np.isnan(self.input[first_indices[0]:first_indices[1], 0]))
        # Mean = (sum of both buffer segments) / (number of non-NaN rows)
        self.gyro_bias_sliding_window = (np.nansum(self.input[last_indices[0]:last_indices[1], 3:5], axis=0) +
                                         np.nansum(self.input[first_indices[0]:first_indices[1], 3:5], axis=0)) \
            / (self.n_points_for_gyro_mean - n_nan)
def update_adaptive_gyro_bias(self):
    """Feed the latest sliding-window gyro means into the per-axis adaptive averagers
    and store their outputs as the final gyro bias.

    NOTE: the z axis was deliberately disabled in the original code (commented out),
    so only x (index 0) and y (index 1) are processed here.
    """
    axis_filters = (self.adav_gyro_x, self.adav_gyro_y)
    # First pass: push the new window means into the adaptive averagers
    for axis, filt in enumerate(axis_filters):
        filt.update(sample=self.gyro_bias_sliding_window[axis])
    # Second pass: read back the adaptive averages as the final bias values
    for axis, filt in enumerate(axis_filters):
        self.gyro_bias_final[axis] = filt.adaptive_average
def update_acc_bias(self, row_no: int):
    """Refresh the x/y-acceleration bias estimate once enough new samples have
    arrived since the previous update.

    :param row_no: First index of current burst.
    """
    # Too soon since the last update: nothing to do
    if row_no - self.last_acc_bias_update < self.points_between_acc_bias_update:
        return
    if self.dev_mode:
        self.n_bias_updates[1] += 1
    # Recompute the sliding-window mean, then fold it into the adaptive estimate
    self.update_sliding_window_acc_bias(row_no=row_no)
    self.update_adaptive_acc_bias()
    if self.dev_mode and row_no > self.n_points_for_acc_mean:
        # Dev-mode bookkeeping: backfill the bias history since the last update
        self.acc_bias_array[self.last_acc_bias_update: row_no] = self.acc_bias_final
    self.last_acc_bias_update = row_no
def update_sliding_window_acc_bias(self, row_no: int):
# Check whether the entire data request can be indexed within [0, row_no]
if self.historic_data_is_contiguous(request_size=self.n_points_for_acc_mean, end_row_of_data=row_no):
self.acc_bias_sliding_window = \
np.nanmean(self.input[row_no - self.n_points_for_acc_mean: row_no, 0:3], axis=0)
else:
# If data is split between the end and the start of the buffer,
# indices are generated to slice these parts
last_indices, first_indices = self.patched_buffer_indices(request_size=self.n_points_for_acc_mean,
current_row=row_no)
# The number of NaN-rows (if any) in the part of the buffer being used is required when finding the mean
# of two separate parts of the buffer.
n_nan = np.count_nonzero(np.isnan(self.input[last_indices[0]:last_indices[1], 0])) \
+ np.count_nonzero(np.isnan(self.input[first_indices[0]:first_indices[1], 0]))
self.acc_bias_sliding_window = \
(np.nansum(self.input[last_indices[0]:last_indices[1], 0:3], axis=0) +
np.nansum(self.input[first_indices[0]:first_indices[1], 0:3], axis=0)) \
/ (self.n_points_for_acc_mean - n_nan)
# Since we expect the z acceleration to have an average value of -1.0 g, we subtract this from the
# accumulated z acceleration bias
# self.acc_bias[2] += 1.0
# Instead of setting acc_bias so | |
<filename>dxm/lib/masking_api/api/reidentification_job_api.py
# coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dxm.lib.masking_api.api_client import ApiClient
class ReidentificationJobApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Create the API wrapper, using `api_client` for all HTTP calls.

    :param api_client: an ApiClient instance; a default-configured one is created when omitted
    """
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def create_reidentification_job(self, body, **kwargs):  # noqa: E501
    """Create re-identification job  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_reidentification_job(body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param ReidentificationJob body: The re-identification job to create (required)
    :return: ReidentificationJob
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the response data, never the full HTTP envelope
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately
        return self.create_reidentification_job_with_http_info(body, **kwargs)  # noqa: E501
    else:
        # Synchronous mode: block until the response data is available
        (data) = self.create_reidentification_job_with_http_info(body, **kwargs)  # noqa: E501
        return data
def create_reidentification_job_with_http_info(self, body, **kwargs):  # noqa: E501
    """Create re-identification job  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_reidentification_job_with_http_info(body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param ReidentificationJob body: The re-identification job to create (required)
    :return: ReidentificationJob
    If the method is called asynchronously,
    returns the request thread.
    """
    # Accepted parameters: the endpoint's own params plus the standard request options
    all_params = ['body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Reject any keyword argument this endpoint does not understand
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_reidentification_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in params or
                                                   params['body'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `body` when calling `create_reidentification_job`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # The job definition is sent as the request body
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501
    # Delegate the actual HTTP call (and response deserialization) to the shared ApiClient
    return self.api_client.call_api(
        '/reidentification-jobs', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ReidentificationJob',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_reidentification_job(self, reidentification_job_id, **kwargs):  # noqa: E501
    """Delete re-identification job by ID  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_reidentification_job(reidentification_job_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int reidentification_job_id: The ID of the re-identification job to delete (required)
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the response data, never the full HTTP envelope
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately
        return self.delete_reidentification_job_with_http_info(reidentification_job_id, **kwargs)  # noqa: E501
    else:
        # Synchronous mode: block until the response data is available
        (data) = self.delete_reidentification_job_with_http_info(reidentification_job_id, **kwargs)  # noqa: E501
        return data
def delete_reidentification_job_with_http_info(self, reidentification_job_id, **kwargs):  # noqa: E501
    """Delete re-identification job by ID  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_reidentification_job_with_http_info(reidentification_job_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int reidentification_job_id: The ID of the re-identification job to delete (required)
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    """
    # Accepted parameters: the endpoint's own params plus the standard request options
    all_params = ['reidentification_job_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Reject any keyword argument this endpoint does not understand
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_reidentification_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'reidentification_job_id' is set
    if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
                                                   params['reidentification_job_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `reidentification_job_id` when calling `delete_reidentification_job`")  # noqa: E501
    collection_formats = {}
    # The job ID is substituted into the URL path template below
    path_params = {}
    if 'reidentification_job_id' in params:
        path_params['reidentificationJobId'] = params['reidentification_job_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501
    # Delegate the actual HTTP call to the shared ApiClient; DELETE returns no body
    return self.api_client.call_api(
        '/reidentification-jobs/{reidentificationJobId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_all_reidentification_jobs(self, **kwargs):  # noqa: E501
    """Get all re-identification jobs  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_reidentification_jobs(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int page_number: The page number for which to get re-identification jobs. This will default to the first page if excluded
    :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
    :param int environment_id: The ID of the environment to get all re-identification jobs from
    :return: ReidentificationJobList
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the response data, never the full HTTP envelope
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately
        return self.get_all_reidentification_jobs_with_http_info(**kwargs)  # noqa: E501
    else:
        # Synchronous mode: block until the response data is available
        (data) = self.get_all_reidentification_jobs_with_http_info(**kwargs)  # noqa: E501
        return data
def get_all_reidentification_jobs_with_http_info(self, **kwargs):  # noqa: E501
    """Get all re-identification jobs  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_reidentification_jobs_with_http_info(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int page_number: The page number for which to get re-identification jobs. This will default to the first page if excluded
    :param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
    :param int environment_id: The ID of the environment to get all re-identification jobs from
    :return: ReidentificationJobList
    If the method is called asynchronously,
    returns the request thread.
    """
    # Accepted parameters: the endpoint's own params plus the standard request options
    all_params = ['page_number', 'page_size', 'environment_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Reject any keyword argument this endpoint does not understand
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_reidentification_jobs" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Optional paging/filter arguments are forwarded as query-string parameters
    query_params = []
    if 'page_number' in params:
        query_params.append(('page_number', params['page_number']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('page_size', params['page_size']))  # noqa: E501
    if 'environment_id' in params:
        query_params.append(('environment_id', params['environment_id']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501
    # Delegate the actual HTTP call (and response deserialization) to the shared ApiClient
    return self.api_client.call_api(
        '/reidentification-jobs', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ReidentificationJobList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_reidentification_job_by_id(self, reidentification_job_id, **kwargs):  # noqa: E501
    """Get re-identification job by ID  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_reidentification_job_by_id(reidentification_job_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int reidentification_job_id: The ID of the re-identification job to get (required)
    :return: ReidentificationJob
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper always receive just the response data, never the full HTTP envelope
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous mode: return the request thread immediately
        return self.get_reidentification_job_by_id_with_http_info(reidentification_job_id, **kwargs)  # noqa: E501
    else:
        # Synchronous mode: block until the response data is available
        (data) = self.get_reidentification_job_by_id_with_http_info(reidentification_job_id, **kwargs)  # noqa: E501
        return data
def get_reidentification_job_by_id_with_http_info(self, reidentification_job_id, **kwargs): # noqa: E501
"""Get re-identification job by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_reidentification_job_by_id_with_http_info(reidentification_job_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int reidentification_job_id: The ID of the re-identification job to get (required)
:return: ReidentificationJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reidentification_job_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_reidentification_job_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'reidentification_job_id' is set
if self.api_client.client_side_validation and ('reidentification_job_id' not in params or
params['reidentification_job_id'] is None): # noqa: | |
<reponame>jacoblb64/pico_rgb_keypad_hid<gh_stars>10-100
# SPDX-FileCopyrightText: 2001-2019 Python Software Foundation
#
# SPDX-License-Identifier: PSF-2.0
"""
`adafruit_itertools`
================================================================================
Python's itertools adapted for CircuitPython by <NAME>
Copyright 2001-2019 Python Software Foundation; All Rights Reserved
* Author(s): The PSF and <NAME>
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
# pylint:disable=invalid-name,redefined-builtin,attribute-defined-outside-init
# pylint:disable=stop-iteration-return,anomalous-backslash-in-string
__version__ = "1.1.4"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Itertools.git"
def accumulate(iterable, func=lambda x, y: x + y):
    """Make an iterator yielding running totals — or the running results of the
    binary function *func* — over *iterable*. If *func* is supplied, it must
    accept two arguments and return the combined value. Elements may be of any
    type accepted by *func* (with the default addition, any addable type,
    including Decimal or Fraction). An empty input yields nothing.

    :param iterable: the source of values to be accumulated
    :param func: the function to combine the accumulated value with the next one"""
    iterator = iter(iterable)
    # Emit the first element as-is; an empty iterable produces an empty result
    for running in iterator:
        yield running
        break
    else:
        return
    for element in iterator:
        running = func(running, element)
        yield running
def chain(*iterables):
    """Make an iterator that yields every element of the first iterable, then
    every element of the second, and so on until all are exhausted — treating
    consecutive sequences as one continuous sequence.

    :param iterables: the iterables whose elements are yielded consecutively
    """
    # chain('ABC', 'DEF') --> A B C D E F
    for iterable in iterables:
        for element in iterable:
            yield element
def chain_from_iterable(iterables):
    """Alternate constructor for chain(): the chained inputs come from a single,
    lazily evaluated iterable of iterables.

    :param iterables: an iterable of iterables
    """
    # chain_from_iterable(['ABC', 'DEF']) --> A B C D E F
    for inner in iterables:
        yield from inner
def combinations(iterable, r):
    """Yield the r-length subsequences of *iterable*, in lexicographic order with
    respect to the input (so sorted input produces sorted output tuples).
    Elements are treated as unique by position, not value: unique inputs give
    combinations without repeated values.

    :param iterable: the iterable containing the items to combine
    :param r: the length of the resulting combinations
    """
    # combinations('ABCD', 2) --> AB AC AD BC BD CD
    # combinations(range(4), 3) --> 012 013 023 123
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the rightmost index that can still be advanced
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            # Every index is at its maximum: all combinations emitted
            return
        # Advance it, then reset every index to its right to the minimal run
        indices[i] += 1
        for j in range(i + 1, r):
            indices[j] = indices[j - 1] + 1
        yield tuple(pool[i] for i in indices)
def combinations_with_replacement(iterable, r):
    """Yield the r-length subsequences of *iterable*, allowing individual
    elements to repeat. Output is in lexicographic order with respect to the
    input (sorted input gives sorted output tuples). Elements are treated as
    unique by position, not value, so unique inputs give unique combinations.

    :param iterable: the iterable containing the items to combine
    :param r: the length of the resulting combinations
    """
    # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
    pool = tuple(iterable)
    n = len(pool)
    if not n and r:
        return
    indices = [0] * r
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the rightmost index that can still be advanced
        for i in reversed(range(r)):
            if indices[i] != n - 1:
                break
        else:
            # Every index is at its maximum: all combinations emitted
            return
        # Advance it and level every index to its right to the same value
        indices[i:] = [indices[i] + 1] * (r - i)
        yield tuple(pool[i] for i in indices)
def compress(data, selectors):
    """Make an iterator yielding only those elements of *data* whose matching
    element in *selectors* is truthy. Stops as soon as either input runs out.

    :param data: the source of values
    :param selectors: the source of selection values
    """
    # compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F
    for datum, keep in zip(data, selectors):
        if keep:
            yield datum
def count(start=0, step=1):
    """Make an infinite iterator of evenly spaced values beginning at *start*.
    Commonly paired with map() to generate consecutive data points, or with
    zip() to attach sequence numbers.

    :param start: the initial value of the sequence
    :param step: how far apart subsequent values are
    """
    current = start
    while True:
        yield current
        current += step
def cycle(p):
    """Yield elements from *p* repeatedly, forever.

    If *p* supports len() it is replayed directly; otherwise it is
    consumed once while its elements are cached, and the cache is then
    replayed indefinitely.

    :param p: the iterable from which to yield elements
    """
    try:
        len(p)
    except TypeError:
        # One-shot iterable: yield while building a cache, then replay it.
        saved = []
        for item in p:
            yield item
            saved.append(item)
        p = saved
    while p:
        for item in p:
            yield item
def dropwhile(predicate, iterable):
    """Skip leading elements of *iterable* while *predicate* is true,
    then yield every remaining element.

    Note that nothing is produced until the predicate first becomes
    false, so there may be a lengthy start-up time.

    :param predicate: tested against each leading element
    :param iterable: source of values
    """
    # dropwhile(lambda x: x<5, [1,4,6,4,1]) --> 6 4 1
    it = iter(iterable)
    for value in it:
        if not predicate(value):
            yield value
            break
    yield from it
def filterfalse(predicate, iterable):
    """Yield only the elements of *iterable* for which *predicate* is
    false. With predicate None, yield the items that are themselves falsy.

    :param predicate: used to test each value (None means truthiness)
    :param iterable: source of values
    """
    # filterfalse(lambda x: x%2, range(10)) --> 0 2 4 6 8
    test = bool if predicate is None else predicate
    for value in iterable:
        if not test(value):
            yield value
class groupby:
    """Make an iterator that returns consecutive keys and groups from the
    iterable. The key is a function computing a key value for each element. If
    not specified or is None, key defaults to an identity function and returns
    the element unchanged. Generally, the iterable needs to already be sorted
    on the same key function.

    The operation of groupby() is similar to the uniq filter in Unix. It
    generates a break or new group every time the value of the key
    function changes (which is why it is usually necessary to have
    sorted the data using the same key function). That behavior differs
    from SQL's GROUP BY which aggregates common elements regardless of
    their input order.

    The returned group is itself an iterator that shares the underlying
    iterable with groupby(). Because the source is shared, when the
    groupby() object is advanced, the previous group is no longer
    visible. So, if that data is needed later, it should be stored as a
    list.

    :param iterable: the source of values
    :param key: the key computation function (default is None)
    """
    # [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
    # [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
    def __init__(self, iterable, key=None):
        if key is None:
            key = lambda x: x
        self.keyfunc = key
        self.it = iter(iterable)
        # A fresh object() is a sentinel: it compares unequal to any real
        # key or value, so the first __next__ always starts a new group.
        self.tgtkey = self.currkey = self.currvalue = object()
    def __iter__(self):
        return self
    def __next__(self):
        # New identity token per group; any still-held _grouper from a
        # previous group sees a different token and stops yielding.
        self.id = object()
        while self.currkey == self.tgtkey:
            # Skip the remainder of the current group if the caller did
            # not consume it.
            self.currvalue = next(self.it)  # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
        self.tgtkey = self.currkey
        return (self.currkey, self._grouper(self.tgtkey, self.id))
    def _grouper(self, tgtkey, id):
        # Yield values while this group is still the active one (same
        # identity token) and the key has not changed.
        while self.id is id and self.currkey == tgtkey:
            yield self.currvalue
            try:
                self.currvalue = next(self.it)
            except StopIteration:
                return
            self.currkey = self.keyfunc(self.currvalue)
def islice(p, start, stop=(), step=1):
"""Make an iterator that returns selected elements from the
iterable. If start is non-zero | |
less snapshots then observation time steps'
if return_copy:
x = self.copy()
else:
x = self
new_list = main_loop(x)
del x[:]
x.extend(new_list)
for A in x:
self.bool_int_matrix(A)
if return_copy:
return x
else:
return
def node_activity_series(self, norm=True):
    """ Returns dict {snapshot index: number of active nodes}, where a
    node is active if it has at least one in- or out-edge in that
    snapshot. If norm, counts are divided by the total number of nodes.
    """
    result = {}
    total = float(self.number_of_nodes)
    divisor = float(total) if norm else 1
    for t in range(len(self)):
        row_sums = sp.coo_matrix(self[t].sum(axis=1))
        col_sums = sp.coo_matrix(self[t].sum(axis=0))
        # union of nodes with outgoing and incoming activity
        touched = set(row_sums.row) | set(col_sums.col)
        result[t] = len(touched) / divisor
    return result
def edge_activity_series(self, norm=True):
    """ Returns dict {time: edge count}; if norm, counts are divided by
    n*(n-1), i.e. reported as directed matrix density. """
    densities = {}
    nodes = float(self.number_of_nodes)
    scale = nodes * (nodes - 1.0) if norm else 1.0
    for t in range(len(self)):
        densities[t] = float(self[t].nnz) / scale
    return densities
def shift_start_time(self, new_start_time, return_copy=False):
    """ Rotates the sequence of adjacency matrices so it begins at index
    new_start_time, using periodic boundary conditions. Operates in
    place unless return_copy is True, in which case a rotated copy is
    returned.
    """
    assert new_start_time <= len(self) - 1, \
        'new_start_time must be in network observation time.'
    if return_copy:
        rotated = self.copy()
        rotated.extend(self[:new_start_time])
        del rotated[:new_start_time]
        return rotated
    self.extend(self[:new_start_time])
    del self[:new_start_time]
def GST(self, return_copy=False):
    """ Alias for time_shuffled (globally shuffled times).

    Bug fix: the result of time_shuffled is now forwarded, so
    GST(return_copy=True) actually returns the shuffled copy instead
    of None.
    """
    return self.time_shuffled(return_copy)
def time_shuffled(self, return_copy=False):
    """ Randomly permutes the temporal order of the snapshots.
    In place unless return_copy is True, in which case a shuffled
    (plain list) copy is returned.
    """
    target = self[:] if return_copy else self
    random.shuffle(target)
    if return_copy:
        return target
def TR(self, return_copy=False):
    """ Alias for time_reversed.

    Fixes: return_copy now defaults to False, consistent with
    time_reversed (and with the sibling alias GST), and the result is
    forwarded so TR(return_copy=True) returns the reversed copy
    instead of None.
    """
    return self.time_reversed(return_copy)
def time_reversed(self, return_copy=False):
    """ Reverses the snapshot order and transposes every matrix
    (i.e. reverses every edge in time and direction). In place unless
    return_copy is True.
    """
    target = self[:] if return_copy else self
    for t in range(len(target)):
        target[t] = target[t].transpose()
    target.reverse()
    if return_copy:
        return target
def transpose(self, inplace=True):
    """ Transposes every matrix in self. If inplace, the object is
    modified in place; otherwise a transposed (plain list) copy is
    returned.
    """
    target = self if inplace else self[:]
    for t in range(len(target)):
        target[t] = target[t].transpose()
    if not inplace:
        return target
def symmetrize_matrix(self, A):
    """ Returns the symmetrized version (A + A^T) of a possibly
    non-symmetric matrix A, with every stored entry collapsed to 0/1
    via a bool cast and returned as float.
    """
    sym = (A + A.transpose()).astype('bool')
    return sym.astype('float')
def as_undirected(self):
    """ Symmetrizes every snapshot in self and marks the sequence as
    undirected. """
    for t in range(len(self)):
        self[t] = self.symmetrize_matrix(self[t])
    self.is_directed = False
def clustering_matrix2vector(self, in_file):
    """ Reads a clustering matrix from a Matrix Market file and collapses
    it into a vector by summing entries C[i, j] into x[i + j], i.e.
    along anti-diagonals.

    NOTE(review): x is allocated with length C.shape[0], but i + j can
    reach shape[0] + shape[1] - 2, which would raise IndexError for any
    entry below the main anti-diagonal. Confirm whether the intended
    index is i + j (with a larger vector) or something else.
    """
    C = mmread(in_file)
    C = lil_matrix(C)
    x = [0.0 for i in range(C.shape[0])]
    indices = zip(C.nonzero()[0], C.nonzero()[1])
    for i, j in indices:
        x[i + j] += C[i, j]
    return x
def __random_combination(self, iterable, r, with_replacement=False):
    """ Random selection from
    itertools.combinations_with_replacement(iterable, r).
    Parameters
    ----------
    iterable: iterable
        list where samples are drawn from.
    r: int
        number of elements to be sampled
    with_replacement: boolean (optional, default=False)
        if True, combinations with i<=j<=k are returned, if False i<j<k.
    """
    pool = tuple(iterable)
    n = len(pool)
    if with_replacement:
        # sorted multiset of r indices (duplicates allowed): i <= j <= k
        indices = sorted(random.randrange(n) for i in xrange(r))
    else:
        # r distinct indices in sorted order: i < j < k
        # (Python 2 code: xrange)
        indices = sorted(random.sample(xrange(n), r))
    return tuple(pool[i] for i in indices)
def clustering_matrix(self, limit=None, random_iterations=True, replacement=False):
    """ Computes the matrix of clustering coefficients of
    a matrix sequence.
    Parameters
    ----------
    limit: int, optional (default=None)
        Number of time steps to be considered.
    random_iterations: Boolean, optional (default=True)
        If True, sample time triples are considered
    replacement: Boolean, optional (default=False)
        If True, time indices follow the condition i=<j<=k, and i<j<k, if False.

    NOTE(review): with the default random_iterations=True,
    ``range(random_iterations)`` equals ``range(1)``, so only a single
    random triple is sampled. Callers almost certainly intend to pass an
    integer sample count here -- confirm, and consider validating the
    argument.
    """
    def triple_product(M1, M2, M3):
        # Product of three matrices: the trace of M1*M2*M3 counts closed
        # time-respecting triangles; clu_norm counts the corresponding
        # open two-paths (diagonal walks excluded).
        a3 = M1 * M2 * M3
        tr = (a3.diagonal()).sum()
        clu_norm = (M1 * M2).sum() - ((M1 * M2).diagonal()).sum()
        clu_norm += (M1 * M3).sum() - ((M1 * M3).diagonal()).sum()
        clu_norm += (M2 * M3).sum() - ((M2 * M3).diagonal()).sum()
        return tr, clu_norm
    if limit:
        n = limit
    else:
        n = len(self)
    domain = range(n)
    C = lil_matrix((n, n), dtype='float')
    # c=[]
    if random_iterations:
        for l in range(random_iterations):
            (i, j, k) = \
                self.__random_combination(domain, 3, replacement)
            trace, c_norm = triple_product(self[i], self[j], self[k])
            if c_norm > 0.0:
                # accumulate by the pair of time lags (j-i, k-j)
                C[j-i, k-j] += float(trace) / c_norm
            # c.append((i,j,k,float(trace)/c_norm))
    else:
        for (i, j, k) in itertools.combinations(domain, 3):
            trace, c_norm = triple_product(self[i], self[j], self[k])
            if c_norm > 0.0:
                C[j-i, k-j] += float(trace) / c_norm
            # c.append((i,j,k,float(trace)/c_norm))
    return C
def write(self, fname):
""" writes self to txtfile.
"""
# generate edge list
t_edges = []
for i in range(len(self)):
# print "extracting edges ",i
indices = zip(self[i].nonzero()[0], self[i].nonzero()[1])
to_add = [(u, v, i) for u, v in indices]
t_edges.extend(to_add)
# edge list as set for file storage
t_edges_set = set(t_edges)
# remove double edges, if undirected
if not self.is_directed:
print "removing bidirectional links..."
for (u, v, d) in t_edges:
if (v, u, d) in t_edges_set and (u, v, d) in t_edges_set:
t_edges_set.remove((v, u, d))
# write file
g = file(fname, 'w+')
for e in t_edges_set:
wstring = ''
for j in range(1, len(e)):
wstring += '\t' + str(e[j])
g.writelines((str(e[0]) + wstring + '\n'))
g.close
return
def matricesCreation(self):
    """ Creates the list of sparse adjacency matrices from the input file.

    Reads (u, v, day) edges from self.fname, restricts them to
    [first_day, last_day], densely reindexes the node ids (optionally
    writing the index map to a file), groups the edges by time step and
    appends one CSR matrix per step.

    Bug fix: g.close was referenced but never called, so the map file
    could remain open/unflushed.
    """
    edges = loadtxt(self.fname, dtype=int, usecols=self.cols)
    _, _, days = np.array(zip(*edges))
    if not self.first_day:
        self.first_day = min(days)
    if not self.last_day:
        self.last_day = max(days)
    # use only times between firsttime and lasttime
    edges = [(u, v, d) for u, v, d in edges if
             (d >= self.first_day) and (d <= self.last_day)]
    # get dictionary of new indices and write map-file
    re_dct = self.reindex(edges)
    if self.label_file:
        g = file('oldindex_matrixfriendly.txt', 'w+')
        for k in re_dct:
            g.writelines((str(k) + '\t' + str(re_dct[k]) + '\n'))
        g.close()
    # reindex using this dictionary
    edges = [(re_dct[u], re_dct[v], d) for u, v, d in edges]
    edges = self.groupByTime(edges)
    # the actual construction of the sparse matrices
    mx_index = len(re_dct)
    for d, es in edges:
        us = [u for u, v in es]
        vs = [v for u, v in es]
        bs = [True for i in range(len(es))]
        m = csr_matrix((bs, (us, vs)), shape=(mx_index, mx_index),
                       dtype=np.int32)
        self.append(m)
def bool_int_matrix(self, M):
    """ Overwrites every stored entry of the sparse matrix M with 1,
    keeping M's dtype and sparsity structure unchanged. """
    M.data = np.ones_like(M.data)
def unfold_accessibility(self, verbose=True,
return_accessibility_matrix=False):
""" Unfold accessibility storing path density.
"""
P = self[0].copy()
D = sp.identity(self.number_of_nodes, dtype=np.int32)
P = P + D
cumu = [P.nnz]
for i in range(1, len(self)):
if verbose:
print 'unfolding accessibility. Step ', i, 'non-zeros: ', P.nnz
self.bool_int_matrix(P)
try:
P = P + P * self[i]
except:
print 'Break at t = ', i
break
cumu.append(P.nnz)
else:
print '---> Unfolding complete.'
if return_accessibility_matrix:
P = P.astype('bool')
P = P.astype('int')
return P, cumu
else:
return cumu
def unfold_accessibility_memory_efficient(self, return_ranges=False):
    """ Computes path density step by step for single nodes.

    Iterates over all nodes and accumulates each node's single-source
    accessibility series (see unfold_accessibility_single_node), which
    keeps only one matrix row in memory at a time.

    Parameters
    ----------
    return_ranges: boolean, optional (default=False)
        If True, the method returns a tuple with path density over time
        and the range for every node.

    Returns
    -------
    Returns a numpy vector, where indices are the time steps and values
    are path densities (not normalized).
    If ``return_ranges``, returns a tuple with the above path
    densities and the range of each node (its final reachable-set size)
    as a dictionary.

    Usage
    -----
    >>> c = At.unfold_accessibility_memory_efficient()
    >>> c, r = At.unfold_accessibility_memory_efficient(True)
    """
    all_paths = zeros(len(self), dtype=int)
    ranges = {}
    for node in range(self.number_of_nodes):
        print 'Computing accessibility for node ', node+1,\
            ' of ', self.number_of_nodes
        single_node_SI = self.unfold_accessibility_single_node(node)
        all_paths += single_node_SI
        # range of a node = size of its final reachable set
        ranges[node] = single_node_SI[-1]
    if return_ranges:
        return (all_paths, ranges)
    else:
        return all_paths
def unfold_accessibility_single_node(self, start):
    """ Accessibility of a single source node. Returns a numpy vector
    with the number of reachable nodes (nonzeros) after every time step.
    """
    row = sp.coo_matrix(([1], ([0], [start])),
                        shape=(1, self.number_of_nodes), dtype=int)
    row = row.tocsr()
    # the first step is done outside the loop to stay optically
    # consistent with the full-matrix version
    row = row + row * self[0]
    reached = [row.nnz]
    for t in range(1, len(self)):
        row = row + row * self[t]
        reached.append(row.nnz)
    return np.array(reached)
def trace_forward(self, start, stop=None):
""" same as unfold_accessibility_single_node, but returns all
nodes reached during traversal.
"""
if not stop:
maxtime = len(self)
# init
x = sp.coo_matrix(([1], ([0], [start])),
shape=(1, self.number_of_nodes), dtype=int)
x = x.tocsr()
# these 2 lines are not in the for-loop to be
# optically consistent with the matrix version.
x = x | |
range(len(self.keys))], self.length, p=self.probs, replace=True)
settings = [self.keys[i] for i in choices]
settings = [item for item in settings if item[1] != '0' * self.nqubits]
return settings
def evaluate_expectations(self, settings: List[str], params: List[float]):
    """Return the expectation value for every measurement setting.

    For each setting (state label, basis label), the |0...0> projector
    is rotated into the requested input state, evolved by the
    parameter-populated ansatz, and traced against the measurement
    basis operator.
    """
    results = []
    for sett in settings:
        zero_proj = tensor([Qobj([[1, 0], [0, 0]])] * self.nqubits)
        prep = self.input_states[sett[0]]
        rho_in = prep * zero_proj * prep.dag()
        observable = self.meas_bases[sett[1]]
        circ = self.ansatz.populate_ansatz(params)
        rho_out = circ * rho_in * circ.dag()
        results.append((rho_out * observable).tr())
    return results
def generate_states_bases(self):
    """Build lookup tables mapping each state label and basis label that
    occurs in self.keys to its operator."""
    states, bases = {}, {}
    for sett in self.keys:
        states[sett[0]] = self.get_input_state(sett[0])
        bases[sett[1]] = self.get_meas_basis(sett[1])
    return states, bases
def get_input_state(self, op: str):
    """Tensor product of single-qubit preparation unitaries, one factor
    per character of *op*: '0' is the identity, '1'..'3' are u3 rotations
    by arccos(-1/3) at azimuthal angles 0, 2pi/3 and 4pi/3."""
    theta = np.arccos(-1/3)
    factors = []
    for ch in op:
        if ch == '0':
            factors.append(qeye(2))
        elif ch == '1':
            factors.append(generate_u3(theta, 0, 0))
        elif ch == '2':
            factors.append(generate_u3(theta, 2*np.pi/3, 0))
        elif ch == '3':
            factors.append(generate_u3(theta, 4*np.pi/3, 0))
    return tensor(factors)
def get_meas_basis(self, op: str):
    """Tensor product of Pauli operators selected by each character of
    *op*: '0' = identity, '1' = X, '2' = Y, '3' = Z."""
    factors = []
    for ch in op:
        if ch == '0':
            factors.append(qeye(2))
        elif ch == '1':
            factors.append(sigmax())
        elif ch == '2':
            factors.append(sigmay())
        elif ch == '3':
            factors.append(sigmaz())
    return tensor(factors)
class QutipFlammiaEstimator:
    """Process-fidelity estimator in the style of Flammia & Liu, backed
    by QuTiP density-matrix simulation.

    Measurement settings are importance-sampled from ``prob_dist``; each
    sampled (input Pauli, measurement Pauli) pair is expanded over every
    computational-basis eigenstate, and the eigenvalue-weighted
    expectation values are combined into the fidelity estimate.

    Fixes relative to the previous revision: ``np.int`` (removed in
    NumPy 1.24; it was merely an alias of the builtin) is replaced by
    ``int``, the mistyped ``op: List[str]`` annotation on a string
    parameter is corrected, and project-type annotations are string
    forward references so the module imports standalone.
    """

    def __init__(self, prob_dist: 'ProbDist', nqubits: int, ansatz: 'QutipAnsatz'):
        self.prob_dist = prob_dist
        self.prob_dict = self.prob_dist.probabilities
        self.chi_dict = self.prob_dist.chi_dict
        self.probs = [self.prob_dict[key] for key in self.prob_dict]
        self.keys = [key for key in self.prob_dict]
        self.nqubits = nqubits
        self.ansatz = ansatz
        self.input_states, self.meas_bases = self.generate_states_bases()

    def calculate_pf(self, length: int, params: List[float] = None):
        """Estimate the process fidelity from ``length`` samples.

        :param length: total sample budget; divided by 2**nqubits to get
            the number of base settings drawn.
        :param params: parameters used to populate the ansatz circuit.
        :returns: real-valued fidelity estimate.
        """
        self.length = length
        settings = self.select_settings()
        ideal_chi = [self.chi_dict[(sett[0], sett[1])] for sett in settings]
        expects = self.evaluate_expectations(settings, params)
        evalues = self.generate_evalues(settings)
        fom = 0
        for i, _chi in enumerate(ideal_chi):
            fom += evalues[i]*expects[i] / _chi
        _l = int(self.length/2**self.nqubits)
        # settings dropped for the all-identity basis each contribute +1
        fom += _l - int(len(settings)/2**self.nqubits)
        fom /= _l
        return np.real(fom)

    def select_settings(self):
        """Sample base settings from the probability distribution, expand
        each over every computational-basis projector, and drop settings
        measured in the all-identity basis (always +1)."""
        choices = np.random.choice(
            [i for i in range(len(self.keys))], int(self.length/2**self.nqubits), p=self.probs, replace=True)
        settings = [self.keys[i] for i in choices]
        bases = [''.join(i) for i in itertools.product('01',
                                                       repeat=len(settings[0][0]))]
        new_settings = []
        for sett in settings:
            for base in bases:
                new_settings.append((sett[0], sett[1], base))
        settings = [item for item in new_settings if item[1] != '0' * self.nqubits]
        return settings

    def evaluate_expectations(self, settings, params):
        """Expectation of each setting's basis operator on the evolved
        eigenstate-projector input."""
        expectations = []
        for sett in settings:
            state = self.input_states[sett[0], sett[2]]
            basis = self.meas_bases[sett[1]]
            circ = self.ansatz.populate_ansatz(params)
            exp = (circ*state*circ.dag()*basis).tr()
            expectations.append(exp)
        return expectations

    def generate_states_bases(self):
        """Build lookup tables: (Pauli label, eigenstate label) -> input
        projector, and Pauli label -> measurement operator."""
        states = {}
        bases = {}
        all_bases = [''.join(i) for i in itertools.product('01',
                                                           repeat=self.nqubits)]
        for sett in self.keys:
            basis = sett[1]
            bases[basis] = self.get_operator(basis)
            for prj in all_bases:
                states[(sett[0], prj)] = self.get_estate(sett[0], prj)
        return states, bases

    def get_operator(self, op: str):
        """Tensor product of Paulis selected by the characters of *op*
        ('0' = identity, '1' = X, '2' = Y, '3' = Z)."""
        operator = []
        for i in op:
            if i == '0':
                operator.append(qeye(2))
            elif i == '1':
                operator.append(sigmax())
            elif i == '2':
                operator.append(sigmay())
            elif i == '3':
                operator.append(sigmaz())
        return tensor(operator)

    def get_estate(self, sigma, estate):
        """Projector onto the eigenstate of the Pauli string *sigma*
        labelled by the bitstring *estate* ('0' = +1 eigenvector,
        '1' = -1 eigenvector; identity qubits use the Z eigenbasis)."""
        operator = []
        for i, op in enumerate(sigma):
            if op == '0':
                if estate[i] == '0':
                    _op = basis(2, 0)*basis(2, 0).dag()
                    operator.append(_op)
                elif estate[i] == '1':
                    _op = basis(2, 1)*basis(2, 1).dag()
                    operator.append(_op)
            elif op == '1':
                if estate[i] == '0':
                    _op = 1/np.sqrt(2)*(basis(2, 0) + basis(2, 1))
                    operator.append(_op*_op.dag())
                elif estate[i] == '1':
                    _op = 1/np.sqrt(2)*(basis(2, 0) - basis(2, 1))
                    operator.append(_op*_op.dag())
            elif op == '2':
                if estate[i] == '0':
                    _op = 1/np.sqrt(2)*(basis(2, 0) + 1j*basis(2, 1))
                    operator.append(_op*_op.dag())
                elif estate[i] == '1':
                    _op = 1/np.sqrt(2)*(basis(2, 0) - 1j*basis(2, 1))
                    operator.append(_op*_op.dag())
            elif op == '3':
                if estate[i] == '0':
                    _op = basis(2, 0)*basis(2, 0).dag()
                    operator.append(_op)
                elif estate[i] == '1':
                    _op = basis(2, 1)*basis(2, 1).dag()
                    operator.append(_op)
        return tensor(operator)

    def generate_evalues(self, settings):
        """Eigenvalue (+/-1) of each setting: the product over qubits of
        +1 for identity factors and (-1)**bit for measured factors."""
        evals = []
        for sett in settings:
            _e = 1
            for i, _op in enumerate(sett[0]):
                if _op == '0':
                    _e *= 1.0
                else:
                    if sett[2][i] == '0':
                        _e *= 1.0
                    else:
                        _e *= -1.0
            evals.append(_e)
        return evals
class QiskitEstimator:
    """Zero-fidelity estimator that runs sampled measurement-setting
    circuits on a Qiskit backend.

    Measurement settings are importance-sampled from ``prob_dist``;
    expectation values are estimated from measured counts and combined
    with the ideal chi values into the fidelity figure of merit.

    Fixes relative to the previous revision: the ``num_shots`` argument
    of :meth:`evaluate_process_zero_fidelities` was accepted but
    silently ignored (it is now honored, defaulting to the instance
    setting); dead local assignments were removed; project-type
    annotations are string forward references so the module imports
    standalone.
    """

    def __init__(self, prob_dist: 'ProbDist', ansatz: 'QiskitAnsatz', num_shots: int,
                 noise_model=None):
        self.prob_dist = prob_dist
        self.prob_dict = self.prob_dist.probabilities
        self.chi_dict = self.prob_dist.chi_dict
        self.probs = [self.prob_dict[key] for key in self.prob_dict]
        self.keys = [key for key in self.prob_dict]
        self.num_shots = num_shots
        self.noise_model = noise_model  # only for Aer simulator
        self.ansatz = ansatz
        self.nqubits = self.ansatz.nqubits
        self.backend = self.ansatz.backend
        self.init_layout = self.ansatz.init_layout
        self.circuits = self.ansatz.circs
        self.quant_inst = QuantumInstance(backend=self.backend, shots=self.num_shots,
                                          initial_layout=self.init_layout,
                                          skip_qobj_validation=False,
                                          noise_model=self.noise_model)

    def estimate_zero_fidelity(self, params, length):
        """Estimate the zero fidelity from ``length`` sampled settings.

        :param params: parameters used to populate the ansatz circuits
        :param length: how many circuits to include in the estimate
        """
        self.length = length
        settings, qutip_settings = self.select_settings()
        ideal_chi = [self.chi_dict[i] for i in qutip_settings]
        expects = self.run_circuits(settings, params)
        fidelity = 0
        for i, _chi in enumerate(ideal_chi):
            fidelity += expects[i] / _chi
        # settings measured in the all-0 basis were filtered out and
        # contribute exactly +1 each
        fidelity += self.length - len(settings)
        fidelity /= self.length
        # analytically real; discards residual numerical imaginary part
        return np.real(fidelity)

    def select_settings(self):
        """Choose a set of settings given a probability distribution"""
        choices = np.random.choice(
            [i for i in range(len(self.keys))], self.length, p=self.probs, replace=True)
        qutip_settings = [self.keys[i] for i in choices]
        # qutip and qiskit use mirrored qubit naming schemes
        settings = []
        for _set in qutip_settings:
            settings.append((_set[0][::-1], _set[1][::-1]))
        # measurements in 00...0 basis always yield +1 expectation value
        settings = [item for item in settings if item[1] != '0' * self.nqubits]
        qutip_settings = [
            item for item in qutip_settings if item[1] != '0' * self.nqubits]
        return settings, qutip_settings

    def run_circuits(self, settings, params):
        """Choose a subset of <length> circuits for fidelity estimation and run them
        Parameters:
        -----------
        params: list of parameters to populate the circuits with (intended to be
        adapted through optimisation)
        Returns:
        --------
        expects: list of expectation values for each circuit in the list
        """
        exec_circs = [self.circuits[qc].populate_circuits(params) for qc in settings]
        results = self.quant_inst.execute(exec_circs, had_transpiled=True)
        q_list = [i for i in range(self.nqubits)][::-1]
        expects = []
        for i, _c in enumerate(settings):
            # qubits whose basis character is '0' (identity) are unmeasured
            _ig = [j for j, _b in enumerate(_c[1]) if _b == '0']
            _ignore = [q_list[k] for k in _ig]
            expects.append(self.generate_expectation(
                results.get_counts(i), _ignore))
        return expects

    def evaluate_process_zero_fidelities(self, params, num_shots=None):
        """Evaluate the process and zero fidelities using all measurement
        settings, yielding the highest precision evaluation available
        under the experimental constraints.

        :param num_shots: shot count for this high-precision run;
            defaults to the instance's ``num_shots``. (Previously this
            parameter was silently ignored.)
        """
        shots = self.num_shots if num_shots is None else num_shots
        self.quant_inst = QuantumInstance(backend=self.backend, shots=shots,
                                          initial_layout=self.init_layout,
                                          skip_qobj_validation=False,
                                          noise_model=self.noise_model)
        self.expects = self.run_all_circuits(params)
        perms = [''.join(i) for i in itertools.product('0123', repeat=self.nqubits)]
        self.B_dict = {}
        for i, p in enumerate(perms):
            self.B_dict[p] = i
        process_fidelity = self.evaluate_full_pfidelity()
        zero_fidelity = self.evaluate_full_zfidelity()
        return process_fidelity, zero_fidelity

    def run_all_circuits(self, params):
        """Execute every measurement-setting circuit once and return the
        expectation value of each."""
        chosen_circs = [self.circuits[_s] for _s in self.keys]
        exec_circs = [qc.populate_circuits(params) for qc in chosen_circs]
        results = self.quant_inst.execute(exec_circs, had_transpiled=True)
        q_list = [i for i in range(self.nqubits)][::-1]
        expects = []
        for i, _c in enumerate(self.keys):
            _ig = [j for j, _b in enumerate(_c[1]) if _b == '0']
            _ignore = [q_list[k] for k in _ig]
            expects.append(self.generate_expectation(
                results.get_counts(i), _ignore))
        return expects

    def evaluate_full_pfidelity(self):
        """Process fidelity from the full set of expectation values,
        weighted by the B matrix."""
        d = 2**self.nqubits
        Bmat = generate_Bmat(self.nqubits, self.nqubits)
        F = 0
        chi_keys = [key for key in self.chi_dict]
        keys = [key[0] for key in self.chi_dict]
        for i, _key in enumerate(chi_keys):
            chi = self.chi_dict[_key]
            for j, exp in enumerate(self.expects):
                _set1 = self.B_dict[keys[i]]
                _set2 = self.B_dict[keys[j]]
                F += Bmat[_set1, _set2]*chi*exp
        return F/d**3

    def evaluate_full_zfidelity(self):
        """Zero fidelity from the full set of expectation values."""
        d = 2**self.nqubits
        chis = [self.chi_dict[key] for key in self.chi_dict]
        FOM = 0
        for i, chi in enumerate(chis):
            FOM += chi*self.expects[i]
        return FOM/d**3

    @staticmethod
    def generate_expectation(counts_dict, ignore=None):
        """Generate the expectation value for a Pauli string operator
        Parameters:
        -----------
        counts_dict: dictionary of counts generated from the machine (or qasm simulator)
        ignore: list of qubits which are not being measured
        Returns:
        --------
        expect: expectation value of the circuit in the measured basis
        """
        if ignore is None:
            ignore = []
        total_counts = 0
        key_len = [len(key) for key in counts_dict]
        N = key_len[0]
        bitstrings = [''.join(i) for i in itertools.product('01', repeat=N)]
        expect = 0
        # add any missing counts to dictionary to avoid errors
        for string in bitstrings:
            if string not in counts_dict:
                counts_dict[string] = 0
            # parity of the measured '1's decides the eigenvalue sign
            count = 0
            for i, idx in enumerate(string):
                if i in ignore:
                    continue
                if idx == '1':
                    count += 1
            if count % 2 == 0:  # add even products of -ve evalues, subtract odd
                expect += counts_dict[string]
            else:
                expect -= counts_dict[string]
            total_counts += counts_dict[string]
        return expect / total_counts
class QiskitFlammiaEstimator:
def __init__(self, prob_dist: ProbDist, ansatz: QiskitAnsatz, num_shots: int,
noise_model=None):
self.prob_dist = prob_dist
self.prob_dict = self.prob_dist.probabilities
self.chi_dict | |
True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
])
self.processed_pkts = None
self.processed_bytes = None
self._segment_path = lambda: "spi-si"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr.Data.SpiSi, ['processed_pkts', 'processed_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr.Data.SpiSi']['meta_info']
class Term(_Entity_):
    """
    Terminate stats
    .. attribute:: terminated_pkts
    Number of terminated packets
    **type**\: int
    **range:** 0..18446744073709551615
    **config**\: False
    .. attribute:: terminated_bytes
    Total bytes terminated
    **type**\: int
    **range:** 0..18446744073709551615
    **config**\: False
    **units**\: byte
    """
    # NOTE: auto-generated YDK binding for the
    # Cisco-IOS-XR-pbr-vservice-mgr-oper YANG model; regenerate from the
    # model rather than editing by hand.
    _prefix = 'pbr-vservice-mgr-oper'
    _revision = '2017-05-01'
    def __init__(self):
        # Python 2/3 compatible super() dispatch used by the generator
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr.Data.Term, self).__init__()
        self.yang_name = "term"
        self.yang_parent_name = "data"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # leaf name -> (YLeaf descriptor, python types)
        self._leafs = OrderedDict([
            ('terminated_pkts', (YLeaf(YType.uint64, 'terminated-pkts'), ['int'])),
            ('terminated_bytes', (YLeaf(YType.uint64, 'terminated-bytes'), ['int'])),
        ])
        self.terminated_pkts = None
        self.terminated_bytes = None
        self._segment_path = lambda: "term"
        # freeze: further attribute creation is validated by __setattr__
        self._is_frozen = True
    def __setattr__(self, name, value):
        # delegate leaf validation to the YDK entity base
        self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr.Data.Term, ['terminated_pkts', 'terminated_bytes'], name, value)
    @staticmethod
    def _meta_info():
        # lazy import keeps the large generated meta tables off the
        # module-import path
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
        return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr.Data.Term']['meta_info']
@staticmethod
def _meta_info():
    # Generated accessor for the Data node's meta information; the lazy
    # import keeps the large meta tables off the module-import path.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
    return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr.Data']['meta_info']
@staticmethod
def _meta_info():
    # Generated accessor for the SiArr node's meta information; lazy
    # import avoids loading the meta tables at module import time.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
    return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail.SiArr']['meta_info']
@staticmethod
def _meta_info():
    # Generated accessor for the Detail node's meta information; lazy
    # import avoids loading the meta tables at module import time.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
    return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Detail']['meta_info']
class Summarized(_Entity_):
"""
Combined statistics of all service index in
service functionpath
.. attribute:: data
Statistics data
**type**\: :py:class:`Data <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data>`
**config**\: False
.. attribute:: si_arr
SI array in case of detail stats
**type**\: list of :py:class:`SiArr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.SiArr>`
**config**\: False
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
    # Python 2/3 compatible super() dispatch used by the YDK generator
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized, self).__init__()
    self.yang_name = "summarized"
    self.yang_parent_name = "stats"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # YANG child container -> (python attribute, binding class)
    self._child_classes = OrderedDict([("data", ("data", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data)), ("si-arr", ("si_arr", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.SiArr))])
    self._leafs = OrderedDict()
    self.data = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data()
    self.data.parent = self
    self._children_name_map["data"] = "data"
    # si-arr is a YANG list node
    self.si_arr = YList(self)
    self._segment_path = lambda: "summarized"
    # freeze: further attribute creation is validated by __setattr__
    self._is_frozen = True
def __setattr__(self, name, value):
    # no settable leafs on this container; delegate to the YDK base
    self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized, [], name, value)
class Data(_Entity_):
"""
Statistics data
.. attribute:: sfp
SFP stats
**type**\: :py:class:`Sfp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp>`
**config**\: False
.. attribute:: spi_si
SPI SI stats
**type**\: :py:class:`SpiSi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SpiSi>`
**config**\: False
.. attribute:: term
Terminate stats
**type**\: :py:class:`Term <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Term>`
**config**\: False
.. attribute:: sf
Service function stats
**type**\: :py:class:`Sf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sf>`
**config**\: False
.. attribute:: sff
Service function forwarder stats
**type**\: :py:class:`Sff <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sff>`
**config**\: False
.. attribute:: sff_local
Local service function forwarder stats
**type**\: :py:class:`SffLocal <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SffLocal>`
**config**\: False
.. attribute:: type
type
**type**\: :py:class:`VsNshStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.VsNshStats>`
**config**\: False
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
    # Python 2/3 compatible super() dispatch used by the YDK generator
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data, self).__init__()
    self.yang_name = "data"
    self.yang_parent_name = "summarized"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # YANG child container -> (python attribute, binding class)
    self._child_classes = OrderedDict([("sfp", ("sfp", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp)), ("spi-si", ("spi_si", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SpiSi)), ("term", ("term", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Term)), ("sf", ("sf", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sf)), ("sff", ("sff", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sff)), ("sff-local", ("sff_local", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SffLocal))])
    # 'type' is the only leaf: a VsNshStats enumeration discriminator
    self._leafs = OrderedDict([
        ('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper', 'VsNshStats', '')])),
    ])
    self.type = None
    self.sfp = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp()
    self.sfp.parent = self
    self._children_name_map["sfp"] = "sfp"
    self.spi_si = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SpiSi()
    self.spi_si.parent = self
    self._children_name_map["spi_si"] = "spi-si"
    self.term = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Term()
    self.term.parent = self
    self._children_name_map["term"] = "term"
    self.sf = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sf()
    self.sf.parent = self
    self._children_name_map["sf"] = "sf"
    self.sff = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sff()
    self.sff.parent = self
    self._children_name_map["sff"] = "sff"
    self.sff_local = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SffLocal()
    self.sff_local.parent = self
    self._children_name_map["sff_local"] = "sff-local"
    self._segment_path = lambda: "data"
    # freeze: further attribute creation is validated by __setattr__
    self._is_frozen = True
def __setattr__(self, name, value):
    # Route every attribute write through the _Entity_ machinery so that,
    # once the instance is frozen, only the declared leaf 'type' (and the
    # registered child containers) may be assigned.
    self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data, ['type'], name, value)
class Sfp(_Entity_):
    """
    SFP stats

    .. attribute:: spi_si

        Service index counters
        **type**\: :py:class:`SpiSi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.SpiSi>`

        **config**\: False

    .. attribute:: term

        Terminate counters
        **type**\: :py:class:`Term <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.Term>`

        **config**\: False
    """

    # YANG module metadata emitted by the YDK code generator.
    _prefix = 'pbr-vservice-mgr-oper'
    _revision = '2017-05-01'

    def __init__(self):
        # Generated Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp, self).__init__()
        self.yang_name = "sfp"
        self.yang_parent_name = "data"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child container name -> (python attribute name, child class).
        self._child_classes = OrderedDict([("spi-si", ("spi_si", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.SpiSi)), ("term", ("term", GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.Term))])
        self._leafs = OrderedDict()  # this container has no direct leafs
        # Pre-instantiate child containers and wire up parent back-references.
        self.spi_si = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.SpiSi()
        self.spi_si.parent = self
        self._children_name_map["spi_si"] = "spi-si"
        self.term = GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.Term()
        self.term.parent = self
        self._children_name_map["term"] = "term"
        self._segment_path = lambda: "sfp"
        # Freeze last: every assignment above must bypass attribute validation.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No settable leafs on this container; validation still applies to names.
        self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp, [], name, value)

    class SpiSi(_Entity_):
        """
        Service index counters

        .. attribute:: processed_pkts

            Number of packets processed
            **type**\: int

            **range:** 0..18446744073709551615

            **config**\: False

        .. attribute:: processed_bytes

            Total bytes processed
            **type**\: int

            **range:** 0..18446744073709551615

            **config**\: False

            **units**\: byte
        """

        # YANG module metadata emitted by the YDK code generator.
        _prefix = 'pbr-vservice-mgr-oper'
        _revision = '2017-05-01'

        def __init__(self):
            # Generated Py2/Py3-compatible super() dispatch.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.SpiSi, self).__init__()
            self.yang_name = "spi-si"
            self.yang_parent_name = "sfp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])  # leaf-only container
            # Leaf descriptors: python name -> (YLeaf, permitted python types).
            self._leafs = OrderedDict([
                ('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
                ('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
            ])
            self.processed_pkts = None   # uint64 per docstring; None until populated
            self.processed_bytes = None  # uint64 per docstring; None until populated
            self._segment_path = lambda: "spi-si"
            # Freeze last: assignments above must bypass attribute validation.
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Only the two declared leafs may be set once frozen.
            self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.SpiSi, ['processed_pkts', 'processed_bytes'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import keeps the large meta table out of module import time.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
            return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.SpiSi']['meta_info']

    class Term(_Entity_):
        """
        Terminate counters

        .. attribute:: terminated_pkts

            Number of terminated packets
            **type**\: int

            **range:** 0..18446744073709551615

            **config**\: False

        .. attribute:: terminated_bytes

            Total bytes terminated
            **type**\: int

            **range:** 0..18446744073709551615

            **config**\: False

            **units**\: byte
        """

        # YANG module metadata emitted by the YDK code generator.
        _prefix = 'pbr-vservice-mgr-oper'
        _revision = '2017-05-01'

        def __init__(self):
            # Generated Py2/Py3-compatible super() dispatch.
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.Term, self).__init__()
            self.yang_name = "term"
            self.yang_parent_name = "sfp"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])  # leaf-only container
            # Leaf descriptors: python name -> (YLeaf, permitted python types).
            self._leafs = OrderedDict([
                ('terminated_pkts', (YLeaf(YType.uint64, 'terminated-pkts'), ['int'])),
                ('terminated_bytes', (YLeaf(YType.uint64, 'terminated-bytes'), ['int'])),
            ])
            self.terminated_pkts = None   # uint64 per docstring; None until populated
            self.terminated_bytes = None  # uint64 per docstring; None until populated
            self._segment_path = lambda: "term"
            # Freeze last: assignments above must bypass attribute validation.
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Only the two declared leafs may be set once frozen.
            self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.Term, ['terminated_pkts', 'terminated_bytes'], name, value)

        @staticmethod
        def _meta_info():
            # Lazy import keeps the large meta table out of module import time.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
            return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp.Term']['meta_info']

    @staticmethod
    def _meta_info():
        # Lazy import keeps the large meta table out of module import time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
        return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sfp']['meta_info']
class SpiSi(_Entity_):
    """
    SPI SI stats

    .. attribute:: processed_pkts

        Number of packets processed
        **type**\: int

        **range:** 0..18446744073709551615

        **config**\: False

    .. attribute:: processed_bytes

        Total bytes processed
        **type**\: int

        **range:** 0..18446744073709551615

        **config**\: False

        **units**\: byte
    """

    # YANG module metadata emitted by the YDK code generator.
    _prefix = 'pbr-vservice-mgr-oper'
    _revision = '2017-05-01'

    def __init__(self):
        # Generated Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SpiSi, self).__init__()
        self.yang_name = "spi-si"
        self.yang_parent_name = "data"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container
        # Leaf descriptors: python name -> (YLeaf, permitted python types).
        self._leafs = OrderedDict([
            ('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
            ('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
        ])
        self.processed_pkts = None   # uint64 per docstring; None until populated
        self.processed_bytes = None  # uint64 per docstring; None until populated
        self._segment_path = lambda: "spi-si"
        # Freeze last: assignments above must bypass attribute validation.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Only the two declared leafs may be set once frozen.
        self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SpiSi, ['processed_pkts', 'processed_bytes'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import keeps the large meta table out of module import time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
        return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.SpiSi']['meta_info']
class Term(_Entity_):
    """
    Terminate stats

    .. attribute:: terminated_pkts

        Number of terminated packets
        **type**\: int

        **range:** 0..18446744073709551615

        **config**\: False

    .. attribute:: terminated_bytes

        Total bytes terminated
        **type**\: int

        **range:** 0..18446744073709551615

        **config**\: False

        **units**\: byte
    """

    # YANG module metadata emitted by the YDK code generator.
    _prefix = 'pbr-vservice-mgr-oper'
    _revision = '2017-05-01'

    def __init__(self):
        # Generated Py2/Py3-compatible super() dispatch.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Term, self).__init__()
        self.yang_name = "term"
        self.yang_parent_name = "data"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])  # leaf-only container
        # Leaf descriptors: python name -> (YLeaf, permitted python types).
        self._leafs = OrderedDict([
            ('terminated_pkts', (YLeaf(YType.uint64, 'terminated-pkts'), ['int'])),
            ('terminated_bytes', (YLeaf(YType.uint64, 'terminated-bytes'), ['int'])),
        ])
        self.terminated_pkts = None   # uint64 per docstring; None until populated
        self.terminated_bytes = None  # uint64 per docstring; None until populated
        self._segment_path = lambda: "term"
        # Freeze last: assignments above must bypass attribute validation.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Only the two declared leafs may be set once frozen.
        self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Term, ['terminated_pkts', 'terminated_bytes'], name, value)

    @staticmethod
    def _meta_info():
        # Lazy import keeps the large meta table out of module import time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
        return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Term']['meta_info']
class Sf(_Entity_):
"""
Service function stats
.. attribute:: processed_pkts
Number of packets processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: processed_bytes
Total bytes processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunctionPath.PathIds.PathId.Stats.Summarized.Data.Sf, self).__init__()
self.yang_name = "sf"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
])
self.processed_pkts = None
self.processed_bytes = None
self._segment_path = lambda: "sf"
| |
:paramtype trigger_parameter: str
"""
super(ScaleRuleAuth, self).__init__(**kwargs)
self.secret_ref = secret_ref
self.trigger_parameter = trigger_parameter
class Secret(msrest.serialization.Model):
    """Secret definition.

    :ivar name: Secret Name.
    :vartype name: str
    :ivar value: Secret Value.
    :vartype value: str
    """

    # Maps python attribute names to serialized JSON keys/types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None, value: Optional[str] = None, **kwargs):
        """
        :keyword name: Secret Name.
        :paramtype name: str
        :keyword value: Secret Value.
        :paramtype value: str
        """
        super().__init__(**kwargs)
        self.name = name
        self.value = value
class SecretsCollection(msrest.serialization.Model):
    """Container App Secrets Collection ARM resource.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. Collection of resources.
    :vartype value: list[~azure.mgmt.appcontainers.models.ContainerAppSecret]
    """

    # 'value' must always be supplied by the caller.
    _validation = {'value': {'required': True}}

    _attribute_map = {'value': {'key': 'value', 'type': '[ContainerAppSecret]'}}

    def __init__(self, *, value: List["ContainerAppSecret"], **kwargs):
        """
        :keyword value: Required. Collection of resources.
        :paramtype value: list[~azure.mgmt.appcontainers.models.ContainerAppSecret]
        """
        super().__init__(**kwargs)
        self.value = value
class SourceControl(ProxyResource):
    """Container App SourceControl.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.appcontainers.models.SystemData
    :ivar operation_state: Current provisioning State of the operation. Possible values include:
     "InProgress", "Succeeded", "Failed", "Canceled".
    :vartype operation_state: str or ~azure.mgmt.appcontainers.models.SourceControlOperationState
    :ivar repo_url: The repo url which will be integrated to ContainerApp.
    :vartype repo_url: str
    :ivar branch: The branch which will trigger the auto deployment.
    :vartype branch: str
    :ivar github_action_configuration: Container App Revision Template with all possible settings
     and the defaults if user did not provide them. The defaults are populated
     as they were at the creation time.
    :vartype github_action_configuration:
     ~azure.mgmt.appcontainers.models.GithubActionConfiguration
    """

    # All of these are server-populated; clients may not set them.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'operation_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'operation_state': {'key': 'properties.operationState', 'type': 'str'},
        'repo_url': {'key': 'properties.repoUrl', 'type': 'str'},
        'branch': {'key': 'properties.branch', 'type': 'str'},
        'github_action_configuration': {'key': 'properties.githubActionConfiguration', 'type': 'GithubActionConfiguration'},
    }

    def __init__(
        self,
        *,
        repo_url: Optional[str] = None,
        branch: Optional[str] = None,
        github_action_configuration: Optional["GithubActionConfiguration"] = None,
        **kwargs
    ):
        """
        :keyword repo_url: The repo url which will be integrated to ContainerApp.
        :paramtype repo_url: str
        :keyword branch: The branch which will trigger the auto deployment.
        :paramtype branch: str
        :keyword github_action_configuration: Container App Revision Template with all possible
         settings and the defaults if user did not provide them. The defaults are populated
         as they were at the creation time.
        :paramtype github_action_configuration:
         ~azure.mgmt.appcontainers.models.GithubActionConfiguration
        """
        super().__init__(**kwargs)
        # Read-only: populated by the service on responses.
        self.operation_state = None
        self.repo_url = repo_url
        self.branch = branch
        self.github_action_configuration = github_action_configuration
class SourceControlCollection(msrest.serialization.Model):
    """SourceControl collection ARM resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar value: Required. Collection of resources.
    :vartype value: list[~azure.mgmt.appcontainers.models.SourceControl]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {
        'value': {'required': True},      # caller must supply
        'next_link': {'readonly': True},  # server-populated paging link
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SourceControl]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: List["SourceControl"], **kwargs):
        """
        :keyword value: Required. Collection of resources.
        :paramtype value: list[~azure.mgmt.appcontainers.models.SourceControl]
        """
        super().__init__(**kwargs)
        self.value = value
        # Read-only: populated by the service on responses.
        self.next_link = None
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :ivar created_by: The identity that created the resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the resource. Possible values include:
     "User", "Application", "ManagedIdentity", "Key".
    :vartype created_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType
    :ivar created_at: The timestamp of resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :vartype last_modified_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType
    :ivar last_modified_at: The timestamp of resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """

    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword created_by: The identity that created the resource.
        :paramtype created_by: str
        :keyword created_by_type: The type of identity that created the resource. Possible values
         include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype created_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType
        :keyword created_at: The timestamp of resource creation (UTC).
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_by: The identity that last modified the resource.
        :paramtype last_modified_by: str
        :keyword last_modified_by_type: The type of identity that last modified the resource.
         Possible values include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype last_modified_by_type: str or ~azure.mgmt.appcontainers.models.CreatedByType
        :keyword last_modified_at: The timestamp of resource last modification (UTC).
        :paramtype last_modified_at: ~datetime.datetime
        """
        super().__init__(**kwargs)
        # Mirror every keyword argument onto the instance unchanged.
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
class Template(msrest.serialization.Model):
    """Container App versioned application definition.

    Defines the desired state of an immutable revision.
    Any changes to this section will result in a new revision being created.

    :ivar revision_suffix: User friendly suffix that is appended to the revision name.
    :vartype revision_suffix: str
    :ivar containers: List of container definitions for the Container App.
    :vartype containers: list[~azure.mgmt.appcontainers.models.Container]
    :ivar scale: Scaling properties for the Container App.
    :vartype scale: ~azure.mgmt.appcontainers.models.Scale
    :ivar volumes: List of volume definitions for the Container App.
    :vartype volumes: list[~azure.mgmt.appcontainers.models.Volume]
    """

    _attribute_map = {
        'revision_suffix': {'key': 'revisionSuffix', 'type': 'str'},
        'containers': {'key': 'containers', 'type': '[Container]'},
        'scale': {'key': 'scale', 'type': 'Scale'},
        'volumes': {'key': 'volumes', 'type': '[Volume]'},
    }

    def __init__(
        self,
        *,
        revision_suffix: Optional[str] = None,
        containers: Optional[List["Container"]] = None,
        scale: Optional["Scale"] = None,
        volumes: Optional[List["Volume"]] = None,
        **kwargs
    ):
        """
        :keyword revision_suffix: User friendly suffix that is appended to the revision name.
        :paramtype revision_suffix: str
        :keyword containers: List of container definitions for the Container App.
        :paramtype containers: list[~azure.mgmt.appcontainers.models.Container]
        :keyword scale: Scaling properties for the Container App.
        :paramtype scale: ~azure.mgmt.appcontainers.models.Scale
        :keyword volumes: List of volume definitions for the Container App.
        :paramtype volumes: list[~azure.mgmt.appcontainers.models.Volume]
        """
        super().__init__(**kwargs)
        # Mirror every keyword argument onto the instance unchanged.
        self.revision_suffix = revision_suffix
        self.containers = containers
        self.scale = scale
        self.volumes = volumes
class TrafficWeight(msrest.serialization.Model):
    """Traffic weight assigned to a revision.

    :ivar revision_name: Name of a revision.
    :vartype revision_name: str
    :ivar weight: Traffic weight assigned to a revision.
    :vartype weight: int
    :ivar latest_revision: Indicates that the traffic weight belongs to a latest stable revision.
    :vartype latest_revision: bool
    :ivar label: Associates a traffic label with a revision.
    :vartype label: str
    """

    _attribute_map = {
        'revision_name': {'key': 'revisionName', 'type': 'str'},
        'weight': {'key': 'weight', 'type': 'int'},
        'latest_revision': {'key': 'latestRevision', 'type': 'bool'},
        'label': {'key': 'label', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        revision_name: Optional[str] = None,
        weight: Optional[int] = None,
        latest_revision: Optional[bool] = False,  # defaults to False, not None
        label: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword revision_name: Name of a revision.
        :paramtype revision_name: str
        :keyword weight: Traffic weight assigned to a revision.
        :paramtype weight: int
        :keyword latest_revision: Indicates that the traffic weight belongs to a latest stable
         revision.
        :paramtype latest_revision: bool
        :keyword label: Associates a traffic label with a revision.
        :paramtype label: str
        """
        super().__init__(**kwargs)
        # Mirror every keyword argument onto the instance unchanged.
        self.revision_name = revision_name
        self.weight = weight
        self.latest_revision = latest_revision
        self.label = label
class Twitter(msrest.serialization.Model):
"""The configuration settings of the Twitter provider.
:ivar enabled: :code:`<code>false</code>` if the Twitter provider should not be enabled despite
the set registration; otherwise, :code:`<code>true</code>`.
:vartype enabled: bool
:ivar registration: The configuration settings of the app registration for the Twitter
provider.
:vartype registration: ~azure.mgmt.appcontainers.models.TwitterRegistration
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'registration': {'key': 'registration', 'type': 'TwitterRegistration'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
registration: Optional["TwitterRegistration"] = None,
**kwargs
):
"""
:keyword | |
<gh_stars>1-10
import json
import logging
from unittest import mock
import pytest
from requests import RequestException
from elastalert.alerters.httppost2 import HTTPPost2Alerter
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
def test_http_alerter_with_payload(caplog):
    """A payload-only rule posts exactly the configured payload as JSON."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'posted_name': 'toto'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'posted_name': 'toto'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_raw_fields(caplog):
    """Raw fields copy values straight from the match into the posted payload."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload and raw fields',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'posted_name': 'toto'},
        'http_post2_raw_fields': {'posted_raw_field': 'somefield'},
        'http_post2_static_payload': {'name': 'somestaticname'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'posted_name': 'toto', 'posted_raw_field': 'foobarbaz'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_raw_fields_overwrite(caplog):
    """A raw field mapped to an existing payload key overwrites the payload value."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter raw fields overwrite payload',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'posted_name': 'toto', 'overwrite_field': 'tata'},
        'http_post2_raw_fields': {'overwrite_field': 'somefield'},
        'http_post2_static_payload': {'name': 'somestaticname'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'posted_name': 'toto', 'overwrite_field': 'foobarbaz'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_no_clash(caplog):
    """Payload values are literal: a match field named like a value is not substituted."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload has no clash with the match fields',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'posted_name': 'toto'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    # The match deliberately contains a field named 'toto' (the payload value).
    event = {'@timestamp': '2017-01-01T00:00:00', 'toto': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'posted_name': 'toto'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_args_keys(caplog):
    """{{field}} placeholders in payload *keys* are substituted from the match."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload args for the key',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'args_{{some_field}}': 'tata'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'some_field': 'toto'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'args_toto': 'tata'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_nested_keys(caplog):
    """{{field}} placeholders are substituted inside nested payload dicts too."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload args for the key',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'key': {'nested_key': 'some_value_{{some_field}}'}},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'some_field': 'toto'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'key': {'nested_key': 'some_value_toto'}}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_none_value(caplog):
    """A None payload value passes through untouched (serialized as JSON null)."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload args for the key',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'key': None},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'some_field': 'toto'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'key': None}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_args_key_not_found(caplog):
    """A key placeholder referencing a missing match field renders as empty string."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload args for the key if not found',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'args_{{some_field1}}': 'tata'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    # Match has 'some_field', not the 'some_field1' referenced by the key.
    event = {'@timestamp': '2017-01-01T00:00:00', 'some_field': 'toto'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'args_': 'tata'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_args_value(caplog):
    """{{field}} placeholders in payload *values* are substituted from the match."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload args for the value',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'posted_name': 'toto', 'args_name': '{{some_field}}'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    event = {'@timestamp': '2017-01-01T00:00:00', 'some_field': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'posted_name': 'toto', 'args_name': 'foobarbaz'}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_payload_args_value_not_found(caplog):
    """A value placeholder referencing a missing match field renders as empty string."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Payload args for the value if not found',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_payload': {'posted_name': 'toto', 'args_name': '{{some_field1}}'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    # Match has 'some_field', not the 'some_field1' referenced by the value.
    event = {'@timestamp': '2017-01-01T00:00:00', 'some_field': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([event])
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json;charset=utf-8'},
        proxies=None,
        timeout=10,
        verify=True,
    )
    sent_body = json.loads(post_mock.call_args_list[0][1]['data'])
    assert sent_body == {'posted_name': 'toto', 'args_name': ''}
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_header_no_clash(caplog):
    """A literal header value equal to a match field name is passed through untouched."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Headers has no clash with the match fields',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_headers': {'header_name': 'titi'},
        'http_post2_payload': {'posted_name': 'toto'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    # 'titi' is both the header value and a field name in the match; the
    # literal value must win because it is not wrapped in {{...}}.
    match = {'@timestamp': '2017-01-01T00:00:00', 'titi': 'foobarbaz'}

    with mock.patch('requests.post') as post_mock:
        alerter.alert([match])

    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={
            'Content-Type': 'application/json',
            'Accept': 'application/json;charset=utf-8',
            'header_name': 'titi',
        },
        proxies=None,
        timeout=10,
        verify=True
    )
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_header_args_value(caplog):
    """A `{{field}}` template in a header value is replaced by the match field."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Headers args value',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_headers': {'header_name': '{{titi}}'},
        'http_post2_payload': {'posted_name': 'toto'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    match = {'@timestamp': '2017-01-01T00:00:00', 'titi': 'foobarbaz'}

    with mock.patch('requests.post') as post_mock:
        alerter.alert([match])

    # The templated header must carry the rendered match value.
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={
            'Content-Type': 'application/json',
            'Accept': 'application/json;charset=utf-8',
            'header_name': 'foobarbaz',
        },
        proxies=None,
        timeout=10,
        verify=True
    )
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_header_args_value_list(caplog):
    """A list is not a valid header value and must raise ValueError."""
    with pytest.raises(ValueError) as error:
        rule = {
            'name': 'Test HTTP Post Alerter With Headers args value',
            'type': 'any',
            'http_post2_url': 'http://test.webhook.url',
            'http_post2_headers': {'header_name': ["test1", "test2"]},
            'http_post2_payload': {'posted_name': 'toto'},
            'alert': [],
        }
        loader = FileRulesLoader({})
        loader.load_modules(rule)
        alerter = HTTPPost2Alerter(rule)
        match = {'@timestamp': '2017-01-01T00:00:00', 'titi': 'foobarbaz'}
        with mock.patch('requests.post'):
            alerter.alert([match])
    expected_message = (
        "HTTP Post 2: Can't send a header value which is not a string! "
        "Forbidden header header_name: ['test1', 'test2']"
    )
    assert expected_message in str(error)
def test_http_alerter_with_header_args_value_dict(caplog):
    """A dict is not a valid header value and must raise ValueError."""
    with pytest.raises(ValueError) as error:
        rule = {
            'name': 'Test HTTP Post Alerter With Headers args value',
            'type': 'any',
            'http_post2_url': 'http://test.webhook.url',
            'http_post2_headers': {'header_name': {'test': 'val'}},
            'http_post2_payload': {'posted_name': 'toto'},
            'alert': [],
        }
        loader = FileRulesLoader({})
        loader.load_modules(rule)
        alerter = HTTPPost2Alerter(rule)
        match = {'@timestamp': '2017-01-01T00:00:00', 'titi': 'foobarbaz'}
        with mock.patch('requests.post'):
            alerter.alert([match])
    expected_message = (
        "HTTP Post 2: Can't send a header value which is not a string! "
        "Forbidden header header_name: {'test': 'val'}"
    )
    assert expected_message in str(error)
def test_http_alerter_with_header_args_value_none(caplog):
    """None is not a valid header value and must raise ValueError."""
    with pytest.raises(ValueError) as error:
        rule = {
            'name': 'Test HTTP Post Alerter With Headers args value',
            'type': 'any',
            'http_post2_url': 'http://test.webhook.url',
            'http_post2_headers': {'header_name': None},
            'http_post2_payload': {'posted_name': 'toto'},
            'alert': [],
        }
        loader = FileRulesLoader({})
        loader.load_modules(rule)
        alerter = HTTPPost2Alerter(rule)
        match = {'@timestamp': '2017-01-01T00:00:00', 'titi': 'foobarbaz'}
        with mock.patch('requests.post'):
            alerter.alert([match])
    expected_message = (
        "HTTP Post 2: Can't send a header value which is not a string! "
        "Forbidden header header_name: None"
    )
    assert expected_message in str(error)
def test_http_alerter_with_header_args_value_not_found(caplog):
    """A header template referencing a missing match field renders as ''."""
    caplog.set_level(logging.INFO)
    rule = {
        'name': 'Test HTTP Post Alerter With Headers args value if not found',
        'type': 'any',
        'http_post2_url': 'http://test.webhook.url',
        'http_post2_headers': {'header_name': '{{titi1}}'},
        'http_post2_payload': {'posted_name': 'toto'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = HTTPPost2Alerter(rule)
    # The match has 'titi' but the header template asks for 'titi1'.
    match = {'@timestamp': '2017-01-01T00:00:00', 'titi': 'foobarbaz'}

    with mock.patch('requests.post') as post_mock:
        alerter.alert([match])

    # Unresolvable header templates degrade to an empty string.
    post_mock.assert_called_once_with(
        rule['http_post2_url'],
        data=mock.ANY,
        headers={
            'Content-Type': 'application/json',
            'Accept': 'application/json;charset=utf-8',
            'header_name': '',
        },
        proxies=None,
        timeout=10,
        verify=True
    )
    assert caplog.record_tuples[0] == ('elastalert', logging.INFO, 'HTTP Post 2 alert sent.')
def test_http_alerter_with_header_args_key(caplog):
caplog.set_level(logging.INFO)
rule = {
'name': 'Test HTTP Post Alerter With Headers args key',
| |
<filename>tests/components/hyperion/test_light.py
"""Tests for the Hyperion integration."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import AsyncMock, Mock, call, patch
from hyperion import const
from homeassistant.components.hyperion import (
get_hyperion_device_id,
light as hyperion_light,
)
from homeassistant.components.hyperion.const import (
CONF_EFFECT_HIDE_LIST,
DEFAULT_ORIGIN,
DOMAIN,
HYPERION_MANUFACTURER_NAME,
HYPERION_MODEL_NAME,
TYPE_HYPERION_PRIORITY_LIGHT,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.config_entries import (
RELOAD_AFTER_UPDATE_DELAY,
SOURCE_REAUTH,
ConfigEntry,
ConfigEntryState,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_TOKEN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.util import dt
import homeassistant.util.color as color_util
from . import (
TEST_AUTH_NOT_REQUIRED_RESP,
TEST_AUTH_REQUIRED_RESP,
TEST_CONFIG_ENTRY_ID,
TEST_ENTITY_ID_1,
TEST_ENTITY_ID_2,
TEST_ENTITY_ID_3,
TEST_HOST,
TEST_ID,
TEST_INSTANCE,
TEST_INSTANCE_1,
TEST_INSTANCE_2,
TEST_INSTANCE_3,
TEST_PORT,
TEST_PRIORITY,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
TEST_SYSINFO_ID,
add_test_config_entry,
call_registered_callback,
create_mock_client,
register_test_entity,
setup_test_config_entry,
)
from tests.common import async_fire_time_changed
COLOR_BLACK = color_util.COLORS["black"]
def _get_config_entry_from_unique_id(
    hass: HomeAssistant, unique_id: str
) -> ConfigEntry | None:
    """Return the hyperion config entry with the given unique_id, or None.

    Fix: the original ignored the ``unique_id`` argument and always compared
    against the TEST_SYSINFO_ID constant. Existing callers that pass
    TEST_SYSINFO_ID behave identically; the helper now also works for any
    other unique id.
    """
    for entry in hass.config_entries.async_entries(domain=DOMAIN):
        if entry.unique_id == unique_id:
            return entry
    return None
async def test_setup_config_entry(hass: HomeAssistant) -> None:
    """Test setting up the component via config entries."""
    hyperion_client = create_mock_client()
    await setup_test_config_entry(hass, hyperion_client=hyperion_client)
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state is not None
async def test_setup_config_entry_not_ready_connect_fail(
    hass: HomeAssistant,
) -> None:
    """A failed client connection must leave the entity unconfigured."""
    failing_client = create_mock_client()
    failing_client.async_client_connect = AsyncMock(return_value=False)
    await setup_test_config_entry(hass, hyperion_client=failing_client)
    assert hass.states.get(TEST_ENTITY_ID_1) is None
async def test_setup_config_entry_not_ready_switch_instance_fail(
    hass: HomeAssistant,
) -> None:
    """A failed instance switch must disconnect and leave no entity."""
    failing_client = create_mock_client()
    failing_client.async_client_switch_instance = AsyncMock(return_value=False)
    await setup_test_config_entry(hass, hyperion_client=failing_client)
    # Setup must clean up the connection it opened.
    assert failing_client.async_client_disconnect.called
    assert hass.states.get(TEST_ENTITY_ID_1) is None
async def test_setup_config_entry_not_ready_load_state_fail(
    hass: HomeAssistant,
) -> None:
    """A serverinfo failure must disconnect and leave no entity."""
    failing_client = create_mock_client()
    bad_serverinfo = {
        "command": "serverinfo",
        "success": False,
    }
    failing_client.async_get_serverinfo = AsyncMock(return_value=bad_serverinfo)
    await setup_test_config_entry(hass, hyperion_client=failing_client)
    # Setup must clean up the connection it opened.
    assert failing_client.async_client_disconnect.called
    assert hass.states.get(TEST_ENTITY_ID_1) is None
async def test_setup_config_entry_dynamic_instances(hass: HomeAssistant) -> None:
    """Test dynamic changes in the instance configuration.

    Drives the master client's registered instance-update callback through a
    sequence of server-side instance changes (stop, remove, re-add stopped,
    re-add running) and asserts the entity/registry state after each step.
    """
    registry = er.async_get(hass)

    config_entry = add_test_config_entry(hass)

    # One "master" client watches instances; "entity" clients serve each instance.
    master_client = create_mock_client()
    master_client.instances = [TEST_INSTANCE_1, TEST_INSTANCE_2]

    entity_client = create_mock_client()
    entity_client.instances = master_client.instances

    # side_effect order: first construction is the master, then one entity
    # client per running instance.
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        side_effect=[master_client, entity_client, entity_client],
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    assert hass.states.get(TEST_ENTITY_ID_1) is not None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert master_client.set_callbacks.called

    # == Inject a new instances update (stop instance 1, add instance 3)
    # Fish the instance-update callback out of the mock's call args so the
    # test can emulate a push from the Hyperion server.
    instance_callback = master_client.set_callbacks.call_args[0][0][
        f"{const.KEY_INSTANCE}-{const.KEY_UPDATE}"
    ]
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [
                    {**TEST_INSTANCE_1, "running": False},
                    TEST_INSTANCE_2,
                    TEST_INSTANCE_3,
                ],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None

    # Instance 1 is stopped, it should still be registered.
    assert registry.async_is_registered(TEST_ENTITY_ID_1)

    # == Inject a new instances update (remove instance 1)
    assert master_client.set_callbacks.called
    instance_callback = master_client.set_callbacks.call_args[0][0][
        f"{const.KEY_INSTANCE}-{const.KEY_UPDATE}"
    ]
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [TEST_INSTANCE_2, TEST_INSTANCE_3],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None

    # Instance 1 is removed, it should not still be registered.
    assert not registry.async_is_registered(TEST_ENTITY_ID_1)

    # == Inject a new instances update (re-add instance 1, but not running)
    # A non-running instance must not create an entity.
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [
                    {**TEST_INSTANCE_1, "running": False},
                    TEST_INSTANCE_2,
                    TEST_INSTANCE_3,
                ],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None

    # == Inject a new instances update (re-add instance 1, running)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=entity_client,
    ):
        await instance_callback(
            {
                const.KEY_SUCCESS: True,
                const.KEY_DATA: [TEST_INSTANCE_1, TEST_INSTANCE_2, TEST_INSTANCE_3],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get(TEST_ENTITY_ID_1) is not None
    assert hass.states.get(TEST_ENTITY_ID_2) is not None
    assert hass.states.get(TEST_ENTITY_ID_3) is not None
async def test_light_basic_properies(hass: HomeAssistant) -> None:
    """Test the basic properties."""
    hyperion_client = create_mock_client()
    await setup_test_config_entry(hass, hyperion_client=hyperion_client)

    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.state == "on"

    attributes = entity_state.attributes
    assert attributes["brightness"] == 255
    assert attributes["hs_color"] == (0.0, 0.0)
    assert attributes["icon"] == hyperion_light.ICON_LIGHTBULB
    assert attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID

    # By default the effect list is the 3 external sources + 'Solid'.
    assert len(attributes["effect_list"]) == 4

    assert attributes["supported_features"] == hyperion_light.SUPPORT_HYPERION
async def test_light_async_turn_on(hass: HomeAssistant) -> None:
    """Test turning the light on.

    Walks the light through a sequence of turn_on service calls, checking the
    commands sent to the Hyperion client and then simulating the server-side
    state callbacks to verify the resulting entity state. The section
    comments use (=) for "unchanged" and (!) for "changed by this step".
    """
    client = create_mock_client()
    await setup_test_config_entry(hass, hyperion_client=client)

    # On (=), 100% (=), solid (=), [255,255,255] (=)
    client.async_send_set_color = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
    )

    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: [255, 255, 255],
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )

    # On (=), 50% (!), solid (=), [255,255,255] (=)
    # ===
    brightness = 128
    client.async_send_set_color = AsyncMock(return_value=True)
    client.async_send_set_adjustment = AsyncMock(return_value=True)
    client.adjustment = [{const.KEY_ID: TEST_ID}]

    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
        blocking=True,
    )

    # HA brightness 128/255 maps to Hyperion's 0-100 scale as 50.
    assert client.async_send_set_adjustment.call_args == call(
        **{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 50, const.KEY_ID: TEST_ID}}
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: [255, 255, 255],
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )

    # Simulate a false return of async_send_set_adjustment
    client.async_send_set_adjustment = AsyncMock(return_value=False)
    client.adjustment = [{const.KEY_ID: TEST_ID}]
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
        blocking=True,
    )

    # Simulate a state callback from Hyperion.
    client.adjustment = [{const.KEY_BRIGHTNESS: 50}]
    call_registered_callback(client, "adjustment-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.state == "on"
    assert entity_state.attributes["brightness"] == brightness

    # On (=), 50% (=), solid (=), [0,255,255] (!)
    hs_color = (180.0, 100.0)
    client.async_send_set_color = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
        blocking=True,
    )

    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: (0, 255, 255),
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )

    # Simulate a state callback from Hyperion.
    client.visible_priority = {
        const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
        const.KEY_VALUE: {const.KEY_RGB: (0, 255, 255)},
    }
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["hs_color"] == hs_color
    assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB

    # On (=), 100% (!), solid, [0,255,255] (=)
    brightness = 255
    client.async_send_set_color = AsyncMock(return_value=True)
    client.async_send_set_adjustment = AsyncMock(return_value=True)
    client.adjustment = [{const.KEY_ID: TEST_ID}]

    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
        blocking=True,
    )

    assert client.async_send_set_adjustment.call_args == call(
        **{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 100, const.KEY_ID: TEST_ID}}
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: (0, 255, 255),
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    client.adjustment = [{const.KEY_BRIGHTNESS: 100}]
    call_registered_callback(client, "adjustment-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["brightness"] == brightness

    # On (=), 100% (=), "USB Capture (!), [0,255,255] (=)
    component = "V4L"
    effect = const.KEY_COMPONENTID_TO_NAME[component]
    client.async_send_clear = AsyncMock(return_value=True)
    client.async_send_set_component = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: effect},
        blocking=True,
    )

    assert client.async_send_clear.call_args == call(
        **{const.KEY_PRIORITY: TEST_PRIORITY}
    )
    # Selecting an external source disables the other sources and enables
    # the requested one, in external-source list order.
    assert client.async_send_set_component.call_args_list == [
        call(
            **{
                const.KEY_COMPONENTSTATE: {
                    const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[0],
                    const.KEY_STATE: False,
                }
            }
        ),
        call(
            **{
                const.KEY_COMPONENTSTATE: {
                    const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[1],
                    const.KEY_STATE: False,
                }
            }
        ),
        call(
            **{
                const.KEY_COMPONENTSTATE: {
                    const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[2],
                    const.KEY_STATE: True,
                }
            }
        ),
    ]

    client.visible_priority = {const.KEY_COMPONENTID: component}
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
    assert entity_state.attributes["effect"] == effect

    # On (=), 100% (=), "Warm Blobs" (!), [0,255,255] (=)
    effect = "Warm Blobs"
    client.async_send_clear = AsyncMock(return_value=True)
    client.async_send_set_effect = AsyncMock(return_value=True)

    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: effect},
        blocking=True,
    )

    assert client.async_send_clear.call_args == call(
        **{const.KEY_PRIORITY: TEST_PRIORITY}
    )
    assert client.async_send_set_effect.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_EFFECT: {const.KEY_NAME: effect},
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    client.visible_priority = {
        const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
        const.KEY_OWNER: effect,
    }
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
    assert entity_state.attributes["effect"] == effect

    # On (=), 100% (=), [0,0,255] (!)
    # Ensure changing the color will move the effect to 'Solid' automatically.
    hs_color = (240.0, 100.0)
    client.async_send_set_color = AsyncMock(return_value=True)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
        blocking=True,
    )
    assert client.async_send_set_color.call_args == call(
        **{
            const.KEY_PRIORITY: TEST_PRIORITY,
            const.KEY_COLOR: (0, 0, 255),
            const.KEY_ORIGIN: DEFAULT_ORIGIN,
        }
    )
    # Simulate a state callback from Hyperion.
    client.visible_priority = {
        const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
        const.KEY_VALUE: {const.KEY_RGB: (0, 0, 255)},
    }
    call_registered_callback(client, "priorities-update")
    entity_state = hass.states.get(TEST_ENTITY_ID_1)
    assert entity_state
    assert entity_state.attributes["hs_color"] == hs_color
    assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
    assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID

    # No calls if disconnected.
    client.has_loaded_state = False
    call_registered_callback(client, "client-update", {"loaded-state": False})
    client.async_send_clear = AsyncMock(return_value=True)
    client.async_send_set_effect = AsyncMock(return_value=True)

    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
    )

    assert not client.async_send_clear.called
    assert not client.async_send_set_effect.called
async def test_light_async_turn_on_fail_async_send_set_component(
    hass: HomeAssistant,
) -> None:
    """Test set_component failure when turning the light on."""
    failing_client = create_mock_client()
    failing_client.async_send_set_component = AsyncMock(return_value=False)
    failing_client.is_on = Mock(return_value=False)
    await setup_test_config_entry(hass, hyperion_client=failing_client)

    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
    )

    # The turn-on flow must stop right after the failing set_component call.
    expected_last_call = call.async_send_set_component(
        componentstate={"component": "ALL", "state": True}
    )
    assert failing_client.method_calls[-1] == expected_last_call
async def test_light_async_turn_on_fail_async_send_set_component_source(
    hass: HomeAssistant,
) -> None:
    """Test async_send_set_component failure when selecting the source."""
    failing_client = create_mock_client()
    failing_client.async_send_clear = AsyncMock(return_value=True)
    failing_client.async_send_set_component = AsyncMock(return_value=False)
    failing_client.is_on = Mock(return_value=True)
    await setup_test_config_entry(hass, hyperion_client=failing_client)

    service_data = {
        ATTR_ENTITY_ID: TEST_ENTITY_ID_1,
        ATTR_EFFECT: const.KEY_COMPONENTID_TO_NAME["V4L"],
    }
    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
    )

    # Source selection aborts on the first failing set_component call.
    expected_last_call = call.async_send_set_component(
        componentstate={"component": "BOBLIGHTSERVER", "state": False}
    )
    assert failing_client.method_calls[-1] == expected_last_call
async def test_light_async_turn_on_fail_async_send_clear_source(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning the light on."""
client = create_mock_client()
client.is_on | |
<filename>proganomaly_modules/training_module/trainer/losses.py
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from . import losses_berg
from . import losses_ganomaly
class Losses(
losses_berg.LossesBerg,
losses_ganomaly.LossesGanomaly
):
"""Class used for both training & evaluation losses.
"""
def __init__(self):
"""Instantiate instance of `Losses`.
"""
pass
def get_fake_logits_loss(self, fake_image_type, fake_logits):
"""Gets fake logits loss.
Args:
fake_image_type: str, the type of fake image.
fake_logits: tensor, shape of (batch_size, 1).
Returns:
Tensor of fake logit's loss of shape ().
"""
if self.params["training"]["distribution_strategy"]:
# Calculate base generator loss.
fake_logits_loss = tf.nn.compute_average_loss(
per_example_loss=fake_logits,
global_batch_size=(
self.global_batch_size_schedule_reconstruction[self.block_idx]
)
)
else:
# Calculate base generator loss.
fake_logits_loss = tf.reduce_mean(
input_tensor=fake_logits,
name="fake_logits_loss"
)
if self.params["training"]["reconstruction"]["write_loss_summaries"]:
# Add summaries for TensorBoard.
with self.summary_file_writer.as_default():
with tf.summary.record_if(
condition=tf.equal(
x=tf.math.floormod(
x=self.global_step_var,
y=self.params["training"]["reconstruction"]["save_summary_steps"]
),
y=0
)
):
tf.summary.scalar(
name="losses/{}_loss".format(fake_image_type),
data=fake_logits_loss,
step=self.global_step_var
)
self.summary_file_writer.flush()
return fake_logits_loss
def generator_loss_phase(self, fake_image_type, fake_images, training):
"""Gets logits and loss for generator.
Args:
fake_image_type: str, the type of fake image.
fake_images: tensor, generated images of shape
(batch_size, iamge_height, image_width, image_depth).
training: bool, if model should be training.
Returns:
Generator loss tensor of shape ().
"""
if self.params["training"]["reconstruction"]["write_generator_image_summaries"] and training:
# Add summaries for TensorBoard.
with self.summary_file_writer.as_default():
with tf.summary.record_if(
condition=tf.equal(
x=tf.math.floormod(
x=self.global_step_var,
y=self.params["training"]["reconstruction"]["save_summary_steps"]
),
y=0
)
):
tf.summary.image(
name="fake_images_{}".format(fake_image_type),
data=fake_images,
step=self.global_step_var,
max_outputs=5
)
self.summary_file_writer.flush()
# Get fake logits from discriminator using generator's output image.
fake_discriminator_logits = (
self.network_objects["discriminator"].models[self.growth_idx](
inputs=fake_images, training=training
)
)
# Get generator loss from discriminator.
generator_loss = self.get_fake_logits_loss(
fake_image_type=fake_image_type,
fake_logits=fake_discriminator_logits
)
return generator_loss
    def get_encoder_l1_z_loss(self, z, z_hat):
        """Gets encoder L1 latent vector loss.

        Args:
            z: tensor, latent vector of shape
                (batch_size, generator_latent_size).
            z_hat: tensor, latent vector of shape
                (batch_size, generator_latent_size).

        Returns:
            Tensor of encoder's L1 latent vector loss of shape ().
        """
        # Get difference between z and z-hat.
        z_diff = tf.subtract(x=z, y=z_hat, name="z_diff")
        # Get L1 norm of latent vector difference. The 1e-8 epsilon keeps the
        # loss strictly positive when z == z_hat (presumably to avoid a zero
        # gradient/NaN downstream — TODO confirm).
        if self.params["training"]["reconstruction"]["normalize_reconstruction_losses"]:
            z_diff_l1_norm = tf.reduce_mean(
                input_tensor=tf.abs(x=z_diff),
                axis=-1,
                name="z_diff_l1_norm"
            ) + 1e-8
        else:
            z_diff_l1_norm = tf.reduce_sum(
                input_tensor=tf.abs(x=z_diff),
                axis=[-1],
                name="z_diff_l1_norm"
            ) + 1e-8
        if self.params["training"]["distribution_strategy"]:
            # Calculate base encoder loss, averaged over the global batch.
            encoder_z_loss = tf.nn.compute_average_loss(
                per_example_loss=z_diff_l1_norm,
                global_batch_size=(
                    self.global_batch_size_schedule_reconstruction[self.block_idx]
                )
            )
        else:
            # Calculate base encoder loss.
            encoder_z_loss = tf.reduce_mean(
                input_tensor=z_diff_l1_norm,
                name="encoder_l1_z_loss"
            )
        if self.params["training"]["reconstruction"]["write_loss_summaries"]:
            # Add summaries for TensorBoard every `save_summary_steps` steps.
            with self.summary_file_writer.as_default():
                with tf.summary.record_if(
                    condition=tf.equal(
                        x=tf.math.floormod(
                            x=self.global_step_var,
                            y=self.params["training"]["reconstruction"]["save_summary_steps"]
                        ), y=0
                    )
                ):
                    tf.summary.scalar(
                        name="losses/encoder_l1_z_loss",
                        data=encoder_z_loss,
                        step=self.global_step_var
                    )
                self.summary_file_writer.flush()
        return encoder_z_loss
    def get_encoder_l2_z_loss(self, z, z_hat):
        """Gets encoder L2 latent vector loss.

        Args:
            z: tensor, latent vector of shape
                (batch_size, generator_latent_size).
            z_hat: tensor, latent vector of shape
                (batch_size, generator_latent_size).

        Returns:
            Tensor of encoder's L2 latent vector loss of shape ().
        """
        # Get difference between z and z-hat.
        z_diff = tf.subtract(x=z, y=z_hat, name="z_diff")
        # Get L2 norm (squared error reduced over the latent axis) of the
        # latent vector difference.
        if self.params["training"]["reconstruction"]["normalize_reconstruction_losses"]:
            z_diff_l2_norm = tf.reduce_mean(
                input_tensor=tf.square(x=z_diff),
                axis=-1,
                name="z_diff_l2_norm"
            )
        else:
            z_diff_l2_norm = tf.reduce_sum(
                input_tensor=tf.square(x=z_diff),
                axis=-1,
                name="z_diff_l2_norm"
            )
        if self.params["training"]["distribution_strategy"]:
            # Calculate base encoder loss, averaged over the global batch.
            encoder_z_loss = tf.nn.compute_average_loss(
                per_example_loss=z_diff_l2_norm,
                global_batch_size=(
                    self.global_batch_size_schedule_reconstruction[self.block_idx]
                )
            )
        else:
            # Calculate base encoder loss.
            encoder_z_loss = tf.reduce_mean(
                input_tensor=z_diff_l2_norm,
                name="encoder_l2_z_loss"
            )
        if self.params["training"]["reconstruction"]["write_loss_summaries"]:
            # Add summaries for TensorBoard every `save_summary_steps` steps.
            with self.summary_file_writer.as_default():
                with tf.summary.record_if(
                    condition=tf.equal(
                        x=tf.math.floormod(
                            x=self.global_step_var,
                            y=self.params["training"]["reconstruction"]["save_summary_steps"]
                        ), y=0
                    )
                ):
                    tf.summary.scalar(
                        name="losses/encoder_l2_z_loss",
                        data=encoder_z_loss,
                        step=self.global_step_var
                    )
                self.summary_file_writer.flush()
        return encoder_z_loss
def get_encoder_l1_image_loss(self, images, encoded_images):
"""Gets encoder L1 image loss.
Args:
images: tensor, either real images or images generated by the
generator from random noise. Shape of
(batch_size, image_height, image_width, depth).
encoded_images: tensor, images generated by the generator from
encoder's vector output of shape
(batch_size, image_height, image_width, depth).
Returns:
Tensor of encoder's L1 image loss of shape ().
"""
# Get difference between fake images and encoder images.
generator_encoder_image_diff = tf.subtract(
x=images,
y=encoded_images,
name="generator_encoder_image_diff"
)
# Get L1 norm of image difference.
if self.params["training"]["reconstruction"]["normalize_reconstruction_losses"]:
image_diff_l1_norm = tf.reduce_mean(
input_tensor=tf.abs(x=generator_encoder_image_diff),
axis=[1, 2, 3],
name="image_diff_l1_norm"
) + 1e-8
else:
image_diff_l1_norm = tf.reduce_sum(
input_tensor=tf.abs(x=generator_encoder_image_diff),
axis=[1, 2, 3],
name="image_diff_l1_norm"
) + 1e-8
if self.params["training"]["distribution_strategy"]:
# Calculate base encoder loss.
encoder_image_loss = tf.nn.compute_average_loss(
per_example_loss=image_diff_l1_norm,
global_batch_size=(
self.global_batch_size_schedule_reconstruction[self.block_idx]
)
)
else:
# Calculate base encoder loss.
encoder_image_loss = tf.reduce_mean(
input_tensor=image_diff_l1_norm,
name="encoder_l1_image_loss"
)
if self.params["training"]["reconstruction"]["write_loss_summaries"]:
# Add summaries for TensorBoard.
with self.summary_file_writer.as_default():
with tf.summary.record_if(
condition=tf.equal(
x=tf.math.floormod(
x=self.global_step_var,
y=self.params["training"]["reconstruction"]["save_summary_steps"]
), y=0
)
):
tf.summary.scalar(
name="losses/encoder_l1_image_loss",
data=encoder_image_loss,
step=self.global_step_var
)
self.summary_file_writer.flush()
return encoder_image_loss
def get_encoder_l2_image_loss(self, images, encoded_images):
"""Gets encoder L2 image loss.
Args:
images: tensor, either real images or images generated by the
generator from random noise. Shape of
(batch_size, image_height, image_width, depth).
encoded_images: tensor, images generated by the generator from
encoder's vector output of shape
(batch_size, image_height, image_width, depth).
Returns:
Tensor of encoder's L2 image loss of shape ().
"""
# Get difference between fake images and encoder images.
generator_encoder_image_diff = tf.subtract(
x=images,
y=encoded_images,
name="generator_encoder_image_diff"
)
# Get L2 norm of image difference.
if self.params["training"]["reconstruction"]["normalize_reconstruction_losses"]:
image_diff_l2_norm = tf.reduce_mean(
input_tensor=tf.square(x=generator_encoder_image_diff),
axis=[1, 2, 3],
name="image_diff_l2_norm"
)
else:
image_diff_l2_norm = tf.reduce_sum(
input_tensor=tf.square(x=generator_encoder_image_diff),
axis=[1, 2, 3],
name="image_diff_l2_norm"
)
if self.params["training"]["distribution_strategy"]:
# Calculate base encoder loss.
encoder_image_loss = tf.nn.compute_average_loss(
per_example_loss=image_diff_l2_norm,
global_batch_size=(
self.global_batch_size_schedule_reconstruction[self.block_idx]
)
)
else:
# Calculate base encoder loss.
encoder_image_loss = tf.reduce_mean(
input_tensor=image_diff_l2_norm,
name="encoder_l2_image_loss"
)
if self.params["training"]["reconstruction"]["write_loss_summaries"]:
# Add summaries for TensorBoard.
with self.summary_file_writer.as_default():
with tf.summary.record_if(
condition=tf.equal(
x=tf.math.floormod(
x=self.global_step_var,
y=self.params["training"]["reconstruction"]["save_summary_steps"]
), y=0
)
):
tf.summary.scalar(
name="losses/encoder_l2_image_loss",
data=encoder_image_loss,
step=self.global_step_var
)
self.summary_file_writer.flush()
return encoder_image_loss
def get_network_regularization_loss(self, network):
"""Gets network's regularization loss.
Args:
network: str, name of network model.
Returns:
Tensor of network's regularization loss of shape ().
"""
if self.params["training"]["distribution_strategy"]:
# Get regularization losses.
reg_loss = tf.nn.scale_regularization_loss(
regularization_loss=sum(
self.network_objects[network].models[self.growth_idx].losses
)
)
else:
# Get regularization losses.
reg_loss = sum(self.network_objects[network].models[self.growth_idx].losses)
if self.params["training"]["reconstruction"]["write_loss_summaries"]:
# Add summaries for TensorBoard.
with summary_file_writer.as_default():
with tf.summary.record_if(
condition=tf.equal(
x=tf.math.floormod(
x=global_step,
y=self.params["training"]["reconstruction"]["save_summary_steps"]
), y=0
)
):
tf.summary.scalar(
name="losses/{}_reg_loss".format(network),
data=reg_loss,
step=global_step
)
return reg_loss
def get_discriminator_loss_real_image_losses(self, real_images, training):
    """Gets real image losses for discriminator.

    Args:
        real_images: tensor, real images from input of shape
            (batch_size, image_height, image_width, depth).
        training: bool, if in training mode.

    Returns:
        Dictionary of scalar losses and running loss scalar tensor of
        discriminator.
    """
    dis_loss_weights = self.params["discriminator"]["losses"]
    # Create empty dict for unweighted losses.
    loss_dict = {}
    # Default to a zero loss when the D(x) term is disabled.
    discriminator_real_loss = tf.zeros(shape=(), dtype=tf.float32)
    if dis_loss_weights["D_of_x_loss_weight"]:
        # Get real logits from discriminator using real image.
        real_logits = self.network_objects["discriminator"].models[self.growth_idx](
            inputs=real_images, training=training
        )
        if self.params["training"]["distribution_strategy"]:
            # Average per-example logits over the global batch across replicas.
            discriminator_real_loss = tf.nn.compute_average_loss(
                per_example_loss=real_logits,
                global_batch_size=(
                    self.global_batch_size_schedule_reconstruction[self.block_idx]
                )
            )
        else:
            discriminator_real_loss = tf.reduce_mean(
                input_tensor=real_logits,
                name="real_loss"
            )
        # Record the unweighted loss before applying its loss weight.
        loss_dict["D(x)"] = discriminator_real_loss
        discriminator_real_loss = tf.multiply(
            x=dis_loss_weights["D_of_x_loss_weight"],
            y=discriminator_real_loss
        )
        # Get discriminator epsilon drift penalty.
        if self.params["discriminator"]["epsilon_drift"]:
            epsilon_drift_penalty = tf.multiply(
                x=self.params["discriminator"]["epsilon_drift"],
                y=tf.reduce_mean(input_tensor=tf.square(x=real_logits)),
                name="epsilon_drift_penalty"
            )
            loss_dict["epsilon_drift_penalty"] = epsilon_drift_penalty
    if self.params["training"]["reconstruction"]["write_loss_summaries"]:
        # Add summaries for TensorBoard.
        with self.summary_file_writer.as_default():
            with tf.summary.record_if(
                condition=tf.equal(
                    x=tf.math.floormod(
                        x=self.global_step_var,
                        y=self.params["training"]["reconstruction"]["save_summary_steps"]
                    ), y=0
                )
            ):
                tf.summary.scalar(
                    name="losses/discriminator_real_loss",
                    data=discriminator_real_loss,
                    step=self.global_step_var
                )
                # Fixed: guard on the dict key rather than the config flag so
                # we never reference epsilon_drift_penalty when it was not
                # computed (e.g. epsilon_drift configured but the D(x) loss
                # weight disabled), which previously raised a NameError.
                if "epsilon_drift_penalty" in loss_dict:
                    tf.summary.scalar(
                        name="losses/epsilon_drift_penalty",
                        data=loss_dict["epsilon_drift_penalty"],
                        step=self.global_step_var
                    )
                self.summary_file_writer.flush()
    return loss_dict, discriminator_real_loss
def _get_gradient_penalty_loss(self, fake_images, real_images):
"""Gets discriminator gradient penalty loss.
Args:
fake_images: tensor, images generated by the generator from random
noise of shape (batch_size, image_size, image_size, 3).
real_images: tensor, real images from input of shape
(batch_size, image_height, image_width, 3).
Returns:
Discriminator's gradient penalty loss of shape ().
"""
batch_size = real_images.shape[0]
# Get a random uniform number rank 4 tensor.
random_uniform_num = tf.random.uniform(
shape=(batch_size, 1, 1, 1),
minval=0., maxval=1.,
dtype=tf.float32,
name="gp_random_uniform_num"
)
# Find the element-wise difference between images.
image_difference = fake_images - real_images
# Get random samples from this mixed image distribution.
mixed_images = random_uniform_num * image_difference
mixed_images += real_images
# Get loss from interpolated mixed images and watch for gradients.
with tf.GradientTape() as gp_tape:
# Watch interpolated mixed images.
gp_tape.watch(tensor=mixed_images)
# Send to the discriminator to get logits.
mixed_logits = self.network_objects["discriminator"].models[self.growth_idx](
inputs=mixed_images, training=True
)
# Get the mixed loss.
mixed_loss = tf.reduce_sum(
input_tensor=mixed_logits,
name="gp_mixed_loss"
)
# Get gradient from returned list of length 1.
mixed_gradients = gp_tape.gradient(
target=mixed_loss, sources=[mixed_images]
)[0]
# Get gradient's L2 norm.
mixed_norms = tf.sqrt(
x=tf.reduce_sum(
| |
excpt_msgs:
if n == 1:
excpt_msg = "{}, {}".format(n, msg)
else:
excpt_msg = "{}; {}, {}".format(excpt_msg, n, msg)
except:
try:
excpt_msg = response.json()
if excpt_msg is None:
raise Exception()
elif excpt_msg == "":
raise Exception()
except:
excpt_msg = "Unknown error ('{0}'), check url in a web browser: '{1}'".format(response.reason, url)
api_error = EODataDownResponseException(excpt_msg, response)
api_error.__cause__ = None
raise api_error
return success
def check_http_response_prod(self, response, url):
    """
    Check the HTTP response and raise an exception with appropriate error message
    if request was not successful.

    :param response: the http response object.
    :param url: the URL called.
    :return: boolean as to whether status is successful or otherwise.
    :raises EODataDownResponseException: if the response status indicates failure.
    """
    try:
        response.raise_for_status()
        success = True
    except (requests.HTTPError, ValueError):
        success = False
        excpt_msg = "Invalid API response."
        try:
            excpt_msgs = response.json()["errors"]
            # Number each reported error, producing "1, msg; 2, msg; ...".
            # Fixed: the counter was previously never incremented, so only
            # the last error message survived.
            msg_parts = []
            for n, msg in enumerate(excpt_msgs, start=1):
                msg_parts.append("{}, {}".format(n, msg))
            excpt_msg = "; ".join(msg_parts)
        except Exception:
            try:
                # Fall back to the whole JSON body if there is no 'errors' key.
                excpt_msg = response.json()
                if excpt_msg is None or excpt_msg == "":
                    raise ValueError("Empty JSON response body.")
            except Exception:
                excpt_msg = "Unknown error ('{0}'), check url in a web browser: '{1}'".format(response.reason, url)
        api_error = EODataDownResponseException(excpt_msg, response)
        # Suppress the chained traceback; the message carries the detail.
        api_error.__cause__ = None
        raise api_error
    return success
def check_http_response_granules(self, response, url):
    """
    Check the HTTP response and raise an exception with appropriate error message
    if request was not successful.

    :param response: the http response object.
    :param url: the URL called.
    :return: boolean as to whether status is successful or otherwise.
    :raises EODataDownResponseException: if the response status indicates failure.
    """
    try:
        response.raise_for_status()
        success = True
    except (requests.HTTPError, ValueError):
        success = False
        excpt_msg = "Invalid API response."
        try:
            excpt_msgs = response.json()["errors"]
            # Number each reported error, producing "1, msg; 2, msg; ...".
            # Fixed: the counter was previously never incremented, so only
            # the last error message survived.
            msg_parts = []
            for n, msg in enumerate(excpt_msgs, start=1):
                msg_parts.append("{}, {}".format(n, msg))
            excpt_msg = "; ".join(msg_parts)
        except Exception:
            try:
                # Fall back to the whole JSON body if there is no 'errors' key.
                excpt_msg = response.json()
                if excpt_msg is None or excpt_msg == "":
                    raise ValueError("Empty JSON response body.")
            except Exception:
                excpt_msg = "Unknown error ('{0}'), check url in a web browser: '{1}'".format(response.reason, url)
        api_error = EODataDownResponseException(excpt_msg, response)
        # Suppress the chained traceback; the message carries the detail.
        api_error.__cause__ = None
        raise api_error
    return success
def check_new_scns(self, check_from_start=False):
    """Query the NASA CMR API for new ICESat-2 granules and record them.

    For each configured product the CMR collections endpoint is first used
    to validate the requested product version; the granules endpoint is then
    paged through for each configured bounding box and any granules found
    are written to the local database.

    :param check_from_start: if True the query starts from the configured
        start date; otherwise it starts from the acquisition date of the
        most recent scene already recorded for the product.
    """
    json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()
    logger.debug("Creating HTTP Session Object.")
    session_req = requests.Session()
    user_agent = "eoedatadown/" + str(eodatadown.EODATADOWN_VERSION)
    session_req.headers["User-Agent"] = user_agent
    headers = {'Accept': 'application/json'}
    logger.debug("Creating Database Engine and Session.")
    db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)
    session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)
    ses = session_sqlalc()
    # Get the next PID value to ensure increment
    c_max_pid = ses.query(func.max(EDDICESAT2.PID).label("max_pid")).one().max_pid
    if c_max_pid is None:
        n_max_pid = 0
    else:
        n_max_pid = c_max_pid + 1
    # The triple-quoted block below is disabled legacy authentication code
    # kept for reference; it is never executed.
    """
    ###########################
    # Authenticate the User:
    token_api_url = 'https://cmr.earthdata.nasa.gov/legacy-services/rest/tokens'
    hostname = socket.gethostname()
    lcl_ip = socket.gethostbyname(hostname)
    data = {
        'token': {
            'username' : self.earthDataUser,
            'password' : <PASSWORD>,
            'client_id' : 'EODataDown_client_id',
            'user_ip_address': lcl_ip
        }
    }
    auth_resp = session_req.post(token_api_url, json=data, headers=headers)
    if not self.check_http_response_auth(auth_resp, token_api_url):
        raise EODataDownException("Failed to authenticate.")
    json_rspn = json.loads(auth_resp.content)
    api_token = json_parse_helper.getStrValue(json_rspn, ['token', 'id'])
    print(api_token)
    # Authenticated
    ###########################
    """
    ###########################
    # Check Product and Versions
    cmr_collcts_url = 'https://cmr.earthdata.nasa.gov/search/collections.json'
    for prod in self.productsLst:
        prod_srch_params = {'short_name': prod["product"]}
        prod_resp = session_req.get(cmr_collcts_url, params=prod_srch_params, headers=headers)
        if not self.check_http_response_auth(prod_resp, cmr_collcts_url):
            raise EODataDownException("Failed to find product information.")
        prod_info = json.loads(prod_resp.content)
        prod_versions = [i['version_id'] for i in prod_info['feed']['entry']]
        if prod['version'] not in prod_versions:
            # Build a comma-separated list of valid versions for the error.
            vers_str = ""
            for ver in prod_versions:
                if vers_str == "":
                    vers_str = ver
                else:
                    vers_str = "{}, {}".format(vers_str, ver)
            raise EODataDownException("The specified version ({}) for {} product is not available."
                                      "Available options are: '{}'".format(prod['version'],
                                                                           prod['product'], vers_str))
        # Warn (but continue) if a newer product version exists.
        max_version = max(prod_versions)
        num_max_version = int(max_version)
        num_prod_version = int(prod['version'])
        if num_prod_version < num_max_version:
            logger.warning("You are not using the most recent version ({}) of "
                           "the product ({}: {})".format(max_version, prod['product'], prod['version']))
    # Checked product versions
    ###########################
    ###########################
    # Find Available Downloads
    new_scns_avail = False
    query_datetime = datetime.datetime.now()
    granule_search_url = 'https://cmr.earthdata.nasa.gov/search/granules'
    end_date_str = datetime.datetime.now().strftime("%Y-%m-%dT23:59:59Z")
    for prod in self.productsLst:
        logger.info("Finding Granules for Product '{}'".format(prod['product']))
        logger.debug("Find the start date for query for product '{}' - if table is empty then using "
                     "config date otherwise date of last acquried image.".format(prod["product"]))
        query_date = self.startDate
        if (not check_from_start) and \
                (ses.query(EDDICESAT2).filter(EDDICESAT2.Product == prod["product"]).first() is not None):
            query_date = ses.query(EDDICESAT2).filter(EDDICESAT2.Product == prod["product"]).\
                order_by(EDDICESAT2.Start_Time.desc()).first().Start_Time
        logger.info("Query for product '{}' with start at date: {}".format(prod["product"], query_date))
        start_date_str = query_date.strftime("%Y-%m-%dT00:00:00Z")
        temporal_str = "{},{}".format(start_date_str, end_date_str)
        for geo_bound in self.geoBounds:
            logger.info("Finding Granules for Product '{}' in BBOX: [{}]".format(prod['product'], geo_bound.getSimpleBBOXStr()))
            search_params = {
                'short_name' : prod["product"],
                'version' : prod["version"],
                'temporal' : temporal_str,
                'page_size' : 100,
                'page_num' : 1,
                'bounding_box': geo_bound.getBBOXLLURStr()
            }
            db_records = list()
            # Page through the granule results until an empty page is
            # returned (the loop below breaks out on missing/empty entries).
            while True:
                granules_resp = session_req.get(granule_search_url, params=search_params, headers=headers)
                if not self.check_http_response_granules(granules_resp, granule_search_url):
                    raise EODataDownException("Failed to expected response to granules search")
                granules_rslt = json.loads(granules_resp.content)
                if not json_parse_helper.doesPathExist(granules_rslt, ['feed', 'entry']):
                    break
                granule_entries = json_parse_helper.getListValue(granules_rslt, ['feed', 'entry'])
                if len(granule_entries) == 0:
                    # Out of results, so break out of loop
                    break
                for granule_meta in granule_entries:
                    invalid_granule = False
                    # Core granule metadata fields.
                    gran_id = json_parse_helper.getStrValue(granule_meta, ['id'])
                    gran_size = json_parse_helper.getNumericValue(granule_meta, ['granule_size'])
                    gran_online = json_parse_helper.getBooleanValue(granule_meta, ['online_access_flag'])
                    gran_orig_format = json_parse_helper.getStrValue(granule_meta, ['original_format'])
                    gran_producer_id = json_parse_helper.getStrValue(granule_meta, ['producer_granule_id'])
                    gran_title = json_parse_helper.getStrValue(granule_meta, ['title'])
                    gran_start_time = json_parse_helper.getDateTimeValue(granule_meta, ['time_start'],
                                                                         ["%Y-%m-%dT%H:%M:%S.%f"])
                    gran_end_time = json_parse_helper.getDateTimeValue(granule_meta, ['time_end'],
                                                                       ["%Y-%m-%dT%H:%M:%S.%f"])
                    gran_updated = json_parse_helper.getDateTimeValue(granule_meta, ['updated'],
                                                                      ["%Y-%m-%dT%H:%M:%S.%f"])
                    # Orbit metadata is optional in the CMR response.
                    if json_parse_helper.doesPathExist(granule_meta, ['orbit']):
                        gran_orb_ascending_crossing = json_parse_helper.getNumericValue(granule_meta, ['orbit', 'ascending_crossing'])
                        gran_orb_start_direction = json_parse_helper.getStrValue(granule_meta, ['orbit', 'start_direction'])
                        gran_orb_start_lat = json_parse_helper.getNumericValue(granule_meta, ['orbit', 'start_lat'])
                        gran_orb_end_direction = json_parse_helper.getStrValue(granule_meta, ['orbit', 'end_direction'])
                        gran_orb_end_lat = json_parse_helper.getNumericValue(granule_meta, ['orbit', 'end_lat'])
                    else:
                        gran_orb_ascending_crossing = None
                        gran_orb_start_direction = None
                        gran_orb_start_lat = None
                        gran_orb_end_direction = None
                        gran_orb_end_lat = None
                    # Derive an overall bounding box, preferring 'boxes' and
                    # falling back to the union of polygon coordinates.
                    north_lat = 0.0
                    south_lat = 0.0
                    east_lon = 0.0
                    west_lon = 0.0
                    if json_parse_helper.doesPathExist(granule_meta, ['boxes']):
                        gran_bbox = json_parse_helper.getListValue(granule_meta, ['boxes'])
                        first_bbox = True
                        for bbox in gran_bbox:
                            # Box string format: "south west north east".
                            bbox_comps = bbox.split(' ')
                            south_lat_tmp = float(bbox_comps[0])
                            west_lon_tmp = float(bbox_comps[1])
                            north_lat_tmp = float(bbox_comps[2])
                            east_lon_tmp = float(bbox_comps[3])
                            if first_bbox:
                                west_lon = west_lon_tmp
                                south_lat = south_lat_tmp
                                east_lon = east_lon_tmp
                                north_lat = north_lat_tmp
                                first_bbox = False
                            else:
                                # Expand to the union of all boxes.
                                if west_lon_tmp < west_lon:
                                    west_lon = west_lon_tmp
                                if south_lat_tmp < south_lat:
                                    south_lat = south_lat_tmp
                                if east_lon_tmp > east_lon:
                                    east_lon = east_lon_tmp
                                if north_lat_tmp > north_lat:
                                    north_lat = north_lat_tmp
                    elif json_parse_helper.doesPathExist(granule_meta, ['polygons']):
                        gran_polys = json_parse_helper.getListValue(granule_meta, ['polygons'])
                        first_coord = True
                        for poly in gran_polys:
                            for coord_str in poly:
                                # Coordinates are space-separated lat/lon pairs.
                                coord_str_comps = coord_str.split(' ')
                                n_pts = int(len(coord_str_comps)/2)
                                for pt in range(n_pts):
                                    lat_val = float(coord_str_comps[(pt*2)])
                                    lon_val = float(coord_str_comps[(pt*2)+1])
                                    if first_coord:
                                        west_lon = lon_val
                                        south_lat = lat_val
                                        east_lon = lon_val
                                        north_lat = lat_val
                                        first_coord = False
                                    else:
                                        # Expand to the union of all vertices.
                                        if lon_val < west_lon:
                                            west_lon = lon_val
                                        if lat_val < south_lat:
                                            south_lat = lat_val
                                        if lon_val > east_lon:
                                            east_lon = lon_val
                                        if lat_val > north_lat:
                                            north_lat = lat_val
                    else:
                        # NOTE(review): the 4.0 threshold appears to treat
                        # small granules without a footprint as ignorable and
                        # larger ones as an error — confirm the units/intent.
                        if gran_size > 4.0:
                            import pprint
                            pprint.pprint(granule_meta)
                            raise EODataDownException("No BBOX defined for {}".format(gran_producer_id))
                        else:
                            invalid_granule = True
                            logger.debug("The granule '{}' has been defined as invalid as no BBOX "
                                         "or polygon was defined.".format(gran_producer_id))
                    if not invalid_granule:
                        # Find the download URL for the granule dataset.
                        granule_links = json_parse_helper.getListValue(granule_meta, ['links'])
                        gran_url = None
                        for link in granule_links:
                            if link['type'] == 'application/x-hdfeos':
                                gran_url = link['href']
                                break
                        if gran_url is None:
                            raise EODataDownException("Could not find a dataset URL for '{}'".format(gran_producer_id))
                        # Collect metadata that does not fit the fixed DB
                        # columns into an extra-info dict (stored only if
                        # anything was added to it).
                        granule_extra_info = dict()
                        granule_extra_info['granule'] = dict()
                        use_extra_info = False
                        gran_equator_crossing_date_time = None
                        gran_equator_crossing_longitude = None
                        gran_orbit_number = None
                        if json_parse_helper.doesPathExist(granule_meta, ['orbit_calculated_spatial_domains']):
                            if len(granule_meta['orbit_calculated_spatial_domains']) == 1:
                                orb_calcd_spat_domain = granule_meta['orbit_calculated_spatial_domains'][0]
                                gran_equator_crossing_date_time = json_parse_helper.getDateTimeValue(orb_calcd_spat_domain, ['equator_crossing_date_time'], ["%Y-%m-%dT%H:%M:%S.%f"])
                                gran_equator_crossing_longitude = json_parse_helper.getNumericValue(orb_calcd_spat_domain, ['equator_crossing_longitude'], -180, 180)
                                gran_orbit_number = int(json_parse_helper.getNumericValue(orb_calcd_spat_domain, ['orbit_number']))
                            else:
                                use_extra_info = True
                                granule_extra_info['granule']['orbit_calculated_spatial_domains'] = granule_meta['orbit_calculated_spatial_domains']
                        if json_parse_helper.doesPathExist(granule_meta, ['polygons']):
                            use_extra_info = True
                            granule_extra_info['granule']['polygons'] = granule_meta['polygons']
                        if json_parse_helper.doesPathExist(granule_meta, ['boxes']):
                            use_extra_info = True
                            granule_extra_info['granule']['boxes'] = granule_meta['boxes']
                        if not use_extra_info:
                            granule_extra_info = None
                    if not invalid_granule:
                        db_records.append(EDDICESAT2(PID=n_max_pid, Producer_ID=gran_producer_id, Granule_ID=gran_id,
                                                     Title=gran_title, Start_Time=gran_start_time,
                                                     End_Time=gran_end_time, Updated_Time=gran_updated,
                                                     Product=prod["product"], Version=prod["version"],
                                                     Online=gran_online, Original_Format=gran_orig_format,
                                                     Orb_Ascending_Crossing=gran_orb_ascending_crossing,
                                                     Orb_Start_Direct=gran_orb_start_direction, Orb_Start_Lat=gran_orb_start_lat,
                                                     Orb_End_Direct=gran_orb_end_direction, Orb_End_Lat=gran_orb_end_lat,
                                                     Eq_Cross_Time=gran_equator_crossing_date_time,
                                                     Eq_Cross_Lon=gran_equator_crossing_longitude,
                                                     Orbit_Number=gran_orbit_number,
                                                     North_Lat=north_lat, South_Lat=south_lat, East_Lon=east_lon,
                                                     West_Lon=west_lon, Total_Size=gran_size, Remote_URL=gran_url,
                                                     Query_Date=query_datetime, ExtendedInfo=granule_extra_info))
                        n_max_pid += 1
                # Increment page_num
                search_params['page_num'] += 1
            logger.info("Adding {} records to the database.".format(len(db_records)))
            if len(db_records) > 0:
                logger.debug("Writing records to the database.")
                ses.add_all(db_records)
                ses.commit()
                logger.debug("Written and committed records to the database.")
                new_scns_avail = True
    # Found Available Downloads
    ###########################
    ses.commit()
    ses.close()
    logger.debug("Closed Database session")
    # Record this check in the usage log database.
    edd_usage_db = EODataDownUpdateUsageLogDB(self.db_info_obj)
    edd_usage_db.add_entry(description_val="Checked for availability of new scenes", sensor_val=self.sensor_name,
                           updated_lcl_db=True, scns_avail=new_scns_avail)
def rm_scns_intersect(self, all_scns=False):
"""
A function which checks whether the bounding box for the scene intersects with a specified
vector layer. If the scene does not intersect then it is deleted from the database. By default
this is only testing the scenes which have not been downloaded.
:param all_scns: If | |
super(AuthInfoUpdateParameters, self).__init__(**kwargs)
self.token_type = token_type
self.token = token
self.refresh_token = refresh_token
self.scope = scope
self.expires_in = expires_in
class BaseImageDependency(Model):
    """Describes one base-image dependency of an image.

    :param type: The type of the base image dependency. Possible values
     include: 'BuildTime', 'RunTime'
    :type type: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.BaseImageDependencyType
    :param registry: The registry login server.
    :type registry: str
    :param repository: The repository name.
    :type repository: str
    :param tag: The tag name.
    :type tag: str
    :param digest: The sha256-based digest of the image manifest.
    :type digest: str
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'registry': {'key': 'registry', 'type': 'str'},
        'repository': {'key': 'repository', 'type': 'str'},
        'tag': {'key': 'tag', 'type': 'str'},
        'digest': {'key': 'digest', 'type': 'str'},
    }

    def __init__(self, *, type=None, registry: str=None, repository: str=None, tag: str=None, digest: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # All properties are optional; copy them onto the instance as given.
        self.digest = digest
        self.tag = tag
        self.repository = repository
        self.registry = registry
        self.type = type
class BaseImageTrigger(Model):
    """Trigger driven by updates to a base image dependency.

    All required parameters must be populated in order to send to Azure.

    :param base_image_trigger_type: Required. The type of the auto trigger
     for base image dependency updates. Possible values include: 'All',
     'Runtime'
    :type base_image_trigger_type: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.BaseImageTriggerType
    :param update_trigger_endpoint: The endpoint URL for receiving update
     triggers.
    :type update_trigger_endpoint: str
    :param update_trigger_payload_type: Type of Payload body for Base image
     update triggers. Possible values include: 'Default', 'Token'
    :type update_trigger_payload_type: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.UpdateTriggerPayloadType
    :param status: The current status of trigger. Possible values include:
     'Disabled', 'Enabled'. Default value: "Enabled" .
    :type status: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TriggerStatus
    :param name: Required. The name of the trigger.
    :type name: str
    """

    # Both the trigger type and the trigger name must be supplied.
    _validation = {
        'base_image_trigger_type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'base_image_trigger_type': {'key': 'baseImageTriggerType', 'type': 'str'},
        'update_trigger_endpoint': {'key': 'updateTriggerEndpoint', 'type': 'str'},
        'update_trigger_payload_type': {'key': 'updateTriggerPayloadType', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, *, base_image_trigger_type, name: str, update_trigger_endpoint: str=None, update_trigger_payload_type=None, status="Enabled", **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.status = status
        self.update_trigger_payload_type = update_trigger_payload_type
        self.update_trigger_endpoint = update_trigger_endpoint
        self.base_image_trigger_type = base_image_trigger_type
class BaseImageTriggerUpdateParameters(Model):
    """Parameters for updating a base-image dependency trigger.

    All required parameters must be populated in order to send to Azure.

    :param base_image_trigger_type: The type of the auto trigger for base
     image dependency updates. Possible values include: 'All', 'Runtime'
    :type base_image_trigger_type: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.BaseImageTriggerType
    :param update_trigger_endpoint: The endpoint URL for receiving update
     triggers.
    :type update_trigger_endpoint: str
    :param update_trigger_payload_type: Type of Payload body for Base image
     update triggers. Possible values include: 'Default', 'Token'
    :type update_trigger_payload_type: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.UpdateTriggerPayloadType
    :param status: The current status of trigger. Possible values include:
     'Disabled', 'Enabled'. Default value: "Enabled" .
    :type status: str or
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TriggerStatus
    :param name: Required. The name of the trigger.
    :type name: str
    """

    # Only the trigger name is mandatory for an update.
    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'base_image_trigger_type': {'key': 'baseImageTriggerType', 'type': 'str'},
        'update_trigger_endpoint': {'key': 'updateTriggerEndpoint', 'type': 'str'},
        'update_trigger_payload_type': {'key': 'updateTriggerPayloadType', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, *, name: str, base_image_trigger_type=None, update_trigger_endpoint: str=None, update_trigger_payload_type=None, status="Enabled", **kwargs) -> None:
        super().__init__(**kwargs)
        self.name = name
        self.status = status
        self.update_trigger_payload_type = update_trigger_payload_type
        self.update_trigger_endpoint = update_trigger_endpoint
        self.base_image_trigger_type = base_image_trigger_type
class CallbackConfig(Model):
    """Service URI and custom headers used for webhook notifications.

    All required parameters must be populated in order to send to Azure.

    :param service_uri: Required. The service URI for the webhook to post
     notifications.
    :type service_uri: str
    :param custom_headers: Custom headers that will be added to the webhook
     notifications.
    :type custom_headers: dict[str, str]
    """

    # The destination URI is mandatory; headers are optional extras.
    _validation = {
        'service_uri': {'required': True},
    }

    _attribute_map = {
        'service_uri': {'key': 'serviceUri', 'type': 'str'},
        'custom_headers': {'key': 'customHeaders', 'type': '{str}'},
    }

    def __init__(self, *, service_uri: str, custom_headers=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.custom_headers = custom_headers
        self.service_uri = service_uri
class CloudError(Model):
    """CloudError model (carries no serialized properties)."""

    _attribute_map = {}
class Credentials(Model):
    """Set of credentials that will be used when a run is invoked.

    :param source_registry: Describes the credential parameters for
     accessing the source registry.
    :type source_registry:
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.SourceRegistryCredentials
    :param custom_registries: Describes the credential parameters for
     accessing other custom registries. The key for the dictionary item
     will be the registry login server (myregistry.azurecr.io) and the
     value of the item will be the registry credentials for accessing the
     registry.
    :type custom_registries: dict[str,
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.CustomRegistryCredentials]
    """

    _attribute_map = {
        'source_registry': {'key': 'sourceRegistry', 'type': 'SourceRegistryCredentials'},
        'custom_registries': {'key': 'customRegistries', 'type': '{CustomRegistryCredentials}'},
    }

    def __init__(self, *, source_registry=None, custom_registries=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.custom_registries = custom_registries
        self.source_registry = source_registry
class CustomRegistryCredentials(Model):
    """Credentials used to access a custom registry during a run.

    :param user_name: The username for logging into the custom registry.
    :type user_name:
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.SecretObject
    :param password: The password for logging into the custom registry.
     The password is a secret object that allows multiple ways of providing
     the value for it.
    :type password:
     ~azure.mgmt.containerregistry.v2019_06_01_preview.models.SecretObject
    :param identity: Indicates the managed identity assigned to the custom
     credential. If a user-assigned identity this value is the Client ID.
     If a system-assigned identity, the value will be `system`. In the case
     of a system-assigned identity, the Client ID will be determined by the
     runner. This identity may be used to authenticate to key vault to
     retrieve credentials or it may be the only source of authentication
     used for accessing the registry.
    :type identity: str
    """

    _attribute_map = {
        'user_name': {'key': 'userName', 'type': 'SecretObject'},
        'password': {'key': 'password', 'type': 'SecretObject'},
        'identity': {'key': 'identity', 'type': 'str'},
    }

    def __init__(self, *, user_name=None, password=None, identity: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.identity = identity
        self.password = password
        self.user_name = user_name
class RunRequest(Model):
    """Base request parameters for scheduling a run.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: DockerBuildRequest, FileTaskRunRequest,
    TaskRunRequest, EncodedTaskRunRequest

    All required parameters must be populated in order to send to Azure.

    :param is_archive_enabled: The value that indicates whether archiving
     is enabled for the run or not. Default value: False .
    :type is_archive_enabled: bool
    :param agent_pool_name: The dedicated agent pool for the run.
    :type agent_pool_name: str
    :param log_template: The template that describes the repository and tag
     information for run log artifact.
    :type log_template: str
    :param type: Required. Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
        'agent_pool_name': {'key': 'agentPoolName', 'type': 'str'},
        'log_template': {'key': 'logTemplate', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # Maps the polymorphic discriminator value to the concrete sub-class.
    _subtype_map = {
        'type': {'DockerBuildRequest': 'DockerBuildRequest', 'FileTaskRunRequest': 'FileTaskRunRequest', 'TaskRunRequest': 'TaskRunRequest', 'EncodedTaskRunRequest': 'EncodedTaskRunRequest'}
    }

    def __init__(self, *, is_archive_enabled: bool=False, agent_pool_name: str=None, log_template: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.log_template = log_template
        self.agent_pool_name = agent_pool_name
        self.is_archive_enabled = is_archive_enabled
        # Discriminator constant; filled by the server, never by callers.
        self.type = None
class DockerBuildRequest(RunRequest):
"""The parameters for a docker quick build.
All required parameters must be populated in order to send to Azure.
:param is_archive_enabled: The value that indicates whether archiving is
enabled for the run or not. Default value: False .
:type is_archive_enabled: bool
:param agent_pool_name: The dedicated agent pool for the run.
:type agent_pool_name: str
:param log_template: The template that describes the repository and tag
information for run log artifact.
:type log_template: str
:param type: Required. Constant filled by server.
:type type: str
:param image_names: The fully qualified image names including the
repository and tag.
:type image_names: list[str]
:param is_push_enabled: The value of this property indicates whether the
image built should be pushed to the registry or not. Default value: True .
:type is_push_enabled: bool
:param no_cache: The value of this property indicates whether the image
cache is enabled or not. Default value: False .
:type no_cache: bool
:param docker_file_path: Required. The Docker file path relative to the
source location.
:type docker_file_path: str
:param target: The name of the target build stage for the docker build.
:type target: str
:param arguments: The collection of override arguments to be used when
executing the run.
:type arguments:
list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.Argument]
:param timeout: Run timeout in seconds. Default value: 3600 .
:type timeout: int
:param platform: Required. The platform properties against which the run
has to happen.
:type platform:
~azure.mgmt.containerregistry.v2019_06_01_preview.models.PlatformProperties
:param agent_configuration: The machine | |
file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
mock.MagicMock(return_value=[]))
def testGet_WithFinish_SingleRevOwner_Clank_Skips(self):
    # Files a bug for a single-revision anomaly on the 'ClankInternal'
    # master. The bug itself should still be created, but the automatic
    # CL-owner assignment must be skipped for this master.
    namespaced_stored_object.Set(
        'repositories',
        {"chromium": {
            "repository_url": "https://chromium.googlesource.com/chromium/src"
        }})
    MockIssueTrackerService.bug_id = 277761
    response = self._PostSampleBug(is_single_rev=True, master='ClankInternal')
    # The response page should have a bug number.
    self.assertIn('277761', response.body)
    # The follow-up comment must NOT contain the owner auto-assignment text.
    comment = MockIssueTrackerService.add_comment_args[1]
    self.assertNotIn(
        'Assigning to <EMAIL> because this is the only CL in range',
        comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(
    file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
    mock.MagicMock(return_value=[]))
def testGet_WithFinish_SingleRevOwner_InvalidRepository_Skips(self):
    # Files a bug for a single-revision anomaly on master 'FakeMaster',
    # which — per the test name — presumably has no configured repository.
    # The bug should still be created, but owner auto-assignment is skipped.
    namespaced_stored_object.Set(
        'repositories',
        {"chromium": {
            "repository_url": "https://chromium.googlesource.com/chromium/src"
        }})
    MockIssueTrackerService.bug_id = 277761
    response = self._PostSampleBug(is_single_rev=True, master='FakeMaster')
    # The response page should have a bug number.
    self.assertIn('277761', response.body)
    # The follow-up comment must NOT contain the owner auto-assignment text.
    comment = MockIssueTrackerService.add_comment_args[1]
    self.assertNotIn(
        'Assigning to <EMAIL> because this is the only CL in range',
        comment)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(
    file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
    mock.MagicMock(return_value=[]))
@mock.patch.object(
    crrev_service, 'GetNumbering',
    mock.MagicMock(return_value={
        'git_sha': '852ba7672ce02911e9f8f2a22363283adc80940e'}))
@mock.patch('dashboard.services.gitiles_service.CommitInfo',
            mock.MagicMock(return_value={
                'author': {'email': '<EMAIL>'},
                'message': 'My first commit!'}))
def testGet_WithFinish_CreatesBugSingleRevDifferentMasterOwner(self):
    # Files a bug for a single-revision anomaly on master 'Foo'. The bug
    # should be created, but the comment must contain neither the owner
    # auto-assignment text nor the mocked commit message for this master.
    namespaced_stored_object.Set(
        'repositories',
        {"chromium": {
            "repository_url": "https://chromium.googlesource.com/chromium/src"
        }})
    MockIssueTrackerService.bug_id = 277761
    response = self._PostSampleBug(is_single_rev=True, master='Foo')
    # The response page should have a bug number.
    self.assertIn('277761', response.body)
    # Neither the assignment text nor the commit message should appear.
    comment = MockIssueTrackerService.add_comment_args[1]
    self.assertNotIn(
        'Assigning to <EMAIL> because this is the only CL in range',
        comment)
    self.assertNotIn('My first commit', comment)
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch.object(
      file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
      mock.MagicMock(return_value=[
          {
              'versions': [
                  {'branch_base_position': '112000', 'current_version': '2.0'},
                  {'branch_base_position': '111990', 'current_version': '1.0'}
              ]
          }
      ]))
  @mock.patch.object(
      file_bug.auto_bisect, 'StartNewBisectForBug',
      mock.MagicMock(return_value={'issue_id': 123, 'issue_url': 'foo.com'}))
  def testGet_WithFinish_LabelsBugWithMilestone(self):
    # Here, we expect the bug to have the following end revisions:
    # [112005, 112010] and the milestones are M-1 for rev 111990 and
    # M-2 for 112000. Hence the expected behavior is to label the bug
    # M-2 since 111995 (lowest possible revision introducing regression)
    # is less than 112010 (revision for M-2).
    self._PostSampleBug()
    self.assertIn('M-2', MockIssueTrackerService.new_bug_kwargs['labels'])
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch.object(
      file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
      mock.MagicMock(return_value=[
          {
              'versions': [
                  {'branch_base_position': '112000', 'current_version': '2.0'},
                  {'branch_base_position': '111990', 'current_version': '1.0'}
              ]
          }
      ]))
  @mock.patch.object(
      file_bug.auto_bisect, 'StartNewBisectForBug',
      mock.MagicMock(return_value={'issue_id': 123, 'issue_url': 'foo.com'}))
  def testGet_WithFinish_LabelsBugWithNoMilestoneBecauseNoCommitPos(self):
    # Here, we expect to return no Milestone label because the alerts do not
    # contain r_commit_pos (and therefore aren't chromium). Assuming
    # testGet_WithFinish_LabelsBugWithMilestone passes, M-2
    # would be the label that it would get if the alert was Chromium.
    self._PostSampleBug(has_commit_positions=False)
    labels = MockIssueTrackerService.new_bug_kwargs['labels']
    # No label of the form 'M-<n>' should have been attached to the bug.
    self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch.object(
      file_bug, '_GetAllCurrentVersionsFromOmahaProxy',
      mock.MagicMock(return_value=[
          {
              'versions': [
                  {'branch_base_position': '113000', 'current_version': '2.0'},
                  {'branch_base_position': '112000', 'current_version': '2.0'},
                  {'branch_base_position': '111990', 'current_version': '1.0'}
              ]
          }
      ]))
  @mock.patch.object(
      file_bug.auto_bisect, 'StartNewBisectForBug',
      mock.MagicMock(return_value={'issue_id': 123, 'issue_url': 'foo.com'}))
  def testGet_WithFinish_LabelsBugForClank(self):
    # Here, we expect to return M-2 even though the alert revisions aren't
    # even close to the branching points. We use r_commit_pos to determine
    # which revision to check. There are 3 branching points to ensure we are
    # actually changing the revision that is checked to r_commit_pos instead
    # of just displaying the highest one (previous behavior).
    self._PostSampleBug(master='ClankInternal')
    self.assertIn('M-2', MockIssueTrackerService.new_bug_kwargs['labels'])
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, '[]')))
  def testGet_WithFinish_SucceedsWithNoVersions(self):
    # Here, we test that we don't label the bug with an unexpected value when
    # there is no version information from omahaproxy (for whatever reason).
    # The urlfetch mock above makes omahaproxy return an empty JSON list.
    self._PostSampleBug()
    labels = MockIssueTrackerService.new_bug_kwargs['labels']
    self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, '[]')))
  def testGet_WithFinish_SucceedsWithComponents(self):
    # Here, we test that components are posted separately from labels:
    # the 'Foo>Bar' component must land in new_bug_kwargs['components'],
    # not in the labels list.
    self._PostSampleBug()
    self.assertIn('Foo>Bar', MockIssueTrackerService.new_bug_kwargs[
        'components'])
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, json.dumps([
              {
                  'versions': [
                      {'branch_base_position': '0', 'current_version': '1.0'}
                  ]
              }
          ]))))
  def testGet_WithFinish_SucceedsWithRevisionOutOfRange(self):
    # Here, we test that we label the bug with the highest milestone when the
    # revision introducing regression is beyond all milestones in the list.
    # The single mocked milestone (M-1) branches at position 0, so every
    # alert revision is "out of range" above it.
    self._PostSampleBug()
    self.assertIn('M-1', MockIssueTrackerService.new_bug_kwargs['labels'])
  @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(return_value=testing_common.FakeResponseObject(
          200, json.dumps([
              {
                  'versions': [
                      {'branch_base_position': 'N/A', 'current_version': 'N/A'}
                  ]
              }
          ]))))
  @mock.patch('logging.warn')
  def testGet_WithFinish_SucceedsWithNAAndLogsWarning(self, mock_warn):
    # A non-numeric branch_base_position ('N/A') must not produce an 'M-'
    # label; instead exactly one warning is expected to be logged.
    self._PostSampleBug()
    labels = MockIssueTrackerService.new_bug_kwargs['labels']
    self.assertEqual(0, len([x for x in labels if x.startswith(u'M-')]))
    self.assertEqual(1, mock_warn.call_count)
def testGet_OwnersAreEmptyEvenWithOwnership(self):
ownership_samples = [
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'emails': ['<EMAIL>']
},
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'emails': ['<EMAIL>']
}
]
test_paths = ['ChromiumPerf/linux/scrolling/first_paint',
'ChromiumPerf/linux/scrolling/mean_frame_time']
test_keys = [utils.TestKey(test_path) for test_path in test_paths]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
anomaly_1 = anomaly.Anomaly(
start_revision=1476193324, end_revision=1476201840, test=test_keys[0],
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[0]).put()
anomaly_2 = anomaly.Anomaly(
start_revision=1476193320, end_revision=1476201870, test=test_keys[1],
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[1]).put()
response = self.testapp.post(
'/file_bug',
[
('keys', '%s,%s' % (anomaly_1.urlsafe(), anomaly_2.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="text" name="owner" value="">',
response.body)
response_changed_order = self.testapp.post(
'/file_bug',
[
('keys', '%s,%s' % (anomaly_2.urlsafe(), anomaly_1.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="text" name="owner" value="">',
response_changed_order.body)
def testGet_OwnersNotFilledWhenNoOwnership(self):
test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
anomaly_entity = anomaly.Anomaly(
start_revision=1476193324, end_revision=1476201840, test=test_key,
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
).put()
response = self.testapp.post(
'/file_bug',
[
('keys', '%s' % (anomaly_entity.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="text" name="owner" value="">',
response.body)
def testGet_WithAllOwnershipComponents(self):
ownership_samples = [
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': 'Abc>Xyz'
},
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'component': 'Def>123'
}
]
test_paths = ['ChromiumPerf/linux/scrolling/first_paint',
'ChromiumPerf/linux/scrolling/mean_frame_time']
test_keys = [utils.TestKey(test_path) for test_path in test_paths]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
anomaly_1 = anomaly.Anomaly(
start_revision=1476193324, end_revision=1476201840, test=test_keys[0],
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[0]).put()
anomaly_2 = anomaly.Anomaly(
start_revision=1476193320, end_revision=1476201870, test=test_keys[1],
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[1]).put()
response = self.testapp.post(
'/file_bug',
[
('keys', '%s' % (anomaly_1.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="checkbox" checked name="component" value="Abc>Xyz">',
response.body)
response_with_both_anomalies = self.testapp.post(
'/file_bug',
[
('keys', '%s,%s' % (anomaly_1.urlsafe(), anomaly_2.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertIn(
'<input type="checkbox" checked name="component" value="Abc>Xyz">',
response_with_both_anomalies.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="Def>123">',
response_with_both_anomalies.body)
def testGet_UsesOnlyMostRecentComponents(self):
ownership_samples = [
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'component': 'Abc>Def'
},
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': '123>456'
},
]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
now_datetime = datetime.datetime.now()
older_alert = anomaly.Anomaly(
start_revision=1476193320, end_revision=1476201870, test=test_key,
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[0], timestamp=now_datetime).put()
newer_alert = anomaly.Anomaly(
start_revision=1476193320, end_revision=1476201870, test=test_key,
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[1],
timestamp=now_datetime + datetime.timedelta(10)).put()
response = self.testapp.post(
'/file_bug',
[
('keys', '%s,%s' % (older_alert.urlsafe(),
newer_alert.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertNotIn(
'<input type="checkbox" checked name="component" value="Abc>Def">',
response.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="123>456">',
response.body)
response_inverted_order = self.testapp.post(
'/file_bug',
[
('keys', '%s,%s' % (newer_alert.urlsafe(),
older_alert.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
('label', 'two'),
('component', 'Foo>Bar'),
])
self.assertNotIn(
'<input type="checkbox" checked name="component" value="Abc>Def">',
response_inverted_order.body)
self.assertIn(
'<input type="checkbox" checked name="component" value="123>456">',
response_inverted_order.body)
def testGet_ComponentsChosenPerTest(self):
ownership_samples = [
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
'component': 'Abc>Def'
},
{
'type': 'Ownership',
'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
'component': '123>456'
},
]
subscription = Subscription(
name='Sheriff',
bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
test_paths = ['ChromiumPerf/linux/scrolling/first_paint',
'ChromiumPerf/linux/scrolling/mean_frame_time']
test_keys = [utils.TestKey(test_path) for test_path in test_paths]
now_datetime = datetime.datetime.now()
alert_test_key_0 = anomaly.Anomaly(
start_revision=1476193320, end_revision=1476201870, test=test_keys[0],
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[0], timestamp=now_datetime).put()
alert_test_key_1 = anomaly.Anomaly(
start_revision=1476193320, end_revision=1476201870, test=test_keys[1],
median_before_anomaly=100, median_after_anomaly=200,
subscriptions=[subscription], subscription_names=[subscription.name],
ownership=ownership_samples[1],
timestamp=now_datetime + datetime.timedelta(10)).put()
response = self.testapp.post(
'/file_bug',
[
('keys', '%s,%s' % (alert_test_key_0.urlsafe(),
alert_test_key_1.urlsafe())),
('summary', 's'),
('description', 'd\n'),
('label', 'one'),
| |
import torch
import torch.nn.functional as F
from torch import nn as nn
from torch.nn.modules.loss import CrossEntropyLoss, _Loss
import math
import numpy as np
from typing import List
from tyche.utils import param_scheduler as p_scheduler
def kullback_leibler(mean, sigma, reduction='mean'):
    """KL divergence between a diagonal Gaussian posterior N(mean, sigma^2)
    and the standard-normal prior N(0, I).

    mean, sigma: [B, D]. Returns a scalar for 'mean'/'sum'; otherwise the
    per-sample divergences of shape [B].
    """
    per_dim = -0.5 * (1 + 2.0 * torch.log(sigma) - mean * mean - sigma * sigma)
    per_sample = per_dim.sum(dim=1)  # [B]
    if reduction == 'sum':
        return torch.sum(per_sample)
    if reduction == 'mean':
        return torch.mean(per_sample)
    return per_sample
def kullback_leibler_two_gaussians(mean1, sigma1, mean2, sigma2, reduction='mean'):
    """KL(N(mean1, sigma1^2) || N(mean2, sigma2^2)) for diagonal Gaussians.

    All inputs are [B, D]; the divergence is summed over D per sample.
    """
    ratio = ((mean1 - mean2) * (mean1 - mean2) + sigma1 * sigma1) / (sigma2 * sigma2)
    per_dim = -0.5 * (1 - 2.0 * torch.log(sigma2 / sigma1) - ratio)  # [B, D]
    per_sample = per_dim.sum(dim=1)
    if reduction == 'sum':
        return torch.sum(per_sample)
    if reduction == 'mean':
        return torch.mean(per_sample)
    return per_sample
def log_of_gaussian_pdf(x, mean, sigma, reduction='mean'):
    """Log-density of x under a diagonal Gaussian N(mean, sigma^2).

    x, mean, sigma: [B, D]. The constant factor -0.5 * log(2 * pi) per
    dimension is deliberately omitted.
    """
    quad = (x - mean) * (x - mean) / (2 * sigma * sigma)
    per_sample = -(torch.log(sigma) + quad).sum(dim=-1)
    if reduction == 'sum':
        return torch.sum(per_sample)
    if reduction == 'mean':
        return torch.mean(per_sample)
    return per_sample
def log_multinomial_pdf(x, pi, reduction='mean'):
    """Log-likelihood of counts (or one-hot) x under class probabilities pi.

    x, pi: [B, n_classes]. Note pi must be strictly positive, since
    log(pi) is taken directly.
    """
    per_sample = (x * torch.log(pi)).sum(dim=-1)
    if reduction == 'sum':
        return torch.sum(per_sample)
    if reduction == 'mean':
        return torch.mean(per_sample)
    return per_sample
class SlicedWasserstein(object):
    """Sliced-Wasserstein distance between two batches of embeddings.

    Projects both batches onto `n_projections` random unit directions,
    sorts the 1-D projections, and averages the p-norm cost between them.
    """
    n_projections: int
    cost_p_norm: int

    def __init__(self, n_projections: int, cost_p_norm: int):
        self.n_projections = n_projections
        self.cost_p_norm = cost_p_norm

    def __call__(self, x: torch.Tensor, y: torch.Tensor, device: torch.device):
        """x, y: [B, D]; returns the (stochastic) sliced distance as a scalar."""
        batch_size, emb_dim = x.shape
        directions = self._get_projections(emb_dim, device)  # [N, D]
        # Project each batch onto every direction and sort along the batch
        # axis so matching quantiles are compared.
        proj_x, _ = torch.sort(torch.einsum('bi,ni->bn', x, directions), dim=0)
        proj_y, _ = torch.sort(torch.einsum('bi,ni->bn', y, directions), dim=0)
        pointwise = torch.abs(proj_x - proj_y) ** self.cost_p_norm
        return pointwise.sum() / float(batch_size) / float(self.n_projections)

    def _get_projections(self, emb_dim, device):
        # Random directions on the unit sphere, one row per projection.
        directions = torch.randn(self.n_projections, emb_dim, device=device)
        return torch.nn.functional.normalize(directions, dim=-1)
class MMDPenalty(object):
    """Maximum Mean Discrepancy penalty with an inverse-multiquadric kernel,
    summed over a list of kernel scales (WAE-style estimator)."""
    scales: List[float]
    latent_dim: int

    def __init__(self, latent_dim: int, scales: List[float]):
        self.scales = scales
        self.latent_dim = latent_dim

    def __call__(self, z_prior: torch.Tensor, z_post: torch.Tensor):
        """z_prior, z_post: [B, latent_dim]; returns a scalar penalty."""
        n = z_prior.size(0)
        sq_prior = z_prior.pow(2).sum(1, keepdim=True)
        sq_post = z_post.pow(2).sum(1, keepdim=True)
        # Pairwise squared Euclidean distances within and across samples.
        d_prior = sq_prior + sq_prior.t() - 2 * torch.mm(z_prior, z_prior.t())
        d_post = sq_post + sq_post.t() - 2 * torch.mm(z_post, z_post.t())
        d_cross = sq_prior + sq_post.t() - 2 * torch.mm(z_prior, z_post.t())

        off_diagonal = 1 - torch.eye(n, device=z_prior.device)
        penalty = 0
        for scale in self.scales:
            c = 2 * self.latent_dim * 1.0 * scale
            within = c / (c + d_prior) + c / (c + d_post)
            # Exclude self-similarity terms on the diagonal.
            within = (off_diagonal * within).sum() / (n - 1)
            between = (c / (c + d_cross)).sum() * 2. / n
            penalty += within - between
        return penalty
def mim_reg(mean, sigma, reduction='mean'):
    """MIM-style regularizer for a Gaussian posterior (mean, sigma) against
    a standard-normal prior.

    mean, sigma: [B, D]. Per sample it computes
    sum_d 0.25 * (1 + 2*log(sigma) + mean^2 + sigma^2) + 0.5*D*log(2*pi).
    """
    D = mean.size(-1)
    per_dim = 0.25 * (1 + 2.0 * torch.log(sigma) + mean * mean + sigma * sigma)
    log_norm_const = 0.5 * D * torch.log(torch.tensor(2 * math.pi))
    per_sample = per_dim.sum(dim=1) + log_norm_const
    if reduction == 'sum':
        return torch.sum(per_sample)
    if reduction == 'mean':
        return torch.mean(per_sample)
    return per_sample
def kullback_leibler_weibull_gamma(k, l, a, b, device, reduction='mean'):
    """
    (negative) Kullback-Leibler divergence between Weibull and Gamma distributions:
    k: shape parameter of Weibull distr. (tensor)
    l: scale parameter of Weibull distr. (tensor)
    a: shape parameter of Gamma distr. (scalar)
    b: inverse-scale parameter of Gamma distr. (scalar)
    device: device on which the broadcast Gamma parameters are allocated
    reduction: 'mean' | 'sum' | anything else returns the element-wise tensor
    """
    # Clamp the Weibull parameters away from zero so log() and 1/k stay
    # finite (replaces torch.max against a materialized epsilon tensor).
    k = torch.clamp(k, min=1e-8)
    l = torch.clamp(l, min=1e-8)
    # Broadcast the scalar Gamma parameters to tensors created directly on
    # the target device (avoids the CPU allocate-fill-copy of
    # ones().fill_().to(device)).
    a = torch.full(k.shape, float(a), device=device)
    b = torch.full(k.shape, float(b), device=device)
    kl = -(a * torch.log(l) - np.euler_gamma * (a / k) - torch.log(k)
           - b * l * torch.exp(torch.lgamma(1 + (1/k))) + np.euler_gamma
           + 1 + a * torch.log(b) - torch.lgamma(a))
    if reduction == 'mean':
        return torch.mean(kl)
    elif reduction == 'sum':
        return torch.sum(kl)
    else:
        return kl
def smim_reg_weibull_gamma(k, l, a, b, device, reduction='mean'):
    """
    (negative) E_q(w)[log q(w) + log p(w)] for q = Weibull, p = Gamma:
    k: shape parameter of Weibull distr. (tensor)
    l: scale parameter of Weibull distr. (tensor)
    a: shape parameter of Gamma distr. (scalar)
    b: inverse-scale parameter of Gamma distr. (scalar)
    device: device on which the broadcast Gamma parameters are allocated
    reduction: 'mean' | 'sum' | anything else returns the element-wise tensor
    """
    # Clamp the Weibull parameters away from zero so log() and 1/k stay
    # finite (replaces torch.max against a materialized epsilon tensor).
    k = torch.clamp(k, min=1e-8)
    l = torch.clamp(l, min=1e-8)
    # Broadcast the scalar Gamma parameters to tensors created directly on
    # the target device (avoids the CPU allocate-fill-copy of
    # ones().fill_().to(device)).
    a = torch.full(k.shape, float(a), device=device)
    b = torch.full(k.shape, float(b), device=device)
    reg = (torch.log(k) + a * torch.log(b) - torch.lgamma(a) +
           a * torch.log(l) - 2.0 * torch.log(l) - np.euler_gamma
           - np.euler_gamma * (a / k) + 2 * (np.euler_gamma / k) - 1
           - l * b * torch.exp(torch.lgamma((k + 1) / k)))
    if reduction == 'mean':
        return torch.mean(reg)
    elif reduction == 'sum':
        return torch.sum(reg)
    else:
        return reg
class ELBO(CrossEntropyLoss):
r"""This criterion combines :func:`nn.LogSoftmax` and :func:`nn.NLLLoss` in one single class.
It is useful when training a classification problem with `C` classes.
If provided, the optional argument :attr:`weight` should be a 1D `Tensor`
assigning weight to each of the classes.
This is particularly useful when you have an unbalanced training set.
The `input` is expected to contain raw, unnormalized scores for each class.
`input` has to be a Tensor of size either :math:`(minibatch, C)` or
:math:`(minibatch, C, d_1, d_2, ..., d_K)`
with :math:`K \geq 1` for the `K`-dimensional case (described later).
This criterion expects a class index in the range :math:`[0, C-1]` as the
`target` for each value of a 1D tensor of size `minibatch`; if `ignore_index`
is specified, this criterion also accepts this class index (this index may not
necessarily be in the class range).
The loss can be described as:
.. math::
\text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right)
= -x[class] + \log\left(\sum_j \exp(x[j])\right)
or in the case of the :attr:`weight` argument being specified:
.. math::
\text{loss}(x, class) = weight[class] \left(-x[class] + \log\left(\sum_j \exp(x[j])\right)\right)
The losses are averaged across observations for each minibatch.
Can also be used for higher dimension inputs, such as 2D images, by providing
an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
where :math:`K` is the number of dimensions, and a target of appropriate shape
(see below).
Args:
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size `C`
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When :attr:`size_average` is
``True``, the loss is averaged over non-ignored targets.
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, C)` where `C = number of classes`, or
:math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
in the case of `K`-dimensional loss.
- Target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, or
:math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of
K-dimensional loss.
- Output: scalar.
If :attr:`reduction` is ``'none'``, then the same size as the target:
:math:`(N)`, or
:math:`(N, | |
[-1.5162963554188984, -0.969321871044044769, 3.25761385947302129],
[-1.51757954405924744, -0.948487215037029707, 3.18759453339230348],
[-1.51880873597402544, -0.928529358237395464, 3.12052187893353228],
[-1.51998726945227491, -0.909394092834694523, 3.05621371913374107],
[-1.52111821315522922, -0.891031589295950166, 2.99450259116960371],
[-1.52220439279935937, -0.873395963081599613, 2.93523429021351845],
[-1.52324841473204242, -0.856444891817212617, 2.87826658285713233],
[-1.52425268681420678, -0.840139276196806839, 2.82346806750426893],
[-1.25585150912462629, -4.8017851819470998, 17.3787686777393127],
[-1.28942990057131635, -4.29809966857105419, 15.5558145697129309],
[-1.31663266441368876, -3.89005105394953477, 14.0789924683327392],
[-1.3391182070314116, -3.55276259008333684, 12.8582676818016175],
[-1.35801565425719395, -3.26929694543398552, 11.8323400986668474],
[-1.3741203990337838, -3.02772291312528585, 10.9580279278880557],
[-1.38800891813433425, -2.81939311496036682, 10.2040343122219443],
[-1.40010923894679351, -2.637886971928741, 9.54712169455748239],
[-1.41074583356460104, -2.47833727518105551, 8.96967452286682132],
[-1.42016914984075115, -2.33698720859870424, 8.45809601265915667],
[-1.4285755820051862, -2.210890781674693, 8.00172394444571466],
[-1.43612130694859341, -2.09770527981255217, 7.59207949347542943],
[-1.44293207742731555, -1.99554436239947885, 7.22233555776119118],
[-1.44911028650570017, -1.90287209339132346, 6.88693323031244464],
[-1.4547401509301281, -1.8184251886626599, 6.58130039435216574],
[-1.45989157296036698, -1.74115508701695143, 6.3016420649344278],
[-1.46462305772957047, -1.67018418863898388, 6.04478200580493752],
[-1.46898394506149943, -1.60477237695794006, 5.80804156429777674],
[-1.47301613660464259, -1.54429111095648031, 5.58914590542326195],
[-1.47675544659262359, -1.48820316323287671, 5.38615067923898749],
[-1.48023266856577829, -1.43604661875784445, 5.19738410865819844],
[-1.48347442537694163, -1.38742212445325475, 5.02140084273274123],
[-1.48650385216529002, -1.34198264431588665, 4.85694487808172859],
[-1.48934114937723905, -1.29942516388618179, 4.70291953544960428],
[-1.49200403379566948, -1.25948392463270786, 4.55836297338222352],
[-1.49450810886796037, -1.22192486888575358, 4.42242808315959302],
[-1.49686717069123842, -1.18654104993817655, 4.29436587689163218],
[-1.49909346233029828, -1.15314881717715245, 4.17351168062961797],
[-1.50119788636783058, -1.12158462774608036, 4.05927359503485263],
[-1.50319018347690059, -1.09170236788825203, 3.95112280070324973],
[-1.50507908318789685, -1.06337109138275299, 3.848585373042964],
[-1.50687243177348584, -1.03647310121985781, 3.75123533941494713],
[-1.50857730120321976, -1.01090231523927088, 3.65868876400059229],
[-1.51020008235828307, -0.986562867872245119, 3.5705986871837796],
[-1.51174656509680072, -0.963367909130314226, 3.48665077881394492],
[-1.51322200728413203, -0.941238569124101088, 3.40655959056068935],
[-1.51463119452261408, -0.920103062093176716, 3.33006531319107468],
[-1.5159784920106758, -0.899895908499487973, 3.2569309611462236],
[-1.51726788971519455, -0.88055725742399682, 3.18693992013838878],
[-1.5185030418418739, -0.862032294495386875, 3.11989380430830421],
[-1.51968730142606834, -0.84427072301448236, 3.05561057829472293],
[-1.52082375073355869, -0.827226307930387428, 2.99392290677882578],
[-1.52191522805175894, -0.810856473962002555, 2.93467669999324787],
[-1.52296435136162334, -0.795121950510084385, 2.87772982857688264],
[-1.52397353930590063, -0.779986457125067312, 2.82295098521034005],
[-1.2546347298986078, -4.42812257611273363, 17.3619306005086749],
[-1.2883092471641453, -3.96402845281275473, 15.5422948920700073],
[-1.31559483178413639, -3.58798590486582647, 14.0678947353280943],
[-1.33815218060813867, -3.27710738108115596, 12.8489918566556973],
[-1.35711232239503965, -3.01580516087786643, 11.8244694016086331],
[-1.37327219120485622, -2.79309598211157217, 10.9512638298624552],
[-1.38720950335524629, -2.60101781879108707, 10.1981573645100543],
[-1.39935328437891693, -2.43365788587637732, 9.54196696087362817],
[-1.41002881402533742, -2.28653321193297998, 8.96511563512055787],
[-1.41948719792964684, -2.15618308546275506, 8.45403451425188557],
[-1.42792536989247032, -2.03989338556067157, 7.99808198255246605],
[-1.43549995265571528, -1.93550555414253789, 7.58879469353386682],
[-1.44233707004516321, -1.84128136601510195, 7.21935735591754479],
[-1.44853942610522268, -1.75580536497602746, 6.88422020184350725],
[-1.45419149993278807, -1.67791326915321726, 6.57881827613823855],
[-1.45936341650389734, -1.60663862367401555, 6.29936227032186835],
[-1.46411387114256075, -1.54117249593953765, 6.04268049449627043],
[-1.46849236699504382, -1.48083263898659867, 5.80609797202662126],
[-1.47254094669668101, -1.42503962583549804, 5.58734286629668198],
[-1.4762955467886214, -1.37329818305918283, 5.38447329274454489],
[-1.47978706740796362, -1.32518244842504229, 5.19581951653318619],
[-1.48304222471879554, -1.2803242227788163, 5.01993789014527447],
[-1.48608423587928007, -1.23840352989940028, 4.85557384014723148],
[-1.48893337371013512, -1.19914097211554527, 4.70163189483636668],
[-1.49160741909310191, -1.16229149539722232, 4.55715123820327594],
[-1.49412203244287101, -1.12763926976820472, 4.42128563688283549],
[-1.49649106065263093, -1.09499345901412015, 4.29328685388452946],
[-1.49872679222200222, -1.06418470453633285, 4.17249086236953826],
[-1.50084017049361629, -1.03506218654732174, 4.05830632308762418],
[-1.50284097280947981, -1.00749115495607588, 3.95020490339028063],
[-1.50473796177692165, -0.981350844637122743, 3.84771310334805161],
[-1.50653901358142894, -0.956532707035827934, 3.75040532216964184],
[-1.50825122730965466, -0.93293890349050812, 3.65789795076903346],
[-1.50988101848240763, -0.910481016170296087, 3.5698443175677026],
[-1.51143419939586332, -0.889078940821096064, 3.48593034713604721],
[-1.51291604839175697, -0.86865993209096104, 3.40587081707330919],
[-1.51433136979643646, -0.849157777455945895, 3.32940611910852047],
[-1.5156845459631425, -0.830512079979804252, 3.25629944692081574],
[-1.5169795826050132, -0.812667633538400103, 3.18633434649847658],
[-1.51822014840685515, -0.795573876894421939, 3.11931257565687936],
[-1.51940960974072059, -0.779184415251651608, 3.05505222813251676],
[-1.52055106117695749, -0.763456599754120946, 2.99338608486928237],
[-1.52164735237323945, -0.74835115690487175, 2.93416016103118471],
[-1.52270111183335222, -0.733831861124507268, 2.87723242215900488],
[-1.52371476795285954, -0.719865244702138529, 2.82247164693630159],
[-1.25354086000071385, -4.0555733705905439, 17.3467933714804623],
[-1.28730097751032257, -3.63084891092655093, 15.53013102355402],
[-1.31466036148068577, -3.28665090370171464, 14.0579022744696083],
[-1.33728172745531926, -3.00206102081806359, 12.8406337299536268],
[-1.35629778662817047, -2.76282882461293999, 11.8173723816398937],
[-1.37250683826548525, -2.55891105439327715, 10.945160464473064],
[-1.38648769821491058, -2.38302573130687767, 10.1928509689080542],
[-1.39867027828258483, -2.22976421175484507, 9.53730965118776908],
[-1.4093805759002751, -2.09502518039230123, 8.96099406704161616],
[-1.41887028156450556, -1.97564216420374184, 8.45036034779873191],
[-1.42733680907089466, -1.86913153568807622, 7.99478534128399954],
[-1.43493717795109221, -1.77351786038899029, 7.58581957557290831],
[-1.44179784238573716, -1.68721024108969231, 7.21665834938818396],
[-1.44802178336316878, -1.60891309262574289, 6.8817600916401096],
[-1.45369371367992151, -1.53756065869991687, 6.57656627198464427],
[-1.45888395675284466, -1.47226821323681567, 6.29729267570838047],
[-1.46365137740227058, -1.41229518872148896, 6.04077169357691446],
[-1.4680456243534159, -1.35701696368803137, 5.80433164922926181],
[-1.47210886593289225, -1.30590302623079157, 5.58570339855988607],
[-1.4758771477371424, -1.2584998934192686, 5.38294727139787099],
[-1.47938146496345535, -1.21441762049238844, 5.19439536765153509],
[-1.48264861699918593, -1.17331904942381593, 5.01860557049004985],
[-1.4857018941629998, -1.13491116915229129, 4.85432459169230057],
[-1.48856163384018858, -1.09893811894242122, 4.70045804511281062],
[-1.49124567410042674, -1.06517548150030472, 4.55604603678084885],
[-1.49376972618849457, -1.03342559673417833, 4.42024312057664481],
[-1.49614768232530571, -1.00351368936453422, 4.29230173496375755],
[-1.49839187155746667, -0.975284650126161723, 4.17155843531235515],
[-1.50051327360542586, -0.948600345382740517, 4.05742238638708574],
[-1.50252169854030337, -0.923337356644879148, 3.9493656936492334],
[-1.50442593849447825, -0.899385071926046664, 3.84691523946549996],
[-1.50623389635592608, -0.8766440666621792, 3.74964575785959342],
[-1.50795269541959609, -0.855024724206987385, 3.65717393399443269],
[-1.50958877320420837, -0.834446055540014719, 3.56915335574160775],
[-1.51114796203942303, -0.814834685413414417, 3.48527017715464948],
[-1.5126355585500153, -0.796123978184218539, 3.40523937941522625],
[-1.51405638378162966, -0.778253281383080697, 3.32880153537035905],
[-1.51541483540627775, -0.761167268925527662, 3.25572000026782504],
[-1.51671493319881545, -0.744815368981561199, 3.18577846459840464],
[-1.51796035877477609, -0.729151264040285407, 3.1187788157359484],
[-1.51915449041719963, -0.714132452760222081, 3.05453926385167662],
[-1.52030043368614343, -0.699719864876467756, 2.99289269476707309],
[-1.5214010483949203, -0.68587752181738193, 2.93368521831889328],
[-1.52245897244644257, -0.672572236823729686, 2.87677488568695283],
[-1.52347664294797314, -0.659773349308177437, 2.8220305531772496],
[-1.2525710182781149, -3.6840324067003376, 17.3333724735250883],
[-1.28640617447530992, -3.29847737044951206, 15.5193360279649575],
[-1.31383029368433291, -2.98597794019166596, 14.0490262086017861],
[-1.3365078428727819, -2.72756702627098369, 12.8332028586049773],
[-1.35557299756153049, -2.51032036585468621, 11.8110573213462988],
[-1.37182524755051549, -2.32512753822121221, 10.9397250673308051],
[-1.38584436881124917, -2.16538182626757658, 10.1881214925889481],
[-1.39806104780918883, -2.02617543160751978, 9.53315540571338005],
[-1.40880190974604935, -1.90378636452168815, 8.9573148450745439],
[-1.41831915701271094, -1.79534070507938104, 8.4470780173984874],
[-1.42681062372090173, -1.69858407585821625, 7.9918380769129076],
[-1.43443367699993241, -1.6117232325841937, 7.58315780930863159],
[-1.44131506052972314, -1.53331389418055641, 7.2142418721195174],
[-1.44755799806482921, -1.46217979602507997, 6.8795559402980011],
[-1.45324740731376756, -1.39735327626323791, 6.57454716481853474],
[-1.4584537857296731, -1.33803099608226894, 6.2954358365670755],
[-1.46323614681451497, -1.28354047966185525, 6.03905795680902813],
[-1.46764426701818107, -1.2333145100993117, 5.80274477001726119],
[-1.47172042496143463, -1.18687131045277017, 5.58422951568028303],
[-1.47550076195103474, -1.14379904027212009, 5.38157448448032483],
[-1.47901635662336051, -1.10374354971892563, 5.19311340142754485],
[-1.48229408142406038, -1.06639861972953987, 5.0174055058274849],
[-1.48535729090375379, -1.03149811868316221, 4.85319864840427773],
[-1.4882263791388084, -0.9988096504287306, 4.6993994052671777],
[-1.49091923441781504, -0.96812937299858115, 4.5550486999583244],
[-1.49345161262550508, -0.939277743789625807, 4.41930178453018918],
[-1.49583744579381905, -0.91209600353281628, 4.2914116966219007],
[-1.49808909858439021, -0.886443253600231018, 4.17071550818908676],
[-1.50021758267307614, -0.862194013030503581, 4.05662283130851886],
[-1.50223273688327197, -0.839236165856576566, 3.94860616035519296],
[-1.50414337928666852, -0.817469227873189319, 3.84619271714335564],
[-1.50595743623225187, -0.796802876313360775, 3.7489575330543623],
[-1.50768205228590091, -0.777155697054588201, 3.65651755464183692],
[-1.50932368429618102, -0.758454112711648643, 3.56852660030830693],
[-1.51088818219741361, -0.740631461861477325, 3.48467102805825091],
[-1.51238085868169359, -0.723627205110858207, 3.40466600004658781],
[-1.51380654948842563, -0.7073862380787036, 3.32825225016030046],
[-1.51516966575331469, -0.691858294864527323, 3.25519327733760067],
[-1.51647423961059014, -0.676997428397584811, 3.18527290061063661],
[-1.5177239640416289, -0.66276155635005618, 3.11829312262701475],
[-1.51892222779947916, -0.649112063162170516, 3.05407225717801234],
[-1.52007214610475616, -0.636013450253036017, 2.99244328344053434],
[-1.52117658769855835, -0.623433027745310731, 2.93325239554168737],
[-1.52223819874688049, -0.611340642067020323, 2.8763577209253306],
[-1.52325942401601133, -0.599708434651972988, 2.82162818503753288],
[-1.25172628087776361, -3.31339309644113911, 17.3216827986172888],
[-1.28562587606116763, -2.96682894475654058, 15.5099224278661367],
[-1.31310562313446111, -2.68589786550230691, 14.0412771746537253],
[-1.33583147707461758, -2.4535680191166449, 12.8267083666042367],
[-1.35493886161885801, -2.25823143603143039, 11.8055321183643098],
[-1.37122828343303249, -2.09170416116903235, 10.9349645314447752],
[-1.38528033967041253, -1.94805047766151773, 10.1839749971082671],
[-1.3975263800087292, -1.82286049566355968, 9.52950959121894137],
[-1.40829356751202384, -1.71278947400111003, 8.9540827501946918],
[-1.41783454341673631, -1.61525454313299055, 8.44419180604524477],
[-1.42634750234676377, -1.52822946680010396, 7.98924404588276627],
[-1.43399010969321217, -1.45010235811673116, 7.58081288326579994],
[-1.44088935763135129, -1.37957491688108091, 7.21211109325053901],
[-1.44714867835896843, -1.31558970759906213, 6.87761063805954098],
[-1.45285316557095046, -1.2572767778979379, 6.57276360001088555],
[-1.45807346622345979, -1.20391387119368254, 6.29379418218475095],
[-1.46286872156672798, -1.15489635913162747, 6.03754152190478521],
[-1.4672888178122474, -1.10971423195884245, 5.80133940151817118],
[-1.47137612837868104, -1.06793428672646207, 5.58292313227556036],
[-1.47516687684790671, -1.02918619314970239, 5.38035670974372238],
[-1.47869221359237879, -0.993151486741149836, 5.19197527235234446],
[-1.48197907387669958, -0.959554796035272939, 5.01633923927328773],
[-1.48505086748739745, -0.928156792179623324, 4.85219745245014167],
[-1.4879280372570638, -0.898748478880105672, 4.69845732570099628],
[-1.49062851466849144, -0.871146534546521023, 4.55416049449042415],
[-1.49316809400544637, -0.845189487172894216, 4.41846281905385307],
[-1.49556074154804008, -0.820734553288558488, 4.29061785913629734],
[-1.49781885259882985, -0.797655010259731956, 4.16996313696893228],
[-1.49995346632810533, -0.775837999824882107, 4.05590865464007777],
[-1.50197444629951549, -0.755182682496963076, 3.94792724572023479],
[-1.50389063290604619, -0.735598679138827038, 3.8455464281646452],
[-1.5057099726866463, -0.717004748898402933, 3.74834149285220652],
[-1.50743962851355628, -0.699327662712474596, 3.65592961451354803],
[-1.50908607387208726, -0.682501239439637364, 3.56796481284832678],
[-1.51065517384939829, -0.666465517874734625, 3.48413362400069593],
[-1.51215225496786254, -0.651166042809127377, 3.40415136824104936],
[-1.5135821656155668, -0.636553247221500085, 3.32775892019684205],
[-1.51494932851856867, -0.622581915829548804, 3.25471990442003012],
[-1.51625778645134335, -0.609210717770629073, 3.1848182523453441],
[-1.51751124218062872, -0.596401798236928404, 3.11785606744972021],
[-1.51871309347396832, -0.584120420566910936, 3.05365175418590651],
[-1.5198664638699908, -0.572334651666523753, 2.99203837343443801],
[-1.5209742297971327, -0.561015084761237404, 2.93286219311291374],
[-1.52203904453673622, -0.550134594410868538, 2.87598140744792952],
[-1.52306335945054649, -0.539668119490351095, 2.82126500244678002],
[-1.20807844068863446, -3.33263018121002608, 19.6000312532414647],
[-1.25100767582920858, -2.94354747253931448, 17.3117385728718389],
[-1.2849610702487082, -2.63581757999735045, 15.5019021423594161],
[-1.31248729423210198, -2.38634053496745846, 14.0346652712773636],
[-1.33525353054661, -2.18000576415773084, 12.8211589004526534],
[-1.35439623664280595, -2.00651294317452766, 11.8008042470451908],
[-1.37071676315359503, -1.85859900088623053, 10.930885373962143],
[-1.38479638979094122, -1.7309954872386768, 10.1804172093224672],
[-1.39706701807475908, -1.619787847043199, 9.52637727542281354],
[-1.40785625897247635, -1.52200676645673094, 8.95130229522365006],
[-1.41741711940084425, -1.43535910825401936, 8.44170575541895118],
[-1.42594809454583915, -1.35804580432937061, 7.98700688671211001],
[-1.43360709856855029, -1.2886355942189216, 7.57878808850003338],
[-1.44052133098067769, -1.22597560083461921, 7.21026900240860424],
[-1.44679439795220621, -1.1691267862240049, 6.87592691147992952],
[-1.45251154010035766, -1.11731656930796741, 6.57121807324248408],
[-1.45774352922344796, -1.06990350768693432, 6.29237000458378226],
[-1.46254961326986743, -1.02635060580341575, 6.03622450038133884],
[-1.4669797701435523, -0.986204887491463733, 5.80011749455917069],
[-1.47107645246849117, -0.949081582237736421, 5.58178605553568641],
[-1.47487595257936932, -0.914651753537593426, 5.37929562549297113],
[-1.47840948080282053, -0.882632525852430128, 5.19098254266960435],
[-1.48170402491171904, -0.852779294913219577, 5.01540822820837207],
[-1.48478304086782775, -0.824879467148793122, 4.85132236616883983],
[-1.48766701226664155, -0.798747389136451935, 4.69763308235875776],
[-1.49037390670723813, -0.774220211276487436, 4.55338261756984064],
[-1.49291955058336279, -0.75115449085955377, 4.41772734936775002],
[-1.49531793881654695, -0.729423384788559392, 4.28992128178771459],
[-1.497581492336167, -0.708914315898777292, 4.16930232037968374],
[-1.49972127330958016, -0.689527022211301244, 4.05528079938021513],
[-1.50174716599536451, -0.671171917763291437, 3.94732984134535725],
[-1.5036680294592859, -0.653768708460559012, 3.84497721663366265],
[-1.50549182713130358, -0.63724521783335597, 3.74779843738242446],
[-1.50722573719977082, -0.621536386474132341, 3.65541087295099087],
[-1.50887624707017176, -0.606583415907606782, 3.56746871480661243],
[-1.51044923450890156, -0.592333033140745813, 3.48365865115901086],
[-1.51195003761163327, -0.578736856502060526, 3.4036961373027439],
[-1.51338351535158044, -0.565750846860403889, 3.32732216809775005],
[-1.51475410015475598, -0.553334831106760183, 3.25430047544663337],
[-1.51606584370093489, -0.541452087036048191, 3.18441508688075858],
[-1.51732245694710621, -0.530068980592875549, 3.11746819211184878],
[-1.51852734520622579, -0.519154647933752433, 3.05327827316038114],
[-1.5196836389796804, -0.508680715976462028, 2.99167846083656741],
[-1.5207942211311376, -0.49862105610856966, 2.93251508623852519],
[-1.52186175089856346, -0.488951566553755435, 2.87564640079427392],
[-1.52288868516539977, -0.479649979579653496, 2.82094144240283695],
[-1.20743694534676327, -2.91450297152666993, 19.5896235443328273],
[-1.25041617783044168, -2.57438624847443887, 17.3035532843889044],
[-1.28441269002976677, -2.30535611030983834, 15.4952864271539816],
[-1.31197619632152152, -2.08723485778423878, 14.0292000083926336],
[-1.33477484957103099, -1.90682121367290125, 12.8165625861871426],
[-1.35394592765223543, -1.75511509140104627, 11.7968807214884581],
[-1.37029145279717324, -1.62576952026783284, 10.927493704085931],
[-1.38439324882673231, -1.51418011590423873, 10.1774534933277749],
[-1.39668365771997038, -1.41692544986083946, 9.5237632022789267],
[-1.40749064828182746, -1.33141007269902589, 8.9489777029270261],
[-1.41706751979372436, -1.25562944791848996, 8.4396236463664227],
[-1.42561300788727618, -1.18801083990606338, 7.98513000251146909],
[-1.43328522583432649, -1.12730298661126804, 7.57708650286573704],
[-1.44021153916347688, -1.07249795469620635, 7.20871839549378635],
[-1.4464956933956985, -1.02277473270402908, 6.87450731053208219],
[-1.45222304686840742, -0.977457820007581968, 6.56991291876524652],
[-1.45746447143531155, -0.935986357802493729, 6.29116544780104636],
[-1.46227930057882416, -0.897890798601032492, 6.03510886773979571],
[-1.46671758572313538, -0.862775050425373835, 5.79908087464483391],
[-1.47082184301217489, -0.830302653313324646, 5.58081997691313259],
[-1.47462841992744398, -0.800185964301713848, 5.37839280291366251],
[-1.47816857489248732, -0.772177613749806757, 5.19013667527548606],
[-1.48146933780980361, -0.74606369529990102, 5.01461383769433411],
[-1.48455420169438179, -0.721658292490324538, 4.85057466595282438],
[-1.48744368285580375, -0.69879904563695483, 4.69692787103124676],
[-1.49015577788144959, -0.677343535400658947, 4.55271619151442852],
[-1.49270633894042803, -0.657166312741068959, 4.41709643063818458],
[-1.49510938394766746, -0.638156444367906861, 4.28932295821571685],
[-1.49737735440777353, -0.620215472239906251, 4.16873399555537016],
[-1.49952133095357087, -0.603255707854884804, 4.05474015065319016],
[-1.50155121446200179, -0.587198798951620859, 3.94681478438196587],
[-1.50347587899464852, -0.571974519182746666, 3.84448587536403252],
[-1.50530330154738445, -0.557519741313846184, 3.74732911840235117],
[-1.50704067261056984, -0.543777562282164451, 3.65496204362511934],
[-1.50869449077120454, -0.530696554542634868, 3.56703898446156664],
[-1.51027064398117061, -0.518230122934715376, 3.48324675486833701],
[-1.5117744796338437, -0.506335950116598377, 3.40330092185513555],
[-1.51321086520678216, -0.494975516656424197, 3.3269425798121075],
[-1.51458424091973831, -0.484113684312245163, 3.25393554955587572],
[-1.51589866560814746, -0.473718333002546066, 3.18406393825282752],
[-1.51715785681050552, -0.463760043566530111, 3.11713000711503385],
[-1.51836522590350853, -0.454211819714724796, 3.05295230251111427],
[-1.51952390898436263, -0.445048843635587388, 2.99136401329348312],
[-1.52063679408889008, -0.436248260599271742, 2.93221152302796195],
[-1.52170654524284132, -0.4277889886224856, 2.87535313066970666],
[-1.52273562376814153, -0.419651549857361805, 2.82065791725555348],
[-1.20693597054516544, -2.49710890457620449, 19.5814956600517363],
[-1.24995270329853758, -2.20579888817389014, 17.2971396147635872],
[-1.28398160868782707, -1.97535632105819547, 15.4900858176313481],
[-1.31157315920488271, -1.78850885346120414, 14.0248902592249394],
[-1.33439622197028629, -1.63395455751463659, 12.8129269885106076],
[-1.35358868280505895, -1.50398742533895491, 11.7937680603663022],
[-1.36995306346325085, -1.39317260691178046, 10.9247951925332121],
[-1.38407159345175246, -1.29756711886101805, 10.1750888237351482],
[-1.39637694372278753, -1.21424082062851091, 9.52167176842857188],
[-1.40719735069096985, -1.14097082488457047, 8.94711288513650516],
[-1.41678633250303276, -1.07604025253394919, 8.4379489802870502],
[-1.42534280493399335, -1.01810200352428093, 7.98361654430290191],
[-1.43302503052922825, -0.96608429024442366, 7.57571097600002208],
[-1.43996049934911441, -0.91912372298879641, 7.20746186110381082],
[-1.44625306149295918, -0.87651700696542989, 6.87335419628724509],
[-1.45198816367902994, -0.837685479045594206, 6.56885029818253408],
[-1.45723675290620291, -0.802148671324515394, 6.29018249763640735],
[-1.46205822691595877, -0.769504329955767896, 6.03419645406981253],
[-1.46650269238122322, -0.739413122209020113, 5.7982312333223982],
[-1.47061271319153097, -0.711586796705579672, 5.58002646416625314],
[-1.47442467828948898, -0.685778920134646142, 5.3776496987225153],
[-1.4779698822667855, -0.661777559223933776, 5.18943902691434733],
[-1.48127538671266623, -0.639399447501870277, 5.01395733416049882],
[-1.48436471251558766, -0.618485296881494784, 4.84995553637905452],
[-1.48725840059837067, -0.598896000240954796, 4.69634280188948683],
[-1.48997446936169231, -0.580509533517542531, 4.55216225866672897],
[-1.49252879037299113, -0.563218411461506085, 4.41657104321064331],
[-1.49493539885409299, -0.546927584946619305, 4.28882381195640594],
[-1.49720675179825968, -0.531552692946127836, 4.16825903385255181],
[-1.49935394374046949, -0.517018601289817026, 4.05428753178098233],
[-1.50138688807057163, -0.503258174772258648, 3.94638285383912768],
[-1.50331447014255692, -0.490211240263877301, 3.84407314240257092],
[-1.50514467716900402, -0.477823707037779044, 3.74693423602125053],
[-1.50688470890602533, -0.466046817187430573, 3.65458379144476764],
[-1.50854107236328217, -0.454836504230135275, 3.56667625400464416],
[-1.51011966316573387, -0.444152842107568779, 3.48289853686018525],
[-1.51162583571169717, -0.433959570060774347, 3.40296629522657179],
[-1.51306446388653582, -0.424223681463514746, 3.3266207021430616],
[-1.51443999378288985, -0.414915066789832865, 3.25362564874360549],
[-1.51575648962875986, -0.406006202579132114, 3.18376530522469414],
[-1.51701767392293974, -0.397471879630464542, 3.11684198943555923],
[-1.51822696261260193, -0.389288964772462054, 3.05267429875738916],
[-1.51938749601307377, -0.381436191467717234, 2.99109546809268245],
[-1.52050216605929767, -0.373893975260483014, 2.93195192266762072],
[-1.5215736403866893, -0.366644250695587814, 2.87510199920456788],
[-1.52260438366374795, -0.359670326849704347, 2.82041481304643105],
[-1.20657641266197602, -2.08030415976202754, 19.575662143360681],
[-1.24961810574623566, -1.83767368492093497, 17.2925093751059968],
[-1.28366863538470599, -1.64572901972398222, 15.4863100756026721],
[-1.31127894894430552, -1.49008971470943807, 14.0217442154158114],
[-1.33411837312044268, -1.3613452786943292, 12.810259072513638],
[-1.35332518961463455, -1.25307887927280981, 11.7914722539571404],
[-1.36970224767426307, -1.16076461667310427, 10.9227950428939113],
[-1.38383204394887604, -1.08111878433505937, 10.1733277605929082],
[-1.3961474666850282, -1.01170106281523764, 9.52010700109138597],
[-1.40697692946244035, -0.950660087474621829, 8.94571142313619205],
[-1.41657409557746994, -0.896565883276879583, 8.43668496163543757],
[-1.42513800044175643, -0.848296428834378835, 7.98246939533150446],
[-1.43282700584911149, -0.804958992050062605, 7.57466411519108895],
[-1.43976868473672481, -0.765834406774853593, 7.20650176775137208],
[-1.44606695685748443, -0.730336846897719494, 6.8724697293075403],
[-1.4518073278356427, -0.697984292228674308, 6.56803218987182547],
[-1.45706079478447492, -0.668376511369025206, 6.28942297198252742],
[-1.46188679832248969, -0.641178420316881437, 6.03348893518185392],
[-1.46633548200473762, -0.616107345380141891, 5.79757012002713523],
[-1.47044944160737212, -0.592923161938456511, 5.57940695384087704],
[-1.47426509377301795, -0.571420578981789995, 5.37706764821864436],
[-1.47781375726547437, -0.551423043755774001, 5.18889084174183335],
[-1.48112251485772894, -0.532777883042348521, 5.01343987942849978],
[-1.48421490607805184, -0.51535239794376797, 4.84946606465085672],
[-1.48711148831330076, -0.499030700776275482, 4.69587889430475247],
[-1.48983029455893545, -0.483711134597057002, 4.55172177655830623],
[-1.4923872093643944, -0.469304153888174314, 4.41615208808772053],
[-1.49479627953645244, -0.455730573029406172, 4.28842469220671241],
[-1.49706997243838846, -0.44292011018887234, 4.16787823687728842],
[-1.49921939191483489, -0.430810170090469791, 4.05392370055132112],
[-1.50125445973660399, -0.419344821155476111, 3.94603476707302958],
[-1.50318406882255595, -0.4084739317452597, 3.84373969772289392],
[-1.50501621323073098, -0.398152437362627176, 3.74661443558232188],
[-1.50675809892660051, -0.388339716218196018, 3.65427672961322481],
[-1.5084162385651434, -0.378999054915865163, 3.5663811067582909],
[-1.50999653291510794, -0.370097189439977492, 3.48261455263018771],
[-1.51150434107137377, -0.361603909347218588, 3.40269278695732691],
[-1.51294454121625188, -0.35349171523744205, 3.32635704038432989],
[-1.51432158338120804, -0.345735521319910533, 3.25337125562035823],
[-1.51563953541149798, -0.338312396297209383, 3.18351964915673991],
[-1.51690212313371897, -0.331201336928759638, 3.11660458049962807],
[-1.51811276556176833, -0.324383069564480431, 3.05244468460176055],
[-1.51927460584174145, -0.317839875699109098, 2.99087323032861674],
[-1.52039053852562223, -0.31155543822246351, 2.93173667367338142],
[-1.52146323367199554, -0.305514705556625632, 2.87489337928784705],
[-1.52249515819623116, -0.299703771298470756, 2.82021248791860968],
[-1.20635910478467956, -1.66394359280645432, 19.5721365103859206],
[-1.24941317154386211, -1.46989784887513175, 17.2896734473937386],
[-1.283474511109296, -1.31638411395825239, 15.4839681404339444],
[-1.31109426400402862, -1.19190387636729889, 14.0197693457703529],
[-1.33394196228424256, -1.08893221410958585, 12.8085651684640016],
[-1.35315607146636663, -1.00233783071582705, 11.789998733794917],
[-1.3695395960662875, -0.928501421061889931, 10.9214979652404782],
[-1.3836751610643101, -0.864796975665193868, 10.1721744262618419],
[-1.39599576003957626, -0.809272904370768931, 9.51907253766117023],
[-1.4068298930212968, -0.760448590822322745, | |
import torch
import torch.nn as nn
from torch.autograd import Variable
from models.modules import build_mlp, SoftAttention, ImageSoftAttention, TextSoftAttention, PositionalEncoding, ScaledDotProductAttention, create_mask, create_mask_for_object, proj_masking, PositionalEncoding, StateAttention, ConfigAttention, ConfigObjAttention
class SelfMonitoring(nn.Module):
    """An unrolled LSTM decoder with soft attention over the instruction and
    over navigable-viewpoint image features.

    Each call to :meth:`forward` performs one decoding step: it attends over
    the instruction context (``ctx``) and the projected navigable features,
    fuses both with the previous action feature through an LSTM cell, and
    emits (a) one logit per navigable candidate and (b) a progress-monitor
    value estimate.
    """

    def __init__(self, opts, img_fc_dim, img_fc_use_batchnorm, img_dropout, img_feat_input_dim,
                 rnn_hidden_size, rnn_dropout, max_len, fc_bias=True, max_navigable=16):
        super(SelfMonitoring, self).__init__()

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.max_navigable = max_navigable      # max number of candidate viewpoints per step
        self.feature_size = img_feat_input_dim  # raw image feature width
        self.hidden_size = rnn_hidden_size
        self.max_len = max_len                  # max instruction length (attention width)

        # MLP projecting raw image features into the fc space shared by the
        # attention and policy heads.
        proj_navigable_kwargs = {
            'input_dim': img_feat_input_dim,
            'hidden_dims': img_fc_dim,
            'use_batchnorm': img_fc_use_batchnorm,
            'dropout': img_dropout,
            'fc_bias': fc_bias,
            'relu': opts.mlp_relu
        }
        self.proj_navigable_mlp = build_mlp(**proj_navigable_kwargs)

        # Project h_0 into the image-attention space / the text-attention space.
        self.h0_fc = nn.Linear(rnn_hidden_size, img_fc_dim[-1], bias=fc_bias)
        self.h1_fc = nn.Linear(rnn_hidden_size, rnn_hidden_size, bias=fc_bias)

        self.text_soft_attn = TextSoftAttention()
        self.img_soft_attn = ImageSoftAttention()

        # NOTE(review): r_linear / sm / state_attention are only used by the
        # commented-out state-attention path in forward(); they remain
        # registered so existing checkpoints keep loading.
        self.r_linear = nn.Linear(rnn_hidden_size + 128, 2)
        self.sm = nn.Softmax(dim=1)

        self.dropout = nn.Dropout(p=rnn_dropout)
        self.state_attention = StateAttention()

        # LSTM input = projected previous feature + attended image feature
        # (img_fc_dim[-1] each) + attended context (rnn_hidden_size).
        self.lstm = nn.LSTMCell(img_fc_dim[-1] * 2 + rnn_hidden_size, rnn_hidden_size)

        self.lang_position = PositionalEncoding(rnn_hidden_size, dropout=0.1, max_len=max_len)

        self.logit_fc = nn.Linear(rnn_hidden_size * 2, img_fc_dim[-1])

        self.h2_fc_lstm = nn.Linear(rnn_hidden_size + img_fc_dim[-1], rnn_hidden_size, bias=fc_bias)

        # Progress monitor: maps [ctx_attn ; gated hidden] -> scalar.  The
        # linear layer's width implies ctx_attn is max_len wide.  Output
        # squashing is sigmoid or tanh depending on opts.monitor_sigmoid.
        if opts.monitor_sigmoid:
            self.critic = nn.Sequential(
                nn.Linear(max_len + rnn_hidden_size, 1),
                nn.Sigmoid()
            )
        else:
            self.critic = nn.Sequential(
                nn.Linear(max_len + rnn_hidden_size, 1),
                nn.Tanh()
            )

        # One predefined action (e.g. "stop") is always prepended to the
        # navigable candidates — see index_length in forward().
        self.num_predefined_action = 1

    def forward(self, img_feat, navigable_feat, pre_feat, question, h_0, c_0, ctx, pre_ctx_attend,
                s0, r0, navigable_index=None, ctx_mask=None):
        """ Takes a single step in the decoder

        img_feat: batch x 36 x feature_size
        navigable_feat: batch x max_navigable x feature_size
        pre_feat: previous attended feature, batch x feature_size
        question: this should be a single vector representing instruction
        ctx: batch x seq_len x dim
        navigable_index: list of list
        ctx_mask: batch x seq_len - indices to be masked

        NOTE(review): question, pre_ctx_attend, s0 and r0 are accepted for
        interface compatibility with sibling models but are unused in this
        implementation.
        """
        batch_size, num_imgs, feat_dim = img_feat.size()

        # Valid-candidate count per batch element (+1 for the predefined action).
        index_length = [len(_index) + self.num_predefined_action for _index in navigable_index]
        navigable_mask = create_mask(batch_size, self.max_navigable, index_length)

        # Project and mask candidate features; project previous action feature
        # through the same MLP.
        proj_navigable_feat = proj_masking(navigable_feat, self.proj_navigable_mlp, navigable_mask)
        proj_pre_feat = self.proj_navigable_mlp(pre_feat)
        #positioned_ctx = self.lang_position(ctx)

        # Attend over instruction context and navigable image features, both
        # conditioned on the previous hidden state.
        weighted_ctx, ctx_attn = self.text_soft_attn(self.h1_fc(h_0), ctx, mask=ctx_mask)

        weighted_img_feat, img_attn = self.img_soft_attn(self.h0_fc(h_0), proj_navigable_feat, mask=navigable_mask)

        # if r_t is None:
        #     r_t = self.r_linear(torch.cat((weighted_img_feat, h_0), dim=1))
        #     r_t = self.sm(r_t)

        # weighted_ctx, ctx_attn = self.state_attention(s_0, r_t, ctx, ctx_mask)

        # merge info into one LSTM to be carry through time
        concat_input = torch.cat((proj_pre_feat, weighted_img_feat, weighted_ctx), 1)

        h_1, c_1 = self.lstm(concat_input, (h_0, c_0))
        h_1_drop = self.dropout(h_1)

        # policy network: score each navigable candidate against the fused
        # context/hidden representation.
        h_tilde = self.logit_fc(torch.cat((weighted_ctx, h_1_drop), dim=1))
        logit = torch.bmm(proj_navigable_feat, h_tilde.unsqueeze(2)).squeeze(2)

        # value estimation: gate the new cell state with an input-style gate
        # computed from the old hidden state and the attended image feature.
        concat_value_input = self.h2_fc_lstm(torch.cat((h_0, weighted_img_feat), 1))

        h_1_value = self.dropout(torch.sigmoid(concat_value_input) * torch.tanh(c_1))

        value = self.critic(torch.cat((ctx_attn, h_1_value), dim=1))

        return h_1, c_1, weighted_ctx, img_attn, ctx_attn, logit, value, navigable_mask
class SpeakerFollowerBaseline(nn.Module):
    """ An unrolled LSTM with attention over instructions for decoding navigation actions. """

    def __init__(self, opts, img_fc_dim, img_fc_use_batchnorm, img_dropout, img_feat_input_dim,
                 rnn_hidden_size, rnn_dropout, max_len, fc_bias=True, max_navigable=16):
        super(SpeakerFollowerBaseline, self).__init__()

        self.max_navigable = max_navigable
        self.feature_size = img_feat_input_dim
        self.hidden_size = rnn_hidden_size

        self.proj_img_mlp = nn.Linear(img_feat_input_dim, img_fc_dim[-1], bias=fc_bias)

        self.proj_navigable_mlp = nn.Linear(img_feat_input_dim, img_fc_dim[-1], bias=fc_bias)

        self.h0_fc = nn.Linear(rnn_hidden_size, img_fc_dim[-1], bias=False)

        self.soft_attn = SoftAttention()

        self.dropout = nn.Dropout(p=rnn_dropout)

        # BUGFIX: the LSTM consumes [proj_pre_feat ; weighted_img_feat ;
        # weighted_ctx], i.e. img_fc_dim[-1] * 2 + rnn_hidden_size features —
        # the previous `img_feat_input_dim * 2` width raised a size-mismatch
        # error on the first step.
        self.lstm = nn.LSTMCell(img_fc_dim[-1] * 2 + rnn_hidden_size, rnn_hidden_size)

        self.h1_fc = nn.Linear(rnn_hidden_size, rnn_hidden_size, bias=False)

        self.proj_out = nn.Linear(rnn_hidden_size, img_fc_dim[-1], bias=fc_bias)

        # BUGFIX: forward() referenced the attributes below, but __init__
        # never created them (AttributeError at runtime).  They are defined
        # here exactly as in the sibling SelfMonitoring model for consistency.
        self.num_predefined_action = 1  # the predefined (stop) candidate
        self.lang_position = PositionalEncoding(rnn_hidden_size, dropout=0.1, max_len=max_len)
        self.logit_fc = nn.Linear(rnn_hidden_size * 2, img_fc_dim[-1])
        self.h2_fc_lstm = nn.Linear(rnn_hidden_size + img_fc_dim[-1], rnn_hidden_size, bias=fc_bias)
        # Progress monitor: maps [ctx_attn ; gated hidden] -> scalar value.
        if opts.monitor_sigmoid:
            self.critic = nn.Sequential(
                nn.Linear(max_len + rnn_hidden_size, 1),
                nn.Sigmoid()
            )
        else:
            self.critic = nn.Sequential(
                nn.Linear(max_len + rnn_hidden_size, 1),
                nn.Tanh()
            )

    def forward(self, img_feat, navigable_feat, pre_feat, h_0, c_0, ctx, navigable_index=None, ctx_mask=None):
        """ Takes a single step in the decoder LSTM.

        img_feat: batch x 36 x feature_size
        navigable_feat: batch x max_navigable x feature_size
        h_0: batch x hidden_size
        c_0: batch x hidden_size
        ctx: batch x seq_len x dim
        navigable_index: list of list
        ctx_mask: batch x seq_len - indices to be masked

        Returns (h_1, c_1, weighted_ctx, img_attn, ctx_attn, logit, value,
        navigable_mask), matching the sibling decoder models.
        """
        batch_size, num_imgs, feat_dim = img_feat.size()

        # Valid-candidate count per batch element (+1 for the predefined action).
        index_length = [len(_index) + self.num_predefined_action for _index in navigable_index]
        navigable_mask = create_mask(batch_size, self.max_navigable, index_length)

        proj_navigable_feat = proj_masking(navigable_feat, self.proj_navigable_mlp, navigable_mask)
        proj_pre_feat = self.proj_navigable_mlp(pre_feat)
        positioned_ctx = self.lang_position(ctx)

        # Attend over the position-encoded instruction and over the projected
        # navigable features, both conditioned on the previous hidden state.
        weighted_ctx, ctx_attn = self.soft_attn(self.h1_fc(h_0), positioned_ctx, mask=ctx_mask)

        weighted_img_feat, img_attn = self.soft_attn(self.h0_fc(h_0), proj_navigable_feat, mask=navigable_mask)

        # merge info into one LSTM to be carry through time
        concat_input = torch.cat((proj_pre_feat, weighted_img_feat, weighted_ctx), 1)

        h_1, c_1 = self.lstm(concat_input, (h_0, c_0))
        h_1_drop = self.dropout(h_1)

        # policy network: one logit per navigable candidate.
        h_tilde = self.logit_fc(torch.cat((weighted_ctx, h_1_drop), dim=1))
        logit = torch.bmm(proj_navigable_feat, h_tilde.unsqueeze(2)).squeeze(2)

        # value estimation: gate the new cell state with an input-style gate
        # computed from the old hidden state and the attended image feature.
        concat_value_input = self.h2_fc_lstm(torch.cat((h_0, weighted_img_feat), 1))

        h_1_value = self.dropout(torch.sigmoid(concat_value_input) * torch.tanh(c_1))

        value = self.critic(torch.cat((ctx_attn, h_1_value), dim=1))

        return h_1, c_1, weighted_ctx, img_attn, ctx_attn, logit, value, navigable_mask
class Configuring(nn.Module):
    """Decoder that attends over *configuration* embeddings using a
    state-attention mechanism.

    Unlike :class:`SelfMonitoring`, the instruction context here is a
    sequence of configuration embeddings (width 768, projected to 512 by
    ``config_fc``); attention over them is driven by the previous attention
    state ``s_0`` and a transition distribution ``r_t`` rather than by the
    hidden state alone.
    """

    def __init__(self, opts, img_fc_dim, img_fc_use_batchnorm, img_dropout, img_feat_input_dim,
                 rnn_hidden_size, rnn_dropout, max_len, fc_bias=True, max_navigable=16):
        super(Configuring, self).__init__()

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.max_navigable = max_navigable      # max number of candidate viewpoints per step
        self.feature_size = img_feat_input_dim  # raw image feature width
        self.hidden_size = rnn_hidden_size
        self.max_len = max_len

        # MLP projecting raw image features into the fc space shared by the
        # attention and policy heads.
        proj_navigable_kwargs = {
            'input_dim': img_feat_input_dim,
            'hidden_dims': img_fc_dim,
            'use_batchnorm': img_fc_use_batchnorm,
            'dropout': img_dropout,
            'fc_bias': fc_bias,
            'relu': opts.mlp_relu
        }
        self.proj_navigable_mlp = build_mlp(**proj_navigable_kwargs)

        # Project h_0 into the image-attention space / the text-attention space.
        self.h0_fc = nn.Linear(rnn_hidden_size, img_fc_dim[-1], bias=fc_bias)
        self.h1_fc = nn.Linear(rnn_hidden_size, rnn_hidden_size, bias=fc_bias)

        self.soft_attn = SoftAttention()

        self.dropout = nn.Dropout(p=rnn_dropout)

        # LSTM input = projected previous feature + attended image feature
        # (img_fc_dim[-1] each) + attended context (rnn_hidden_size).
        self.lstm = nn.LSTMCell(img_fc_dim[-1] * 2 + rnn_hidden_size, rnn_hidden_size)

        # NOTE(review): lang_position is only used by the commented-out
        # soft-attention path in forward(); kept for checkpoint compatibility.
        self.lang_position = PositionalEncoding(rnn_hidden_size, dropout=0.1, max_len=max_len)

        self.logit_fc = nn.Linear(rnn_hidden_size * 2, img_fc_dim[-1])

        self.h2_fc_lstm = nn.Linear(rnn_hidden_size + img_fc_dim[-1], rnn_hidden_size, bias=fc_bias)

        # Predicts the 2-way transition distribution r_t from the attended
        # image feature and the hidden state when the caller supplies none.
        self.r_linear = nn.Linear(rnn_hidden_size + 128, 2)
        self.sm = nn.Softmax(dim=1)

        # One predefined action (e.g. "stop") is always prepended to the
        # navigable candidates — see index_length in forward().
        self.num_predefined_action = 1

        self.state_attention = StateAttention()

        # Projects 768-d configuration embeddings to the 512-d context space.
        self.config_fc = nn.Linear(768, 512, bias=False)

        # Progress monitor: maps [ctx_attn ; gated hidden] -> scalar.  The
        # linear layer's width implies ctx_attn is 10 wide here (presumably
        # the max number of configurations — TODO confirm against caller).
        if opts.monitor_sigmoid:
            self.critic = nn.Sequential(
                #nn.Linear(max_len + rnn_hidden_size, 1),
                nn.Linear(10 + rnn_hidden_size, 1),
                nn.Sigmoid()
            )
        else:
            self.critic = nn.Sequential(
                # nn.Linear(max_len + rnn_hidden_size, 1),
                nn.Linear(10 + rnn_hidden_size, 1),
                nn.Tanh()
            )

    def forward(self, img_feat, navigable_feat, pre_feat, question, h_0, c_0, ctx, pre_ctx_attend,
                s_0, r_t, navigable_index=None, ctx_mask=None):
        #def forward(self, img_feat, navigable_feat, pre_feat, question, h_0, c_0, ctx, pre_ctx_attend,\
        #            navigable_index=None, ctx_mask=None, s_0, r_t, config_embedding):
        """ Takes a single step in the decoder LSTM.

        config_embedding: batch x max_config_len x config embeddding
        image_feature: batch x 12 images x image_feature_size
        navigable_index: list of navigable viewstates
        h_t: batch x hidden_size
        c_t: batch x hidden_size
        ctx_mask: batch x seq_len - indices to be masked

        NOTE(review): question and pre_ctx_attend are accepted for interface
        compatibility with sibling models but are unused here.  r_t may be
        None, in which case it is predicted from the current step.
        """
        batch_size, num_imgs, feat_dim = img_feat.size()

        # Valid-candidate count per batch element (+1 for the predefined action).
        index_length = [len(_index) + self.num_predefined_action for _index in navigable_index]
        navigable_mask = create_mask(batch_size, self.max_navigable, index_length)

        # Project and mask candidate features; project previous action feature
        # through the same MLP.
        proj_navigable_feat = proj_masking(navigable_feat, self.proj_navigable_mlp, navigable_mask)
        proj_pre_feat = self.proj_navigable_mlp(pre_feat)

        weighted_img_feat, img_attn = self.soft_attn(self.h0_fc(h_0), proj_navigable_feat, mask=navigable_mask)

        # Predict the transition distribution when the caller did not pass one.
        if r_t is None:
            r_t = self.r_linear(torch.cat((weighted_img_feat, h_0), dim=1))
            r_t = self.sm(r_t)

        # r_t = self.r_linear(torch.cat((weighted_img_feat, h_0), dim=1))
        # r_t = self.sm(r_t)

        # State attention over projected configuration embeddings, driven by
        # the previous attention state s_0 and transition distribution r_t.
        weighted_ctx, ctx_attn = self.state_attention(s_0, r_t, self.config_fc(ctx), ctx_mask)

        # positioned_ctx = self.lang_position(self.config_fc(ctx))

        # weighted_ctx, ctx_attn = self.soft_attn(self.h1_fc(h_0), positioned_ctx, mask=ctx_mask)

        # merge info into one LSTM to be carry through time
        concat_input = torch.cat((proj_pre_feat, weighted_img_feat, weighted_ctx), 1)

        h_1, c_1 = self.lstm(concat_input, (h_0, c_0))
        h_1_drop = self.dropout(h_1)

        # policy network: score each navigable candidate against the fused
        # context/hidden representation.
        h_tilde = self.logit_fc(torch.cat((weighted_ctx, h_1_drop), dim=1))
        logit = torch.bmm(proj_navigable_feat, h_tilde.unsqueeze(2)).squeeze(2)

        # value estimation: gate the new cell state with an input-style gate
        # computed from the old hidden state and the attended image feature.
        concat_value_input = self.h2_fc_lstm(torch.cat((h_0, weighted_img_feat), 1))

        h_1_value = self.dropout(torch.sigmoid(concat_value_input) * torch.tanh(c_1))

        value = self.critic(torch.cat((ctx_attn, h_1_value), dim=1))

        return h_1, c_1, weighted_ctx, img_attn, ctx_attn, logit, value, navigable_mask
class ConfiguringObject(nn.Module):
def __init__(self, opts, img_fc_dim, img_fc_use_batchnorm, img_dropout, img_feat_input_dim,
rnn_hidden_size, rnn_dropout, max_len, fc_bias=True, max_navigable=16):
super(ConfiguringObject, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.max_navigable = max_navigable
self.feature_size = img_feat_input_dim
self.hidden_size = rnn_hidden_size
proj_navigable_obj_kwargs = {
'input_dim': 152,
'hidden_dims': img_fc_dim,
'use_batchnorm': img_fc_use_batchnorm,
'dropout': img_dropout,
'fc_bias': fc_bias,
'relu': opts.mlp_relu
}
self.proj_navigable_obj_mlp = build_mlp(**proj_navigable_obj_kwargs)
proj_navigable_img_kwargs = {
'input_dim': img_feat_input_dim,
'hidden_dims': img_fc_dim,
'use_batchnorm': img_fc_use_batchnorm,
'dropout': img_dropout,
'fc_bias': fc_bias,
'relu': opts.mlp_relu
}
self.proj_navigable_img_mlp = build_mlp(**proj_navigable_img_kwargs)
self.h0_fc = nn.Linear(rnn_hidden_size, img_fc_dim[-1], bias=False)
self.soft_attn = SoftAttention()
self.state_attention = StateAttention()
self.config_obj_attention = ConfigObjAttention()
self.dropout = nn.Dropout(p=rnn_dropout)
#self.lstm = nn.LSTMCell(img_fc_dim[-1] + 768, rnn_hidden_size)
self.lstm = nn.LSTMCell(img_fc_dim[-1] * 2 + rnn_hidden_size, rnn_hidden_size)
self.h1_fc = nn.Linear(rnn_hidden_size, rnn_hidden_size, bias=False)
self.h2_fc_lstm = nn.Linear(rnn_hidden_size + img_fc_dim[-1], rnn_hidden_size, bias=fc_bias)
self.proj_out = nn.Linear(rnn_hidden_size, img_fc_dim[-1], bias=fc_bias)
self.state_attention = StateAttention()
# self.logit_fc = nn.Linear(rnn_hidden_size, img_fc_dim[-1])
self.logit_fc = nn.Linear(rnn_hidden_size * 2, img_fc_dim[-1])
self.r_linear = nn.Linear(rnn_hidden_size + 128, 4)
self.image_linear = nn.Linear(img_feat_input_dim, img_fc_dim[-1])
self.config_fc = nn.Linear(768, 512, bias=False)
self.config_atten_linear = nn.Linear(512, 128)
#self.config_atten_linear = nn.Linear(768, 128)
self.sm = nn.Softmax(dim=1)
if opts.monitor_sigmoid:
self.critic = nn.Sequential(
#nn.Linear(max_len + rnn_hidden_size, 1),
nn.Linear(10 + rnn_hidden_size, 1),
nn.Sigmoid()
)
else:
self.critic = nn.Sequential(
# nn.Linear(max_len + rnn_hidden_size, 1),
nn.Linear(10 + rnn_hidden_size, 1),
nn.Tanh()
)
self.r_transform = Variable(torch.tensor([[1,0,0.75,0.5],[0,1,0.25,0.5]]).transpose(0,1), requires_grad=False)
def forward(self, navigable_img_feat, navigable_obj_feat, pre_feat, question, h_0, c_0, ctx, pre_ctx_attend, \
s_0, r_t, navigable_index, ctx_mask):
""" Takes a single step in the decoder LSTM.
config_embedding: batch x max_config_len x config embeddding
image_feature: batch x 12 images x 36 boxes x image_feature_size
navigable_index: list of navigable viewstates
h_t: batch x hidden_size
c_t: batch x hidden_size
ctx_mask: batch x seq_len - indices to be masked
"""
# input of image_feature should be changed
batch_size, num_heading, num_object, object_feat_dim = navigable_obj_feat.size()
navigable_obj_feat = navigable_obj_feat.view(batch_size, num_heading*num_object, object_feat_dim) #4 x 16*36 x 152
index_length = [len(_index)+1 for _index in navigable_index]
navigable_mask = create_mask(batch_size, self.max_navigable, index_length)
navigable_obj_mask = create_mask_for_object(batch_size, self.max_navigable*num_object, index_length) #batch x | |
get(self, k):
i = id(k)
try:
return self.by_id[i]
except KeyError:
return self.by_hash.get(self._hash(k))
def __setitem__(self, k, v):
    # Index the value under both the object's identity and its structural
    # hash, so later lookups hit for the very same object or an equal one.
    self.by_id[id(k)] = v
    self.by_hash[self._hash(k)] = v
def __delitem__(self, k):
    # Drop the identity entry if present; the structural entry is deleted
    # unconditionally (a KeyError propagates for a missing key, matching
    # ordinary dict semantics).
    i = id(k)
    if i in self.by_id:
        del self.by_id[i]
    del self.by_hash[self._hash(k)]
def items(self):
    # by_hash stores hashed key forms; _unhash recovers the original key
    # objects for the caller.
    for (k, v) in self.by_hash.items():
        yield (self._unhash(k), v)
def values(self):
    """Yield every stored value, in the same order as items()."""
    yield from (pair[1] for pair in self.items())
# Work-stack markers used by IterativeReducer.visit:
#   _ReduceOp(x, index): pop `index` finished children and reduce node `x`.
#   _OnExitOp(x): fire the on_exit hook for node `x`.
_ReduceOp = collections.namedtuple("_ReduceOp", ["x", "index"])
_OnExitOp = collections.namedtuple("_OnExitOp", ["x"])
class IterativeReducer(object):
    """Abstract class to help write iterative forms of recursive traversals.

    Implementors should override `reduce`. For instance, to implement a naive
    replacement algorithm:

        class Replacer(IterativeReducer):
            def __init__(self, needle, replacement):
                super().__init__()
                self.needle = needle
                self.replacement = replacement
            def children(self, x):
                # no need to visit children of something we are replacing!
                if x == self.needle:
                    return ()
                return super().children(x)
            def reduce(self, x, new_children):
                if x == self.needle:
                    return self.replacement
                return super().reduce(x, new_children)
    """

    def current_parent(self):
        # The innermost node whose reduction is still pending is the nearest
        # enclosing ancestor of the node currently being visited.
        for x in reversed(self.work_stack):
            if isinstance(x, _ReduceOp):
                return x.x
        raise ValueError("no current parent, sadly")

    def bind(self, v : syntax.EVar):
        # Hook: a variable comes into scope. Default is a no-op.
        pass

    def unbind(self, v : syntax.EVar):
        # Hook: a variable leaves scope. Default is a no-op.
        pass

    def on_enter(self, x):
        # Entering a lambda brings its argument into scope.
        if isinstance(x, syntax.ELambda):
            self.bind(x.arg)

    def on_exit(self, x):
        # Leaving a lambda takes its argument back out of scope.
        if isinstance(x, syntax.ELambda):
            self.unbind(x.arg)

    def visit(self, x):
        """Traverse `x` bottom-up with an explicit stack; return the reduction.

        The push order below is load-bearing: for each node we push (in pop
        order) its children, then a _ReduceOp, then an _OnExitOp — so all
        children are reduced first, then the node itself, then on_exit fires.
        """
        self.work_stack = work_stack = [x]
        done_stack = []
        while work_stack:
            # print("TODO: {}; DONE: {}".format(work_stack, done_stack))
            top = work_stack.pop()
            if isinstance(top, _ReduceOp):
                # All of this node's children now sit on done_stack; they come
                # off in reverse, so restore order before reducing.
                args = [done_stack.pop() for i in range(top.index)]
                args.reverse()
                done_stack.append(self.reduce(top.x, tuple(args)))
                continue
            if isinstance(top, _OnExitOp):
                self.on_exit(top.x)
                continue
            children = self.children(top)
            self.on_enter(top)
            work_stack.append(_OnExitOp(top))
            work_stack.append(_ReduceOp(top, len(children)))
            # reversed() so children are popped (visited) left-to-right.
            work_stack.extend(reversed(children))
        assert len(done_stack) == 1
        return done_stack[0]

    def children(self, x):
        # Default child enumeration: sequences yield their elements, ADT
        # nodes their syntactic children, everything else is a leaf.
        if isinstance(x, tuple) or isinstance(x, list):
            return x
        elif isinstance(x, dict):
            raise NotImplementedError()
        elif isinstance(x, common.ADT):
            return x.children()
        else:
            return ()

    def reduce(self, x, new_children):
        # Default reduction: rebuild the node from its (possibly rewritten)
        # children, preserving object identity when nothing changed.
        if isinstance(x, common.ADT):
            if all(a is b for (a, b) in zip(x.children(), new_children)):
                return x
            out = type(x)(*new_children)
            if isinstance(x, syntax.Exp) and hasattr(x, "type"):
                out = out.with_type(x.type)
            return out
        elif type(x) in [list, tuple, dict]:
            if type(x) in [list, tuple] and all(a is b for (a, b) in zip(x, new_children)):
                return x
            return type(x)(new_children)
        else:
            return x
def cse(e, verify=False):
    """
    Common subexpression elimination. Replaces re-used expressions with ELet,
    e.g. "(x+1) + (x+1)" ---> "let a = x+1 in a+a".

    If `verify` is True, the result is checked for semantic equivalence with
    the input using the solver (slow; intended for debugging).
    """
    def finish(e, avail):
        # `avail` maps expressions -> temp vars. Invert it so we can decide,
        # per temp var, whether to inline its definition or emit a binding.
        ravail = collections.OrderedDict([(v, k) for (k, v) in avail.items() if v is not None])
        counts = free_vars(e, counts=True)
        for var, value in reversed(ravail.items()):
            for (vv, ct) in free_vars(value, counts=True).items():
                counts[vv] = counts.get(vv, 0) + ct
        # Inline temps used fewer than twice, or whose definition is tiny:
        # a let-binding would not pay for itself there.
        to_inline = common.OrderedSet(v for v in ravail if counts.get(v, 0) < 2 or ravail[v].size() < 2)
        sub = { v : ravail[v] for v in to_inline }
        skip = collections.defaultdict(int)
        class V(IterativeReducer):
            # Substitutes inlined temp vars by their definitions, except where
            # the variable is shadowed by an enclosing lambda (skip tracks
            # the current shadowing depth per variable).
            def children(self, x):
                if isinstance(x, syntax.EVar) and x in sub and not skip[x]:
                    return (sub[x],)
                return super().children(x)
            def reduce(self, x, new_children):
                if isinstance(x, syntax.EVar) and x in sub and not skip[x]:
                    return new_children[0]
                return super().reduce(x, new_children)
            def bind(self, v):
                skip[v] += 1
            def unbind(self, v):
                skip[v] -= 1
        inliner = V()
        e = inliner.visit(e)
        # Wrap each surviving temp in an ELet binding (innermost first, since
        # ravail is walked in reverse insertion order).
        for var, value in reversed(ravail.items()):
            if var in to_inline:
                continue
            value = inliner.visit(value)
            ee = syntax.ELet(value, target_syntax.ELambda(var, e))
            if hasattr(e, "type"):
                ee = ee.with_type(e.type)
            e = ee
        return e
    class V(BottomUpRewriter):
        # Bottom-up pass that hash-conses every non-trivial subexpression into
        # self.avail, rewriting repeated occurrences to a shared temp var.
        def __init__(self):
            super().__init__()
            self.avail = ExpMap()  # maps expressions --> variables
        # Leaves are returned untouched; no benefit in naming them.
        def visit_EVar(self, e):
            return e
        def visit_ENum(self, e):
            return e
        def visit_EEnumEntry(self, e):
            return e
        def visit_EEmptyList(self, e):
            return e
        def visit_EStr(self, e):
            return e
        def visit_EBool(self, e):
            return e
        def visit_ENative(self, e):
            return e
        def visit_ENull(self, e):
            return e
        def visit_Exp(self, e):
            # Rebuild with rewritten children, then reuse an existing temp var
            # for an identical expression or allocate a fresh one.
            ee = type(e)(*[self.visit(c) for c in e.children()]).with_type(e.type)
            res = self.avail.get(ee)
            if res is not None:
                return res
            v = fresh_var(e.type, hint="tmp")
            self.avail[ee] = v
            return v
        def visit_ELet(self, e):
            # slow, but correct
            return self.visit(subst(e.body_function.body, {e.body_function.arg.id:e.e}))
        def visit_EListComprehension(self, e):
            raise NotImplementedError()
        def _fvs(self, e):
            # Cache the free-variable set directly on the expression node.
            if not hasattr(e, "_fvs"):
                e._fvs = free_vars(e)
            return e._fvs
        def visit_ELambda(self, e):
            # Expressions mentioning the lambda's argument must not float
            # above it; temporarily restrict avail to those that don't.
            old_avail = self.avail
            self.avail = ExpMap([(k, v) for (k, v) in self.avail.items() if e.arg not in self._fvs(k)])
            body = self.visit(e.body)
            precious = set((e.arg,))
            # print("computing fvs x{}...".format(len(self.avail.items())))
            fvs = { v : self._fvs(k) for (k, v) in self.avail.items() }
            # print("done")
            # Fixpoint: transitively mark temps that depend (directly or
            # through other temps) on the lambda argument; those must stay
            # bound inside the lambda.
            dirty = True
            while dirty:
                dirty = False
                for v in self.avail.values():
                    if any(vv in precious for vv in fvs[v]):
                        if v not in precious:
                            precious.add(v)
                            dirty = True
            # Temps not tied to the argument are hoisted to the outer scope.
            for (k, v) in list(self.avail.items()):
                if v not in precious:
                    old_avail[k] = v
                    del self.avail[k]
            body = finish(body, self.avail)
            self.avail = old_avail
            return target_syntax.ELambda(e.arg, body)
    v = V()
    res = v.visit(e)
    res = finish(res, v.avail)
    if verify:
        from cozy.solver import valid
        if not valid(syntax.EBinOp(e, "===", res).with_type(syntax.BOOL), model_callback=print):
            print(repr(e))
            assert False
    return res
def inline_calls(spec, target=None):
    """Inline every ECall to one of `spec`'s queries into `target`.

    Calls whose name is not a query of `spec` (e.g. extern functions) are kept
    as ECall nodes. Non-public queries are dropped from a rewritten Spec,
    since inlining removes their remaining uses. If `target` is None, `spec`
    itself is rewritten. Returns the rewritten target.
    """
    # NOTE: the old `extern_func_names` set was computed here but never used;
    # it has been removed.
    if target is None:
        target = spec
    queries = {q.name : q for q in spec.methods if isinstance(q, syntax.Query)}
    class CallInliner(BottomUpRewriter):
        def visit_Spec(self, spec):
            # Shallow-copy so the caller's Spec is not mutated in place.
            spec = shallow_copy(spec)
            spec.assumptions = tuple(self.visit(a) for a in spec.assumptions)
            spec.methods = tuple(self.visit(m) for m in spec.methods if not (isinstance(m, syntax.Query) and m.visibility != syntax.Visibility.Public))
            return spec
        def visit_ECall(self, e):
            new_args = self.visit(e.args)
            query = queries.get(e.func)
            if query is None:
                # Not a known query (e.g. an extern function): keep the call.
                return syntax.ECall(e.func, new_args).with_type(e.type)
            # Substitute actual arguments into the query body, then keep
            # rewriting in case the body itself contains query calls.
            return self.visit(subst(query.ret,
                {arg: expr for ((arg, argtype), expr) in zip(query.args, new_args)}))
    rewriter = CallInliner()
    return rewriter.visit(target)
class LetInliner(BottomUpRewriter):
    """Eliminates ELet nodes by substituting the bound value into the body."""
    def visit_ELet(self, e):
        # Substitute the let-bound value for its variable, then keep
        # rewriting the result (the body may contain further lets).
        inlined = subst(e.body_function.body, {e.body_function.arg.id : e.e})
        return self.visit(inlined)

# Convenience entry point: inline_lets(e) strips all lets from e.
inline_lets = LetInliner().visit
def get_modified_var(stm):
    """
    Given a statement, returns a tuple:
    (
        the EVar modified by the statement (if any),
        the handle type modified by the statement (if any)
    )
    Returns (None, None) if there is no modification.
    """
    def lvalue_target(e):
        # Walk down an l-value expression to its root EVar, noting whether
        # the write goes through a handle's "val" field along the way.
        if isinstance(e, syntax.EVar):
            return e, None
        if isinstance(e, target_syntax.EMapGet):
            return lvalue_target(e.map)[0], None
        if isinstance(e, syntax.ETupleGet):
            return lvalue_target(e.e)[0], None
        if isinstance(e, syntax.EGetField):
            is_handle_write = isinstance(e.e.type, syntax.THandle) and e.field_name == "val"
            return lvalue_target(e.e)[0], (e.e.type if is_handle_write else None)
        assert False, "unexpected modification target {}".format(e)
    if isinstance(stm, syntax.SAssign):
        return lvalue_target(stm.lhs)
    if isinstance(stm, syntax.SCall):
        return lvalue_target(stm.target)
    if isinstance(stm, (target_syntax.SMapPut, target_syntax.SMapDel,
                        target_syntax.SMapUpdate)):
        return lvalue_target(stm.map)
    return None, None
class ExpInfo(object):
    """Bookkeeping for one candidate common subexpression."""

    def __init__(self, tempvar, count, dependents, handle_types, paths):
        # tempvar: fresh variable standing in for the expression
        # count: number of occurrences seen so far
        # dependents: variables the expression depends on
        # handle_types: handle types the expression touches
        # paths: AST paths at which the expression occurs
        (self.tempvar, self.count, self.dependents,
         self.handle_types, self.paths) = (tempvar, count, dependents,
                                           handle_types, paths)

    def __repr__(self):
        return "<ExpInfo(tempvar={}, count={}, deps={}, handle_types={}, paths={})>".format(
            self.tempvar, self.count, self.dependents, self.handle_types, self.paths
        )
class ExpressionMap(ExpMap):
    """
    Maps expressions to (temp vars, other supporting info).
    """

    def set_or_increment(self, exp, dependents, handle_types, path):
        info = self.get(exp)
        if info is None:
            # Never before seen expr: allocate a fresh temp var and start
            # tracking occurrences.
            self[exp] = ExpInfo(fresh_var(exp.type, "tmp"), 1, set(dependents),
                                set(handle_types), [path])
        else:
            # Seen before. (Dependents/types shouldn't change per expression.)
            assert info.dependents == dependents
            assert info.handle_types == handle_types
            info.count += 1
            info.paths.append(path)

    def unbind_handle_type(self, handle_type):
        """
        Returns a new ExpressionMap without expressions related to the given
        handle type.
        """
        kept = ((exp, expinfo) for exp, expinfo in self.items()
                if handle_type.statevar not in expinfo.handle_types)
        return ExpressionMap(kept)

    def unbind(self, var):
        """
        Returns a new ExpressionMap without expressions related to the given var.
        """
        kept = ((exp, expinfo) for exp, expinfo in self.items()
                if var.id not in expinfo.dependents)
        return ExpressionMap(kept)
class SLinearSequence(syntax.Stm):
    """
    An intermediate form of SSeq that just holds a list of its ordered
    constituent statements.
    """

    def __init__(self, statements):
        self.statements = statements

    @classmethod
    def from_seq(cls, seq):
        # Flatten the SSeq tree into an ordered list of statements.
        return cls([s for s in break_seq(seq)])

    def children(self):
        return tuple(self.statements)

    def __repr__(self):
        return "SLinearSequence{}".format(repr(self.children()))
def cse_scan(e):
SIMPLE_EXPS = (syntax.ENum, syntax.EVar, syntax.EBool, syntax.EStr,
syntax.ENative, syntax.EEnumEntry, syntax.ENull, syntax.EEmptyList)
class SeqTransformer(BottomUpRewriter):
"""Rewrites SSeq -> SLinearSequence for CSE process."""
def visit_SSeq(self, s):
return SLinearSequence.from_seq(s)
class CSEScanner(PathAwareExplorer):
def __init__(self):
self.captures = collections.defaultdict(list)
# And we want a map of expression path -> CSE temp var.
self.rewrites = dict()
def visit_object(self, o, path, *args, **kwargs):
# Include empty dependents/handle types in result.
return self.join(o, ()), set(), set()
def visit_children(self, e, path, entries, capture_point):
"""
Returns (expr, dependent_vars, handle types) for each child of e by
visiting it.
"""
assert isinstance(e, common.ADT)
return [
self.visit(c, path + (i,), entries, capture_point)
for i, c in enumerate(e.children())
if isinstance(c, syntax.ADT) # ignore non-ADT children, like strings.
]
def filter_captured_vars(self, outer_entries, inner_entries,
capture_path, bound_var, handle_capture=False):
"""
Move things from inner_entries to capture/rewrite structures if
they're related to the binding variable. Otherwise, bubble them up
to the surrounding scope.
"""
for expr, expinfo in inner_entries.items():
if handle_capture:
| |
<filename>examples/nist_sre/helpers.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function
import os
import pickle
import shutil
import warnings
from collections import OrderedDict, defaultdict
from enum import Enum
from numbers import Number
import numba as nb
import numpy as np
from scipy.io import wavfile
from odin import fuel as F
from odin import visual as V
from odin.stats import freqcount, sampling_iter
from odin.utils import (Progbar, args_parse, cache_disk, catch_warnings_error,
catch_warnings_ignore, crypto, ctext, get_exppath,
get_logpath, get_module_from_path, get_script_name,
get_script_path, mpi, select_path)
# ===========================================================================
# Configuration
# ===========================================================================
class Config(object):
    """Static configuration shared by all NIST-SRE experiment scripts."""
    # ====== Acoustic features ====== #
    FRAME_LENGTH = 0.025  # analysis frame length in seconds (25 ms)
    STEP_LENGTH = 0.01  # hop between frames in seconds (10 ms)
    SAMPLE_RATE = 8000  # sample rate in Hz (telephone speech)
    WINDOW = 'hamm'  # window-function name; presumably Hamming -- confirm against the feature extractor
    NFFT = 512  # FFT size used for spectral features
    # Random seed for reproducibility
    SUPER_SEED = 87654321
class SystemStates(Enum):
    """Pipeline stage the current run is in; selected from the running
    script's name by the dispatch further down this module."""
    UNKNOWN = 0
    EXTRACT_FEATURES = 1
    TRAINING = 2
    SCORING = 3
# ===========================================================================
# General arguments for all experiments
# ===========================================================================
# Command-line arguments shared by every experiment script.
# Each spec appears to be (flag, help, <always None here -- presumably a
# choices/validator slot; verify against odin.utils.args_parse>, default);
# 3-element specs carry no default and are therefore required.
_args = args_parse(descriptions=[
    ('recipe', 'recipe is the name of acoustic Dataset defined in feature_recipes.py', None),
    ('-feat', 'specific name for the acoustic features, extracted from the given recipe', None, ''),
    ('-aug', 'augmentation dataset: musan, rirs; could be multiple dataset '
             'for training: "musan,rirs"', None, 'None'),
    ('-ncpu', 'number of CPU to be used, if <= 0, auto-select', None, 0),
    # for scoring
    ('-sys', 'name of the system for scoring: xvec, ivec, e2e ...', None, 'xvec'),
    ('-sysid', 'when a system is saved multiple checkpoint (e.g. sys.0.ai)', None, '-1'),
    ('-score', 'name of dataset for scoring, multiple dataset split by ","', None, 'sre18dev,sre18eval'),
    ('-backend', 'list of dataset for training the backend: '
                 'PLDA, SVM or Cosine', None, 'sre04,sre05,sre06,sre08,sre10,mx6'),
    ('-lda', 'if > 0, running LDA before training the backend '
             'with given number of components', None, 0),
    ('-plda', 'number of PLDA components, must be > 0 ', None, 150),
    ('--mll', 'pre-fitting maximum likelihood before training PLDA', None, False),
    ('--showllk', 'show LLK during training of PLDA, this will slow thing down', None, False),
    # for training
    ('-downsample', 'absolute number of files used for training', None, 0),
    ('-exclude', 'list of excluded dataset not for training,'
                 'multiple dataset split by ","', None, ''),
    # for ivector
    ('-nmix', 'for i-vector training, number of Gaussian components', None, 2048),
    ('-tdim', 'for i-vector training, number of latent dimension for i-vector', None, 600),
    # for DNN
    ('-utt', 'maximum length of sequence for training', None, 3),
    ('-seq', 'sequencing mode for training data, cut or pad', None, 'cut'),
    ('-batch', 'batch size, for training DNN, kaldi use 64, we use 128', None, 128),
    ('-epoch', 'number of epoch, for training DNN, kaldi only 3 epochs', None, 12),
    ('-clip', 'The maximum change in parameters allowed per minibatch, '
              'measured in Euclidean norm over the entire model (change '
              'will be clipped to this value), kaldi use 2.0', None, 2.0),
    ('-lr', 'learning rate for Adam, kaldi use 0.001 by default,'
            ' we use 0.01', None, 0.01),
    # others
    ('-mindur', 'for filtering utterances, minimum duration of utterance '
                'for training (in second)', None, 1),
    ('-minutt', 'for filtering utterances, minimum number of utterance of '
                'each speaker for training', None, 3),
    ('--override', 'override previous experiments', None, False),
    ('--debug', 'enable debugging', None, False),
])
# Module-level configuration derived from the parsed command-line arguments.
IS_DEBUGGING = bool(_args.debug)
IS_OVERRIDE = bool(_args.override)
MINIMUM_UTT_DURATION = int(_args.mindur)  # in seconds
assert MINIMUM_UTT_DURATION > 0, "Minimum utterances duration must be greater than 0"
MINIMUM_UTT_PER_SPEAKERS = int(_args.minutt)  # number of utterances
# this variable determine which state is running
CURRENT_STATE = SystemStates.UNKNOWN
# ====== Features extraction ====== #
FEATURE_RECIPE = str(_args.recipe)
# Feature name defaults to the recipe prefix before '_' unless -feat is given.
FEATURE_NAME = FEATURE_RECIPE.split('_')[0] if len(str(_args.feat)) == 0 else str(_args.feat)
AUGMENTATION_NAME = _args.aug
TRAINING_DATASET = ['mx6', 'voxceleb1', 'voxceleb2', 'swb', 'fisher',
                    'sre04', 'sre05', 'sre06', 'sre08', 'sre10']
# ====== DNN ====== #
BATCH_SIZE = int(_args.batch)
EPOCH = int(_args.epoch)
LEARNING_RATE = float(_args.lr)
GRADIENT_CLIPPING = float(_args.clip)
# ====== searching for the appropriate system ====== #
SCORE_SYSTEM_NAME = _args.sys
SCORE_SYSTEM_ID = int(_args.sysid)
N_LDA = int(_args.lda)
N_PLDA = int(_args.plda)
assert N_PLDA > 0, "Number of PLDA components must > 0, but given: %d" % N_PLDA
PLDA_MAXIMUM_LIKELIHOOD = bool(_args.mll)
PLDA_SHOW_LLK = bool(_args.showllk)
# ====== system ====== #
# Auto-select CPU count (leave 2 cores free, cap at 18) unless -ncpu is given.
NCPU = min(18, mpi.cpu_count() - 2) if _args.ncpu <= 0 else int(_args.ncpu)
def _check_feature_extraction_requirement():
    """Ensure the external audio tools for feature extraction are on PATH.

    Raises:
        RuntimeError: if any of `sox`, `sph2pipe` or `ffmpeg` is missing.
    """
    from shutil import which
    # One loop instead of three copy-pasted checks; messages are unchanged.
    for tool in ('sox', 'sph2pipe', 'ffmpeg'):
        if which(tool) is None:
            raise RuntimeError("`%s` was not installed" % tool)
def _check_recipe_name_for_extraction():
    """Validate the recipe name used for feature extraction.

    Raises:
        ValueError: if the recipe name contains '_' (the underscore is
        reserved as a separator elsewhere in the pipeline).
    """
    # BUG FIX: the old message said "'_' can appear", but the exception fires
    # precisely when '_' IS present -- the underscore is forbidden.
    if '_' in FEATURE_RECIPE:
        raise ValueError("'_' cannot appear in recipe name which is: '%s'" % FEATURE_RECIPE)
# ====== check the running script to determine the current running states ====== #
_script_name = get_script_name()
if _script_name in ('speech_augmentation', 'speech_features_extraction'):
    CURRENT_STATE = SystemStates.EXTRACT_FEATURES
    _check_feature_extraction_requirement()
    _check_recipe_name_for_extraction()
elif _script_name in ('train_xvec', 'train_ivec', 'train_tvec',
                      'train_evec', 'analyze', 'analyze_data'):
    CURRENT_STATE = SystemStates.TRAINING
# BUG FIX: ('make_score') is just a parenthesized string, so `in` performed a
# substring test (e.g. a script named 'score' would have matched). The 1-tuple
# restores the intended exact-name membership check.
elif _script_name in ('make_score',):
    CURRENT_STATE = SystemStates.SCORING
    _check_feature_extraction_requirement()
else:
    raise RuntimeError("Unknown states for current running script: %s/%s" %
                       (get_script_path(), get_script_name()))
# some fancy log of current state
print(ctext('====================================', 'red'))
print(ctext("System state:", 'cyan'), ctext(CURRENT_STATE, 'yellow'))
print(ctext('====================================', 'red'))
# ===========================================================================
# FILE LIST PATH
# ===========================================================================
# ====== basic directories ====== #
EXP_DIR = get_exppath('sre', override=False)
# this folder store extracted vectors for training backend and extracting scores
VECTORS_DIR = os.path.join(EXP_DIR, 'vectors')
# os.makedirs(exist_ok=True) replaces the racy `if not exists: mkdir` pattern
# (TOCTOU between the check and the create) and also creates missing parents.
os.makedirs(VECTORS_DIR, exist_ok=True)
# this folder store the results
RESULT_DIR = os.path.join(EXP_DIR, 'results')
os.makedirs(RESULT_DIR, exist_ok=True)
# this folder store the analysis
ANALYSIS_DIR = os.path.join(EXP_DIR, 'analysis')
os.makedirs(ANALYSIS_DIR, exist_ok=True)
# ====== raw data ====== #
# First existing path wins; presumably falls back to '' when none exists --
# verify against odin.utils.select_path.
PATH_BASE = select_path(
    '/media/data2/SRE_DATA',
    '/mnt/sda1/SRE_DATA',
    '/mnt/sdb1/SRE_DATA',
    default='')
# path to directory contain following folders:
##############
# * fisher
# * mx6
# * sre04
# * sre05
# * sre06
# * sre08
# * sre10
# * swb
# * voxceleb1
# * voxceleb2
###############
# * musan
# * rirs
###############
# * sre18dev
# * sre18eval
# Dataset name -> directory that contains that dataset's folder.
PATH_RAW_DATA = {
    'mx6': PATH_BASE,
    'voxceleb1': PATH_BASE,
    'voxceleb2': PATH_BASE,
    'swb': PATH_BASE,
    'fisher': PATH_BASE,
    'sre04': os.path.join(PATH_BASE, 'NIST1996_2008/SRE02_SRE06'),
    'sre05': os.path.join(PATH_BASE, 'NIST1996_2008/SRE96_SRE05'),
    'sre06': os.path.join(PATH_BASE, 'NIST1996_2008/SRE02_SRE06'),
    'sre08': PATH_BASE,
    'sre10': PATH_BASE,
    'sre18dev': PATH_BASE,
    'sre18eval': PATH_BASE,
    # noise datasets
    'musan': PATH_BASE,
    'rirs': PATH_BASE,
}
# all features will be stored here
OUTPUT_DIR = select_path(
    '/home/trung/data',
    '/media/data1',
    '/mnt/sda1'
)
PATH_ACOUSTIC_FEATURES = os.path.join(OUTPUT_DIR, "SRE_FEAT")
# Race-free creation: replaces the check-then-mkdir pattern (TOCTOU).
os.makedirs(PATH_ACOUSTIC_FEATURES, exist_ok=True)
# ===========================================================================
# Load the file list
# ===========================================================================
sre_file_list = F.load_sre_list()
print('README at:', ctext(sre_file_list['README.txt'], 'cyan'))
# Keep only the actual file tables (drops README and other non-array entries).
sre_file_list = {k: v
                 for k, v in sre_file_list.items()
                 if isinstance(v, np.ndarray)}
print("Original dataset:")
for k, v in sorted(sre_file_list.items(), key=lambda x: x[0]):
    print(' ', ctext('%-18s' % k, 'yellow'), ':',
          ctext(v.shape, 'cyan'))
# ===========================================================================
# Validate scoring dataset
# ===========================================================================
def validate_scoring_dataset(in_path_raw, score_dataset, file_must_exist=True):
    """Resolve every file of the given datasets against its base directory.

    Returns {dataset_name: np.ndarray of rows}, where each row is
    [path, channel, name, something, dataset_name] (columns 0-4).
    Raises ValueError for an unknown dataset name, and RuntimeError when
    `file_must_exist` is set and a resolved path does not exist.
    """
    all_files = {}
    for dsname in score_dataset:
        if dsname not in sre_file_list:
            raise ValueError("Cannot find dataset with name: '%s' in the file list" % dsname)
        if dsname not in in_path_raw:
            raise ValueError("Cannot find dataset with name: '%s' in provided path" % dsname)
        base_path = in_path_raw[dsname]
        resolved_rows = []
        for row in sre_file_list[dsname]:
            path = os.path.join(base_path, row[0])
            # every file must exist
            if bool(file_must_exist) and not os.path.exists(path):
                raise RuntimeError("File not exist at path: %s" % path)
            resolved_rows.append([path] + row[1:4].tolist() + [dsname])
        all_files[dsname] = np.array(resolved_rows)
    return all_files
# ====== check dataset for scoring ====== #
# Only resolved when running in SCORING state; other states never touch
# SCORING_DATASETS / BACKEND_DATASETS.
if CURRENT_STATE == SystemStates.SCORING:
    assert len(_args.score) > 0, \
        "No dataset are provided for scoring, specify '-score' option"
    # for scoring
    SCORING_DATASETS = validate_scoring_dataset(
        in_path_raw=PATH_RAW_DATA,
        score_dataset=str(_args.score).strip().split(','))
    print("Processed scoring dataset:")
    for dsname, dsarray in sorted(SCORING_DATASETS.items(),
                                  key=lambda x: x[0]):
        print(' ', ctext('%-10s' % dsname, 'yellow'), ':',
              '%s' % ctext(dsarray.shape, 'cyan'))
    # for training the backend
    # Backend files need not exist on disk (file_must_exist=False): only the
    # metadata rows are needed to fit the backend.
    BACKEND_DATASETS = validate_scoring_dataset(
        in_path_raw=PATH_RAW_DATA,
        score_dataset=str(_args.backend).strip().split(','),
        file_must_exist=False)
    assert len(BACKEND_DATASETS) > 0, \
        "Datasets for training the backend must be provided"
    print("Processed backend dataset:")
    for dsname, dsarray in sorted(BACKEND_DATASETS.items(),
                                  key=lambda x: x[0]):
        print(' ', ctext('%-10s' % dsname, 'yellow'), ':',
              '%s' % ctext(dsarray.shape, 'cyan'))
# ===========================================================================
# Validating the Noise dataset for augmentation
# ===========================================================================
@cache_disk
def validating_noise_data(in_path_raw):
# preparing
noise_dataset = ['musan', 'rirs']
all_files = defaultdict(list)
n_files = sum(len(sre_file_list[i])
for i in noise_dataset
if i in sre_file_list)
n_non_exist = 0
n_exist = 0
prog = Progbar(target=n_files, print_summary=True,
name="Validating noise dataset")
prog.set_summarizer(key='#Non-exist', fn=lambda x: x[-1])
prog.set_summarizer(key='#Exist', fn=lambda x: x[-1])
# check all dataset
for ds_name in noise_dataset:
if ds_name not in sre_file_list:
continue
if ds_name not in in_path_raw:
continue
base_path = in_path_raw[ds_name]
base_ds = all_files[ds_name]
# start validating
for row in sre_file_list[ds_name]:
# check file
path, channel, name, noise_type, duration = row[:5]
path = os.path.join(base_path, path)
if os.path.exists(path):
base_ds.append([path, channel, name, noise_type, duration])
n_exist += 1
else:
n_non_exist += 1
# update progress
prog['ds'] = ds_name
prog['#Exist'] = n_exist
prog['#Non-exist'] = n_non_exist
prog.add(1)
# ====== return ====== #
# Header:
# 0 1 2 3 4
# path, channel, name, noise_type, duration
return {key: np.array(sorted(val, key=lambda x: | |
'endpointsConfiguration', 'type': 'FlowEndpointsConfiguration'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
}
def __init__(
    self,
    *,
    provisioning_state: Optional[Union[str, "WorkflowProvisioningState"]] = None,
    state: Optional[Union[str, "WorkflowState"]] = None,
    integration_service_environment_id: Optional[str] = None,
    endpoints_configuration: Optional["FlowEndpointsConfiguration"] = None,
    network_configuration: Optional["NetworkConfiguration"] = None,
    **kwargs
):
    """Initialize the integration service environment properties.

    All parameters are keyword-only and optional; extra keyword arguments
    are forwarded to the msrest Model base class.
    """
    super(IntegrationServiceEnvironmentProperties, self).__init__(**kwargs)
    self.provisioning_state = provisioning_state
    self.state = state
    self.integration_service_environment_id = integration_service_environment_id
    self.endpoints_configuration = endpoints_configuration
    self.network_configuration = network_configuration
class IntegrationServiceEnvironmentSku(msrest.serialization.Model):
    """The integration service environment sku.

    :param name: The sku name. Possible values include: "NotSpecified", "Premium", "Developer".
    :type name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName
    :param capacity: The sku capacity.
    :type capacity: int
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, *, name: Optional[Union[str, "IntegrationServiceEnvironmentSkuName"]] = None,
                 capacity: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.capacity = capacity
class IntegrationServiceEnvironmentSkuCapacity(msrest.serialization.Model):
    """The integration service environment sku capacity.

    :param minimum: The minimum capacity.
    :type minimum: int
    :param maximum: The maximum capacity.
    :type maximum: int
    :param default: The default capacity.
    :type default: int
    :param scale_type: The sku scale type. Possible values include: "Manual", "Automatic", "None".
    :type scale_type: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuScaleType
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'int'},
        'maximum': {'key': 'maximum', 'type': 'int'},
        'default': {'key': 'default', 'type': 'int'},
        'scale_type': {'key': 'scaleType', 'type': 'str'},
    }

    def __init__(self, *, minimum: Optional[int] = None, maximum: Optional[int] = None,
                 default: Optional[int] = None,
                 scale_type: Optional[Union[str, "IntegrationServiceEnvironmentSkuScaleType"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.minimum = minimum
        self.maximum = maximum
        self.default = default
        self.scale_type = scale_type
class IntegrationServiceEnvironmentSkuDefinition(msrest.serialization.Model):
    """The integration service environment sku definition.

    :param resource_type: The resource type.
    :type resource_type: str
    :param sku: The sku.
    :type sku: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinitionSku
    :param capacity: The sku capacity.
    :type capacity: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuCapacity
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'IntegrationServiceEnvironmentSkuDefinitionSku'},
        'capacity': {'key': 'capacity', 'type': 'IntegrationServiceEnvironmentSkuCapacity'},
    }

    def __init__(self, *, resource_type: Optional[str] = None,
                 sku: Optional["IntegrationServiceEnvironmentSkuDefinitionSku"] = None,
                 capacity: Optional["IntegrationServiceEnvironmentSkuCapacity"] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.resource_type = resource_type
        self.sku = sku
        self.capacity = capacity
class IntegrationServiceEnvironmentSkuDefinitionSku(msrest.serialization.Model):
    """The sku.

    :param name: The sku name. Possible values include: "NotSpecified", "Premium", "Developer".
    :type name: str or ~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuName
    :param tier: The sku tier.
    :type tier: str
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[Union[str, "IntegrationServiceEnvironmentSkuName"]] = None,
                 tier: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.tier = tier
class IntegrationServiceEnvironmentSkuList(msrest.serialization.Model):
    """The list of integration service environment skus.

    :param value: The list of integration service environment skus.
    :type value: list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentSkuDefinition]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[IntegrationServiceEnvironmentSkuDefinition]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *,
                 value: Optional[List["IntegrationServiceEnvironmentSkuDefinition"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class IntegrationServiceEnvironmentSubnetNetworkHealth(msrest.serialization.Model):
    """The integration service environment subnet network health.

    All required parameters must be populated in order to send to Azure.

    :param outbound_network_dependencies: The outbound network dependencies.
    :type outbound_network_dependencies:
     list[~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependency]
    :param outbound_network_health: The integration service environment network health.
    :type outbound_network_health:
     ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkDependencyHealth
    :param network_dependency_health_state: Required. The integration service environment network
     health state. Possible values include: "NotSpecified", "Unknown", "Available", "NotAvailable".
    :type network_dependency_health_state: str or
     ~azure.mgmt.logic.models.IntegrationServiceEnvironmentNetworkEndPointAccessibilityState
    """

    # msrest enforces these constraints before serialization.
    _validation = {
        'network_dependency_health_state': {'required': True},
    }

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'outbound_network_dependencies': {'key': 'outboundNetworkDependencies', 'type': '[IntegrationServiceEnvironmentNetworkDependency]'},
        'outbound_network_health': {'key': 'outboundNetworkHealth', 'type': 'IntegrationServiceEnvironmentNetworkDependencyHealth'},
        'network_dependency_health_state': {'key': 'networkDependencyHealthState', 'type': 'str'},
    }

    def __init__(self, *,
                 network_dependency_health_state: Union[str, "IntegrationServiceEnvironmentNetworkEndPointAccessibilityState"],
                 outbound_network_dependencies: Optional[List["IntegrationServiceEnvironmentNetworkDependency"]] = None,
                 outbound_network_health: Optional["IntegrationServiceEnvironmentNetworkDependencyHealth"] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.outbound_network_dependencies = outbound_network_dependencies
        self.outbound_network_health = outbound_network_health
        self.network_dependency_health_state = network_dependency_health_state
class IpAddress(msrest.serialization.Model):
    """The ip address.

    :param address: The address.
    :type address: str
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
    }

    def __init__(self, *, address: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.address = address
class IpAddressRange(msrest.serialization.Model):
    """The ip address range.

    :param address_range: The IP address range.
    :type address_range: str
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'address_range': {'key': 'addressRange', 'type': 'str'},
    }

    def __init__(self, *, address_range: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.address_range = address_range
class JsonSchema(msrest.serialization.Model):
    """The JSON schema.

    :param title: The JSON title.
    :type title: str
    :param content: The JSON content.
    :type content: str
    """

    # msrest wire-format mapping: python attribute -> {JSON key, type}.
    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'content': {'key': 'content', 'type': 'str'},
    }

    def __init__(self, *, title: Optional[str] = None,
                 content: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.title = title
        self.content = content
class KeyVaultKey(msrest.serialization.Model):
    """A key stored in a key vault.

    :param kid: The key id.
    :type kid: str
    :param attributes: The key attributes.
    :type attributes: ~azure.mgmt.logic.models.KeyVaultKeyAttributes
    """

    _attribute_map = {
        'kid': {'key': 'kid', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'KeyVaultKeyAttributes'},
    }

    def __init__(
        self,
        *,
        kid: Optional[str] = None,
        attributes: Optional["KeyVaultKeyAttributes"] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.kid = kid
        self.attributes = attributes
class KeyVaultKeyAttributes(msrest.serialization.Model):
    """Attributes of a key vault key.

    :param enabled: Whether the key is enabled or not.
    :type enabled: bool
    :param created: When the key was created (epoch value, serialized as long).
    :type created: long
    :param updated: When the key was updated (epoch value, serialized as long).
    :type updated: long
    """

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'created': {'key': 'created', 'type': 'long'},
        'updated': {'key': 'updated', 'type': 'long'},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        created: Optional[int] = None,
        updated: Optional[int] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.enabled = enabled
        self.created = created
        self.updated = updated
class KeyVaultKeyCollection(msrest.serialization.Model):
    """A page of key vault keys.

    :param value: The key vault keys.
    :type value: list[~azure.mgmt.logic.models.KeyVaultKey]
    :param skip_token: The skip token used to fetch the next page.
    :type skip_token: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[KeyVaultKey]'},
        'skip_token': {'key': 'skipToken', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["KeyVaultKey"]] = None,
        skip_token: Optional[str] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.value = value
        self.skip_token = skip_token
class KeyVaultKeyReference(msrest.serialization.Model):
    """A reference to a key vault key.

    All required parameters must be populated in order to send to Azure.

    :param key_vault: Required. The key vault reference.
    :type key_vault: ~azure.mgmt.logic.models.KeyVaultKeyReferenceKeyVault
    :param key_name: Required. The private key name in key vault.
    :type key_name: str
    :param key_version: The private key version in key vault.
    :type key_version: str
    """

    _validation = {
        'key_vault': {'required': True},
        'key_name': {'required': True},
    }

    _attribute_map = {
        'key_vault': {'key': 'keyVault', 'type': 'KeyVaultKeyReferenceKeyVault'},
        'key_name': {'key': 'keyName', 'type': 'str'},
        'key_version': {'key': 'keyVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        key_vault: "KeyVaultKeyReferenceKeyVault",
        key_name: str,
        key_version: Optional[str] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.key_vault = key_vault
        self.key_name = key_name
        self.key_version = key_version
class KeyVaultKeyReferenceKeyVault(msrest.serialization.Model):
    """The key vault pointed to by a key reference.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: The resource id.
    :type id: str
    :ivar name: The resource name.
    :vartype name: str
    :ivar type: The resource type.
    :vartype type: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.id = id
        # Server-populated, read-only fields start unset on the client side.
        self.name = None
        self.type = None
class KeyVaultReference(ResourceReference):
    """A named key vault reference.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: The resource id.
    :type id: str
    :ivar type: Gets the resource type.
    :vartype type: str
    :param name: The key vault name.
    :type name: str
    """

    _validation = {
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self, *, id: Optional[str] = None, name: Optional[str] = None, **kwargs
    ) -> None:
        super().__init__(id=id, **kwargs)
        self.name = name
class ListKeyVaultKeysDefinition(msrest.serialization.Model):
    """Parameters for listing key vault keys.

    All required parameters must be populated in order to send to Azure.

    :param key_vault: Required. The key vault reference.
    :type key_vault: ~azure.mgmt.logic.models.KeyVaultReference
    :param skip_token: The skip token.
    :type skip_token: str
    """

    _validation = {
        'key_vault': {'required': True},
    }

    _attribute_map = {
        'key_vault': {'key': 'keyVault', 'type': 'KeyVaultReference'},
        'skip_token': {'key': 'skipToken', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        key_vault: "KeyVaultReference",
        skip_token: Optional[str] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.key_vault = key_vault
        self.skip_token = skip_token
class ManagedApi(Resource):
"""The managed api | |
for failure.
instanceIdToken: This is the GCE instance identity token described in
https://cloud.google.com/compute/docs/instances/verifying-instance-
identity where the audience is 'osconfig.googleapis.com' and the format
is 'full'.
instanceSystemId: Required. The unique, system-generated identifier for
the instance. This is the immutable, auto-generated ID assigned to the
instance upon creation. This is needed here because GCE instance names
are not tombstoned; it is possible to delete an instance and create a
new one with the same name; this provides a mechanism for this API to
identify distinct instances in this case.
patchJob: Unique identifier of the patch job this request applies to.
state: State of current patch execution on the instance.
"""
class StateValueValuesEnum(_messages.Enum):
r"""State of current patch execution on the instance.
Values:
PATCH_STATE_UNSPECIFIED: Unspecified.
PENDING: The instance has not been notified yet.
INACTIVE: Instance is inactive and cannot be patched.
NOTIFIED: The instance has been notified that it should patch.
STARTED: The instance has started the patching process.
DOWNLOADING_PATCHES: The instance is downloading patches.
APPLYING_PATCHES: The instance is applying patches.
REBOOTING: The instance is rebooting.
SUCCEEDED: The instance has completed applying patches.
SUCCEEDED_REBOOT_REQUIRED: The instance has completed applying patches
but a reboot is required.
FAILED: The instance has failed to apply the patch.
ACKED: The instance acked the notification and will start shortly.
TIMED_OUT: The instance exceeded the time out while applying the patch.
"""
PATCH_STATE_UNSPECIFIED = 0
PENDING = 1
INACTIVE = 2
NOTIFIED = 3
STARTED = 4
DOWNLOADING_PATCHES = 5
APPLYING_PATCHES = 6
REBOOTING = 7
SUCCEEDED = 8
SUCCEEDED_REBOOT_REQUIRED = 9
FAILED = 10
ACKED = 11
TIMED_OUT = 12
attemptCount = _messages.IntegerField(1)
failureReason = _messages.StringField(2)
instanceIdToken = _messages.StringField(3)
instanceSystemId = _messages.StringField(4)
patchJob = _messages.StringField(5)
state = _messages.EnumField('StateValueValuesEnum', 6)
class ReportPatchJobInstanceDetailsResponse(_messages.Message):
    r"""Response from reporting instance patch details. Includes information the
    agent needs to continue or stop patching.

    Enums:
      PatchJobStateValueValuesEnum: State of the overall patch. If the patch is
        no longer active, the agent should not begin a new patch step.

    Fields:
      dryRun: If this patch job is a dry run, the agent will report its status
        as it goes through the motions but won't actually run any updates or
        perform any reboots.
      patchConfig: Patch configuration the agent should apply.
      patchJob: Unique identifier for the current patch job.
      patchJobState: State of the overall patch. If the patch is no longer
        active, the agent should not begin a new patch step.
    """

    class PatchJobStateValueValuesEnum(_messages.Enum):
        r"""State of the overall patch. If the patch is no longer active, the
        agent should not begin a new patch step.

        Values:
          PATCH_JOB_STATE_UNSPECIFIED: Unspecified is invalid.
          ACTIVE: The patch job is running. Instances will continue to run patch
            job steps.
          COMPLETED: The patch job is complete.
        """
        # Enum numbers are part of the serialized format; do not renumber.
        PATCH_JOB_STATE_UNSPECIFIED = 0
        ACTIVE = 1
        COMPLETED = 2

    # Field ordinals map to proto tag numbers; keep them stable.
    dryRun = _messages.BooleanField(1)
    patchConfig = _messages.MessageField('PatchConfig', 2)
    patchJob = _messages.StringField(3)
    patchJobState = _messages.EnumField('PatchJobStateValueValuesEnum', 4)
class RetryStrategy(_messages.Message):
    r"""The strategy for retrying failed patches during the patch window.

    Fields:
      enabled: If true, the agent will continue to try and patch until the
        window has ended.
    """

    enabled = _messages.BooleanField(1)
class SetIamPolicyRequest(_messages.Message):
    r"""Request message for `SetIamPolicy` method.

    Fields:
      policy: REQUIRED: The complete policy to be applied to the `resource`. The
        size of the policy is limited to a few 10s of KB. An empty policy is a
        valid policy but certain Cloud Platform services (such as Projects)
        might reject them.
      updateMask: OPTIONAL: A FieldMask specifying which fields of the policy to
        modify. Only the fields in the mask will be modified. If no mask is
        provided, the following default mask is used: paths: "bindings, etag"
        This field is only used by Cloud IAM.
    """

    policy = _messages.MessageField('Policy', 1)
    updateMask = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
    r"""Query parameters accepted by all methods.

    Enums:
      FXgafvValueValuesEnum: V1 error format.
      AltValueValuesEnum: Data format for response.

    Fields:
      f__xgafv: V1 error format.
      access_token: OAuth access token.
      alt: Data format for response.
      callback: JSONP
      fields: Selector specifying which fields to include in a partial response.
      key: API key. Your API key identifies your project and provides you with
        API access, quota, and reports. Required unless you provide an OAuth 2.0
        token.
      oauth_token: OAuth 2.0 token for the current user.
      prettyPrint: Returns response with indentations and line breaks.
      quotaUser: Available to use for quota purposes for server-side
        applications. Can be any arbitrary string assigned to a user, but should
        not exceed 40 characters.
      trace: A tracing token of the form "token:<tokenid>" to include in api
        requests.
      uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
      upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
    """

    class AltValueValuesEnum(_messages.Enum):
        r"""Data format for response.

        Values:
          json: Responses with Content-Type of application/json
          media: Media download with context-dependent Content-Type
          proto: Responses with Content-Type of application/x-protobuf
        """
        json = 0
        media = 1
        proto = 2

    class FXgafvValueValuesEnum(_messages.Enum):
        r"""V1 error format.

        Values:
          _1: v1 error format
          _2: v2 error format
        """
        _1 = 0
        _2 = 1

    # Field ordinals map to proto tag numbers; keep them stable.
    f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
    access_token = _messages.StringField(2)
    alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
    callback = _messages.StringField(4)
    fields = _messages.StringField(5)
    key = _messages.StringField(6)
    oauth_token = _messages.StringField(7)
    prettyPrint = _messages.BooleanField(8, default=True)
    quotaUser = _messages.StringField(9)
    trace = _messages.StringField(10)
    uploadType = _messages.StringField(11)
    upload_protocol = _messages.StringField(12)
class TestIamPermissionsRequest(_messages.Message):
    r"""Request message for `TestIamPermissions` method.

    Fields:
      permissions: The set of permissions to check for the `resource`.
        Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
        For more information see [IAM
        Overview](https://cloud.google.com/iam/docs/overview#permissions).
    """

    permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
    r"""Response message for `TestIamPermissions` method.

    Fields:
      permissions: A subset of `TestPermissionsRequest.permissions` that the
        caller is allowed.
    """

    permissions = _messages.StringField(1, repeated=True)
class WindowsUpdateConfig(_messages.Message):
    r"""Configuration settings for the Windows update.

    Fields:
      windowsUpdateServerUri: Optional URI of Windows update server. This sets
        the registry value `WUServer` under
        `HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate`.
    """

    windowsUpdateServerUri = _messages.StringField(1)
class WindowsUpdateSettings(_messages.Message):
    r"""Windows patching is performed using the Windows Update Agent.

    Enums:
      ClassificationsValueListEntryValuesEnum:

    Fields:
      classifications: Only apply updates of these windows update
        classifications. If empty, all updates will be applied.
      excludes: Optional list of KBs to exclude from update.
    """

    class ClassificationsValueListEntryValuesEnum(_messages.Enum):
        r"""ClassificationsValueListEntryValuesEnum enum type.

        Values:
          CLASSIFICATION_UNSPECIFIED: <no description>
          CRITICAL: <no description>
          SECURITY: <no description>
          DEFINITION: <no description>
          DRIVER: <no description>
          FEATURE_PACK: <no description>
          SERVICE_PACK: <no description>
          TOOL: <no description>
          UPDATE_ROLLUP: <no description>
          UPDATE: <no description>
        """
        # Enum numbers are part of the serialized format; do not renumber.
        CLASSIFICATION_UNSPECIFIED = 0
        CRITICAL = 1
        SECURITY = 2
        DEFINITION = 3
        DRIVER = 4
        FEATURE_PACK = 5
        SERVICE_PACK = 6
        TOOL = 7
        UPDATE_ROLLUP = 8
        UPDATE = 9

    classifications = _messages.EnumField('ClassificationsValueListEntryValuesEnum', 1, repeated=True)
    excludes = _messages.StringField(2, repeated=True)
class YumPackageConfig(_messages.Message):
    r"""A list of packages to install, remove, and their repos for a given
    package manager type.

    Fields:
      packageInstalls: Packages to install. yum -y install package1 package2
        package3
      packageRemovals: Packages to remove. yum -y remove package1 package2
        package3
      repositories: Package repositories to configure in the package manager.
        The instance likely already has some defaults set and duplicates are
        acceptable but ignored.
    """

    packageInstalls = _messages.MessageField('Package', 1, repeated=True)
    packageRemovals = _messages.MessageField('Package', 2, repeated=True)
    repositories = _messages.MessageField('YumRepository', 3, repeated=True)
class YumRepository(_messages.Message):
    r"""Represents a single yum package repository. These will be added to a
    repo file that will be managed a /etc/yum.repos.d/google_osconfig.repo

    Fields:
      baseUrl: Required. The location of the repository directory.
      displayName: Optional. If omitted, the id will be used for the name.
      gpgKeys: Optional. URIs of GPG keys.
      id: Required. A one word, unique name for this repository. This will be
        the `repo id` in the yum config file and also the `display_name` if
        `display_name` is omitted.
    """

    baseUrl = _messages.StringField(1)
    displayName = _messages.StringField(2)
    gpgKeys = _messages.StringField(3, repeated=True)
    id = _messages.StringField(4)
class YumSettings(_messages.Message):
r"""Yum patching will be performed by executing `yum update`. Additional
options can be set to control how this is executed. Note that not all
settings are supported on all platforms.
Fields:
excludes: List of packages to exclude from update. These packages will be
excluded by using the yum `--exclude` flag.
minimal: Optional. Will cause patch to run `yum update-minimal` instead.
security: Optional. Adds the `--security` flag to `yum update`. Not
supported on all platforms.
"""
excludes = | |
print("")
for punct in keptPunct:
text = text.replace(punct, delim + punct + delim)
for punct in ignoredPunct:
text = text.replace(punct, "")
for punct in delimPunct:
text = text.replace(punct, delim)
text = text.lower()
if config.tokenizer == "stanford":
ret = StanfordTokenizer().tokenize(text)
elif config.tokenizer == "nltk":
ret = word_tokenize(text)
else:
ret = text.split() # delim
ret = [replacelistPost.get(word, word) for word in ret]
ret = [t for t in ret if t != ""]
if not tokenize:
ret = delim.join(ret)
return ret
# Read class generated files.
# files interface
def readInstances(self, instancesFilename):
with open(instancesFilename, "r") as inFile:
instances = json.load(inFile)
return instances
"""
Generate class' files. Save json representation of instances and
symbols-to-integers dictionaries.
"""
def writeInstances(self, instances, instancesFilename):
with open(instancesFilename, "w") as outFile:
json.dump(instances, outFile)
def setVocabs(self):
self.createVocabs()
self.writeVocabs()
def createVocabs(self):
ansAddUnk = True
self.questionDict.createVocab(
minCount=config.wrdEmbQMinCount, top=config.wrdEmbQTop
)
self.answerDict.createVocab(
minCount=config.wrdEmbAMinCount,
top=config.wrdEmbATop,
addUnk=ansAddUnk,
weights=True,
) # config
self.qaDict.createVocab(minCount=config.wrdEmbQMinCount)
def loadVocabs(self):
if os.path.exists(config.qaDictFile()):
print("load dictionaries")
with open(config.questionDictFile(), "rb") as inFile:
self.questionDict = pickle.load(inFile)
with open(config.answerDictFile(), "rb") as inFile:
self.answerDict = pickle.load(inFile)
with open(config.qaDictFile(), "rb") as inFile:
self.qaDict = pickle.load(inFile)
def writeVocabs(self):
with open(config.questionDictFile(), "wb") as outFile:
pickle.dump(self.questionDict, outFile)
with open(config.answerDictFile(), "wb") as outFile:
pickle.dump(self.answerDict, outFile)
with open(config.qaDictFile(), "wb") as outFile:
pickle.dump(self.qaDict, outFile)
# Write prediction json to file and optionally a one-answer-per-line output file
def writePreds(self, res, tier, suffix=""):
if res is None:
return
preds = res["preds"]
sortedPreds = sorted(preds, key=lambda instance: instance["index"])
with open(config.predsFile(tier + suffix), "w") as outFile:
outFile.write(json.dumps(sortedPreds))
answersFile = config.answersFile(tier + suffix)
if config.dataset == "CLEVR":
with open(answersFile, "w") as outFile:
for instance in sortedPreds:
writeline(outFile, instance["prediction"])
else:
with open(answersFile, "w") as outFile:
results = [
{
"question_id": instance.get("questionId", "NONE"),
"answer": instance["prediction"],
}
for instance in sortedPreds
]
outFile.write(json.dumps(results))
# Reads NLVR data entries and create a json dictionary.
def readNLVR(self, datasetFilenames, instancesFilename, tier, train, imageIndex):
instances = []
i = 0
if os.path.exists(instancesFilename):
instances = self.readInstacnes(instancesFilename)
else:
with open(datasetFilenames[0], "r") as datasetFile:
for line in datasetFile:
instance = json.loads(line)
questionStr = instance["sentence"]
question = self.processText(
questionStr, ignoredPunct=Preprocesser.allPunct, keptPunct=[]
)
if train or (not config.wrdEmbQUnk):
self.questionDict.addSymbols(question)
self.qaDict.addSymbols(question)
answer = instance["label"]
if train or (not config.wrdEmbAUnk):
self.answerDict.addSymbols(answer)
self.qaDict.addSymbols(answer)
imageId = instance["identifier"] # + "-" + str(k)
imageIdx = imageIndex[imageId]["idx"]
# int(imageId) if imageIndex is None else imageIndex[imageId]["idx"]
for k in range(6):
instances.append(
{
"questionStr": questionStr,
"question": question,
"answer": answer,
"imageId": {
"group": tier,
"id": imageId,
"idx": imageIdx,
}, # imageInfo[imageId]["idx"]
"tier": tier,
"index": i,
}
)
i += 1
random.shuffle(instances)
self.writeInstances(instances, instancesFilename)
return instances
    def readVQA(
        self, datasetFilenames, instancesFilename, tier, updateVocab, imageIndex=None
    ):
        """Read VQA questions (and, for non-test tiers, annotations) into the
        instance list, filtering against externally precomputed vocab files.

        NOTE(review): the vocab filenames are hard-coded relative paths —
        confirm the expected working directory.
        """
        vocabq = set(json.load(open("newqVocabFileVQA.json")))
        vocaba = set(json.load(open("newaVocabFileVQA.json")))
        counterq = 0  # questions whose words are all in vocabq
        countera = 0  # annotations whose majority answer is in vocaba
        instances = []
        qId2idx = {}  # question_id -> position in `instances`
        annotationsFilename = config.annotationsFile(tier)
        pairsFilename = config.pairsFile(tier)
        if os.path.exists(instancesFilename):
            # Cached instances: everything below was already computed.
            instances = self.readInstances(instancesFilename)
        else:
            with open(datasetFilenames[0], "r") as questionsFile:
                questions = json.load(questionsFile)["questions"]
            index = 0
            for i in tqdm(range(len(questions)), desc="Preprocessing"):
                instance = questions[i]
                questionStr = instance["question"]
                question = self.vqaProcessText(questionStr, True, True)
                # Skip over-long questions entirely (they get no instance).
                if config.questionLim > 0 and len(question) > config.questionLim:
                    continue
                if updateVocab or (not config.wrdEmbQUnk):
                    self.questionDict.addSymbols(question)
                    self.qaDict.addSymbols(question)
                choices, choiceStrs = None, None
                if config.ansFormat == "mc":
                    # Multiple-choice: tokenize and dedupe candidate answers.
                    choiceStrs = instance["multiple_choices"]
                    choices = []
                    for choiceStr in choiceStrs:
                        choice = self.vqaProcessText(
                            choiceStr, config.ansTokenize, False
                        )
                        if updateVocab or (not config.wrdEmbAUnk):
                            self.answerDict.addSymbols(choice)
                            self.qaDict.addSymbols(choice)
                        choices.append(choice)
                    choices = list(set(choices))
                    if len(choices) != len(choiceStrs):
                        # Debug aid: normalization collapsed two choices.
                        print(choiceStrs)
                        print(choices)
                imageId = instance["image_id"]
                imageInfo = imageIndex[str(imageId)]
                # Keep only questions fully covered by the external question
                # vocab ([:-1] strips the trailing "?" before splitting).
                if all([(x in vocabq) for x in questionStr[:-1].split()]):
                    counterq += 1
                    instances.append(
                        {
                            "questionStr": questionStr,
                            "question": question,
                            "questionId": instance["question_id"],
                            "answer": "yes"
                            if config.ansFormat == "oe"
                            else 0,  # Dummy answer
                            "answerFreq": ["yes"],  # Dummy answer
                            "imageId": {
                                "group": tier,
                                "id": imageId,
                                "idx": imageInfo["idx"],
                            },
                            "choiceStrs": choiceStrs,
                            "choices": choices,
                            "tier": tier,
                            "index": index,
                        }
                    )
                    if config.imageObjects:
                        instances[-1]["objectsNum"] = imageInfo["objectsNum"]
                    qId2idx[instance["question_id"]] = index
                    index += 1
            if tier != "test":
                # Fill in real answers from the annotations file.
                with open(annotationsFilename, "r") as annotationsFile:
                    annotations = json.load(annotationsFile)["annotations"]
                for i in tqdm(range(len(annotations)), desc="Preprocessing"):
                    instance = annotations[i]
                    if instance["question_id"] not in qId2idx:
                        continue
                    idx = qId2idx[instance["question_id"]]
                    answerStr = instance["multiple_choice_answer"]
                    answer = self.vqaProcessText(answerStr, config.ansTokenize, False)
                    if config.ansFormat == "mc":
                        answer = instances[idx]["choices"].index(answer)
                    answerFreqStrs = []
                    answerFreq = []
                    # Only annotations whose majority answer is in the external
                    # answer vocab contribute real answers.
                    if instance["multiple_choice_answer"] in vocaba:
                        countera += 1
                        for answerData in instance["answers"]:
                            answerStr = answerData["answer"]
                            answer = self.vqaProcessText(
                                answerStr, config.ansTokenize, False
                            )
                            if updateVocab or (not config.wrdEmbAUnk):
                                self.answerDict.addSymbols(answer)
                                self.qaDict.addSymbols(answer)
                            answerFreqStrs.append(answerStr)
                            answerFreq.append(answer)
                        # NOTE(review): answerStr/answer here hold the LAST
                        # per-annotator answer, not multiple_choice_answer —
                        # confirm this is intended.
                        instances[idx].update(
                            {
                                "answerStr": answerStr,
                                "answer": answer,
                                "answerFreqStrs": answerFreqStrs,
                                "answerFreq": answerFreq,
                                "questionType": instance["question_type"],
                                "answerType": instance["answer_type"],
                            }
                        )
            if config.dataVer == 2:
                # Link complementary question pairs (VQA v2) in both directions.
                with open(pairsFilename, "r") as pairsFile:
                    pairs = json.load(pairsFile)
                for pair in pairs:
                    if pair[0] in qId2idx:
                        instances[qId2idx[pair[0]]]["complementary"] = qId2idx.get(
                            pair[1], None
                        )
                    if pair[1] in qId2idx:
                        instances[qId2idx[pair[1]]]["complementary"] = qId2idx.get(
                            pair[0], None
                        )
            random.shuffle(instances)
            self.writeInstances(instances, instancesFilename)
        return instances
def filterUnk(self, dataset, tier, filterInstances):
print("filtering unknown answers " + tier)
totalScore = 0.0
numQuestions = float(len(dataset["instances"]))
for instance in dataset["instances"]:
instance["answerFreq"] = [
answer
for answer in instance["answerFreq"]
if answer in self.answerDict.sym2id
]
answersSet = {}
for answer in instance["answerFreq"]:
answersSet[answer] = answersSet.get(answer, 0) + 1
bestCount = 0
for answer in answersSet:
if answersSet[answer] > bestCount:
bestCount = answersSet[answer]
totalScore += min(bestCount * 0.3, 1)
print("max score {}".format(totalScore / numQuestions))
if filterInstances:
if config.lossType in ["softmax", "svm", "probSoftmax"]:
dataset["instances"] = [
instance
for instance in dataset["instances"]
if instance["answer"] in self.answerDict.sym2id
]
# else:
dataset["instances"] = [
instance
for instance in dataset["instances"]
if len(instance["answerFreq"]) > 0
]
    # Reads Visual Genome QA entries and creates a json dictionary.
def readVG(
self, datasetFilenames, instancesFilename, tier, updateVocab, imageIndex=None
):
instances = []
if os.path.exists(instancesFilename):
instances = self.readInstances(instancesFilename)
else:
with open(datasetFilenames[0], "r") as datasetFile:
data = json.load(datasetFile)
for i in tqdm(range(len(data)), desc="Preprocessing"):
instance = data[i]
for q in instance["qas"]:
questionStr = q["question"]
question = self.processText(questionStr)
if updateVocab or (not config.wrdEmbQUnk):
self.questionDict.addSymbols(question)
self.qaDict.addSymbols(question)
answer = instance.get("answer", "yes") # DUMMY_ANSWER
if updateVocab or (not config.wrdEmbAUnk):
self.answerDict.addSymbols(answer)
self.qaDict.addSymbols(answer)
# pass other fields to instance?
instances.append(
{
"questionStr": questionStr,
"question": question,
"answer": answer,
"imageId": {"group": tier, "id": 0, "idx": 0},
"tier": tier,
"index": i,
}
)
random.shuffle(instances)
self.writeInstances(instances, instancesFilename)
return instances
def readV7W(
self, datasetFilenames, instancesFilename, tier, updateVocab, imageIndex=None
):
instances = []
if os.path.exists(instancesFilename):
instances = self.readInstances(instancesFilename)
else:
with open(datasetFilenames[0], "r") as datasetFile:
data = json.load(datasetFile)["images"]
for i in tqdm(range(len(data)), desc="Preprocessing"):
instance = data[i]
for q in instance["qa_pairs"]:
questionStr = q["question"]
question = self.processText(questionStr)
if updateVocab or (not config.wrdEmbQUnk):
self.questionDict.addSymbols(question)
self.qaDict.addSymbols(question)
answer = instance.get("answer", "yes") # DUMMY_ANSWER
if updateVocab or (not config.wrdEmbAUnk):
self.answerDict.addSymbols(answer)
self.qaDict.addSymbols(answer)
instances.append(
{
"questionStr": questionStr,
"question": question,
"answer": answer,
"imageId": {
"group": tier,
"id": instance["image_id"],
"idx": instance["image_id"],
},
"tier": tier,
"index": i,
}
)
random.shuffle(instances)
self.writeInstances(instances, instancesFilename)
return instances
def readCLEVR(
self, datasetFilenames, instancesFilename, tier, updateVocab, imageIndex=None
):
instances = []
if os.path.exists(instancesFilename):
instances = self.readInstances(instancesFilename)
else:
with open(datasetFilenames[0], "r") as datasetFile:
data = json.load(datasetFile)["questions"]
for i in tqdm(range(len(data)), desc="Preprocessing"):
instance = data[i]
questionStr = instance["question"]
question = self.processText(questionStr)
if updateVocab or (not config.wrdEmbQUnk):
self.questionDict.addSymbols(question)
self.qaDict.addSymbols(question)
answer = instance.get("answer", "yes") # DUMMY_ANSWER
if updateVocab or (not config.wrdEmbAUnk):
self.answerDict.addSymbols(answer)
self.qaDict.addSymbols(answer)
dummyProgram = [{"function": "FUNC", "value_inputs": [], "inputs": []}]
program = instance.get("program", dummyProgram)
postfixProgram = self.programTranslator.programToPostfixProgram(program)
programSeq = self.programTranslator.programToSeq(postfixProgram)
programInputs = self.programTranslator.programToInputs(
postfixProgram, offset=2
)
instances.append(
{
"questionStr": questionStr,
"question": question,
"answer": answer,
"imageId": {
"group": tier,
"id": instance["image_index"],
"idx": instance["image_index"],
},
"program": program,
"programSeq": programSeq,
"programInputs": programInputs,
"tier": tier,
"index": i,
}
)
random.shuffle(instances)
self.writeInstances(instances, instancesFilename)
return instances
def readGQA(
self, datasetFilenames, instancesFilename, tier, updateVocab, imageIndex=None
):
instances = []
if os.path.exists(instancesFilename):
instances = self.readInstances(instancesFilename)
else:
data = []
for vf in datasetFilenames:
with open(vf, "r") as datasetFile:
raw_data = json.load(datasetFile)
data += [
{
"questionId": qid,
"group": val["types"]["detailed"],
"answer": val["answer"],
"type": val["types"]["structural"],
"fullAnswer": val["fullAnswer"],
"question": val["question"],
"imageId": val["imageId"],
"semanticStr": val["semanticStr"]
if tier not in ("train", "val")
else None,
"semantic": val["semantic"]
if tier not in ("train", "val")
else None,
}
for qid, val in raw_data.items()
]
for i in tqdm(range(len(data)), desc="Preprocessing"):
instance = data[i]
questionStr = instance["question"]
question = self.processText(questionStr)
if updateVocab or (not config.wrdEmbQUnk):
self.questionDict.addSymbols(question)
self.qaDict.addSymbols(question)
answer = instance.get("answer", "yes") # DUMMY_ANSWER
if updateVocab or (not config.wrdEmbQUnk):
self.answerDict.addSymbols(answer)
self.qaDict.addSymbols(answer)
imageId = instance["imageId"]
imageInfo = imageIndex[str(imageId)]
instances.append(
| |
B O 1
ATOM 3234 C CB . ALA B 1 197 ? 46.134 -45.681 16.710 1.00 16.70 ? 198 ALA B CB 1
ATOM 3235 N N . LEU B 1 198 ? 45.625 -44.929 19.735 1.00 14.13 ? 199 LEU B N 1
ATOM 3236 C CA . LEU B 1 198 ? 46.159 -44.455 21.041 1.00 14.99 ? 199 LEU B CA 1
ATOM 3237 C C . LEU B 1 198 ? 45.053 -44.448 22.091 1.00 14.98 ? 199 LEU B C 1
ATOM 3238 O O . LEU B 1 198 ? 43.844 -44.264 21.784 1.00 16.91 ? 199 LEU B O 1
ATOM 3239 C CB . LEU B 1 198 ? 46.707 -43.035 20.883 1.00 15.52 ? 199 LEU B CB 1
ATOM 3240 C CG . LEU B 1 198 ? 47.804 -42.805 19.852 1.00 15.77 ? 199 LEU B CG 1
ATOM 3241 C CD1 . LEU B 1 198 ? 48.149 -41.322 19.641 1.00 15.48 ? 199 LEU B CD1 1
ATOM 3242 C CD2 . LEU B 1 198 ? 49.078 -43.551 20.257 1.00 16.04 ? 199 LEU B CD2 1
ATOM 3243 N N . GLY B 1 199 ? 45.456 -44.596 23.347 1.00 15.84 ? 200 GLY B N 1
ATOM 3244 C CA . GLY B 1 199 ? 44.629 -44.304 24.492 1.00 14.62 ? 200 GLY B CA 1
ATOM 3245 C C . GLY B 1 199 ? 44.009 -42.920 24.470 1.00 15.48 ? 200 GLY B C 1
ATOM 3246 O O . GLY B 1 199 ? 44.603 -41.997 23.827 1.00 13.52 ? 200 GLY B O 1
ATOM 3247 N N . GLU B 1 200 ? 42.838 -42.764 25.109 1.00 15.60 ? 201 GLU B N 1
ATOM 3248 C CA . GLU B 1 200 ? 42.080 -41.529 24.962 1.00 17.52 ? 201 GLU B CA 1
ATOM 3249 C C . GLU B 1 200 ? 42.925 -40.305 25.383 1.00 16.49 ? 201 GLU B C 1
ATOM 3250 O O . GLU B 1 200 ? 42.946 -39.347 24.641 1.00 17.70 ? 201 GLU B O 1
ATOM 3251 C CB . GLU B 1 200 ? 40.756 -41.597 25.681 1.00 20.96 ? 201 GLU B CB 1
ATOM 3252 C CG . GLU B 1 200 ? 39.818 -40.438 25.413 1.00 23.73 ? 201 GLU B CG 1
ATOM 3253 C CD . GLU B 1 200 ? 38.517 -40.539 26.180 1.00 29.50 ? 201 GLU B CD 1
ATOM 3254 O OE1 . GLU B 1 200 ? 38.261 -41.592 26.816 1.00 33.57 ? 201 GLU B OE1 1
ATOM 3255 O OE2 . GLU B 1 200 ? 37.728 -39.563 26.123 1.00 34.56 ? 201 GLU B OE2 1
ATOM 3256 N N . THR B 1 201 ? 43.598 -40.368 26.531 1.00 16.60 ? 202 THR B N 1
ATOM 3257 C CA . THR B 1 201 ? 44.507 -39.263 26.971 1.00 17.34 ? 202 THR B CA 1
ATOM 3258 C C . THR B 1 201 ? 45.596 -38.916 25.955 1.00 17.05 ? 202 THR B C 1
ATOM 3259 O O . THR B 1 201 ? 45.847 -37.728 25.658 1.00 14.69 ? 202 THR B O 1
ATOM 3260 C CB . THR B 1 201 ? 45.157 -39.580 28.323 1.00 20.73 ? 202 THR B CB 1
ATOM 3261 O OG1 . THR B 1 201 ? 44.099 -39.864 29.224 1.00 25.22 ? 202 THR B OG1 1
ATOM 3262 C CG2 . THR B 1 201 ? 46.083 -38.394 28.815 1.00 19.61 ? 202 THR B CG2 1
ATOM 3263 N N . GLN B 1 202 ? 46.195 -39.961 25.384 1.00 14.65 ? 203 GLN B N 1
ATOM 3264 C CA . GLN B 1 202 ? 47.284 -39.805 24.433 1.00 15.65 ? 203 GLN B CA 1
ATOM 3265 C C . GLN B 1 202 ? 46.756 -39.260 23.093 1.00 14.71 ? 203 GLN B C 1
ATOM 3266 O O . GLN B 1 202 ? 47.360 -38.392 22.453 1.00 12.31 ? 203 GLN B O 1
ATOM 3267 C CB . GLN B 1 202 ? 48.009 -41.118 24.203 1.00 16.62 ? 203 GLN B CB 1
ATOM 3268 C CG . GLN B 1 202 ? 48.778 -41.682 25.362 1.00 17.84 ? 203 GLN B CG 1
ATOM 3269 C CD . GLN B 1 202 ? 47.890 -42.183 26.466 1.00 18.24 ? 203 GLN B CD 1
ATOM 3270 O OE1 . GLN B 1 202 ? 46.826 -42.768 26.192 1.00 15.96 ? 203 GLN B OE1 1
ATOM 3271 N NE2 . GLN B 1 202 ? 48.321 -41.985 27.699 1.00 19.21 ? 203 GLN B NE2 1
ATOM 3272 N N . ARG B 1 203 ? 45.595 -39.755 22.658 1.00 13.69 ? 204 ARG B N 1
ATOM 3273 C CA . ARG B 1 203 ? 44.975 -39.198 21.425 1.00 14.30 ? 204 ARG B CA 1
ATOM 3274 C C . ARG B 1 203 ? 44.690 -37.715 21.604 1.00 13.91 ? 204 ARG B C 1
ATOM 3275 O O . ARG B 1 203 ? 45.017 -36.877 20.727 1.00 12.20 ? 204 ARG B O 1
ATOM 3276 C CB . ARG B 1 203 ? 43.708 -39.970 21.022 1.00 14.56 ? 204 ARG B CB 1
ATOM 3277 C CG . ARG B 1 203 ? 42.973 -39.410 19.823 1.00 16.41 ? 204 ARG B CG 1
ATOM 3278 C CD . ARG B 1 203 ? 41.695 -40.187 19.539 1.00 18.05 ? 204 ARG B CD 1
ATOM 3279 N NE . ARG B 1 203 ? 40.615 -39.837 20.427 1.00 19.52 ? 204 ARG B NE 1
ATOM 3280 C CZ . ARG B 1 203 ? 39.375 -40.361 20.325 1.00 22.40 ? 204 ARG B CZ 1
ATOM 3281 N NH1 . ARG B 1 203 ? 39.062 -41.269 19.390 1.00 21.43 ? 204 ARG B NH1 1
ATOM 3282 N NH2 . ARG B 1 203 ? 38.418 -39.963 21.172 1.00 23.14 ? 204 ARG B NH2 1
ATOM 3283 N N . ALA B 1 204 ? 44.174 -37.378 22.765 1.00 12.99 ? 205 ALA B N 1
ATOM 3284 C CA . ALA B 1 204 ? 43.879 -35.980 23.090 1.00 13.91 ? 205 ALA B CA 1
ATOM 3285 C C . ALA B 1 204 ? 45.148 -35.154 23.059 1.00 11.76 ? 205 ALA B C 1
ATOM 3286 O O . ALA B 1 204 ? 45.103 -34.044 22.575 1.00 12.50 ? 205 ALA B O 1
ATOM 3287 C CB . ALA B 1 204 ? 43.225 -35.845 24.474 1.00 14.24 ? 205 ALA B CB 1
ATOM 3288 N N . GLN B 1 205 ? 46.217 -35.695 23.621 1.00 12.67 ? 206 GLN B N 1
ATOM 3289 C CA . GLN B 1 205 ? 47.498 -34.963 23.676 1.00 12.00 ? 206 GLN B CA 1
ATOM 3290 C C . GLN B 1 205 ? 48.049 -34.711 22.279 1.00 12.43 ? 206 GLN B C 1
ATOM 3291 O O . GLN B 1 205 ? 48.526 -33.633 21.998 1.00 11.63 ? 206 GLN B O 1
ATOM 3292 C CB . GLN B 1 205 ? 48.532 -35.672 24.532 1.00 12.61 ? 206 GLN B CB 1
ATOM 3293 C CG . GLN B 1 205 ? 49.848 -34.921 24.690 1.00 13.59 ? 206 GLN B CG 1
ATOM 3294 C CD . GLN B 1 205 ? 49.684 -33.581 25.341 1.00 14.42 ? 206 | |
<filename>solution.py
import time
import random
from collections import defaultdict
import numpy as np
import copy
import multiprocessing
def process_generate_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
    """Build one schedule from scratch (deterministic mode) and record it.

    Runs ``generation_scratch`` once with randomisation disabled (last arg 0),
    adds a large penalty (99999) to the makespan for every priority job whose
    pending-time constraint is violated, then stores the (possibly penalised)
    makespan and the full schedule data under ``coeff_tardiness`` in the
    shared result dicts.
    """
    (makespan, op_seq_machines, job_seq_machines, start_time_op_macs,
     end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops,
     flag_scheduled_ops, flag_scheduled_jobs) = schedule_agent.generation_scratch(
        cur_time, job_status, machine_status, coeff_tardiness, 0)
    for job_idx in range(schedule_agent.num_job):
        job_name = schedule_agent.name_jobs[job_idx]
        job_type = schedule_agent.type_jobs[job_idx]
        job_priority = job_status[job_name]['priority']
        n_ops = int(schedule_agent.num_op_jobs[job_idx])
        first_op = schedule_agent.idx_first_op_jobs[job_idx]
        for op_pos in range(n_ops):
            op_idx = int(first_op + op_pos)
            if job_priority <= 0:
                # Pending-time constraints only apply to priority jobs.
                continue
            max_pend = schedule_agent.job_types[job_type][op_pos]['max_pend_time']
            if op_pos == 0:
                # The first operation waits from the job's arrival time.
                ready_time = schedule_agent.arrival_time_jobs[job_idx]
                if ready_time < 0:
                    print('wrong arrival time')
            else:
                # Later operations wait from the previous operation's finish.
                ready_time = end_time_ops[op_idx - 1]
            if start_time_ops[op_idx] - ready_time > max_pend:
                print('violate the pending contraint')
                makespan = makespan + 99999
    makespan_dict[coeff_tardiness] = makespan
    data_ct_dict[coeff_tardiness] = [op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs]
# def process_generate_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict):
def process_generate_resume(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
    """Rebuild the schedule from the current machine state (deterministic mode).

    Runs ``generation_resume`` once with randomisation disabled (last arg 0),
    penalises the makespan (99999 per violation) for every priority job whose
    pending-time constraint is broken, and stores the makespan plus the full
    resumed-schedule data under ``coeff_tardiness`` in the shared dicts.
    """
    (makespan, new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs,
     new_end_time_op_macs, new_start_time_ops, new_end_time_ops,
     new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs,
     new_flag_scheduled_jobs) = schedule_agent.generation_resume(
        cur_time, job_status, machine_status, coeff_tardiness, 0)
    for job_idx in range(schedule_agent.num_job):
        job_name = schedule_agent.name_jobs[job_idx]
        job_type = schedule_agent.type_jobs[job_idx]
        job_priority = job_status[job_name]['priority']
        n_ops = int(schedule_agent.num_op_jobs[job_idx])
        first_op = schedule_agent.idx_first_op_jobs[job_idx]
        for op_pos in range(n_ops):
            op_idx = int(first_op + op_pos)
            if job_priority <= 0:
                # Pending-time constraints only apply to priority jobs.
                continue
            max_pend = schedule_agent.job_types[job_type][op_pos]['max_pend_time']
            if op_pos == 0:
                # The first operation waits from the job's arrival time.
                ready_time = schedule_agent.arrival_time_jobs[job_idx]
                if ready_time < 0:
                    print('wrong arrival time')
            else:
                # Later operations wait from the previous operation's finish.
                ready_time = new_end_time_ops[op_idx - 1]
            if new_start_time_ops[op_idx] - ready_time > max_pend:
                print('violate the pending contraint')
                makespan = makespan + 99999
    makespan_dict[coeff_tardiness] = makespan
    data_ct_dict[coeff_tardiness] = [new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs]
# def process_generate_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict):
def process_iteration_scratch(schedule_agent, ind_process, cur_time, job_status, machine_status, coeff_tardiness, makespan_comp, makespan_dict, data_ct_dict):
    """Worker: sample randomised from-scratch schedules for ~12s, keep the best.

    Starts from the baseline ``makespan_comp`` and repeatedly calls
    ``generation_scratch`` in randomised mode (last arg 1).  Each candidate
    that beats the current best is checked against the pending-time
    constraints of priority jobs; every violation adds a 99999 penalty before
    the final comparison.  The winning makespan and its schedule data are
    stored under ``ind_process`` in the shared dicts.

    NOTE(review): when ``makespan_comp`` is -1 the outer test accepts the
    candidate, but the inner ``makespan < makespan_min`` still compares
    against -1 and can never succeed, so nothing is ever saved and the result
    lists stay None -- confirm callers never pass -1 here.
    """
    makespan_min = makespan_comp
    op_seq_machines_min = None
    job_seq_machines_min = None
    start_time_op_macs_min = None
    end_time_op_macs_min = None
    start_time_ops_min = None
    end_time_ops_min = None
    mac_assignment_ops_min = None
    flag_scheduled_ops_min = None
    flag_scheduled_jobs_min = None
    start_time_iter = time.time()
    elapsed_time_iter = 0
    # Time-boxed local search: keep sampling for roughly 12 wall-clock seconds.
    while elapsed_time_iter < 12: # for ind_iter in range(7):
        makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs = schedule_agent.generation_scratch(cur_time, job_status, machine_status, coeff_tardiness, 1)
        if makespan_min == -1 or makespan < makespan_min:
            # Only promising candidates are validated against pending-time limits.
            for ind_job_check in range(schedule_agent.num_job):
                name_job_check = schedule_agent.name_jobs[ind_job_check]
                type_job_check = schedule_agent.type_jobs[ind_job_check]
                priority_job_check = job_status[name_job_check]['priority']
                # if schedule_agent.flag_scheduled_jobs[ind_job_check] != 1:
                #     print('unscheduled job')
                # # if flag_scheduled_jobs[flag_scheduled_jobs] != 0:
                num_op_job_check = int( schedule_agent.num_op_jobs[ind_job_check] )
                idx_first_op_job_check = schedule_agent.idx_first_op_jobs[ind_job_check]
                for ind_op_job_check in range(num_op_job_check):
                    op_check = int( idx_first_op_job_check + ind_op_job_check )
                    # if self.flag_scheduled_ops[op_check] != 1:
                    #     print('unscheduled_ operation')
                    # # if flag_scheduled_ops[idx_first_op_job_check + ind_op_job_check] != 1:
                    # if ind_op_job_check > 0:
                    #     if self.end_time_ops[op_check - 1] > self.start_time_ops[op_check]:
                    #         print('incorrect start time')
                    # # if end_time_ops[op_check - 1] > start_time_ops[op_check]:
                    # # if ind_op_job_check > 0:
                    if priority_job_check > 0:
                        pending_constraint_op = schedule_agent.job_types[type_job_check][ind_op_job_check]['max_pend_time']
                        time_comp = -1
                        if ind_op_job_check == 0:
                            # First operation waits from the job's arrival time.
                            time_comp = schedule_agent.arrival_time_jobs[ind_job_check]
                            if time_comp < 0:
                                print('wrong arrival time')
                            # if time_comp <= 0:
                        else: # not the first operation of this job
                            time_comp = end_time_ops[op_check - 1]
                        # if ind_op_job_check == 0:
                        if start_time_ops[op_check] - time_comp > pending_constraint_op:
                            # Penalise violations so this candidate loses the comparison.
                            print('violate the pending contraint')
                            makespan = makespan + 99999
                        # penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (schedule_agent.start_time_ops[op_check] - time_comp - pending_constraint_op)
                        # if end_time_ops[op_check] - time_comp >= pending_constraint_op:
                # if job_status[name_job_check]['priority'] > 0:
                # for ind_op_job_check in range(num_op_job_check):
            # for ind_job_check in range(self.num_job):
            if makespan < makespan_min:
                # New best: deep-copy everything so later iterations can't mutate it.
                makespan_min = makespan
                op_seq_machines_min = copy.deepcopy(op_seq_machines)
                job_seq_machines_min = copy.deepcopy(job_seq_machines)
                start_time_op_macs_min = copy.deepcopy(start_time_op_macs)
                end_time_op_macs_min = copy.deepcopy(end_time_op_macs)
                start_time_ops_min = copy.deepcopy(start_time_ops)
                end_time_ops_min = copy.deepcopy(end_time_ops)
                mac_assignment_ops_min = copy.deepcopy(mac_assignment_ops)
                flag_scheduled_ops_min = copy.deepcopy(flag_scheduled_ops)
                flag_scheduled_jobs_min = copy.deepcopy(flag_scheduled_jobs)
            # makespan < makespan_min:
        # if makespan_min == -1 or makespan < makespan_min:
        elapsed_time_iter = time.time() - start_time_iter
    # while elapsed_time_iter < 14:
    makespan_dict[ind_process] = makespan_min
    data_ct_dict[ind_process] = [op_seq_machines_min, job_seq_machines_min, start_time_op_macs_min, end_time_op_macs_min, start_time_ops_min, end_time_ops_min, mac_assignment_ops_min, flag_scheduled_ops_min, flag_scheduled_jobs_min]
# def process_iteration_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
def process_iteration_resume(schedule_agent, ind_process, cur_time, job_status, machine_status, coeff_tardiness, makespan_comp, makespan_dict, data_dict):
    """Worker: sample randomised resumed schedules for ~12s, keep the best.

    Same time-boxed search as ``process_iteration_scratch`` but calls
    ``generation_resume`` so the current machine state is respected, and it
    additionally tracks ``count_scheduled_op_macs``.  The winning makespan and
    its schedule data are stored under ``ind_process`` in the shared dicts.

    NOTE(review): as in ``process_iteration_scratch``, a ``makespan_comp`` of
    -1 means the inner ``makespan < makespan_min`` can never succeed and the
    result lists stay None -- confirm callers never pass -1 here.
    """
    makespan_min = makespan_comp
    op_seq_machines_min = None
    job_seq_machines_min = None
    start_time_op_macs_min = None
    end_time_op_macs_min = None
    start_time_ops_min = None
    end_time_ops_min = None
    mac_assignment_ops_min = None
    flag_scheduled_ops_min = None
    count_scheduled_op_macs_min = None
    flag_scheduled_jobs_min = None
    start_time_iter = time.time()
    elapsed_time_iter = 0
    # Time-boxed local search: keep sampling for roughly 12 wall-clock seconds.
    while elapsed_time_iter < 12: # for ind_iter in range(8):
        makespan, new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs = schedule_agent.generation_resume(cur_time, job_status, machine_status, coeff_tardiness, 1)
        # makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs = schedule_agent.generation_scratch(cur_time, job_status, machine_status, coeff_tardiness, 1)
        if makespan_min == -1 or makespan < makespan_min:
            # Only promising candidates are validated against pending-time limits.
            for ind_job_check in range(schedule_agent.num_job):
                name_job_check = schedule_agent.name_jobs[ind_job_check]
                type_job_check = schedule_agent.type_jobs[ind_job_check]
                priority_job_check = job_status[name_job_check]['priority']
                # if schedule_agent.flag_scheduled_jobs[ind_job_check] != 1:
                #     print('unscheduled job')
                # # if flag_scheduled_jobs[flag_scheduled_jobs] != 0:
                num_op_job_check = int( schedule_agent.num_op_jobs[ind_job_check] )
                idx_first_op_job_check = schedule_agent.idx_first_op_jobs[ind_job_check]
                for ind_op_job_check in range(num_op_job_check):
                    op_check = int( idx_first_op_job_check + ind_op_job_check )
                    # if self.flag_scheduled_ops[op_check] != 1:
                    #     print('unscheduled_ operation')
                    # # if flag_scheduled_ops[idx_first_op_job_check + ind_op_job_check] != 1:
                    # if ind_op_job_check > 0:
                    #     if self.end_time_ops[op_check - 1] > self.start_time_ops[op_check]:
                    #         print('incorrect start time')
                    # # if end_time_ops[op_check - 1] > start_time_ops[op_check]:
                    # # if ind_op_job_check > 0:
                    if priority_job_check > 0:
                        pending_constraint_op = schedule_agent.job_types[type_job_check][ind_op_job_check]['max_pend_time']
                        time_comp = -1
                        if ind_op_job_check == 0:
                            # First operation waits from the job's arrival time.
                            time_comp = schedule_agent.arrival_time_jobs[ind_job_check]
                            if time_comp < 0:
                                print('wrong arrival time')
                            # if time_comp <= 0:
                        else: # not the first operation of this job
                            time_comp = new_end_time_ops[op_check - 1]
                        # if ind_op_job_check == 0:
                        if new_start_time_ops[op_check] - time_comp > pending_constraint_op:
                            # Penalise violations so this candidate loses the comparison.
                            print('violate the pending contraint')
                            makespan = makespan + 99999
                        # penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (schedule_agent.start_time_ops[op_check] - time_comp - pending_constraint_op)
                        # if end_time_ops[op_check] - time_comp >= pending_constraint_op:
                # if job_status[name_job_check]['priority'] > 0:
                # for ind_op_job_check in range(num_op_job_check):
            # for ind_job_check in range(self.num_job):
            if makespan < makespan_min:
                # New best: deep-copy everything so later iterations can't mutate it.
                makespan_min = makespan
                op_seq_machines_min = copy.deepcopy(new_op_seq_machines)
                job_seq_machines_min = copy.deepcopy(new_job_seq_machines)
                start_time_op_macs_min = copy.deepcopy(new_start_time_op_macs)
                end_time_op_macs_min = copy.deepcopy(new_end_time_op_macs)
                start_time_ops_min = copy.deepcopy(new_start_time_ops)
                end_time_ops_min = copy.deepcopy(new_end_time_ops)
                mac_assignment_ops_min = copy.deepcopy(new_mac_assignment_ops)
                flag_scheduled_ops_min = copy.deepcopy(new_flag_scheduled_ops)
                count_scheduled_op_macs_min = copy.deepcopy(count_scheduled_op_macs)
                flag_scheduled_jobs_min = copy.deepcopy(new_flag_scheduled_jobs)
            # if makespan < makespan_min:
        # if makespan_min == -1 or makespan < makespan_min:
        elapsed_time_iter = time.time() - start_time_iter
    # while elapsed_time_iter < 13:
    makespan_dict[ind_process] = makespan_min
    data_dict[ind_process] = [op_seq_machines_min, job_seq_machines_min, start_time_op_macs_min, end_time_op_macs_min, start_time_ops_min, end_time_ops_min, mac_assignment_ops_min, flag_scheduled_ops_min, count_scheduled_op_macs_min, flag_scheduled_jobs_min]
# def process_iteration_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
class Trainer:
    """Thin wrapper that instantiates an environment and returns an Agent.

    No actual learning happens here: ``train`` only resets the first
    configured environment and hands its static job/machine descriptions to a
    freshly constructed Agent.
    """

    def __init__(self, Env, conf_list):
        # Keep the environment factory and its configurations for train().
        self.conf_list = conf_list
        self.Env = Env
        self.checkpoint = None
        self.iter = 0

    def train(self, run_time):
        """Build the first environment, reset it, and wrap its data in an Agent."""
        environment = self.Env(self.conf_list[0])
        machine_status, job_status, t, job_list = environment.reset()
        return Agent(environment.job_types, environment.machines)
# class Trainer:
class Agent:
def __init__(self, job_types, machines):
self.machines = machines
self.job_types = job_types
self.total_num_ops = -1
self.total_num_machines = 0
self.perc_pend_time = 0.8
self.set_coeff_tardiness = [1.0, 0.5, 0.2, 0.1, 0.05, 0.01]
# coeff_tardiness = 0.5
# num_cpu = multiprocessing.cpu_count()
num_op_job_types = {} # np.zeros(num_job_type)
key_dict_job_types = self.job_types.keys()
num_job_types = len(key_dict_job_types)
keys_job_types = []
hash_ind_job_types = {}
ind_job_type = 0
for key_job_type in key_dict_job_types:
keys_job_types.append(key_job_type)
num_op_temp = len(self.job_types[key_job_type])
num_op_job_types[key_job_type] = num_op_temp
hash_ind_job_types[key_job_type] = ind_job_type
ind_job_type = ind_job_type + 1
# for key_job_type in keys_job_type:
num_kind_mac = len(self.machines)
num_machine_types = {}
hash_ind_mac_types = {}
name_macs = []
idx_mac_types = {}
count_ind_mac = 0
for machine_temp in self.machines:
num_machine_types[machine_temp] = len(self.machines[machine_temp])
idx_mac_types[machine_temp] | |
<reponame>TiankunZhou/cctbx_project
from __future__ import absolute_import, division, print_function
import os
import iotbx.phil
import libtbx.load_env
import libtbx.phil
import mmtbx.model
from cctbx import crystal
from libtbx.program_template import ProgramTemplate
from libtbx.utils import Sorry
from iotbx.data_manager import DataManager
from six.moves import zip
# -----------------------------------------------------------------------------
def test_data_manager():
  """Exercise DataManager bookkeeping for models, phil scopes and sequences.

  Plain strings stand in for model/phil/sequence objects, so this covers only
  the registry behaviour (add/get/remove, defaults, counting, phil
  export/import and file writing), not actual parsing.
  """
  a = DataManager(['model'])
  a.add_model('a', 'b')
  a.add_model('c', 'd')
  assert a.get_model() == 'b'  # the first model added becomes the default
  assert a.get_model('a') == 'b'
  assert a.get_model('c') == 'd'
  assert a.get_model_names() == ['a', 'c']
  assert a.has_models()
  assert a.has_models(exact_count=True, expected_n=2)
  assert not a.has_models(expected_n=3, raise_sorry=False)
  # exporting phil
  working_phil = a.export_phil_scope()
  assert len(working_phil.extract().data_manager.model) == 2
  # data tracking
  # NOTE(review): these try/except blocks only verify that Sorry *may* be
  # raised; if no exception occurs the test silently passes anyway.
  try:
    a.has_models(expected_n=3, raise_sorry=True)
  except Sorry:
    pass
  try:
    a.has_models(exact_count=True, raise_sorry=True)
  except Sorry:
    pass
  a.set_default_model('c')
  assert a.get_model() == 'd'
  assert a.get_model_names() == ['a', 'c'] or a.get_model_names() == ['c', 'a']
  a.remove_model('c')
  try:
    a.get_model()
  except Sorry:
    pass
  try:
    a.get_model('missing')
  except Sorry:
    pass
  try:
    a.set_default_model('missing')
  except Sorry:
    pass
  # a DataManager built without 'model' must not expose the model API at all
  a = DataManager(datatypes=['sequence', 'phil'])
  assert a.get_sequence_names() == []
  assert not hasattr(a, 'get_model')
  # phil functions
  test_phil_str = '''
data_manager {
  phil_files = data_manager_test.eff
}
'''
  with open('data_manager_test.eff', 'w') as f:
    f.write(test_phil_str)
  # loading file with get function
  assert len(a.get_phil_names()) == 0
  p = a.get_phil('data_manager_test.eff')
  assert type(p) == libtbx.phil.scope
  assert 'data_manager_test.eff' in a.get_phil_names()
  # loading file with phil
  a = DataManager(datatypes=['phil'])
  test_phil = iotbx.phil.parse(test_phil_str)
  a.load_phil_scope(test_phil)
  assert 'data_manager_test.eff' in a.get_phil_names()
  assert a.get_default_phil_name() == 'data_manager_test.eff'
  os.remove('data_manager_test.eff')
  # writing
  a = DataManager(datatypes=['model', 'phil', 'sequence'])
  a.add_model('a','b')
  a.add_phil('c','d')
  a.add_sequence('e','f')
  a.write_model_file(a.get_model(), filename='a.dat', overwrite=True)
  a.write_phil_file(a.get_phil(), filename='c.dat', overwrite=True)
  a.write_sequence_file(a.get_sequence(), filename='e.dat', overwrite=True)
  with open('a.dat', 'r') as f:
    lines = f.readlines()
  assert lines[0] == 'b'
  os.remove('a.dat')
  os.remove('c.dat')
  os.remove('e.dat')
# -----------------------------------------------------------------------------
def test_model_datatype():
  """Round-trip model files (PDB and mmCIF) through the DataManager.

  Checks reading/writing in both formats, default output naming
  (cctbx_program.*), the model ``type`` attribute (x_ray/neutron/electron)
  including its phil round-trip, and pdb_interpretation parameter updates.
  Skips itself when the monomer library is unavailable.
  """
  import mmtbx.monomer_library.server
  try:
    mon_lib_srv = mmtbx.monomer_library.server.server()
  except mmtbx.monomer_library.server.MonomerLibraryServerError:
    print("Can not initialize monomer_library, skipping test_model_datatype.")
    return
  # 1yjp
  model_str = '''
CRYST1   21.937    4.866   23.477  90.00 107.08  90.00 P 1 21 1      2
ORIGX1      1.000000  0.000000  0.000000        0.00000
ORIGX2      0.000000  1.000000  0.000000        0.00000
ORIGX3      0.000000  0.000000  1.000000        0.00000
SCALE1      0.045585  0.000000  0.014006        0.00000
SCALE2      0.000000  0.205508  0.000000        0.00000
SCALE3      0.000000  0.000000  0.044560        0.00000
ATOM      1  N   GLY A   1      -9.009   4.612   6.102  1.00 16.77           N
ATOM      2  CA  GLY A   1      -9.052   4.207   4.651  1.00 16.57           C
ATOM      3  C   GLY A   1      -8.015   3.140   4.419  1.00 16.16           C
ATOM      4  O   GLY A   1      -7.523   2.521   5.381  1.00 16.78           O
ATOM      5  N   ASN A   2      -7.656   2.923   3.155  1.00 15.02           N
ATOM      6  CA  ASN A   2      -6.522   2.038   2.831  1.00 14.10           C
ATOM      7  C   ASN A   2      -5.241   2.537   3.427  1.00 13.13           C
ATOM      8  O   ASN A   2      -4.978   3.742   3.426  1.00 11.91           O
ATOM      9  CB  ASN A   2      -6.346   1.881   1.341  1.00 15.38           C
ATOM     10  CG  ASN A   2      -7.584   1.342   0.692  1.00 14.08           C
ATOM     11  OD1 ASN A   2      -8.025   0.227   1.016  1.00 17.46           O
ATOM     12  ND2 ASN A   2      -8.204   2.155  -0.169  1.00 11.72           N
ATOM     13  N   ASN A   3      -4.438   1.590   3.905  1.00 12.26           N
ATOM     14  CA  ASN A   3      -3.193   1.904   4.589  1.00 11.74           C
ATOM     15  C   ASN A   3      -1.955   1.332   3.895  1.00 11.10           C
ATOM     16  O   ASN A   3      -1.872   0.119   3.648  1.00 10.42           O
ATOM     17  CB  ASN A   3      -3.259   1.378   6.042  1.00 12.15           C
ATOM     18  CG  ASN A   3      -2.006   1.739   6.861  1.00 12.82           C
ATOM     19  OD1 ASN A   3      -1.702   2.925   7.072  1.00 15.05           O
ATOM     20  ND2 ASN A   3      -1.271   0.715   7.306  1.00 13.48           N
ATOM     21  N   GLN A   4      -1.005   2.228   3.598  1.00 10.29           N
ATOM     22  CA  GLN A   4       0.384   1.888   3.199  1.00 10.53           C
ATOM     23  C   GLN A   4       1.435   2.606   4.088  1.00 10.24           C
ATOM     24  O   GLN A   4       1.547   3.843   4.115  1.00  8.86           O
ATOM     25  CB  GLN A   4       0.656   2.148   1.711  1.00  9.80           C
ATOM     26  CG  GLN A   4       1.944   1.458   1.213  1.00 10.25           C
ATOM     27  CD  GLN A   4       2.504   2.044  -0.089  1.00 12.43           C
ATOM     28  OE1 GLN A   4       2.744   3.268  -0.190  1.00 14.62           O
ATOM     29  NE2 GLN A   4       2.750   1.161  -1.091  1.00  9.05           N
ATOM     30  N   GLN A   5       2.154   1.821   4.871  1.00 10.38           N
ATOM     31  CA  GLN A   5       3.270   2.361   5.640  1.00 11.39           C
ATOM     32  C   GLN A   5       4.594   1.768   5.172  1.00 11.52           C
ATOM     33  O   GLN A   5       4.768   0.546   5.054  1.00 12.05           O
ATOM     34  CB  GLN A   5       3.056   2.183   7.147  1.00 11.96           C
ATOM     35  CG  GLN A   5       1.829   2.950   7.647  1.00 10.81           C
ATOM     36  CD  GLN A   5       1.344   2.414   8.954  1.00 13.10           C
ATOM     37  OE1 GLN A   5       0.774   1.325   9.002  1.00 10.65           O
ATOM     38  NE2 GLN A   5       1.549   3.187  10.039  1.00 12.30           N
ATOM     39  N   ASN A   6       5.514   2.664   4.856  1.00 11.99           N
ATOM     40  CA  ASN A   6       6.831   2.310   4.318  1.00 12.30           C
ATOM     41  C   ASN A   6       7.854   2.761   5.324  1.00 13.40           C
ATOM     42  O   ASN A   6       8.219   3.943   5.374  1.00 13.92           O
ATOM     43  CB  ASN A   6       7.065   3.016   2.993  1.00 12.13           C
ATOM     44  CG  ASN A   6       5.961   2.735   2.003  1.00 12.77           C
ATOM     45  OD1 ASN A   6       5.798   1.604   1.551  1.00 14.27           O
ATOM     46  ND2 ASN A   6       5.195   3.747   1.679  1.00 10.07           N
ATOM     47  N   TYR A   7       8.292   1.817   6.147  1.00 14.70           N
ATOM     48  CA  TYR A   7       9.159   2.144   7.299  1.00 15.18           C
ATOM     49  C   TYR A   7      10.603   2.331   6.885  1.00 15.91           C
ATOM     50  O   TYR A   7      11.041   1.811   5.855  1.00 15.76           O
ATOM     51  CB  TYR A   7       9.061   1.065   8.369  1.00 15.35           C
ATOM     52  CG  TYR A   7       7.665   0.929   8.902  1.00 14.45           C
ATOM     53  CD1 TYR A   7       6.771   0.021   8.327  1.00 15.68           C
ATOM     54  CD2 TYR A   7       7.210   1.756   9.920  1.00 14.80           C
ATOM     55  CE1 TYR A   7       5.480  -0.094   8.796  1.00 13.46           C
ATOM     56  CE2 TYR A   7       5.904   1.649  10.416  1.00 14.33           C
ATOM     57  CZ  TYR A   7       5.047   0.729   9.831  1.00 15.09           C
ATOM     58  OH  TYR A   7       3.766   0.589  10.291  1.00 14.39           O
ATOM     59  OXT TYR A   7      11.358   2.999   7.612  1.00 17.49           O
TER      60      TYR A   7
HETATM   61  O   HOH A   8      -6.471   5.227   7.124  1.00 22.62           O
HETATM   62  O   HOH A   9      10.431   1.858   3.216  1.00 19.71           O
HETATM   63  O   HOH A  10     -11.286   1.756  -1.468  1.00 17.08           O
HETATM   64  O   HOH A  11      11.808   4.179   9.970  1.00 23.99           O
HETATM   65  O   HOH A  12      13.605   1.327   9.198  1.00 26.17           O
HETATM   66  O   HOH A  13      -2.749   3.429  10.024  1.00 39.15           O
HETATM   67  O   HOH A  14      -1.500   0.682  10.967  1.00 43.49           O
MASTER      238    0    0    0    0    0    0    6   66    1    0    1
END
'''
  # test reading/writing PDB
  test_filename = 'test_model.pdb'
  test_output_filename = 'test_model_output.pdb'
  test_eff = 'model.eff'
  dm = DataManager(['model'])
  dm.process_model_str(test_filename, model_str)
  dm.write_model_file(model_str, filename=test_output_filename, overwrite=True)
  m = dm.get_model(test_output_filename)
  assert test_output_filename in dm.get_model_names()
  # writing without a filename falls back to the default program name
  dm.write_model_file(m, overwrite=True)
  pdb_filename = 'cctbx_program.pdb'
  assert os.path.exists(pdb_filename)
  dm.process_model_file(pdb_filename)
  assert not dm.get_model(pdb_filename).input_model_format_cif()
  dm.write_model_file(m, test_filename, overwrite=True)
  # test reading PDB writing CIF
  test_filename = 'test_model.pdb'
  test_output_filename = 'test_model.cif'
  dm = DataManager(['model'])
  dm.process_model_str(test_filename, model_str)
  m = dm.get_model(test_filename)
  dm.write_model_file(m, filename=test_output_filename, format='cif',
    overwrite=True)
  m = dm.get_model(test_output_filename)
  assert test_output_filename in dm.get_model_names()
  dm.write_model_file(m, overwrite=True)
  cif_filename = 'cctbx_program.cif'
  assert os.path.exists(cif_filename)
  dm.process_model_file(cif_filename)
  assert dm.get_model(cif_filename).input_model_format_cif()
  # test type
  assert dm.get_model_type() == 'x_ray'
  dm.set_model_type(test_filename, 'neutron')
  assert dm.get_model_type() == 'neutron'
  # the per-model type must survive a phil export/import round trip
  phil_scope = dm.export_phil_scope()
  extract = phil_scope.extract()
  assert extract.data_manager.model[0].type == 'neutron'
  with open(test_eff, 'w') as f:
    f.write(phil_scope.as_str())
  new_phil_scope = iotbx.phil.parse(file_name=test_eff)
  new_dm = DataManager(['model'])
  new_dm.load_phil_scope(new_phil_scope)
  assert new_dm.get_model_type(test_filename) == 'neutron'
  new_dm = DataManager(['model'])
  try:
    new_dm.set_default_model_type('nonsense')
  except Sorry:
    pass
  new_dm.set_default_model_type('electron')
  new_dm.process_model_file(test_filename)
  assert new_dm.get_model_type() == 'electron'
  assert len(new_dm.get_model_names()) == 1
  assert len(new_dm.get_model_names(model_type='electron')) == 1
  assert len(new_dm.get_model_names(model_type='neutron')) == 0
  os.remove(test_eff)
  os.remove(test_filename)
  # test reading/writing CIF
  test_filename = 'test_model_datatype.cif'
  dm.write_model_file(dm.get_model().model_as_mmcif(),
    filename=test_filename, overwrite=True)
  dm.process_model_file(test_filename)
  os.remove(test_filename)
  assert test_filename in dm.get_model_names()
  m = dm.get_model(test_filename)
  dm.write_model_file(m, overwrite=True)
  cif_filename = 'cctbx_program.cif'
  assert os.path.exists(cif_filename)
  dm.process_model_file(cif_filename)
  assert dm.get_model(cif_filename).input_model_format_cif()
  os.remove(pdb_filename)
  os.remove(cif_filename)
  # test pdb_interpretation
  # NOTE(review): test_filename was deleted above; this relies on the
  # DataManager still holding the parsed model in memory.
  extract = mmtbx.model.manager.get_default_pdb_interpretation_params()
  extract.pdb_interpretation.use_neutron_distances = True
  dm.update_pdb_interpretation_for_model(test_filename, extract)
  assert dm.get_model(test_filename).restraints_manager is None
# -----------------------------------------------------------------------------
def test_model_and_restraint():
# from 3tpj
model_str = '''
CRYST1 104.428 128.690 76.662 90.00 90.00 90.00 C 2 2 21
ATOM 5877 O URE A 403 -37.796 -38.296 5.693 1.00 15.43 O
ATOM 5878 C URE A 403 -36.624 -38.509 5.800 1.00 20.53 C
ATOM 5879 N2 URE A 403 -36.191 -39.836 6.120 1.00 27.82 N
ATOM 5880 N1 URE A 403 -35.679 -37.450 5.644 1.00 21.36 N
ATOM 5881 | |
import argparse
import math
import random
import os
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
import time
from dataset import DeepFashionDataset
from model import Generator, Discriminator, VGGLoss
try:
import wandb
except ImportError:
wandb = None
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from op import conv2d_gradfix
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def data_sampler(dataset, shuffle, distributed):
    """Pick the torch sampler matching the run configuration.

    Distributed runs always get a DistributedSampler; otherwise choose
    random or sequential sampling based on *shuffle*.
    """
    if distributed:
        return data.distributed.DistributedSampler(dataset)
    return data.RandomSampler(dataset) if shuffle else data.SequentialSampler(dataset)
def requires_grad(model, flag=True):
    """Enable or disable gradient tracking for every parameter of *model*."""
    for param in model.parameters():
        param.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
    """Exponential moving average: blend model2's parameters into model1.

    Updates model1 in place: p1 = decay * p1 + (1 - decay) * p2, matched by
    parameter name.
    """
    target = dict(model1.named_parameters())
    source = dict(model2.named_parameters())
    for name in target.keys():
        target[name].data.mul_(decay).add_(source[name].data, alpha=1 - decay)
def sample_data(loader):
    """Yield batches forever by endlessly restarting *loader*."""
    while True:
        yield from loader
def d_logistic_loss(real_pred, fake_pred):
    """Logistic (non-saturating) discriminator loss.

    Penalises low scores on reals via softplus(-real) and high scores on
    fakes via softplus(fake); returns the sum of the two batch means.
    """
    loss_real = F.softplus(-real_pred).mean()
    loss_fake = F.softplus(fake_pred).mean()
    return loss_real + loss_fake
def d_r1_loss(real_pred, real_img):
    """R1 penalty: mean squared gradient norm of D's output w.r.t. real images.

    Gradients through convolution weights are disabled so only the
    image-gradient path is built for the penalty graph.
    """
    with conv2d_gradfix.no_weight_gradients():
        (grad_real,) = autograd.grad(
            outputs=real_pred.sum(), inputs=real_img, create_graph=True
        )
    batch = grad_real.shape[0]
    return grad_real.pow(2).reshape(batch, -1).sum(1).mean()
def g_nonsaturating_loss(fake_pred):
    """Non-saturating generator loss: mean softplus(-D(G(z)))."""
    return F.softplus(-fake_pred).mean()
def set_grad_none(model, targets):
    """Drop stored gradients for every named parameter listed in *targets*."""
    for name, param in model.named_parameters():
        if name in targets:
            param.grad = None
def getFace(images, FT, LP, RP):
    """Crop and align the face region from each image in the batch.

    images: batch of images to extract faces from
    FT: per-image face-alignment transform; an all-zero matrix means no face
        was detected and that image is skipped
    LP: left padding added to the image
    RP: right padding added to the image
    Returns a (num_faces, 3, 112, 96) tensor, or None when no face was found.
    """
    faces = []
    # NOTE(review): the unpack names suggest NHWC, but images are indexed
    # below as images[b][:, :, ...] (channel-first), so w binds shape[2]
    # while the slice bounds the last axis -- only equivalent when the
    # padded images are square; confirm.  The batch-size variable b is also
    # immediately shadowed by the loop index.
    b, h, w, c = images.shape
    for b in range(images.shape[0]):
        if not (abs(FT[b]).sum() == 0): # all 3x3 elements are zero
            # only apply the loss to image with detected faces
            # need to do this per image because images are of different shape
            current_im = images[b][:, :, int(RP[b].item()):w-int(LP[b].item())].unsqueeze(0)
            theta = FT[b].unsqueeze(0)[:, :2] #bx2x3
            grid = torch.nn.functional.affine_grid(theta, (1, 3, 112, 96))
            current_face = torch.nn.functional.grid_sample(current_im, grid)
            faces.append(current_face)
    if len(faces) == 0:
        return None
    return torch.cat(faces, 0)
def train(args, loader, sampler, generator, discriminator, g_optim, d_optim, g_ema, device):
    """Main GAN training loop (pose-conditioned StyleGAN2-style).

    Per batch: one discriminator step, lazy R1 regularisation every
    ``args.d_reg_every`` iterations, one generator step (adversarial + L1 +
    VGG + optional face-identity loss), then an EMA update of ``g_ema``.
    Rank 0 handles logging, image sampling and checkpointing.

    NOTE(review): relies on the module-level ``sphereface_net`` created in
    the __main__ block when ``args.faceloss`` is set.
    """
    pbar = range(args.epoch)
    if get_rank() == 0:
        pbar = tqdm(pbar, initial=args.start_epoch, dynamic_ncols=True, smoothing=0.01)
        pbar.set_description('Epoch Counter')
    # Pre-seed loss holders so logging works before the first optional update.
    d_loss_val = 0
    r1_loss = torch.tensor(0.0, device=device)
    g_loss_val = 0
    g_L1_loss_val = 0
    g_vgg_loss_val = 0
    g_l1 = torch.tensor(0.0, device=device)
    g_vgg = torch.tensor(0.0, device=device)
    g_cos = torch.tensor(0.0, device=device)
    loss_dict = {}
    criterionL1 = torch.nn.L1Loss()
    criterionVGG = VGGLoss(device).to(device)
    if args.faceloss:
        criterionCOS = nn.CosineSimilarity()
    # Unwrap DDP so checkpoints/EMA always use the bare modules.
    if args.distributed:
        g_module = generator.module
        d_module = discriminator.module
    else:
        g_module = generator
        d_module = discriminator
    # EMA decay constant for g_ema (standard StyleGAN2 half-life setting).
    accum = 0.5 ** (32 / (10 * 1000))
    for idx in pbar:
        epoch = idx + args.start_epoch
        if epoch > args.epoch:
            print("Done!")
            break
        if args.distributed:
            sampler.set_epoch(epoch)
        batch_time = AverageMeter()
        #####################################
        ############ START EPOCH ############
        #####################################
        for i, data in enumerate(loader):
            batch_start_time = time.time()
            input_image = data['input_image'].float().to(device)
            real_img = data['target_image'].float().to(device)
            pose = data['target_pose'].float().to(device)
            sil = data['target_sil'].float().to(device)
            LeftPad = data['target_left_pad'].float().to(device)
            RightPad = data['target_right_pad'].float().to(device)
            if args.faceloss:
                FT = data['TargetFaceTransform'].float().to(device)
                real_face = getFace(real_img, FT, LeftPad, RightPad)
            if args.finetune:
                # only mask padding
                sil = torch.zeros((sil.shape)).float().to(device)
                for b in range(sil.shape[0]):
                    w = sil.shape[3]
                    sil[b][:, :, int(RightPad[b].item()):w-int(LeftPad[b].item())] = 1 # mask out the padding
            # else only focus on the foreground - initial step of training
            real_img = real_img * sil
            # appearance = human foregound + fg mask (pass coor for warping)
            source_sil = data['input_sil'].float().to(device)
            complete_coor = data['complete_coor'].float().to(device)
            if args.size == 256:
                complete_coor = torch.nn.functional.interpolate(complete_coor, size=(256, 256), mode='bilinear')
            if args.finetune:
                appearance = torch.cat([input_image, source_sil, complete_coor], 1)
            else:
                appearance = torch.cat([input_image * source_sil, source_sil, complete_coor], 1)
            ############ Optimize Discriminator ############
            requires_grad(generator, False)
            requires_grad(discriminator, True)
            fake_img, _ = generator(appearance=appearance, pose=pose)
            fake_img = fake_img * sil
            fake_pred = discriminator(fake_img, pose=pose)
            real_pred = discriminator(real_img, pose=pose)
            d_loss = d_logistic_loss(real_pred, fake_pred)
            loss_dict["d"] = d_loss
            loss_dict["real_score"] = real_pred.mean()
            loss_dict["fake_score"] = fake_pred.mean()
            discriminator.zero_grad()
            d_loss.backward()
            d_optim.step()
            # Lazy R1 regularisation on real images every d_reg_every steps.
            d_regularize = i % args.d_reg_every == 0
            if d_regularize:
                real_img.requires_grad = True
                real_pred = discriminator(real_img, pose=pose)
                r1_loss = d_r1_loss(real_pred, real_img)
                discriminator.zero_grad()
                # 0 * real_pred[0] keeps the output in the graph for DDP bookkeeping.
                (args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]).backward()
                d_optim.step()
            loss_dict["r1"] = r1_loss
            ############## Optimize Generator ##############
            requires_grad(generator, True)
            requires_grad(discriminator, False)
            fake_img, _ = generator(appearance=appearance, pose=pose)
            fake_img = fake_img * sil
            fake_pred = discriminator(fake_img, pose=pose)
            g_loss = g_nonsaturating_loss(fake_pred)
            loss_dict["g"] = g_loss
            ## reconstruction loss: L1 and VGG loss + face identity loss
            g_l1 = criterionL1(fake_img, real_img)
            g_loss += g_l1
            g_vgg = criterionVGG(fake_img, real_img)
            g_loss += g_vgg
            loss_dict["g_L1"] = g_l1
            loss_dict["g_vgg"] = g_vgg
            if args.faceloss and (real_face is not None):
                # Identity loss: cosine distance between sphereface embeddings.
                fake_face = getFace(fake_img, FT, LeftPad, RightPad)
                features_real_face = sphereface_net(real_face)
                features_fake_face = sphereface_net(fake_face)
                g_cos = 1. - criterionCOS(features_real_face, features_fake_face).mean()
                g_loss += g_cos
                loss_dict["g_cos"] = g_cos
            generator.zero_grad()
            g_loss.backward()
            g_optim.step()
            ############ Optimization Done ############
            accumulate(g_ema, g_module, accum)
            # Average losses across ranks for consistent logging.
            loss_reduced = reduce_loss_dict(loss_dict)
            d_loss_val = loss_reduced["d"].mean().item()
            g_loss_val = loss_reduced["g"].mean().item()
            g_L1_loss_val = loss_reduced["g_L1"].mean().item()
            g_cos_loss_val = loss_reduced["g_cos"].mean().item()
            g_vgg_loss_val = loss_reduced["g_vgg"].mean().item()
            r1_val = loss_reduced["r1"].mean().item()
            real_score_val = loss_reduced["real_score"].mean().item()
            fake_score_val = loss_reduced["fake_score"].mean().item()
            batch_time.update(time.time() - batch_start_time)
            if i % 100 == 0:
                print('Epoch: [{0}/{1}] Iter: [{2}/{3}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(epoch, args.epoch, i, len(loader), batch_time=batch_time)
                      +
                      f"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; g_L1: {g_L1_loss_val:.4f}; g_vgg: {g_vgg_loss_val:.4f}; g_cos: {g_cos_loss_val:.4f}; r1: {r1_val:.4f}; "
                      )
            if get_rank() == 0:
                if wandb and args.wandb:
                    wandb.log(
                        {
                            "Generator": g_loss_val,
                            "Discriminator": d_loss_val,
                            "R1": r1_val,
                            "Real Score": real_score_val,
                            "Fake Score": fake_score_val,
                            "Generator_L1": g_L1_loss_val,
                            "Generator_vgg": g_vgg_loss_val,
                            "Generator_facecos": g_cos_loss_val,
                        }
                    )
                # Periodically render EMA-generator samples from the current batch.
                if i % 5000 == 0:
                    with torch.no_grad():
                        g_ema.eval()
                        sample, _ = g_ema(appearance=appearance[:args.n_sample], pose=pose[:args.n_sample])
                        sample = sample * sil
                        utils.save_image(
                            sample,
                            os.path.join('sample', args.name, f"epoch_{str(epoch)}_iter_{str(i)}.png"),
                            nrow=int(args.n_sample ** 0.5),
                            normalize=True,
                            range=(-1, 1),
                        )
                # Mid-epoch checkpoint on the same cadence as sampling.
                if i % 5000 == 0:
                    torch.save(
                        {
                            "g": g_module.state_dict(),
                            "d": d_module.state_dict(),
                            "g_ema": g_ema.state_dict(),
                            "g_optim": g_optim.state_dict(),
                            "d_optim": d_optim.state_dict(),
                            "args": args,
                        },
                        os.path.join('checkpoint', args.name, f"epoch_{str(epoch)}_iter_{str(i)}.pt"),
                    )
        ###################################
        ############ END EPOCH ############
        ###################################
        # End-of-epoch checkpoint (rank 0 only).
        if get_rank() == 0:
            torch.save(
                {
                    "g": g_module.state_dict(),
                    "d": d_module.state_dict(),
                    "g_ema": g_ema.state_dict(),
                    "g_optim": g_optim.state_dict(),
                    "d_optim": d_optim.state_dict(),
                    "args": args,
                },
                os.path.join('checkpoint', args.name, f"epoch_{str(epoch)}.pt"),
            )
if __name__ == "__main__":
device = "cuda"
parser = argparse.ArgumentParser(description="Pose with Style trainer")
parser.add_argument("path", type=str, help="path to the lmdb dataset")
parser.add_argument("--name", type=str, help="name of experiment")
parser.add_argument("--epoch", type=int, default=50, help="total training epochs")
parser.add_argument("--batch", type=int, default=4, help="batch sizes for each gpus")
parser.add_argument("--workers", type=int, default=4, help="batch sizes for each gpus")
parser.add_argument("--n_sample", type=int, default=4, help="number of the samples generated during training")
parser.add_argument("--size", type=int, default=512, help="image sizes for the model")
parser.add_argument("--r1", type=float, default=10, help="weight of the r1 regularization")
parser.add_argument("--channel_multiplier", type=int, default=2, help="channel multiplier factor for the model. config-f = 2, else = 1")
parser.add_argument(
"--d_reg_every",
type=int,
default=16,
help="interval of the applying r1 regularization",
)
parser.add_argument(
"--g_reg_every",
type=int,
default=4,
help="interval of the applying path length regularization",
)
parser.add_argument("--ckpt", type=str, default=None, help="path to the checkpoints to resume training")
parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
parser.add_argument("--wandb", action="store_true", help="use weights and biases logging")
parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training")
parser.add_argument("--faceloss", action="store_true", help="add face loss when faces are detected")
parser.add_argument("--finetune", action="store_true", help="finetune to handle background- second step of training.")
args = parser.parse_args()
# WORLD_SIZE is set by torch.distributed launchers; more than one process
# enables distributed (DDP) mode.
n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = n_gpu > 1
if args.distributed:
    print ('Distributed Training Mode.')
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    synchronize()
# Only rank 0 creates the output directories (checkpoints and sample images).
if get_rank() == 0:
    if not os.path.exists(os.path.join('checkpoint', args.name)):
        os.makedirs(os.path.join('checkpoint', args.name))
    if not os.path.exists(os.path.join('sample', args.name)):
        os.makedirs(os.path.join('sample', args.name))
# Fixed generator hyper-parameters: latent size and mapping-network depth.
args.latent = 2048
args.n_mlp = 8
args.start_epoch = 0
# Finetuning resumes from an existing model, so --ckpt is mandatory then.
if args.finetune and (args.ckpt is None):
    print ('to finetune the model, please specify --ckpt.')
    import sys
    sys.exit()
# define models
generator = Generator(args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier).to(device)
discriminator = Discriminator(args.size, channel_multiplier=args.channel_multiplier).to(device)
# g_ema holds an averaged copy of the generator weights (updated via
# accumulate()) and is kept in eval mode; initialised as an exact copy
# (decay 0) of the freshly created generator.
g_ema = Generator(args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier).to(device)
g_ema.eval()
accumulate(g_ema, generator, 0)
# Optional face loss: load a pretrained sphereface model and use it as a
# frozen feature extractor (feature = True).
if args.faceloss:
    import sphereface
    sphereface_net = getattr(sphereface, 'sphere20a')()
    sphereface_net.load_state_dict(torch.load(os.path.join(args.path, 'resources', 'sphere20a_20171020.pth')))
    sphereface_net.to(device)
    sphereface_net.eval()
    sphereface_net.feature = True
# Scale lr and betas by reg_every/(reg_every+1) — presumably the usual
# lazy-regularisation correction for applying the penalties only every
# N steps (see --d_reg_every / --g_reg_every above) — TODO confirm.
g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
g_optim = optim.Adam(
    generator.parameters(),
    lr=args.lr * g_reg_ratio,
    betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
)
d_optim = optim.Adam(
    discriminator.parameters(),
    lr=args.lr * d_reg_ratio,
    betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
)
# Resume models, EMA copy and optimizer state from a checkpoint if given.
if args.ckpt is not None:
    print("load model:", args.ckpt)
    # map_location keeps tensors on CPU while loading
    ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
    try:
        ckpt_name = os.path.basename(args.ckpt)
        args.start_epoch = int(os.path.splitext(ckpt_name)[0].split('_')[1])+1 # assuming saving as epoch_1_iter_1000.pt or epoch_1.pt
    except ValueError:
        # filename does not follow the epoch_N naming scheme; keep epoch 0
        pass
    generator.load_state_dict(ckpt["g"])
    discriminator.load_state_dict(ckpt["d"])
    g_ema.load_state_dict(ckpt["g_ema"])
    g_optim.load_state_dict(ckpt["g_optim"])
    d_optim.load_state_dict(ckpt["d_optim"])
# Wrap both models for multi-GPU training; broadcast_buffers is disabled,
# so buffers are not synchronised across processes.
if args.distributed:
    generator = nn.parallel.DistributedDataParallel(
        generator,
        device_ids=[args.local_rank],
        output_device=args.local_rank,
        broadcast_buffers=False,
    )
    discriminator = nn.parallel.DistributedDataParallel(
        discriminator,
        device_ids=[args.local_rank],
        output_device=args.local_rank,
        broadcast_buffers=False,
    )
dataset = | |
# Copyright 2020-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytket Backend for Honeywell devices."""
from ast import literal_eval
import json
from http import HTTPStatus
from typing import Dict, Iterable, List, Optional, Sequence, Any, cast
import numpy as np
import requests
import keyring # type: ignore
from pytket.backends import Backend, ResultHandle, CircuitStatus, StatusEnum
from requests.models import Response
from pytket.backends.backend import KwargTypes
from pytket.backends.resulthandle import _ResultIdTuple
from pytket.backends.backendresult import BackendResult
from pytket.backends.backend_exceptions import CircuitNotRunError
from pytket.circuit import Circuit, OpType, Bit # type: ignore
from pytket.device import Device # type: ignore
from pytket.qasm import circuit_to_qasm_str
from pytket.passes import ( # type: ignore
BasePass,
SequencePass,
SynthesiseIBM,
RemoveRedundancies,
RebaseHQS,
SquashHQS,
FullPeepholeOptimise,
DecomposeBoxes,
DecomposeClassicalExp,
SimplifyInitial,
)
from pytket.predicates import ( # type: ignore
GateSetPredicate,
MaxNQubitsPredicate,
Predicate,
NoSymbolsPredicate,
)
from pytket.routing import FullyConnected # type: ignore
from pytket.utils import prepare_circuit
from pytket.utils.outcomearray import OutcomeArray
from .config import set_honeywell_config
from .api_wrappers import HQSAPIError, HoneywellQAPI
# Result handles created in machine-debug (offline) mode start with this prefix.
_DEBUG_HANDLE_PREFIX = "_MACHINE_DEBUG_"
# Base URL of the Honeywell quantum API.
HONEYWELL_URL_PREFIX = "https://qapi.honeywell.com/v1/"
# Default device name: the API-validation machine.
HONEYWELL_DEVICE_APIVAL = "HQS-LT-1.0-APIVAL"
# Translation of job-status strings returned by the API into pytket StatusEnum.
_STATUS_MAP = {
    "queued": StatusEnum.QUEUED,
    "running": StatusEnum.RUNNING,
    "completed": StatusEnum.COMPLETED,
    "failed": StatusEnum.ERROR,
    "canceling": StatusEnum.CANCELLED,
    "canceled": StatusEnum.CANCELLED,
}
# OpTypes accepted by this backend; used by GateSetPredicate in
# required_predicates and by cost estimation.
_GATE_SET = {
    OpType.Rz,
    OpType.PhasedX,
    OpType.ZZMax,
    OpType.Reset,
    OpType.Measure,
    OpType.Barrier,
    OpType.RangePredicate,
    OpType.MultiBit,
    OpType.ExplicitPredicate,
    OpType.ExplicitModifier,
    OpType.SetBits,
}
class HoneywellBackend(Backend):
"""
Interface to a Honeywell device.
"""
_supports_shots = True
_supports_counts = True
_supports_contextual_optimisation = True
_persistent_handles = True
def __init__(
    self,
    device_name: str = HONEYWELL_DEVICE_APIVAL,
    label: Optional[str] = "job",
    machine_debug: bool = False,
    login: bool = True,
):
    """
    Construct a new Honeywell backend.

    :param device_name: device name, e.g. "HQS-LT-1.0"
    :param label: label prefix applied to submitted jobs
    :param machine_debug: if True, run offline without an API handler
    :param login: forwarded to the API handler (whether to log in now)
    """
    super().__init__()
    self._device_name = device_name
    self._label = label
    # Device info is fetched lazily by the `device` property.
    self._device = None
    # No handler in debug mode; `_MACHINE_DEBUG` tests for exactly this.
    self._api_handler = (
        None if machine_debug else HoneywellQAPI(machine=device_name, login=login)
    )
@property
def _MACHINE_DEBUG(self) -> bool:
    """Whether the backend runs in offline debug mode (no API handler)."""
    return self._api_handler is None

@_MACHINE_DEBUG.setter
def _MACHINE_DEBUG(self, val: bool) -> None:
    """Enable debug mode by dropping the handler; disabling requires one."""
    if not val and self._api_handler is None:
        raise RuntimeError("_MACHINE_DEBUG cannot be False with no _api_handler.")
    if val:
        self._api_handler = None
@classmethod
def available_devices(
    cls, _api_handler: Optional[HoneywellQAPI] = None
) -> List[Dict[str, Any]]:
    """List devices available from Honeywell, e.g.
    ``[{'name': 'HQS-LT-1.0-APIVAL', 'n_qubits': 6}]``.

    :param _api_handler: instance of the API handler; a fresh one is
        created when None.
    :return: dictionaries of machine name and number of qubits
    """
    if _api_handler is None:
        _api_handler = HoneywellQAPI()
    token = _api_handler.login()
    response = requests.get(
        f"{_api_handler.url}machine/?config=true",
        headers={"Authorization": token},
    )
    _api_handler._response_check(response, "get machine list")
    return cast(List[Dict[str, Any]], response.json())
def _retrieve_device(self, machine: str) -> Device:
    """Look up *machine* in the device list, remember its record in
    ``self._machine_info`` and return a fully connected Device of the
    advertised qubit count.

    :raises RuntimeError: if no device with that name is listed
    """
    for entry in self.available_devices(self._api_handler):
        if entry["name"] == machine:
            self._machine_info = entry
            break
    else:
        raise RuntimeError(f"Device {machine} is not available.")
    return Device(FullyConnected(self._machine_info["n_qubits"]))
@classmethod
def device_state(
    cls, device_name: str, _api_handler: Optional[HoneywellQAPI] = None
) -> str:
    """Return the operational state of a device as a string, e.g.
    ``device_state('HQS-LT-1.0-APIVAL')`` -> "online".

    :param device_name: name of the device
    :param _api_handler: instance of the API handler; a fresh one is
        created when None.
    """
    if _api_handler is None:
        _api_handler = HoneywellQAPI()
    response = requests.get(
        f"{_api_handler.url}machine/{device_name}",
        headers={"Authorization": _api_handler.login()},
    )
    _api_handler._response_check(response, "get machine status")
    return str(response.json()["state"])
@property
def device(self) -> Optional[Device]:
    """The backing Device, fetched lazily; stays None in debug mode."""
    if self._device is not None:
        return self._device
    if not self._MACHINE_DEBUG:
        self._device = self._retrieve_device(self._device_name)
    return self._device
@property
def required_predicates(self) -> List[Predicate]:
    """Predicates a circuit must satisfy to run on this backend."""
    predicates: List[Predicate] = [
        NoSymbolsPredicate(),
        GateSetPredicate(_GATE_SET),
    ]
    # The qubit-count bound needs real device info, unavailable in debug mode.
    if not self._MACHINE_DEBUG:
        dev = self.device
        assert dev is not None
        predicates.append(MaxNQubitsPredicate(len(dev.nodes)))
    return predicates
def default_compilation_pass(self, optimisation_level: int = 1) -> BasePass:
    """Return the default compilation pass for this backend.

    :param optimisation_level: 0 (rebase only), 1 (default synthesis)
        or 2 (full peephole optimisation)
    """
    assert optimisation_level in range(3)
    # All levels start by decomposing classical expressions and boxes.
    passlist = [DecomposeClassicalExp(), DecomposeBoxes()]
    if optimisation_level == 0:
        passlist.append(RebaseHQS())
    else:
        # Level 1 uses the lighter synthesis pass, level 2 the heavier one.
        passlist.append(
            SynthesiseIBM() if optimisation_level == 1 else FullPeepholeOptimise()
        )
        passlist.extend(
            [
                RebaseHQS(),
                RemoveRedundancies(),
                SquashHQS(),
                SimplifyInitial(
                    allow_classical=False, create_all_qubits=True, xcirc=_xcirc
                ),
            ]
        )
    return SequencePass(passlist)
@property
def _result_id_type(self) -> _ResultIdTuple:
    """Handle layout: (job id, JSON-encoded postprocessing circuit)."""
    return (str, str)
def process_circuits(
    self,
    circuits: Iterable[Circuit],
    n_shots: Optional[int] = None,
    valid_check: bool = True,
    **kwargs: KwargTypes,
) -> List[ResultHandle]:
    """
    See :py:meth:`pytket.backends.Backend.process_circuits`.

    Supported kwargs:

    - ``postprocess``: if True, apply contextual optimisation before
      submission and store the postprocessing circuit in the handle
      (default False).
    """
    if n_shots is None or n_shots < 1:
        raise ValueError("Parameter n_shots is required for this backend")
    if valid_check:
        self._check_all_circuits(circuits)
    postprocess = kwargs.get("postprocess", False)
    # Fields shared by every job submission; per-circuit fields are
    # filled in on a copy inside the loop.
    basebody = {
        "machine": self._device_name,
        "language": "OPENQASM 2.0",
        "priority": "normal",
        "count": n_shots,
        "options": None,
    }
    handle_list = []
    for i, circ in enumerate(circuits):
        if postprocess:
            # c0 is the circuit actually submitted; ppcirc is the classical
            # postprocessing circuit applied when converting results.
            c0, ppcirc = prepare_circuit(circ, allow_classical=False, xcirc=_xcirc)
            ppcirc_rep = ppcirc.to_dict()
        else:
            c0, ppcirc_rep = circ, None
        honeywell_circ = circuit_to_qasm_str(c0, header="hqslib1")
        body = basebody.copy()
        body["name"] = circ.name if circ.name else f"{self._label}_{i}"
        body["program"] = honeywell_circ
        if self._api_handler is None:
            # Debug mode: no submission; encode (n_qubits, n_shots) into
            # the handle so get_result can fabricate an all-zero readout.
            handle_list.append(
                ResultHandle(
                    _DEBUG_HANDLE_PREFIX + str((circ.n_qubits, n_shots)),
                    json.dumps(ppcirc_rep),
                )
            )
        else:
            try:
                res = _submit_job(self._api_handler, body)
                if res.status_code != HTTPStatus.OK:
                    # First failure may be an expired session: log in
                    # again and retry the submission once.
                    self.relogin()
                    res = _submit_job(self._api_handler, body)
                jobdict = res.json()
                if res.status_code != HTTPStatus.OK:
                    raise HQSAPIError(
                        f'HTTP error submitting job, {jobdict["error"]["text"]}'
                    )
            except ConnectionError:
                raise ConnectionError(
                    f"{self._label} Connection Error: Error during submit..."
                )
            # extract job ID from response
            handle = ResultHandle(jobdict["job"], json.dumps(ppcirc_rep))
            handle_list.append(handle)
            self._cache[handle] = dict()
    return handle_list
def _retrieve_job(
    self, jobid: str, timeout: Optional[int] = None, wait: Optional[int] = None
) -> Dict:
    """Fetch the full job dictionary for ``jobid``.

    :param timeout: optional timeout override for this retrieval
    :param wait: optional retry-timeout override for this retrieval
    :raises RuntimeError: if no API handler is set, or nothing is returned
    """
    if not self._api_handler:
        raise RuntimeError("API handler not set")
    # The timeout overrides apply only within this context block.
    with self._api_handler.override_timeouts(timeout=timeout, retry_timeout=wait):
        try:
            result = self._api_handler.retrieve_job(jobid, use_websocket=True)
        except HQSAPIError:
            # Possibly an expired session: log in again and retry once.
            self.relogin()
            result = self._api_handler.retrieve_job(jobid, use_websocket=True)
    if result is None:
        raise RuntimeError(f"Unable to retrieve job {jobid}")
    return result
def cancel(self, handle: ResultHandle) -> None:
    """Cancel the job behind ``handle``; a no-op in debug mode."""
    if self._api_handler is None:
        return
    self._api_handler.cancel(str(handle[0]))
def _update_cache_result(self, handle: ResultHandle, res: BackendResult) -> None:
    """Store ``res`` under ``handle`` in the cache, merging with (not
    replacing) any other cached entries for that handle."""
    self._cache.setdefault(handle, {})["result"] = res
def circuit_status(self, handle: ResultHandle) -> CircuitStatus:
    """Query the status of the job for ``handle``; once the job reports
    completed with results attached, cache the converted result."""
    self._check_handle_type(handle)
    jobid = str(handle[0])
    # Debug handles never correspond to a real job: report completed.
    if self._api_handler is None or jobid.startswith(_DEBUG_HANDLE_PREFIX):
        return CircuitStatus(StatusEnum.COMPLETED)
    # TODO check queue position and add to message
    try:
        response = self._api_handler.retrieve_job_status(jobid, use_websocket=True)
    except HQSAPIError:
        # Possibly an expired session: log in again and retry once.
        self.relogin()
        response = self._api_handler.retrieve_job_status(jobid, use_websocket=True)
    if response is None:
        raise RuntimeError(f"Unable to retrieve circuit status for handle {handle}")
    circ_status = _parse_status(response)
    if circ_status.status is StatusEnum.COMPLETED:
        if "results" in response:
            # handle[1] holds the JSON-serialized postprocessing circuit
            # (or null); apply it while converting the raw results.
            ppcirc_rep = json.loads(cast(str, handle[1]))
            ppcirc = (
                Circuit.from_dict(ppcirc_rep) if ppcirc_rep is not None else None
            )
            self._update_cache_result(
                handle, _convert_result(response["results"], ppcirc)
            )
    return circ_status
def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResult:
    """
    See :py:meth:`pytket.backends.Backend.get_result`.
    Supported kwargs: `timeout`, `wait`.
    """
    try:
        # Fast path: result already cached (by a previous call or by
        # circuit_status).
        return super().get_result(handle)
    except CircuitNotRunError:
        jobid = str(handle[0])
        # handle[1] holds the JSON-serialized postprocessing circuit (or null).
        ppcirc_rep = json.loads(cast(str, handle[1]))
        ppcirc = Circuit.from_dict(ppcirc_rep) if ppcirc_rep is not None else None
        if self._MACHINE_DEBUG or jobid.startswith(_DEBUG_HANDLE_PREFIX):
            # Debug handles encode (n_qubits, n_shots): fabricate an
            # all-zeros readout of that shape instead of contacting the API.
            debug_handle_info = jobid[len(_DEBUG_HANDLE_PREFIX) :]
            n_qubits, shots = literal_eval(debug_handle_info)
            return _convert_result({"c": (["0" * n_qubits] * shots)}, ppcirc)
        # TODO exception handling when jobid not found on backend
        timeout = kwargs.get("timeout")
        if timeout is not None:
            timeout = int(timeout)
        wait = kwargs.get("wait")
        if wait is not None:
            wait = int(wait)
        job_retrieve = self._retrieve_job(jobid, timeout, wait)
        circ_status = _parse_status(job_retrieve)
        # Results are only extracted for terminal job states.
        if circ_status.status not in (StatusEnum.COMPLETED, StatusEnum.CANCELLED):
            raise RuntimeError(
                f"Cannot retrieve results, job status is {circ_status.message}"
            )
        try:
            res = job_retrieve["results"]
        except KeyError:
            raise RuntimeError("Results missing.")
        backres = _convert_result(res, ppcirc)
        self._update_cache_result(handle, backres)
        return backres
def cost_estimate(self, circuit: Circuit, n_shots: int) -> float:
"""
Estimate the cost in Honeywell Quantum Credits (HQC) to complete this `circuit`
with `n_shots` repeats. The estimate is based on hard-coded constants, which may
be out of date, invalidating the estimate. Use with caution.
With 𝑁1𝑞 PhasedX gates, 𝑁2𝑞 ZZMax gates, 𝑁𝑚 state preparations and measurements,
and 𝐶 shots:
𝐻𝑄𝐶= 5 + (𝑁1𝑞 + 10𝑁2𝑞 + 5𝑁𝑚)*𝐶/5000
:param circuit: Circuit to calculate runtime estimate for. Must be valid for
backend.
:type circuit: Circuit
:param n_shots: Number of shots.
:type n_shots: int
:raises ValueError: Circuit is not valid, needs to be compiled.
:return: Cost in HQC to execute the shots.
:rtype: float
"""
if not self.valid_circuit(circuit):
raise ValueError(
"Circuit does not satisfy predicates of backend."
+ " Try running `backend.compile_circuit` first"
)
gate_counts: Dict[OpType, int] = {
g_type: circuit.n_gates_of_type(g_type) for g_type in _GATE_SET
}
n_1q = gate_counts[OpType.PhasedX]
n_m = circuit.n_qubits + gate_counts[OpType.Measure] + gate_counts[OpType.Reset]
| |
<filename>crawler/crawler.py
#!/usr/bin/python3
## --------------------------------------------------------------------------------------------------------------------
import logging # for log
import os # for file handling, exit
import sys # for exit
import multiprocessing # for multiprocessing purpose
import re # for pattern
from configparser import ConfigParser, ExtendedInterpolation # for loading config files
from .crawlererr import CrawlerConfigError, CrawlerError, CrawlerFileReadError, CrawlerProcessError, CrawlerMatchError # ioc crawler error handling
from .crawlerdata import CrawlerVo, CrawlerWhitelistData # data objects
## --------------------------------------------------------------------------------------------------------------------
LOG = logging.getLogger('IocCrawlerLog')
## --------------------------------------------------------------------------------------------------------------------
## Class for ioc crawling
class Crawler():
## constructor
# Init variables and read all files
def __init__(self, pathSrc:str, threadsSrc:int, patternSrc:str, printToStdoutSrc:bool,
             resultColumnFormatSrc:list, sectionsSrc:list, matchHighlightingSrc:bool,
             matchSizeSrc:int, whitelistSrc:str=None, beforeSrc:int=0, afterSrc:int=0) -> None:
    """Initialise the crawler: set up shared state, load patterns and
    whitelist, and collect the list of files to process.

    Raises CrawlerFileReadError / CrawlerConfigError for bad input and
    wraps any other failure in CrawlerError.
    """
    try:
        # init: counters are multiprocessing.Value because the pattern
        # search later runs in several worker processes
        self.blockQueue = multiprocessing.Queue()
        self.sharedQueue = multiprocessing.Queue()
        self.processCount = threadsSrc
        self.processedFileCount = multiprocessing.Value('i', 0)
        self.whiteListedMatches = multiprocessing.Value('i', 0)
        self.overMaxMatchSize = multiprocessing.Value('i', 0)
        self.rootFilePath = ""
        self.rootRelPath = ""
        self.printToStdOut = printToStdoutSrc
        self.resultList = []
        self.whitlist = None
        self.whitlistedFiles = 0
        self.result_columns = resultColumnFormatSrc
        self.sectionsForResult = sectionsSrc
        self.matchHighligting = matchHighlightingSrc
        self.before = beforeSrc
        self.after = afterSrc
        self.matchSize = matchSizeSrc
        self._printCrawlerMessage('[+] Init Crawler')
        LOG.debug("Init Crawler")
        # check path of source dir; always work with an absolute root path
        if not os.path.exists(pathSrc):
            raise CrawlerFileReadError("File not found.", os.path.basename(pathSrc))
        if not os.path.isabs(pathSrc):
            self.rootFilePath = os.path.abspath(pathSrc)
        else:
            self.rootFilePath = pathSrc
        # set relative path (used later to strip the root prefix for
        # whitelist matching and result reporting)
        if self.rootFilePath != pathSrc:
            self.rootRelPath = os.path.relpath(pathSrc)
        else:
            self.rootRelPath = pathSrc
        # Check match size
        if self.matchSize < 5:
            raise CrawlerConfigError("Match size have to be greater then 5")
        # load pattern
        self.patterns = self._loadPattern(patternSrc)
        LOG.debug('Pattern loaded: ' + str(len(self.patterns)))
        # load whitelist (optional)
        if whitelistSrc:
            self.whitlist = self._loadWhitelist(whitelistSrc)
            LOG.debug('Whitelist loaded')
        else:
            LOG.debug('No whitelist')
        # check files
        self._printCrawlerMessage('[+] Checking files')
        self.fileList = self._readFiles(self.rootFilePath, self.rootRelPath)
        self.fileListSize = len(self.fileList)
        self._printCrawlerMessage(" |- %d files found, %d whitelisted." %(self.fileListSize, self.whitlistedFiles))
        LOG.debug("%d files found for processing" %(self.fileListSize))
    except CrawlerFileReadError as re:
        # re-raise specific errors unchanged so callers can distinguish them
        raise re
    except CrawlerConfigError as ce:
        raise ce
    except Exception as e:
        raise CrawlerError("Initialisation error. " + getattr(e, 'message', repr(e)))
# end init
## Loads pattern from config or personal file
#  - patterns will only be loaded if their section was selected by the user
#  @param patternFileSrc - path of the pattern configuration file
#  @return - dict mapping ioc type to a list of compiled byte patterns
def _loadPattern(self, patternFileSrc) -> dict:
    """Load the selected IOC search patterns as compiled bytes regexes.

    Fix: the return annotation previously said ``list`` although the
    method returns a dict. ``re.compile(b'%b' % bytearray(...))`` is
    simplified to the equivalent direct encoding.

    :raises CrawlerConfigError: if the configuration cannot be parsed
    """
    try:
        LOG.debug('Load patterns')
        patternCfg = ConfigParser(interpolation=None)
        patternCfg.read(patternFileSrc)
        patterns = {}
        for ioc_type in patternCfg.sections():
            # only sections the user asked for are compiled
            if ioc_type.lower() not in self.sectionsForResult:
                continue
            for option in patternCfg.options(ioc_type):
                ioc_pattern = patternCfg[ioc_type][option]
                if ioc_pattern:
                    # compile as bytes: files are scanned in binary mode
                    compiled = re.compile(ioc_pattern.encode('utf-8'))
                    patterns.setdefault(ioc_type, []).append(compiled)
        return patterns
    except Exception as e:
        raise CrawlerConfigError(getattr(e, 'message', repr(e)))
# end _loadPattern
## Loads whitelist from config or personal file
#  @param whitelistFileSrc - path of the whitelist configuration file
#  @return - a CrawlerWhitelistData object
def _loadWhitelist(self, whitelistFileSrc) -> CrawlerWhitelistData:
    """Parse the whitelist configuration into a CrawlerWhitelistData.

    :raises CrawlerConfigError: if the configuration cannot be parsed
    """
    try:
        LOG.debug('Load whitelist')
        whitelistCfg = ConfigParser(interpolation=ExtendedInterpolation())
        whitelistCfg.read(whitelistFileSrc)
        whitelistObj = CrawlerWhitelistData()
        for wh_section in whitelistCfg.sections():
            for option in whitelistCfg.options(wh_section):
                # each option value is a newline-separated list of entries
                entries = whitelistCfg[wh_section][option].strip().split('\n')
                whitelistObj.addWhiteListItem(wh_section, option, entries)
        return whitelistObj
    except Exception as e:
        raise CrawlerConfigError(getattr(e, 'message', repr(e)))
# end _loadWhitelist
## Reads all files from the directory
#  Whitelisted files are counted in self.whitlistedFiles and skipped.
#  @param rootFilePathSrc - absolute root path (file or directory)
#  @param relPathSrc - relative root path used for whitelist matching
#  @return - list of file paths to process
def _readFiles(self, rootFilePathSrc, relPathSrc) -> list:
    filesList = []
    filename = ""
    try:
        # a single file is processed as-is
        if os.path.isfile(rootFilePathSrc):
            return [rootFilePathSrc]
        for root, dirs, files in os.walk(rootFilePathSrc):
            for filename in files:
                filePathStr = os.path.join(root, filename)
                if not self.whitlist:
                    filesList.append(filePathStr)
                    continue
                # whitelist matching uses only the part of the path
                # relative to the scan root
                idx = filePathStr.index(relPathSrc) + len(relPathSrc)
                if filePathStr[idx:] in self.whitlist:
                    LOG.debug("%s whitelisted." %(filePathStr[idx:]))
                    self.whitlistedFiles +=1
                else:
                    filesList.append(filePathStr)
    except IOError as io:
        raise CrawlerFileReadError(getattr(io, 'message', repr(io)), filename)
    except Exception as e:
        raise CrawlerError(getattr(e, 'message', repr(e)))
    return filesList
# end _readFiles
### Returns a summary to all found ioc types and the count of matches
#  - reports whitelisted file / match counters when a whitelist is active
#  - sums the per-file match counters over all processed results
#  @return dict mapping description or ioc type to a count
def getResultSummary(self) -> dict:
    summaryDict = {}
    if self.whitlist:
        if self.whitlistedFiles > 0:
            summaryDict["Whitelisted files"] = self.whitlistedFiles
        if self.whiteListedMatches.value > 0:
            summaryDict["Whitelisted matches"] = self.whiteListedMatches.value
        if self.overMaxMatchSize.value > 0:
            # bugfix: this entry previously reported whiteListedMatches.value;
            # it must report the oversized-match counter it is guarded by
            summaryDict["Matchs above the max match size"] = self.overMaxMatchSize.value
    # end if self.whitlist
    # accumulate match counts per ioc type over every file result
    for item in self.resultList:
        for key in item.mCount.keys():
            if key not in summaryDict:
                summaryDict[key] = item.mCount[key]
            else:
                summaryDict[key] = summaryDict[key] + item.mCount[key]
    # end for
    return summaryDict
# end def getResultSummary
## Process files from block
#  - do pattern search
#  - check for whitelist etc
#  @param blockFiles - the files to process
#  @param shared_list - manager-backed list collecting one CrawlerVo per file
def _processBlock(self, blockFiles, shared_list) -> None:
    """Scan each file in *blockFiles* for all patterns and append a
    CrawlerVo with the matches to *shared_list*.

    Files are read in overlapping chunks so matches crossing a chunk
    boundary are still found; only matches starting inside the
    non-overlap part are recorded, to avoid duplicates.
    """
    try:
        for file in blockFiles:
            try:
                # create value object for the results - save only the relative path to the results
                cvo = CrawlerVo(file[len(self.rootRelPath):])
                with open(file, 'rb') as f:
                    LOG.debug("Processing %s" %(file))
                    fileSize = os.path.getsize(file)
                    bufSize = 32384 # read buffer
                    overlap = 1024 # overlap reading size
                    filePos = 0 # current position in file
                    # if file size is smaller than the buffer, do no overlap reading
                    if fileSize < bufSize:
                        bufSize = fileSize
                        overlap = 0
                    # read the file in blocks
                    while filePos < fileSize:
                        # log status periodically for very large files
                        if filePos > 0:
                            if (filePos/10) % 100 == 0:
                                LOG.debug("Hanging on %s; read %d/%d bytes" %(file, filePos, fileSize))
                        buffer = None
                        buffer = f.read(bufSize+overlap)
                        for ioc_type in self.patterns:
                            for pattern in self.patterns[ioc_type]:
                                matchDict = {}
                                searchRes = re.finditer(pattern, buffer)
                                for item in searchRes:
                                    # matches starting in the overlap region are
                                    # recorded by the next chunk instead
                                    if item.start() < bufSize:
                                        try:
                                            matchString = item.group(0).decode("utf-8")
                                            # Check match size
                                            if len(matchString) > self.matchSize:
                                                raise CrawlerMatchError("Match for %s is greater then %d." %(item, self.matchSize))
                                            before = ""
                                            after = ""
                                            # context extraction (--before/--after) is not implemented yet
                                            if self.before > 0:
                                                raise CrawlerError("self.before not implemented")
                                            elif self.after > 0:
                                                raise CrawlerError("self.after not implemented")
                                            #after = buffer[item.start() + len(matchString): item.start() + len(matchString) + self.after].decode("utf-8")
                                            #printDict = {"file" : file, "ioc" : ioc_type,
                                            #             "match": before + matchString + after, "offset": str(filePos + item.start())}
                                            # hint: save only relative path
                                            printDict = {"file" : file[len(self.rootRelPath):], "ioc" : ioc_type, "match": matchString, "offset": str(filePos + item.start())}
                                            isWhiteListed = False
                                            if self.whitlist:
                                                if matchString in self.whitlist:
                                                    isWhiteListed = True
                                                    # counter is shared across worker processes
                                                    with self.processedFileCount.get_lock():
                                                        self.whiteListedMatches.value +=1
                                            if not isWhiteListed:
                                                if self.printToStdOut:
                                                    self._printCrawlerResult(printDict)
                                                # NOTE(review): membership is tested with matchString but
                                                # keys are before+matchString+after; currently equivalent
                                                # since before/after are always empty — confirm if context
                                                # extraction is ever implemented
                                                if matchString not in matchDict:
                                                    matchDict[before + matchString + after] = [str(filePos + item.start())]
                                                else:
                                                    matchDict[before + matchString + after].extend([str(filePos + item.start())])
                                            # end try
                                        except UnicodeDecodeError as ude:
                                            LOG.debug("Decoding error while Processing %s" %(item))
                                        except CrawlerMatchError as me:
                                            with self.processedFileCount.get_lock():
                                                self.overMaxMatchSize.value +=1
                                            LOG.debug(me)
                                    # end if item.start() < pos + bufSize
                                # end for item in searchRes
                                # add match
                                if matchDict:
                                    cvo.addMatchResults(ioc_type, matchDict)
                            # end for pattern
                        # end for ioc_type
                        # set new offset: step back by the overlap so boundary
                        # matches are seen again in the next chunk
                        if f.tell() < fileSize:
                            filePos = f.seek(f.tell() - overlap)
                        else:
                            filePos = f.tell()
                    # end while filePos < fileSize:
                # end with file
                # add crawler file value object to the result list
                shared_list.append(cvo)
            except IOError as ioe:
                LOG.info("[!] " + getattr(ioe, 'message', repr(ioe)))
        # end for
        # set lock for the process counter and save the new status
        with self.processedFileCount.get_lock():
            self.processedFileCount.value += len(blockFiles)
        # log processing status for the user
        self._printCrawlerMessage(" |- Processed files: %d / %d [%s %%]" % (self.processedFileCount.value,
                                                                           self.fileListSize,
                                                                           self._getProcessStatus()))
    except Exception as e:
        raise CrawlerProcessError(getattr(e, 'message', repr(e)))
# end processBlock
## Main function for processing
# - inhires the nested function "procesQueue" for getting tasks from queue
def do(self) -> None:
self._printCrawlerMessage("[+] Start processing files")
manager = multiprocessing.Manager()
shared_list = manager.list()
processList = []
## Get Blocks from Queue and process them until queue is empty
def _processQueue():
while not self.blockQueue.empty():
# process block
LOG.debug("Get new block from queue")
blockFiles = self.blockQueue.get()
self._processBlock(blockFiles, shared_list)
# end processBlock
try:
# check if there is anything to do
if self.fileListSize < 1:
raise CrawlerFileReadError("No files to read.")
# Calc block size
blockSize = 0
if self.fileListSize < 10:
blockSize = self.fileListSize
self.processCount = 1
elif self.fileListSize < 100:
blockSize = round(self.fileListSize / | |
or how to compute invariants.
return b
def gen(self, i):
    """
    Return the ``i``-th generator of this module.

    INPUT:

    - ``i`` -- integer

    EXAMPLES::

        sage: V = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
        sage: Q = V/W; Q
        Finitely generated module V/W over Integer Ring with invariants (4, 12)
        sage: Q.gen(0)
        (1, 0)
        sage: Q.gen(1)
        (0, 1)
        sage: Q.gen(2)
        Traceback (most recent call last):
        ...
        ValueError: Generator 2 not defined
        sage: Q.gen(-1)
        Traceback (most recent call last):
        ...
        ValueError: Generator -1 not defined
    """
    gens = self.gens()
    # Out-of-range indices (including negative ones) are an error rather
    # than wrapping around as Python sequence indexing would.
    if 0 <= i < len(gens):
        return gens[i]
    raise ValueError("Generator %s not defined" % i)
def smith_form_gen(self, i):
    """
    Return the ``i``-th Smith form generator of this module. A private
    name (so we can freely override :meth:`gen` in derived classes).

    INPUT:

    - ``i`` -- integer

    EXAMPLES::

        sage: V = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
        sage: Q = V/W; Q
        Finitely generated module V/W over Integer Ring with invariants (4, 12)
        sage: Q.smith_form_gen(0)
        (1, 0)
        sage: Q.smith_form_gen(1)
        (0, 1)
    """
    gens = self.smith_form_gens()
    # Reject indices outside [0, len) instead of letting negative values
    # wrap around.
    if 0 <= i < len(gens):
        return gens[i]
    raise ValueError("Smith form generator %s not defined" % i)
def optimized(self):
    """
    Return a module isomorphic to this one, but with V replaced by
    a submodule of V such that the generators of self all lift
    trivially to generators of V. Replace W by the intersection
    of V and W. This has the advantage that V has small dimension
    and any homomorphism from self trivially extends to a
    homomorphism from V.

    OUTPUT:

    - ``Q`` -- an optimized quotient V0/W0 with V0 a submodule of V
      such that phi: V0/W0 --> V/W is an isomorphism

    - ``Z`` -- matrix such that if x is in self.V() and
      c gives the coordinates of x in terms of the
      basis for self.V(), then c*Z is in V0
      and c*Z maps to x via phi above.

    EXAMPLES::

        sage: V = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
        sage: Q = V/W
        sage: O, X = Q.optimized(); O
        Finitely generated module V/W over Integer Ring with invariants (4, 12)
        sage: O.V()
        Free module of degree 3 and rank 2 over Integer Ring
        User basis matrix:
        [ 0 3 1]
        [ 0 -1 0]
        sage: O.W()
        Free module of degree 3 and rank 2 over Integer Ring
        Echelon basis matrix:
        [ 0 12 0]
        [ 0 0 4]
        sage: X # random
        [0 4 0]
        [0 1 0]
        [0 0 1]
        sage: OV = O.V()
        sage: Q(OV([0,-8,0])) == V.0
        True
        sage: Q(OV([0,1,0])) == V.1
        True
        sage: Q(OV([0,0,1])) == V.2
        True
    """
    try:
        # Cache: the sentinel True means self is already optimal (no
        # transformation matrix needed); otherwise the cached (M, X) pair
        # is returned directly.
        if self.__optimized is True:
            return self, None
        return self.__optimized
    except AttributeError:
        pass
    # V0: the span of lifts of the Smith form generators.
    V = self._V.span_of_basis([x.lift() for x in self.smith_form_gens()])
    M = self._module_constructor(V, self._W.intersection(V))
    # Compute matrix T of linear transformation from self._V to V.
    # This matrix T gives each basis element of self._V in terms
    # of our new optimized V, modulo the W's.
    A = V.basis_matrix().stack(self._W.basis_matrix())
    # Clear denominators so the Hermite form is computed over the integers.
    B, d = A._clear_denom()
    H, U = B.hermite_form(transformation=True)
    Y = H.solve_left(d*self._V.basis_matrix())
    T = Y * U.matrix_from_columns(range(V.rank()))
    self.__T = T
    # Finally we multiply by V.basis_matrix() so X gives vectors
    # in the ambient space instead of coefficients of linear
    # combinations of the basis for V.
    X = T * V.basis_matrix()
    self.__optimized = M, X
    return M, X
def hom(self, im_gens, codomain=None, check=True):
    """
    Homomorphism defined by giving the images of ``self.gens()`` in some
    fixed fg R-module.

    .. NOTE::

        We do not assume that the generators given by ``self.gens()`` are
        the same as the Smith form generators, since this may not be true
        for a general derived class.

    INPUT:

    - ``im_gens`` -- a list of the images of ``self.gens()`` in some
      R-module

    EXAMPLES::

        sage: V = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
        sage: Q = V/W
        sage: phi = Q.hom([3*Q.1, Q.0])
        sage: phi
        Morphism from module over Integer Ring with invariants (4, 12) to module with invariants (4, 12) that sends the generators to [(0, 3), (1, 0)]
        sage: phi(Q.0)
        (0, 3)
        sage: phi(Q.1)
        (1, 0)
        sage: Q.0 == phi(Q.1)
        True

    This example illustrates creating a morphism to a free module.
    The free module is turned into an FGP module (i.e., quotient
    V/W with W=0), and the morphism is constructed::

        sage: V = span([[1/2,0,0],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1])
        sage: Q = V/W; Q
        Finitely generated module V/W over Integer Ring with invariants (2, 0, 0)
        sage: phi = Q.hom([0,V.0,V.1]); phi
        Morphism from module over Integer Ring with invariants (2, 0, 0) to module with invariants (0, 0, 0) that sends the generators to [(0, 0, 0), (1, 0, 0), (0, 1, 0)]
        sage: phi.domain()
        Finitely generated module V/W over Integer Ring with invariants (2, 0, 0)
        sage: phi.codomain()
        Finitely generated module V/W over Integer Ring with invariants (0, 0, 0)
        sage: phi(Q.0)
        (0, 0, 0)
        sage: phi(Q.1)
        (1, 0, 0)
        sage: phi(Q.2) == V.1
        True

    Constructing two zero maps from the zero module::

        sage: A = (ZZ^2)/(ZZ^2); A
        Finitely generated module V/W over Integer Ring with invariants ()
        sage: A.hom([])
        Morphism from module over Integer Ring with invariants () to module with invariants () that sends the generators to []
        sage: A.hom([]).codomain() is A
        True
        sage: B = (ZZ^3)/(ZZ^3)
        sage: A.hom([],codomain=B)
        Morphism from module over Integer Ring with invariants () to module with invariants () that sends the generators to []
        sage: phi = A.hom([],codomain=B); phi
        Morphism from module over Integer Ring with invariants () to module with invariants () that sends the generators to []
        sage: phi(A(0))
        ()
        sage: phi(A(0)) == B(0)
        True

    A degenerate case::

        sage: A = (ZZ^2)/(ZZ^2)
        sage: phi = A.hom([]); phi
        Morphism from module over Integer Ring with invariants () to module with invariants () that sends the generators to []
        sage: phi(A(0))
        ()

    The code checks that the morphism is valid. In the example
    below we try to send a generator of order 2 to an element of
    order 14::

        sage: V = span([[1/14,3/14],[0,1/2]],ZZ); W = ZZ^2
        sage: Q = V/W; Q
        Finitely generated module V/W over Integer Ring with invariants (2, 14)
        sage: Q.linear_combination_of_smith_form_gens([1,11]).additive_order()
        14
        sage: f = Q.hom([Q.linear_combination_of_smith_form_gens([1,11]), Q.linear_combination_of_smith_form_gens([1,3])]); f
        Traceback (most recent call last):
        ...
        ValueError: phi must send optimized submodule of M.W() into N.W()
    """
    if len(im_gens) == 0:
        # 0 map: with no images given, the codomain defaults to self.
        N = self if codomain is None else codomain
    else:
        if codomain is None:
            # Infer the codomain from the common universe of the images.
            im_gens = Sequence(im_gens)
            N = im_gens.universe()
        else:
            N = codomain
            im_gens = Sequence(im_gens, universe=N)
    if is_FreeModule(N):
        # If im_smith_gens are not in an R-module, but are in a Free-module,
        # then we quotient out by the 0 submodule and get an R-module.
        N = FGP_Module(N, N.zero_submodule(), check=DEBUG)
        im_gens = Sequence(im_gens, universe=N)
    if len(im_gens) == 0:
        # Build the zero morphism via the optimized presentation of self.
        VO = self.optimized()[0].V()
        H = VO.Hom(N.V())
        return FGP_Morphism(self.Hom(N), H(0), check=DEBUG)
    # Use the fast Smith-form path only when gens() agrees with the
    # Smith form generators (may differ in derived classes).
    if self.gens() == self.smith_form_gens():
        return self._hom_from_smith(im_gens, check)
    else:
        return self._hom_general(im_gens, check)
def _hom_general(self, im_gens, check=True):
"""
Homomorphism defined by giving the images of ``self.gens()`` in some
fixed fg R-module. We do not assume that the generators given by
``self.gens()`` are the same as the Smith form generators, since this
may not be true for a general derived class.
INPUT:
- ``im_gens`` - a Sequence object giving the images of ``self.gens()``,
whose universe is some fixed fg R-module
EXAMPLES::
sage: class SillyModule(sage.modules.fg_pid.fgp_module.FGP_Module_class):
....: def gens(self):
....: return tuple(flatten([[x,x] for x in self.smith_form_gens()]))
sage: A = SillyModule(ZZ**1, span([[3]], ZZ))
sage: A.gen(0)
(1)
sage: A.gen(1)
(1)
sage: B = ZZ**1 / span([[3]], ZZ)
sage: A.hom([B.0, 2*B.0], B)
Traceback (most recent call last):
...
ValueError: Images do not determine a valid homomorphism
sage: A.hom([B.0, B.0], B) # indirect doctest
Morphism from module over Integer Ring with invariants (3,) to module with invariants (3,) that sends the generators to [(1), (1)]
"""
m = self.ngens()
A = ZZ**m
q = A.hom([x.lift() for x in self.gens()], self.V())
B = q.inverse_image(self.W())
N = im_gens.universe()
r = | |
text/html; charset=UTF-8
set-cookie: WEBSRV=web2; path=/
cache-control: private
10.0.0.27
[root@centos6 ~]#curl -i 10.0.0.7
HTTP/1.1 200 OK
date: Thu, 02 Apr 2020 02:26:15 GMT
server: Apache/2.4.6 (CentOS)
last-modified: Thu, 02 Apr 2020 01:44:13 GMT
etag: "a-5a244f01f8adc"
accept-ranges: bytes
content-length: 10
content-type: text/html; charset=UTF-8
set-cookie: WEBSRV=web1; path=/
cache-control: private
10.0.0.17
[root@centos6 ~]#curl -b WEBSRV=web1 10.0.0.7
10.0.0.17
[root@centos6 ~]#curl -b WEBSRV=web2 10.0.0.7
10.0.0.27
[root@centos6 ~]#curl -vb WEBSRV=web1 10.0.0.7
* About to connect() to 10.0.0.7 port 80 (#0)
* Trying 10.0.0.7... connected
* Connected to 10.0.0.7 (10.0.0.7) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2
> Host: 10.0.0.7
> Accept: */*
> Cookie: WEBSRV=web1
>
< HTTP/1.1 200 OK
< date: Thu, 02 Apr 2020 02:27:54 GMT
< server: Apache/2.4.6 (CentOS)
< last-modified: Thu, 02 Apr 2020 01:44:13 GMT
< etag: "a-5a244f01f8adc"
< accept-ranges: bytes
< content-length: 10
< content-type: text/html; charset=UTF-8
<
10.0.0.17
* Connection #0 to host 10.0.0.7 left intact
* Closing connection #0
[root@centos6 ~]#curl -vb WEBSRV=web2 10.0.0.7
* About to connect() to 10.0.0.7 port 80 (#0)
* Trying 10.0.0.7... connected
* Connected to 10.0.0.7 (10.0.0.7) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2
> Host: 10.0.0.7
> Accept: */*
> Cookie: WEBSRV=web2
>
< HTTP/1.1 200 OK
< date: Thu, 02 Apr 2020 02:27:57 GMT
< server: Apache/2.4.6 (CentOS)
< last-modified: Thu, 02 Apr 2020 01:44:28 GMT
< etag: "a-5a244f0fd5175"
< accept-ranges: bytes
< content-length: 10
< content-type: text/html; charset=UTF-8
<
10.0.0.27
* Connection #0 to host 10.0.0.7 left intact
* Closing connection #0
'''
HAProxy状态页
通过web界面,显示当前HAProxy的运行状态
官方帮助:
'''
http://cbonte.github.io/haproxy-dconv/2.1/configuration.html#4-stats%20admin
'''
状态页配置项
'''
stats enable #基于默认的参数启用stats page
stats hide-version #将状态页中haproxy版本隐藏
stats refresh <delay> #设定自动刷新时间间隔,默认不自动刷新
stats uri <prefix> #自定义stats page uri,默认值:/haproxy?stats
stats realm <realm> #账户认证时的提示信息,示例:stats realm HAProxy\ Statistics
stats auth <user>:<passwd> #认证时的账号和密码,可使用多次,默认:no authentication,可有多行用户
stats admin { if | unless } <cond> #启用stats page中的管理功能
'''
启用状态页
'''
listen stats
bind :9999
stats enable
#stats hide-version
stats uri /haproxy-status
stats realm HAPorxy\ Stats\ Page
stats auth haadmin:123456 #两个用户
stats auth admin:123456
#stats refresh 30s
stats admin if TRUE #安全原因,不建议打开
'''
登录状态页
'''
pid = 27134 (process #1, nbproc = 1, nbthread = 1) #pid为当前pid号,process为当前进程号,nbproc和nbthread为一共多少进程和每个进程多少个线程
uptime = 0d 0h00m04s #启动了多长时间
system limits: memmax = unlimited; ulimit-n = 200029 #系统资源限制:内存/最大打开文件数/
maxsock = 200029; maxconn = 100000; maxpipes = 0 #最大socket连接数/单进程最大连接数/最大管道数maxpipes
current conns = 2; current pipes = 0/0; conn rate = 2/sec; bit rate = 0.000 kbps #当前连接数/当前管道数/当前连接速率
Running tasks: 1/14; idle = 100 % #运行的任务/当前空闲率
active UP: #在线服务器
backup UP: #标记为backup的服务器
active UP, going down: #监测未通过正在进入down过程
backup UP, going down: #备份服务器正在进入down过程
active DOWN, going up: #down的服务器正在进入up过程
backup DOWN, going up: #备份服务器正在进入up过程
active or backup DOWN: #在线的服务器或者是backup的服务器已经转换成了down状态
not checked: #标记为不监测的服务器
active or backup DOWN for maintenance (MAINT) #active或者backup服务器人为下线的
active or backup SOFT STOPPED for maintenance #active或者backup被人为软下线(人为将weight改成0)
'''
haproxy-基于cookie的会话保持插图(2)
backend server信息
'''
session rate(每秒的连接会话信息): Errors(错误统计信息):
cur:每秒的当前会话数量 Req:错误请求量
max:每秒新的最大会话数量 conn:错误链接量
limit:每秒新的会话限制量 Resp:错误响应量
sessions(会话信息): Warnings(警告统计信息):
cur:当前会话量 Retr:重新尝试次数
max:最大会话量 Redis:再次发送次数
limit: 限制会话量
Total:总共会话量 Server(real server信息):
LBTot:选中一台服务器所用的总时间 Status:后端机的状态,包括UP和DOWN
Last:和服务器的持续连接时间 LastChk:持续检查后端服务器的时间
Wght:权重
Bytes(流量统计): Act:活动链接数量
In:网络的字节输入总量 Bck:备份的服务器数量
Out:网络的字节输出总量 Chk:心跳检测时间
Dwn:后端服务器连接后都是DOWN的数量
Denied(拒绝统计信息): Dwntme:总的downtime时间
Req:拒绝请求量 Thrtle:server 状态
Resp:拒绝回复量
'''
利用状态页实现haproxy服务器的健康性检查
范例:通过curl 命令对haproxy的状态页的访问实现健康检查
'''
[root@centos8 ~]#curl -I http://haadmin:123456@10.0.0.100:9999/haproxy-status
HTTP/1.1 200 OK
cache-control: no-cache
content-type: text/html
[root@centos8 ~]#curl -I -u haadmin:123456 http://10.0.0.100:9999/haproxy-status
HTTP/1.1 200 OK
cache-control: no-cache
content-type: text/html
[root@centos8 ~]#echo $?
0
[root@haproxy ~]#systemctl stop haproxy
[root@centos8 ~]#curl -I http://haadmin:123456@10.0.0.100:9999/haproxy-status
curl: (7) Failed to connect to 10.0.0.100 port 9999: Connection refused
[root@centos8 ~]#echo $?
7
'''
---------------------------
haproxy - IP 透传
web服务器中需要记录客户端的真实IP地址,用于做访问统计、安全防护、行为分析、区域排行等场景。
layer 4 与 layer 7
四层:IP+PORT转发
七层:协议+内容交换
haproxy-IP透传插图
四层负载
在四层负载设备中,把client发送的报文目标地址(原来是负载均衡设备的IP地址),根据均衡设备设置的选择web服务器的规则选择对应的web服务器IP地址,这样client就可以直接跟此服务器建立TCP连接并发送数据,而四层负载自身不参与建立连接,而和LVS不同,haproxy是伪四层负载均衡,因为haproxy 需要分别和前端客户端及后端服务器建立连接
七层代理
七层负载均衡服务器起了一个反向代理服务器的作用,服务器建立一次TCP连接要三次握手,而client要访问webserver要先与七层负载设备进行三次握手后建立TCP连接,把要访问的报文信息发送给七层负载均衡;然后七层负载均衡再根据设置的均衡规则选择特定的webserver,然后通过三次握手与此台webserver建立TCP连接,然后webserver把需要的数据发送给七层负载均衡设备,负载均衡设备再把数据发送给client;所以,七层负载均衡设备起到了代理服务器的作用,七层代理需要和Client和后端服务器分别建立连接
'''
[root@haproxy ~]#tcpdump tcp -i eth0 -nn port ! 22 -w dump-tcp.pcap -v
[root@haproxy ~]#tcpdump tcp -i eth1 -nn port ! 22 -w dump-tcp2.pcap -v
'''
haproxy-IP透传插图(1)
haproxy-IP透传插图(2)
四层IP透传
'''
#haproxy 配置:
listen web_prot_http_nodes
bind 172.16.0.100:80
mode tcp
balance roundrobin
server web1 www.wangxiaochun.com:80 send-proxy check inter 3000 fall 3 rise 5
#nginx配置:变量$proxy_protocol_addr 记录透传过来的客户端IP
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" "$proxy_protocol_addr"'
server {
listen 80 proxy_protocol; #启用此项,将无法直接访问此网站,只能通过四层代理访问
server_name www.wangxiaochun.com;
......
'''
抓包可以看到
'''
continuation
'''
信息中带有客户端的源IP
haproxy-IP透传插图(3)
'''
#nginx在开启proxy_protocol前
[root@internet ~]#curl 172.16.0.100
<html>
<head><title>400 Bad Request</title></head>
<body>
<center><h1>400 Bad Request</h1></center>
<hr><center>nginx</center>
</body>
</html>
[root@VM_0_10_centos ~]# tail -f /apps/nginx/logs/nginx.access.log
172.16.31.10 - - [09/Apr/2020:20:48:51 +0800] "PROXY TCP4 10.0.0.100 192.168.3.11 35948 80" sendfileon
172.16.31.10 - - [09/Apr/2020:20:48:54 +0800] "PROXY TCP4 10.0.0.100 192.168.3.11 35952 80" sendfileon
172.16.31.10 - - [09/Apr/2020:20:48:57 +0800] "PROXY TCP4 10.0.0.100 192.168.3.11 35954 80" sendfileon
#在nginx服务器上开启日志格式和proxy_protocal
[root@VM_0_10_centos ~]# vim /apps/nginx/conf/nginx.conf
http {
.......
log_format main '$remote_addr - $remote_user [$time_local] "$request" "$proxy_protocol_addr"'
sendfile on;
keepalive_timeout 65;
client_max_body_size 100m;
server {
listen 80 default_server proxy_protocol ;
......
#nginx在开启proxy_protocol后,可以看客户端真实源IP
[root@VM_0_10_centos ~]# tail -f /apps/nginx/logs/nginx.access.log
172.16.31.10 - - [09/Apr/2020:20:52:52 +0800] "GET / HTTP/1.1" "172.16.0.200"sendfileon
'''
七层IP透传
当haproxy工作在七层的时候,如何透传客户端真实IP至后端服务器
HAProxy配置
在由haproxy发往后端主机的请求报文中添加“X-Forwarded-For”首部,其值为前端客户端的地址;用于向后端主发送真实的客户端IP
'''
option forwardfor [ except <network> ] [ header <name> ] [ if-none ]
[ except <network> ]:请求报请来自此处指定的网络时不予添加此首部,如haproxy自身所在网络
[ header <name> ]:使用自定义的首部名称,而非“X-Forwarded-For”,示例:X-client
[ if-none ] 如果没有首部才添加首部,如果有使用默认值
'''
范例:
'''
#haproxy 配置
defaults
option forwardfor #此为默认值,首部字段默为:X-Forwarded-For
#或者自定义首部为X-client:
option forwardfor except 127.0.0.0/8 header X-client
#listen配置
listen web_host
bind 10.0.0.7:80
mode http
log global
balance random
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
web服务器日志格式配置
配置web服务器,记录负载均衡透传的客户端IP地址
'''
#apache 配置:
LogFormat "%{X-Forwarded-For}i %a %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
#nginx 日志格式:
$proxy_add_x_forwarded_for:包括客户端IP和中间经过的所有代理的IP
$http_x_forwarded_For:只有客户端IP
log_format main '"$proxy_add_x_forwarded_for" - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" $http_x_forwarded_For';
[root@centos8 ~]#tail /var/log/nginx/access.log
"172.16.0.200, 10.0.0.100" 10.0.0.100 - - [09/Apr/2020:19:10:16 +0800] "GET / HTTP/1.1" 200 4057 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2" "172.16.0.200"
#tomcat 配置:conf目录下的server.xml
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="localhost_access_log" suffix=".txt"
pattern="%{X-Forwarded-For}i %h %l %u %t "%r" %s %b" />
'''
验证客户端IP地址
apache日志:
'''
[root@centos7 ~]#vim /etc/httpd/conf/httpd.conf
LogFormat "%{X-Forwarded-For}i %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
[root@centos7 ~]#systemctl restart httpd
[root@centos6 ~]#hostname -I
10.0.0.6
[root@centos6 ~]#curl http://10.0.0.7
10.0.0.17
[root@centos7 ~]#tail -f /var/log/httpd/access_log
10.0.0.6 10.0.0.7 - - [01/Apr/2020:01:08:31 +0800] "GET / HTTP/1.1" 200 10 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2"
10.0.0.6 10.0.0.7 - - [01/Apr/2020:01:08:33 +0800] "GET / HTTP/1.1" 200 10 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2"
'''
--------------------------
haproxy - 报文修改
报文修改
在http模式下,基于实际需求修改客户端的请求报文与响应报文,通过reqadd和reqdel在请求报文添加删除字段,通过rspadd与rspidel在响应报文中添加与删除字段。
注意:此功能的以下相关指令在2.1版本中已经取消
官方文档:参看2.0的帮助文档
'''
http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#4-rspadd
'''
'''
#在向后端服务器转发的请求报文尾部添加指定首部
reqadd <string> [{if | unless} <cond>]
示例:reqadd X-Via:\ HAPorxy
#在向后端服务器转发的请求报文中删除匹配正则表达式的首部
reqdel <search> [{if | unless} <cond>]
reqidel <search> [{if | unless} <cond>] #忽略大小写
示例:reqidel user-agent
#在向前端客户端转发的响应报文尾部添加指定首部
rspadd <string> [{if | unless} <cond>]
示例:
rspadd X-Via:\ HAPorxy
rspadd Server:\ wanginx
#从向前端客户端转发的响应报文中删除匹配正则表达式的首部
rspdel <search> [{if | unless} <cond>]
rspidel <search> [{if | unless} <cond>] #忽略大小写
示例:
rspidel ^server:.* #从响应报文删除server信息
rspidel X-Powered-By:.* #从响应报文删除X-Powered-By信息,一般此首部字段保存php版本信息
'''
2.1版本以上用下面指令http-request和http-response代替
官方文档:
'''
http://cbonte.github.io/haproxy-dconv/2.1/configuration.html#4-http-request
http://cbonte.github.io/haproxy-dconv/2.1/configuration.html#4-http-response
'''
配置说明:
'''
http-request add-header <name> <fmt> [ { if | unless } <condition> ]
示例:http-request add-header X-Haproxy-Current-Date %T
http-request del-header <name> [ { if | unless } <condition> ]
http-response add-header <name> <fmt> [ { if | unless } <condition> ]
http-response del-header <name>
#示例:
http-response del-header Server
'''
范例:
'''
#添加向后端报务器发起的请求报文首部
vim haproxy.cfg
frontend main *:80
# bind *:80
default_backend websrvs
reqadd testheader:\ haporxyserver #加此行,只有一个空格,并需要转义
#在后端httpd服务器
vim /etc/httpd/conf/httpd.conf
LogFormat "%{testheader}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
#查看日志
tail -f /var/log/httpd/access_log
'''
范例:
'''
#添加响应报文首部
vim haproxy.cfg
frontend main *:80
# bind *:80
default_backend websrvs
rspadd X-Via:\ HAPorxy-1 #加此行
maxconn 5000
#客户端访问调试模式,查看reponse headers,看到
Server: Apache/2.2.15 (CentOS) 系统自带显示
X-Via: HAPorxy-1
'''
范例:
'''
#删除响应报文中的server首部
vim haproxy.cfg
frontend main *:80
# bind *:80
default_backend websrvs
rspadd X-Via:\ HAPorxy-1
rspdel Server 或者 rspidel server #加此行 ,忽略大小写
rspidel X-Powered-By:.* #删除Php版本
maxconn 5000
#客户端访问调试模式,查看reponse headers,看到
Server: Apache/2.2.15 (CentOS) 此行消失
X-Via: HAPorxy-1
'''
范例:
'''
#增加响应报文的首部,实现伪装Server首部
vim haproxy.cfg
frontend main *:80
# bind *:80
default_backend websrvs
rspadd X-Via:\ HAPorxy-1
rspdel Server #或者 rspidel server
rspadd Server:\ wanginx #增加此行
[root@internet ~]#curl -i 172.16.0.100
HTTP/1.1 200 OK
date: Thu, 09 Apr 2020 08:32:10 GMT
last-modified: Thu, 09 Apr 2020 01:23:18 GMT
etag: "f-5a2d17630635b"
accept-ranges: bytes
content-length: 15
content-type: text/html; charset=UTF-8
server: wanginx
RS1 10.0.0.17
'''
范例:
'''
[root@centos7 ~]#vim /etc/haproxy/haproxy.cfg
listen web_port
bind 10.0.0.7:80
http-request add-header X-Haproxy-Current-Date %T
http-response del-header server
mode http
log global
option httpchk
http-check expect status 200
server web1 10.0.0.17:80 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 check inter 3000 fall 2 rise 5
#查看后端服务器日志
tail -f /var/log/httpd/access_log
10.0.0.7 - - [05/Apr/2020:20:13:48 +0800] "GET / HTTP/1.1" 200 10 "-" "curl/7.19.7 (x86_64-redhat-linux-gnu) l
ibcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2" "05/Apr/2020:12:13:48 +0000"
'''
--------------------------------
haproxy - 自定义日志格式
log global 开启日志功能,默认只会在记录下面格式的日志
'''
[root@haproxy ~]#tail /var/log/haproxy.log
Apr 9 19:38:46 localhost haproxy[60049]: Connect from 172.16.0.200:54628 to 172.16.0.100:80 (web_prot_http_nodes/HTTP)
'''
option httplog 可以将http格式记录下,并且可以使用相关指令将特定信息记录在haproxy的日志中
但一般不建议开启,这会加重 HAProxy 负载
配置选项
'''
log global #开启记录日志,默认不开启
option httplog #开启记录httplog日志格式选项
capture cookie <name> len <length> #捕获请求和响应报文中的 cookie并记录日志
capture request header <name> len <length> #捕获请求报文中指定的首部内容和长度并记录日志
capture response header <name> len <length> #捕获响应报文中指定的内容和长度首部并记录日志
#示例:
log global
option httplog
capture request header Host len 256
capture request header User-Agent len 512
capture request header Referer len 15
capture request header X-Forwarded-For len 15
'''
只开启日志功能log global和option httplog,记录日志格式如下
'''
[root@haproxy ~]#tail /var/log/haproxy.log
Apr 9 19:42:02 localhost haproxy[60236]: 172.16.0.200:54630 [09/Apr/2020:19:42:02.623] web_prot_http_nodes web_prot_http_nodes/web1 0/0/1/1/2 200 4264 - - ---- 1/1/0/0/0 0/0 "GET / HTTP/1.1"
'''
配置示例
'''
listen web_host
bind 10.0.0.7:80
mode http
balance roundrobin
log global #开启日志功能
option httplog #开启httplog日志格式选项
capture request header User-Agent len 512 #记录日志信息
capture request header Host len 256 #记录日志信息
cookie SERVER-COOKIE insert indirect nocache
server web1 10.0.0.17:80 cookie web1 check inter 3000 fall 3 rise 5
server web2 10.0.0.27:80 cookie web2 check inter 3000 fall 3 rise 5
'''
验证日志格式
'''
[root@centos7 ~]#tail -n3 /var/log/haproxy.log
Apr 2 12:44:26 localhost haproxy[27637]: 10.0.0.6:50004 [02/Apr/2020:12:44:26.817] web_port web_port/web1 0/0/0/2/3 200 42484 - - --NI 1/1/0/0/0 0/0 {curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2|10.0.0.7} "GET /test.php HTTP/1.1"
Apr 2 12:44:27 localhost haproxy[27637]: 10.0.0.6:50006 [02/Apr/2020:12:44:27.294] web_port web_port/web2 0/0/0/1/1 404 370 - - --NI 1/1/0/0/0 0/0 {curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2|10.0.0.7} "GET /test.php HTTP/1.1"
Apr 2 12:44:27 localhost haproxy[27637]: 10.0.0.6:50008 [02/Apr/2020:12:44:27.840] web_port web_port/web1 0/0/0/3/4 200 42484 - - --NI 1/1/0/0/0 0/0 {curl/7.19.7 (x86_64-redhat-linux-gnu) libcurl/7.19.7 NSS/3.27.1 zlib/1.2.3 libidn/1.18 libssh2/1.4.2|10.0.0.7} "GET /test.php HTTP/1.1"
'''
-------------------------
haproxy - 压缩功能
对响应给客户端的报文进行压缩,以节省网络带宽,但是会占用部分CPU性能,建议在后端服务器开启压缩功能,而非在HAProxy上开启压缩
配置选项
'''
compression algo <algorithm> ... #启用http协议中的压缩机制,常用算法有gzip,deflate
<algorithm>支持下面类型:
identity #debug调试使用的压缩方式
gzip #常用的压缩方式,与各浏览器兼容较好
deflate #有些浏览器不支持
raw-deflate #新式的压缩方式
compression type <mime type> ... #要压缩的文件类型
#示例:
compression algo gzip deflate
compression type text/html text/css text/plain
'''
配置示例
'''
listen web_host
bind 10.0.0.7:80
| |
go into an enemy base.
if len(self.cached_enemies.closer_than(10, position)) > 0:
continue
distance = position.distance_to(possible)
#make sure the enemy is not closer to the point we are going to.
e_distance = enemy.distance_to(possible)
if distance < e_distance:
locations.append([distance, possible])
if len(locations) > 0:
return sorted(locations, key=itemgetter(0))[0][1]
return None
def findGroundRetreatTarget(self, unit, inc_size=3, enemy_radius=10):
    """Pick a ground retreat point for *unit*.

    Returns the pathable candidate from the surrounding retreat grid that is
    farthest from the center of nearby ground-attacking enemies, or None when
    there are no candidates or no such threats in range.
    """
    # candidate points around us, restricted to ones we can actually walk to.
    candidates = {p for p in self.retreatGrid(unit.position, size=inc_size)
                  if self.inPathingGrid(p)}
    if not candidates:
        return None
    # enemies near us that can shoot ground units.
    threats = self.cached_enemies.filter(lambda e: e.can_attack_ground).closer_than(enemy_radius, unit)
    if not threats:
        return None
    # flee directly away from the threat mass.
    threat_center = self.center3d(threats)
    return threat_center.position.furthest(candidates)
###########
#Utilities#
###########
def shadeCasting(self):
    """Per-frame shade timer: flag self.shadeCast when at least 11.4s have
    passed since the last cast, and record the new cast time when it fires.
    """
    ready = self.time > self.lastShadeCast + 11.4
    self.shadeCast = ready
    if ready:
        self.lastShadeCast = self.time
################################
#map val kite/retreat functions#
################################
def findGroundRetreatTargetMapVals(self, unit, inc_size=3, enemy_radius=10):
    """Map-value-aware variant of findGroundRetreatTarget.

    Prefers retreat points that pass abovePathingScore; falls back to any
    pathable point when none qualify.  Returns the candidate farthest from
    the center of nearby ground threats, or None.
    """
    grid = self.retreatGrid(unit.position, size=inc_size)
    # first choice: points with a good pathing score.
    candidates = {p for p in grid if self.abovePathingScore(p)}
    if not candidates:
        # fallback: anything that is at least walkable.
        candidates = {p for p in grid if self.inPathingGrid(p)}
    if not candidates:
        return None
    threats = self.cached_enemies.filter(lambda e: e.can_attack_ground).closer_than(enemy_radius, unit)
    if not threats:
        return None
    return self.center3d(threats).position.furthest(candidates)
###############
#odd functions#
###############
async def createArchons(self):
    """Morph two idle, ready High Templar into an Archon.

    The morph is sent as a hand-built raw protobuf action (one command
    carrying both unit tags) — presumably because the library's normal
    per-unit command path does not cover multi-unit morphs; note the
    commented-out attempt at the bottom.
    """
    # count the number of high templars and morph if 2 of them exists.
    # if self.units(HIGHTEMPLAR).amount > 1:
    if self.units(UnitTypeId.HIGHTEMPLAR).idle.ready.amount >= 2:
        ht1 = self.units(UnitTypeId.HIGHTEMPLAR).idle.ready.random
        # pick a second templar distinct from the first by tag.
        ht2 = next((ht for ht in self.units(UnitTypeId.HIGHTEMPLAR).idle.ready if ht.tag != ht1.tag), None)
        if ht2:
            # raw action: MORPH_ARCHON issued to both templar tags at once.
            command = raw_pb.ActionRawUnitCommand(
                ability_id=AbilityId.MORPH_ARCHON.value,
                unit_tags=[ht1.tag, ht2.tag],
                queue_command=False
            )
            action = raw_pb.ActionRaw(unit_command=command)
            await self._client._execute(action=sc_pb.RequestAction(
                actions=[sc_pb.Action(action_raw=action)]
            ))
            #self.combinedActions.append(self.units(HIGHTEMPLAR)[0](AbilityId.MORPH_ARCHON, [self.units(HIGHTEMPLAR)[0], self.units(HIGHTEMPLAR)[1]]))
async def endGameCheck(self):
    """Detect imminent defeat (and later recovery) and keep training data honest.

    On suspected defeat: timestamps the detection, rewrites this match's
    stored result to a loss, and sends a loss chat message once.  If we then
    recover to >= 5 structures, the detection is cleared and the stored
    result is flipped back to a win.
    """
    # check if we are about to lose
    if not self.defeat_detected and self._strat_manager.checkDefeat() and self.cached_enemies.structure.amount > 0:
        # remember when (plus 10s of grace) the defeat was detected.
        self.defeat_detected = self.time + 10
        #print ('lost', self.defeat_detected)
        if _use_data:
            # replace the stored result for this match with a loss.
            self._training_data.removeResult(self.opp_id, self.match_id, self.enemy_race)
            self._training_data.saveResult(self.opp_id, self._strat_manager.strat_id, 'l', self.match_id, self.enemy_race, self.map_name)
        if not self.gg_said:
            await self._client.chat_send(self._strat_manager.unitCounter.getLossSaying(), team_only=False)
            self.gg_said = True
    # check if we are not losing now.
    if self.defeat_detected and self.units.structure.amount >= 5:
        self.defeat_detected = None
        #print ('not dead yet')
        if _use_data:
            self._training_data.removeResult(self.opp_id, self.match_id, self.enemy_race)
            # divert back to win.
            self._training_data.saveResult(self.opp_id, self._strat_manager.strat_id, 'w', self.match_id, self.enemy_race, self.map_name)
        if not self.alive_said:
            #await self._client.chat_send('trouble finishing? I should be dead already', team_only=False)
            self.alive_said = True
def cancelBuildings(self):
    """Queue a CANCEL for each of our in-progress structures that is nearly
    dead (combined health + shield below 10), salvaging the refund."""
    endangered = self.units.filter(
        lambda b: b.build_progress < 1 and b.health + b.shield < 10)
    for building in endangered:
        self.combinedActions.append(building(CANCEL))
###############
#utlities code#
###############
def loadStart(self):
    """One-time startup bookkeeping: height-map calibration, opponent id,
    strategy selection, and intro chat strings.

    When ``_use_data`` is False the strategy id comes straight from per-race
    constants.  Otherwise it is looked up from persisted training data, and
    an optimistic 'w' result is pre-saved so a win is recorded even if the
    opponent crashes or leaves before the match is logged (endGameCheck may
    later rewrite it).
    """
    # set the worker time as now.
    self.worker_moved = self.time
    # get the heightmap offset.
    nexus = self.units(NEXUS).ready.random
    #nexus = self.structures(NEXUS).ready.random
    if nexus:
        hmval = self.getHeight(nexus.position)
        # calibrate heightmap values against the nexus's known world z.
        self.hm_offset = hmval - nexus.position3d.z - 1
    if not _use_data:
        # select starting strat by race.
        if self.enemy_race == Race.Zerg:
            self._strat_manager.strat_id = _zerg_race_strat_id
            # self._strat_manager.start_build_order = ['Gateway', 'CyberneticsCore', 'Gateway', 'RoboticsFacility']
        elif self.enemy_race == Race.Protoss:
            self._strat_manager.strat_id = _protoss_race_strat_id
            # self._strat_manager.start_build_order = ['Gateway', 'Gateway', 'CyberneticsCore', 'RoboticsFacility']
        elif self.enemy_race == Race.Terran:
            self._strat_manager.strat_id = _terran_race_strat_id
            # self._strat_manager.start_build_order = ['Gateway', 'CyberneticsCore', 'Gateway', 'RoboticsFacility']
        else:
            self._strat_manager.strat_id = 1
            # self._strat_manager.start_build_order = ['Gateway', 'CyberneticsCore', 'Gateway', 'RoboticsFacility']
        print ('using race strat:', self._strat_manager.strat_id)
        self.intro_value = "(glhf) {version}:{strat_id}".format(version=_version, strat_id=self._strat_manager.strat_id)
        # opp id '0' — presumably the generic (unknown-opponent) history bucket.
        self.opp_unit_history = self._training_data.getOppHistory('0', self.enemy_race)
        if self.opp_unit_history:
            self.start_unit_ratio = self._strat_manager.calc_starter_counters(self.opp_unit_history)
            self.intro_buildings = "First four buildings: {b1}, {b2}, {b3}, {b4}".format(b1=self.start_unit_ratio[0], b2=self.start_unit_ratio[1], b3=self.start_unit_ratio[2], b4=self.start_unit_ratio[3])
    else:
        # map key includes our start location so mirrored spawns are distinct.
        self.map_name = "{}-{}-{}".format(self.game_info._proto.map_name, self.game_info.player_start_location.x, self.game_info.player_start_location.y)
        # get map width and height:
        #self.map_name = 'NoMapSeeding'
        #self._game_info.pathing_grid.width
        if not self.opp_id:
            # no ladder-supplied opponent id: fall back to a per-race placeholder.
            if self.enemy_race == Race.Zerg:
                self.opp_id = 2
            elif self.enemy_race == Race.Protoss:
                self.opp_id = 3
            elif self.enemy_race == Race.Terran:
                self.opp_id = 4
            else:
                self.opp_id = 5
        # load up the pickle info and the opp info.
        self.opp_id = "{}-{}".format(self.enemy_race, self.opp_id)
        if _local_ladder:
            print ('playing vs', self.opp_id)
        self._training_data.loadData()
        # generate a unique id that will be used to identify this match.
        self.match_id = time.strftime("%y%m%d%H%M", time.gmtime())
        # find out which strat we want to use.
        self._strat_manager.strat_id = self._training_data.findStrat(self.opp_id, self.enemy_race, self.map_name)
        if _test_strat_id > 0:
            self._strat_manager.strat_id = _test_strat_id
        print ('using strat:', self._strat_manager.strat_id)
        # save this as a victory in case the opponent crashes or leaves before we are able to log it.
        # get length of training data.
        # NOTE(review): this first trainingLen is dead — overwritten two lines below.
        trainingLen = len(self._training_data.data_dict.items())
        oppLen = self._training_data.totalOppDataCount(self.opp_id, self.enemy_race)
        trainingLen = self._training_data.totalDataCount()
        self._training_data.saveResult(self.opp_id, self._strat_manager.strat_id, 'w', self.match_id, self.enemy_race, self.map_name)
        self.intro_value = "(glhf) {version}:{strat_id}.{olen}.{tlen}".format(version=_version, strat_id=self._strat_manager.strat_id, tlen=trainingLen, olen=oppLen)
        #print (self.intro_value)
        # load up the units the opp used the last time.
        self.opp_unit_history = self._training_data.getOppHistory(self.opp_id, self.enemy_race)
        if self.opp_unit_history:
            self.start_unit_ratio = self._strat_manager.calc_starter_counters(self.opp_unit_history)
            self.intro_buildings = "First four buildings: {b1}, {b2}, {b3}, {b4}".format(b1=self.start_unit_ratio[0], b2=self.start_unit_ratio[1], b3=self.start_unit_ratio[2], b4=self.start_unit_ratio[3])
async def check_enemy_structures(self):
    """Recompute the rally point whenever the count of known enemy structures changes."""
    current = len(self.cached_enemies.structure)
    if current == self.saved_enemy_structures:
        return
    self.saved_enemy_structures = current
    await self.getRallyPoint()
async def getRallyPoint(self):
    """Compute and store the army rally point and the warp-prism pylon point.

    Sets ``self.rally_pos`` and ``self.prism_pylon_pos``.  Strategy: take the
    enemy structure farthest from a (random) enemy start location, find the
    midpoint between it and our defensive position, and rally at the nearest
    valid Nexus-sized placement there.  The prism pylon spot sits one more
    midpoint closer to the enemy.  Falls back to the defensive position when
    no enemies (or no valid placement) are known.

    Fix: corrected the 'cound not find prism position' log message typo.
    """
    if not self.cached_enemies:
        # first frame or we killed them all, just use the defensive point.
        self.rally_pos = self.defensive_pos
        self.prism_pylon_pos = self.defensive_pos
        return
    if len(self.cached_enemies.structure) == 0:
        # enemies known but no structures: also hold at the defensive point.
        self.rally_pos = self.defensive_pos
        self.prism_pylon_pos = self.defensive_pos
        return
    # structure farthest from a random enemy start — likely their forwardmost base.
    closestEnemyStructure = self.cached_enemies.structure.furthest_to(random.choice(self.enemy_start_locations))
    mid = self.midpoint(closestEnemyStructure.position, self.defensive_pos)
    # move a little closer to the enemy by getting another midpoint; used for warp prisms.
    closer_mid = self.midpoint(closestEnemyStructure.position, mid)
    placement = await self.find_placement(NEXUS, mid)
    placement2 = await self.find_placement(NEXUS, closer_mid)
    if placement:
        self.rally_pos = placement
    else:
        print('could not find rally position')
        self.rally_pos = self.defensive_pos
    # must run after rally_pos is set: the fallback below reads it.
    if placement2:
        self.prism_pylon_pos = placement2
    else:
        print('could not find prism position')
        self.prism_pylon_pos = self.rally_pos
def midpoint(self, pos1, pos2):
    """Return the Point2 halfway between two positioned objects."""
    ax, ay = pos1.position.x, pos1.position.y
    bx, by = pos2.position.x, pos2.position.y
    return Point2(((ax + bx) / 2, (ay + by) / 2))
def getDefensivePoint(self):
    """Choose self.defensive_pos: a point 9 units up the ramp that best guards our bases.

    Candidate ramps must be within 30 of our forwardmost Nexus and not much
    farther from the enemy start than that Nexus is; among those, the ramp
    whose top is closest to the center of all our Nexuses wins, defaulting
    to the main base ramp.  No-op when we have no Nexus.
    """
    # make sure a nexus exists before reading .center / .closest_to.
    if not self.units(NEXUS):
        return
    center = self.units(NEXUS).center
    forward_nexus = self.units(NEXUS).closest_to(self.enemy_start_locations[0])
    nexus_to_enemy = forward_nexus.distance_to(self.enemy_start_locations[0])
    # keep ramps that are near us and roughly between us and the enemy.
    qualifying = []
    for ramp in self.game_info.map_ramps:
        near_us = forward_nexus.distance_to(ramp.top_center) < 30
        not_behind_enemy = ramp.top_center.distance_to(self.enemy_start_locations[0]) < nexus_to_enemy + 10
        if near_us and not_behind_enemy:
            qualifying.append(ramp)
    # pick the qualifying ramp whose top is closest to our nexus center.
    best_ramp = self.main_base_ramp
    best_distance = 1000
    for ramp in qualifying:
        d = sqrt((ramp.top_center[0] - center[0]) ** 2 + (ramp.top_center[1] - center[1]) ** 2)
        if d < best_distance:
            best_ramp = ramp
            best_distance = d
    if best_ramp:
        # stand 9 units up the ramp from its bottom (skip degenerate flat ramps).
        if best_ramp.bottom_center.position != best_ramp.top_center.position:
            self.defensive_pos = best_ramp.bottom_center.towards(best_ramp.top_center, 9)
def findOppId(self):
    """Return the --OpponentId command-line value, or None when not supplied."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--OpponentId', type=str, nargs="?", help='Opponent Id')
    known, _unknown = parser.parse_known_args()
    return known.OpponentId if known.OpponentId else None
def getEnemyCenteredStats(self, unit_obj, enemy_range=10):
    """Aggregate threat stats for enemy combat units within enemy_range of unit_obj.unit.

    Workers (Probe/SCV/Drone) and units that cannot attack at all are excluded.

    Returns a 10-element list:
    [enemyDPStoGround, enemyDPStoAir, enemyAirHealth, enemyGroundHealth,
     enemyTotalDPS, closestEnemy, enemyGroundtoAirDPS, enemyAirtoGroundDPS,
     enemyGroundtoGroundDPS, enemyAirtoAirDPS]
    where closestEnemy is the nearest threat that can hit unit_obj (or the
    nearest threat overall as a fallback), and None when nothing is in range.

    Fix: enemyGroundtoGroundDPS now accumulates with '+=' — it previously
    used '=' and only kept the last enemy's value, unlike every sibling
    accumulator here.
    """
    # find all the enemy units that are near us.
    enemyThreatsClose = unit_obj.closestEnemies.closer_than(enemy_range, unit_obj.unit).filter(
        lambda x: x.name not in ['Probe', 'SCV', 'Drone'] and (x.can_attack_air or x.can_attack_ground))
    enemyGroundtoAirDPS = 0
    enemyAirtoGroundDPS = 0
    enemyGroundtoGroundDPS = 0
    enemyAirtoAirDPS = 0
    enemyDPStoGround = 0
    enemyDPStoAir = 0
    enemyAirHealth = 0
    enemyGroundHealth = 0
    enemyTotalDPS = 0
    closestEnemy = None
    if enemyThreatsClose:
        for enemy in enemyThreatsClose:
            if enemy.can_attack_ground or enemy.can_attack_air:
                if enemy.is_flying:
                    enemyAirHealth += enemy.health + enemy.shield
                    if unit_obj.unit.is_flying:
                        enemyAirtoAirDPS += enemy.air_dps
                    else:
                        enemyAirtoGroundDPS += enemy.ground_dps
                else:
                    enemyGroundHealth += enemy.health + enemy.shield
                    if unit_obj.unit.is_flying:
                        enemyGroundtoAirDPS += enemy.air_dps
                    else:
                        # BUG FIX: was '=', which overwrote instead of accumulating.
                        enemyGroundtoGroundDPS += enemy.ground_dps
                enemyDPStoGround += enemy.ground_dps
                enemyDPStoAir += enemy.air_dps
                # count each enemy's best attack mode toward total DPS.
                enemyTotalDPS += max(enemy.ground_dps, enemy.air_dps)
        # get the closest enemy that can actually hit us, if any can.
        if unit_obj.unit.is_flying and enemyThreatsClose.filter(lambda x: x.can_attack_air):
            closestEnemy = enemyThreatsClose.filter(lambda x: x.can_attack_air).closest_to(unit_obj.unit)
        elif not unit_obj.unit.is_flying and enemyThreatsClose.filter(lambda x: x.can_attack_ground):
            closestEnemy = enemyThreatsClose.filter(lambda x: x.can_attack_ground).closest_to(unit_obj.unit)
        else:
            closestEnemy = enemyThreatsClose.closest_to(unit_obj.unit)
    return [enemyDPStoGround, enemyDPStoAir, enemyAirHealth, enemyGroundHealth, enemyTotalDPS, closestEnemy, enemyGroundtoAirDPS, enemyAirtoGroundDPS, enemyGroundtoGroundDPS, enemyAirtoAirDPS]
def getAllEnemyStats(self, unit_obj, enemy_range=10):
    """Aggregate threat stats over ALL cached enemies near unit_obj (no range filter).

    Same contract as getEnemyCenteredStats but iterates unit_obj.closestEnemies
    directly; the enemy_range parameter is accepted for signature parity and
    unused.  Workers (Probe/SCV/Drone) and non-attacking units are excluded.

    Returns a 10-element list:
    [enemyDPStoGround, enemyDPStoAir, enemyAirHealth, enemyGroundHealth,
     enemyTotalDPS, closestEnemy, enemyGroundtoAirDPS, enemyAirtoGroundDPS,
     enemyGroundtoGroundDPS, enemyAirtoAirDPS]

    Fix: enemyGroundtoGroundDPS now accumulates with '+=' — it previously
    used '=' and only kept the last enemy's value.
    """
    # find all the enemy units that are near us.
    #enemyThreatsClose = unit_obj.closestEnemies.exclude_type([PROBE,SCV,DRONE]).filter(lambda x: x.can_attack_air or x.can_attack_ground)
    enemyThreatsClose = unit_obj.closestEnemies.filter(
        lambda x: not x.name in ['Probe', 'SCV', 'Drone'] and (x.can_attack_air or x.can_attack_ground))
    enemyGroundtoAirDPS = 0
    enemyAirtoGroundDPS = 0
    enemyGroundtoGroundDPS = 0
    enemyAirtoAirDPS = 0
    enemyDPStoGround = 0
    enemyDPStoAir = 0
    enemyAirHealth = 0
    enemyGroundHealth = 0
    enemyTotalDPS = 0
    closestEnemy = None
    if enemyThreatsClose:
        for enemy in enemyThreatsClose:
            if enemy.can_attack_ground or enemy.can_attack_air:
                if enemy.is_flying:
                    enemyAirHealth += enemy.health + enemy.shield
                    if unit_obj.unit.is_flying:
                        enemyAirtoAirDPS += enemy.air_dps
                    else:
                        enemyAirtoGroundDPS += enemy.ground_dps
                else:
                    enemyGroundHealth += enemy.health + enemy.shield
                    if unit_obj.unit.is_flying:
                        enemyGroundtoAirDPS += enemy.air_dps
                    else:
                        # BUG FIX: was '=', which overwrote instead of accumulating.
                        enemyGroundtoGroundDPS += enemy.ground_dps
                enemyDPStoGround += enemy.ground_dps
                enemyDPStoAir += enemy.air_dps
                # count each enemy's best attack mode toward total DPS.
                enemyTotalDPS += max(enemy.ground_dps, enemy.air_dps)
        # get the closest enemy that can actually hit us, if any can.
        if unit_obj.unit.is_flying and enemyThreatsClose.filter(lambda x: x.can_attack_air):
            closestEnemy = enemyThreatsClose.filter(lambda x: x.can_attack_air).closest_to(unit_obj.unit)
        elif not unit_obj.unit.is_flying and enemyThreatsClose.filter(lambda x: x.can_attack_ground):
            closestEnemy = enemyThreatsClose.filter(lambda x: x.can_attack_ground).closest_to(unit_obj.unit)
        else:
            closestEnemy = enemyThreatsClose.closest_to(unit_obj.unit)
    return [enemyDPStoGround, enemyDPStoAir, enemyAirHealth, enemyGroundHealth, enemyTotalDPS, closestEnemy, enemyGroundtoAirDPS, enemyAirtoGroundDPS, enemyGroundtoGroundDPS, enemyAirtoAirDPS]
def getEnemyStats(self, unit_obj, enemy_range=10):
#find all the enemy units that are near us.
enemyThreatsClose = unit_obj.closestEnemies.closer_than(enemy_range, unit_obj.unit).filter(lambda x: x.can_attack_air or x.can_attack_ground)
enemyGroundtoAirDPS = 0
enemyAirtoGroundDPS = 0
enemyGroundtoGroundDPS = 0
enemyAirtoAirDPS = 0
enemyDPStoGround = 0
enemyDPStoAir = 0
enemyAirHealth = 0
enemyGroundHealth = 0
enemyTotalDPS = 0
closestEnemy = None
if enemyThreatsClose:
for enemy in enemyThreatsClose:
if enemy.can_attack_ground or enemy.can_attack_air:
if enemy.is_flying:
enemyAirHealth += enemy.health + enemy.shield
if unit_obj.unit.is_flying:
enemyAirtoAirDPS += enemy.air_dps
else:
enemyAirtoGroundDPS += enemy.ground_dps
else:
enemyGroundHealth += enemy.health + enemy.shield
if unit_obj.unit.is_flying:
enemyGroundtoAirDPS += enemy.air_dps
else:
enemyGroundtoGroundDPS = enemy.ground_dps
enemyDPStoGround += enemy.ground_dps
enemyDPStoAir += enemy.air_dps
if enemy.ground_dps > enemy.air_dps:
enemyTotalDPS += enemy.ground_dps
else:
enemyTotalDPS += enemy.air_dps
#get the closest enemy.
if unit_obj.unit.is_flying and enemyThreatsClose.filter(lambda x: x.can_attack_air):
closestEnemy = enemyThreatsClose.filter(lambda x: x.can_attack_air).closest_to(unit_obj.unit)
elif not unit_obj.unit.is_flying and enemyThreatsClose.filter(lambda x: x.can_attack_ground):
closestEnemy = enemyThreatsClose.filter(lambda x: x.can_attack_ground).closest_to(unit_obj.unit)
else:
closestEnemy = enemyThreatsClose.closest_to(unit_obj.unit)
return [enemyDPStoGround, enemyDPStoAir, enemyAirHealth, enemyGroundHealth, enemyTotalDPS, closestEnemy, enemyGroundtoAirDPS, enemyAirtoGroundDPS, enemyGroundtoGroundDPS, enemyAirtoAirDPS]
async def getAllAbilities(self, ignore_resources=False):
result = await self._client._execute(query=query_pb.RequestQuery(
abilities=[query_pb.RequestQueryAvailableAbilities(
unit_tag=unit.tag) for unit in self.units],
ignore_resource_requirements=ignore_resources)
)
ab_dict = {}
for unit in result.query.abilities:
abils = []
for abil in unit.abilities:
abils.append(AbilityId(abil.ability_id))
ab_dict.update({unit.unit_tag:abils})
return ab_dict
def cache_enemies(self):
#self.cached_enemies = self.cached_enemies
self.cached_enemies = self.state.enemy_units
def check_movements(self):
new_positions = {}
new_moves = {}
#loop units and compare their current positions with the previous positions.
for unit in | |
import bisect
import bucket
import cross_bundle_bucket
import pairedread
import copy
import random
import time
class Alignments:
''' A set of reads aligned to a genome '''
    def __init__(self, chromosomes, frag_len_cutoff=None, split_discordant=True):
        ''' Initialize a genome for alignments

        chromosomes: Pair of parallel lists ([names], [lengths]) describing
            the reference chromosomes. (The old docstring called this a
            dictionary, but the code below indexes chromosomes[0] and
            chromosomes[1].)
        frag_len_cutoff: Maximum mate distance for two reads to be treated as
            part of the same bundle (see processRead).
        split_discordant: If True, discordant mates are kept apart rather than
            combined into one paired read (see find_mate / add_paired).
        '''
        self.frag_len_cutoff = frag_len_cutoff
        self.split_discordant = split_discordant
        self.chromosomeNames = chromosomes[0]
        # Map chromosome name -> length
        self.chromosomes = dict()
        for i in range(len(chromosomes[0])):
            self.chromosomes[chromosomes[0][i]] = chromosomes[1][i]
        # Initialize exon breaks between all chromosomes
        self.exons = set()
        # Offset of each chromosome from the start of the genome
        self.chromOffsets = dict()
        # Bases are generally indexed from 1
        nextOffset = 1
        for i in range(len(chromosomes[0])):
            self.chromOffsets[chromosomes[0][i]] = nextOffset
            nextOffset += chromosomes[1][i] + 1
        #print(self.chromOffsets)
        # List of potential gene boundaries as tuples
        self.gene_bounds = []
        # If 2 reads are less than this far apart, combine them into a single gene
        self.overlap_radius = 50
        # paired reads for which the mate still needs to be found
        self.unmatched = dict()
        # Unmatched reads that span multiple bundles
        self.cross_bundle_reads = dict()
        # Unmatched reads *in the current bundle* that span multiple bundles
        self.curr_cross_bundle_reads = dict()
        # Matched mates spanning multiple bundles
        self.cross_bundle_pairs = []
        # Buckets keyed by bundle/exon signature for cross-bundle pairs
        self.cross_bundle_buckets = dict()
        # Longest read seen in any cross-bundle pair (bounds the length distribution)
        self.max_cross_bundle_read_len = 0
        # Finished unpaired / paired alignments for the current bundle
        self.unpaired = []
        self.paired = []
        # Timing / counting instrumentation
        self.read_timeA = 0.0
        self.read_countA = 0
        self.read_timeB = 0.0
        self.read_countB = 0
        # Count of reads whose mate never appeared (demoted to unpaired)
        self.numUnmatched = 0
        # Exon boundaries from an annotation (GTF), plus a cursor into that list
        self.gtf_exons = []
        self.gtf_id = 0
    def processRead(self, name, read, paired):
        ''' If read is unpaired, add it to the correct spliced or unspliced list of reads.
            If read is paired, find its pair or add it to a list to be found later. Once a pair of reads is found, add the combined read to the appropriate list of reads

        :param name: Read name (first SAM column); mates share a name
        :param read: Alignment with chromosome-local coordinates; mutated in
            place to genome-global coordinates via the chromosome offset
        :param paired: True if the aligner flagged this read as one of a pair
        '''
        # Update read location for chromosome
        offset = self.chromOffsets[read.chrom]
        read.pos += offset
        if read.exons:
            for i in range(len(read.exons)):
                read.exons[i] = [read.exons[i][0]+offset, read.exons[i][1]+offset]
            # update list of subexon bounds
            alignment = read.exons
            if len(alignment) > 1:
                for i in range(len(alignment)-1):
                    # Record both edges of each intron as potential exon breaks
                    self.exons.add(alignment[i][1])
                    self.exons.add(alignment[i+1][0])
        # Update the boundaries of the current bundle
        #self.update_gene_bounds(read.exons[0][0], read.exons[-1][1])
        if not paired:
            if not read.exons:
                # NOTE(review): an unpaired read without exons is unexpected;
                # the name is printed for debugging but processing continues,
                # so the next line will raise IndexError -- confirm intent.
                print(name)
            self.update_gene_bounds(read.exons[0][0], read.exons[-1][1])
            self.add_unpaired(read)
        else: # paired read
            # update pair location for chromsome
            read.pairOffset += self.chromOffsets[read.pairChrom]
            if read.exons:
                self.update_gene_bounds(read.exons[0][0], read.exons[-1][1])
            if read.pairOffset <= read.pos:
                # Mate maps at or before this read, so it should already have
                # been seen. Pair against the current bundle first.
                foundMate = True
                while foundMate and read.NH > 0:
                    i = self.find_mate(read, name, self.unmatched)
                    if i >= 0:
                        mate = self.unmatched[name][i]
                        # Order the pair by rightmost exon end
                        if mate.exons[-1][1] > read.exons[-1][1]:
                            self.add_paired(read, mate)
                        else:
                            self.add_paired(mate, read)
                        # Sanity check: mates must report the same NH (hit count);
                        # a mismatch aborts the whole run
                        if not read.NH == mate.NH:
                            print(name)
                            exit()
                        if read.NH >= mate.NH:
                            read.NH -= mate.NH
                            mate.NH = 0
                            del self.unmatched[name][i]
                        else:
                            mate.NH -= read.NH
                            read.NH = 0
                    else:
                        foundMate = False
                # Look for mates from a previous bundle
                foundMate = True
                while foundMate and read.NH > 0:
                    i = self.find_mate(read, name, self.cross_bundle_reads)
                    if i >= 0:
                        mate = self.cross_bundle_reads[name][i]
                        self.cross_bundle_pairs.append((mate, read, min(read.NH, mate.NH)))
                        if not read.NH == mate.NH:
                            print(name)
                            exit()
                        if read.NH >= mate.NH:
                            read.NH -= mate.NH
                            mate.NH = 0
                            del self.cross_bundle_reads[name][i]
                        else:
                            mate.NH -= read.NH
                            read.NH = 0
                    else:
                        foundMate = False
            if read.NH > 0:
                # One of its mates has not been processed yet
                # NOTE(review): frag_len_cutoff may be None (see __init__),
                # which would make this comparison raise on Python 3 -- confirm
                # callers always set a cutoff before processing paired reads.
                if (read.pairOffset - read.pos) < self.frag_len_cutoff:
                    self.update_gene_bounds(read.pos, read.pairOffset)
                if name in self.unmatched:
                    self.unmatched[name].append(read)
                else:
                    self.unmatched[name] = [read]
def find_mate(self, read, name, unmatched):
'''
Search the list of unmatched reads for one matching the given name and location
:param read: Read information including location and mate location
:param name: Identifying name (first column of SAM information)
:param unmatched: Dictionary of unmatched reads with key = name, value = list of reads
:return: Index in unmatched of matching read, or -1 if no match found
'''
if not name in unmatched:
return -1
r = unmatched[name]
for i in range(len(r)):
match = r[i]
if read.pairOffset == match.pos and match.pairOffset == read.pos:
if not read.exons:
if not read.pos == match.pos:
print('Error matching reads with name %s' % name)
exit()
read.exons = match.exons[:]
return i
elif not match.exons:
if not read.pos == match.pos:
print('Error matching reads with name %s' % name)
exit()
match.exons = read.exons[:]
return i
elif not self.split_discordant or not self.conflicts(read.exons, match.exons):
# Return index in unmatched dictionary of match
return i
# If no match found, return -1
return -1
def update_gene_bounds(self, start, end):
'''
Update the boundaries of the current bundle to include [start, end]
'''
if not self.gene_bounds:
self.gene_bounds = [start, end]
else:
if start < self.gene_bounds[0]:
self.gene_bounds[0] = start
if end > self.gene_bounds[1]:
self.gene_bounds[1] = end
    def conflicts(self, exonsA, exonsB):
        '''
        Test two exon chains for structural conflicts.

        :param exonsA: List containing the exon bounds for gene A in the form [(x_0,y_0), (x_1,y_1),...]
        :param exonsB: List containing the exon bounds for gene B in the same form as exonsA
        :return: 1 if an exon in gene B overlaps an intron in gene A, 2 if an
            exon in gene A overlaps an intron in gene B, 3 if one gene range
            lies strictly inside the other, 0 otherwise.

        NOTE(review): the original docstring stated 1/2 with A and B swapped,
        contradicting the inline comments below; the inline comments match the
        code and are used here.
        '''
        if (exonsA[0][0] < exonsB[0][0] and exonsA[-1][1] > exonsB[-1][1]) or (exonsB[0][0] < exonsA[0][0] and exonsB[-1][1] > exonsA[-1][1]):
            # One set of exons contains the other
            return 3
        # Scan B's exons left-to-right against A's exons (indexed from the right)
        for e in exonsB:
            if e[0] > exonsA[-1][0]:
                # e starts beyond A's last exon start; later B exons start even further right
                break
            for i in range(len(exonsA)-1):
                if e[0] >= exonsA[-i-1][0]:
                    break
                elif e[1] > exonsA[-i-2][1]:
                    # Exon in B overlaps an intron in A
                    return 1
        # Symmetric scan: A's exons right-to-left against B's exons
        countA = len(exonsA)
        for i in range(countA):
            e = exonsA[countA-i-1]
            if e[1] < exonsB[0][1]:
                break
            # NOTE(review): the inner loop below reuses the name `i`, shadowing
            # the outer index. The outer `for` reassigns `i` from its range each
            # iteration so behavior is unaffected, but the shadowing is fragile
            # -- rename with care if refactoring.
            for i in range(len(exonsB)-1):
                if e[1] <= exonsB[i][1]:
                    break
                elif e[1] > exonsB[i][1] and e[0] < exonsB[i+1][0]:
                    # Exon in A overlaps an intron in B
                    return 2
        return 0
def add_unpaired(self, read):
'''
Add this read to the list of unpaired
'''
self.unpaired.append(read)
def add_paired(self, read1, read2):
'''
Create a single paired read from these two reads and add it to the paired list
'''
strand = read1.strand or read2.strand
NH = min(read1.NH, read2.NH)
p = pairedread.PairedRead(read1.chrom, read1.exons, read2.chrom, read2.exons, strand, NH)
if not self.split_discordant and read1.chrom == read2.chrom and self.conflicts(read1.exons, read2.exons):
p.discordant = True
#if not read1.name == read2.name:
# print('Names %s, %s do not match' % (read1.name, read2.name))
# exit()
#print(read1.name)
#exit()
else:
p.discordant = False
self.paired.append(p)
    def finalizeUnmatched(self):
        '''
        Finalize unmatched (discordant) reads. We convert them to unpaired reads.

        Reads whose mate maps beyond the current bundle are parked in
        self.cross_bundle_reads (with their bucket geometry precomputed);
        everything else is demoted to a plain unpaired read.

        NOTE(review): the bucket-length math below indexes self.exons by
        position, so it assumes finalizeExons() has already converted
        self.exons from a set into a sorted list -- verify call order.
        '''
        for name,reads in self.unmatched.items():
            if reads:
                for r in reads:
                    if hasattr(r,'pairOffset') and r.pairOffset > self.gene_bounds[1]:
                        # Waiting for mate in a future bundle
                        if not hasattr(r, 'exonIds'):
                            # Cache bucket geometry now, while this bundle's
                            # exon list is still available
                            r.exonIds, r.length = self.getExonIds(r.exons)
                            r.bucket_length = sum([self.exons[e+1]-self.exons[e] for e in r.exonIds])
                            r.startOffset = r.exons[0][0] - self.exons[r.exonIds[0]]
                            r.endOffset = self.exons[r.exonIds[-1]+1] - r.exons[-1][1]
                        if name in self.cross_bundle_reads:
                            self.cross_bundle_reads[name].append(r)
                        else:
                            self.cross_bundle_reads[name] = [r]
                    else:
                        # Mate never showed up: count it and keep it unpaired
                        self.numUnmatched += 1
                        if not r.exons:
                            # A read with no exons cannot be emitted; abort
                            print(name)
                            exit()
                        self.add_unpaired(r)
        # Reset dictionary for next bundle
        self.unmatched = dict()
def finalizeExons(self):
'''
Convert the set of exon boundaries to a list
'''
start = self.gene_bounds[0]
end = self.gene_bounds[1]
self.exons.add(start)
self.exons.add(end)
if self.gtf_exons:
for i in range(self.gtf_id, len(self.gtf_exons)):
e = self.gtf_exons[i]
if e > end:
break
elif e > start:
self.exons.add(e)
self.gtf_id = i
self.exons = sorted(list(self.exons))
def finalize_cross_bundle_reads(self):
'''
Process the list of reads with mates outside this bundle
'''
# Finalize cross-bundle pairs that were discovered in this bundle
for p in self.cross_bundle_pairs:
if not hasattr(p[0], 'exonIds'):
#p[0].bundle = bundle_id
p[0].exonIds, p[0].length = self.getExonIds(p[0].exons)
p[0].bucket_length = sum([self.exons[e+1]-self.exons[e] for e in p[0].exonIds])
p[0].startOffset = p[0].exons[0][0] - self.exons[p[0].exonIds[0]]
p[0].endOffset = self.exons[p[0].exonIds[-1]+1] - p[0].exons[-1][1]
if not hasattr(p[1], 'exonIds'):
#p[1].bundle = bundle_id
p[1].exonIds, p[1].length = self.getExonIds(p[1].exons)
p[1].bucket_length = sum([self.exons[e+1]-self.exons[e] for e in p[1].exonIds])
p[1].startOffset = p[1].exons[0][0] - self.exons[p[1].exonIds[0]]
p[1].endOffset = self.exons[p[1].exonIds[-1]+1] - p[1].exons[-1][1]
# Update maximum length of a read in a cross-bucket bundle (needed to efficiently store length distribution)
if p[0].length > self.max_cross_bundle_read_len:
self.max_cross_bundle_read_len = p[0].length
if p[1].length > self.max_cross_bundle_read_len:
self.max_cross_bundle_read_len = p[1].length
NH = p[2]
strand = p[0].strand or p[1].strand
if strand == '-':
strand = -1
elif strand == '+':
strand = 1
else:
strand = 0
key = str(p[0].bundle) + ' ' + ' '.join([str(e) for e in p[0].exonIds]) + | |
a datetime object
# Also clear feature reference column (6) in case the file being read already has some existing data
for row in data[1:]: # skip top row since it's just labels
#print(str(len(row)))
row[1] = datetime.datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S")
# the 7th and 8th column might be empty, if so, append a blank entry
try:
row[6] = ""
#print("old row len =" + str(len(row)))
except IndexError:
row.append("")
try:
row[7] = ""
except IndexError:
row.append("")
#print("new row len =" + str(len(row)))
# Dealing with the top header row
# Sometimes the header row will be missing entries, so checking for the proper length, if not, then append as needed
#print(data[0])
if len(data[0]) == 6:
#print("Len is 6")
data[0].append("")
data[0].append("")
elif len(row[0]) == 7:
#print("Len is 7")
data[0].append("")
# then rename row 7 and 8 with proper names
data[0][6] = "Feature Reference"
data[0][7] = "HMM Sequence"
#print(data[0])
# start range() at 1 to skip the top row, then reverse it to start from the bottom of the csv file
for i in reversed(range(1,len(data))):
#print("Currently on index: " + str(i))
# for adding part studio features (sketches/all other feature)
if data[i][5] == "Add part studio feature":
#print("Found add part studio feature at: " + str(i))
featureStartIndex = i
while True:
featureStartIndex -= 1
#print(featureStartIndex)
if "Insert feature" in data[featureStartIndex][5]:
#print("found Insert feature at index " + str(featureStartIndex))
if featureStartIndex in insertFeatureIndices:
# check to see if this insert feature has already been matched to another "add ps feature"
# if yes, then skip to find the next available
pass
else:
# if "add of modify a sketch" is before or after insert feature, that means the
# inserted feature was likely a sketch
if "Add or modify a sketch" in data[featureStartIndex + 1][5] or \
"Add or modify a sketch" in data[featureStartIndex - 1][5]:
#print("Added sketch at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1] + " (Sketch)"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Create"
data[i][7] = "Start Create"
# add this to the running list of discovered insert feature indices
insertFeatureIndices.append(featureStartIndex)
# calculate time spent creating a new sketch
sketchCreateTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "sketchCreateTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1] , (data[featureStartIndex][1] - data[i][1])))
sketchesCreated += 1
sketchesCreatedNames.append(featureName)
HMMList.append("Create")
break
else:
#print("Regular feature added at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1]
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Create"
data[i][7] = "Start Create"
# add this to the running list of discovered insert feature indices
insertFeatureIndices.append(featureStartIndex)
# calculate time spent creating a new feature
featureCreateTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "featureCreateTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1], (data[featureStartIndex][1] - data[i][1])))
featuresCreated += 1
HMMList.append("Create")
break
if "Cancel Operation" in data[featureStartIndex][5]:
#print("Operation cancelled at: " + str(featureStartIndex))
featureName = "Cancelled add feature"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
#data[featureStartIndex][7] = "End Create"
#data[i][7] = "Start Create"
# calculate time spent on a cancelled new feature creation
cancelledCreateTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("cancelCreateTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
operationsCancelled += 1
#HMMList.append("Create")
break
# for editing of part studio features (sketches/all other feature)
elif data[i][5] == "Start edit of part studio feature":
#print("Found edit of part studio feature at: " + str(i))
featureStartIndex = i
HMMList.append("Revise")
while True:
featureStartIndex -= 1
if "Edit :" in data[featureStartIndex][5]:
#print("found Edit at index " + str(featureStartIndex))
if featureStartIndex in editFeatureIndices:
# check to see if this insert feature has already been matched to another "edit ps feature"
# if yes, then skip to find the next available
pass
else:
if "Add or modify a sketch" in data[featureStartIndex + 1][5] or \
"Add or modify a sketch" in data[featureStartIndex - 1][5]:
#print("Edited (Add or modify) sketch at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1] + " (Sketch)"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# calculate time spent editing a sketch
sketchEditTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "sketchEditTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1] , (data[featureStartIndex][1] - data[i][1])))
sketchesEdited += 1
break
else:
#print("Regular feature edit at: " + str(featureStartIndex))
featureName = data[featureStartIndex][5].split(" : ")[1]
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# calculate time spent editing a feature
featureEditTime += data[featureStartIndex][1] - data[i][1]
timeSeriesEntry = "featureEditTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1] , (data[featureStartIndex][1] - data[i][1])))
featuresEdited += 1
break
# if the next thing following "start edit" is "add or modify a sketch" without an "Edit : ", then
# that means the user clicked the green checkmark without making any actual changes to a sketch
elif "Add or modify a sketch" in data[featureStartIndex][5]:
# sometimes the "edit" entry can come after the "add or modify a sketch" entry, so we still need to
# check to make sure the entry above isn't an feature edit commit
# if it is, then this there were in fact modifications done to a sketch feature
if "Edit" in data[featureStartIndex - 1][5]:
featureName = data[featureStartIndex-1][5].split(" : ")[1] + " (Sketch)"
data[featureStartIndex-1][6] = featureName
data[i][6] = featureName
data[featureStartIndex-1][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# calculate time spent editing a sketch
sketchEditTime += data[featureStartIndex-1][1] - data[i][1]
timeSeriesEntry = "sketchEditTime - " + featureName
time_series.append((timeSeriesEntry, data[i][1], (data[featureStartIndex-1][1] - data[i][1])))
sketchesEdited += 1
break
else:
# if "edit" wasn't found in the entry above, then this was likely a edit with no real changes
featureName = "No change edit to a sketch feature"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# counting this time as same as cancelledEditTime, lumping them together
cancelledEditTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("cancelledEditTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
operationsCancelled += 1
break
# if another "start edit" or "add part studio feature" is encountered before finding an "edit :", then
# the user likely started editing a feature, but didn't actually make a change before clicking the green checkmark
# essentially leaving two "start edit part studio feature" entries back to back
# similar situation to the no-change edit sitaution for sketches, but in this case there's no entry at all
elif "Start edit of part studio feature" in data[featureStartIndex][5] or \
"Add part studio feature" in data[featureStartIndex][5]:
#print("NO CHANGE FEATURE EDIT AT INDEX: " + str(featureStartIndex))
featureName = "No change edit to a feature"
# only mark the i-th (start) entry with featureName, since there's no ending entry in audit trail
data[i][6] = featureName
# add this to the running list of discovered edit feature indices
editFeatureIndices.append(featureStartIndex)
# counting these as zeroDelta times since there's no way to determine for sure how long they spent on these
cancelledEditTime += datetime.timedelta(seconds=noTimeDeltaFeatures)
time_series.append(("cancelledEditTime", data[i][1], datetime.timedelta(seconds=noTimeDeltaFeatures)))
operationsCancelled += 1
break
elif "Cancel Operation" in data[featureStartIndex][5]:
#print("Edit operation cancelled at: " + str(featureStartIndex))
featureName = "Cancelled edit feature"
data[featureStartIndex][6] = featureName
data[i][6] = featureName
data[featureStartIndex][7] = "End Edit"
data[i][7] = "Start Edit"
# calculate time spent on a cancelled new feature creation
cancelledEditTime += data[featureStartIndex][1] - data[i][1]
time_series.append(("cancelledEditTime", data[i][1], (data[featureStartIndex][1] - data[i][1])))
operationsCancelled += 1
break
# tracking opening and closing drawings
elif "BLOB opened" in data[i][5]:
currentDrawing = data[i][3]
# search ahead for when this drawing was closed
featureStartIndex = i
HMMList.append("Drawing")
while True:
featureStartIndex -= 1
# finds the next "BLOB closed"
if "BLOB closed" in data[featureStartIndex][5]:
# check and see if this "BLOB closed" matches the current drawing
if data[featureStartIndex][3] == currentDrawing:
data[featureStartIndex][6] = currentDrawing + "(closed)"
data[i][6] = currentDrawing + "(opened)"
#data[featureStartIndex][7] | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MembersArgs', 'Members']
@pulumi.input_type
class MembersArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); regenerate from
    # the provider schema rather than hand-editing.
    def __init__(__self__, *,
                 pool_id: pulumi.Input[str],
                 members: Optional[pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Members resource.
        :param pulumi.Input[str] pool_id: The id of the pool that members will be assigned to.
               Changing this creates a new members resource.
        :param pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]] members: A set of dictionaries containing member parameters. The
               structure is described below.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client.
               A Networking client is needed to create pool members. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               members resource.
        """
        pulumi.set(__self__, "pool_id", pool_id)
        # Optional inputs are only recorded when explicitly supplied
        if members is not None:
            pulumi.set(__self__, "members", members)
        if region is not None:
            pulumi.set(__self__, "region", region)
    @property
    @pulumi.getter(name="poolId")
    def pool_id(self) -> pulumi.Input[str]:
        """
        The id of the pool that members will be assigned to.
        Changing this creates a new members resource.
        """
        return pulumi.get(self, "pool_id")
    @pool_id.setter
    def pool_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "pool_id", value)
    @property
    @pulumi.getter
    def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]]]:
        """
        A set of dictionaries containing member parameters. The
        structure is described below.
        """
        return pulumi.get(self, "members")
    @members.setter
    def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]]]):
        pulumi.set(self, "members", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Networking client.
        A Networking client is needed to create pool members. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        members resource.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
@pulumi.input_type
class _MembersState:
    # NOTE: generated by tfgen; mirrors MembersArgs but with every field
    # optional, since lookups may be partial. Regenerate rather than hand-edit.
    def __init__(__self__, *,
                 members: Optional[pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]]] = None,
                 pool_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Members resources.
        :param pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]] members: A set of dictionaries containing member parameters. The
               structure is described below.
        :param pulumi.Input[str] pool_id: The id of the pool that members will be assigned to.
               Changing this creates a new members resource.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client.
               A Networking client is needed to create pool members. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               members resource.
        """
        if members is not None:
            pulumi.set(__self__, "members", members)
        if pool_id is not None:
            pulumi.set(__self__, "pool_id", pool_id)
        if region is not None:
            pulumi.set(__self__, "region", region)
    @property
    @pulumi.getter
    def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]]]:
        """
        A set of dictionaries containing member parameters. The
        structure is described below.
        """
        return pulumi.get(self, "members")
    @members.setter
    def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MembersMemberArgs']]]]):
        pulumi.set(self, "members", value)
    @property
    @pulumi.getter(name="poolId")
    def pool_id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the pool that members will be assigned to.
        Changing this creates a new members resource.
        """
        return pulumi.get(self, "pool_id")
    @pool_id.setter
    def pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pool_id", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Networking client.
        A Networking client is needed to create pool members. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        members resource.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
class Members(pulumi.CustomResource):
    @overload
    # Overload signature: construct from individual keyword arguments.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 members: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MembersMemberArgs']]]]] = None,
                 pool_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a V2 members resource within OpenStack (batch members update).

        > **Note:** This resource has attributes that depend on octavia minor versions.
        Please ensure your Openstack cloud supports the required minor version.

        > **Note:** This resource works only within Octavia API. For
        legacy Neutron LBaaS v2 extension please use
        loadbalancer.Member resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_openstack as openstack

        members1 = openstack.loadbalancer.Members("members1",
            members=[
                openstack.loadbalancer.MembersMemberArgs(
                    address="192.168.199.23",
                    protocol_port=8080,
                ),
                openstack.loadbalancer.MembersMemberArgs(
                    address="192.168.199.24",
                    protocol_port=8080,
                ),
            ],
            pool_id="935685fb-a896-40f9-9ff4-ae531a3a00fe")
        ```

        ## Import

        Load Balancer Pool Members can be imported using the Pool ID, e.g.

        ```sh
         $ pulumi import openstack:loadbalancer/members:Members members_1 c22974d2-4c95-4bcb-9819-0afc5ed303d5
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MembersMemberArgs']]]] members: A set of dictionaries containing member parameters. The
               structure is described below.
        :param pulumi.Input[str] pool_id: The id of the pool that members will be assigned to.
               Changing this creates a new members resource.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Networking client.
               A Networking client is needed to create pool members. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               members resource.
        """
        ...
    @overload
    # Overload signature: construct from a single MembersArgs object.
    def __init__(__self__,
                 resource_name: str,
                 args: MembersArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a V2 members resource within OpenStack (batch members update).

        > **Note:** This resource has attributes that depend on octavia minor versions.
        Please ensure your Openstack cloud supports the required minor version.

        > **Note:** This resource works only within Octavia API. For
        legacy Neutron LBaaS v2 extension please use
        loadbalancer.Member resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_openstack as openstack

        members1 = openstack.loadbalancer.Members("members1",
            members=[
                openstack.loadbalancer.MembersMemberArgs(
                    address="192.168.199.23",
                    protocol_port=8080,
                ),
                openstack.loadbalancer.MembersMemberArgs(
                    address="192.168.199.24",
                    protocol_port=8080,
                ),
            ],
            pool_id="935685fb-a896-40f9-9ff4-ae531a3a00fe")
        ```

        ## Import

        Load Balancer Pool Members can be imported using the Pool ID, e.g.

        ```sh
         $ pulumi import openstack:loadbalancer/members:Members members_1 c22974d2-4c95-4bcb-9819-0afc5ed303d5
        ```

        :param str resource_name: The name of the resource.
        :param MembersArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # MembersArgs bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(MembersArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 members: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MembersMemberArgs']]]]] = None,
                 pool_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared constructor body for both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = MembersArgs.__new__(MembersArgs)
            __props__.__dict__["members"] = members
            # pool_id is required unless the engine supplies an URN (lookup path)
            if pool_id is None and not opts.urn:
                raise TypeError("Missing required property 'pool_id'")
            __props__.__dict__["pool_id"] = pool_id
            __props__.__dict__["region"] = region
        super(Members, __self__).__init__(
            'openstack:loadbalancer/members:Members',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MembersMemberArgs']]]]] = None,
pool_id: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None) -> 'Members':
"""
Get an existing Members resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MembersMemberArgs']]]] members: A set of dictionaries containing member parameters. The
structure is described below.
:param pulumi.Input[str] pool_id: The id of the pool that members will be assigned to.
Changing this creates a new members resource.
:param pulumi.Input[str] region: The region in which to obtain the V2 Networking client.
A Networking client is needed to create pool members. If omitted, the
`region` argument of the provider is used. Changing this creates a new
members resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _MembersState.__new__(_MembersState)
__props__.__dict__["members"] = members
__props__.__dict__["pool_id"] = pool_id
__props__.__dict__["region"] = region
return Members(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def members(self) -> pulumi.Output[Optional[Sequence['outputs.MembersMember']]]:
"""
A set of dictionaries containing member parameters. The
structure is described below.
"""
return pulumi.get(self, "members")
@property
@pulumi.getter(name="poolId")
def pool_id(self) -> pulumi.Output[str]:
"""
The id of the pool that members will be assigned to.
Changing this creates a new members resource.
"""
return pulumi.get(self, "pool_id")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 Networking client.
A Networking client is needed to create pool members. If omitted, | |
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from typing import DefaultDict, Dict, Iterable, List, Optional, Union
from .constants import SECS_IN_DAY, SECS_IN_HR
from .helpers import ConstellationId, get_constellation, get_closest, get_el_az, TimeRangeHolder
from .ephemeris import Ephemeris, EphemerisType, GLONASSEphemeris, GPSEphemeris, PolyEphemeris, parse_sp3_orbits, parse_rinex_nav_msg_gps, \
parse_rinex_nav_msg_glonass
from .downloader import download_orbits_gps, download_orbits_russia_src, download_nav, download_ionex, download_dcb, download_prediction_orbits_russia_src
from .downloader import download_cors_station
from .trop import saast
from .iono import IonexMap, parse_ionex
from .dcb import DCB, parse_dcbs
from .gps_time import GPSTime
from .dgps import get_closest_station_names, parse_dgps
from . import constants
# Maximum receiver-to-station distance (meters) used when selecting and
# parsing CORS DGPS stations.
MAX_DGPS_DISTANCE = 100_000  # in meters, because we're not barbarians
class AstroDog:
'''
auto_update: flag indicating whether laika should fetch files from web automatically
cache_dir: directory where data files are downloaded to and cached
dgps: flag indicating whether laika should use dgps (CORS)
data to calculate pseudorange corrections
valid_const: list of constellation identifiers laika will try process
valid_ephem_types: set of ephemeris types that are allowed to use and download.
Default is set to use all orbit ephemeris types
clear_old_ephemeris: flag indicating if ephemeris for an individual satellite should be overwritten when new ephemeris is added.
'''
    def __init__(self, auto_update=True,
                 cache_dir='/tmp/gnss/',
                 dgps=False,
                 valid_const=('GPS', 'GLONASS'),
                 valid_ephem_types=EphemerisType.all_orbits(),
                 clear_old_ephemeris=False):
        # See the class docstring for the meaning of these flags.
        # NOTE(review): the valid_ephem_types default is evaluated once at class
        # definition time and (if it is a mutable sequence) shared between
        # instances — confirm EphemerisType.all_orbits() callers never mutate it.
        self.auto_update = auto_update
        self.cache_dir = cache_dir
        self.clear_old_ephemeris = clear_old_ephemeris
        self.dgps = dgps
        # Accept a single ephemeris type as well as any iterable of them.
        if not isinstance(valid_ephem_types, Iterable):
            valid_ephem_types = [valid_ephem_types]
        # Whether any orbit-type ephemeris is enabled, and whether NAV is enabled.
        self.pull_orbit = len(set(EphemerisType.all_orbits()) & set(valid_ephem_types)) > 0
        self.pull_nav = EphemerisType.NAV in valid_ephem_types
        self.valid_const = valid_const
        self.valid_ephem_types = valid_ephem_types
        # Time ranges already fetched, so repeated queries can skip downloads.
        self.orbit_fetched_times = TimeRangeHolder()
        self.nav_fetched_times = TimeRangeHolder()
        self.dcbs_fetched_times = TimeRangeHolder()
        self.dgps_delays = []
        self.ionex_maps: List[IonexMap] = []
        # Parsed products, keyed by satellite PRN.
        self.orbits: DefaultDict[str, List[PolyEphemeris]] = defaultdict(list)
        self.nav: DefaultDict[str, List[Union[GPSEphemeris, GLONASSEphemeris]]] = defaultdict(list)
        self.dcbs: DefaultDict[str, List[DCB]] = defaultdict(list)
        # Last item found valid per product (and per PRN where applicable),
        # to avoid re-searching the full lists on every query.
        self.cached_ionex: Optional[IonexMap] = None
        self.cached_dgps = None
        self.cached_orbit: DefaultDict[str, Optional[PolyEphemeris]] = defaultdict(lambda: None)
        self.cached_nav: DefaultDict[str, Union[GPSEphemeris, GLONASSEphemeris, None]] = defaultdict(lambda: None)
        self.cached_dcb: DefaultDict[str, Optional[DCB]] = defaultdict(lambda: None)
def get_ionex(self, time) -> Optional[IonexMap]:
ionex: Optional[IonexMap] = self._get_latest_valid_data(self.ionex_maps, self.cached_ionex, self.get_ionex_data, time)
if ionex is None:
if self.auto_update:
raise RuntimeError("Pulled ionex, but still can't get valid for time " + str(time))
else:
self.cached_ionex = ionex
return ionex
def get_nav(self, prn, time):
skip_download = time in self.nav_fetched_times
nav = self._get_latest_valid_data(self.nav[prn], self.cached_nav[prn], self.get_nav_data, time, skip_download)
if nav is not None:
self.cached_nav[prn] = nav
return nav
@staticmethod
def _select_valid_temporal_items(item_dict, time, cache):
'''Returns only valid temporal item for specific time from currently fetched
data.'''
result = {}
for prn, temporal_objects in item_dict.items():
cached = cache[prn]
if cached is not None and cached.valid(time):
obj = cached
else:
obj = get_closest(time, temporal_objects)
if obj is None or not obj.valid(time):
continue
cache[prn] = obj
result[prn] = obj
return result
def get_navs(self, time):
if time not in self.nav_fetched_times and self.auto_update:
self.get_nav_data(time)
return AstroDog._select_valid_temporal_items(self.nav, time, self.cached_nav)
def get_orbit(self, prn: str, time: GPSTime):
skip_download = time in self.orbit_fetched_times
orbit = self._get_latest_valid_data(self.orbits[prn], self.cached_orbit[prn], self.get_orbit_data, time, skip_download)
if orbit is not None:
self.cached_orbit[prn] = orbit
return orbit
def get_orbits(self, time):
if time not in self.orbit_fetched_times:
self.get_orbit_data(time)
return AstroDog._select_valid_temporal_items(self.orbits, time, self.cached_orbit)
def get_dcb(self, prn, time):
skip_download = time in self.dcbs_fetched_times
dcb = self._get_latest_valid_data(self.dcbs[prn], self.cached_dcb[prn], self.get_dcb_data, time, skip_download)
if dcb is not None:
self.cached_dcb[prn] = dcb
return dcb
def get_dgps_corrections(self, time, recv_pos):
latest_data = self._get_latest_valid_data(self.dgps_delays, self.cached_dgps, self.get_dgps_data, time, recv_pos=recv_pos)
if latest_data is None:
if self.auto_update:
raise RuntimeError("Pulled dgps, but still can't get valid for time " + str(time))
else:
self.cached_dgps = latest_data
return latest_data
def add_orbits(self, new_ephems: Dict[str, List[Ephemeris]]):
self._add_ephems(new_ephems, self.orbits, self.orbit_fetched_times)
def add_navs(self, new_ephems: Dict[str, List[Ephemeris]]):
self._add_ephems(new_ephems, self.nav, self.nav_fetched_times)
def _add_ephems(self, new_ephems: Dict[str, List[Ephemeris]], ephems_dict, fetched_times):
for k, v in new_ephems.items():
if len(v) > 0:
if self.clear_old_ephemeris:
ephems_dict[k] = v
else:
ephems_dict[k].extend(v)
min_epochs = []
max_epochs = []
for v in new_ephems.values():
if len(v) > 0:
min_ephem, max_ephem = self.get_epoch_range(v)
min_epochs.append(min_ephem)
max_epochs.append(max_ephem)
if len(min_epochs) > 0:
min_epoch = min(min_epochs)
max_epoch = max(max_epochs)
fetched_times.add(min_epoch, max_epoch)
def get_nav_data(self, time):
def download_and_parse(constellation, parse_rinex_nav_func):
file_path = download_nav(time, cache_dir=self.cache_dir, constellation=constellation)
return parse_rinex_nav_func(file_path) if file_path else {}
fetched_ephems = {}
if 'GPS' in self.valid_const:
fetched_ephems = download_and_parse(ConstellationId.GPS, parse_rinex_nav_msg_gps)
if 'GLONASS' in self.valid_const:
for k, v in download_and_parse(ConstellationId.GLONASS, parse_rinex_nav_msg_glonass).items():
fetched_ephems.setdefault(k, []).extend(v)
self.add_navs(fetched_ephems)
if sum([len(v) for v in fetched_ephems.values()]) == 0:
begin_day = GPSTime(time.week, SECS_IN_DAY * (time.tow // SECS_IN_DAY))
end_day = GPSTime(time.week, SECS_IN_DAY * (1 + (time.tow // SECS_IN_DAY)))
self.nav_fetched_times.add(begin_day, end_day)
    def download_parse_orbit(self, gps_time: GPSTime, skip_before_epoch=None) -> Dict[str, List[PolyEphemeris]]:
        """Download SP3 precise-orbit files around *gps_time* and parse them per PRN.

        Results from the Russian source and (when GPS is enabled) the US source
        are concatenated for each PRN.
        """
        # Download multiple days to be able to polyfit at the start-end of the day
        time_steps = [gps_time - SECS_IN_DAY, gps_time, gps_time + SECS_IN_DAY]
        with ThreadPoolExecutor() as executor:
            futures_other = [executor.submit(download_orbits_russia_src, t, self.cache_dir, self.valid_ephem_types) for t in time_steps]
            futures_gps = None
            if "GPS" in self.valid_const:
                futures_gps = [executor.submit(download_orbits_gps, t, self.cache_dir, self.valid_ephem_types) for t in time_steps]
            # f.result() appears twice per future; the second call returns the cached result.
            # Failed downloads (falsy results) are filtered out before parsing.
            ephems_other = parse_sp3_orbits([f.result() for f in futures_other if f.result()], self.valid_const, skip_before_epoch)
            ephems_us = parse_sp3_orbits([f.result() for f in futures_gps if f.result()], self.valid_const, skip_before_epoch) if futures_gps else {}
        return {k: ephems_other.get(k, []) + ephems_us.get(k, []) for k in set(list(ephems_other.keys()) + list(ephems_us.keys()))}
    def download_parse_prediction_orbit(self, gps_time: GPSTime):
        """Download predicted (ultra-rapid) orbits covering *gps_time* and parse them.

        Prefers the Russian prediction source and falls back to the slower GPS
        source when that yields nothing; returns {} when no data is available.
        """
        assert EphemerisType.ULTRA_RAPID_ORBIT in self.valid_ephem_types
        # Drop data older than two hours before the requested time.
        skip_until_epoch = gps_time - 2 * SECS_IN_HR
        result = download_prediction_orbits_russia_src(gps_time, self.cache_dir)
        if result is not None:
            result = [result]
        elif "GPS" in self.valid_const:
            # Slower fallback. Russia src prediction orbits are published from 2022
            # NOTE(review): this list can contain None entries when a download
            # fails — confirm parse_sp3_orbits tolerates that.
            result = [download_orbits_gps(t, self.cache_dir, self.valid_ephem_types) for t in [gps_time - SECS_IN_DAY, gps_time]]
        if result is None:
            return {}
        return parse_sp3_orbits(result, self.valid_const, skip_until_epoch=skip_until_epoch)
def get_orbit_data(self, time: GPSTime, only_predictions=False):
if only_predictions:
ephems_sp3 = self.download_parse_prediction_orbit(time)
else:
ephems_sp3 = self.download_parse_orbit(time)
if sum([len(v) for v in ephems_sp3.values()]) < 5:
raise RuntimeError(f'No orbit data found. For Time {time.as_datetime()} constellations {self.valid_const} valid ephem types {self.valid_ephem_types}')
self.add_orbits(ephems_sp3)
def get_dcb_data(self, time):
file_path_dcb = download_dcb(time, cache_dir=self.cache_dir)
dcbs = parse_dcbs(file_path_dcb, self.valid_const)
for dcb in dcbs:
self.dcbs[dcb.prn].append(dcb)
if len(dcbs) != 0:
min_epoch, max_epoch = self.get_epoch_range(dcbs)
self.dcbs_fetched_times.add(min_epoch, max_epoch)
def get_epoch_range(self, new_ephems):
min_ephem = min(new_ephems, key=lambda e: e.epoch)
max_ephem = max(new_ephems, key=lambda e: e.epoch)
min_epoch = min_ephem.epoch - min_ephem.max_time_diff
max_epoch = max_ephem.epoch + max_ephem.max_time_diff
return min_epoch, max_epoch
def get_ionex_data(self, time):
file_path_ionex = download_ionex(time, cache_dir=self.cache_dir)
ionex_maps = parse_ionex(file_path_ionex)
for im in ionex_maps:
self.ionex_maps.append(im)
def get_dgps_data(self, time, recv_pos):
station_names = get_closest_station_names(recv_pos, k=8, max_distance=MAX_DGPS_DISTANCE, cache_dir=self.cache_dir)
for station_name in station_names:
file_path_station = download_cors_station(time, station_name, cache_dir=self.cache_dir)
if file_path_station:
dgps = parse_dgps(station_name, file_path_station,
self, max_distance=MAX_DGPS_DISTANCE,
required_constellations=self.valid_const)
if dgps is not None:
self.dgps_delays.append(dgps)
break
def get_tgd_from_nav(self, prn, time):
if get_constellation(prn) not in self.valid_const:
return None
eph = self.get_nav(prn, time)
if eph:
return eph.get_tgd()
return None
def get_sat_info(self, prn, time):
if get_constellation(prn) not in self.valid_const:
return None
eph = None
if self.pull_orbit:
eph = self.get_orbit(prn, time)
if not eph and self.pull_nav:
eph = self.get_nav(prn, time)
if eph:
return eph.get_sat_info(time)
return None
def get_all_sat_info(self, time):
ephs = {}
if self.pull_orbit:
ephs = self.get_orbits(time)
if len(ephs) == 0 and self.pull_nav:
ephs = self.get_navs(time)
return {prn: eph.get_sat_info(time) for prn, eph in ephs.items()}
def get_glonass_channel(self, prn, time):
nav = self.get_nav(prn, time)
if nav:
return nav.channel
return None
def get_frequency(self, prn, time, signal='C1C'):
if get_constellation(prn) == 'GPS':
switch = {'1': constants.GPS_L1,
'2': constants.GPS_L2,
'5': constants.GPS_L5,
'6': constants.GALILEO_E6,
'7': constants.GALILEO_E5B,
'8': constants.GALILEO_E5AB}
freq = switch.get(signal[1])
if freq:
return freq
raise NotImplementedError("Dont know this GPS frequency: ", signal, prn)
elif get_constellation(prn) == 'GLONASS':
n = self.get_glonass_channel(prn, time)
if n is None:
return None
switch = {'1': constants.GLONASS_L1 + n * constants.GLONASS_L1_DELTA,
'2': constants.GLONASS_L2 + n * constants.GLONASS_L2_DELTA,
'5': constants.GLONASS_L5 + n * constants.GLONASS_L5_DELTA,
'6': constants.GALILEO_E6,
'7': constants.GALILEO_E5B,
'8': constants.GALILEO_E5AB}
freq = switch.get(signal[1])
if freq:
return freq
raise NotImplementedError("Dont know this GLONASS frequency: ", signal, prn)
    def get_delay(self, prn, time, rcv_pos, no_dgps=False, signal='C1C', freq=None):
        """Return the modeled signal delay (ionosphere + troposphere + code bias)
        for satellite *prn* observed from *rcv_pos* at *time*.

        Returns None when the satellite has no valid ephemeris, is too low in
        the sky, or (with auto_update) any required correction is missing.
        Uses DGPS corrections instead when dgps mode is on and not suppressed.
        """
        sat_info = self.get_sat_info(prn, time)
        if sat_info is None:
            return None
        sat_pos = sat_info[0]
        el, az = get_el_az(rcv_pos, sat_pos)
        # Skip low-elevation satellites (below 0.2; presumably radians — confirm).
        if el < 0.2:
            return None
        if self.dgps and not no_dgps:
            return self._get_delay_dgps(prn, rcv_pos, time)
        ionex = self.get_ionex(time)
        # The frequency is only needed for the ionospheric term.
        if not freq and ionex is not None:
            freq = self.get_frequency(prn, time, signal)
        dcb = self.get_dcb(prn, time)
        # When using internet we expect all data or return None
        if self.auto_update and (ionex is None or dcb is None or freq is None):
            return None
        # Without auto_update, missing corrections degrade gracefully to 0.
        iono_delay = ionex.get_delay(rcv_pos, az, el, sat_pos, time, freq) if ionex is not None else 0.
        trop_delay = saast(rcv_pos, el)
        code_bias = dcb.get_delay(signal) if dcb is not None else 0.
        return iono_delay + trop_delay + code_bias
def _get_delay_dgps(self, prn, rcv_pos, time):
dgps_corrections = self.get_dgps_corrections(time, rcv_pos)
| |
<filename>src/svnwrap.py
#!/usr/bin/env python
# coding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
import atexit
import codecs
import difflib
import errno
import io
import locale
import os
import platform
import re
import shlex
import shutil
import signal
import subprocess
import sys
import textwrap
import threading
try:
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
TextIO,
Tuple,
Union,
)
except ImportError:
pass
if sys.version_info < (3, 0):
import ConfigParser as configparser
from ConfigParser import SafeConfigParser as ConfigParser
import Queue as queue
else:
import configparser
from configparser import ConfigParser
import queue
__version__ = "0.8.2"
# True when running on Windows (affects config paths, color support, signals).
platform_is_windows = platform.system() == "Windows"
def color_supported():
    # type: () -> bool
    """Return True when ANSI color output can be used (Windows needs colorama)."""
    if not platform_is_windows:
        return True
    try:
        import colorama

        colorama.init()
    except ImportError:
        return False
    return True
class State:
    """Mutable runtime settings for svnwrap, held in the module-level ``state``."""

    def __init__(self):
        # type: () -> None
        # True when debugging.
        self.debugging = False
        # Path of Subversion client executable.
        self.SVN = "svn"
        # True if stdout is a TTY.
        self.isatty = os.isatty(sys.stdout.fileno())
        # True to use color highlighting on output.
        self.using_color = self.isatty and color_supported()
        # True to feed output through a pager.
        self.use_pager = self.isatty
        # Will contain a subprocess.Popen object, if a pager is in use.
        self.pager = None  # type: Optional[subprocess.Popen]
# Module-wide singleton holding runtime configuration.
state = State()
# Contents written to a freshly created config.ini; every setting is shown
# commented out so the user can opt in.
sample_ini_contents = """
[aliases]
# Aliases are used at the start of a URL. They are replaced by their
# aliased value. When the alias "project1" has been defined, this URL:
# //project1
# will be replaced by the associated URL, e.g.:
# http://server/url/for/project1
#
# Define aliases as follows:
## project1 = http://server/url/for/project1
[pager]
# The pager is used by several commands to paginate the output.
# Set "enabled" to "false" to disable use of a pager.
## enabled = true
# Customize which pager to use (along with any desired arguments) via the "cmd"
# setting here, or via the environment variable SVN_PAGER, or via the system
# default specified in the PAGER environment variable. If none of the above
# are set, then "less -FKRX" will be assumed.
#
# Switches for "less":
# -F quit the pager early if output fits on one screen
# -K allow Ctrl-C to exit less
# -R process color escape sequences
# -X don't clear the screen when pager quits
## cmd = less -FKRX
#
# If "use_shell" is true, svnwrap will feed "cmd" directly to the shell,
# allowing more complicated commands such as this one (but note that the
# "diff-highlight" command does not come with svnwrap):
# cmd = diff-highlight | less
#
# **WARNING** If you enable this behavior, svnwrap will not be able to
# detect failures of "cmd".
## use_shell = false
"""
def debug(s):
    # type: (str) -> None
    """Emit *s* on stdout, but only while debugging is enabled."""
    if not state.debugging:
        return
    sys.stdout.write(s)
def debug_ln(s=""):
    # type: (str) -> None
    """Emit *s* plus a trailing newline via debug()."""
    debug("%s\n" % s)
class SvnError(Exception):
    """Fatal svnwrap-specific error whose message is reported to the user."""
    pass
class PagerClosed(Exception):
    """Raised when the output pager pipe closes before all output is written."""
    pass
def remove_chars(s, chars):
    # type: (str, str) -> str
    """Return *s* with every character that occurs in *chars* removed."""
    return "".join(c for c in s if c not in chars)
def get_environ(env_var, default=None):
    # type: (str, str) -> str
    """Return the value of *env_var*, or *default*; raise SvnError when neither exists."""
    value = os.environ.get(env_var)
    if value is not None:
        return value
    if default is None:
        raise SvnError("missing environment variable %s" % env_var)
    return default
def get_svnwrap_config_dir():
    # type: () -> str
    """Return svnwrap's config directory (XDG-style; APPDATA preferred on Windows)."""
    fallback = os.path.join(get_environ("HOME", ""), ".config")
    if platform_is_windows:
        fallback = get_environ("APPDATA", fallback)
    base_dir = get_environ("XDG_CONFIG_HOME", fallback)
    return os.path.join(base_dir, "svnwrap")
def get_svnwrap_ini_path():
    # type: () -> str
    """Return the path of config.ini, creating the directory and a sample file on first use."""
    config_dir = get_svnwrap_config_dir()
    ini_path = os.path.join(config_dir, "config.ini")
    if not os.path.isdir(config_dir):
        os.makedirs(config_dir)
    if not os.path.isfile(ini_path):
        # First run: seed the file with a fully commented-out sample config.
        with open(ini_path, "w") as ini_file:
            ini_file.write(sample_ini_contents)
    return ini_path
def svnwrap_config():
    # type: () -> ConfigParser
    """Load and return svnwrap's own config.ini."""
    parser = ConfigParser()
    parser.read(get_svnwrap_ini_path())
    return parser
def config_boolean(config, section, option, default_value):
    # type: (ConfigParser, str, str, bool) -> bool
    """Return the boolean at [section] option, or *default_value* when absent."""
    if not config.has_option(section, option):
        return default_value
    return config.getboolean(section, option)
def get_aliases():
    # type: () -> Dict[str, str]
    """Return URL aliases from the [aliases] ini section ({} when the section is absent)."""
    try:
        alias_items = svnwrap_config().items("aliases")
    except configparser.NoSectionError:
        alias_items = []
    return dict(alias_items)
def get_subversion_config_dir():
    # type: () -> str
    """Return Subversion's own per-user configuration directory."""
    if platform_is_windows:
        return os.path.join(get_environ("APPDATA", ""), "Subversion")
    return os.path.join(get_environ("HOME", ""), ".subversion")
def get_subversion_ini_path():
    # type: () -> str
    """Return the path of Subversion's main per-user config file."""
    return os.path.join(get_subversion_config_dir(), "config")
def subversion_config():
    # type: () -> ConfigParser
    """Load Subversion's per-user config, tolerating duplicate keys."""
    # ~/.subversion/config commonly contains duplicate keys, e.g. lowercase
    # and uppercase filename globs:
    #
    #   [auto-props]
    #   *.c = svn:eol-style=native
    #   *.C = svn:eol-style=native
    #
    # Python 3.2 added ``strict`` to prohibit duplicates, so disable strict
    # checking when available and otherwise fall back to the older permissive
    # ConfigParser signature.
    try:
        parser = ConfigParser(strict=False)
    except TypeError:
        parser = ConfigParser()
    parser.read(get_subversion_ini_path())
    return parser
# Lines matching these patterns are treated as noise in the respective
# subcommands' output.
STATUS_REX = r"^Performing status|^\s*$|^X[ \t]"
UPDATE_REX = (
    r"^Fetching external|^External |^Updated external|^\s*$" + r"|^At revision"
)
CHECKOUT_REX = r"^Fetching external|^\s*$"
# The eight base ANSI colors, in ANSI numbering order.
color_names = [
    "black",
    "red",
    "green",
    "yellow",
    "blue",
    "magenta",
    "cyan",
    "white",
]
# Map "dark<name>" to indices 0-7 and "light<name>" to the bright range 8-15.
color_dict = {}
for i, base_name in enumerate(color_names):
    color_dict["dark" + base_name] = i
    color_dict["light" + base_name] = i + 8
"""
[30m black foreground
[40m black background
[90m light black foreground
[100m light black background
[01m bold colors
[0m reset colors
"""
# Default style name -> (foreground, background) pairs; a background of None
# means "leave the terminal background unchanged". Overridable via the
# [colors] ini section (see read_color_scheme()).
color_scheme = {
    "diffAdd": ("lightblue", None),
    "diffRemoved": ("lightred", None),
    "diffMisc": ("darkyellow", None),
    "conflict": ("lightwhite", "darkred"),
    "statusAdded": ("darkgreen", None),
    "statusDeleted": ("darkred", None),
    "statusUpdated": ("lightblue", None),
    "statusConflict": ("lightwhite", "darkred"),
    "statusModified": ("lightblue", None),
    "statusMerged": ("darkmagenta", None),
    "statusUntracked": ("lightblack", None),
    "status": ("lightblack", None),
    "info": ("darkgreen", None),
    "logRev": ("lightyellow", None),
    "logCommitter": ("lightblue", None),
    "logDate": ("lightblack", None),
    "logNumLines": ("lightblack", None),
    "logFieldSeparator": ("lightblack", None),
    "logSeparator": ("darkgreen", None),
    "logText": ("darkwhite", None),
    "warning": ("lightwhite", "darkred"),
}  # type: Dict[str, Tuple[str, Optional[str]]]
# Map lowercased style names back to the canonical mixed-case keys above,
# so ini entries (which ConfigParser lowercases) can be matched.
entry_name_to_style_name = {}
for key in color_scheme:
    entry_name_to_style_name[key.lower()] = key
def read_color_scheme():
    # type: () -> None
    """Merge user colors from the [colors] ini section into color_scheme.

    Each entry is "foreground" or "foreground, background"; an empty or
    "default" component keeps the built-in value. Unknown style names are
    ignored; unknown color names raise SvnError.
    """
    config = svnwrap_config()
    try:
        configured_colors = dict(config.items("colors"))
    except configparser.NoSectionError:
        configured_colors = {}
    valid_keys = set(color_scheme.keys())
    for key, value in configured_colors.items():
        key = entry_name_to_style_name.get(key, key)
        if key not in valid_keys:
            continue
        colors = [part.strip() or "default" for part in value.split(",")]
        if len(colors) == 1:
            foreground, background = colors[0], None
        elif len(colors) == 2:
            foreground, background = colors
        else:
            raise SvnError(
                "invalid number of colors specified for '%s' in config"
                % (key,)
            )
        if foreground == "default":
            foreground = color_scheme[key][0]
        if background == "default":
            background = color_scheme[key][1]
        # Validate foreground first, then background (matches error ordering).
        for color in (foreground, background):
            if color is not None and color not in color_dict:
                raise SvnError(
                    "invalid color ('%s') specified for '%s'" % (color, key)
                )
        color_scheme[key] = (foreground, background)
def set_color_num(color_num):
    # type: (int) -> str
    """Return the ANSI escape for *color_num*, or '' when color is disabled."""
    return "\x1b[%dm" % color_num if state.using_color else ""
def set_foreground(foreground):
    # type: (Optional[str]) -> str
    """Return the escape selecting *foreground*, or '' for None."""
    if foreground is None:
        return ""
    index = color_dict[foreground]
    # Normal colors live at 30-37; bright ("light") colors at 90-97.
    base = 30 if index < 8 else 90 - 8
    return set_color_num(base + index)
def set_background(background):
    # type: (Optional[str]) -> str
    """Return the escape selecting *background*, or '' for None."""
    if background is None:
        return ""
    index = color_dict[background]
    # Normal backgrounds live at 40-47; bright ones at 100-107.
    base = 40 if index < 8 else 100 - 8
    return set_color_num(base + index)
def reset_colors():
    # type: () -> str
    """Return the escape sequence resetting all color attributes."""
    return set_color_num(0)
def wrap_color(s, style):
    # type: (str, str) -> str
    """Return *s* wrapped in the color escapes configured for *style*."""
    foreground, background = color_scheme[style]
    prefix = set_foreground(foreground) + set_background(background)
    return prefix + s + reset_colors()
def write(s, f=None):
    # type: (str, Optional[TextIO]) -> None
    """Write *s* to *f* (default: the current sys.stdout) and flush.

    A broken-pipe IOError or a write to a closed file raises PagerClosed so
    callers can stop producing output once the user quits the pager.
    """
    if f is None:
        # Resolve sys.stdout lazily: launching a pager may replace it after
        # this function's definition, so a default argument would go stale.
        f = sys.stdout
    try:
        f.write(s)
        f.flush()
    except IOError as e:
        if e.errno == errno.EPIPE:
            raise PagerClosed("Pager pipe closed.")
        raise
    except ValueError:
        # Writing to an already-closed pager pipe surfaces as ValueError
        # ("bad output file"); normalize it to PagerClosed as well.
        raise PagerClosed("Pager pipe closed.")
def write_ln(line=""):
    # type: (str) -> None
    """Write *line* followed by a newline."""
    write("%s\n" % line)
def write_lines(lines):
    # type: (Iterable[str]) -> None
    """Write each element of *lines* on its own line."""
    for element in lines:
        write_ln(element)
# Warning lines queued via add_warning_line() (consumed elsewhere in the file).
warning_lines = []
def add_warning_line(line):
    # type: (str) -> None
    """Queue a warning *line* for later display."""
    warning_lines.append(line)
# Chunks of captured stderr text collected via add_stderr_text().
stderr_parts = []
def add_stderr_text(text):
    # type: (str) -> None
    """Accumulate a chunk of captured stderr *text*."""
    stderr_parts.append(text)
def restore_signals():
    # type: () -> None
    """Restore default handling for signals Python sets up or ignores.

    Intended to run in a child process so it inherits OS-default behavior.
    """
    for signal_name in ("SIGINT", "SIGPIPE", "SIGXFZ", "SIGXFSZ"):
        signum = getattr(signal, signal_name, None)
        if signum is not None:
            signal.signal(signum, signal.SIG_DFL)
def add_restore_signals(kwargs):
# type: (Dict[str, Any]) -> Dict[str, Any]
# preexec_fn is not supported on Windows, but we want to use it to restore
# the signal handlers on other platforms.
if not platform_is_windows:
kwargs = kwargs.copy()
kwargs["preexec_fn"] = restore_signals
return | |
(N, 1444, 4), there are a total 1444 boxes on this feature map
# Predict classes in localization boxes
c_conv3_3 = self.cl_conv3_3(conv3_3_feats) # (N, 4 * n_classes, 150, 150)
c_conv3_3 = c_conv3_3.permute(0, 2, 3,
1).contiguous() # (N, 150, 150, 4 * n_classes), to match prior-box order (after .view())
c_conv3_3 = c_conv3_3.view(batch_size, -1,
self.n_classes) # (N, 90000, n_classes), there are a total 90000 boxes on this feature map
# Predict classes in localization boxes
c_conv4_3 = self.cl_conv4_3(conv4_3_feats) # (N, 4 * n_classes, 75, 75)
c_conv4_3 = c_conv4_3.permute(0, 2, 3,
1).contiguous() # (N, 75, 75, 4 * n_classes), to match prior-box order (after .view())
c_conv4_3 = c_conv4_3.view(batch_size, -1,
self.n_classes) # (N, 22500, n_classes), there are a total 22500 boxes on this feature map
c_conv7 = self.cl_conv7(conv7_feats) # (N, 4 * n_classes, 38, 38)
c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() # (N, 38, 38, 4 * n_classes)
c_conv7 = c_conv7.view(batch_size, -1,
self.n_classes) # (N, 5776, n_classes), there are a total 5776 boxes on this feature map
c_conv8_2 = self.cl_conv8_2(conv8_2_feats) # (N, 4 * n_classes, 19, 19)
c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() # (N, 19, 19, 4 * n_classes)
c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) # (N, 1444, n_classes), there are a total 1444 boxes on this feature map
# A total of 119720 boxes
# Concatenate in this specific order (i.e. must match the order of the prior-boxes)
locs = torch.cat([l_conv3_3, l_conv4_3, l_conv7, l_conv8_2 ], dim=1) # (N, 119720, 4)
classes_scores = torch.cat([c_conv3_3, c_conv4_3, c_conv7, c_conv8_2 ], dim=1) # (N, 119720, n_classes)
return locs, classes_scores
def create_prior_boxes(self):
"""
Create the 119720 prior (default) boxes for the SSD600.
:return: prior boxes in center-size coordinates, a tensor of dimensions (119720, 4)
"""
fmap_dims = {'conv3_3': 150,
'conv4_3': 75,
'conv7': 38,
'conv8_2': 19}
# small objects only
obj_scales = {'conv3_3': 0.04,
'conv4_3': 0.1,
'conv7': 0.2,
'conv8_2': 0.375}
# near-square aspect ratios only
aspect_ratios = {'conv3_3': [0.5, 0.6, 0.7],
'conv4_3': [0.5, 0.6, 0.7],
'conv7': [0.5, 0.6, 0.7],
'conv8_2': [0.5, 0.6, 0.7]}
fmaps = list(fmap_dims.keys())
prior_boxes = []
for k, fmap in enumerate(fmaps):
for i in range(fmap_dims[fmap]):
for j in range(fmap_dims[fmap]):
cx = (j + 0.5) / fmap_dims[fmap]
cy = (i + 0.5) / fmap_dims[fmap]
for ratio in aspect_ratios[fmap]:
prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])
# For an aspect ratio of 0.6, use an additional prior whose scale is the geometric mean of the
# scale of the current feature map and the scale of the next feature map
if ratio == 0.6:
try:
additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]])
# For the last feature map, there is no "next" feature map
except IndexError:
additional_scale = 1.
prior_boxes.append([cx, cy, additional_scale, additional_scale])
prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (119720, 4)
prior_boxes.clamp_(0, 1) # (119720, 4)
return prior_boxes
def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
"""
Decipher the 119720 locations and class scores (output of ths SSD600) to detect objects.
For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
:param predicted_locs: predicted locations/boxes w.r.t the 119720 prior boxes, a tensor of dimensions (N, 119720, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 119720, n_classes)
:param min_score: minimum threshold for a box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
:param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
:return: detections (boxes, labels, and scores), lists of length batch_size
"""
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
predicted_scores = F.softmax(predicted_scores, dim=2) # (N, 119720, n_classes)
# Lists to store final predicted boxes, labels, and scores for all images
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
for i in range(batch_size):
# Decode object coordinates from the form we regressed predicted boxes to
decoded_locs = cxcy_to_xy(
gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy)) # (119720, 4), these are fractional pt. coordinates
# Lists to store boxes and scores for this image
image_boxes = list()
image_labels = list()
image_scores = list()
max_scores, best_label = predicted_scores[i].max(dim=1) # (119720)
# Check for each class
for c in range(1, self.n_classes):
# Keep only predicted boxes and scores where scores for this class are above the minimum score
class_scores = predicted_scores[i][:, c] # (119720)
score_above_min_score = class_scores > min_score # torch.uint8 (byte) tensor, for indexing
n_above_min_score = score_above_min_score.sum().item()
if n_above_min_score == 0:
continue
class_scores = class_scores[score_above_min_score] # (n_qualified), n_min_score <= 119720
class_decoded_locs = decoded_locs[score_above_min_score] # (n_qualified, 4)
# Sort predicted boxes and scores by scores
class_scores, sort_ind = class_scores.sort(dim=0, descending=True) # (n_qualified), (n_min_score)
class_decoded_locs = class_decoded_locs[sort_ind] # (n_min_score, 4)
# Find the overlap between predicted boxes
overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) # (n_qualified, n_min_score)
# Non-Maximum Suppression (NMS)
# A torch.uint8 (byte) tensor to keep track of which predicted boxes to suppress
# 1 implies suppress, 0 implies don't suppress
suppress = torch.zeros((n_above_min_score), dtype=torch.bool).to(device) # (n_qualified)
# Consider each box in order of decreasing scores
for box in range(class_decoded_locs.size(0)):
# If this box is already marked for suppression
if suppress[box] == True:
continue
# Suppress boxes whose overlaps (with this box) are greater than maximum overlap
# Find such boxes and update suppress indices
suppress = torch.max(suppress, overlap[box] > max_overlap)
# The max operation retains previously suppressed boxes, like an 'OR' operation
# Don't suppress this box, even though it has an overlap of 1 with itself
suppress[box] = False
# Store only unsuppressed boxes for this class
image_boxes.append(class_decoded_locs[~suppress])
image_labels.append(torch.LongTensor((~suppress).sum().item() * [c]).to(device))
image_scores.append(class_scores[~suppress])
# If no object in any class is found, store a placeholder for 'background'
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
image_labels.append(torch.LongTensor([0]).to(device))
image_scores.append(torch.FloatTensor([0.]).to(device))
# Concatenate into single tensors
image_boxes = torch.cat(image_boxes, dim=0) # (n_objects, 4)
image_labels = torch.cat(image_labels, dim=0) # (n_objects)
image_scores = torch.cat(image_scores, dim=0) # (n_objects)
n_objects = image_scores.size(0)
# Keep only the top k objects
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
image_scores = image_scores[:top_k] # (top_k)
image_boxes = image_boxes[sort_ind][:top_k] # (top_k, 4)
image_labels = image_labels[sort_ind][:top_k] # (top_k)
# remove multiple class suggestions for the same object
image_boxes, image_scores, image_labels = self.remove_duplicates(image_boxes, image_scores, image_labels, max_overlap)
# Append to lists that store predicted boxes and scores for all images
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores # lists of length batch_size
def remove_duplicates(self, image_boxes, image_scores, image_labels, max_overlap):
"""
:param image_boxes: the boxes in an image
:param image_scores: the scores of the boxes
:param image_labels: the labels of the boxes
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed here
:return: the unsuppressed boxes with scores and labels
"""
suppress = torch.zeros((image_scores.shape[0]), dtype=torch.bool).to(device)
for i in range(image_scores.shape[0]):
for j in range(image_scores.shape[0]):
if i == j:
continue
if find_jaccard_overlap(image_boxes[i].unsqueeze(0), image_boxes[j].unsqueeze(0)) > max_overlap:
suppress[i if image_scores[i] < image_scores[j] else j] = True
return self.remove_duplicates(image_boxes[~suppress], image_scores[~suppress], image_labels[~suppress], max_overlap)
return image_boxes, image_scores, image_labels
class MultiBoxLoss(nn.Module):
    """
    The MultiBox loss, a loss function for object detection.

    This is a weighted combination of:
    (1) a localization loss for the predicted locations of the boxes, and
    (2) a confidence loss for the predicted class scores.
    """
def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
super(MultiBoxLoss, self).__init__()
self.priors_cxcy = priors_cxcy
self.priors_xy = cxcy_to_xy(priors_cxcy)
self.threshold = threshold
self.neg_pos_ratio = neg_pos_ratio
self.alpha = alpha
self.smooth_l1 = nn.L1Loss()
self.cross_entropy = nn.CrossEntropyLoss(reduce=False)
def forward(self, predicted_locs, predicted_scores, boxes, labels):
"""
Forward propagation.
:param predicted_locs: predicted locations/boxes w.r.t the 119720 prior boxes, a tensor of dimensions (N, 119720, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 119720, n_classes)
:param boxes: true object bounding boxes in boundary coordinates, a list of N tensors
:param labels: true object labels, a list of N tensors
:return: multibox loss, a scalar
"""
| |
# The indirection here is a little confusing:
# we're using the resource path template as the base of a regex,
# with each resource ID segment being captured by a regex.
# E.g., the path schema
# kingdoms/{kingdom}/phyla/{phylum}
# becomes the regex
# ^kingdoms/(?P<kingdom>.+?)/phyla/(?P<phylum>.+?)$
parsing_regex_str = (
"^" +
self.PATH_ARG_RE.sub(
# We can't just use (?P<name>[^/]+) because segments may be
# separated by delimiters other than '/'.
# Multiple delimiter characters within one schema are allowed,
# e.g.
# as/{a}-{b}/cs/{c}%{d}_{e}
# This is discouraged but permitted by AIP4231
lambda m: "(?P<{name}>.+?)".format(name=m.groups()[0]),
self.resource_path or ''
) +
"$"
)
return parsing_regex_str
def get_field(self, *field_path: str,
collisions: FrozenSet[str] = frozenset()) -> Field:
"""Return a field arbitrarily deep in this message's structure.
This method recursively traverses the message tree to return the
requested inner-field.
Traversing through repeated fields is not supported; a repeated field
may be specified if and only if it is the last field in the path.
Args:
field_path (Sequence[str]): The field path.
Returns:
~.Field: A field object.
Raises:
KeyError: If a repeated field is used in the non-terminal position
in the path.
"""
# If collisions are not explicitly specified, retrieve them
# from this message's address.
# This ensures that calls to `get_field` will return a field with
# the same context, regardless of the number of levels through the
# chain (in order to avoid infinite recursion on circular references,
# we only shallowly bind message references held by fields; this
# binds deeply in the one spot where that might be a problem).
collisions = collisions or self.meta.address.collisions
# Get the first field in the path.
first_field = field_path[0]
cursor = self.fields[first_field +
('_' if first_field in utils.RESERVED_NAMES else '')]
# Base case: If this is the last field in the path, return it outright.
if len(field_path) == 1:
return cursor.with_context(
collisions=collisions,
visited_messages=frozenset({self}),
)
# Sanity check: If cursor is a repeated field, then raise an exception.
# Repeated fields are only permitted in the terminal position.
if cursor.repeated:
raise KeyError(
f'The {cursor.name} field is repeated; unable to use '
'`get_field` to retrieve its children.\n'
'This exception usually indicates that a '
'google.api.method_signature annotation uses a repeated field '
'in the fields list in a position other than the end.',
)
# Sanity check: If this cursor has no message, there is a problem.
if not cursor.message:
raise KeyError(
f'Field {".".join(field_path)} could not be resolved from '
f'{cursor.name}.',
)
# Recursion case: Pass the remainder of the path to the sub-field's
# message.
return cursor.message.get_field(*field_path[1:], collisions=collisions)
def with_context(self, *,
collisions: FrozenSet[str],
skip_fields: bool = False,
visited_messages: FrozenSet["MessageType"] = frozenset(),
) -> 'MessageType':
"""Return a derivative of this message with the provided context.
This method is used to address naming collisions. The returned
``MessageType`` object aliases module names to avoid naming collisions
in the file being written.
The ``skip_fields`` argument will omit applying the context to the
underlying fields. This provides for an "exit" in the case of circular
references.
"""
visited_messages = visited_messages | {self}
return dataclasses.replace(
self,
fields={
k: v.with_context(
collisions=collisions,
visited_messages=visited_messages
) for k, v in self.fields.items()
} if not skip_fields else self.fields,
nested_enums={
k: v.with_context(collisions=collisions)
for k, v in self.nested_enums.items()
},
nested_messages={
k: v.with_context(
collisions=collisions,
skip_fields=skip_fields,
visited_messages=visited_messages,
)
for k, v in self.nested_messages.items()
},
meta=self.meta.with_context(collisions=collisions),
)
@dataclasses.dataclass(frozen=True)
class EnumValueType:
    """Description of an enum value."""
    # The wrapped protobuf descriptor for this enum value.
    enum_value_pb: descriptor_pb2.EnumValueDescriptorProto
    # Generator metadata (address, documentation) attached to this value.
    meta: metadata.Metadata = dataclasses.field(
        default_factory=metadata.Metadata,
    )
    def __getattr__(self, name):
        # Delegate any unknown attribute to the underlying descriptor proto
        # (so e.g. ``.name`` and ``.number`` come straight from the pb).
        return getattr(self.enum_value_pb, name)
@dataclasses.dataclass(frozen=True)
class EnumType:
    """Description of an enum (defined with the ``enum`` keyword.)"""
    # The wrapped protobuf descriptor and its values.
    enum_pb: descriptor_pb2.EnumDescriptorProto
    values: List[EnumValueType]
    meta: metadata.Metadata = dataclasses.field(
        default_factory=metadata.Metadata,
    )
    def __hash__(self):
        # The identifier is unambiguous enough to serve as identity.
        return hash(self.ident)
    def __getattr__(self, name):
        # Delegate any unknown attribute to the underlying descriptor proto.
        return getattr(self.enum_pb, name)
    @property
    def ident(self) -> metadata.Address:
        """Return the identifier data to be used in templates."""
        return self.meta.address
    @property
    def resource_path(self) -> Optional[str]:
        # Duck-typing shim for the resource_messages property in the
        # Service class: recursive_field_types includes enums, and enums
        # are never resources.
        return None
    def with_context(self, *, collisions: FrozenSet[str]) -> 'EnumType':
        """Return a derivative of this enum bound to the provided context.

        Used to address naming collisions: the returned ``EnumType``
        aliases module names to avoid collisions in the file being written.
        """
        if not collisions:
            return self
        return dataclasses.replace(
            self,
            meta=self.meta.with_context(collisions=collisions),
        )
    @property
    def options_dict(self) -> Dict:
        """Return the EnumOptions (if present) as a dict.

        This is a hack to support a pythonic structure representation for
        the generator templates.
        """
        options = self.enum_pb.options
        return MessageToDict(options, preserving_proto_field_name=True)
@dataclasses.dataclass(frozen=True)
class PythonType:
    """Wrapper class for Python types.

    Exists for interface consistency: callers of methods such as
    :meth:`Field.type` always receive an object that is guaranteed to
    expose a ``name`` property.
    """
    meta: metadata.Metadata
    def __eq__(self, other):
        # Two wrappers are equal iff their metadata matches.
        return self.meta == other.meta
    def __ne__(self, other):
        return not (self == other)
    @utils.cached_property
    def ident(self) -> metadata.Address:
        """Return the identifier to be used in templates."""
        return self.meta.address
    @property
    def name(self) -> str:
        # The short name comes straight off the identifier.
        return self.ident.name
    @property
    def field_types(self) -> Sequence[Union['MessageType', 'EnumType']]:
        # Plain Python types never carry message or enum fields.
        return ()
@dataclasses.dataclass(frozen=True)
class PrimitiveType(PythonType):
    """A representation of a Python primitive type."""
    # The wrapped primitive type (or None for the null type).
    python_type: Optional[type]
    @classmethod
    def build(cls, primitive_type: Optional[type]):
        """Return a PrimitiveType object for the given Python primitive type.

        Args:
            primitive_type (cls): A Python primitive type, such as
                :class:`int` or :class:`str`. Despite not being a type,
                ``None`` is also accepted here.

        Returns:
            ~.PrimitiveType: The instantiated PrimitiveType object.
        """
        # Primitives have no import and no module to reference; the address
        # is simply the type's name ("int", "str", ...) or the literal
        # "None".
        type_name = 'None' if primitive_type is None else primitive_type.__name__
        return cls(
            meta=metadata.Metadata(address=metadata.Address(name=type_name)),
            python_type=primitive_type,
        )
    def __eq__(self, other):
        # Allow comparison against the raw Python type itself
        # (anything without a `meta` attribute is treated as such).
        if not hasattr(other, 'meta'):
            return self.python_type is other
        return super().__eq__(other)
@dataclasses.dataclass(frozen=True)
class OperationInfo:
    """Representation of long-running operation info."""
    response_type: MessageType
    metadata_type: MessageType
    def with_context(self, *, collisions: FrozenSet[str]) -> 'OperationInfo':
        """Return a derivative of this OperationInfo bound to the context.

        Used to address naming collisions: the returned ``OperationInfo``
        aliases module names on both member types to avoid collisions in
        the file being written.
        """
        response_type = self.response_type.with_context(collisions=collisions)
        metadata_type = self.metadata_type.with_context(collisions=collisions)
        return dataclasses.replace(
            self,
            response_type=response_type,
            metadata_type=metadata_type,
        )
@dataclasses.dataclass(frozen=True)
class RetryInfo:
    """Representation of the method's retry behavior."""
    # Maximum number of call attempts before the retry gives up.
    max_attempts: int
    # Delay before the first retry; presumably seconds — confirm against
    # the config source this is parsed from.
    initial_backoff: float
    # Upper bound on the delay between attempts.
    max_backoff: float
    # Factor by which the backoff grows after each failed attempt.
    backoff_multiplier: float
    # Exception classes that are considered retryable for this method.
    retryable_exceptions: FrozenSet[exceptions.GoogleAPICallError]
@dataclasses.dataclass(frozen=True)
class Method:
    """Description of a method (defined with the ``rpc`` keyword)."""
    # The underlying protobuf descriptor for this RPC.
    method_pb: descriptor_pb2.MethodDescriptorProto
    # Request and response message types.
    input: MessageType
    output: MessageType
    # Long-running-operation info, if this method returns an LRO.
    lro: Optional[OperationInfo] = dataclasses.field(default=None)
    # Retry policy, if one is configured for this method.
    retry: Optional[RetryInfo] = dataclasses.field(default=None)
    # Default timeout; presumably seconds — confirm against the retry
    # config parsing.
    timeout: Optional[float] = None
    # Generator metadata (address, documentation) for this method.
    meta: metadata.Metadata = dataclasses.field(
        default_factory=metadata.Metadata,
    )
    def __getattr__(self, name):
        # Delegate any unknown attribute to the underlying descriptor proto
        # (e.g. ``self.name`` resolves to ``method_pb.name``).
        return getattr(self.method_pb, name)
    @utils.cached_property
    def client_output(self):
        # Return type exposed by the synchronous GAPIC client surface
        # (cached; see _client_output for the resolution rules).
        return self._client_output(enable_asyncio=False)
    @utils.cached_property
    def client_output_async(self):
        # Return type exposed by the asyncio GAPIC client surface
        # (cached; see _client_output for the resolution rules).
        return self._client_output(enable_asyncio=True)
def flattened_oneof_fields(self, include_optional=False):
oneof_fields = collections.defaultdict(list)
for field in self.flattened_fields.values():
# Only include proto3 optional oneofs if explicitly looked for.
if field.oneof and not field.proto3_optional or include_optional:
oneof_fields[field.oneof].append(field)
return oneof_fields
    def _client_output(self, enable_asyncio: bool):
        """Return the output from the client layer.

        This takes into account transformations made by the outer GAPIC
        client to transform the output from the transport: void methods
        map to ``None``, LRO methods map to an api_core Operation wrapper,
        and paginated methods map to their generated pager class.

        Args:
            enable_asyncio (bool): Whether to select the asyncio variants
                (AsyncOperation / *AsyncPager) of the wrapper types.

        Returns:
            Union[~.MessageType, ~.PythonType]:
                A description of the return type.
        """
        # Void messages ultimately return None.
        if self.void:
            return PrimitiveType.build(None)
        # If this method is an LRO, return a PythonType instance representing
        # that.
        if self.lro:
            return PythonType(meta=metadata.Metadata(
                address=metadata.Address(
                    name='AsyncOperation' if enable_asyncio else 'Operation',
                    module='operation_async' if enable_asyncio else 'operation',
                    package=('google', 'api_core'),
                    collisions=self.lro.response_type.ident.collisions,
                ),
                documentation=utils.doc(
                    'An object representing a long-running operation. \n\n'
                    'The result type for the operation will be '
                    ':class:`{ident}` {doc}'.format(
                        doc=self.lro.response_type.meta.doc,
                        ident=self.lro.response_type.ident.sphinx,
                    ),
                ),
            ))
        # If this method is paginated, return that method's pager class.
        # The pager lives in the generated `services/<service>/pagers`
        # module, hence the assembled package path below.
        if self.paged_result_field:
            return PythonType(meta=metadata.Metadata(
                address=metadata.Address(
                    name=f'{self.name}AsyncPager' if enable_asyncio else f'{self.name}Pager',
                    package=self.ident.api_naming.module_namespace + (self.ident.api_naming.versioned_module_name,) + self.ident.subpackage + (
                        'services',
                        utils.to_snake_case(self.ident.parent[-1]),
                    ),
                    module='pagers',
                    collisions=self.input.ident.collisions,
                ),
                documentation=utils.doc(
                    f'{self.output.meta.doc}\n\n'
                    'Iterating over this object will yield results and '
                    'resolve additional pages automatically.',
                ),
            ))
        # Return the usual output.
        return self.output
# TODO(yon-mg): remove or rewrite: don't think it performs as intended
# e.g. doesn't work with basic | |
'Albuquerque, NM'},
'1505730':{'en': 'Albuquerque, NM'},
'1505747':{'en': 'Espanola, NM'},
'1505753':{'en': 'Espanola, NM'},
'1505757':{'en': 'Pecos, NM'},
'1505764':{'en': 'Albuquerque, NM'},
'1505766':{'en': 'Albuquerque, NM'},
'1505768':{'en': 'Albuquerque, NM'},
'1505782':{'en': 'Zuni, NM'},
'1505786':{'en': 'Crownpoint, NM'},
'1505792':{'en': 'Albuquerque, NM'},
'1505795':{'en': 'Santa Fe, NM'},
'1505796':{'en': 'Albuquerque, NM'},
'1505797':{'en': 'Albuquerque, NM'},
'1505798':{'en': 'Albuquerque, NM'},
'1505804':{'en': 'Albuquerque, NM'},
'1505820':{'en': 'Santa Fe, NM'},
'1505821':{'en': 'Albuquerque, NM'},
'1505822':{'en': 'Albuquerque, NM'},
'1505823':{'en': 'Albuquerque, NM'},
'1505827':{'en': 'Santa Fe, NM'},
'1505828':{'en': 'Albuquerque, NM'},
'150583':{'en': 'Albuquerque, NM'},
'1505832':{'en': 'Moriarty, NM'},
'1505841':{'en': 'Albuquerque, NM'},
'1505842':{'en': 'Albuquerque, NM'},
'1505843':{'en': 'Albuquerque, NM'},
'1505847':{'en': 'Mountainair, NM'},
'1505848':{'en': 'Albuquerque, NM'},
'1505856':{'en': 'Albuquerque, NM'},
'1505857':{'en': 'Albuquerque, NM'},
'1505858':{'en': 'Albuquerque, NM'},
'1505861':{'en': 'Belen, NM'},
'1505863':{'en': 'Gallup, NM'},
'1505864':{'en': 'Belen, NM'},
'1505865':{'en': 'Los Lunas, NM'},
'1505866':{'en': 'Los Lunas, NM'},
'1505867':{'en': 'Bernalillo, NM'},
'1505869':{'en': 'Bosque Farms, NM'},
'1505872':{'en': 'Albuquerque, NM'},
'1505873':{'en': 'Albuquerque, NM'},
'1505877':{'en': 'Albuquerque, NM'},
'150588':{'en': 'Albuquerque, NM'},
'1505890':{'en': 'Albuquerque, NM'},
'1505891':{'en': 'Rio Rancho, NM'},
'1505892':{'en': 'Rio Rancho, NM'},
'1505896':{'en': 'R<NAME>, NM'},
'1505897':{'en': 'Albuquerque, NM'},
'1505898':{'en': 'Albuquerque, NM'},
'1505899':{'en': 'Albuquerque, NM'},
'1505920':{'en': 'Santa Fe, NM'},
'1505922':{'en': 'Albuquerque, NM'},
'1505954':{'en': 'Santa Fe, NM'},
'1505955':{'en': 'Santa Fe, NM'},
'150598':{'en': 'Santa Fe, NM'},
'1505980':{'en': 'Albuquerque, NM'},
'1505992':{'en': 'Santa Fe, NM'},
'1505994':{'en': 'R<NAME>, NM'},
'1505995':{'en': 'Santa Fe, NM'},
'1505998':{'en': 'Albuquerque, NM'},
'1506':{'en': 'New Brunswick'},
'1506204':{'en': 'Moncton, NB'},
'1506235':{'en': 'Saint-Quentin, NB'},
'1506273':{'en': 'Perth-Andover, NB'},
'1506325':{'en': 'Woodstock, NB'},
'1506327':{'en': 'Minto, NB'},
'1506328':{'en': 'Woodstock, NB'},
'1506336':{'en': 'Shippagan, NB'},
'1506344':{'en': u('Lam\u00e8que, NB')},
'1506357':{'en': 'Oromocto, NB'},
'1506372':{'en': 'Salisbury, NB'},
'1506375':{'en': 'Hartland, NB'},
'1506382':{'en': 'Moncton, NB'},
'1506383':{'en': 'Moncton, NB'},
'1506384':{'en': 'Moncton, NB'},
'1506388':{'en': 'Moncton, NB'},
'1506389':{'en': 'Moncton, NB'},
'1506392':{'en': 'Florenceville, NB'},
'1506393':{'en': 'Tracadie-Sheila, NB'},
'1506395':{'en': 'Tracadie-Sheila, NB'},
'1506432':{'en': 'Sussex, NB'},
'1506433':{'en': 'Sussex, NB'},
'1506446':{'en': 'Oromocto, NB'},
'150645':{'en': 'Fredericton, NB'},
'1506466':{'en': '<NAME>, NB'},
'1506472':{'en': 'Fredericton, NB'},
'1506473':{'en': 'Grand Falls, NB'},
'1506523':{'en': 'Richibucto, NB'},
'1506529':{'en': 'S<NAME>, NB'},
'1506532':{'en': 'Shediac, NB'},
'1506533':{'en': 'Shediac, NB'},
'1506536':{'en': 'Sackville, NB'},
'1506546':{'en': 'Bathurst, NB'},
'1506548':{'en': 'Bathurst, NB'},
'1506576':{'en': 'Cocagne, NB'},
'1506577':{'en': 'Cap-Pele, NB'},
'1506622':{'en': 'Nelson-Miramichi, NB'},
'1506632':{'en': 'Saint John, NB'},
'1506633':{'en': 'Saint John, NB'},
'1506634':{'en': 'Saint John, NB'},
'1506635':{'en': 'Saint John, NB'},
'1506642':{'en': 'Saint John, NB'},
'1506648':{'en': 'Saint John, NB'},
'1506652':{'en': 'Saint John, NB'},
'1506657':{'en': 'Saint John, NB'},
'1506658':{'en': 'Saint John, NB'},
'1506662':{'en': '<NAME>, NB'},
'1506672':{'en': 'Saint John, NB'},
'1506684':{'en': 'Dalhousie, NB'},
'1506693':{'en': 'Saint John, NB'},
'1506696':{'en': 'Saint John, NB'},
'1506727':{'en': 'Caraquet, NB'},
'1506735':{'en': 'Edmundston, NB'},
'1506737':{'en': 'Edmundston, NB'},
'1506739':{'en': 'Edmundston, NB'},
'1506743':{'en': 'Bouctouche, NB'},
'1506753':{'en': 'Campbellton, NB'},
'1506755':{'en': 'Saint George, NB'},
'1506756':{'en': 'Petitcodiac, NB'},
'1506759':{'en': 'Campbellton, NB'},
'1506773':{'en': 'Nelson-Miramichi, NB'},
'1506776':{'en': 'Neguac, NB'},
'1506778':{'en': 'Nelson-Miramichi, NB'},
'1506783':{'en': '<NAME>, NB'},
'1506789':{'en': 'Campbellton, NB'},
'1506832':{'en': 'Hampton, NB'},
'1506847':{'en': 'Rothesay, NB'},
'150685':{'en': 'Moncton, NB'},
'1506866':{'en': 'Moncton, NB'},
'1506876':{'en': '<NAME>, NB'},
'1506992':{'en': 'Clair, NB'},
'1507':{'en': 'Minnesota'},
'1507206':{'en': 'Rochester, MN'},
'1507223':{'en': 'Canby, MN'},
'1507226':{'en': 'Rochester, MN'},
'1507233':{'en': 'New Ulm, MN'},
'1507234':{'en': 'Janesville, MN'},
'1507235':{'en': 'Fairmont, MN'},
'1507237':{'en': 'Gaylord, MN'},
'1507238':{'en': 'Fairmont, MN'},
'1507247':{'en': 'Tyler, MN'},
'1507252':{'en': 'Rochester, MN'},
'1507255':{'en': 'Rochester, MN'},
'1507263':{'en': '<NAME>, MN'},
'1507274':{'en': 'Westbrook, MN'},
'1507275':{'en': 'Hendricks, MN'},
'150728':{'en': 'Rochester, MN'},
'1507283':{'en': 'Luverne, MN'},
'1507292':{'en': 'Rochester, MN'},
'1507328':{'en': 'Rochester, MN'},
'1507332':{'en': 'Faribault, MN'},
'1507333':{'en': 'Faribault, MN'},
'1507334':{'en': 'Faribault, MN'},
'1507344':{'en': 'Mankato, MN'},
'1507345':{'en': 'Mankato, MN'},
'1507346':{'en': 'Spring Valley, MN'},
'1507354':{'en': 'New Ulm, MN'},
'1507356':{'en': 'Pine Island, MN'},
'1507357':{'en': 'Le Center, MN'},
'1507359':{'en': 'New Ulm, MN'},
'1507362':{'en': 'Waterville, MN'},
'1507364':{'en': 'Montgomery, MN'},
'1507372':{'en': 'Worthington, MN'},
'1507373':{'en': 'Albert Lea, MN'},
'1507374':{'en': 'Dodge Center, MN'},
'1507375':{'en': 'Saint James, MN'},
'1507376':{'en': 'Worthington, MN'},
'1507377':{'en': 'Albert Lea, MN'},
'1507385':{'en': 'Mankato, MN'},
'1507386':{'en': 'Mankato, MN'},
'1507387':{'en': 'Mankato, MN'},
'1507388':{'en': 'Mankato, MN'},
'1507389':{'en': 'Mankato, MN'},
'1507424':{'en': 'Rochester, MN'},
'1507426':{'en': 'Fairfax, MN'},
'1507427':{'en': 'Mountain Lake, MN'},
'1507433':{'en': 'Austin, MN'},
'1507434':{'en': 'Austin, MN'},
'1507437':{'en': 'Austin, MN'},
'1507442':{'en': 'Edgerton, MN'},
'1507444':{'en': 'Owatonna, MN'},
'1507446':{'en': 'Owatonna, MN'},
'1507451':{'en': 'Owatonna, MN'},
'1507452':{'en': 'Winona, MN'},
'1507453':{'en': 'Winona, MN'},
'1507454':{'en': 'Winona, MN'},
'1507455':{'en': 'Owatonna, MN'},
'1507457':{'en': 'Winona, MN'},
'1507467':{'en': 'Lanesboro, MN'},
'1507474':{'en': 'Winona, MN'},
'1507483':{'en': 'Adrian, MN'},
'1507498':{'en': 'Spring Grove, MN'},
'1507523':{'en': 'Lewiston, MN'},
'1507524':{'en': 'Mapleton, MN'},
'1507526':{'en': 'Blue Earth, MN'},
'1507529':{'en': 'Rochester, MN'},
'1507532':{'en': 'Marshall, MN'},
'1507533':{'en': 'Stewartville, MN'},
'1507534':{'en': 'Plainview, MN'},
'1507536':{'en': 'Rochester, MN'},
'1507537':{'en': 'Marshall, MN'},
'1507553':{'en': 'Wells, MN'},
'1507583':{'en': 'Blooming Prairie, MN'},
'1507625':{'en': 'Mankato, MN'},
'1507629':{'en': 'Tracy, MN'},
'1507634':{'en': 'Kasson, MN'},
'1507637':{'en': 'Redwood Falls, MN'},
'1507642':{'en': 'Madelia, MN'},
'1507644':{'en': 'Redwood Falls, MN'},
'1507645':{'en': 'Northfield, MN'},
'1507646':{'en': 'Northfield, MN'},
'1507647':{'en': 'Winthrop, MN'},
'1507662':{'en': 'Lakefield, MN'},
'1507663':{'en': 'Northfield, MN'},
'1507665':{'en': 'Le Sueur, MN'},
'1507694':{'en': 'Ivanhoe, MN'},
'1507723':{'en': 'Springfield, MN'},
'1507725':{'en': 'Caledonia, MN'},
'1507726':{'en': 'Lake Crystal, MN'},
'1507732':{'en': 'Zumbrota, MN'},
'1507744':{'en': 'Lonsdale, MN'},
'1507754':{'en': 'Grand Meadow, MN'},
'1507765':{'en': 'Preston, MN'},
'1507775':{'en': 'Byron, MN'},
'1507776':{'en': 'Truman, MN'},
'1507789':{'en': 'Kenyon, MN'},
'1507794':{'en': 'Sleepy Eye, MN'},
'1507825':{'en': 'Pipestone, MN'},
'1507831':{'en': 'Windom, MN'},
'1507835':{'en': 'Waseca, MN'},
'1507836':{'en': 'Slayton, MN'},
'1507847':{'en': 'Jackson, MN'},
'1507864':{'en': 'Rushford, MN'},
'1507867':{'en': 'Chatfield, MN'},
'1507872':{'en': 'Minneota, MN'},
'1507886':{'en': 'Harmony, MN'},
'1507893':{'en': 'Winnebago, MN'},
'1507895':{'en': 'La Crescent, MN'},
'1507896':{'en': 'Houston, MN'},
'1507931':{'en': 'St. Peter, MN'},
'1507932':{'en': 'St. Charles, MN'},
'1507934':{'en': 'St. Peter, MN'},
'1507964':{'en': 'Arlington, MN'},
'1508':{'en': 'Massachusetts'},
'1508222':{'en': 'Attleboro, MA'},
'1508223':{'en': 'Attleboro, MA'},
'1508224':{'en': 'Plymouth, MA'},
'1508226':{'en': 'Attleboro, MA'},
'1508228':{'en': 'Nantucket, MA'},
'1508229':{'en': 'Marlborough, MA'},
'1508232':{'en': 'Brockton, MA'},
'1508234':{'en': 'Whitinsville, MA'},
'1508235':{'en': 'Fall River, MA'},
'1508240':{'en': 'Orleans, MA'},
'1508248':{'en': 'Charlton, MA'},
'1508251':{'en': 'Marlborough, MA'},
'1508252':{'en': 'Rehoboth, MA'},
'1508255':{'en': 'Orleans, MA'},
'1508261':{'en': 'Mansfield, MA'},
'1508273':{'en': 'Wareham, MA'},
'1508278':{'en': 'Uxbridge, MA'},
'1508279':{'en': 'Bridgewater, MA'},
'1508281':{'en': 'Marlborough, MA'},
'1508285':{'en': 'Norton, MA'},
'1508291':{'en': 'Wareham, MA'},
'1508295':{'en': 'Wareham, MA'},
'1508303':{'en': 'Marlborough, MA'},
'1508309':{'en': 'Framingham, MA'},
'1508324':{'en': 'Fall River, MA'},
'1508325':{'en': 'Nantucket, MA'},
'1508334':{'en': 'Worcester, MA'},
'1508336':{'en': 'Seekonk, MA'},
'1508337':{'en': 'Mansfield, MA'},
'1508339':{'en': 'Mansfield, MA'},
'1508347':{'en': 'Sturbridge, MA'},
'1508349':{'en': 'Wellfleet, MA'},
'1508358':{'en': 'Wayland, MA'},
'1508359':{'en': 'Medfield, MA'},
'1508363':{'en': 'Worcester, MA'},
'1508366':{'en': 'Westborough, MA'},
'1508368':{'en': 'Worcester, MA'},
'1508370':{'en': 'Framingham, MA'},
'1508376':{'en': 'Millis, MA'},
'1508378':{'en': 'East Bridgewater, MA'},
'1508379':{'en': 'Swansea, MA'},
'1508381':{'en': 'Milford, MA'},
'1508383':{'en': 'Framingham, MA'},
'1508384':{'en': 'Wrentham, MA'},
'1508385':{'en': 'Dennis, MA'},
'1508393':{'en': 'Northborough, MA'},
'1508399':{'en': 'Attleboro, MA'},
'1508405':{'en': 'Framingham, MA'},
'1508421':{'en': 'Worcester, MA'},
'1508422':{'en': 'Milford, MA'},
'1508427':{'en': 'Brockton, MA'},
'1508429':{'en': 'Holliston, MA'},
'1508430':{'en': 'Harwich, MA'},
'1508435':{'en': 'Hopkinton, MA'},
'1508436':{'en': 'Brockton, MA'},
'1508457':{'en': 'Falmouth, MA'},
'1508459':{'en': 'Worcester, MA'},
'1508460':{'en': 'Marlborough, MA'},
'1508473':{'en': 'Milford, MA'},
'1508476':{'en': 'Douglas, MA'},
'1508477':{'en': 'Mashpee, MA'},
'1508478':{'en': 'Milford, MA'},
'1508480':{'en': 'Marlborough, MA'},
'1508481':{'en': 'Marlborough, MA'},
'1508482':{'en': 'Milford, MA'},
'1508485':{'en': 'Marlborough, MA'},
'1508487':{'en': 'Provincetown, MA'},
'1508495':{'en': 'Falmouth, MA'},
'1508497':{'en': 'Hopkinton, MA'},
'1508510':{'en': 'Brockton, MA'},
'1508520':{'en': 'Franklin, MA'},
'1508528':{'en': 'Franklin, MA'},
'1508529':{'en': 'Upton, MA'},
'1508533':{'en': 'Medway, MA'},
'1508539':{'en': 'Mashpee, MA'},
'1508540':{'en': 'Falmouth, MA'},
'1508541':{'en': 'Franklin, MA'},
'1508543':{'en': 'Foxboro, MA'},
'1508548':{'en': 'Falmouth, MA'},
'1508553':{'en': 'Franklin, MA'},
'1508559':{'en': 'Brockton, MA'},
'1508567':{'en': 'Fall River, MA'},
'150858':{'en': 'Brockton, MA'},
'1508595':{'en': 'Worcester, MA'},
'1508616':{'en': 'Westborough, MA'},
'1508620':{'en': 'Framingham, MA'},
'1508624':{'en': 'Marlborough, MA'},
'1508626':{'en': 'Framingham, MA'},
'1508627':{'en': 'Edgartown, MA'},
'1508628':{'en': 'Framingham, MA'},
'1508634':{'en': 'Milford, MA'},
'1508636':{'en': 'Westport, MA'},
'1508643':{'en': 'North Attleborough, MA'},
'1508644':{'en': 'Assonet, MA'},
'1508645':{'en': 'Chilmark, MA'},
'1508646':{'en': 'Fall River, MA'},
'1508647':{'en': 'Natick, MA'},
'1508650':{'en': 'Natick, MA'},
'1508651':{'en': 'Natick, MA'},
'1508653':{'en': 'Natick, MA'},
'1508655':{'en': 'Natick, MA'},
'1508660':{'en': 'Walpole, MA'},
'1508668':{'en': 'Walpole, MA'},
'1508669':{'en': 'Dighton, MA'},
'150867':{'en': 'Fall River, MA'},
'1508693':{'en': 'Vineyard Haven, MA'},
'1508695':{'en': 'North Attleborough, MA'},
'1508696':{'en': 'Vineyard Haven, MA'},
'1508697':{'en': 'Bridgewater, MA'},
'1508698':{'en': 'Foxboro, MA'},
'1508699':{'en': 'North Attleborough, MA'},
'1508717':{'en': 'New Bedford, MA'},
'1508721':{'en': 'Auburn, MA'},
'1508730':{'en': 'Fall River, MA'},
'1508732':{'en': 'Plymouth, MA'},
'1508746':{'en': 'Plymouth, MA'},
'1508747':{'en': 'Plymouth, MA'},
'1508748':{'en': 'Marion, MA'},
'150875':{'en': 'Worcester, MA'},
'1508758':{'en': 'Mattapoisett, MA'},
'1508761':{'en': 'Attleboro, MA'},
'1508764':{'en': 'Southbridge, MA'},
'1508765':{'en': 'Southbridge, MA'},
'1508767':{'en': 'Worcester, MA'},
'1508770':{'en': 'Worcester, MA'},
'1508771':{'en': 'Hyannis, MA'},
'1508775':{'en': 'Hyannis, MA'},
'1508778':{'en': 'Hyannis, MA'},
'1508785':{'en': 'Dover, MA'},
'1508788':{'en': 'Framingham, MA'},
'150879':{'en': 'Worcester, MA'},
'1508790':{'en': 'Hyannis, MA'},
'1508820':{'en': 'Framingham, MA'},
'1508821':{'en': 'Taunton, MA'},
'1508822':{'en': 'Taunton, MA'},
'1508823':{'en': 'Taunton, MA'},
'1508824':{'en': 'Taunton, MA'},
'1508828':{'en': 'Taunton, MA'},
'1508829':{'en': 'Holden, MA'},
'1508830':{'en': 'Plymouth, MA'},
'1508831':{'en': 'Worcester, MA'},
'1508832':{'en': 'Auburn, MA'},
'1508835':{'en': 'West Boylston, MA'},
'1508836':{'en': 'Westborough, MA'},
'1508842':{'en': 'Shrewsbury, MA'},
'1508845':{'en': 'Shrewsbury, MA'},
'1508852':{'en': 'Worcester, MA'},
'1508853':{'en': 'Worcester, MA'},
'1508854':{'en': 'Worcester, MA'},
'1508856':{'en': 'Worcester, MA'},
'1508857':{'en': 'Brockton, MA'},
'1508860':{'en': 'Worcester, MA'},
'1508862':{'en': 'Hyannis, MA'},
'1508865':{'en': 'Millbury, MA'},
'1508866':{'en': 'Carver, MA'},
'1508869':{'en': 'Boylston, MA'},
'1508870':{'en': 'Westborough, MA'},
'1508872':{'en': 'Framingham, MA'},
'1508875':{'en': 'Framingham, MA'},
'1508877':{'en': 'Framingham, MA'},
'1508879':{'en': 'Framingham, MA'},
'1508880':{'en': 'Taunton, | |
acolor[0]), None, resolveFilename(SCOPE_SKIN) + SkinName + "/" + png_name)
# if name does not match set it to inactive
else:
currenttheme.set("value", "inactive")
if rootTheme.find("screenthemes") is not None:
themes = rootTheme.find("screenthemes")
for screens in themes.findall("screens"):
for screenname in screenList:
if screens.get("name") == screenname:
for screen in screens.findall("screentheme"):
if configDict is None:
currentValue = config.plugins.MerlinSkinThemes.Screens[screenname].value
else:
currentValue = configDict.get("%s" %(screenname),None)
if screen.get("name") == currentValue:
screen.set("value", "active")
if mode == "apply":
f.write("%s:::%s\n" %(screenname, currentValue))
newscreen = screen.find("screen")
# delete old screen
for SkinScreen in rootSkin.findall("screen"):
if SkinScreen.get("name") == screenname:
rootSkin.remove(SkinScreen)
# Set new screen
rootSkin.append(Tree.fromstring(Tree.tostring(newscreen)))
else:
screen.set("value", "inactive")
# LCD / OLED / External LCD
if displayTag is not None:
if rootTheme.find(displayTag) is not None:
themes = rootTheme.find(displayTag)
for screens in themes.findall("screens"):
for displayscreenname in displayScreenList:
if screens.get("name") == displayscreenname:
for screen in screens.findall(displayTag[:-1]):
if configDict is None:
currentValue = config.plugins.MerlinSkinThemes.DisplayScreens[displayscreenname].value
else:
currentValue = configDict.get("%s" %(displayscreenname), None)
if screen.get("name") == currentValue:
screen.set("value", "active")
if mode == "apply":
f.write("%s:::%s\n" %(displayscreenname, currentValue))
newscreen = screen.find("screen")
# delete old screen
for SkinScreen in rootSkin.findall("screen"):
if SkinScreen.get("name") == displayscreenname and SkinScreen.get("id") == IdString:
rootSkin.remove(SkinScreen)
# Set new screen
rootSkin.append(Tree.fromstring(Tree.tostring(newscreen)))
else:
screen.set("value", "inactive")
# corner Radius in skin.xml in allen eLabel ersetzen
if config.plugins.MerlinSkinThemes.CornerRadius.value <> "":
for elabel in rootSkin.findall('.//eLabel[@cornerRadius]'):
if 'cornerRadius' in elabel.attrib:
if rootTheme.find("cornerradius") is not None:
crtheme = rootTheme.find("cornerradius")
if configDict is None:
radiusValue = config.plugins.MerlinSkinThemes.CornerRadius.value
else:
radiusValue = configDict.get("cornerradius", None)
if elabel.get("cornerRadius") <> crtheme.get("exclude"):
if radiusValue is not None:
elabel.set("cornerRadius", config.plugins.MerlinSkinThemes.CornerRadius.value)
for r in crtheme.findall("radius"):
if r.get("name") == config.plugins.MerlinSkinThemes.CornerRadius.value:
r.set("value", "active")
else:
r.set("value", "inactive")
if mode == "apply":
f.write("%s:::%s\n" %("cornerradius", config.plugins.MerlinSkinThemes.CornerRadius.value))
XMLindent(rootSkin, 0)
curSkin.write(skinFile)
# SkinPathTheme
xmlTree = Tree.parse(skinFile)
xmlRoot = xmlTree.getroot()
xmlString = Tree.tostring(xmlRoot)
if rootTheme.find("skinpaththemes") is not None:
spt = rootTheme.find("skinpaththemes")
for theme in spt.findall("theme"):
if configDict is None:
currentValue = config.plugins.MerlinSkinThemes.Themes["skinpaththeme"].value
else:
currentValue = configDict.get("skinpaththemes", None)
if theme.get("name") == currentValue:
newPath = theme.get("path")
theme.set("value", "active")
if mode == "apply":
f.write("skinpaththemes:::%s\n" %(currentValue))
else:
theme.set("value", "inactive")
for theme in spt.findall("theme"):
xmlString = xmlString.replace(theme.get("path"), newPath)
xmlSkin = open(skinFile, "w")
xmlSkin.write(xmlString)
xmlSkin.close()
curTheme.write(themeFile)
if mode == "apply":
f.close()
class MerlinSkinThemes(Screen, HelpableScreen, ConfigListScreen):
	# Main plugin screen: a skin list on the left, preview/copyright info on
	# the right, plus an (initially hidden) ConfigList that edits the themes
	# of the currently selected skin.
	skin = """
		<screen position="center,center" size="1920,1080" title="%s" backgroundColor="#00808080" >
			<widget name="DescLabel" position="10,10" size="1900,40" font="Regular;26" zPosition="2" valign="center" halign="center" />
			<widget name="ListLabel" position="10,60" size="945,40" font="Regular;26" zPosition="2" valign="center" halign="left" />
			<widget name="ImageInfo" position="965,60" size="945,40" font="Regular;26" zPosition="2" halign="left" />
			<widget name="SkinsList" position="10,110" size="945,910" scrollbarMode="showOnDemand" zPosition="1" />
			<widget name="config" position="10,110" size="945,910" scrollbarMode="showOnDemand" zPosition="1" />
			<widget name="SkinCopyright" position="965,110" size="945,200" font="Regular;18" zPosition="2" halign="left" />
			<widget name="Preview" position="965,320" size="945,700" alphatest="blend" />
			<widget name="key_red" position="10,1030" size="200,40" valign="center" halign="center" zPosition="3" transparent="1" font="Regular;24" />
			<widget name="key_green" position="258,1030" size="200,40" valign="center" halign="center" zPosition="3" transparent="1" font="Regular;24" />
			<widget name="key_yellow" position="506,1030" size="200,40" valign="center" halign="center" zPosition="3" transparent="1" font="Regular;24" />
			<widget name="key_blue" position="755,1030" size="200,40" valign="center" halign="center" zPosition="3" transparent="1" font="Regular;24" />
			<ePixmap name="red" position="10,1030" zPosition="1" size="200,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="blend" />
			<ePixmap name="green" position="258,1030" zPosition="1" size="200,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="blend" />
			<ePixmap name="yellow" position="506,1030" zPosition="1" size="200,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="blend" />
			<ePixmap name="blue" position="755,1030" zPosition="1" size="200,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="blend" />
		</screen>"""% ("MerlinSkinThemes")
	# Kept as class attributes (not instance attributes) so callbacks and
	# helper screens can read the current selection without holding an
	# instance reference.
	ThemeName = ""
	selSkinName = ""
	selThemeFile = ""
	def __init__(self, session):
		# Build the main screen: labels, buttons, skin list, action maps.
		print "[MST] " + PluginVersion + " running..."
		self.session = session
		Screen.__init__(self, session)
		HelpableScreen.__init__(self)
		self.clist = []
		ConfigListScreen.__init__(self, self.clist)
		self.setTitle(Title + " " + PluginVersion + " - " + Author)
		# skin_user.xml overrides parts of any skin and would conflict with
		# the files this plugin rewrites - warn about it in the list header.
		if not SkinUser:
			self["ListLabel"] = Label(_("Skinlist") )
		else:
			self["ListLabel"] = Label(_("Skinlist") + " - ATTENTION: skin_user.xml found!!!")
		self["DescLabel"] = Label(Title + " " + PluginVersion + " " + Author)
		self["SkinCopyright"] = Label()
		self["Preview"] = Pixmap()
		self["ImageInfo"] = Label()
		# Which pane currently has focus: "SkinsList" or "ConfigList".
		self.curList = "SkinsList"
		self["key_red"] = Button(_("exit"))
		self["key_green"] = Button(_("switch to skin"))
		self["key_yellow"] = Button(_("save as design"))
		self["key_blue"] = Button(_("open config"))
		self.skinsList = []
		self["SkinsList"] = GetSkinsList([])
		self.onSelectionChanged = [ ]
		self["ColorActions"] = HelpableActionMap(self, "ColorActions",
			{
				"red": self.buttonRed,
				"green": self.buttonGreen,
				"yellow": self.buttonYellow,
				"blue": self.openConfig,
			}, -1)
		self["DirectionActions"] = HelpableActionMap(self, "DirectionActions",
			{
				"up": (self.up, _("Move cursor up")),
				"down": (self.down, _("Move cursor down")),
				"left": (self.left, _("Move cursor left")),
				"right": (self.right, _("Move cursor right")),
			}, -1)
		self["OkCancelActions"] = HelpableActionMap(self, "OkCancelActions",
			{
				"ok": (self.ok, ""),
				"cancel": (self.exit, _("Close plugin")),
			}, -1)
		self["TeleTextActions"] = HelpableActionMap(self, "TeleTextActions",
			{
				"help": (self.Help, ""),
				"info": (self.Info, ""),
			}, -1)
		self["MenuActions"] = HelpableActionMap(self, "MenuActions",
			{
				"menu": (self.MSTMenu, ""),
			}, -1)
		self.updateSkinList()
		# Remember the initially selected skin and the files this plugin
		# will read/rewrite for it.
		MerlinSkinThemes.selSkinName = self["SkinsList"].getCurrent()[1][7]
		MerlinSkinThemes.selSkinFile = resolveFilename(SCOPE_SKIN) + MerlinSkinThemes.selSkinName + "/skin.xml"
		MerlinSkinThemes.selThemeFile = resolveFilename(SCOPE_SKIN) + MerlinSkinThemes.selSkinName + "/themes.xml"
		self.onLayoutFinish.append(self.startRun)
	def openConfig(self):
		# Blue key: open the plugin's configuration screen on top of this one.
		self.session.open(MerlinSkinThemesConfig)
	def startRun(self):
		# Runs once the layout is finished: wire up the selection handler and
		# show details for the initially selected skin.
		self["SkinsList"].onSelectionChanged.append(self.changedSkinsList)
		MerlinSkinThemes.selSkinName = self["SkinsList"].getCurrent()[1][7]
		MerlinSkinThemes.selSkinFile = resolveFilename(SCOPE_SKIN) + MerlinSkinThemes.selSkinName + "/skin.xml"
		MerlinSkinThemes.selThemeFile = resolveFilename(SCOPE_SKIN) + MerlinSkinThemes.selSkinName + "/themes.xml"
		# Start on the skin list; the ConfigList pane is shown on demand.
		self["config"].hide()
		self["SkinsList"].moveToIndex(self["SkinsList"].selectedIndex)
		self.ImageInfo()
		if fileExists(MerlinSkinThemes.selSkinFile):
			self.CopyrightInfo()
	# parse themes.xml
	def readThemes(self):
		# Parse the selected skin's themes.xml and rebuild the ConfigList:
		# one section each for designs, themes, screens, display screens and
		# corner radius. The entry marked value="active" in the XML becomes
		# the preselected choice. Any parse error replaces the list with an
		# error entry.
		self.clist = []
		try:
			xml = Tree.parse(MerlinSkinThemes.selThemeFile)
			selSkinList = []
			selSkinList.append(MerlinSkinThemes.selSkinName)
			config.plugins.MerlinSkinThemes.selSkin = MyConfigSelection(default=MerlinSkinThemes.selSkinName, choices = selSkinList)
			self.clist.append(getConfigListEntry(" ", ))
			self.clist.append(getConfigListEntry("Skin", config.plugins.MerlinSkinThemes.selSkin))
			#####################
			# -- Designs -- #
			#####################
			# <designs>
			#   <design>
			#     <xyzTheme />
			#   </design>
			# </designs>
			# this reads all <design> and its themes that are defined in <designs>
			# name of <design> is read and displayed in the section DESIGNS, active one is set as default
			self.clist.append(getConfigListEntry(" ", ))
			self.clist.append(getConfigListEntry(" " + u'\u00b7' + " DESIGNS", ))
			initDesignsDone = False
			if xml.find("designs") is not None:
				ds = xml.find("designs")
				for element in ["design"]:
					elementList = []
					# "-none-" is always offered so a design can be deselected
					elementList.append("-none-")
					defaultValue = "-none-"
					for design in ds.findall(element):
						elementList.append(design.get("name"))
					if len(elementList) > 0:
						if not initDesignsDone:
							initConfigSubDict("Design")
							initDesignsDone = True
						config.plugins.MerlinSkinThemes.Designs[element] = MyConfigSelection(default=defaultValue, choices = elementList)
						self.clist.append(getConfigListEntry("Design", config.plugins.MerlinSkinThemes.Designs[element]))
			else:
				# no <designs> section: still show a disabled "-none-" entry
				if not initDesignsDone:
					initConfigSubDict("Design")
					initDesignsDone = True
				config.plugins.MerlinSkinThemes.Designs["design"] = MyConfigSelection(default="-none-", choices = ["-none-"])
				self.clist.append(getConfigListEntry("Design", config.plugins.MerlinSkinThemes.Designs["design"]))
			################
			# -- Themes -- #
			################
			# name of theme is read and displayed in the section THEMES, active one is set as default
			self.clist.append(getConfigListEntry(" ", ))
			self.clist.append(getConfigListEntry(" " + u'\u00b7' + " THEMES", ))
			themesInitDone = False
			for element in themeList:
				elementList = []
				defaultValue = None
				tag = element.lower()
				# skinpaththemes are nested one level deeper than the other
				# theme tags, hence the special case below
				if tag == "skinpaththeme":
					tag = tag + "s"
				if xml.find(tag) is not None:
					if tag != "skinpaththemes":
						for theme in xml.findall(tag):
							elementList.append(theme.get("name"))
							if theme.get("value") == "active":
								defaultValue = theme.get("name")
					else:
						themes = xml.find(tag)
						for theme in themes.findall("theme"):
							elementList.append(theme.get("name"))
							if theme.get("value") == "active":
								defaultValue = theme.get("name")
				if len(elementList) > 0:
					if not themesInitDone:
						initConfigSubDict("Themes")
						themesInitDone = True
					config.plugins.MerlinSkinThemes.Themes[element.lower()] = MyConfigSelection(default=defaultValue, choices = elementList)
					self.clist.append(getConfigListEntry(element, config.plugins.MerlinSkinThemes.Themes[element.lower()]))
			#################
			# -- SCREENS -- #
			#################
			# <screenthemes>
			#   <!-- multiple screens possible -->
			#   <screens name="screenname">
			#     <!-- multiple screentheme possible -->
			#     <screentheme>
			#       <screen>...</screen>
			#     </screentheme>
			#   </screens>
			# </screenthemes>
			self.clist.append(getConfigListEntry(" ", ))
			self.clist.append(getConfigListEntry(" " + u'\u00b7' + " SCREENS", ))
			if xml.find("screenthemes") is not None:
				st = xml.find("screenthemes")
				initScreenDone = False
				for screens in st.findall("screens"):
					for screenname in screenList:
						elementList = []
						defaultValue = None
						if screens.get("name") == screenname:
							for themes in screens.findall("screentheme"):
								elementList.append(themes.get("name"))
								if themes.get("value") == "active":
									defaultValue = themes.get("name")
							if len(elementList)>0:
								if not initScreenDone:
									initConfigSubDict("Screens")
									initScreenDone = True
								config.plugins.MerlinSkinThemes.Screens[screenname] = MyConfigSelection(default=defaultValue, choices = elementList)
								self.clist.append(getConfigListEntry(screenname, config.plugins.MerlinSkinThemes.Screens[screenname]))
			#########################
			# -- Display Screens -- #
			#########################
			# <lcdscreenthemes> / <oledscreenthemes> / <extlcdscreenthemes>
			#   <!-- multiple screens possible -->
			#   <screens name="screenname">
			#     <!-- multiple lcdscreentheme possible -->
			#     <lcdscreentheme> / <oledscreentheme> / <extlcdscreentheme>
			#       <screen>...</screen>
			#     </lcdscreentheme> / </oledscreentheme> / </extlcdscreentheme>
			#   </screens>
			# </lcdscreenthemes> / <oledscreenthemes> / </extlcdscreenthemes>
			if displayTag is not None:
				if xml.find(displayTag) is not None:
					self.clist.append(getConfigListEntry(" ", ))
					self.clist.append(getConfigListEntry(" " + u'\u00b7' + " DISPLAY SCREENS ID=%s (%s) %s" %(IdString, ModelString, DisplayXY ), ))
					initDisplayScreenDone = False
					for element in displayScreenList:
						elementList = []
						defaultValue = None
						st = xml.find(displayTag)
						if st.find("screens[@name='%s']" %(element)) is not None:
							lst = st.find("screens[@name='%s']" %(element))
							# displayTag[:-1] strips the plural "s" to get the
							# per-theme child tag, e.g. lcdscreenthemes -> lcdscreentheme
							for th in lst.findall(displayTag[:-1]):
								for screen in th.findall("screen"):
									# only offer themes that define this screen
									# for the current display id
									if screen.get("name") == element and screen.get("id") == IdString:
										elementList.append(th.get("name"))
										if th.get("value") == "active":
											defaultValue = th.get("name")
							if len(elementList) > 0:
								if not initDisplayScreenDone:
									initConfigSubDict("DisplayScreens")
									initDisplayScreenDone = True
								config.plugins.MerlinSkinThemes.DisplayScreens[element] = MyConfigSelection(default=defaultValue, choices = elementList)
								self.clist.append(getConfigListEntry(element, config.plugins.MerlinSkinThemes.DisplayScreens[element]))
			######################
			# -- cornerRadius -- #
			######################
			# <cornerradius>
			#   <radius />
			# </cornerradius>
			if xml.find("cornerradius") is not None:
				self.clist.append(getConfigListEntry(" ", ))
				self.clist.append(getConfigListEntry(" " + u'\u00b7' + " CORNERRADIUS", ))
				elementList = []
				defaultValue = None
				cr = xml.find("cornerradius")
				for cradius in cr.findall("radius"):
					elementList.append(cradius.get("name"))
					if cradius.get("value") == "active":
						defaultValue = cradius.get("name")
				if len(elementList) > 0:
					config.plugins.MerlinSkinThemes.CornerRadius = MyConfigSelection(default=defaultValue, choices = elementList)
					self.clist.append(getConfigListEntry("CornerRadius", config.plugins.MerlinSkinThemes.CornerRadius))
		except Exception as error:
			print "Error", error
			print "[MST] themes.xml in " + MerlinSkinThemes.selSkinName + " corrupt!"
			self.clist.append(getConfigListEntry(" ", ))
			self.clist.append(getConfigListEntry(_(">>> ERROR - themes.xml in " + MerlinSkinThemes.selSkinName + " corrupt! <<<"), ))
		self["config"].setList(self.clist)
def buttonGreen(self):
if self.curList == "SkinsList":
# set new skin
sel = self["SkinsList"].getCurrent()
if sel[1][7] == "Default Skin":
skinfile = "skin.xml"
else:
skinfile = "%s/skin.xml" % sel[1][7]
# Dr. Best Infobar position
if fileExists("/usr/share/enigma2/merlin_setup.xml"):
config.merlin2.infobar_position_offset_x.value = 0
config.merlin2.infobar_position_offset_x.save()
config.merlin2.infobar_position_offset_y.value = 0
config.merlin2.infobar_position_offset_y.save()
config.merlin2.movieplayer_infobar_position_offset_x.value = 0
config.merlin2.movieplayer_infobar_position_offset_x.save()
config.merlin2.movieplayer_infobar_position_offset_y.value = 0
config.merlin2.movieplayer_infobar_position_offset_y.save()
config.skin.primary_skin.value = skinfile
config.skin.primary_skin.save()
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to Restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
elif self.curList == "ConfigList":
askBox = self.session.openWithCallback(self.askYN,MessageBox,_("[apply themes] needs time to build new settings\nDo you want to do this now?"), MessageBox.TYPE_YESNO)
askBox.setTitle(_("Apply themes now?"))
def askYN(self, answer):
if answer is True:
setThemes(MerlinSkinThemes.selThemeFile, MerlinSkinThemes.selSkinFile, None)
for x in self["config"].list:
if len(x) > 1:
x[1].save()
configfile.save()
if SkinName == MerlinSkinThemes.selSkinName:
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to Restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
else:
self.session.open(MessageBox, _("Changes to skin " + MerlinSkinThemes.selSkinName + " ready!"), MessageBox.TYPE_INFO)
def MSTScrFix(self, answer):
if answer is True:
curSkin = Tree.parse(MerlinSkinThemes.selSkinFile)
rootSkin = curSkin.getroot()
mstscreen = rootSkin.find("screen[@name='MerlinSkinThemes']")
rootSkin.remove(mstscreen)
XMLindent(rootSkin, 0)
curSkin.write(MerlinSkinThemes.selSkinFile)
self.updateSkinList()
self.session.open(MessageBox, '<screen name="MerlinSkinThemes"...> was removed from selected skin.', MessageBox.TYPE_INFO)
	def buttonRed(self):
		# Red key simply leaves the plugin.
		self.exit()
	def buttonYellow(self):
		# Yellow key is context sensitive:
		# - skin list: create a missing themes.xml, or delete a skin
		#   directory that has no skin.xml
		# - config list: delete the selected design, or save the current
		#   selection as a new design
		if self.curList == "SkinsList":
			if self["SkinsList"].getCurrent()[3][7] == _("no themes.xml"):
				self.createThemes()
			# NOTE(review): getCurrent() is queried again on purpose here -
			# presumably createThemes() can update the entry; confirm.
			if self["SkinsList"].getCurrent()[3][7] == _("no skin.xml"):
				self.delSkinDir()
		elif self.curList == "ConfigList":
			if self["config"].getCurrent()[0] == "Design":
				# delete design
				self.deleteDesign()
			else:
				# save as design
				self.session.openWithCallback(self.saveDesign, InputBox, title=_("Please enter designname!"))
# write a new design into <designs>
def saveDesign(self, designname):
if designname is not None:
designname = designname.strip()
curTree = Tree.parse(MerlinSkinThemes.selThemeFile)
xmlroot = curTree.getroot()
if xmlroot.find("designs") is None:
xmldesigns = Tree.SubElement(xmlroot, "designs")
else:
xmldesigns = xmlroot.find("designs")
# check if design exists
if xmldesigns.find("design[@name='" + designname + "']") is not None:
xmldesigns.remove(xmldesigns.find("design[@name='" + designname + "']"))
# write design
xmldesign = Tree.SubElement(xmldesigns, "design", {"name": designname, "value": "active"})
for element in themeList:
# remark: for now don't handle it here. Needs alignment
if element == "SkinPathTheme":
continue
# check if theme exists
if element.lower() in config.plugins.MerlinSkinThemes.Themes.keys():
if xmlroot.find("%s[@name='" %(element.lower()) + config.plugins.MerlinSkinThemes.Themes[element.lower()].value + "']" ) is not None:
if xmldesign.find(element) is not None:
td = xmldesign.find(element)
td.set("name", config.plugins.MerlinSkinThemes.Themes[element.lower()].value)
else:
Tree.SubElement(xmldesign, element, {"name": config.plugins.MerlinSkinThemes.Themes[element.lower()].value })
# SkinPathThemes
# todo: same check required like for themes? is it really possible to define it in Designs?
if xmlroot.find("skinpaththemes") is not None:
t = xmlroot.find("skinpaththemes")
if t.find("theme[@name='" + config.plugins.MerlinSkinThemes.Themes["skinpaththemes"].value + "']") is not None:
if xmldesign.find("SkinPathTheme") is not None:
td = xmldesign.find("SkinPathTheme")
td.set("name", config.plugins.MerlinSkinThemes.Themes["skinpaththemes"].value)
else:
Tree.SubElement(xmldesign, "SkinPathTheme", {"name": config.plugins.MerlinSkinThemes.Themes["skinpaththemes"].value})
# Screens
if xmlroot.find("screenthemes") is not None:
t = xmlroot.find("screenthemes")
for element in screenList:
if t.find("screens[@name='%s']" %(element)) is not None:
ts = t.find("screens[@name='%s']" %(element))
if ts.find("screentheme[@name='" + config.plugins.MerlinSkinThemes.Screens[element].value + "']") is not None:
Tree.SubElement(xmldesign, element, {"name": config.plugins.MerlinSkinThemes.Screens[element].value})
# LCD Screens
if displayTag is not None:
if | |
# coding=utf-8
# Copyright 2019 The Google NoisyStudent Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Efficient input pipeline using tf.data.Dataset.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import abc
import collections
import functools
import os
import tensorflow as tf
import preprocessing
import efficientnet_builder
FLAGS = tf.app.flags.FLAGS
class TFExampleInput(object):
  '''Base class for input_fn generator.
  Args:
    is_training: `bool` for whether the input is for training
    use_bfloat16: If True, use bfloat16 precision; else use float32.
    num_cores: `int` for the number of TPU cores
    image_size: `int` for image size (both width and height).
    transpose_input: 'bool' for whether to use the double transpose trick
  '''
  # Python-2-style abstract base class (the file imports print_function and
  # uses TF1 APIs); make_source_dataset must be provided by subclasses.
  __metaclass__ = abc.ABCMeta
  def __init__(self,
               is_training,
               use_bfloat16,
               num_cores=8,
               image_size=224,
               transpose_input=False,
               label_minus_one=True):
    self.image_preprocessing_fn = preprocessing.preprocess_image
    self.is_training = is_training
    self.use_bfloat16 = use_bfloat16
    self.num_cores = num_cores
    self.transpose_input = transpose_input
    self.image_size = image_size
    # If True, shift labels down by one (see dataset_parser).
    self.label_minus_one = label_minus_one
    # NOTE(review): input_fn() reads self.cache, which this base class never
    # sets - subclasses (e.g. DataInput) are expected to define it; confirm.
  def set_shapes(self, batch_size, features):
    '''Statically set the batch_size dimension.'''
    if self.is_training and self.transpose_input:
      # Transposed layout (see input_fn's transpose_fn): batch is last.
      features['image'].set_shape(features['image'].get_shape().merge_with(
          tf.TensorShape([None, None, None, batch_size])))
      features['label'].set_shape(features['label'].get_shape().merge_with(
          tf.TensorShape([batch_size])))
    else:
      features['image'].set_shape(features['image'].get_shape().merge_with(
          tf.TensorShape([batch_size, None, None, None])))
      features['label'].set_shape(features['label'].get_shape().merge_with(
          tf.TensorShape([batch_size])))
    return features
  def dataset_parser(self, value):
    '''Parses an image and its label from a serialized ResNet-50 TFExample.
    Args:
      value: serialized string containing an ImageNet TFExample.
    Returns:
      Returns a tuple of (image, label) from the TFExample.
    '''
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, ''),
        'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
    }
    parsed = tf.parse_single_example(value, keys_to_features)
    image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
    image = self.image_preprocessing_fn(
        input_tensor=image_bytes,
        is_training=self.is_training and not FLAGS.remove_aug,
        image_size=self.image_size,
        use_bfloat16=self.use_bfloat16,
        augment_name=FLAGS.augment_name,
        randaug_mag=FLAGS.randaug_mag,
    )
    label = tf.cast(
        tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32)
    # Subtract one so that labels are in [0, 1000).
    if self.label_minus_one:
      label = label - 1
    parsed_results = {'image': image, 'label': label}
    if FLAGS.teacher_model_name:
      # The teacher may run at a different resolution; its copy of the
      # image is preprocessed without training augmentation.
      teacher_image_size = efficientnet_builder.efficientnet_params(
          FLAGS.teacher_model_name)[2]
      if FLAGS.small_image_model:
        teacher_image_size = FLAGS.input_image_size
      teacher_image = self.image_preprocessing_fn(
          input_tensor=image_bytes,
          is_training=False,
          image_size=teacher_image_size,
          use_bfloat16=self.use_bfloat16)
      parsed_results['teacher_image'] = teacher_image
    return parsed_results
  @abc.abstractmethod
  def make_source_dataset(self,
                          index,
                          num_hosts,
                          all_data_dir=None,
                          cache=None,
                          unl=False,
                          num_train_shards=None):
    # Subclasses return a tf.data.Dataset of serialized records for the
    # given host shard; unl selects the unlabeled data source.
    return
  def unl_dst_parser(self, value):
    # Parses one unlabeled-data record: the encoded image plus pseudo-label
    # fields ('probabilities', 'label', 'prob') - presumably written by the
    # teacher model's prediction pipeline; verify against the data writer.
    keys_to_features = {
        'probabilities':
            tf.FixedLenFeature([FLAGS.num_label_classes], tf.float32),
        'label':
            tf.FixedLenFeature([], tf.int64, -1),
        'prob':
            tf.FixedLenFeature([], tf.float32),
        'image/encoded':
            tf.FixedLenFeature((), tf.string, ''),
    }
    parsed = tf.parse_single_example(value, keys_to_features)
    image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
    ori_image = tf.image.decode_jpeg(image_bytes, channels=3)
    # 'default' means: reuse the augmentation of the labeled stream.
    if FLAGS.unl_aug == 'default':
      augment_name = FLAGS.augment_name
    else:
      augment_name = FLAGS.unl_aug
    image = self.image_preprocessing_fn(
        input_tensor=ori_image,
        is_training=self.is_training and not FLAGS.remove_aug,
        image_size=self.image_size,
        use_bfloat16=self.use_bfloat16,
        augment_name=augment_name,
        randaug_mag=FLAGS.randaug_mag,
        is_image_bytes=False,
    )
    label = tf.cast(tf.reshape(parsed['label'], shape=[]), dtype=tf.int32)
    probabilities = tf.cast(
        tf.reshape(parsed['probabilities'], shape=[FLAGS.num_label_classes]),
        dtype=tf.float32)
    top_1_prob = tf.cast(tf.reshape(parsed['prob'], shape=[]), dtype=tf.float32)
    parsed_results = {
        'unl_image': image,
        'unl_label': label,
        'unl_probs': probabilities,
        'top_1_prob': top_1_prob,
    }
    if FLAGS.teacher_model_name:
      teacher_image_size = efficientnet_builder.efficientnet_params(
          FLAGS.teacher_model_name)[2]
      if FLAGS.small_image_model:
        teacher_image_size = FLAGS.input_image_size
      teacher_image = self.image_preprocessing_fn(
          input_tensor=image_bytes,
          is_training=False,
          image_size=teacher_image_size,
          use_bfloat16=self.use_bfloat16,
          augment_name=augment_name,
          randaug_mag=FLAGS.randaug_mag)
      parsed_results['unl_teacher_image'] = teacher_image
    return parsed_results
  def flatten_input(self, *features_list):
    # Merges the labeled and unlabeled feature dicts of one zipped batch
    # into a single dict, concatenating images and labels along the batch
    # dimension (labeled examples first).
    flatten_result = {}
    for features in features_list:
      for key in features:
        assert key not in flatten_result
        flatten_result[key] = features[key]
    new_result = {}
    image_fields = ['image', 'unl_image']
    label_fields = ['label', 'unl_label']
    new_result['image'] = tf.concat(
        [flatten_result[key] for key in image_fields], 0)
    new_result['label'] = tf.concat(
        [flatten_result[key] for key in label_fields], 0)
    new_result['unl_probs'] = flatten_result['unl_probs']
    new_result['top_1_prob'] = flatten_result['top_1_prob']
    if FLAGS.teacher_model_name:
      new_result['teacher_image'] = tf.concat(
          [flatten_result['teacher_image'], flatten_result['unl_teacher_image']],
          0)
    return new_result
  def input_fn(self, params):
    '''input function which provides a single batch for train or eval.
    args:
      params: `dict` of parameters passed from the `tpuestimator`.
        `params['batch_size']` is always provided and should be used as the
        effective batch size.
    returns:
      a `tf.data.dataset` object.
    '''
    # retrieves the batch size for the current shard. the # of shards is
    # computed according to the input pipeline deployment. see
    # tf.contrib.tpu.runconfig for details.
    batch_size = params['batch_size']
    if self.is_training and 'context' in params:
      current_host = params['context'].current_input_fn_deployment()[1]
      num_hosts = params['context'].num_hosts
    else:
      # when evaluation, always let the first host read all data
      current_host = 0
      num_hosts = 1
    # use the fused map-and-batch operation.
    #
    # for xla, we must used fixed shapes. because we repeat the source training
    # dataset indefinitely, we can use `drop_remainder=true` to get fixed-size
    # batches without dropping any training examples.
    #
    # when evaluating, `drop_remainder=true` prevents accidentally evaluating
    # the same image twice by dropping the final batch if it is less than a full
    # batch size. as long as this validation is done with consistent batch size,
    # exactly the same images will be used.
    dataset = self.make_source_dataset(
        current_host, num_hosts, cache=self.cache,
        num_train_shards=FLAGS.num_train_shards)  # Thang
    dataset_parser = self.dataset_parser
    dataset = dataset.apply(
        tf.data.experimental.map_and_batch(
            dataset_parser,
            batch_size=batch_size,
            num_parallel_batches=self.num_cores,
            drop_remainder=True))
    if FLAGS.unlabel_ratio != 0 and self.is_training:
      # size the unlabeled batch relative to the labeled one
      real_unl_bsz = batch_size * FLAGS.label_data_sample_prob * FLAGS.unlabel_ratio
      unl_bsz = int(math.ceil(real_unl_bsz))
      unl_dst = self.make_source_dataset(
          current_host,
          num_hosts,
          all_data_dir=FLAGS.unlabel_data_dir,
          cache=self.cache,
          unl=True)
      unl_dst = unl_dst.map(
          self.unl_dst_parser, num_parallel_calls=self.num_cores * unl_bsz)
      unl_dst = unl_dst.batch(unl_bsz, drop_remainder=True)
      # pair each labeled batch with an unlabeled batch and merge them
      dataset = tf.data.Dataset.zip((dataset, unl_dst))
      dataset = dataset.map(
          self.flatten_input, num_parallel_calls=self.num_cores)
    else:
      unl_bsz = 0
    # transpose for performance on tpu
    if self.transpose_input and self.is_training:
      def transpose_fn(features):
        for key in features:
          if 'image' in key:
            # image and teacher_image
            features[key] = tf.transpose(features[key], [1, 2, 3, 0])
        return features
      dataset = dataset.map(transpose_fn, num_parallel_calls=self.num_cores)
    # assign static batch size dimension
    total_batch_size = batch_size + unl_bsz
    dataset = dataset.map(functools.partial(self.set_shapes, total_batch_size))
    # prefetch overlaps in-feed with training
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
class DataInput(TFExampleInput):
  '''Generates an ImageNet input_fn from a series of TFRecord files.
  The training data is assumed to be in TFRecord format with keys as
  specified in the dataset_parser below, sharded across 1024 files, named
  sequentially:
      train-00000-of-01024
      train-00001-of-01024
      ...
      train-01023-of-01024
  The validation data is in the same format but sharded in 128 files.
  The format of the data required is created by the script at:
  https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
  '''
def __init__(self,
is_training,
use_bfloat16,
transpose_input,
data_dir,
image_size=224,
num_parallel_calls=64,
cache=False,
label_minus_one=True,
subset=None
):
'''create an input from tfrecord files.
args:
is_training: `bool` for whether the input is for training
use_bfloat16: if True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null') or implicitly false
then construct a null pipeline, consisting of empty images
and blank labels.
image_size: `int` for image size (both width and height).
num_parallel_calls: concurrency level to use when reading data from disk.
cache: if True, fill the dataset by repeating from its cache
'''
super(DataInput, self).__init__(
is_training=is_training,
image_size=image_size,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input,
label_minus_one=label_minus_one)
self.data_dir = data_dir
if self.data_dir == 'null' or not self.data_dir:
self.data_dir = None
self.num_parallel_calls = num_parallel_calls
self.cache = cache
self.subset = subset
def _get_null_input(self, data):
'''returns a null image (all black pixels).
args:
data: element of a dataset, ignored in this method, since it produces
the same null image regardless of the element.
returns:
a tensor representing a null image.
'''
del data # unused since output is constant regardless of input
return tf.zeros([self.image_size, self.image_size, 3],
tf.bfloat16 if self.use_bfloat16 else tf.float32)
def dataset_parser(self, value):
'''see base class.'''
if not self.data_dir:
return value, tf.constant(0, tf.int32)
return super(DataInput, self).dataset_parser(value)
def make_source_dataset(self,
index,
num_hosts,
all_data_dir=None,
cache=None,
unl=False,
num_train_shards=None):
'''see base class.'''
if not self.data_dir:
tf.logging.info('undefined data_dir implies null input')
return tf.data.Dataset.range(1).repeat().map(self._get_null_input)
if cache is None:
cache = self.cache
# shuffle the filenames to ensure better randomization.
if all_data_dir is None:
all_data_dir = self.data_dir
file_list = []
for data_dir in all_data_dir.split(';'):
if self.subset:
subset = self.subset
else:
if unl:
subset = 'train'
else:
subset = 'train' if self.is_training else 'validation'
file_pattern = os.path.join(data_dir, '{}*'.format(subset))
new_files = tf.gfile.Glob(file_pattern)
if subset == 'train' and unl:
file_pattern = os.path.join(data_dir, 'extra*')
new_files += tf.gfile.Glob(file_pattern)
tf.logging.info('# files={} for file_pattern: {}'.format(
len(new_files), file_pattern))
file_list += new_files
file_list = sorted(file_list)
# Thang: limit num_train_shards
if self.is_training and num_train_shards:
tf.logging.info('Thang: use %d out of %d shards' % (
num_train_shards, len(file_list)))
file_list = file_list[:num_train_shards]
dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(file_list, dtype=tf.string))
tf.logging.info('file stats for {}, num: {}, all: {}'.format(
'unl' if unl else 'in', len(file_list), str(file_list[:10])))
assert len(file_list) >= num_hosts, 'File list len | |
*args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
HasDefault = None
None = None
Reserved2 = None
Reserved3 = None
Reserved4 = None
ReservedMask = None
RTSpecialName = None
SpecialName = None
value__ = None
class PropertyInfo(MemberInfo, ICustomAttributeProvider, _MemberInfo, _PropertyInfo):
# no doc
def Equals(self, obj):
""" Equals(self: PropertyInfo, obj: object) -> bool """
pass
def GetAccessors(self, nonPublic=None):
"""
GetAccessors(self: PropertyInfo) -> Array[MethodInfo]
GetAccessors(self: PropertyInfo, nonPublic: bool) -> Array[MethodInfo]
"""
pass
def GetConstantValue(self):
""" GetConstantValue(self: PropertyInfo) -> object """
pass
def GetGetMethod(self, nonPublic=None):
"""
GetGetMethod(self: PropertyInfo) -> MethodInfo
GetGetMethod(self: PropertyInfo, nonPublic: bool) -> MethodInfo
"""
pass
def GetHashCode(self):
""" GetHashCode(self: PropertyInfo) -> int """
pass
def GetIndexParameters(self):
""" GetIndexParameters(self: PropertyInfo) -> Array[ParameterInfo] """
pass
def GetOptionalCustomModifiers(self):
""" GetOptionalCustomModifiers(self: PropertyInfo) -> Array[Type] """
pass
def GetRawConstantValue(self):
""" GetRawConstantValue(self: PropertyInfo) -> object """
pass
def GetRequiredCustomModifiers(self):
""" GetRequiredCustomModifiers(self: PropertyInfo) -> Array[Type] """
pass
def GetSetMethod(self, nonPublic=None):
"""
GetSetMethod(self: PropertyInfo) -> MethodInfo
GetSetMethod(self: PropertyInfo, nonPublic: bool) -> MethodInfo
"""
pass
def GetValue(self, obj, *__args):
"""
GetValue(self: PropertyInfo, obj: object) -> object
GetValue(self: PropertyInfo, obj: object, index: Array[object]) -> object
GetValue(self: PropertyInfo, obj: object, invokeAttr: BindingFlags, binder: Binder, index: Array[object], culture: CultureInfo) -> object
"""
pass
def SetValue(self, obj, value, *__args):
""" SetValue(self: PropertyInfo, obj: object, value: object)SetValue(self: PropertyInfo, obj: object, value: object, index: Array[object])SetValue(self: PropertyInfo, obj: object, value: object, invokeAttr: BindingFlags, binder: Binder, index: Array[object], culture: CultureInfo) """
pass
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==y """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __ne__(self, *args): #cannot find CLR method
pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        # CLR stub: pickling support implemented by the runtime.
        pass
Attributes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Attributes(self: PropertyInfo) -> PropertyAttributes
"""
CanRead = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: CanRead(self: PropertyInfo) -> bool
"""
CanWrite = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: CanWrite(self: PropertyInfo) -> bool
"""
GetMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: GetMethod(self: PropertyInfo) -> MethodInfo
"""
IsSpecialName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsSpecialName(self: PropertyInfo) -> bool
"""
MemberType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: MemberType(self: PropertyInfo) -> MemberTypes
"""
PropertyType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyType(self: PropertyInfo) -> Type
"""
SetMethod = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: SetMethod(self: PropertyInfo) -> MethodInfo
"""
class ReflectionContext(object):
    """Auto-generated stub for System.Reflection.ReflectionContext; bodies
    are placeholders, real implementations live in the .NET runtime."""
    def GetTypeForObject(self, value):
        """ GetTypeForObject(self: ReflectionContext, value: object) -> TypeInfo """
        pass
    def MapAssembly(self, assembly):
        """ MapAssembly(self: ReflectionContext, assembly: Assembly) -> Assembly """
        pass
    def MapType(self, type):
        """ MapType(self: ReflectionContext, type: TypeInfo) -> TypeInfo """
        pass
class ReflectionTypeLoadException(SystemException, ISerializable, _Exception):
    """
    ReflectionTypeLoadException(classes: Array[Type], exceptions: Array[Exception])
    ReflectionTypeLoadException(classes: Array[Type], exceptions: Array[Exception], message: str)
    """
    def GetObjectData(self, info, context):
        """ GetObjectData(self: ReflectionTypeLoadException, info: SerializationInfo, context: StreamingContext) """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, classes, exceptions, message=None):
        """
        __new__(cls: type, classes: Array[Type], exceptions: Array[Exception])
        __new__(cls: type, classes: Array[Type], exceptions: Array[Exception], message: str)
        """
        # CLR stub: two constructor overloads collapsed into one signature.
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    LoaderExceptions = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Get: LoaderExceptions(self: ReflectionTypeLoadException) -> Array[Exception]
    """
    Types = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Get: Types(self: ReflectionTypeLoadException) -> Array[Type]
    """
    # Serialization hook; populated by the CLR at runtime.
    SerializeObjectState = None
class ResourceAttributes(Enum, IComparable, IFormattable, IConvertible):
    """ enum (flags) ResourceAttributes, values: Private (2), Public (1) """
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): #cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): #cannot find CLR method
        pass
    def __gt__(self, *args): #cannot find CLR method
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): #cannot find CLR method
        pass
    def __lt__(self, *args): #cannot find CLR method
        pass
    def __ne__(self, *args): #cannot find CLR method
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    # Flag members; concrete integer values are assigned by the CLR loader.
    Private = None
    Public = None
    value__ = None
class ResourceLocation(Enum, IComparable, IFormattable, IConvertible):
    """ enum (flags) ResourceLocation, values: ContainedInAnotherAssembly (2), ContainedInManifestFile (4), Embedded (1) """
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): #cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): #cannot find CLR method
        pass
    def __gt__(self, *args): #cannot find CLR method
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): #cannot find CLR method
        pass
    def __lt__(self, *args): #cannot find CLR method
        pass
    def __ne__(self, *args): #cannot find CLR method
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    # Flag members; concrete integer values are assigned by the CLR loader.
    ContainedInAnotherAssembly = None
    ContainedInManifestFile = None
    Embedded = None
    value__ = None
class RuntimeReflectionExtensions(object):
    """Auto-generated stub for System.Reflection.RuntimeReflectionExtensions.

    All members are static CLR helpers; the Python bodies are placeholders
    and the real implementations live in the .NET runtime.
    """
    @staticmethod
    def GetMethodInfo(delegate):
        """ GetMethodInfo(del: Delegate) -> MethodInfo """
        # The CLR parameter is named 'del', which is a reserved word in
        # Python and made this stub a SyntaxError; renamed to 'delegate'.
        pass
    @staticmethod
    def GetRuntimeBaseDefinition(method):
        """ GetRuntimeBaseDefinition(method: MethodInfo) -> MethodInfo """
        pass
    @staticmethod
    def GetRuntimeEvent(type, name):
        """ GetRuntimeEvent(type: Type, name: str) -> EventInfo """
        pass
    @staticmethod
    def GetRuntimeEvents(type):
        """ GetRuntimeEvents(type: Type) -> IEnumerable[EventInfo] """
        pass
    @staticmethod
    def GetRuntimeField(type, name):
        """ GetRuntimeField(type: Type, name: str) -> FieldInfo """
        pass
    @staticmethod
    def GetRuntimeFields(type):
        """ GetRuntimeFields(type: Type) -> IEnumerable[FieldInfo] """
        pass
    @staticmethod
    def GetRuntimeInterfaceMap(typeInfo, interfaceType):
        """ GetRuntimeInterfaceMap(typeInfo: TypeInfo, interfaceType: Type) -> InterfaceMapping """
        pass
    @staticmethod
    def GetRuntimeMethod(type, name, parameters):
        """ GetRuntimeMethod(type: Type, name: str, parameters: Array[Type]) -> MethodInfo """
        pass
    @staticmethod
    def GetRuntimeMethods(type):
        """ GetRuntimeMethods(type: Type) -> IEnumerable[MethodInfo] """
        pass
    @staticmethod
    def GetRuntimeProperties(type):
        """ GetRuntimeProperties(type: Type) -> IEnumerable[PropertyInfo] """
        pass
    @staticmethod
    def GetRuntimeProperty(type, name):
        """ GetRuntimeProperty(type: Type, name: str) -> PropertyInfo """
        pass
    # Public surface of this extension class, as emitted by the generator.
    __all__ = [
        'GetMethodInfo',
        'GetRuntimeBaseDefinition',
        'GetRuntimeEvent',
        'GetRuntimeEvents',
        'GetRuntimeField',
        'GetRuntimeFields',
        'GetRuntimeInterfaceMap',
        'GetRuntimeMethod',
        'GetRuntimeMethods',
        'GetRuntimeProperties',
        'GetRuntimeProperty',
    ]
class StrongNameKeyPair(object, IDeserializationCallback, ISerializable):
    """
    StrongNameKeyPair(keyPairFile: FileStream)
    StrongNameKeyPair(keyPairArray: Array[Byte])
    StrongNameKeyPair(keyPairContainer: str)
    """
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type, keyPairFile: FileStream)
        __new__(cls: type, keyPairArray: Array[Byte])
        __new__(cls: type, keyPairContainer: str)
        __new__(cls: type, info: SerializationInfo, context: StreamingContext)
        """
        # CLR stub: four constructor overloads collapsed into one signature.
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __repr__(self, *args): #cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    PublicKey = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Get: PublicKey(self: StrongNameKeyPair) -> Array[Byte]
    """
class TargetException(ApplicationException, ISerializable, _Exception):
    """
    TargetException()
    TargetException(message: str)
    TargetException(message: str, inner: Exception)
    """
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, message=None, inner=None):
        """
        __new__(cls: type)
        __new__(cls: type, message: str)
        __new__(cls: type, message: str, inner: Exception)
        __new__(cls: type, info: SerializationInfo, context: StreamingContext)
        """
        # CLR stub: constructor overloads collapsed into one signature.
        pass
    def __reduce_ex__(self, *args): #cannot find CLR method
        pass
    def __str__(self, *args): #cannot find CLR method
        pass
    # Serialization hook; populated by the CLR at runtime.
    SerializeObjectState = None
class TargetInvocationException(ApplicationException, ISerializable, _Exception):
"""
TargetInvocationException(inner: Exception)
TargetInvocationException(message: str, inner: Exception)
"""
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes | |
<gh_stars>0
import numpy as np
from math import isfinite, isnan, sqrt, log
from numpy import nan
from numba import jit
def _ref(arr, period):
bc = len(arr)
result = np.full(bc, np.nan)
for i in range(max(0, -period), min(bc, bc - period)):
if not isfinite(arr[i + period]):
# Replace all NaN, Inf, -Inf by NaN!
result[i] = nan
else:
result[i] = arr[i + period]
return result
def _iif(cond, if_true_arr, if_false_arr):
bc = len(cond)
result = np.full(bc, np.nan)
for i in range(bc):
if not isfinite(cond[i]):
# Replace all NaN, Inf, -Inf by NaN!
result[i] = nan
else:
if cond[i] == 1.0 or cond[i] is True:
if isfinite(if_true_arr[i]):
result[i] = if_true_arr[i]
elif cond[i] == 0.0 or cond[i] is False:
if isfinite(if_false_arr[i]):
result[i] = if_false_arr[i]
else:
raise ValueError("Only True / False / 1.0 / 0.0 and NaN values are allowed for 'cond' array")
return result
def _hhv(arr, period):
if period <= 0:
raise ValueError()
bc = len(arr)
result = np.full(bc, nan)
cur = nan
icur = -1
for i in range(bc):
if not isfinite(arr[i]):
continue
if arr[i] > cur or not isfinite(cur):
cur = arr[i]
icur = i
else:
if i - icur >= period:
cur = nan
icur = i - period + 1
for k in range(i - period + 1, i + 1):
if not isfinite(arr[k]):
continue
if k == i - period + 1 or arr[k] > cur or not isfinite(cur):
cur = arr[k]
icur = k
if i >= period - 1:
result[i] = cur
return result
def _llv(arr, period):
if period <= 0:
raise ValueError()
bc = len(arr)
result = np.full(bc, nan)
cur = nan
icur = -1
for i in range(bc):
if not isfinite(arr[i]):
continue
if arr[i] < cur or not isfinite(cur):
cur = arr[i]
icur = i
else:
if i - icur >= period:
cur = nan
icur = i - period + 1
for k in range(i - period + 1, i + 1):
if not isfinite(arr[k]):
continue
if k == i - period + 1 or arr[k] < cur or not isfinite(cur):
cur = arr[k]
icur = k
if i >= period - 1:
result[i] = cur
return result
def _highest_since(arr, cond):
    """Highest value of *arr* since *cond* last fired (was 1.0/True).

    The running maximum resets on each bar where cond fires.  Before the
    first firing, and on bars where either arr[i] or cond[i] is
    non-finite, the output is NaN.  cond values other than
    0.0/1.0/False/True/NaN (e.g. Inf or 2.0) raise ValueError.
    """
    bc = len(arr)
    result = np.full(bc, nan)
    cur = nan          # running maximum since the last condition firing
    has_cond = False   # True once cond has fired at least once
    for i in range(bc):
        if cond[i] == 1.0 or cond[i] is True:
            # Condition fires: restart tracking from this bar.
            cur = nan
            has_cond = True
            if isfinite(arr[i]):
                cur = arr[i]
        elif cond[i] == 0.0 or cond[i] is False or isnan(cond[i]):
            if not isnan(cond[i]):
                # Extend the maximum only on a real (non-NaN) bar and only
                # after the condition has fired at least once.
                if isfinite(arr[i]) and (arr[i] > cur or not isfinite(cur)) and has_cond:
                    cur = arr[i]
        else:
            raise ValueError("Only True / False / 1.0 / 0.0 and NaN values are allowed for 'cond' array")
        if isfinite(arr[i]) and isfinite(cond[i]):
            result[i] = cur
        else:
            result[i] = nan
    return result
def _lowest_since(arr, cond):
    """Lowest value of *arr* since *cond* last fired (was 1.0/True).

    Mirror of _highest_since: the running minimum resets on each bar
    where cond fires; output is NaN before the first firing and on bars
    where arr[i] or cond[i] is non-finite.  cond values other than
    0.0/1.0/False/True/NaN raise ValueError.
    """
    bc = len(arr)
    result = np.full(bc, nan)
    cur = nan          # running minimum since the last condition firing
    has_cond = False   # True once cond has fired at least once
    for i in range(bc):
        if cond[i] == 1.0 or cond[i] is True:
            # Condition fires: restart tracking from this bar.
            cur = nan
            has_cond = True
            if isfinite(arr[i]):
                cur = arr[i]
        elif cond[i] == 0.0 or cond[i] is False or isnan(cond[i]):
            if not isnan(cond[i]):
                # Extend the minimum only on a real (non-NaN) bar and only
                # after the condition has fired at least once.
                if isfinite(arr[i]) and (arr[i] < cur or not isfinite(cur)) and has_cond:
                    cur = arr[i]
        else:
            raise ValueError("Only True / False / 1.0 / 0.0 and NaN values are allowed for 'cond' array")
        if isfinite(arr[i]) and isfinite(cond[i]):
            result[i] = cur
        else:
            result[i] = nan
    return result
def _bars_since(cond):
bc = len(cond)
result = np.full(bc, nan)
icur = -1
for i in range(bc):
if not isfinite(cond[i]):
continue
if cond[i] == 1.0 or cond[i] is True:
icur = i
result[i] = 0
elif cond[i] == 0.0 or cond[i] is False:
if icur >= 0:
result[i] = i - icur
else:
raise ValueError("Only True / False / 1.0 / 0.0 and NaN values are allowed for 'cond' array")
return result
def _cross_up(arr, arr_threshold):
bc = len(arr_threshold)
result = np.full(bc, nan)
for i in range(1, bc):
if isfinite(arr_threshold[i]) and isfinite(arr[i]) and isfinite(arr_threshold[i - 1]) and isfinite(arr[i - 1]):
if arr[i - 1] < arr_threshold[i - 1] and arr[i] > arr_threshold[i]:
result[i] = 1.0
else:
result[i] = 0.0
return result
def _cross_dn(arr, arr_threshold):
bc = len(arr_threshold)
result = np.full(bc, nan)
for i in range(1, bc):
if isfinite(arr_threshold[i]) and isfinite(arr[i]) and isfinite(arr_threshold[i - 1]) and isfinite(arr[i - 1]):
if arr[i - 1] > arr_threshold[i - 1] and arr[i] < arr_threshold[i]:
result[i] = 1.0
else:
result[i] = 0.0
return result
def _sum(arr, period):
if period <= 0:
raise ValueError('Period must be positive')
bc = len(arr)
result = np.full(bc, nan)
_sum_total = 0.0
_valid_cnt = 0
for i in range(bc):
if i < period:
if isfinite(arr[i]):
_sum_total += arr[i]
_valid_cnt += 1
if i == period - 1 and _valid_cnt > 0:
result[i] = _sum_total
else:
if isfinite(arr[i - period]):
_sum_total -= arr[i - period]
_valid_cnt -= 1
if isfinite(arr[i]):
_sum_total += arr[i]
_valid_cnt += 1
if _valid_cnt > 0:
result[i] = _sum_total
return result
def _ma(arr, period):
if period <= 0:
raise ValueError('Period must be positive')
bc = len(arr)
result = np.full(bc, nan)
_sum_total = 0.0
_valid_cnt = 0
for i in range(bc):
if i < period:
if isfinite(arr[i]):
_sum_total += arr[i]
_valid_cnt += 1
if i == period - 1 and _valid_cnt > 0:
result[i] = _sum_total / _valid_cnt
else:
if isfinite(arr[i - period]):
_sum_total -= arr[i - period]
_valid_cnt -= 1
if isfinite(arr[i]):
_sum_total += arr[i]
_valid_cnt += 1
if _valid_cnt > 0:
result[i] = _sum_total / _valid_cnt
return result
def _stdev(arr, period):
MIN_STDEV_PERIODS = 5
if period < MIN_STDEV_PERIODS:
raise ValueError('Period must be >= 5')
bc = len(arr)
result = np.full(bc, nan)
_sum_total = 0.0
_valid_cnt = 0
_disp_sum = 0.0
for i in range(bc):
if i < period:
if isfinite(arr[i]):
_sum_total += arr[i]
_valid_cnt += 1
_disp_sum += arr[i] ** 2
if i == period - 1 and _valid_cnt >= MIN_STDEV_PERIODS:
_avg = (_sum_total / _valid_cnt)
result[i] = sqrt((_disp_sum / _valid_cnt) - _avg ** 2)
else:
if isfinite(arr[i - period]):
_sum_total -= arr[i - period]
_disp_sum -= arr[i - period] ** 2
_valid_cnt -= 1
if isfinite(arr[i]):
_sum_total += arr[i]
_disp_sum += arr[i] ** 2
_valid_cnt += 1
if _valid_cnt >= MIN_STDEV_PERIODS:
_avg = (_sum_total / _valid_cnt)
result[i] = sqrt((_disp_sum / _valid_cnt) - _avg ** 2)
return result
def _sum_since(arr, cond, first_is_zero=False):
    """Running sum of *arr* since *cond* last fired (was 1.0/True).

    When cond fires, the accumulator restarts: at arr[i] by default, or
    at 0.0 when first_is_zero is True (i.e. the firing bar itself is
    excluded).  Bars with non-finite cond keep their output NaN; bars
    with non-finite arr do not contribute to the sum.  cond values other
    than 0.0/1.0/False/True/NaN raise ValueError.
    """
    bc = len(cond)
    result = np.full(bc, nan)
    icur = -1       # index of the most recent firing, -1 if none yet
    cur_sum = nan   # running sum since the last firing
    for i in range(bc):
        if not isfinite(cond[i]):
            # Non-finite condition: skip the bar entirely (output stays NaN).
            continue
        if cond[i] == 1.0 or cond[i] is True:
            icur = i
            if isfinite(arr[i]):
                if first_is_zero:
                    cur_sum = 0.0
                else:
                    cur_sum = arr[i]
            else:
                cur_sum = nan
        elif cond[i] == 0.0 or cond[i] is False or isnan(cond[i]):
            # NOTE: the isnan(cond[i]) arm is unreachable here -- NaN cond
            # was already skipped by the isfinite() guard above.
            if not isnan(cond[i]) and isfinite(arr[i]) and icur >= 0:
                if isfinite(cur_sum):
                    cur_sum += arr[i]
                else:
                    cur_sum = arr[i]
        else:
            raise ValueError("Only True / False / 1.0 / 0.0 and NaN values are allowed for 'cond' array")
        if isfinite(arr[i]):
            result[i] = cur_sum
    return result
def _zscore(arr, period):
MIN_STDEV_PERIODS = 5
if period < MIN_STDEV_PERIODS:
raise ValueError('Period must be >= 5')
bc = len(arr)
result = np.full(bc, nan)
_sum_total = 0.0
_valid_cnt = 0
_disp_sum = 0.0
_sd = 0.0
for i in range(bc):
if i < period:
if isfinite(arr[i]):
_sum_total += arr[i]
_valid_cnt += 1
_disp_sum += arr[i] ** 2
if i == period - 1 and _valid_cnt >= MIN_STDEV_PERIODS:
_avg = (_sum_total / _valid_cnt)
_sd = sqrt((_disp_sum / _valid_cnt) - _avg ** 2)
if _sd == 0.0:
# in case of no dispersion like for arr [1, 1, 1, 1, 1] the zScore must be 0.0
result[i] = 0.0
else:
result[i] = (arr[i] - _avg) / _sd
else:
if isfinite(arr[i - period]):
_sum_total -= arr[i - period]
_disp_sum -= arr[i - period] ** 2
_valid_cnt -= 1
if isfinite(arr[i]):
_sum_total += arr[i]
_disp_sum += arr[i] ** 2
_valid_cnt += 1
if _valid_cnt >= MIN_STDEV_PERIODS:
_avg = (_sum_total / _valid_cnt)
_sd = sqrt((_disp_sum / _valid_cnt) - _avg ** 2)
if _sd == 0.0:
# in case of no dispersion like for arr | |
\
'<tr>' + \
'<td>Supplies expense</td>' + \
'<td>' + r22c1 + '</td>' + \
'<td>' + r22c2 + '</td>' + \
'<td>' + r22c3 + '</td>' + \
'<td>' + r22c4 + '</td>' + \
'<td>' + r22c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Travel expense</td>' + \
'<td>' + r23c1 + '</td>' + \
'<td>' + r23c2 + '</td>' + \
'<td>' + r23c3 + '</td>' + \
'<td>' + r23c4 + '</td>' + \
'<td>' + r23c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Other expense</td>' + \
'<td>' + r24c1 + '</td>' + \
'<td>' + r24c2 + '</td>' + \
'<td>' + r24c3 + '</td>' + \
'<td>' + r24c4 + '</td>' + \
'<td>' + r24c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Income total</td>' + \
'<td>' + r25c1 + '</td>' + \
'<td>' + r25c2 + '</td>' + \
'<td>' + r25c3 + '</td>' + \
'<td>' + r25c4 + '</td>' + \
'<td>' + r25c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Expense total</td>' + \
'<td>' + r26c1 + '</td>' + \
'<td>' + r26c2 + '</td>' + \
'<td>' + r26c3 + '</td>' + \
'<td>' + r26c4 + '</td>' + \
'<td>' + r26c5 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Monthly budget',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] sur [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="monthly_budget.pdf"'
return response
def project_budget(request):
    """Render the project-budget form page.

    NOTE(review): `render` presumably comes from django.shortcuts (import
    not visible in this chunk) -- confirm the template path exists.
    """
    return render(request, 'reporting/project_budget.html')
def generate_html_to_pdf_project_budget(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c6 = request.POST.get('r1c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c7 = request.POST.get('r1c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c8 = request.POST.get('r1c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c9 = request.POST.get('r1c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c6 = request.POST.get('r2c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c7 = request.POST.get('r2c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c8 = request.POST.get('r2c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c9 = request.POST.get('r2c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c6 = request.POST.get('r3c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c7 = request.POST.get('r3c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c8 = request.POST.get('r3c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c9 = request.POST.get('r3c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c6 = request.POST.get('r4c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c7 = request.POST.get('r4c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c8 = request.POST.get('r4c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c9 = request.POST.get('r4c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c6 = request.POST.get('r5c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c7 = request.POST.get('r5c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c8 = request.POST.get('r5c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c9 = request.POST.get('r5c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c6 = request.POST.get('r6c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c7 = request.POST.get('r6c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c8 = request.POST.get('r6c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c9 = request.POST.get('r6c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c6 = request.POST.get('r7c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c7 = request.POST.get('r7c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c8 = request.POST.get('r7c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c9 = request.POST.get('r7c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c6 = request.POST.get('r8c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c7 = request.POST.get('r8c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c8 = request.POST.get('r8c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c9 = request.POST.get('r8c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c6 = request.POST.get('r9c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c7 = request.POST.get('r9c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c8 = request.POST.get('r9c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c9 = request.POST.get('r9c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c4 = request.POST.get('r10c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c5 = request.POST.get('r10c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c6 = request.POST.get('r10c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c7 = request.POST.get('r10c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c8 = request.POST.get('r10c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c9 = request.POST.get('r10c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c1 = request.POST.get('r11c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c2 = request.POST.get('r11c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c3 = request.POST.get('r11c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c4 = request.POST.get('r11c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c5 = request.POST.get('r11c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c6 = request.POST.get('r11c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c7 = request.POST.get('r11c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c8 = request.POST.get('r11c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c9 = request.POST.get('r11c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c1 = request.POST.get('r12c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c2 = request.POST.get('r12c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c3 = request.POST.get('r12c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c4 = request.POST.get('r12c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c5 = request.POST.get('r12c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c6 = request.POST.get('r12c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c7 = request.POST.get('r12c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c8 = request.POST.get('r12c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c9 = request.POST.get('r12c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c1 = request.POST.get('r13c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c2 = request.POST.get('r13c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c3 = request.POST.get('r13c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c4 = request.POST.get('r13c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c5 = request.POST.get('r13c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c6 = request.POST.get('r13c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c7 = request.POST.get('r13c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c8 = request.POST.get('r13c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c9 = request.POST.get('r13c9').replace('\t', ' ').replace('\n', ' | |
linker to force
# everything onto the dynamic symbol table. Since this only affects
# object files from sources immediately owned by `cpp_binary` rules,
# this shouldn't have much of a performance issue.
buck_platform = platform_utils.get_buck_platform_for_base_path(base_path)
if (cpp_common.get_link_style() == 'shared' and
self.read_shlib_interfaces(buck_platform) == 'defined_only'):
ldflags.append('-Wl,--export-dynamic')
return ldflags
def get_ldflags(
        self,
        base_path,
        name,
        rule_type,
        binary=False,
        deployable=None,
        strip_mode=None,
        build_info=False,
        lto=False,
        platform=None):
    """
    Return linker flags to apply to links.

    Args:
        base_path: base path of the rule being linked.
        name: name of the rule being linked.
        rule_type: the fbcode rule type (used for build-info flags).
        binary: whether this link produces a binary.
        deployable: whether the output is deployable; defaults to `binary`.
        strip_mode: symbol-strip mode; looked up from config when None.
        build_info: whether to embed build-info linker flags.
        lto: whether this rule opted into LTO (gcc fat-LTO only).
        platform: target platform; required iff `deployable`.

    Returns:
        A list of linker flag strings.
    """
    # Default `deployable` to whatever `binary` was set to, as few rule
    # types make a distinction.
    if deployable is None:
        deployable = binary
    # The `binary`, `build_info`, and `platform` params only make sense for
    # "deployable" rules.
    assert not binary or deployable
    assert not lto or deployable
    assert not build_info or deployable
    assert not (deployable ^ (platform is not None))
    ldflags = []
    # 1. Add in build-mode ldflags.
    build_mode = _build_mode.get_build_mode_for_base_path(base_path)
    if build_mode is not None:
        ldflags.extend(build_mode.ld_flags)
    # 2. Add flag to strip debug symbols.
    if strip_mode is None:
        strip_mode = self.get_strip_mode(base_path, name)
    strip_ldflag = self.get_strip_ldflag(strip_mode)
    if strip_ldflag is not None:
        ldflags.append(strip_ldflag)
    # 3. Add in flags specific for linking a binary.
    if binary:
        ldflags.extend(
            self.get_binary_ldflags(base_path, name, rule_type, platform))
    # 4. Add in the build info linker flags.
    # In OSS, we don't need to actually use the build info (and the
    # linker will not understand these options anyways) so skip in that case.
    if build_info and self._context.config.get_use_build_info_linker_flags():
        ldflags.extend(
            self.get_build_info_linker_flags(
                base_path,
                name,
                rule_type,
                platform,
                compiler.get_compiler_for_current_buildfile()))
    # 5. If enabled, add in LTO linker flags.
    if cpp_flags.get_lto_is_enabled():
        compiler.require_global_compiler(
            'can only use LTO in modes with a fixed global compiler')
        if self._context.global_compiler == 'clang':
            if self._context.lto_type not in ('monolithic', 'thin'):
                raise ValueError(
                    'clang does not support {} LTO'
                    .format(self._context.lto_type))
            # Clang does not support fat LTO objects, so we build everything
            # as IR only, and must also link everything with -flto.
            ldflags.append('-flto=thin' if self._context.lto_type ==
                           'thin' else '-flto')
            # HACK(marksan): don't break HFSort/"Hot Text" (t19644410)
            ldflags.append('-Wl,-plugin-opt,-function-sections')
            ldflags.append('-Wl,-plugin-opt,-profile-guided-section-prefix=false')
            # Equivalent of -fdebug-types-section for LLVM backend
            ldflags.append('-Wl,-plugin-opt,-generate-type-units')
        else:
            assert self._context.global_compiler == 'gcc'
            if self._context.lto_type != 'fat':
                # BUG FIX: this error path previously formatted with
                # `cxx_mode.lto_type`, an undefined name, so it raised
                # NameError instead of the intended ValueError.
                raise ValueError(
                    'gcc does not support {} LTO'
                    .format(self._context.lto_type))
            # GCC has fat LTO objects, where we build everything as both IR
            # and object code and then conditionally opt-in here, at link-
            # time, based on "enable_lto" in the TARGETS file.
            if lto:
                ldflags.extend(cpp_flags.get_gcc_lto_ldflags(base_path, platform))
            else:
                ldflags.append('-fno-lto')
    # 6. Add in command-line ldflags.
    ldflags.extend(cpp_flags.get_extra_ldflags())
    return ldflags
def create_cxx_build_info_rule(
        self,
        base_path,
        name,
        rule_type,
        platform,
        linker_flags=(),
        static=True,
        visibility=None):
    """
    Create rules to generate a C/C++ library with build info.

    Emits two rules: a `genrule` producing a C source file filled from
    the CXX_BUILD_INFO_TEMPLATE, and a `cxx_library` compiling it.

    Returns a tuple of (the library's RootRuleTarget, list of Rules).
    """
    rules = []
    # Setup a rule to generate the build info C file.
    source_name = name + '-cxx-build-info'
    # Fill the C-source template from the key/value build-info mapping.
    info = CXX_BUILD_INFO_TEMPLATE.format(
        **self.get_build_info(
            base_path,
            name,
            rule_type,
            platform))
    source_attrs = collections.OrderedDict()
    source_attrs['name'] = source_name
    if visibility is not None:
        source_attrs['visibility'] = visibility
    source_attrs['out'] = source_name + '.c'
    # Shell-quote the generated source so it survives `echo` verbatim.
    source_attrs['cmd'] = (
        'mkdir -p `dirname $OUT` && echo {0} > $OUT'
        .format(pipes.quote(info)))
    rules.append(Rule('genrule', source_attrs))
    # Setup a rule to compile the build info C file into a library.
    lib_name = name + '-cxx-build-info-lib'
    lib_attrs = collections.OrderedDict()
    lib_attrs['name'] = lib_name
    if visibility is not None:
        lib_attrs['visibility'] = visibility
    lib_attrs['srcs'] = [':' + source_name]
    lib_attrs['compiler_flags'] = cpp_flags.get_extra_cflags()
    # -nodefaultlibs: this tiny library must not pull in default libs.
    lib_attrs['linker_flags'] = (
        list(cpp_flags.get_extra_ldflags()) +
        ['-nodefaultlibs'] +
        list(linker_flags))
    # Setup platform default for compilation DB, and direct building.
    buck_platform = platform_utils.get_buck_platform_for_base_path(base_path)
    lib_attrs['default_platform'] = buck_platform
    lib_attrs['defaults'] = {'platform': buck_platform}
    # Clang does not support fat LTO objects, so we build everything
    # as IR only, and must also link everything with -flto.
    if cpp_flags.get_lto_is_enabled():
        lib_attrs['platform_linker_flags'] = (
            src_and_dep_helpers.format_platform_param(
                partial.make(_lto_linker_flags_partial)))
    if static:
        # Use link_whole to make sure the build info symbols are always
        # added to the binary, even if the binary does not refer to them.
        lib_attrs['link_whole'] = True
        # Use force_static so that the build info symbols are always put
        # directly in the main binary, even if dynamic linking is used.
        lib_attrs['force_static'] = True
    rules.append(Rule('cxx_library', lib_attrs))
    return target_utils.RootRuleTarget(base_path, lib_name), rules
def get_build_info(self, base_path, name, rule_type, platform):
    """
    Build the ordered key/value mapping of build-info fields for a rule.

    Core tools always get default values so their output stays stable;
    everything else reads the fields from buck config.
    """
    if core_tools.is_core_tool(base_path, name):
        # Ignore user-provided build-info args for a set of core
        # targets and just return defaults (as if the user hadn't
        # provided build-info in the first place).
        read_config = lambda info, field, default: default
    else:
        read_config = self._context.buck_ops.read_config
    # Keep entries in this exact order -- consumers rely on it.
    entries = [
        ('build_tool', 'buck'),
        ('build_mode', self._context.mode),
        ('compiler', compiler.get_compiler_for_current_buildfile()),
        ('epochtime', int(read_config('build_info', 'epochtime', '0'))),
        ('host', read_config('build_info', 'host', '')),
        ('package_name', read_config('build_info', 'package_name', '')),
        ('package_version', read_config('build_info', 'package_version', '')),
        ('package_release', read_config('build_info', 'package_release', '')),
        ('path', read_config('build_info', 'path', '')),
        ('platform', platform),
        ('revision', read_config('build_info', 'revision', '')),
        ('revision_epochtime',
         int(read_config('build_info', 'revision_epochtime', '0'))),
        ('rule', 'fbcode:' + base_path + ':' + name),
        ('rule_type', rule_type),
        ('time', read_config('build_info', 'time', '')),
        ('time_iso8601', read_config('build_info', 'time_iso8601', '')),
        ('upstream_revision',
         read_config('build_info', 'upstream_revision', '')),
        ('upstream_revision_epochtime',
         int(read_config('build_info', 'upstream_revision_epochtime', '0'))),
        ('user', read_config('build_info', 'user', '')),
    ]
    return collections.OrderedDict(entries)
def get_buck_out_path(self):
    """Return the configured buck output directory (default: "buck-out")."""
    read_config = self._context.buck_ops.read_config
    return read_config('project', 'buck_out', 'buck-out')
def get_gen_path(self):
    """Return the path of buck's "gen" directory under the buck-out root."""
    buck_out = self.get_buck_out_path()
    return os.path.join(buck_out, 'gen')
def get_bin_path(self):
    """Return the path of buck's "bin" directory under the buck-out root."""
    buck_out = self.get_buck_out_path()
    return os.path.join(buck_out, 'bin')
def get_fbcode_dir_from_gen_dir(self):
    """Return the relative path from the "gen" dir back up to the repo root."""
    gen_path = self.get_gen_path()
    return os.path.relpath(os.curdir, gen_path)
def copy_rule(self, src, name, out=None, propagate_versions=False, visibility=None):
    """
    Return a `genrule` whose output is a copy of `src`.

    When `out` is None the output file is named after the rule itself.
    """
    attrs = collections.OrderedDict()
    attrs['name'] = name
    if visibility is not None:
        attrs['visibility'] = visibility
    attrs['out'] = name if out is None else out
    attrs['cmd'] = 'mkdir -p `dirname $OUT` && cp {src} $OUT'.format(src=src)
    # If this rule needs to be part of the versioned sub-tree of its
    # consumer, use a `cxx_genrule` which propagates versions (e.g. this
    # is useful for cases like `hsc2hs` which should use a dep tree which
    # is part of the same version sub-tree as the top-level binary).
    if propagate_versions:
        return Rule('cxx_genrule', attrs)
    return Rule('genrule', attrs)
def generate_merge_tree_rule(
        self,
        base_path,
        name,
        paths,
        deps,
        visibility=None):
    """
    Generate a rule which creates an output dir with the given paths merged
    with the merged directories of its dependencies.
    """
    # Pull in each dependency's merged directory first, then lay our own
    # sources on top of it.
    cmds = ['rsync -a $(location {})/ "$OUT"'.format(dep)
            for dep in sorted(deps)]
    for src in sorted(paths):
        src_name = src_and_dep_helpers.get_source_name(src)
        dst = os.path.join('"$OUT"', base_path, src_name)
        cmds.extend([
            'mkdir -p {}'.format(os.path.dirname(dst)),
            'cp {} {}'.format(src_name, dst),
        ])
    attrs = collections.OrderedDict()
    attrs['name'] = name
    if visibility is not None:
        attrs['visibility'] = visibility
    attrs['out'] = os.curdir
    attrs['srcs'] = sorted(paths)
    attrs['cmd'] = ' && '.join(cmds)
    return Rule('genrule', attrs)
def get_tp2_build_dat(self, base_path):
    """
    Load the TP2 metadata for the TP2 project at the given base path,
    memoizing the parsed result per base path.
    """
    cached = self._tp2_build_dat_cache.get(base_path)
    if cached is not None:
        return cached
    # NOTE(review): `read_config` here is a bare global, unlike other
    # methods which go through self._context.buck_ops.read_config --
    # confirm this is intentional.
    fbsource_root = read_config('fbsource', 'repo_relative_path', '..')
    build_dat_name = os.path.join(
        fbsource_root, "fbcode", base_path, 'build.dat')
    # Register the metadata file as a build-file dependency so changes
    # to it trigger re-evaluation.
    self._context.buck_ops.add_build_file_dep('fbcode//' + build_dat_name)
    with open(build_dat_name) as f:
        build_dat = json.load(f)
    self._tp2_build_dat_cache[base_path] = build_dat
    return build_dat
def get_tp2_platform(self, base_path):
    """Return the fbcode platform this TP2 project was built for."""
    build_dat = self.get_tp2_build_dat(base_path)
    return build_dat['platform']
def get_tp2_project_builds(self, base_path, relevant_deps=None):
"""
Return the implicit project deps and their corresponding versions for
each build of the TP2 project at the given base path.
"""
build_dat = self.get_tp2_build_dat(base_path)
default_versions = (
{p: v[0] for p, v in build_dat['dependencies'].items()})
def should_skip_build(build_dep_versions):
"""
Returns whether this project build should skipped, which happens
when using non-default versions of irrelevant dependencies.
"""
# If the user passed in an explicit relevant dep list, verify that
# any deps this project build was against were either in the
# relevant dep list or were using default versions.
if relevant_deps is not None:
for dep, version in build_dep_versions.items():
if (dep not in relevant_deps and
version != default_versions[dep]):
return True
return False
project_builds = collections.OrderedDict()
for build, versions in sorted(build_dat['builds'].items()):
# If this buils isnt usable, skip it.
if should_skip_build(versions):
continue
build_deps = collections.OrderedDict()
for project, version in sorted(versions.items()):
# If this isn't a relevant, ignore it.
if relevant_deps is not None and project not in relevant_deps:
continue
pdep = (
target_utils.target_to_label(
third_party.get_tp2_project_target(project),
platform=build_dat['platform']))
build_deps[pdep] = version
project_builds[build] = (
Tp2ProjectBuild(
project_deps=build_deps,
subdir=build,
versions=versions))
# If we have just one build, `buckify_tp2` will inline its contents,
# so update | |
import math
import numpy as np
import os
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
# import resnet
# from efficientnet.model import EfficientNet
import models.segtran_modified.code.networks.aj_i3d.aj_i3d as aj_i3d
from models.segtran_modified.code.networks.aj_i3d.aj_i3d import InceptionI3d
from models.segtran_modified.code.networks.segtran_shared import SegtranConfig, bb2feat_dims, SegtranFusionEncoder, CrossAttFeatTrans, ExpandedFeatTrans, \
SegtranInitWeights, gen_all_indices
from models.segtran_modified.code.train_util import batch_norm
# Only application-specific settings and overrode settings.
class Segtran3dConfig(SegtranConfig):
    """3D-specific Segtran configuration.

    Only application-specific settings and overridden settings; everything
    else is inherited from SegtranConfig.
    """

    def __init__(self):
        super(Segtran3dConfig, self).__init__()
        self.backbone_type = 'i3d'          # only i3d is supported.
        self.use_pretrained = True
        self.bb_feat_dims = bb2feat_dims[self.backbone_type]
        self.num_translayers = 1
        # Score scale indexed by use_squeezed_transformer:
        # vanilla transformer (False -> index 0): 1.,
        # squeezed transformer (True -> index 1): 2.
        self.cross_attn_score_scales = [1., 2.]
        # Both in_fpn_scheme and out_fpn_scheme default to 'AN'.
        # NA: normalize first, then add. AN: add first, then normalize.
        self.set_fpn_layers('default', '34', '1234', 'AN', 'AN',
                            translayer_compress_ratios=[1, 1], do_print=False)
        # Configure backbone to generate x2 feature maps.
        self.bb_feat_upsize = True
        # If in FPN uses BN, it performs slightly worse than using GN.
        self.in_fpn_use_bn = False
        # If out FPN uses BN, it performs worse than using GN.
        self.out_fpn_use_bn = False
        # Converting resnet BN to GN reduces performance.
        self.resnet_bn_to_gn = False
        self.G = 8                          # number of groups in all group norms.
        self.pos_dim = 3
        self.input_scale = (1., 1., 1.)
        self.num_classes = 2
        # Architecture settings
        self.num_attractors = 1024
        self.orig_in_channels = 1
        # 3D specific settings.
        # inchan_to3_scheme:
        #   avgto3 (average central two slices, yield 3 slices), only for
        #       effective slices == 2 or 4;
        #   stemconv (modify stem conv to take 2-channel input), not
        #       implemented for i3d;
        #   dup3 (duplicate 1 channel 3 times to fake RGB channels);
        #   bridgeconv (a conv to convert C > 1 channels to 3);
        #   None (only if effective channels == 3), do nothing.
        self.inchan_to3_scheme = 'bridgeconv'
        self.D_groupsize = 1                # Depth grouping: 1, 2, 4.
        self.D_pool_K = 2                   # Depth pooling after infpn
        self.out_fpn_upsampleD_scheme = 'conv'  # conv, interpolate, none

    def set_fpn_layers(self, config_name, in_fpn_layers, out_fpn_layers,
                       in_fpn_scheme, out_fpn_scheme,
                       translayer_compress_ratios, do_print=True):
        """Configure the input/output FPN layers and transformer dims.

        `in_fpn_layers`/`out_fpn_layers` are digit strings such as '34'.
        Terminates the process when the layer sets are incompatible.
        """
        self.in_fpn_layers = [int(layer) for layer in in_fpn_layers]
        self.out_fpn_layers = [int(layer) for layer in out_fpn_layers]
        # out_fpn_layers cannot be a subset of in_fpn_layers, like: in=234, out=34.
        # in_fpn_layers could be a subset of out_fpn_layers, like: in=34, out=234.
        if self.out_fpn_layers[-1] > self.in_fpn_layers[-1]:
            print("in_fpn_layers=%s is not compatible with out_fpn_layers=%s" %
                  (self.in_fpn_layers, self.out_fpn_layers))
            # BUG FIX: was exit(0), which reported success to the shell on
            # a fatal configuration error; exit with a non-zero status.
            exit(1)
        self.orig_in_feat_dim = self.bb_feat_dims[self.in_fpn_layers[-1]]
        self.translayer_compress_ratios = translayer_compress_ratios
        assert len(translayer_compress_ratios) == self.num_translayers + 1, \
            "Length of {} != 1 + num_translayers {}".format(
                translayer_compress_ratios, self.num_translayers)
        # Convert adjacent ratios to absolute ratios:
        # 1., 2., 2., 2. => 1, 2., 4., 8.
        translayer_compress_ratios = np.cumprod(translayer_compress_ratios)
        # Input/output dimensions of each transformer layer.
        # Could be different from self.orig_in_feat_dim,
        # which is the backbone feature dimension from in_fpn.
        self.translayer_dims = [int(self.orig_in_feat_dim / ratio)
                                for ratio in translayer_compress_ratios]
        self.trans_in_dim = self.translayer_dims[0]
        self.min_feat_dim = np.min(self.translayer_dims)
        self.trans_out_dim = self.translayer_dims[-1]
        self.in_fpn_scheme = in_fpn_scheme
        self.out_fpn_scheme = out_fpn_scheme
        if do_print:
            print("'%s' orig in-feat: %d, in-feat: %d, out-feat: %d, in-scheme: %s, out-scheme: %s, "
                  "translayer_dims: %s" %
                  (config_name, self.orig_in_feat_dim, self.trans_in_dim, self.trans_out_dim,
                   self.in_fpn_scheme, self.out_fpn_scheme,
                   self.translayer_dims))
# Module-level singleton configuration; overridden in place by
# set_segtran3d_config() before model construction.
CONFIG = Segtran3dConfig()
def set_segtran3d_config(args):
    """Copy command-line settings from `args` into the module-level CONFIG
    singleton (mutated in place) and return it.

    `args` is presumably an argparse.Namespace (the `'key' in args`
    membership tests below rely on Namespace.__contains__) -- TODO confirm.
    """
    CONFIG.num_classes = args.num_classes
    CONFIG.backbone_type = args.backbone_type
    CONFIG.use_pretrained = args.use_pretrained
    CONFIG.bb_feat_upsize = args.bb_feat_upsize
    CONFIG.bb_feat_dims = bb2feat_dims[CONFIG.backbone_type]
    CONFIG.in_fpn_use_bn = args.in_fpn_use_bn
    CONFIG.use_squeezed_transformer = args.use_squeezed_transformer
    # Bool-indexing: False -> scales[0] (vanilla), True -> scales[1] (squeezed).
    CONFIG.cross_attn_score_scale = CONFIG.cross_attn_score_scales[
        CONFIG.use_squeezed_transformer]
    CONFIG.num_attractors = args.num_attractors
    CONFIG.num_translayers = args.num_translayers
    CONFIG.num_modes = args.num_modes
    CONFIG.trans_output_type = args.trans_output_type
    CONFIG.mid_type = args.mid_type
    CONFIG.pos_code_every_layer = args.pos_code_every_layer
    CONFIG.pos_in_attn_only = args.pos_in_attn_only
    CONFIG.base_initializer_range = args.base_initializer_range
    CONFIG.pos_code_type = args.pos_code_type
    CONFIG.pos_code_weight = args.pos_code_weight
    CONFIG.pos_bias_radius = args.pos_bias_radius
    CONFIG.ablate_multihead = args.ablate_multihead
    # Optional settings: only copied when present on `args`.
    if 'dropout_prob' in args:
        CONFIG.hidden_dropout_prob = args.dropout_prob
        CONFIG.attention_probs_dropout_prob = args.dropout_prob
    if 'out_fpn_do_dropout' in args:
        CONFIG.out_fpn_do_dropout = args.out_fpn_do_dropout
    if 'perturb_posw_range' in args:
        CONFIG.perturb_posw_range = args.perturb_posw_range
    if 'qk_have_bias' in args:
        CONFIG.qk_have_bias = args.qk_have_bias
    CONFIG.has_FFN_in_squeeze = args.has_FFN_in_squeeze
    CONFIG.attn_clip = args.attn_clip
    CONFIG.set_fpn_layers('args', args.in_fpn_layers, args.out_fpn_layers,
                          args.in_fpn_scheme, args.out_fpn_scheme,
                          translayer_compress_ratios=args.translayer_compress_ratios)
    CONFIG.orig_in_channels = args.orig_in_channels
    CONFIG.inchan_to3_scheme = args.inchan_to3_scheme
    CONFIG.D_groupsize = args.D_groupsize
    CONFIG.D_pool_K = args.D_pool_K
    CONFIG.out_fpn_upsampleD_scheme = args.out_fpn_upsampleD_scheme
    CONFIG.input_scale = args.input_scale
    # CONFIG.device = args.device
    # Device is hard-coded to CUDA here rather than taken from args.
    CONFIG.device = 'cuda'
    if 'eval_robustness' in args:
        CONFIG.eval_robustness = args.eval_robustness
    return CONFIG
class Segtran3d(SegtranInitWeights):
def __init__(self, config):
super(Segtran3d, self).__init__(config)
self.config = config
self.device = config.device
self.orig_in_channels = config.orig_in_channels
self.trans_in_dim = config.trans_in_dim
self.trans_out_dim = config.trans_out_dim
self.num_translayers = config.num_translayers
self.bb_feat_upsize = config.bb_feat_upsize
self.G = config.G
self.voxel_fusion = SegtranFusionEncoder(config, 'Fusion')
self.backbone_type = config.backbone_type
self.use_pretrained = config.use_pretrained
self.pos_code_every_layer = config.pos_code_every_layer
if self.backbone_type.startswith('i3d'):
self.backbone = InceptionI3d(do_pool1=not self.bb_feat_upsize)
print("%s created" % self.backbone_type)
# if backbone_type == 'i3d-scratch', then do not load pretrained weights.
if self.use_pretrained:
i3d_folder = os.path.dirname(aj_i3d.__file__)
pretrained_i3d_path = os.path.join(
i3d_folder, "aj_rgb_imagenet.pth")
state_dict = torch.load(
pretrained_i3d_path, map_location=torch.device('cuda'))
self.backbone.load_state_dict(state_dict)
print("Loaded pretrained i3d model '{}'".format(
pretrained_i3d_path))
else:
raise NotImplementedError("Only support i3d as the 3D backbone")
self.inchan_to3_scheme = config.inchan_to3_scheme
self.D_groupsize = config.D_groupsize
self.eff_in_channels = self.orig_in_channels * self.D_groupsize
self.D_pool_K = config.D_pool_K
self.out_fpn_upsampleD_scheme = config.out_fpn_upsampleD_scheme
self.input_scale = config.input_scale
# For brats, eff_in_channels = 4 (4 modalities, D_groupsize = 1).
if self.eff_in_channels != 3:
if self.inchan_to3_scheme == 'avgto3':
if self.eff_in_channels == 2:
self.in_bridge_to3 = nn.Linear(2, 3, bias=False)
in_avg_2to3_weight = torch.tensor(
[[1, 0], [0.5, 0.5], [0, 1]])
self.in_bridge_to3.weight.data.copy_(in_avg_2to3_weight)
elif self.eff_in_channels == 4:
self.in_bridge_to3 = nn.Linear(4, 3, bias=False)
in_avg_4to3_weight = torch.tensor(
[[1, 0, 0, 0], [0, 0.5, 0.5, 0], [0, 0, 0, 1]])
self.in_bridge_to3.weight.data.copy_(in_avg_4to3_weight)
else:
raise NotImplementedError(
"'avgto3' is only for effective channels == 2 or 4, not {}".format(self.eff_in_channels))
self.in_bridge_to3.weight.requires_grad = False
elif self.eff_in_channels == 1 and self.inchan_to3_scheme == 'dup3':
self.in_bridge_to3 = lambda x: x.expand(-1, 3, -1, -1, -1)
elif self.inchan_to3_scheme == 'bridgeconv':
self.in_bridge_to3 = nn.Conv3d(self.eff_in_channels, 3, 1)
# stemconv is only applicable for efficientnet.
elif self.eff_in_channels > 3 and self.inchan_to3_scheme == 'stemconv':
raise NotImplementedError(
"Changing stemconv channel number is not supported for i3d")
else:
raise NotImplementedError("Effective input channel size={}*{} is not supported for scheme '{}'".format(
self.orig_in_channels, self.D_groupsize, self.inchan_to3_scheme))
self.in_fpn_use_bn = config.in_fpn_use_bn
self.in_fpn_layers = config.in_fpn_layers
self.in_fpn_scheme = config.in_fpn_scheme
# in_fpn_layers: default [3,4].
# FPN output resolution is determined by the smallest number (lowest layer).
if self.bb_feat_upsize:
if 2 in self.in_fpn_layers:
self.mask_pool = nn.AvgPool3d((2, 4, 4))
elif 3 in self.in_fpn_layers:
self.mask_pool = nn.AvgPool3d((4, 8, 8))
else:
self.mask_pool = nn.AvgPool3d((8, 16, 16))
else:
if 2 in self.in_fpn_layers:
self.mask_pool = nn.AvgPool3d((2, 8, 8))
elif 3 in self.in_fpn_layers:
self.mask_pool = nn.AvgPool3d((4, 16, 16))
else:
# This resolution is too low. Put here for completeness.
self.mask_pool = nn.AvgPool3d((8, 32, 32))
self.bb_feat_dims = config.bb_feat_dims
self.in_fpn23_conv = nn.Conv3d(
self.bb_feat_dims[2], self.bb_feat_dims[3], 1)
self.in_fpn34_conv = nn.Conv3d(
self.bb_feat_dims[3], self.bb_feat_dims[4], 1)
# Default in_fpn_layers: 34. last_in_fpn_layer_idx: 4.
last_in_fpn_layer_idx = self.in_fpn_layers[-1]
if self.bb_feat_dims[last_in_fpn_layer_idx] != self.trans_in_dim:
self.in_fpn_bridgeconv = nn.Conv3d(
self.bb_feat_dims[last_in_fpn_layer_idx], self.trans_in_dim, 1)
else:
self.in_fpn_bridgeconv = nn.Identity()
# in_bn4b/in_gn4b normalizes in_fpn43_conv(layer 4 features),
# so the feature dim = dim of layer 3.
# in_bn3b/in_gn3b normalizes in_fpn32_conv(layer 3 features),
# so the feature dim = dim of layer 2.
if self.in_fpn_use_bn:
self.in_bn3b = nn.BatchNorm3d(self.bb_feat_dims[3])
self.in_bn4b = nn.BatchNorm3d(self.bb_feat_dims[4])
self.in_fpn_norms = [None, None, None, self.in_bn3b, self.in_bn4b]
else:
self.in_gn3b = nn.GroupNorm(self.G, self.bb_feat_dims[3])
self.in_gn4b = nn.GroupNorm(self.G, self.bb_feat_dims[4])
self.in_fpn_norms = [None, None, None, self.in_gn3b, self.in_gn4b]
self.in_fpn_convs = [None, None,
self.in_fpn23_conv, self.in_fpn34_conv]
self.num_classes = config.num_classes
self.out_fpn_use_bn = config.out_fpn_use_bn
self.out_fpn_layers = config.out_fpn_layers
self.out_fpn_scheme = config.out_fpn_scheme
self.out_fpn_do_dropout = config.out_fpn_do_dropout
if self.out_fpn_layers != self.in_fpn_layers:
self.do_out_fpn = True
self.out_fpn12_conv3d = nn.Conv3d(self.bb_feat_dims[1],
self.bb_feat_dims[2], 1)
self.out_fpn23_conv3d = nn.Conv3d(self.bb_feat_dims[2],
self.bb_feat_dims[3], 1)
self.out_fpn34_conv3d = nn.Conv3d(self.bb_feat_dims[3],
self.bb_feat_dims[4], 1)
last_out_fpn_layer = self.out_fpn_layers[-len(self.in_fpn_layers)]
self.out_fpn_bridgeconv3d = nn.Conv3d(self.bb_feat_dims[last_out_fpn_layer],
self.trans_out_dim, 1)
if self.out_fpn_upsampleD_scheme == 'conv':
self.out_feat_dim = self.trans_out_dim // self.D_pool_K
self.out_fpn_upsampleD = nn.Conv3d(
self.trans_out_dim, self.out_feat_dim * self.D_pool_K, 1)
else:
self.out_feat_dim = self.trans_out_dim
# out_bn3b/out_gn3b normalizes out_fpn23_conv3d(layer 3 features),
# so the feature dim = dim of layer 2.
# out_bn2b/out_gn2b normalizes out_fpn12_conv3d(layer 2 features),
# so the feature dim = dim of layer 1.
if self.out_fpn_use_bn:
self.out_bn2b = nn.BatchNorm3d(self.bb_feat_dims[2])
self.out_bn3b = nn.BatchNorm3d(self.bb_feat_dims[3])
self.out_bn4b = nn.BatchNorm3d(self.bb_feat_dims[4])
self.out_fpn_norms = [
None, None, self.out_bn2b, self.out_bn3b, self.out_bn4b]
else:
self.out_gn2b = nn.GroupNorm(self.G, self.bb_feat_dims[2])
self.out_gn3b = nn.GroupNorm(self.G, self.bb_feat_dims[3])
self.out_gn4b = nn.GroupNorm(self.G, self.bb_feat_dims[4])
self.out_fpn_norms = [
None, None, self.out_gn2b, self.out_gn3b, self.out_gn4b]
self.out_fpn_convs = [None, self.out_fpn12_conv3d,
self.out_fpn23_conv3d, self.out_fpn34_conv3d]
# For i3d, even if D_pool_K == 2, out_fpn_upsampleD is not used. So the input feature dim is still trans_out_dim.
self.out_conv3d = nn.Conv3d(
self.trans_out_dim, self.num_classes, 1)
self.out_fpn_dropout = nn.Dropout(config.hidden_dropout_prob)
# out_fpn_layers = in_fpn_layers, no need to do fpn at the output end.
# Output class scores directly.
else:
self.do_out_fpn = False
if '2' in self.in_fpn_layers:
# Output resolution is 1/4 of input already. No need to do upsampling here.
self.out_conv3d = nn.Conv3d(
config.trans_out_dim, self.num_classes, | |
""" This callback will emitted when autodoc has formatted a signature for an object.
It parses and caches the signature arguments for function and classes so it can
be used in the process_docstring_callback callback.
"""
global cached_parameters
global cached_signature
if name.startswith('mitsuba.python.'):
# Signatures from python scripts do not contain any argument types or
# return type information. So we don't need to do anything.
cached_signature = signature
cached_parameters = []
else:
if signature and what == 'class':
# For classes we don't display any signature in the class headers
# Parse the signature string
cached_signature, cached_parameters = parse_signature_args(signature)
elif signature and what in ['method', 'function']:
# For methods and functions, parameter types will be added to the docstring.
# Parse the signature string
cached_signature, cached_parameters = parse_signature_args(signature)
# Return type (if any) will also be added to the docstring
if return_annotation:
return_annotation = sanitize_cpp_types(return_annotation)
cached_parameters.append(['__return', return_annotation, None])
else:
cached_signature = ''
cached_parameters = []
return signature, None
def process_docstring_callback(app, what, name, obj, options, lines):
    """This callback will be emitted when autodoc has read and processed a
    docstring.

    Using the cached parameter list and signature, this function modifies the
    docstring (`lines`, edited in place) and extracts the resulting RST code
    into the extracted_rst text list.
    """
    global cached_parameters
    global cached_signature
    global last_class_name
    global extracted_rst
    global rst_block_range
    global block_line_start
    global last_block_name
    # True if the documentation wasn't generated with pybind11 (e.g. python script)
    is_python_doc = name.startswith('mitsuba.python.')
    # Plain constants are documented as 'data' regardless of what autodoc says.
    if type(obj) in (int, float, bool, str):
        what = 'data'
    #----------------------------
    # Handle classes
    if what == 'class':
        # process_docstring callback is always called twice for a class:
        # 1. class description
        # 2. constructor(s) description. If the constructor is overloaded, the
        #    docstring will enumerate the constructor signatures (similar to
        #    overloaded functions)
        if not last_class_name == name:  # First call (class description)
            # Add information about the base class if it is a Mitsuba type.
            # str(cls) looks like "<class 'pkg.Name'>"; [8:-2] strips the wrapper.
            if len(obj.__bases__) > 0:
                full_base_name = str(obj.__bases__[0])[8:-2]
                if full_base_name.startswith('mitsuba'):
                    lines.insert(0, 'Base class: %s' % full_base_name)
                    lines.insert(1, '')
            # Handle enums properly
            # Search for 'Members:' line to define whether this class is an enum or not
            is_enum = False
            next_idx = 0
            for i, l in enumerate(lines):
                if 'Members:' in l:
                    is_enum = True
                    next_idx = i + 2
                    break
            if is_enum:
                # Iterate over the enum members, rewriting each
                # "name : description" pair as a py:data directive.
                while next_idx < len(lines) and not '__init__' in lines[next_idx]:
                    l = lines[next_idx]
                    # Skip empty lines
                    if not l == '':
                        # Check if this line defines an enum member
                        if re.match(r' [a-zA-Z\_0-9]+ : .*', l):
                            e_name, e_desc = l[2:].split(' : ')
                            lines[next_idx] = '.. py:data:: %s' % (e_name)
                            next_idx += 1
                            lines.insert(next_idx, '')
                            next_idx += 1
                            lines.insert(next_idx, ' %s' % e_desc)
                        else:
                            # Add indentation
                            lines[next_idx] = ' %s' % l
                    next_idx += 1
            # Handle aliases (will only call the callback once)
            if len(lines) > 0 and 'Overloaded function.' in lines[0]:
                process_overload_block(lines, 'method')
        else:  # Second call (constructor(s) description)
            if not cached_signature == '(overloaded)':
                # Increase indent of all lines
                for i, l in enumerate(lines):
                    lines[i] = ' ' + l
                # Insert constructor signature
                lines.insert(0, '.. py:method:: %s%s' %
                             ('__init__', cached_signature))
                lines.insert(1, '')
                lines.insert(len(lines)-1, '')
                insert_params_and_return_docstring(
                    lines, cached_parameters, len(lines)-1, indent=' ')
            else:
                process_overload_block(lines, 'method')
    #----------------------------
    # Handle methods and functions
    if what in ['method', 'function']:
        if cached_signature == '(overloaded)':
            process_overload_block(lines, what)
        else:
            # Find where to insert the next parameter if necessary.
            # For this we look for 'Returns:', otherwise we insert at the end.
            next_idx = len(lines)
            for i, l in enumerate(lines):
                if 'Returns:' in l:
                    next_idx = i
                    break
            insert_params_and_return_docstring(
                lines, cached_parameters, next_idx)
    # Add code-block directive and cross reference for every mitsuba type
    in_code_block = False
    in_bullet_item_block = False
    for i, l in enumerate(lines):
        # Process code block: ``` fences toggle a .. code-block:: directive
        if re.match(r'([ ]+|)```', l):
            if not in_code_block:
                lines[i] = l[:-3] + '.. code-block:: c'
                lines.insert(i+1, '')
                in_code_block = True
            else:
                lines[i] = ''
                in_code_block = False
        else:
            # Adjust the indentation
            if in_code_block and not l == '':
                lines[i] = ' ' + l
            # Adjust indentation for multi-line bullet list items
            if re.match(r'([ ]+|) \* ', lines[i]):
                in_bullet_item_block = True
            elif lines[i] == '':
                in_bullet_item_block = False
            elif in_bullet_item_block:
                lines[i] = ' ' + lines[i]
            # Add cross-reference for any mitsuba.* dotted name not already
            # inside backticks
            if not in_code_block:
                lines[i] = re.sub(r'(?<!`)(mitsuba(?:\.[a-zA-Z\_0-9]+)+)',
                                  r':py:obj:`\1`', lines[i])
    #----------------------------
    # Extract RST
    # Check whether this class is defined within another (e.g. Bitmap.FileFormat),
    # in which case the indentation will be adjusted.
    local_class = what == 'class' and re.fullmatch(
        r'%s\.[\w]+' % last_block_name, name)
    # Check whether to start a new block
    if what in ['function', 'class', 'module', 'data'] and not local_class and not last_class_name == name:
        # Register previous block
        if last_block_name:
            rst_block_range[last_block_name] = [
                block_line_start, len(extracted_rst) - 1]
        # Start a new block
        block_line_start = len(extracted_rst)
        last_block_name = name
    # Get rid of default 'name' property in enums
    if what == 'property' and lines[0] == '(self: handle) -> str':
        return
    # Adjust the indentation
    doc_indent = ' '
    directive_indent = ''
    if what in ['method', 'attribute', 'property'] or local_class:
        doc_indent += ' '
        directive_indent += ' '
    # Don't write the class directive twice
    if not (what == 'class' and last_class_name == name):
        directive_type = what
        if what == 'property':
            directive_type = 'method'
        # Add the corresponding RST directive
        directive = '%s.. py:%s:: %s' % (
            directive_indent, directive_type, name)
        # Display signature for methods and functions
        if what in ['method', 'function']:
            directive += cached_signature
        # Display signature for classes for python doc
        if what == 'class' and is_python_doc and cached_signature:
            directive += cached_signature
        extracted_rst.append(directive + '\n')
        # 'property' fields need an extra argument to the directive
        if what == 'property':
            extracted_rst.append(doc_indent + ':property:\n')
        # 'data' fields get extra arguments
        if what == 'data':
            extracted_rst.append(doc_indent + ':type: %s\n' % str(type(obj))[8:-2])
            extracted_rst.append(doc_indent + ':value: %s\n' % str(obj))
        extracted_rst.append('\n')
    # Extract the docstring (if not a module)
    if not what in ['module', 'data']:
        for l in lines:
            if l == '':
                extracted_rst.append('\n')
            else:
                extracted_rst.append(doc_indent + l + '\n')
    # Keep track of last class name (to distinguish the two callbacks)
    last_class_name = name
def write_rst_file_callback(app, exception):
    """Write the extracted RST to a file and generate the RST reference
    pages for the different libraries.

    Intended as a Sphinx 'build-finished'-style callback; `app` and
    `exception` are unused here -- TODO confirm hook registration.
    """
    # Register the last (still-open) block
    rst_block_range[last_block_name] = [block_line_start, len(extracted_rst)]
    # Given a class/function "block" name, add an RST 'include' directive with
    # the corresponding start/end-line to the output file.
    def write_block(f, block_name):
        f.write('.. include:: extracted_rst_api.rst\n')
        f.write(' :start-line: %i\n' % rst_block_range[block_name][0])
        f.write(' :end-line: %i\n' % rst_block_range[block_name][1])
        # Add a horizontal separator line
        f.write('\n')
        f.write('------------\n')
        f.write('\n')
    # Write extracted RST to file
    with open(extracted_rst_filename, 'w') as f:
        print('Extract API doc into: %s' % extracted_rst_filename)
        for l in extracted_rst:
            f.write(l)
    # Generate API Reference RST according to the api_doc_structure description
    for lib in api_doc_structure.keys():
        lib_structure = api_doc_structure[lib]
        lib_api_filename = join(docs_path, 'generated/%s_api.rst' % lib)
        with open(lib_api_filename, 'w') as f:
            print('Generate %s API RST file: %s' % (lib, lib_api_filename))
            f.write('.. _sec-api-%s:\n\n' % lib)
            f.write('%s API Reference\n' % lib.title())
            # Underline must cover "<lib> API Reference" (14 extra chars)
            f.write('=' * len(lib) + '==============\n')
            f.write('\n')
            # Keep track of the added blocks, so the rest can go in 'Other'
            added_block = []
            for section_name in lib_structure.keys():
                # Write section name
                f.write('%s\n' % section_name)
                f.write('-' * len(section_name) + '\n')
                f.write('\n')
                # Write all the blocks whose names match the section patterns
                for pattern in lib_structure[section_name]:
                    for block_name in rst_block_range.keys():
                        if re.fullmatch(pattern, block_name):
                            write_block(f, block_name)
                            added_block.append(block_name)
            # Add the rest into the 'Other' section
            f.write('Other\n')
            f.write('-----\n')
            f.write('\n')
            for block_name in rst_block_range.keys():
                if block_name in added_block:
                    continue
                if not block_name.startswith('mitsuba.%s' % lib):
                    continue
                write_block(f, block_name)
def generate_list_api_callback(app):
"""Generate a RST file listing all the python members (classes or functions)
to be parsed and extracted. This function will recursively explore submodules
and packages."""
import importlib
from inspect import isclass, isfunction, ismodule, ismethod
def process(f, obj, lib, name):
if re.match(r'__[a-zA-Z\_0-9]+__', name):
return
if ismodule(obj):
# 'python' is a package, so it needs to be treated differently
if name == 'python':
# | |
# Repository: cdoolin/labdrivers
# This file is auto-generated. Do not edit!
from collections import namedtuple
_d = {u'AIConv_ActiveEdge': 6227,
u'AIConv_DigFltr_Enable': 11996,
u'AIConv_DigFltr_MinPulseWidth': 11997,
u'AIConv_DigFltr_TimebaseRate': 11999,
u'AIConv_DigFltr_TimebaseSrc': 11998,
u'AIConv_DigSync_Enable': 12000,
u'AIConv_MaxRate': 8905,
u'AIConv_Rate': 6216,
u'AIConv_Src': 5378,
u'AIConv_TimebaseDiv': 4917,
u'AIConv_Timebase_Src': 4921,
u'AI_ACExcit_Freq': 257,
u'AI_ACExcit_SyncEnable': 258,
u'AI_ACExcit_WireMode': 6349,
u'AI_ADCCustomTimingMode': 12139,
u'AI_ADCTimingMode': 10745,
u'AI_Accel_Sensitivity': 1682,
u'AI_Accel_SensitivityUnits': 8604,
u'AI_Accel_Units': 1651,
u'AI_Accel_dBRef': 10674,
u'AI_Atten': 6145,
u'AI_AutoZeroMode': 5984,
u'AI_AveragingWinSize': 12270,
u'AI_Bridge_Balance_CoarsePot': 6129,
u'AI_Bridge_Balance_FinePot': 6388,
u'AI_Bridge_Cfg': 135,
u'AI_Bridge_ElectricalUnits': 12167,
u'AI_Bridge_InitialRatio': 12166,
u'AI_Bridge_InitialVoltage': 6125,
u'AI_Bridge_NomResistance': 6124,
u'AI_Bridge_PhysicalUnits': 12168,
u'AI_Bridge_Poly_ForwardCoeff': 12176,
u'AI_Bridge_Poly_ReverseCoeff': 12177,
u'AI_Bridge_ScaleType': 12169,
u'AI_Bridge_ShuntCal_Enable': 148,
u'AI_Bridge_ShuntCal_GainAdjust': 6463,
u'AI_Bridge_ShuntCal_Select': 8661,
u'AI_Bridge_ShuntCal_ShuntCalAActualResistance': 12153,
u'AI_Bridge_ShuntCal_ShuntCalAResistance': 12152,
u'AI_Bridge_Table_ElectricalVals': 12174,
u'AI_Bridge_Table_PhysicalVals': 12175,
u'AI_Bridge_TwoPointLin_First_ElectricalVal': 12170,
u'AI_Bridge_TwoPointLin_First_PhysicalVal': 12171,
u'AI_Bridge_TwoPointLin_Second_ElectricalVal': 12172,
u'AI_Bridge_TwoPointLin_Second_PhysicalVal': 12173,
u'AI_Bridge_Units': 12178,
u'AI_ChanCal_ApplyCalIfExp': 8857,
u'AI_ChanCal_Desc': 8868,
u'AI_ChanCal_EnableCal': 8856,
u'AI_ChanCal_HasValidCalInfo': 8855,
u'AI_ChanCal_OperatorName': 8867,
u'AI_ChanCal_Poly_ForwardCoeff': 8863,
u'AI_ChanCal_Poly_ReverseCoeff': 8864,
u'AI_ChanCal_ScaleType': 8860,
u'AI_ChanCal_Table_PreScaledVals': 8861,
u'AI_ChanCal_Table_ScaledVals': 8862,
u'AI_ChanCal_Verif_AcqVals': 8866,
u'AI_ChanCal_Verif_RefVals': 8865,
u'AI_Coupling': 100,
u'AI_CurrentShunt_Loc': 6130,
u'AI_CurrentShunt_Resistance': 6131,
u'AI_Current_ACRMS_Units': 6115,
u'AI_Current_Units': 1793,
u'AI_CustomScaleName': 6112,
u'AI_DCOffset': 10889,
u'AI_DataXferCustomThreshold': 8972,
u'AI_DataXferMech': 6177,
u'AI_DataXferReqCond': 6283,
u'AI_DevScalingCoeff': 6448,
u'AI_Dither_Enable': 104,
u'AI_EddyCurrentProxProbe_Sensitivity': 10942,
u'AI_EddyCurrentProxProbe_SensitivityUnits': 10943,
u'AI_EddyCurrentProxProbe_Units': 10944,
u'AI_EnhancedAliasRejectionEnable': 8852,
u'AI_Excit_ActualVal': 6275,
u'AI_Excit_DCorAC': 6139,
u'AI_Excit_Src': 6132,
u'AI_Excit_UseForScaling': 6140,
u'AI_Excit_UseMultiplexed': 8576,
u'AI_Excit_Val': 6133,
u'AI_Excit_VoltageOrCurrent': 6134,
u'AI_FilterDelay': 12269,
u'AI_ForceReadFromChan': 6392,
u'AI_Force_IEPESensor_Sensitivity': 12161,
u'AI_Force_IEPESensor_SensitivityUnits': 12162,
u'AI_Force_Units': 12149,
u'AI_Freq_Hyst': 2068,
u'AI_Freq_ThreshVoltage': 2069,
u'AI_Freq_Units': 2054,
u'AI_Gain': 6168,
u'AI_Impedance': 98,
u'AI_InputSrc': 8600,
u'AI_Is_TEDS': 10627,
u'AI_LVDT_Sensitivity': 2361,
u'AI_LVDT_SensitivityUnits': 8602,
u'AI_LVDT_Units': 2320,
u'AI_LeadWireResistance': 6126,
u'AI_LossyLSBRemoval_CompressedSampSize': 8921,
u'AI_Lowpass_CutoffFreq': 6147,
u'AI_Lowpass_Enable': 6146,
u'AI_Lowpass_SwitchCap_ClkSrc': 6276,
u'AI_Lowpass_SwitchCap_ExtClkDiv': 6278,
u'AI_Lowpass_SwitchCap_ExtClkFreq': 6277,
u'AI_Lowpass_SwitchCap_OutClkDiv': 6279,
u'AI_Max': 6109,
u'AI_MeasType': 1685,
u'AI_MemMapEnable': 6284,
u'AI_Microphone_Sensitivity': 5430,
u'AI_Min': 6110,
u'AI_OpenThrmcplDetectEnable': 12146,
u'AI_Pressure_Units': 12150,
u'AI_ProbeAtten': 10888,
u'AI_RTD_A': 4112,
u'AI_RTD_B': 4113,
u'AI_RTD_C': 4115,
u'AI_RTD_R0': 4144,
u'AI_RTD_Type': 4146,
u'AI_RVDT_Sensitivity': 2307,
u'AI_RVDT_SensitivityUnits': 8603,
u'AI_RVDT_Units': 2167,
u'AI_RawDataCompressionType': 8920,
u'AI_RawSampJustification': 80,
u'AI_RawSampSize': 8922,
u'AI_RemoveFilterDelay': 12221,
u'AI_ResistanceCfg': 6273,
u'AI_Resistance_Units': 2389,
u'AI_Resolution': 5989,
u'AI_ResolutionUnits': 5988,
u'AI_Rng_High': 6165,
u'AI_Rng_Low': 6166,
u'AI_RosetteStrainGage_Orientation': 12284,
u'AI_RosetteStrainGage_RosetteMeasType': 12285,
u'AI_RosetteStrainGage_RosetteType': 12286,
u'AI_RosetteStrainGage_StrainChans': 12283,
u'AI_SampAndHold_Enable': 6170,
u'AI_SoundPressure_MaxSoundPressureLvl': 8762,
u'AI_SoundPressure_Units': 5416,
u'AI_SoundPressure_dBRef': 10673,
u'AI_StrainGage_Cfg': 2434,
u'AI_StrainGage_ForceReadFromChan': 12282,
u'AI_StrainGage_GageFactor': 2452,
u'AI_StrainGage_PoissonRatio': 2456,
u'AI_Strain_Units': 2433,
u'AI_TEDS_Units': 8672,
u'AI_Temp_Units': 4147,
u'AI_TermCfg': 4247,
u'AI_Thrmcpl_CJCChan': 4148,
u'AI_Thrmcpl_CJCSrc': 4149,
u'AI_Thrmcpl_CJCVal': 4150,
u'AI_Thrmcpl_LeadOffsetVoltage': 12216,
u'AI_Thrmcpl_ScaleType': 10704,
u'AI_Thrmcpl_Type': 4176,
u'AI_Thrmstr_A': 6345,
u'AI_Thrmstr_B': 6347,
u'AI_Thrmstr_C': 6346,
u'AI_Thrmstr_R1': 4193,
u'AI_Torque_Units': 12151,
u'AI_UsbXferReqCount': 12288,
u'AI_UsbXferReqSize': 10894,
u'AI_Velocity_IEPESensor_Sensitivity': 12278,
u'AI_Velocity_IEPESensor_SensitivityUnits': 12279,
u'AI_Velocity_IEPESensor_dBRef': 12277,
u'AI_Velocity_Units': 12276,
u'AI_Voltage_ACRMS_Units': 6114,
u'AI_Voltage_Units': 4244,
u'AI_Voltage_dBRef': 10672,
u'AO_Current_Units': 4361,
u'AO_CustomScaleName': 4488,
u'AO_DAC_Offset_ExtSrc': 8788,
u'AO_DAC_Offset_Src': 8787,
u'AO_DAC_Offset_Val': 8789,
u'AO_DAC_Ref_AllowConnToGnd': 6192,
u'AO_DAC_Ref_ConnToGnd': 304,
u'AO_DAC_Ref_ExtSrc': 8786,
u'AO_DAC_Ref_Src': 306,
u'AO_DAC_Ref_Val': 6194,
u'AO_DAC_Rng_High': 6190,
u'AO_DAC_Rng_Low': 6189,
u'AO_DataXferMech': 308,
u'AO_DataXferReqCond': 6204,
u'AO_DevScalingCoeff': 6449,
u'AO_EnhancedImageRejectionEnable': 8769,
u'AO_FuncGen_Amplitude': 10778,
u'AO_FuncGen_FMDeviation': 10787,
u'AO_FuncGen_Freq': 10777,
u'AO_FuncGen_ModulationType': 10786,
u'AO_FuncGen_Offset': 10779,
u'AO_FuncGen_Square_DutyCycle': 10780,
u'AO_FuncGen_Type': 10776,
u'AO_Gain': 280,
u'AO_IdleOutputBehavior': 8768,
u'AO_LoadImpedance': 289,
u'AO_Max': 4486,
u'AO_MemMapEnable': 6287,
u'AO_Min': 4487,
u'AO_OutputImpedance': 5264,
u'AO_OutputType': 4360,
u'AO_PowerAmp_ChannelEnable': 12386,
u'AO_PowerAmp_Gain': 12389,
u'AO_PowerAmp_Offset': 12390,
u'AO_PowerAmp_Overcurrent': 12388,
u'AO_PowerAmp_ScalingCoeff': 12387,
u'AO_ReglitchEnable': 307,
u'AO_Resolution': 6188,
u'AO_ResolutionUnits': 6187,
u'AO_TermCfg': 6286,
u'AO_UsbXferReqCount': 12289,
u'AO_UsbXferReqSize': 10895,
u'AO_UseOnlyOnBrdMem': 6202,
u'AO_Voltage_CurrentLimit': 10781,
u'AO_Voltage_Units': 4484,
u'AdvTrig_Type': 4965,
u'AnlgEdge_RefTrig_Coupling': 8757,
u'AnlgEdge_RefTrig_DigFltr_Enable': 12006,
u'AnlgEdge_RefTrig_DigFltr_MinPulseWidth': 12007,
u'AnlgEdge_RefTrig_DigFltr_TimebaseRate': 12009,
u'AnlgEdge_RefTrig_DigFltr_TimebaseSrc': 12008,
u'AnlgEdge_RefTrig_DigSync_Enable': 12010,
u'AnlgEdge_RefTrig_Hyst': 5153,
u'AnlgEdge_RefTrig_Lvl': 5154,
u'AnlgEdge_RefTrig_Slope': 5155,
u'AnlgEdge_RefTrig_Src': 5156,
u'AnlgEdge_StartTrig_Coupling': 8755,
u'AnlgEdge_StartTrig_DigFltr_Enable': 12001,
u'AnlgEdge_StartTrig_DigFltr_MinPulseWidth': 12002,
u'AnlgEdge_StartTrig_DigFltr_TimebaseRate': 12004,
u'AnlgEdge_StartTrig_DigFltr_TimebaseSrc': 12003,
u'AnlgEdge_StartTrig_DigSync_Enable': 12005,
u'AnlgEdge_StartTrig_Hyst': 5013,
u'AnlgEdge_StartTrig_Lvl': 5014,
u'AnlgEdge_StartTrig_Slope': 5015,
u'AnlgEdge_StartTrig_Src': 5016,
u'AnlgLvl_PauseTrig_Coupling': 8758,
u'AnlgLvl_PauseTrig_DigFltr_Enable': 12016,
u'AnlgLvl_PauseTrig_DigFltr_MinPulseWidth': 12017,
u'AnlgLvl_PauseTrig_DigFltr_TimebaseRate': 12019,
u'AnlgLvl_PauseTrig_DigFltr_TimebaseSrc': 12018,
u'AnlgLvl_PauseTrig_DigSync_Enable': 12020,
u'AnlgLvl_PauseTrig_Hyst': 4968,
u'AnlgLvl_PauseTrig_Lvl': 4969,
u'AnlgLvl_PauseTrig_Src': 4976,
u'AnlgLvl_PauseTrig_When': 4977,
u'AnlgWin_PauseTrig_Btm': 4981,
u'AnlgWin_PauseTrig_Coupling': 8759,
u'AnlgWin_PauseTrig_DigFltr_Enable': 12021,
u'AnlgWin_PauseTrig_DigFltr_MinPulseWidth': 12022,
u'AnlgWin_PauseTrig_DigFltr_TimebaseRate': 12024,
u'AnlgWin_PauseTrig_DigFltr_TimebaseSrc': 12023,
u'AnlgWin_PauseTrig_DigSync_Enable': 12025,
u'AnlgWin_PauseTrig_Src': 4979,
u'AnlgWin_PauseTrig_Top': 4982,
u'AnlgWin_PauseTrig_When': 4980,
u'AnlgWin_RefTrig_Btm': 5160,
u'AnlgWin_RefTrig_Coupling': 6231,
u'AnlgWin_RefTrig_DigFltr_Enable': 12011,
u'AnlgWin_RefTrig_DigFltr_MinPulseWidth': 12012,
u'AnlgWin_RefTrig_DigFltr_TimebaseRate': 12014,
u'AnlgWin_RefTrig_DigFltr_TimebaseSrc': 12013,
u'AnlgWin_RefTrig_DigSync_Enable': 12015,
u'AnlgWin_RefTrig_Src': 5158,
u'AnlgWin_RefTrig_Top': 5161,
u'AnlgWin_RefTrig_When': 5159,
u'AnlgWin_StartTrig_Btm': 5122,
u'AnlgWin_StartTrig_Coupling': 8756,
u'AnlgWin_StartTrig_DigFltr_Enable': 12031,
u'AnlgWin_StartTrig_DigFltr_MinPulseWidth': 12032,
u'AnlgWin_StartTrig_DigFltr_TimebaseRate': 12034,
u'AnlgWin_StartTrig_DigFltr_TimebaseSrc': 12033,
u'AnlgWin_StartTrig_DigSync_Enable': 12035,
u'AnlgWin_StartTrig_Src': 5120,
u'AnlgWin_StartTrig_Top': 5123,
u'AnlgWin_StartTrig_When': 5121,
u'ArmStartTrig_Type': 5140,
u'ArmStart_Term': 12159,
u'Buf_Input_BufSize': 6252,
u'Buf_Input_OnbrdBufSize': 8970,
u'Buf_Output_BufSize': 6253,
u'Buf_Output_OnbrdBufSize': 8971,
u'CI_AngEncoder_InitialAngle': 2177,
u'CI_AngEncoder_PulsesPerRev': 2165,
u'CI_AngEncoder_Units': 6310,
u'CI_Count': 328,
u'CI_CountEdges_ActiveEdge': 1687,
u'CI_CountEdges_CountDir_DigFltr_Enable': 8689,
u'CI_CountEdges_CountDir_DigFltr_MinPulseWidth': 8690,
u'CI_CountEdges_CountDir_DigFltr_TimebaseRate': 8692,
u'CI_CountEdges_CountDir_DigFltr_TimebaseSrc': 8691,
u'CI_CountEdges_CountDir_DigSync_Enable': 8693,
u'CI_CountEdges_CountReset_ActiveEdge': 12210,
u'CI_CountEdges_CountReset_DigFltr_Enable': 12211,
u'CI_CountEdges_CountReset_DigFltr_MinPulseWidth': 12212,
u'CI_CountEdges_CountReset_DigFltr_TimebaseRate': 12214,
u'CI_CountEdges_CountReset_DigFltr_TimebaseSrc': 12213,
u'CI_CountEdges_CountReset_DigSync_Enable': 12215,
u'CI_CountEdges_CountReset_Enable': 12207,
u'CI_CountEdges_CountReset_ResetCount': 12208,
u'CI_CountEdges_CountReset_Term': 12209,
u'CI_CountEdges_DigFltr_Enable': 8694,
u'CI_CountEdges_DigFltr_MinPulseWidth': 8695,
u'CI_CountEdges_DigFltr_TimebaseRate': 8697,
u'CI_CountEdges_DigFltr_TimebaseSrc': 8696,
u'CI_CountEdges_DigSync_Enable': 8698,
u'CI_CountEdges_Dir': 1686,
u'CI_CountEdges_DirTerm': 8673,
u'CI_CountEdges_InitialCnt': 1688,
u'CI_CountEdges_Term': 6343,
u'CI_CtrTimebaseActiveEdge': 322,
u'CI_CtrTimebaseMasterTimebaseDiv': 6323,
u'CI_CtrTimebaseRate': 6322,
u'CI_CtrTimebaseSrc': 323,
u'CI_CtrTimebase_DigFltr_Enable': 8817,
u'CI_CtrTimebase_DigFltr_MinPulseWidth': 8818,
u'CI_CtrTimebase_DigFltr_TimebaseRate': 8820,
u'CI_CtrTimebase_DigFltr_TimebaseSrc': 8819,
u'CI_CtrTimebase_DigSync_Enable': 8821,
u'CI_CustomScaleName': 6302,
u'CI_DataXferMech': 512,
u'CI_DataXferReqCond': 12027,
u'CI_DupCountPrevent': 8620,
u'CI_Encoder_AInputTerm': 8605,
u'CI_Encoder_AInput_DigFltr_Enable': 8699,
u'CI_Encoder_AInput_DigFltr_MinPulseWidth': 8700,
u'CI_Encoder_AInput_DigFltr_TimebaseRate': 8702,
u'CI_Encoder_AInput_DigFltr_TimebaseSrc': 8701,
u'CI_Encoder_AInput_DigSync_Enable': 8703,
u'CI_Encoder_BInputTerm': 8606,
u'CI_Encoder_BInput_DigFltr_Enable': 8704,
u'CI_Encoder_BInput_DigFltr_MinPulseWidth': 8705,
u'CI_Encoder_BInput_DigFltr_TimebaseRate': 8707,
u'CI_Encoder_BInput_DigFltr_TimebaseSrc': 8706,
u'CI_Encoder_BInput_DigSync_Enable': 8708,
u'CI_Encoder_DecodingType': 8678,
u'CI_Encoder_ZIndexEnable': 2192,
u'CI_Encoder_ZIndexPhase': 2185,
u'CI_Encoder_ZIndexVal': 2184,
u'CI_Encoder_ZInputTerm': 8607,
u'CI_Encoder_ZInput_DigFltr_Enable': 8709,
u'CI_Encoder_ZInput_DigFltr_MinPulseWidth': 8710,
u'CI_Encoder_ZInput_DigFltr_TimebaseRate': 8712,
u'CI_Encoder_ZInput_DigFltr_TimebaseSrc': 8711,
u'CI_Encoder_ZInput_DigSync_Enable': 8713,
u'CI_Freq_DigFltr_Enable': 8679,
u'CI_Freq_DigFltr_MinPulseWidth': 8680,
u'CI_Freq_DigFltr_TimebaseRate': 8682,
u'CI_Freq_DigFltr_TimebaseSrc': 8681,
u'CI_Freq_DigSync_Enable': 8683,
u'CI_Freq_Div': 327,
u'CI_Freq_EnableAveraging': 11984,
u'CI_Freq_MeasMeth': 324,
u'CI_Freq_MeasTime': 325,
u'CI_Freq_StartingEdge': 1945,
u'CI_Freq_Term': 6306,
u'CI_Freq_Units': 6305,
u'CI_GPS_SyncMethod': 4242,
u'CI_GPS_SyncSrc': 4243,
u'CI_LinEncoder_DistPerPulse': 2321,
u'CI_LinEncoder_InitialPos': 2325,
u'CI_LinEncoder_Units': 6313,
u'CI_Max': 6300,
u'CI_MeasType': 6304,
u'CI_MemMapEnable': 11986,
u'CI_Min': 6301,
u'CI_NumPossiblyInvalidSamps': 6460,
u'CI_OutputState': 329,
u'CI_Period_DigFltr_Enable': 8684,
u'CI_Period_DigFltr_MinPulseWidth': 8685,
u'CI_Period_DigFltr_TimebaseRate': 8687,
u'CI_Period_DigFltr_TimebaseSrc': 8686,
u'CI_Period_DigSync_Enable': 8688,
u'CI_Period_Div': 6446,
u'CI_Period_EnableAveraging': 11985,
u'CI_Period_MeasMeth': 6444,
u'CI_Period_MeasTime': 6445,
u'CI_Period_StartingEdge': 2130,
u'CI_Period_Term': 6308,
u'CI_Period_Units': 6307,
u'CI_Prescaler': 8761,
u'CI_PulseWidth_DigFltr_Enable': 8714,
u'CI_PulseWidth_DigFltr_MinPulseWidth': 8715,
u'CI_PulseWidth_DigFltr_TimebaseRate': 8717,
u'CI_PulseWidth_DigFltr_TimebaseSrc': 8716,
u'CI_PulseWidth_DigSync_Enable': 8718,
u'CI_PulseWidth_StartingEdge': 2085,
u'CI_PulseWidth_Term': 6314,
u'CI_PulseWidth_Units': 2083,
u'CI_Pulse_Freq_DigFltr_Enable': 12038,
u'CI_Pulse_Freq_DigFltr_MinPulseWidth': 12039,
u'CI_Pulse_Freq_DigFltr_TimebaseRate': 12041,
u'CI_Pulse_Freq_DigFltr_TimebaseSrc': 12040,
u'CI_Pulse_Freq_DigSync_Enable': 12042,
u'CI_Pulse_Freq_Start_Edge': 12037,
u'CI_Pulse_Freq_Term': 12036,
u'CI_Pulse_Freq_Units': 12043,
u'CI_Pulse_Ticks_DigFltr_Enable': 12054,
u'CI_Pulse_Ticks_DigFltr_MinPulseWidth': 12055,
u'CI_Pulse_Ticks_DigFltr_TimebaseRate': 12057,
u'CI_Pulse_Ticks_DigFltr_TimebaseSrc': 12056,
u'CI_Pulse_Ticks_DigSync_Enable': 12058,
u'CI_Pulse_Ticks_StartEdge': 12053,
u'CI_Pulse_Ticks_Term': 12052,
u'CI_Pulse_Time_DigFltr_Enable': 12046,
u'CI_Pulse_Time_DigFltr_MinPulseWidth': 12047,
u'CI_Pulse_Time_DigFltr_TimebaseRate': 12049,
u'CI_Pulse_Time_DigFltr_TimebaseSrc': 12048,
u'CI_Pulse_Time_DigSync_Enable': 12050,
u'CI_Pulse_Time_StartEdge': 12045,
u'CI_Pulse_Time_Term': 12044,
u'CI_Pulse_Time_Units': 12051,
u'CI_SemiPeriod_DigFltr_Enable': 8729,
u'CI_SemiPeriod_DigFltr_MinPulseWidth': 8730,
u'CI_SemiPeriod_DigFltr_TimebaseRate': 8732,
u'CI_SemiPeriod_DigFltr_TimebaseSrc': 8731,
u'CI_SemiPeriod_DigSync_Enable': 8733,
u'CI_SemiPeriod_StartingEdge': 8958,
u'CI_SemiPeriod_Term': 6320,
u'CI_SemiPeriod_Units': 6319,
u'CI_TCReached': 336,
u'CI_Timestamp_InitialSeconds': 8884,
u'CI_Timestamp_Units': 8883,
u'CI_TwoEdgeSep_FirstEdge': 2099,
u'CI_TwoEdgeSep_FirstTerm': 6317,
u'CI_TwoEdgeSep_First_DigFltr_Enable': 8719,
u'CI_TwoEdgeSep_First_DigFltr_MinPulseWidth': 8720,
u'CI_TwoEdgeSep_First_DigFltr_TimebaseRate': 8722,
u'CI_TwoEdgeSep_First_DigFltr_TimebaseSrc': 8721,
u'CI_TwoEdgeSep_First_DigSync_Enable': 8723,
u'CI_TwoEdgeSep_SecondEdge': 2100,
u'CI_TwoEdgeSep_SecondTerm': 6318,
u'CI_TwoEdgeSep_Second_DigFltr_Enable': 8724,
u'CI_TwoEdgeSep_Second_DigFltr_MinPulseWidth': 8725,
u'CI_TwoEdgeSep_Second_DigFltr_TimebaseRate': 8727,
u'CI_TwoEdgeSep_Second_DigFltr_TimebaseSrc': 8726,
u'CI_TwoEdgeSep_Second_DigSync_Enable': 8728,
u'CI_TwoEdgeSep_Units': 6316,
u'CI_UsbXferReqCount': 12292,
u'CI_UsbXferReqSize': 10898,
u'CO_AutoIncrCnt': 661,
u'CO_ConstrainedGenMode': 10738,
u'CO_Count': 659,
u'CO_CtrTimebaseActiveEdge': 833,
u'CO_CtrTimebaseMasterTimebaseDiv': 6339,
u'CO_CtrTimebaseRate': 6338,
u'CO_CtrTimebaseSrc': 825,
u'CO_CtrTimebase_DigFltr_Enable': 8822,
u'CO_CtrTimebase_DigFltr_MinPulseWidth': 8823,
u'CO_CtrTimebase_DigFltr_TimebaseRate': 8825,
u'CO_CtrTimebase_DigFltr_TimebaseSrc': 8824,
u'CO_CtrTimebase_DigSync_Enable': 8826,
u'CO_DataXferMech': 11980,
u'CO_DataXferReqCond': 11981,
u'CO_EnableInitialDelayOnRetrigger': 11977,
u'CO_MemMapEnable': 11987,
u'CO_OutputState': 660,
u'CO_OutputType': 6325,
u'CO_Prescaler': 8813,
u'CO_PulseDone': 6414,
u'CO_Pulse_DutyCyc': 4470,
u'CO_Pulse_Freq': 4472,
u'CO_Pulse_Freq_InitialDelay': 665,
u'CO_Pulse_Freq_Units': 6357,
u'CO_Pulse_HighTicks': 4457,
u'CO_Pulse_HighTime': 6330,
u'CO_Pulse_IdleState': 4464,
u'CO_Pulse_LowTicks': 4465,
u'CO_Pulse_LowTime': 6331,
u'CO_Pulse_Term': 6369,
u'CO_Pulse_Ticks_InitialDelay': 664,
u'CO_Pulse_Time_InitialDelay': 6332,
u'CO_Pulse_Time_Units': 6358,
u'CO_RdyForNewVal': 8959,
u'CO_UsbXferReqCount': 12293,
u'CO_UsbXferReqSize': 10899,
u'CO_UseOnlyOnBrdMem': 11979,
u'Cal_AccConnectionCount': 12267,
u'Cal_DevTemp': 8763,
u'Cal_RecommendedAccConnectionCountLimit': 12268,
u'Cal_UserDefinedInfo': 6241,
u'Cal_UserDefinedInfo_MaxSize': 6428,
u'Carrier_SerialNum': 10890,
u'ChanDescr': 6438,
u'ChanIsGlobal': 8964,
u'ChanType': 6271,
u'ChangeDetect_DI_FallingEdgePhysicalChans': 8598,
u'ChangeDetect_DI_RisingEdgePhysicalChans': 8597,
u'ChangeDetect_DI_Tristate': 12026,
u'DAQmxSuccess': 0,
u'DI_AcquireOn': 10598,
u'DI_DataXferMech': 8803,
u'DI_DataXferReqCond': 8804,
u'DI_DigFltr_Enable': 8662,
u'DI_DigFltr_EnableBusMode': 12030,
u'DI_DigFltr_MinPulseWidth': 8663,
u'DI_DigFltr_TimebaseRate': 11989,
u'DI_DigFltr_TimebaseSrc': 11988,
u'DI_DigSync_Enable': 11990,
u'DI_InvertLines': 1939,
u'DI_LogicFamily': 10605,
u'DI_MemMapEnable': 10602,
u'DI_NumLines': 8568,
u'DI_Tristate': 6288,
u'DI_UsbXferReqCount': 12290,
u'DI_UsbXferReqSize': 10896,
u'DO_DataXferMech': 8806,
u'DO_DataXferReqCond': 8807,
u'DO_GenerateOn': 10601,
u'DO_InvertLines': 4403,
u'DO_LineStates_DoneState': 10600,
u'DO_LineStates_PausedState': 10599,
u'DO_LineStates_StartState': 10610,
u'DO_LogicFamily': 10606,
u'DO_MemMapEnable': 10603,
u'DO_NumLines': 8569,
u'DO_OutputDriveType': 4407,
u'DO_Overcurrent_AutoReenable': 10886,
u'DO_Overcurrent_Limit': 10885,
u'DO_Overcurrent_ReenablePeriod': 10887,
u'DO_Tristate': 6387,
u'DO_UsbXferReqCount': 12291,
u'DO_UsbXferReqSize': 10897,
u'DO_UseOnlyOnBrdMem': 8805,
u'DelayFromSampClk_Delay': 4887,
u'DelayFromSampClk_DelayUnits': 4868,
u'Dev_AI_BridgeRngs': 12240,
u'Dev_AI_Couplings': 10644,
u'Dev_AI_CurrentIntExcitDiscreteVals': 10699,
u'Dev_AI_CurrentRngs': 10641,
u'Dev_AI_FreqRngs': 10642,
u'Dev_AI_Gains': 10643,
u'Dev_AI_LowpassCutoffFreqDiscreteVals': 10645,
u'Dev_AI_LowpassCutoffFreqRangeVals': 10703,
u'Dev_AI_MaxMultiChanRate': 10637,
u'Dev_AI_MaxSingleChanRate': 10636,
u'Dev_AI_MinRate': 10638,
u'Dev_AI_PhysicalChans': 8990,
u'Dev_AI_ResistanceRngs': 10773,
u'Dev_AI_SampModes': 12252,
u'Dev_AI_SimultaneousSamplingSupported': 10639,
u'Dev_AI_SupportedMeasTypes': 12242,
u'Dev_AI_TrigUsage': 10630,
u'Dev_AI_VoltageIntExcitDiscreteVals': 10697,
u'Dev_AI_VoltageIntExcitRangeVals': 10698,
u'Dev_AI_VoltageRngs': 10640,
u'Dev_AO_CurrentRngs': 10652,
u'Dev_AO_Gains': 10653,
u'Dev_AO_MaxRate': 10647,
u'Dev_AO_MinRate': 10648,
u'Dev_AO_PhysicalChans': 8991,
u'Dev_AO_SampClkSupported': 10646,
u'Dev_AO_SampModes': 12253,
u'Dev_AO_SupportedOutputTypes': 12243,
u'Dev_AO_TrigUsage': 10631,
u'Dev_AO_VoltageRngs': 10651,
u'Dev_Accessory_ProductNums': 12142,
u'Dev_Accessory_ProductTypes': 12141,
u'Dev_Accessory_SerialNums': 12143,
u'Dev_AnlgTrigSupported': 10628,
u'Dev_BusType': 8998,
u'Dev_CI_MaxSize': 10655,
u'Dev_CI_MaxTimebase': 10656,
u'Dev_CI_PhysicalChans': 8996,
u'Dev_CI_SampClkSupported': 10654,
u'Dev_CI_SampModes': 12254,
u'Dev_CI_SupportedMeasTypes': 12244,
u'Dev_CI_TrigUsage': 10634,
u'Dev_CO_MaxSize': 10657,
u'Dev_CO_MaxTimebase': 10658,
u'Dev_CO_PhysicalChans': 8997,
u'Dev_CO_SampClkSupported': 12123,
u'Dev_CO_SampModes': 12255,
u'Dev_CO_SupportedOutputTypes': 12245,
u'Dev_CO_TrigUsage': 10635,
u'Dev_Chassis_ModuleDevNames': 10678,
u'Dev_CompactDAQ_ChassisDevName': 10679,
u'Dev_CompactDAQ_SlotNum': 10680,
u'Dev_DI_Lines': 8992,
u'Dev_DI_MaxRate': 10649,
u'Dev_DI_Ports': 8993,
u'Dev_DI_TrigUsage': 10632,
u'Dev_DO_Lines': 8994,
u'Dev_DO_MaxRate': 10650,
u'Dev_DO_Ports': 8995,
u'Dev_DO_TrigUsage': 10633,
u'Dev_DigTrigSupported': 10629,
u'Dev_IsSimulated': 8906,
u'Dev_NumDMAChans': 9020,
u'Dev_PCI_BusNum': 8999,
u'Dev_PCI_DevNum': 9000,
u'Dev_PXI_ChassisNum': 9001,
u'Dev_PXI_SlotNum': 9002,
u'Dev_ProductCategory': 10665,
u'Dev_ProductNum': 8989,
u'Dev_ProductType': 1585,
u'Dev_SerialNum': 1586,
u'Dev_TCPIP_EthernetIP': 10892,
u'Dev_TCPIP_Hostname': 10891,
u'Dev_TCPIP_WirelessIP': 10893,
u'Dev_TEDS_HWTEDSSupported': 12246,
u'Dev_Terminals': 10816,
u'DigEdge_AdvTrig_DigFltr_Enable': 8760,
u'DigEdge_AdvTrig_Edge': 4960,
u'DigEdge_AdvTrig_Src': 4962,
u'DigEdge_ArmStartTrig_DigFltr_Enable': 8749,
u'DigEdge_ArmStartTrig_DigFltr_MinPulseWidth': 8750,
u'DigEdge_ArmStartTrig_DigFltr_TimebaseRate': 8752,
u'DigEdge_ArmStartTrig_DigFltr_TimebaseSrc': 8751,
u'DigEdge_ArmStartTrig_DigSync_Enable': 8753,
u'DigEdge_ArmStartTrig_Edge': 5141,
u'DigEdge_ArmStartTrig_Src': 5143,
u'DigEdge_RefTrig_DigFltr_Enable': 11991,
u'DigEdge_RefTrig_DigFltr_MinPulseWidth': 11992,
u'DigEdge_RefTrig_DigFltr_TimebaseRate': 11994,
u'DigEdge_RefTrig_DigFltr_TimebaseSrc': 11993,
u'DigEdge_RefTrig_DigSync_Enable': 11995,
u'DigEdge_RefTrig_Edge': 5168,
u'DigEdge_RefTrig_Src': 5172,
u'DigEdge_StartTrig_DigFltr_Enable': 8739,
u'DigEdge_StartTrig_DigFltr_MinPulseWidth': 8740,
u'DigEdge_StartTrig_DigFltr_TimebaseRate': 8742,
u'DigEdge_StartTrig_DigFltr_TimebaseSrc': 8741,
u'DigEdge_StartTrig_DigSync_Enable': 8743,
u'DigEdge_StartTrig_Edge': 5124,
u'DigEdge_StartTrig_Src': 5127,
u'DigEdge_WatchdogExpirTrig_Edge': 8613,
u'DigEdge_WatchdogExpirTrig_Src': 8612,
u'DigLvl_PauseTrig_DigFltr_Enable': 8744,
u'DigLvl_PauseTrig_DigFltr_MinPulseWidth': 8745,
u'DigLvl_PauseTrig_DigFltr_TimebaseRate': 8747,
u'DigLvl_PauseTrig_DigFltr_TimebaseSrc': 8746,
u'DigLvl_PauseTrig_DigSync_Enable': 8748,
u'DigLvl_PauseTrig_Src': 4985,
u'DigLvl_PauseTrig_When': 4992,
u'DigPattern_PauseTrig_Pattern': 8584,
u'DigPattern_PauseTrig_Src': 8559,
u'DigPattern_PauseTrig_When': 8560,
| |
# `Iterable` must come from `collections.abc`: importing it from `collections`
# has been deprecated since Python 3.3 and the alias was removed in 3.10.
from collections.abc import Iterable
from datetime import datetime
from typing import Any

from flask import Blueprint, Response, jsonify, redirect, render_template, session, url_for
from flask.views import MethodView
from werkzeug.exceptions import default_exceptions

from pysite.constants import ALL_STAFF_ROLES, DEBUG_MODE, ErrorCodes
from pysite.mixins import OAuthMixin
class BaseView(MethodView, OAuthMixin):
    """
    Common base for every view class in this project.

    Holds the attributes and helpers shared by all views; always subclass it
    (directly or via one of its descendants) rather than using it as-is.
    """

    name = None  # type: str  # Flask-internal name for the route
    blueprint = None  # type: str  # Name of the blueprint the view belongs to

    def render(self, *template_names: str, **context: Any) -> str:
        """
        Render the given templates and return a string suitable for returning
        directly from a view method.

        The following keys are injected into the template context
        (overriding any caller-supplied values of the same name):

        * "current_page" - the "name" attribute from the view class
        * "view" - the view class instance
        * "logged_in" - a boolean, True if the user is logged in
        * "user" - the OAuth user data for the current user
        * "static_file(filename)" - a function used to get the URL for a given static file
        * "debug" - whether the site is running in debug mode
        * "format_datetime(dt)" - formats a datetime for display; non-datetimes pass through
        * "blueprint" - the blueprint this view belongs to
        * "is_staff()" - True when the current user holds a staff role (always True in debug mode)

        For CSRF protection, a token must be used; the "csrf_token()" function
        (available in templates) returns the correct token for the current
        rendering context. To protect a view method:

        1. Apply the "csrf" decorator to the view method
        2. For forms, declare a hidden input in the template named "csrf_token", with its value set to
           the CSRF token.
        3. For any AJAX work, store the CSRF token in a variable and send it as part of the request headers.
           You can set the "X-CSRFToken" header to the CSRF token for this.

        Any API call or form submission not protected by an API key must not be vulnerable to
        cross-site request forgery, unless the API call is intended to be a completely public
        feature. Public API methods must not be account-bound, and they must never return
        information on a current user or perform any action. Only data retrieval is permissible.

        :param template_names: Names of the templates to render
        :param context: Extra data to pass into the template
        :return: String representing the rendered templates
        """
        injected = {
            "current_page": self.name,
            "view": self,
            "logged_in": self.logged_in,
            "user": self.user_data,
            "static_file": self._static_file,
            "debug": DEBUG_MODE,
            "format_datetime": lambda dt: dt.strftime("%b %d %Y, %H:%M") if isinstance(dt, datetime) else dt,
            "blueprint": self.blueprint,
        }
        context.update(injected)

        def is_staff():
            # Everyone counts as staff while debugging; otherwise the user
            # must be logged in and hold at least one staff role.
            if DEBUG_MODE:
                return True

            if not self.logged_in:
                return False

            return any(
                role in self.user_data.get("roles", [])
                for role in ALL_STAFF_ROLES
            )

        context["is_staff"] = is_staff

        return render_template(template_names, **context)

    def _static_file(self, filename):
        # Resolve the URL of a file served from the static folder
        return url_for("static", filename=filename)
class RouteView(BaseView):
    """
    Standard route-based page view - the base class to use for an ordinary page.

    Subclass it and fill in the class-level attributes. For example:

    >>> class MyView(RouteView):
    ...     name = "my_view"  # Flask internal name for this route
    ...     path = "/my_view"  # Actual URL path to reach this route
    ...
    ...     def get(self):  # Name your function after the relevant HTTP method
    ...         return self.render("index.html")

    For more complicated routing, see http://exploreflask.com/en/latest/views.html#built-in-converters
    """

    path = None  # type: str  # URL rule this view is mounted at

    @classmethod
    def setup(cls: "RouteView", manager: "pysite.route_manager.RouteManager", blueprint: Blueprint):
        """
        Attach this view to the blueprint passed in. Cooperates with multiple
        inheritance by delegating to `super().setup()` when one is defined.

        :param manager: Instance of the current RouteManager
        :param blueprint: Current Flask blueprint to register this route to
        """
        parent = super()
        if hasattr(parent, "setup"):
            parent.setup(manager, blueprint)

        if not (cls.path and cls.name):
            raise RuntimeError("Route views must have both `path` and `name` defined")

        blueprint.add_url_rule(cls.path, view_func=cls.as_view(cls.name))
        cls.blueprint = blueprint.name
        cls.name = f"{blueprint.name}.{cls.name}"  # Add blueprint to page name

    def redirect_login(self, **kwargs):
        """
        Send the user off to the Discord OAuth login, remembering this page
        (and any URL kwargs) so they can be returned here afterwards.
        """
        session["redirect_target"] = {"url": self.name, "kwargs": kwargs}

        response = redirect(url_for("discord.login"))
        # Keep crawlers from indexing the login redirect
        response.headers.add("X-Robots-Tag", "noindex")
        return response
class APIView(RouteView):
    """
    API route view, with extra helpers for adding routes to the JSON API.

    Subclass it and fill in the class-level attributes. For example:

    >>> class MyView(APIView):
    ...     name = "my_view"  # Flask internal name for this route
    ...     path = "/my_view"  # Actual URL path to reach this route
    ...
    ...     def get(self):  # Name your function after the relevant HTTP method
    ...         return self.error(ErrorCodes.unknown_route)
    """

    def error(self, error_code: ErrorCodes, error_info: str = "") -> Response:
        """
        Generate a JSON response for you to return from your handler, for a specific type of API error

        :param error_code: The type of error to generate a response for - see `constants.ErrorCodes` for more
        :param error_info: An optional message with more information about the error.
        :return: A Flask Response object that you can return from your handler
        """
        # Default message and HTTP status for each known error code;
        # anything unrecognised falls back to ("Unknown error", 200).
        known_errors = {
            ErrorCodes.unknown_route: ("Unknown API route", 404),
            ErrorCodes.unauthorized: ("Unauthorized", 401),
            ErrorCodes.invalid_api_key: ("Invalid API-key", 401),
            ErrorCodes.bad_data_format: ("Input data in incorrect format", 400),
            ErrorCodes.incorrect_parameters: ("Incorrect parameters provided", 400),
        }
        default_message, http_code = known_errors.get(error_code, ("Unknown error", 200))

        response = jsonify({
            "error_code": error_code.value,
            "error_message": error_info or default_message,
        })
        response.status_code = http_code
        return response
class ErrorView(BaseView):
    """
    Error view, rendered whenever one of the configured HTTP status codes occurs.

    Subclass it and fill in the class-level attributes. For example:

    >>> class MyView(ErrorView):
    ...     name = "my_view"  # Flask internal name for this route
    ...     path = "/my_view"  # Actual URL path to reach this route
    ...     error_code = 404  # Error code
    ...
    ...     def get(self, error: HTTPException):  # Name your function after the relevant HTTP method
    ...         return "Replace me with a template, 404 not found", 404

    To catch several HTTP error codes at once, supply any iterable for `error_code`:

    >>> error_code = [401, 403]  # Handle two specific errors
    >>> error_code = range(500, 600)  # Handle all 5xx errors
    """

    error_code = None  # type: Union[int, Iterable]  # Status code(s) this view handles
    register_on_app = True  # Register app-wide rather than on the blueprint
    blueprint = "error"  # Because it doesn't truly have its own

    @classmethod
    def setup(cls: "ErrorView", manager: "pysite.route_manager.RouteManager", blueprint: Blueprint):
        """
        Register this view as the error handler for every HTTP status code in
        `error_code`. Cooperates with multiple inheritance by delegating to
        `super().setup()` when one is defined.

        :param manager: Instance of the current RouteManager
        :param blueprint: Current Flask blueprint to register the error handler for
        """
        parent = super()
        if hasattr(parent, "setup"):
            parent.setup(manager, blueprint)  # pragma: no cover

        if not (cls.name and cls.error_code):
            raise RuntimeError("Error views must have both `name` and `error_code` defined")

        # Normalise a single int into a one-element list so we can iterate below
        if isinstance(cls.error_code, int):
            cls.error_code = [cls.error_code]

        if not isinstance(cls.error_code, Iterable):
            raise RuntimeError(
                "Error views must have an `error_code` that is either an `int` or an iterable")  # pragma: no cover # noqa: E501

        for code in cls.error_code:
            # Skip codes werkzeug doesn't know about; registering them would
            # raise during blueprint registration
            if isinstance(code, int) and code not in default_exceptions:
                continue

            target = manager.app if cls.register_on_app else blueprint
            target.errorhandler(code)(cls.as_view(cls.name))
class TemplateView(RouteView):
"""
An easy view for routes that simply render a template with no extra information.
This class is intended to be subclassed - use it as a base class for your own views, and set the class-level
attributes as appropriate. For example:
>>> class MyView(TemplateView):
... name = "my_view" # Flask internal name for this route
... path = "/my_view" # Actual URL path to reach this route
... template = "my_view.html" # Template to use
Note that this view only handles | |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import sys
from functools import partial
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
from prepro_utils import preprocess_text, encode_ids
import sentencepiece as spm
# SentencePiece special-token ids.  These must agree with the vocabulary of
# the model loaded from FLAGS.sp_path — TODO confirm against the sp model.
special_symbols = {
    "<unk>": 0,   # unknown token
    "<s>": 1,     # start-of-sequence
    "</s>": 2,    # end-of-sequence
    "<pad>": 3,   # padding
    "<eod>": 4,   # end-of-document marker (per name)
    "<eop>": 5,   # end-of-paragraph marker (per name)
    "<hi>": 6,    # Hindi language tag (see _create_data)
    "<eng>": 7    # English language tag (see _create_data)
}

VOCAB_SIZE = 32000  # fixed sentence-piece vocabulary size

# Convenience aliases for the ids above.
UNK_ID = special_symbols["<unk>"]
EOD_ID = special_symbols["<eod>"]
EOP_ID = special_symbols["<eop>"]
HIN_ID = special_symbols["<hi>"]
ENG_ID = special_symbols["<eng>"]
SOS_ID = special_symbols["<s>"]
EOS_ID = special_symbols["</s>"]
PAD_ID = special_symbols["<pad>"]
def _int64_feature(values):
    """Wrap an iterable of ints in a tf.train.Feature holding an Int64List."""
    int_list = tf.train.Int64List(value=values)
    return tf.train.Feature(int64_list=int_list)
def _float_feature(values):
    """Wrap an iterable of floats in a tf.train.Feature holding a FloatList."""
    float_list = tf.train.FloatList(value=values)
    return tf.train.Feature(float_list=float_list)
def format_filename_gen(prefix, seq_len, tgt_len, bi_data, suffix,
                        src_lang, tgt_lang, uncased=False,):
    """Build the canonical file name for generator data/metadata files.

    Layout: "<src2>-<tgt2>_<prefix>.seqlen-<S>.tgtlen-<T>.[uncased.]<bi|uni>.gen.<suffix>"
    where <src2>/<tgt2> are the first two letters of each language name.
    """
    case_part = "uncased." if uncased else ""
    direction = "bi" if bi_data else "uni"
    return "{}-{}_{}.seqlen-{}.tgtlen-{}.{}{}.gen.{}".format(
        src_lang[:2], tgt_lang[:2], prefix, seq_len, tgt_len,
        case_part, direction, suffix)
def _create_data(idx, src_file, tgt_file, src_lang, tgt_lang,
                 transliterate=True, language_tag=True):
    """Tokenize a parallel corpus and serialize it into one tfrecord shard.

    Each example packs `<pads> [<sos>] [<lang>] source <eos> target-input`
    into a sequence of length FLAGS.seq_len; the target output is left-padded
    to FLAGS.tgt_len and stored together with input/target masks.

    :param idx: task index, used only for file naming and logging
    :param src_file: path to the source-language text file
    :param tgt_file: path to the target-language text file (line-aligned)
    :param src_lang: language name; anything other than "english" gets <hi>
    :param tgt_lang: same convention as src_lang
    :param transliterate: forwarded to encode_ids
    :param language_tag: prepend <eng>/<hi> tag ids to source and target input
    :return: dict with the tfrecord file name, languages and example count
    :raises Exception: if no valid line pairs were found, or if
        FLAGS.from_raw_text is False (id files are not supported yet)
    """
    # Load sentence-piece model
    sp = spm.SentencePieceProcessor()
    sp.Load(FLAGS.sp_path)

    input_data = []
    target_data = []
    target_mask_data = []
    input_mask_data = []

    total_line_cnt = 0
    for src_line, tgt_line in zip(tf.gfile.Open(src_file),
                                  tf.gfile.Open(tgt_file)):
        if total_line_cnt % 100000 == 0:
            tf.logging.info("Loading line %d", total_line_cnt)
        # Skip pairs where either side is blank.
        if not src_line.strip() or not tgt_line.strip():
            continue
        if FLAGS.from_raw_text:
            src_sent = preprocess_text(src_line.strip(), lower=FLAGS.uncased)
            tgt_sent = preprocess_text(tgt_line.strip(), lower=FLAGS.uncased)
            src_sent = encode_ids(sp, src_sent,
                                  transliterate=transliterate, language_tag=False)
            tgt_sent = encode_ids(sp, tgt_sent,
                                  transliterate=transliterate, language_tag=False)
            # Teacher forcing: input is the target shifted left of its EOS.
            tgt_sent = tgt_sent + [EOS_ID]
            tgt_sent_input = tgt_sent[:-1]
            tgt_sent_output = tgt_sent
            # Maximum size allowed for target
            tgt_sent_output = tgt_sent_output[:FLAGS.tgt_len]
            tgt_sent_input = tgt_sent_input[:FLAGS.tgt_len]
            # BUGFIX: honour the `language_tag` parameter (the caller already
            # passes FLAGS.language_tag) instead of reading the flag directly,
            # and define src_sent_e on the untagged path too — it was
            # previously unbound (NameError) when tags were disabled.
            if language_tag:
                src_id = ENG_ID if src_lang == "english" else HIN_ID
                tgt_id = ENG_ID if tgt_lang == "english" else HIN_ID
                src_sent_e = [src_id] + src_sent
                tgt_sent_input = [tgt_id] + tgt_sent_input
            else:
                src_sent_e = src_sent
            if FLAGS.use_sos:
                src_sent_e = [SOS_ID] + src_sent_e
                tgt_sent_input = [SOS_ID] + tgt_sent_input
            input_len = len(src_sent_e) + len(tgt_sent_input) + 1  # One extra for EOS after source
            if input_len > FLAGS.seq_len:
                if FLAGS.long_sentences == 'ignore':
                    continue
                else:
                    # Truncate in ratio of their original lengths
                    to_trunc = input_len - FLAGS.seq_len
                    len_ratio = len(src_sent_e) / len(tgt_sent_input)
                    to_trunc_src = min(int(len_ratio * to_trunc), to_trunc)
                    to_trunc_tgt = to_trunc - to_trunc_src
                    if to_trunc_src > 0:
                        src_sent_e = src_sent_e[:-to_trunc_src]
                    if to_trunc_tgt > 0:
                        tgt_sent_input = tgt_sent_input[:-to_trunc_tgt]
                        tgt_sent_output = tgt_sent_output[:-to_trunc_tgt]
                    input_len = FLAGS.seq_len
                    assert len(src_sent_e) + len(tgt_sent_input) + 1 == input_len
            # Target padding to tgt_len on the left side
            target_mask = [0] * (FLAGS.tgt_len - len(tgt_sent_output)) + [1] * len(tgt_sent_output)
            target = [PAD_ID] * (FLAGS.tgt_len - len(tgt_sent_output)) + tgt_sent_output
            # Paddings for input (also on the left)
            pads = [PAD_ID] * (FLAGS.seq_len - input_len)
            instance = pads + src_sent_e + [EOS_ID] + tgt_sent_input
            input_mask = [0] * len(pads) + [1] * (len(instance) - len(pads))
            assert len(instance) == FLAGS.seq_len, len(instance)
            assert len(input_mask) == FLAGS.seq_len, len(input_mask)
            assert len(target) == FLAGS.tgt_len, len(target)
            assert len(target_mask) == FLAGS.tgt_len, len(target_mask)
        else:
            raise Exception("Loading from id files not yet supported")

        input_data.append(np.array(instance, dtype=np.int64))
        target_data.append(np.array(target, dtype=np.int64))
        target_mask_data.append(np.array(target_mask, dtype=np.float32))
        input_mask_data.append(np.array(input_mask, dtype=np.float32))
        total_line_cnt += 1

    tf.logging.info("Finish with line %d", total_line_cnt)
    if total_line_cnt == 0:
        raise Exception("Files have no valid data")
    tf.logging.info("[Task %d] Total number line: %d", idx, total_line_cnt)

    tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords")

    file_name, num_batch = create_tfrecords(
        save_dir=tfrecord_dir,
        basename="{}-{}-{}".format(FLAGS.split, idx, FLAGS.pass_id),
        data=(input_data, target_data, target_mask_data, input_mask_data),
        seq_len=FLAGS.seq_len,
        tgt_len=FLAGS.tgt_len,
        bi_data=FLAGS.bi_data,
        sp=sp
    )

    record_info = {
        "filenames": [file_name],
        "langs": [src_lang, tgt_lang],
        "num_batch": num_batch
    }
    return record_info
def create_data(_):
    """Entry point for one preprocessing task.

    Creates the work directories, writes corpus_info.json (task 0 only),
    builds this task's tfrecord shard via _create_data, and dumps the
    matching record_info-*.json next to the shard.
    """
    # Make workdirs
    if not tf.gfile.Exists(FLAGS.save_dir):
        tf.gfile.MakeDirs(FLAGS.save_dir)

    tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords")
    if not tf.gfile.Exists(tfrecord_dir):
        tf.gfile.MakeDirs(tfrecord_dir)

    # Default target length is half the packed sequence length.
    if FLAGS.tgt_len is None:
        FLAGS.tgt_len = FLAGS.seq_len // 2

    # Create and dump corpus_info from task 0
    if FLAGS.task == 0:
        corpus_info = {
            "vocab_size": VOCAB_SIZE,
            "seq_len": FLAGS.seq_len,
            "uncased": FLAGS.uncased,
            "bi_data": FLAGS.bi_data,
            "use_sos": FLAGS.use_sos,
            "sp_path": FLAGS.sp_path,
            "src_file": FLAGS.src_file,
            # BUGFIX: key was misspelled "tft_file" in the dumped JSON.
            "tgt_file": FLAGS.tgt_file,
            "src_lang": FLAGS.src_lang,
            "tgt_lang": FLAGS.tgt_lang,
        }
        corpus_info_path = os.path.join(FLAGS.save_dir, "corpus_info.json")
        with tf.gfile.Open(corpus_info_path, "w") as fp:
            json.dump(corpus_info, fp)

    # Fail fast if either corpus file is missing.
    assert tf.gfile.Exists(FLAGS.src_file), f"{FLAGS.src_file} not found"
    assert tf.gfile.Exists(FLAGS.tgt_file), f"{FLAGS.tgt_file} not found"
    record_info = _create_data(FLAGS.task, FLAGS.src_file, FLAGS.tgt_file,
                               FLAGS.src_lang,
                               FLAGS.tgt_lang,
                               transliterate=FLAGS.transliterate,
                               language_tag=FLAGS.language_tag)

    record_prefix = "record_info-{}-{}-{}".format(
        FLAGS.split, FLAGS.task, FLAGS.pass_id)
    record_name = format_filename_gen(
        prefix=record_prefix,
        seq_len=FLAGS.seq_len,
        tgt_len=FLAGS.tgt_len,
        bi_data=FLAGS.bi_data,
        suffix="json",
        uncased=FLAGS.uncased,
        src_lang=FLAGS.src_lang,
        tgt_lang=FLAGS.tgt_lang)
    record_info_path = os.path.join(tfrecord_dir, record_name)

    with tf.gfile.Open(record_info_path, "w") as fp:
        json.dump(record_info, fp)
def create_tfrecords(save_dir, basename, data, seq_len,
                     tgt_len, bi_data, sp):
    """Serialize packed NMT examples into a single tfrecord file.

    :param save_dir: directory the tfrecord file is written to
    :param basename: file-name prefix ("{split}-{task}-{pass_id}")
    :param data: tuple (inputs, targets, target_masks, input_masks)
    :param seq_len: packed input sequence length (file naming only)
    :param tgt_len: target length (file naming only)
    :param bi_data: must be falsy; bidirectional data is unsupported
    :param sp: sentence-piece processor; unused, kept for interface stability
    :return: (save_path, number of examples written)
    :raises Exception: if bi_data is requested
    """
    input_data, target_data, target_mask_data, input_mask_data = data
    if bi_data:
        raise Exception("Bi directional data not supported right now")

    file_name = format_filename_gen(
        prefix=basename,
        seq_len=seq_len,
        tgt_len=tgt_len,
        bi_data=bi_data,
        suffix="tfrecords",
        uncased=FLAGS.uncased,
        src_lang=FLAGS.src_lang,
        tgt_lang=FLAGS.tgt_lang)
    save_path = os.path.join(save_dir, file_name)

    tf.logging.info("Start writing %s.", save_path)
    num_batch = 0
    # FIX: use the writer as a context manager so the file is flushed and
    # closed even if serialization raises mid-way.
    with tf.python_io.TFRecordWriter(save_path) as record_writer:
        for inputs, targets, inp_masks, tgt_masks in zip(
                input_data, target_data, input_mask_data, target_mask_data):
            feature = {
                "input": _int64_feature(inputs),
                "labels": _int64_feature(targets),
                "input_mask": _float_feature(inp_masks),
                "target_mask": _float_feature(tgt_masks),
            }
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            record_writer.write(example.SerializeToString())
            num_batch += 1

    tf.logging.info("Done writing %s. Num of batches: %d", save_path, num_batch)

    return save_path, num_batch
################
# get_input_fn #
################
def _convert_example(example, use_bfloat16):
    """Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.

    Mutates `example` in place; sparse values are densified first.
    """
    for key, val in list(example.items()):
        if tf.keras.backend.is_sparse(val):
            val = tf.sparse.to_dense(val)
        if val.dtype == tf.int64:
            val = tf.cast(val, tf.int32)
        if use_bfloat16 and val.dtype == tf.float32:
            val = tf.cast(val, tf.bfloat16)
        example[key] = val
def parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts,
                           host_id, num_core_per_host, bsz_per_core,
                           toeval=False):
    """Shard `file_names` across hosts and build a batched tfrecord dataset.

    `split` and `num_batch` are accepted but not used in the body —
    presumably kept for call-site symmetry; verify against callers.
    """
    # list of file pathes
    num_files = len(file_names)
    num_files_per_host = num_files // num_hosts
    my_start_file_id = host_id * num_files_per_host
    my_end_file_id = (host_id + 1) * num_files_per_host
    # The last host also takes the remainder files.
    if host_id == num_hosts - 1:
        my_end_file_id = num_files
    file_paths = file_names[my_start_file_id: my_end_file_id]
    tf.logging.info("Host %d handles %d files", host_id, len(file_paths))

    #assert split == "train"
    dataset = tf.data.Dataset.from_tensor_slices(file_paths)
    dataset = tf.data.TFRecordDataset(dataset)

    # (zihang): since we are doing online preprocessing, the parsed result of
    # the same input at each time will be different. Thus, cache processed data
    # is not helpful. It will use a lot of memory and lead to contrainer OOM.
    # So, change to cache non-parsed raw data instead.
    if not toeval:
        # Training: cache raw records, shuffle with a 10k buffer, repeat forever.
        dataset = dataset.cache().shuffle(10000).repeat().map(parser)
    else:
        # Evaluation: single ordered pass, no shuffling/repeating.
        dataset = dataset.map(parser)
    dataset = dataset.batch(bsz_per_core, drop_remainder=True)
    dataset = dataset.prefetch(num_core_per_host * bsz_per_core)

    return dataset
def get_dataset(params, num_hosts, num_core_per_host, split, file_names,
                num_batch, seq_len, use_bfloat16=False, toeval=True, tgt_len=None):
    """Build the per-host tf.data pipeline for the packed NMT tfrecords.

    :param params: estimator params dict; must contain "batch_size" and, for
        multi-host setups, a "context" object exposing `current_host`
    :param tgt_len: target length; defaults to seq_len // 2 when None
    :return: a batched tf.data.Dataset of parsed examples
    """
    bsz_per_core = params["batch_size"]
    if num_hosts > 1:
        host_id = params["context"].current_host
    else:
        host_id = 0
    if tgt_len is None:
        tgt_len = seq_len // 2

    #### Function used to parse tfrecord
    def parser(record):
        """function used to parse tfrecord."""
        record_spec = {
            "input": tf.FixedLenFeature([seq_len], tf.int64),
            "labels": tf.FixedLenFeature([tgt_len], tf.int64),
            "input_mask": tf.FixedLenFeature([seq_len], tf.float32),
            "target_mask": tf.FixedLenFeature([tgt_len], tf.float32)
        }

        # retrieve serialized example
        example = tf.parse_single_example(
            serialized=record,
            features=record_spec)

        # int64 -> int32 (and float32 -> bfloat16 when requested), in place.
        _convert_example(example, use_bfloat16)

        for k, v in example.items():
            tf.logging.info("%s: %s", k, v)

        return example

    # Get dataset
    dataset = parse_files_to_dataset(
        parser=parser,
        file_names=file_names,
        split=split,
        num_batch=num_batch,
        num_hosts=num_hosts,
        host_id=host_id,
        num_core_per_host=num_core_per_host,
        bsz_per_core=bsz_per_core,
        toeval=toeval)

    return dataset
def get_input_fn(
        tfrecord_dir,
        split,
        src_lang,
        tgt_lang,
        bsz_per_host,
        seq_len,
        bi_data,
        num_hosts=1,
        num_core_per_host=1,
        uncased=False,
        num_passes=None,
        use_bfloat16=False,
        toeval=False,
        tgt_len=None):
    """Glob per-task record-info JSONs, merge their metadata, and return an
    estimator-style input_fn together with the merged record_info.

    :param tfrecord_dir: comma-separated list of directories to search
    :param num_passes: if set, keep at most this many passes per record info
    :return: (input_fn, record_info); record_info["num_batch"] is the merged
        example count divided by bsz_per_host (see comment near the end)
    """
    if tgt_len is None:
        tgt_len = seq_len // 2
    # Merge all record infos into a single one
    record_glob_base = format_filename_gen(
        prefix="record_info-{}-*".format(split),
        seq_len=seq_len,
        tgt_len=tgt_len,
        bi_data=bi_data,
        suffix="json",
        uncased=uncased,
        src_lang=src_lang,
        tgt_lang=tgt_lang)

    record_info = {"num_batch": 0, "filenames": []}

    tfrecord_dirs = tfrecord_dir.split(",")
    tf.logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs)

    for idx, record_dir in enumerate(tfrecord_dirs):
        record_glob = os.path.join(record_dir, record_glob_base)
        tf.logging.info("[%d] Record glob: %s", idx, record_glob)

        record_paths = sorted(tf.gfile.Glob(record_glob))
        tf.logging.info("[%d] Num of record info path: %d",
                        idx, len(record_paths))

        cur_record_info = {"num_batch": 0, "filenames": []}

        for record_info_path in record_paths:
            if num_passes is not None:
                # Pass id is the last "-"-separated field of the base name.
                record_info_name = os.path.basename(record_info_path)
                fields = record_info_name.split(".")[0].split("-")
                pass_id = int(fields[-1])
                if len(fields) == 5 and pass_id >= num_passes:
                    tf.logging.info("Skip pass %d: %s", pass_id, record_info_name)
                    continue

            with tf.gfile.Open(record_info_path, "r") as fp:
                info = json.load(fp)
                if num_passes is not None:
                    # Scale the batch count by the fraction of passes kept.
                    eff_num_passes = min(num_passes, len(info["filenames"]))
                    ratio = eff_num_passes / len(info["filenames"])
                    cur_record_info["num_batch"] += int(info["num_batch"] * ratio)
                    cur_record_info["filenames"] += info["filenames"][:eff_num_passes]
                else:
                    cur_record_info["num_batch"] += info["num_batch"]
                    cur_record_info["filenames"] += info["filenames"]

        # overwrite directory for `cur_record_info`
        new_filenames = []
        for filename in cur_record_info["filenames"]:
            basename = os.path.basename(filename)
            new_filename = os.path.join(record_dir, basename)
            new_filenames.append(new_filename)
        cur_record_info["filenames"] = new_filenames

        tf.logging.info("[Dir %d] Number of chosen batches: %s",
                        idx, cur_record_info["num_batch"])
        tf.logging.info("[Dir %d] Number of chosen files: %s",
                        idx, len(cur_record_info["filenames"]))
        tf.logging.info(cur_record_info["filenames"])

        # add `cur_record_info` to global `record_info`
        record_info["num_batch"] += cur_record_info["num_batch"]
        record_info["filenames"] += cur_record_info["filenames"]

    # For nmt num_batch variable is total examples, divide by bsz_per_host
    record_info["num_batch"] = record_info["num_batch"] // bsz_per_host

    tf.logging.info("Total number of batches: %d",
                    record_info["num_batch"])
    tf.logging.info("Total number of files: %d",
                    len(record_info["filenames"]))
    tf.logging.info(record_info["filenames"])

    def input_fn(params):
        """Materialize the dataset for one host (invoked by the estimator)."""
        # The per-core batch size must tile exactly into the host batch size.
        assert params["batch_size"] * num_core_per_host == bsz_per_host,\
            f'{(params["batch_size"] , num_core_per_host , bsz_per_host)}'

        dataset = get_dataset(
            params=params,
            num_hosts=num_hosts,
            num_core_per_host=num_core_per_host,
            split=split,
            file_names=record_info["filenames"],
            num_batch=record_info["num_batch"],
            seq_len=seq_len,
            use_bfloat16=use_bfloat16,
            toeval=toeval,
            tgt_len=tgt_len)

        return dataset

    return input_fn, record_info
if __name__ == "__main__":
FLAGS = flags.FLAGS
flags.DEFINE_bool("use_tpu", True, help="whether to use TPUs")
flags.DEFINE_integer("seq_len", 512,
help="Sequence length.")
flags.DEFINE_integer("tgt_len", None,
help="Targets will be padded to this size. Default is seq_len//2")
flags.DEFINE_bool("uncased", False, help="Use uncased inputs or not.")
flags.DEFINE_bool("bi_data", True,
help="whether to create bidirectional data")
flags.DEFINE_bool("use_sos", True,
help="whether to use SOS.")
flags.DEFINE_bool("from_raw_text", True,
help="Whether the input is raw text or encoded ids.")
flags.DEFINE_string("sp_path", "", help="Path to the sentence piece model.")
flags.DEFINE_string("save_dir", "proc_data/example",
help="Directory for saving the processed data.")
flags.DEFINE_enum("split", "train", ["train", "dev", "test"],
| |
import os
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from model.network import AdaptiveInstanceNormalization
from keras.layers import Input, Dense, Flatten
from keras.layers import Lambda, Conv2D
from keras.models import Model
from keras.optimizers import Adam
from keras.constraints import UnitNorm
from keras.regularizers import l2
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from random_eraser import apply_random_eraser_and_mask
from random_eraser import get_random_eraser_and_mask
import hdf5storage
from tqdm import tqdm
from sklearn.metrics import auc
def euclidean_distance(vects):
    """Batchwise Euclidean distance between two feature tensors.

    `vects` is a pair (x, y); the result keeps a trailing singleton axis.
    The distance is floored at K.epsilon() before the sqrt for stability.
    """
    a, b = vects
    squared = K.sum(K.square(a - b), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def contrastive_loss(y_true, y_pred, margin=10):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    y_true is 1 for matching pairs and 0 for non-matching pairs; y_pred is
    the predicted distance.  `margin` was previously hard-coded; it is now a
    keyword parameter defaulting to the original value of 10, so existing
    callers (e.g. model.compile(loss=contrastive_loss)) are unaffected.
    '''
    square_pred = K.square(y_pred)
    # Negative pairs are only penalised while closer than the margin.
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
# GPU allocation: start from a clean Keras session and TF graph.
K.clear_session()
tf.reset_default_graph()
# Pin CUDA device enumeration to PCI order and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "0";

# Tensorflow memory allocation: grow on demand, up to the whole GPU.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
session = tf.Session(config=config)
K.tensorflow_backend.set_session(session)
# Inference-style learning phase by default; fixed graph-level seed.
K.set_learning_phase(False)
tf.set_random_seed(1234)
# Directories of pretrained models/data
model_dir = 'trained_models/lord/model/'
data_loc = 'trained_models/lord/data/celeba_test.npz'
train_data_loc = 'trained_models/lord/data/celeba_vgg.npz'
cbk_loc = 'trained_codebooks/one_sample_fixed.mat'
train_cbk_loc = 'trained_codebooks/train_one_sample_fixed.mat'

# Load all data; images are scaled from [0, 255] to [0, 1].
all_data = np.load(train_data_loc)
x_d_all = np.copy(all_data['imgs'] / 255.)
y_d_all = np.copy(all_data['classes'])

# Load test data
data = np.load(data_loc)
x_d_test = np.copy(data['imgs'] / 255.)
y_d_test = np.copy(data['classes'])
# Rearrange y_test as ordinal classes (since absolute value of class doesn't matter)
_, y_d_test_ordinal = np.unique(y_d_test, return_inverse=True)

# Filter test data from training data: keep only identities absent from test.
is_train = np.logical_not(np.isin(y_d_all, y_d_test))
x_d_train = np.copy(x_d_all[is_train])
y_d_train = np.copy(y_d_all[is_train])
# Free up memory
del all_data, x_d_all, y_d_all
# Rearrange y_train as ordinal classes (since absolute value of class doesn't matter)
_, y_d_train_ordinal = np.unique(y_d_train, return_inverse=True)
# Load model by parts (LORD-style generator split into separate submodels).
content_encoder = load_model(os.path.join(model_dir, 'content_encoder.h5py'))
class_encoder = load_model(os.path.join(model_dir, 'class_encoder.h5py'))
class_modulation = load_model(os.path.join(model_dir, 'class_modulation.h5py'))
# The generator uses a custom layer, so it must be named for deserialization.
generator = load_model(os.path.join(model_dir, 'generator.h5py'), custom_objects={
    'AdaptiveInstanceNormalization': AdaptiveInstanceNormalization})

# Predict content codes for every image.
# Train
train_content = content_encoder.predict(x_d_train)
# Test
test_content = content_encoder.predict(x_d_test)

# Load modulation codebooks (precomputed per-identity class codes).
contents = hdf5storage.loadmat(train_cbk_loc)
train_person_mod_codebook = contents['frozen_class_mod']
train_person_codebook = contents['frozen_class']
contents = hdf5storage.loadmat(cbk_loc)
person_mod_codebook = contents['frozen_class_mod']
person_codebook = contents['frozen_class']
# Construct training and validation sets
np.random.seed(2020)  # Current year
num_train_persons = 2000
num_val_persons = 100  # Drawn from test persons
# Sample identity subsets; ordinal labels run 0..max, hence the +1.
train_persons = np.random.choice(np.max(y_d_train_ordinal)+1, size=num_train_persons, replace=False)
val_persons = np.random.choice(np.max(y_d_test_ordinal)+1, size=num_val_persons, replace=False)
x_train = np.copy(x_d_train[np.isin(y_d_train_ordinal, train_persons)])
x_val = np.copy(x_d_test[np.isin(y_d_test_ordinal, val_persons)])
y_train = np.copy(y_d_train_ordinal[np.isin(y_d_train_ordinal, train_persons)])
y_val = np.copy(y_d_test_ordinal[np.isin(y_d_test_ordinal, val_persons)])
c_train = np.copy(train_content[np.isin(y_d_train_ordinal, train_persons)])
c_val = np.copy(test_content[np.isin(y_d_test_ordinal, val_persons)])
# Once we pick validation persons, construct their clean reconstructions
x_match_val = generator.predict([c_val, person_mod_codebook[y_val]])
# Free up memory
del x_d_train, x_d_test, train_content, test_content

# Training parameters
batch_size = 256
mining_steps = 2    # negative candidates mined per anchor
num_steps = 20000
alpha = 1e-3  # Weight decay coefficient
best_area_val = 0.
best_val_loss = 1e9
# Learning algorithm
trainer = 'adam'
adv_steps = 10          # adversarial-attack iterations per batch
adv_lr = 16. / 255  # Pixels at once
symmetrical_adv = True  # Train symmetrically
# Architecture
latent_dim = 128

# Universal labels (for a single batch): first half positives (1), second half negatives (0).
train_pair_labels = np.concatenate((np.ones(batch_size//2), np.zeros(batch_size//2)))[:, None]
val_pairs = len(x_val)
val_pair_labels = np.concatenate((np.ones(val_pairs), np.zeros(val_pairs)))[:, None]
# Input tensors
input_img = Input(shape=(64, 64, 3))
# Dynamic architecture
# Load a VGG16 backbone without its classifier head.
core_model = VGG16(input_shape=(64, 64, 3), include_top=False)
encoded = core_model(input_img)
# Feature layer: flatten, then project to a unit-norm embedding.
encoded = Flatten()(encoded)
encoded = Dense(latent_dim, activation='linear', kernel_constraint=UnitNorm())(encoded)
# Create shared model (same weights embed both branch inputs).
shared_model = Model(input_img, encoded)

# Two input tensors
img_real = Input(shape=(64, 64, 3))
img_gen = Input(shape=(64, 64, 3))
# Get features
features_real = shared_model(img_real)
features_gen = shared_model(img_gen)
# Compute distance between the two embeddings.
sim_score = Lambda(euclidean_distance)([features_real, features_gen])
# Siamese model
model = Model([img_real, img_gen], sim_score)
# Optimizer
optimizer = Adam(lr=0.001, amsgrad=True)
# Compile
model.compile(optimizer, loss=contrastive_loss, metrics=['accuracy'])
# Apply L2 weight regularization post-factum.
for layer in core_model.layers:
    if isinstance(layer, (Conv2D, Dense)):
        # BUGFIX: bind `layer` as a default argument.  A bare lambda would
        # late-bind the loop variable, so every added loss would regularize
        # only the LAST layer's weights instead of each layer's own.
        layer.add_loss(lambda layer=layer: l2(alpha)(layer.kernel))
        if hasattr(layer, 'bias_regularizer') and layer.use_bias:
            layer.add_loss(lambda layer=layer: l2(alpha)(layer.bias))
# Instantiate cutout (random erasing); returns the erased image and its mask.
eraser = get_random_eraser_and_mask(p=0.5, s_l=0.02, s_h=0.2, r_1=0.5, r_2=2.,
                                    v_l=0., v_h=1., pixel_level=True)
# Instantiate augmentation generator
image_generator = ImageDataGenerator(width_shift_range=5,
                                     height_shift_range=5,
                                     horizontal_flip=True)

# Setup a graph for patch adversarial attacks
x_adv = tf.placeholder(dtype=tf.float32, shape=(None, 64, 64, 3))
x_adv_pair = tf.placeholder(dtype=tf.float32, shape=(None, 64, 64, 3))
# Get features of both
adv_real_features = shared_model(x_adv)
adv_pair_features = shared_model(x_adv_pair)
# Loss function (feature distance) and its gradient w.r.t. the attacked image.
adv_loss = tf.norm(adv_real_features - adv_pair_features, axis=-1)
grad, = tf.gradients(adv_loss, x_adv)

# Where to save weights
result_dir = 'trained_models/proposed'
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
weight_name = result_dir + '/steps%d_lr%.1f' % (adv_steps, adv_lr*255.)

# Granularity of AUC (number of thresholds sampled per evaluation).
num_points = 100
tpr_val = np.zeros((num_steps, num_points))
fpr_val = np.zeros((num_steps, num_points))
area_val = np.zeros((num_steps,))
# Training/validation logs
train_loss_log = np.zeros((num_steps,))
val_loss_log = np.zeros((num_steps,))
# Train for each batch
for step_idx in tqdm(range(num_steps)):
# Draw a half batch of samples
random_idx = np.random.choice(len(x_train), size=batch_size//2, replace=False)
# Augment and generate them with their correct class codebook
x_match_real_half_batch = image_generator.flow(x_train[random_idx],
shuffle=False, batch_size=batch_size//2)[0]
# Random erasure
x_match_real_half_batch, x_real_mask_half_batch = apply_random_eraser_and_mask(eraser, x_match_real_half_batch)
# Get content code and generate images with correct class codes
real_content = content_encoder.predict(x_match_real_half_batch)
x_match_gen_half_batch = generator.predict([real_content, train_person_mod_codebook[y_train[random_idx]]])
# Adversarial attack on positive pair
if symmetrical_adv:
if adv_steps > 0:
# Find indices where patch augmentation is applied
patch_attack_idx = np.where(np.sum(x_real_mask_half_batch, axis=(1, 2, 3)))[0]
# Check if at least one such sample exists
if len(patch_attack_idx) > 0:
# Compute feature differences before adversarial attack - enable if manual verification is desired, but will slow down processing
# diff_before = model.predict([x_match_real_half_batch[patch_attack_idx],
# x_match_gen_half_batch[patch_attack_idx]])
# Further minimize distance by adversarial attacks
x_orig = np.copy(x_match_real_half_batch[patch_attack_idx])
x_mask_aug = x_real_mask_half_batch[patch_attack_idx]
for internal_step_idx in range(adv_steps):
# Get gradients and outputs
grad_np = session.run(grad, feed_dict={x_adv: x_orig,
x_adv_pair: x_match_gen_half_batch[patch_attack_idx]})
# Normalize, apply and clip
x_orig = np.clip(x_orig + adv_lr * np.sign(grad_np) * x_mask_aug, 0., 1.)
# Compute feature differences after adversarial attack
# diff_after = model.predict([x_orig, x_match_gen_half_batch[patch_attack_idx]])
# Replace samples with adversarials
x_match_real_half_batch[patch_attack_idx] = x_orig
# Mine for hard candidates that use the same class vectors
fake_person_idx = np.asarray([np.random.choice(np.where(np.logical_not(np.isin(y_train, y_train[random_idx[idx]])))[0],
size=mining_steps, replace=False) for idx in range(batch_size//2)]).flatten()
# Generate fake images with the target's class codes
fake_input_candidates = x_train[fake_person_idx]
mod_input = train_person_mod_codebook[np.mod(np.repeat(y_train[random_idx], mining_steps, axis=0),
len(train_person_mod_codebook)).astype(np.int)]
# Augment all negative pairs and generate them
fake_input_candidates_aug = image_generator.flow(fake_input_candidates,
shuffle=False, batch_size=batch_size//2*mining_steps)[0]
# Random erasure - save the mask for potential attacks
fake_input_candidates_aug, fake_erasure_mask = apply_random_eraser_and_mask(eraser, fake_input_candidates_aug)
# Get content code and generate images with swapped class codes
fake_content = content_encoder.predict(fake_input_candidates_aug)
fake_output_candidates = generator.predict([fake_content, mod_input])
# Get their similarity on input-output pairs
fake_sim_candidates = model.predict([fake_input_candidates_aug, fake_output_candidates])
# Reshape
fake_sim_candidates = np.reshape(fake_sim_candidates, (-1, mining_steps))
fake_output_candidates = np.reshape(fake_output_candidates, (batch_size//2, mining_steps, 64, 64, 3))
fake_input_candidates_aug = np.reshape(fake_input_candidates_aug, (batch_size//2, mining_steps, 64, 64, 3))
fake_masks = np.reshape(fake_erasure_mask, (batch_size//2, mining_steps, 64, 64, 3))
# Pick closest pairs
fake_idx = np.argmin(fake_sim_candidates, axis=-1)
# Assign the other half of batches
x_fake_real_half_batch = fake_input_candidates_aug[np.arange(batch_size//2), fake_idx]
x_fake_mask_half_batch = fake_masks[np.arange(batch_size//2), fake_idx]
x_fake_gen_half_batch = fake_output_candidates[np.arange(batch_size//2), fake_idx]
if adv_steps > 0:
# Find indices where patch augmentation is applied
patch_attack_idx = np.where(np.sum(x_fake_mask_half_batch, axis=(1, 2, 3)))[0]
# Check if at least one such sample exists
if len(patch_attack_idx) > 0:
# Compute feature differences before adversarial attack
# diff_before = model.predict([x_fake_real_half_batch[patch_attack_idx],
# x_fake_gen_half_batch[patch_attack_idx]])
#
# Further minimize distance by adversarial attacks
x_orig = np.copy(x_fake_real_half_batch[patch_attack_idx])
z_class_aug = train_person_mod_codebook[y_train[random_idx]][patch_attack_idx]
x_mask_aug = x_fake_mask_half_batch[patch_attack_idx]
for internal_step_idx in range(adv_steps):
# Get gradients and outputs
grad_np = session.run(grad, feed_dict={x_adv: x_orig,
x_adv_pair: x_fake_gen_half_batch[patch_attack_idx]})
# Normalize, apply and clip
x_orig = np.clip(x_orig - adv_lr * np.sign(grad_np) * x_mask_aug, 0., 1.)
# Compute feature differences after adversarial attack
# diff_after = model.predict([x_orig, x_fake_gen_half_batch[patch_attack_idx]])
# Replace samples with adversarials
x_fake_real_half_batch[patch_attack_idx] = x_orig
# Construct batches
x_real_batch = np.concatenate((x_match_real_half_batch, x_fake_real_half_batch), axis=0)
x_gen_batch = np.concatenate((x_match_gen_half_batch, x_fake_gen_half_batch), axis=0)
else:
# Construct batches
x_real_batch = np.concatenate((x_match_real_half_batch, x_fake_real_half_batch), axis=0)
x_gen_batch = np.concatenate((x_match_gen_half_batch, x_fake_gen_half_batch), axis=0)
# Train on batch
train_loss, train_acc = model.train_on_batch([x_real_batch, x_gen_batch], train_pair_labels)
# Validate periodically
if np.mod(step_idx, 50) == 0:
# For each person, sample another person from the same class
sampled_real_idx = np.asarray([np.random.choice(np.setdiff1d(np.where(y_val == y_val[idx])[0], idx)) for idx in range(len(y_val))])
# For each person, sample another person
sampled_fake_idx = np.asarray([np.random.choice(np.where(y_val != y_val[idx])[0]) for idx in range(len(y_val))])
# Create merged vectors
x_real_val = np.concatenate((x_val, x_val), axis=0)
x_fake_val = np.concatenate((x_val[sampled_real_idx], x_val[sampled_fake_idx]), axis=0)
# Predict
val_loss, val_acc = model.evaluate([x_real_val, x_fake_val], val_pair_labels)
print('Step %d. Val. loss = %.3f, Val. acc. = %.3f' % (step_idx, val_loss, val_acc))
# Verbose
print('Step %d. Train loss = %.3f, Train acc. = %.3f' % (step_idx, train_loss, train_acc))
# Directly get similarities
val_sim = model.predict([x_real_val, x_fake_val])
real_sim, fake_sim = np.split(val_sim, [len(val_sim)//2])
# Compute AUC ad-hoc
min_sim = np.minimum(np.min(real_sim), np.min(fake_sim))
max_sim = np.maximum(np.max(real_sim), np.max(fake_sim))
thresholds = np.linspace(min_sim, max_sim, num_points)
for idx, threshold in enumerate(thresholds):
tpr_val[step_idx, idx] = np.mean(real_sim < threshold)
fpr_val[step_idx, idx] = np.mean(fake_sim < threshold)
# Compute AUC
area_val[step_idx] = auc(fpr_val[step_idx], tpr_val[step_idx])
print('Step %d. AUC = %.3f' % (step_idx, area_val[step_idx]))
# Save best model according to AUC
if area_val[step_idx] > best_area_val:
shared_model.save_weights(weight_name + '_auc.h5')
best_area_val = area_val[step_idx]
# Save best model according to validation loss
if val_loss < best_val_loss:
shared_model.save_weights(weight_name + '_loss.h5')
best_val_loss = val_loss
# Save latest weights always
shared_model.save_weights(weight_name + '_last.h5')
# Store in logs
train_loss_log[step_idx] = train_loss
val_loss_log[step_idx] = val_loss
# Save periodically
hdf5storage.savemat(weight_name + '_logs.mat', {'train_loss_log': train_loss_log,
'val_loss_log': val_loss_log},
| |
class="answer">\n')
if pretty_links:
(sup_answer_body,sup_answer_links) = suppify_body(answer['body'])
yield '', join_(' ', escape_(sup_answer_body, True), ' \n')
for key in loop.setup(sup_answer_links.keys()):
yield '', join_(' ', '[', escape_((key), True), '] ', escape_((sup_answer_links[key].replace("&","&")), True), '<br/>\n')
yield '', join_(' ', '<br/>\n')
else:
yield '', join_(' ', escape_(answer['body'], True), '\n')
yield '', join_(' ', '</div>\n')
if answer.get('comments') and comments:
yield '', join_(' ', '<div class="answer-comments"> \n')
for comment in loop.setup(answer['comments']):
yield '', join_(' ', '<div class="comment">\n')
if int(comment['score']) > 0:
yield '', join_(' ', ' (', escape_(comment['score'], True), ') \n')
yield '', join_(' ', escape_(comment['body'], True), ' - <b> ', escape_(comment.get('owner',{'display_name':'community_owned'}).get('display_name','community_owned'), True), '</b> \n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '</div>\n')
yield '', join_(' ', '<div class="answer-pagenumber">', escape_(int(answer_number+1), True), '</div>\n')
yield '', join_(' ', '\n')
yield '', join_(' </div>\n')
yield '', join_(' </div>\n')
yield '', join_(' <script type="text/javascript">\n')
yield '', join_(' var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");\n')
yield '', join_(' document.write(unescape("%3Cscript src=\'" + gaJsHost + "google-analytics.com/ga.js\' type=\'text/javascript\'%3E%3C/script%3E"));\n')
yield '', join_(' </script>\n')
yield '', join_(' <script type="text/javascript">\n')
yield '', join_(' try {\n')
yield '', join_(' var pageTracker = _gat._getTracker("UA-4276204-5");\n')
yield '', join_(' pageTracker._trackPageview();\n')
yield '', join_(' } catch(err) {}\n')
yield '', join_(' </script>\n')
yield '', join_(' <script src="http://static.getclicky.com/js" type="text/javascript"></script>\n')
yield '', join_(' <script type="text/javascript">clicky.init(250663);</script>\n')
yield '', join_(' <noscript><p><img alt="Clicky" width="1" height="1" src="http://in.getclicky.com/250663ns.gif" /></p></noscript>\n')
yield '', join_(' </body>\n')
yield '', join_('</html>\n')
return __template__
export = CompiledTemplate(export(), 'apps/app/views/export.html')
# NOTE(review): this function is machine-generated (a web.py template compiled
# from apps/app/views/deleted.html). Prefer editing the .html source and
# recompiling over hand-editing the yield statements below.
def deleted():
    # Template-local helpers: per-render loop state plus the join/escape
    # utilities borrowed from a throwaway CompiledTemplate instance.
    loop = ForLoop()
    _dummy = CompiledTemplate(lambda: None, "dummy")
    join_ = _dummy._join
    escape_ = _dummy._escape
    def __template__ (result):
        # Static page header, emitted verbatim.
        yield '', join_('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
        yield '', join_('<html>\n')
        yield '', join_('    <head>\n')
        yield '', join_('        <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n')
        yield '', join_('        <meta name="description" content="StackPrinter - The Stack Exchange Printer Suite">\n')
        yield '', join_('        <meta name="keywords" content="printer friendly stackoverflow stackapps stack exchange">\n')
        yield '', join_('        <title>Deleted - StackPrinter</title> \n')
        yield '', join_('        <link rel="stylesheet" href="/stylesheets/search.css">\n')
        yield '', join_('        <link rel="stylesheet" href="/stylesheets/main.css">\n')
        yield '', join_('        <link rel="stylesheet" href="/stylesheets/jquery-ui.css">\n')
        yield '', join_('        <link rel="shortcut icon" type="image/x-icon" href="/favicon.ico">\n')
        yield '', join_('        <script type="text/javascript" src="/javascripts/jquery-1.4.2.min.js"></script>\n')
        yield '', join_('        <script type="text/javascript" src="/javascripts/jquery-ui.min.js"></script>\n')
        yield '', join_('        <script type="text/javascript" src="/javascripts/main.js"></script> \n')
        yield '', join_('    </head>\n')
        yield '', join_('    <body>\n')
        yield '', join_('        <div id="back">\n')
        yield '', join_('            <a href="/"><img title="Back to home" width="20px" height="20px" border="0" src="/images/icon_home.png"/></a>\n')
        yield '', join_('        </div>\n')
        yield '', join_('        <div id="title" class="main"><i>Deleted</i> questions</div>\n')
        yield '', join_('        <table cellpadding="2" cellspacing="0">\n')
        # Hard-coded example row (the FizzBuzz question) always shown first.
        yield '', join_('            <tr class="even">\n')
        yield '', join_('                <td class="printer">\n')
        yield '', join_('                    <a target="_blank" href="/questions/what-is-your-solution-to-the-fizzbuzz-problem.html"/>\n')
        yield '', join_('                        <img title="Printer-Friendly" src="images/printer_black.png"/>\n')
        yield '', join_('                    </a>\n')
        yield '', join_('                </td>\n')
        yield '', join_('                <td class="service_logo">\n')
        yield '', join_('                    <a target="_blank" href="http://stackoverflow.com"><img src="https://cdn.sstatic.net/Sites/stackoverflow/img/apple-touch-icon.png"/></a>\n')
        yield '', join_('                </td>\n')
        yield '', join_('                <td class="service_name">\n')
        yield '', join_('                    [Stack Overflow] \n')
        yield '', join_('                </td> \n')
        yield '', join_('                <td class="title">\n')
        yield '', join_('                    <a target="_blank" href="http://stackoverflow.com/questions/437"/>What is your solution to the FizzBuzz problem?</a><br>\n')
        yield '', join_('                    <span class="tag">\n')
        yield '', join_('                        [ language-agnostic interview-questions code-golf rosetta-stone fizzbuzz ] \n')
        yield '', join_('                    </span>\n')
        yield '', join_('                </td>\n')
        yield '', join_('                <td class="counter">\n')
        yield '', join_('                    [Static]\n')
        yield '', join_('                </td>\n')
        yield '', join_('            </tr>\n')
        # One table row per deleted question whose service is recognized.
        for question in loop.setup(result):
            if supported_services.info.get(question.service):
                yield '', join_(' ', '<tr class="', escape_(loop.parity, True), '">\n')
                yield '', join_(' ', '    <td class="printer">\n')
                yield '', join_(' ', '        <a target="_blank" href="/export?question=', escape_((question.question_id), True), '&format=HTML&service=', escape_((question.service), True), '&linktohome=false"/>\n')
                yield '', join_(' ', '        <img title="Printer-Friendly" src="images/printer_black.png"/>\n')
                yield '', join_(' ', '        </a>\n')
                yield '', join_(' ', '    </td>\n')
                yield '', join_(' ', '    <td class="service_logo">\n')
                yield '', join_(' ', '        <a target="_blank" href="', escape_((supported_services.info[question.service]['site_url']), True), '"><img src="', escape_((supported_services.info[question.service]['icon_url']), True), '"/></a>\n')
                yield '', join_(' ', '    </td>\n')
                yield '', join_(' ', '    <td class="service_name">\n')
                yield '', join_(' ', '        [', escape_((supported_services.info[question.service]['name']), True), '] \n')
                yield '', join_(' ', '    </td> \n')
                yield '', join_(' ', '    <td class="title">\n')
                yield '', join_(' ', '        <a target="_blank" href="', escape_(question.get_url(), True), '"/>', escape_(htmlquote(question.title), True), '</a><br>\n')
                yield '', join_(' ', '        <span class="tag">\n')
                yield '', join_(' ', '            [', escape_((", ".join([tag for tag in question.tags])), True), ']\n')
                yield '', join_(' ', '        </span>\n')
                yield '', join_(' ', '    </td>\n')
                yield '', join_(' ', '    <td class="counter">\n')
                yield '', join_(' ', '        [', escape_((question.counter), True), ']\n')
                yield '', join_(' ', '    </td>\n')
                yield '', join_(' ', '</tr>\n')
        # for/else: runs after the loop finishes; shows a placeholder when
        # the result set was empty.
        else:
            if len(result) == 0:
                yield '', join_(' ', '<p id="not_found">\n')
                yield '', join_(' ', '    No questions found\n')
                yield '', join_(' ', '</p>\n')
        yield '', join_('    </body>\n')
        yield '', join_('</html>\n')
    return __template__
# Bind the compiled generator under the template's public name.
deleted = CompiledTemplate(deleted(), 'apps/app/views/deleted.html')
def topvoted_tagged():
loop = ForLoop()
_dummy = CompiledTemplate(lambda: None, "dummy")
join_ = _dummy._join
escape_ = _dummy._escape
def __template__ (tagged, result, service, pagination):
yield '', join_('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
yield '', join_('<html>\n')
yield '', join_(' <head>\n')
yield '', join_(' <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n')
yield '', join_(' <meta name="description" content="StackPrinter - The Stack Exchange Printer Suite">\n')
yield '', join_(' <meta name="keywords" content="printer friendly stackoverflow stackapps stack exchange">\n')
yield '', join_(' <title>Top Voted - StackPrinter</title> \n')
yield '', join_(' <link rel="stylesheet" href="/stylesheets/search.css">\n')
yield '', join_(' <link rel="stylesheet" href="/stylesheets/main.css">\n')
yield '', join_(' <link rel="stylesheet" href="/stylesheets/jquery-ui.css">\n')
yield '', join_(' <link rel="shortcut icon" type="image/x-icon" href="/favicon.ico">\n')
yield '', join_(' <script type="text/javascript" src="/javascripts/jquery-1.4.2.min.js"></script>\n')
yield '', join_(' <script type="text/javascript" src="/javascripts/jquery-ui.min.js"></script>\n')
yield '', join_(' <script type="text/javascript" src="/javascripts/main.js"></script>\n')
yield '', join_(' </head>\n')
yield '', join_(' <body>\n')
yield '', join_(' <div id="back">\n')
yield '', join_(' <a href="/topvoted"><img src="/images/search.png"/></a>\n')
yield '', join_(' </div>\n')
yield '', join_(' <div id="title" class="main"><img src="', escape_((supported_services.info[service]['icon_url']), True), '"/>', escape_((supported_services.info[service]['name']), True), ' <i>Top Voted</i> questions </div>\n')
yield '', join_(' <p id="input">', escape_((tagged), True), '</p>\n')
yield '', join_(' \n')
yield '', join_(' <table cellpadding="2" cellspacing="0">\n')
for question in loop.setup(result):
yield '', join_(' ', '<tr class="', escape_(loop.parity, True), '">\n')
yield '', join_(' ', ' <td class="printer">\n')
yield '', join_(' ', ' <a target="_blank" href="/export?question=', escape_((question.question_id), True), '&format=HTML&service=', escape_((question.service), True), '&linktohome=false"/>\n')
yield '', join_(' ', ' <img title="Printer-Friendly" src="images/printer_black.png"/>\n')
yield '', join_(' ', ' </a> \n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="quicklook">\n')
yield '', join_(' ', ' <a onclick="javascript:quicklook(', escape_((question.question_id), True), ",'", escape_((question.service), True), '\');return false;" href="#"/>\n')
yield '', join_(' ', ' <img title="Quicklook" src="images/quicklook.png"/>\n')
yield '', join_(' ', ' </a>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="counters">\n')
yield '', join_(' ', ' [', escape_(question.get_votes(), True), ']<br>[', escape_(question.answer_count, True), ']\n')
yield '', join_(' ', ' </td> \n')
yield '', join_(' ', ' <td class="title">\n')
yield '', join_(' ', ' <a target="_blank" href="', escape_(question.url, True), '"/>', escape_(htmlquote(question.title), True), '</a><br>\n')
yield '', join_(' ', ' <span class="tag">\n')
yield '', join_(' ', ' [', escape_((", ".join([tag for tag in question.tags_list])), True), ']\n')
yield '', join_(' ', ' </span>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="date">\n')
yield '', join_(' ', ' [', escape_((question.creation_date.strftime('%Y-%m-%d')), True), ']\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', '</tr>\n')
if loop.last:
yield '', join_(' ', '</table>\n')
yield '', join_(' ', '<table id="pagination">\n')
yield '', join_(' ', ' <tr>\n')
yield '', join_(' ', ' <td class="pagination_found">Found: ', escape_(commify(pagination.total), True), '</td>\n')
yield '', join_(' ', ' <td class="pagination_page">\n')
if pagination.has_previous_entries():
yield '', join_(' ', ' <a href="/topvoted?service=', escape_((service), True), '&tagged=', escape_((urlquote(tagged)), True), '&page=', escape_((pagination.page-1), True), '&pagesize=', escape_((pagination.pagesize), True), '">« prev </a>\n')
for page in loop.setup(pagination.get_pretty_pagination()):
if page != -1:
yield '', join_(' ', '<a href="/topvoted?service=', escape_((service), True), '&tagged=', escape_((urlquote(tagged)), True), '&page=', escape_((page), True), '&pagesize=', escape_((pagination.pagesize), True), '">\n')
if page == pagination.page:
yield '', join_(' ', '|', escape_((page), True), '| \n')
else:
yield '', join_(' ', escape_(page, True), ' \n')
yield '', join_(' ', '</a>\n')
else:
yield '', join_(' ', escape_(pagination.separator, True), '\n')
if pagination.has_more_entries():
yield '', join_(' ', ' <a href="/topvoted?service=', escape_((service), True), '&tagged=', escape_((urlquote(tagged)), True), '&page=', escape_((pagination.page+1), True), '&pagesize=', escape_((pagination.pagesize), True), '"> next »</a>\n')
yield '', join_(' ', ' </td>\n')
yield '', join_(' ', ' <td class="pagination_pagesize">Pagesize: ', escape_(pagination.pagesize, True), '</td>\n')
yield '', join_(' ', ' </tr>\n')
yield '', join_(' ', '</table>\n')
| |
<reponame>deepneuralmachine/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from functools import partial
import os
from flax.training import checkpoints
from flax import optim
import jax
import jax.numpy as jnp
import jax.scipy as jscipy
import jax.random
from jax import vmap
from jax import custom_vjp
from tensorflow.io import gfile
def create_learning_rate_scheduler(
    factors="constant * linear_warmup * rsqrt_decay * rsqrt_hidden_size",
    base_learning_rate=0.5,
    warmup_steps=1000,
    decay_factor=0.5,
    steps_per_decay=20000,
    steps_per_cycle=100000,
    hidden_size=1024):
  """Builds a step -> learning-rate schedule from a '*'-separated factor spec.

  Each factor name in `factors` contributes one multiplicative term:
    * constant: the constant `base_learning_rate`.
    * linear_warmup: linear warmup until `warmup_steps`.
    * rsqrt_decay: divide by sqrt(max(step, warmup_steps)).
    * rsqrt_normalized_decay: rsqrt_decay rescaled by sqrt(warmup_steps).
    * decay_every: multiply by `decay_factor` every `steps_per_decay` steps.
    * cosine_decay: cyclic cosine decay with period `steps_per_cycle`.
    * rsqrt_hidden_size: divide by sqrt(hidden_size).

  Args:
    factors: a string with factors separated by '*' that defines the schedule.
    base_learning_rate: float, the starting constant for the lr schedule.
    warmup_steps: how many steps to warm up for in the warmup schedule.
    decay_factor: The amount to decay the learning rate by.
    steps_per_decay: How often to decay the learning rate.
    steps_per_cycle: Steps per cycle when using cosine decay.
    hidden_size: size of feature dimension in attention layers.

  Returns:
    A function mapping a step to the learning rate (scalar float32 array).

  Raises:
    ValueError: if `factors` names an unknown factor (raised when the
      returned function is called).
  """
  factor_names = [piece.strip() for piece in factors.split("*")]

  def step_fn(step):
    """Computes the learning rate for the given step."""
    rate = 1.0
    for factor in factor_names:
      if factor == "constant":
        rate *= base_learning_rate
      elif factor == "linear_warmup":
        rate *= jnp.minimum(1.0, step / warmup_steps)
      elif factor == "rsqrt_decay":
        rate /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif factor == "rsqrt_normalized_decay":
        # Normalized so the factor equals 1.0 at step == warmup_steps.
        rate *= jnp.sqrt(warmup_steps)
        rate /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif factor == "decay_every":
        rate *= (decay_factor**(step // steps_per_decay))
      elif factor == "cosine_decay":
        # Position within the current cycle, clamped at 0 during warmup.
        cycle_pos = jnp.maximum(
            0.0, (step - warmup_steps) / float(steps_per_cycle))
        cosine = 0.5 * (1.0 + jnp.cos(jnp.pi * (cycle_pos % 1.0)))
        rate *= jnp.maximum(0.0, cosine)
      elif factor == "rsqrt_hidden_size":
        rate /= jnp.sqrt(1.0 * hidden_size)
      else:
        raise ValueError("Unknown factor %s." % factor)
    return jnp.asarray(rate, dtype=jnp.float32)

  return step_fn
@partial(jnp.vectorize, excluded=(2,), signature="(m),(m)->()")
def permutation_invariant_accuracy(predictions, labels, k):
  """Accuracy of `predictions`, maximized over relabelings of the k classes.

  Useful for clustering-style outputs where class identities are arbitrary:
  every permutation of the label alphabet is tried and the best match wins.
  """
  perms = jnp.array(list(itertools.permutations(range(k))))
  # Apply each of the k! relabelings to the ground-truth labels.
  relabeled = jax.lax.map(lambda perm: perm[labels], perms)
  # Fraction of matches under each relabeling; keep the best.
  per_perm_acc = jax.lax.map(
      lambda candidate: jnp.mean(candidate == predictions), relabeled)
  return jnp.max(per_perm_acc)
def has_checkpoint(logdir):
  """Truthy when `logdir` exists and holds at least one checkpoint file.

  Returns False if the directory is missing, otherwise the (possibly empty)
  list of "checkpoint_*" matches — callers use this in boolean context.
  """
  if not gfile.isdir(logdir):
    return False
  return gfile.glob(os.path.join(logdir, "checkpoint_*"))
def load_parameters(logdir, init_params):
  """Restores model parameters from the latest checkpoint in `logdir`.

  Wraps `init_params` in a fresh Adam optimizer so the flax checkpoint
  structure matches, restores it, and returns only the parameters.
  Returns None when `logdir` holds no checkpoint.
  """
  if not has_checkpoint(logdir):
    print("No checkpoint found in %s" % logdir)
    return None
  print("Loading checkpoint from %s" % logdir)
  optimizer = optim.Adam().create(init_params)
  optimizer = checkpoints.restore_checkpoint(logdir, optimizer)
  print("Checkpoint loaded from step %d" % optimizer.state.step)
  return optimizer.target
def maybe_load_checkpoint(logdir, optimizer, clobber_checkpoint=False):
  """Restores `optimizer` from `logdir`, unless the checkpoint is clobbered.

  With clobber_checkpoint=True the log directory is wiped (if present) and
  the optimizer is returned untouched; otherwise any existing checkpoint is
  restored into it.
  """
  if clobber_checkpoint:
    if gfile.isdir(logdir):
      gfile.rmtree(logdir)
    return optimizer
  if has_checkpoint(logdir):
    print("Loading checkpoint from %s" % logdir)
    optimizer = checkpoints.restore_checkpoint(logdir, optimizer)
    print("Checkpoint loaded from step %d" % optimizer.state.step)
  return optimizer
def bernoulli_logpmf(logits, labels):
  """Bernoulli log pmf of binary `labels` given `logits`, summed over -1 axis.

  Per dimension this is -log(1 + exp(-logit)) for a true label and
  -log(1 + exp(logit)) for a false one, computed stably via logaddexp.
  """
  signs = jnp.where(labels, -1., 1.)
  per_dim_nll = jnp.logaddexp(0., signs * logits)
  return -jnp.sum(per_dim_nll, axis=-1)
@partial(jax.numpy.vectorize, signature="(n),()->()")
def categorical_logpmf(logits, label):
  """Log-probability of `label` under the softmax distribution of `logits`."""
  log_probs = jax.nn.log_softmax(logits)
  return log_probs[label]
def categorical_kl(p_probs, q_log_probs):
  """KL(p || q) between categoricals, with the 0 * log(0) terms taken as 0.

  `p_probs` are probabilities; `q_log_probs` are log-probabilities. Entries
  where p is exactly zero contribute nothing (the where() masks the
  -inf/nan produced by log(0)).
  """
  log_ratio = jnp.log(p_probs) - q_log_probs
  contributions = jnp.where(p_probs > 0, p_probs * log_ratio, p_probs)
  return jnp.sum(contributions)
@partial(jax.numpy.vectorize, signature="(n,k),(n)->(n)")
def permutation_invariant_categorical_logpmf(logits, labels):
  """Per-point categorical log-pmf under the best relabeling of the k classes.

  Evaluates the log-pmf of `labels` under every permutation of the class
  alphabet and returns the per-datapoint log-likelihoods of the single
  permutation with the highest total log-likelihood.
  """
  k = logits.shape[-1]
  permutations = jnp.array(list(itertools.permutations(range(k))))
  # permutations[labels] is [n, k!]; transpose to one label sequence per row.
  permuted_labels = permutations[labels].T
  # [k!, num_data_points]
  all_lls = jax.vmap(categorical_logpmf, in_axes=(None, 0))(
      logits, permuted_labels)
  # argmax is non-differentiable; stop_gradient treats the selected
  # permutation index as a constant w.r.t. autodiff.
  max_ll_ind = jax.lax.stop_gradient(jnp.argmax(jnp.sum(all_lls, axis=1)))
  return all_lls[max_ll_ind]
# --- Custom-VJP Euclidean distance -----------------------------------------
# The gradient of ||x - y|| is (x - y) / ||x - y||, which is NaN at x == y;
# the handwritten VJP below returns a zero gradient there instead.
@jax.custom_vjp
def l2_dist(x, y):
  """A custom l2 dist with 0 gradient when x=y."""
  return jnp.linalg.norm(x-y)
def fwd_l2_dist(x, y):
  # Forward rule: save the distance and the difference for the backward pass.
  dist = l2_dist(x, y)
  return dist, (dist, x-y)
def bwd_l2_dist(res, g):
  # Backward rule: grad wrt x is diff/dist (zeroed when dist ~ 0); wrt y,
  # the negation.
  dist, diff = res
  grd = jnp.where(dist <= 1e-8, jnp.zeros_like(diff), diff / dist)
  return (grd*g, -grd*g)
l2_dist.defvjp(fwd_l2_dist, bwd_l2_dist)
@partial(jax.numpy.vectorize, signature="(n,d),(m,d)->(n,m)")
def pair_dists(X, Y):
  """Matrix of l2 distances: out[i, j] = ||X[i] - Y[j]||."""
  dists_from_one_row = jax.vmap(l2_dist, in_axes=(None, 0))
  all_rows = jax.vmap(dists_from_one_row, in_axes=(0, None))
  return all_rows(X, Y)
def pair_vectors(vs):
  """Concatenated (v_i, v_j) vectors for every strict lower-triangle pair.

  For an [n, d] input, returns an [n*(n-1)/2, 2*d] array where each row is
  vs[i] ++ vs[j] for some i > j.
  """
  n = vs.shape[0]
  # tiled[i, j] == vs[i]; its transpose over the first two axes gives vs[j].
  tiled = jnp.tile(vs[:, jnp.newaxis, :], [1, n, 1])
  grid = jnp.concatenate(
      [tiled, jnp.transpose(tiled, axes=[1, 0, 2])], axis=2)
  return grid[jnp.tril_indices(n, k=-1)]
def subsample_pairs(key, vs, num_subsampled_pairs):
  """Samples a random subset of the row-pairs of `vs` without replacement.

  Returns the selected concatenated pairs and their indices into the full
  pair_vectors(vs) array.
  """
  all_pairs = pair_vectors(vs)
  total_pairs = all_pairs.shape[0]
  chosen = jax.random.choice(
      key, total_pairs, (num_subsampled_pairs,), replace=False)
  return all_pairs[chosen], chosen
def to_pairwise_preds(preds):
  """Converts per-item predictions to pairwise agreement indicators.

  Output entry is 1 where the two predictions in a pair are equal, else 0.
  """
  pairs = pair_vectors(preds[Ellipsis, jnp.newaxis])
  left, right = pairs[Ellipsis, 0], pairs[Ellipsis, 1]
  return 1 * (left == right)
@partial(jax.numpy.vectorize, signature="(n),(n)->()")
def binary_f1(y_pred, y_true):
  """Binary F1 score with class 1 as the positive class."""
  agree = (y_pred == y_true)
  disagree = (y_pred != y_true)
  tp = jnp.sum(agree * (y_true == 1))
  fp = jnp.sum(disagree * (y_true == 0))
  fn = jnp.sum(disagree * (y_true == 1))
  # F1 = tp / (tp + (fp + fn) / 2).
  return tp / (tp + 0.5 * (fp + fn))
@partial(jnp.vectorize, signature="(m),(m)->()")
def permutation_invariant_binary_f1(predictions, labels):
  """Binary F1, maximized over the two possible labelings of the classes."""
  direct = binary_f1(predictions, labels)
  # Swap class ids 0 <-> 1 and score again.
  flipped = binary_f1(jnp.array([1, 0])[predictions], labels)
  return jnp.maximum(jnp.mean(direct), jnp.mean(flipped))
@partial(custom_vjp, nondiff_argnums=(0,))
def fixed_point(f, a, x_init):
  """Computes the fixed point of f.
  Given a fixed point equation x* = f(a,x*), this function computes x*.
  Differentiable w.r.t. `a` via the implicit function theorem (see the
  custom VJP rules below); `x_init` receives a zero gradient.
  Args:
    f: The function to compute a fixed point of.
    a: The 'auxiliary' information to feed f.
    x_init: The initial x to use when computing the fixed point.
  Returns:
    x_*: the fixed point of f.
  """
  def cond_fun(carry):
    # Iterate until two successive iterates agree (rtol/atol 1e-4).
    x_prev, x = carry
    return jnp.logical_not(jnp.allclose(x_prev, x, rtol=1e-4, atol=1e-4))
  def body_fun(carry):
    # Advance: (x_{t-1}, x_t) -> (x_t, f(a, x_t)).
    _, x = carry
    return x, f(a, x)
  _, x_star = jax.lax.while_loop(cond_fun, body_fun, (x_init, f(a, x_init)))
  return x_star
def fixed_point_fwd(f, a, x_init):
  # Forward rule: run the solve and save (a, x*) as residuals.
  x_star = fixed_point(f, a, x_init)
  return x_star, (a, x_star)
def fixed_point_rev(f, res, x_star_bar):
  # Reverse rule (implicit function theorem): the adjoint vector
  # u = (I - df/dx*)^-T x*_bar is itself the fixed point of rev_iter,
  # then pulled back through f w.r.t. `a`.
  a, x_star = res
  _, vjp_a = jax.vjp(lambda a: f(a, x_star), a)
  a_bar, = vjp_a(fixed_point(partial(rev_iter, f),
                             (a, x_star, x_star_bar),
                             x_star_bar))
  # The fixed point does not depend on x_init, so its cotangent is zero.
  return a_bar, jnp.zeros_like(x_star)
def rev_iter(f, packed, u):
  # One adjoint iteration: u <- x*_bar + (df/dx*)^T u.
  a, x_star, x_star_bar = packed
  _, vjp_x = jax.vjp(lambda x: f(a, x), x_star)
  return x_star_bar + vjp_x(u)[0]
fixed_point.defvjp(fixed_point_fwd, fixed_point_rev)
@partial(custom_vjp, nondiff_argnums=(0,))
def fixed_point2(f, a, x_init):
  """Computes the fixed point of f.
  Given a fixed point equation x* = f(a,x*), this function computes x*.
  Variant of `fixed_point` with tighter tolerances and a dense-Jacobian
  backward pass (see fixed_point2_rev) instead of an iterative adjoint solve.
  Args:
    f: The function to compute a fixed point of.
    a: The 'auxiliary' information to feed f.
    x_init: The initial x to use when computing the fixed point.
  Returns:
    x_*: the fixed point of f.
  """
  def cond_fun(carry):
    # Tighter convergence test than fixed_point (rtol=1e-5, atol=1e-8).
    x_prev, x = carry
    return jnp.logical_not(jnp.allclose(x_prev, x, rtol=1e-5, atol=1e-8))
  def body_fun(carry):
    _, x = carry
    return x, f(a, x)
  _, x_star = jax.lax.while_loop(cond_fun, body_fun, (x_init, f(a, x_init)))
  return x_star
def fixed_point2_fwd(f, a, x_init):
  """Custom vector-jacobian product forward function.
  Runs fixed_point forward and saves values useful for computing the
  vector-jacobian product in the backward pass.
  """
  x_star = fixed_point2(f, a, x_init)
  return x_star, (a, x_star)
def fixed_point2_rev(f, res, x_star_bar):
  """Custom VJP backward function.
  Materializes the full Jacobians of f at (a, x*) with jacrev and solves the
  (I - df/dx*) linear system directly, so it only suits small output
  dimensions (x_star is assumed 1-D: f_output_dim = x_star.shape[0]).
  """
  a, x_star = res
  d_xstar = jax.jacrev(f, argnums=1)(a, x_star)
  d_a = jax.jacrev(f, argnums=0)(a, x_star)
  f_output_dim = x_star.shape[0]
  # Remember each leaf's Jacobian shape so the VJPs can be restored to the
  # shape of the corresponding leaf of `a` at the end.
  da_shapes = jax.tree_util.tree_map(lambda x: x.shape, d_a)
  reshaped_da = jax.tree_util.tree_map(
      lambda x: jnp.reshape(x, [f_output_dim, -1]), d_a)
  # dx*/da = (I - df/dx*)^-1 df/da, leaf by leaf.
  pre_inv = jnp.eye(x_star.shape[0]) - d_xstar
  jacs = jax.tree_util.tree_map(lambda x: jnp.linalg.solve(pre_inv, x),
                                reshaped_da)
  vjps = jax.tree_util.tree_map(lambda j: jnp.matmul(x_star_bar, j), jacs)
  # NOTE(review): tree_multimap is deprecated (removed in newer JAX); the
  # modern equivalent is jax.tree_util.tree_map with two tree arguments —
  # confirm the pinned JAX version before upgrading.
  reshaped_vjps = jax.tree_util.tree_multimap(
      lambda x, s: jnp.reshape(x, s[1:]), vjps, da_shapes)
  # x_init gets a zero cotangent; the fixed point is independent of it.
  return reshaped_vjps, jnp.zeros_like(x_star)
fixed_point2.defvjp(fixed_point2_fwd, fixed_point2_rev)
def sinkhorn(C, log_w_p, log_w_q, key, alpha=0.01):
  """Uses sinkhorn iterations to solve an optimal transport problem.

  Computes the optimal cost and transport plan for moving mass from an
  atomic measure p to another atomic measure q. The duals are found as the
  fixed point of a log-space block-coordinate update, so the result is
  differentiable through `fixed_point`'s implicit-function-theorem VJP.

  Args:
    C: The cost matrix, C_ij contains the cost of moving mass from the ith
      atom of p to the jth atom of q (shape [len(log_w_p), len(log_w_q)]).
    log_w_p: The log weights of the atoms of p.
    log_w_q: The log weights of the atoms of q.
    key: JAX PRNGKey for intializing the dual variables.
    alpha: The stepsize of the Sinkhorn iteration.
  Returns:
    cost: The optimal cost
    log_pi: The log of the transport plan.
  """
  def sinkhorn_step(a, nu):
    # One dual update: refresh lambda (q-side dual) from nu, then nu
    # (p-side dual) from the new lambda, all in log space for stability.
    log_w_p, log_w_q, C = a
    pre_lambda = jscipy.special.logsumexp(
        (-C - nu[Ellipsis, jnp.newaxis]) / alpha, axis=0)
    new_lambda = alpha * (pre_lambda - log_w_q - 1.)
    pre_nu = jscipy.special.logsumexp(
        (-C - new_lambda[jnp.newaxis, Ellipsis]) / alpha, axis=1)
    new_nu = alpha * (pre_nu - log_w_p - 1.)
    return new_nu  # - jnp.amax(new_nu)
  # BUG FIX: nu is the p-side dual — it broadcasts against the rows of C and
  # every update returns a vector of length len(log_w_p). It was previously
  # initialized with length log_w_q.shape[0], which breaks for non-square C;
  # for square problems this change is a no-op (same key, same shape).
  nu_0 = jax.random.normal(key, [log_w_p.shape[0]])
  nu_star = fixed_point(sinkhorn_step, (log_w_p, log_w_q, C), nu_0)
  # Recover the q-side dual from nu*, then assemble the log transport plan.
  pre_lambda = jscipy.special.logsumexp(
      (-C - nu_star[Ellipsis, jnp.newaxis]) / alpha, axis=0)
  lambda_star = alpha * (pre_lambda - log_w_q - 1.)
  log_pi = (-lambda_star[jnp.newaxis, Ellipsis] - nu_star[Ellipsis, jnp.newaxis] -
            C) / alpha - 1
  cost = jnp.sum(C * jnp.exp(log_pi))
  return cost, log_pi
def atomic_sinkhorn(p_locs, log_w_p, q_locs, log_w_q, key, alpha=0.01):
"""Solves an optimal transport problem between two atomic measures.
p and q are assumed to be weighted atomic | |
<reponame>future-haus/django-inbox<filename>tests/test_maintenance.py
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from faker import Faker
from freezegun import freeze_time
from inbox import settings as inbox_settings
from inbox.models import Message
from inbox.test.utils import InboxTestCaseMixin
from inbox.utils import process_new_messages, process_new_message_logs
User = get_user_model()
Faker.seed()
fake = Faker()
class MaintenanceTestCase(InboxTestCaseMixin, TestCase):
    # Populated per-test in setUp().
    user = None
    def setUp(self):
        """Creates a fresh verified user and resets the cached inbox config."""
        super().setUp()
        email = fake.ascii_email()
        # email_verified_on is set to today — presumably so the user counts
        # as email-verified for message delivery; confirm against inbox docs.
        self.user = User.objects.create(email=email, email_verified_on=timezone.now().date(), username=email)
        self.user.device_group.notification_key = 'fake-notification_key'
        self.user.device_group.save()
        # get_config() is lru-cached; clear so per-test INBOX_CONFIG
        # overrides take effect.
        inbox_settings.get_config.cache_clear()
    def tearDown(self):
        """Clears the cached inbox config so overrides don't leak between tests."""
        super().tearDown()
        inbox_settings.get_config.cache_clear()
    def test_maintenance_max_age(self):
        """Messages older than PER_USER_MESSAGES_MAX_AGE are purged when new
        messages are processed (both from .live() and from the table)."""
        INBOX_CONFIG = settings.INBOX_CONFIG.copy()
        INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
        with self.settings(INBOX_CONFIG=INBOX_CONFIG):
            # Baseline: the user starts with no messages at all.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 0)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 0)
            with freeze_time('2020-01-01'):
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 1)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 1)
            with freeze_time('2020-01-02'):
                # Create another, should be two
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 2)
            with freeze_time('2020-01-07'):
                # Create another, should still be two since the first one is more than 5 days max age
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 2)
    def test_maintenance_max_age_with_message_id(self):
        """Like test_maintenance_max_age, but for messages created with an
        explicit message_id: aged-out messages drop out of .live() while the
        rows stay in the table (final counts: 2 live vs 3 total)."""
        INBOX_CONFIG = settings.INBOX_CONFIG.copy()
        INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
        with self.settings(INBOX_CONFIG=INBOX_CONFIG):
            # Baseline: no messages yet.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 0)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 0)
            with freeze_time('2020-01-01'):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 1)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 1)
            with freeze_time('2020-01-02'):
                # Create another, should be two
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 2)
            with freeze_time('2020-01-07'):
                # Create another, should still be two since the first one is more than 5 days max age
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                # The aged-out message is hidden, not hard-deleted, when it
                # carries a message_id.
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 3)
    def test_maintenance_max_age_min_count(self):
        """PER_USER_MESSAGES_MIN_COUNT keeps over-age messages around until
        enough newer ones exist to satisfy the minimum."""
        INBOX_CONFIG = settings.INBOX_CONFIG.copy()
        INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
        INBOX_CONFIG['PER_USER_MESSAGES_MIN_COUNT'] = 3
        with self.settings(INBOX_CONFIG=INBOX_CONFIG):
            # Baseline: no messages yet.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 0)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 0)
            with freeze_time('2020-01-01'):
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 1)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 1)
            with freeze_time('2020-01-02'):
                # Create another, should be two
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 2)
            with freeze_time('2020-01-07'):
                # Create another, should be 3 because the min count saves it
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 3)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 3)
                # Creating another should keep it at 3 though because it will delete that oldest one now
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 3)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 3)
    def test_maintenance_max_age_min_count_with_message_id(self):
        """Min-count behavior for messages with an explicit message_id:
        the over-min over-age message leaves .live() but stays in the table
        (final counts: 3 live vs 4 total)."""
        INBOX_CONFIG = settings.INBOX_CONFIG.copy()
        INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
        INBOX_CONFIG['PER_USER_MESSAGES_MIN_COUNT'] = 3
        with self.settings(INBOX_CONFIG=INBOX_CONFIG):
            # Baseline: no messages yet.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 0)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 0)
            with freeze_time('2020-01-01'):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 1)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 1)
            with freeze_time('2020-01-02'):
                # Create another, should be two
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 2)
            with freeze_time('2020-01-07'):
                # Create another, should be 3 because the min count saves it
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 3)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 3)
                # Creating another should keep it at 3 though because it will delete that oldest one now
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 3)
                # The displaced message is retained in the table because it
                # has a message_id.
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 4)
    def test_maintenance_max_age_min_count_max_count(self):
        """PER_USER_MESSAGES_MAX_COUNT caps the mailbox size (here at 6) on
        top of the max-age and min-count rules."""
        INBOX_CONFIG = settings.INBOX_CONFIG.copy()
        INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
        INBOX_CONFIG['PER_USER_MESSAGES_MIN_COUNT'] = 3
        INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
        with self.settings(INBOX_CONFIG=INBOX_CONFIG):
            # Baseline: no messages yet.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 0)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 0)
            with freeze_time('2020-01-01'):
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 1)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 1)
            with freeze_time('2020-01-02'):
                # Create another, should be two
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 2)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 2)
            with freeze_time('2020-01-07'):
                # Create another, should be 3 because the min count saves it
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 3)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 3)
                # Creating another should keep it at 3 though because it will delete that oldest one now
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 3)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 3)
                # Creating 3 more should move it to 6 because the oldest is only 5 days old and there are only 6 total
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 6)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 6)
                # Creating another should keep it at 6 because max is 6 and there's no min age defined
                Message.objects.create(user=self.user, key='default', fail_silently=False)
                process_new_messages()
                process_new_message_logs()
                messages = Message.objects.filter(user=self.user).live()
                self.assertEqual(len(messages), 6)
                messages = Message.objects.filter(user=self.user)
                self.assertEqual(len(messages), 6)
def test_maintenance_max_age_min_count_max_count_with_message_id(self):
    """Maintenance with MAX_AGE=5d, MIN_COUNT=3 and MAX_COUNT=6 when every
    message carries an explicit ``message_id``.

    Mirrors the plain max_age/min_count/max_count test, but here the
    unfiltered count keeps growing (4, 7, 8) while ``live()`` stays capped
    exactly as before -- presumably messages created with a ``message_id``
    are retired from the live set rather than hard-deleted.
    TODO(review): confirm against the maintenance implementation.
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    INBOX_CONFIG['PER_USER_MESSAGES_MIN_COUNT'] = 3
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        # Sanity check: the user starts with no messages at all.
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)
        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)
        with freeze_time('2020-01-02'):
            # Create another, should be two
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 2)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 2)
        with freeze_time('2020-01-07'):
            # Create another, should be 3 because the min count saves it
            # (the 01-01 message is past MAX_AGE but MIN_COUNT=3 keeps it).
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 3)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 3)
            # Creating another should keep it at 3 though because it will delete that oldest one now
            # NOTE(review): "keep it at 3" refers to live(); the unfiltered
            # count grows to 4 because the aged-out row is only retired.
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 3)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 4)
            # Creating 3 more should move it to 6 because the oldest is only 5 days old and there are only 6 total
            for i in range(3):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 7)
            # Creating another should keep it at 6 because max is 6 and there's no min age defined
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 8)
def test_maintenance_max_age_min_count_max_count_min_age(self):
    """Maintenance with MAX_AGE=5d, MIN_COUNT=3, MAX_COUNT=6 and MIN_AGE=3d.

    The interesting interaction: once messages are younger than MIN_AGE,
    they may not be purged even when the total exceeds MAX_COUNT, so the
    live count is allowed to grow past 6 (to 7, then 8).
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    INBOX_CONFIG['PER_USER_MESSAGES_MIN_COUNT'] = 3
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    INBOX_CONFIG['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        # Sanity check: the user starts with no messages at all.
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)
        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)
        with freeze_time('2020-01-05'):
            # Create another, should be two
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 2)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 2)
        with freeze_time('2020-01-07'):
            # Create another, should be 3 because the min count saves it
            # (the 01-01 message is past MAX_AGE but MIN_COUNT=3 keeps it).
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 3)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 3)
            # Creating another should keep it at 3 though because it will delete that oldest one now
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 3)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 3)
            # Creating 3 more should move it to 6 because the oldest is only 2 days old and there are only 6 total
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 6)
            # Creating 1 more should move it to 7 because the oldest is only 2 days old and there is a min age
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 7)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 7)
            # Creating 1 more should add 1 more because the oldest is only 2 days old and there is a min age
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 8)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 8)
def test_maintenance_max_age_min_count_max_count_min_age_with_message_id(self):
INBOX_CONFIG = settings.INBOX_CONFIG.copy()
INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
INBOX_CONFIG['PER_USER_MESSAGES_MIN_COUNT'] = 3
INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
INBOX_CONFIG['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)
with self.settings(INBOX_CONFIG=INBOX_CONFIG):
messages = Message.objects.filter(user=self.user).live()
self.assertEqual(len(messages), 0)
messages = Message.objects.filter(user=self.user)
self.assertEqual(len(messages), 0)
with freeze_time('2020-01-01'):
Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
process_new_messages()
process_new_message_logs()
messages = Message.objects.filter(user=self.user).live()
self.assertEqual(len(messages), 1)
messages = Message.objects.filter(user=self.user)
self.assertEqual(len(messages), 1)
with freeze_time('2020-01-05'):
# Create another, should be two
Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
process_new_messages()
process_new_message_logs()
messages = Message.objects.filter(user=self.user).live()
self.assertEqual(len(messages), 2)
messages = Message.objects.filter(user=self.user)
self.assertEqual(len(messages), 2)
with freeze_time('2020-01-07'):
# Create another, should be 3 because the min count saves it
Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
process_new_messages()
process_new_message_logs()
messages = Message.objects.filter(user=self.user).live()
self.assertEqual(len(messages), 3)
messages = Message.objects.filter(user=self.user)
self.assertEqual(len(messages), 3)
# Creating another should keep it at 3 though | |
# Copyright (c) 2013 <NAME>, <EMAIL>.lucchese at gmail.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
import math
import numpy
import matplotlib
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from math2D import *
from target import Target
class TargetData():
    """Record of a single target observation made by a camera.

    Bundles the observed target's id, position and velocity (both
    ``Point2D``), its heading ``angle`` (must already be normalized) and
    the id of the coverage sub-area where it was detected.
    """

    def __init__(self, target_id, pos, vel, angle, area_id):
        # Position and velocity must both be 2-D points.
        assert isinstance(pos, Point2D)
        assert isinstance(vel, Point2D)
        # Heading must be a real number, supplied in normalized form.
        assert numeric_real(angle)
        assert angle == normalize_angle(angle)
        # An empty area id would make the detection untraceable.
        assert area_id
        self.id, self.pos, self.vel = target_id, pos, vel
        self.angle, self.area_id = angle, area_id
class Camera():
    """Stationary sensor with a circular-sector field of view.

    The camera sits at ``orig``, looks along the global angle ``rot`` and
    covers an arc of ``fullfov`` radians out to distance ``radius``.
    Optional ``bpoints`` define bisecting lines that split the coverage
    cone into lettered sub-areas; optional ``fovpoints`` are used only for
    plotting the field-of-view boundary.

    NOTE(review): ``plot`` calls ``self._custom_patches()``, which is only
    defined by subclasses (e.g. ``CameraFixed``) -- instantiate a subclass.
    """

    def __init__(self, orig, rot, radius, fullfov, camid="", bpoints=None, fovpoints=None):
        assert isinstance(orig, Point2D)
        assert numeric_real(rot)
        assert numeric_real(radius)
        assert radius > 0.
        assert numeric_real(fullfov)
        assert fullfov > 0. and fullfov < math.pi
        # FIX: the original used mutable default arguments (bpoints=[],
        # fovpoints=[]), which are shared across every Camera instance.
        bpoints = [] if bpoints is None else bpoints
        fovpoints = [] if fovpoints is None else fovpoints
        self._targetdata = {}       # detections of the current step, by target id
        self._last_targetdata = {}  # detections of the previous step
        self._dt = None             # time delta of the current step (set by subclasses)
        self._orig = orig
        self._rot = normalize_angle(rot)
        # low/high edges of the coverage cone, as normalized global angles
        self._rotl = normalize_angle(rot - fullfov/2.)
        self._roth = normalize_angle(rot + fullfov/2.)
        self._radius = radius
        self._radius2 = radius**2   # cached squared radius for cheap range tests
        self._fullfov = normalize_angle(fullfov)
        self._id = camid
        self._bangles = []
        self._bpoints = bpoints
        self._compute_bangles(bpoints)
        self._fovpoints = fovpoints
        # plot primitives
        self._fullcoverage_patch = self._coverage_patch(rot, fullfov, radius, (0.65,0.65,0.65))
        self._patches = []
        self._lines = []
        self._badge = self._camera_badge()

    def _angle(self, p):
        """Return the angle of point *p* in global coordinates, as seen from
        the camera origin."""
        assert isinstance(p, Point2D)
        return Line2D(self._orig, p).angle()

    def _append_bangle(self, angle):
        """Register a bisecting-line angle, keeping ``self._bangles`` in the
        traversal order expected by ``_angle_to_areaid``."""
        assert angle == normalize_angle(angle)
        # check if the bisecting line actually bisects the coverage area
        if not normalized_angle_in_range(angle, self._rotl, self._roth):
            return
        bangles = self._bangles
        bangles.append(angle)
        # reorder the list of bangles to make detection easier later on
        if self._rotl >= 0 and self._roth <= 0:
            # the cone wraps across +/-pi: sort positive and negative
            # angles separately so traversal order matches sweep order
            posi = []
            nega = []
            for a in bangles:
                if a >= 0:
                    posi.append(a)
                else:
                    nega.append(a)
            bangles = sorted(posi) + sorted(nega)
        else:
            bangles = sorted(bangles)
        # purge bisecting lines with too similar angles
        # NOTE(review): disabled debug/cleanup pass, kept for reference
        if 0:
            done = False
            while not done:
                done = True
                for i in xrange(0, len(bangles)-1):
                    anglel = bangles[i]
                    angleh = bangles[i+1]
                    # pop bangles that are less than 2 degrees apart
                    union_rangeh = normalize_angle(anglel + math.pi/180.*2.)
                    if normalized_angle_in_range(angleh, anglel, union_rangeh):
                        bangles.pop(i+1)
                        done = False
                        break
        self._bangles = bangles

    def _compute_bangles(self, bpoints):
        """Convert each bisecting point into an angle and register it."""
        if not bpoints:
            return
        for p in bpoints:
            angle = self._angle(p)
            self._append_bangle(angle)

    def _camera_badge(self):
        """Build the plot marker drawn at the camera position."""
        # A rotated-square PathPatch variant existed here but was dead code
        # (computed and discarded); the circle below is what is drawn.
        return matplotlib.patches.Circle(self._orig.tuple(), radius=.2, alpha=1, facecolor=(1,1,1))

    def _compute_blines(self):
        """Build the plot primitives for the bisecting lines."""
        # create the bisecting lines
        self._lines = []
        clr_blines = (1,0,0)
        for angle in self._bangles:
            p1 = self._orig
            p2 = Point2D(self._radius, 0)
            p2 = rotate_point(p2, angle)
            p2.x += p1.x
            p2.y += p1.y
            # NOTE(review): line drawing is deliberately disabled
            if 0:
                self._lines.append(matplotlib.lines.Line2D([p1.x, p2.x], [p1.y, p2.y], color=clr_blines, linestyle=':'))

    def _update_patches(self):
        """Rebuild badge, coverage and target patches (legacy path).

        FIX: this method referenced undefined attributes (``self._posx``,
        ``self._posy``, ``self._fov``) and would raise AttributeError if
        ever called; it now uses ``self._orig`` / ``self._fullfov``.
        NOTE(review): it appears superseded by ``_camera_badge`` /
        ``_coverage_patch`` / ``_compute_blines``, and assigning
        ``self._coverage_patch`` here shadows the method of the same name.
        """
        # namespace shortcut for the codes below
        Path = mpath.Path
        # build the camera badge patch (the discarded PathPatch variant was removed)
        self._badge_patch = matplotlib.patches.Circle(self._orig.tuple(), radius=.2, alpha=1, facecolor=(1,0.75,0.75))
        # build the camera coverage patch
        r = self._radius
        w = self._fullfov
        nr_segments = 20
        points = [(0,0)]
        for i in numpy.arange(0, nr_segments+1):
            theta = -w/2. + i*w/float(nr_segments)
            points.append((math.cos(theta), math.sin(theta)))
        points.append((0,0))
        points = numpy.array(points)
        # rotate the cone patch points and translate them to the camera origin
        verts = transform_points(r*points, self._rot, self._orig.array())
        codes = [Path.MOVETO] + [Path.LINETO]*(nr_segments+1) + [Path.CLOSEPOLY]
        self._coverage_patch = mpatches.PathPatch(mpath.Path(verts, codes), alpha=0.10, facecolor=(0.25,0.25,0.25), linewidth=0.5)
        verts = transform_points(0.5*points, self._rot, self._orig.array())
        codes = [Path.MOVETO] + [Path.LINETO]*(nr_segments+1) + [Path.CLOSEPOLY]
        self._target_patch = mpatches.PathPatch(mpath.Path(verts, codes), alpha=0.45, facecolor=(1,0.2,0.2), linewidth=0.5)
        # create the bisecting lines
        self._plot_lines = []
        clr_blines = (1,0,0)
        for angle in self._bangles:
            p1 = self._orig
            p2 = Point2D(self._radius, 0)
            p2 = rotate_point(p2, angle)
            p2.x += p1.x
            p2.y += p1.y
            if 0:
                self._plot_lines.append(matplotlib.lines.Line2D([p1.x, p2.x], [p1.y, p2.y], color=clr_blines, linestyle=':'))

    def _coverage_patch(self, rot, arc, radius, color, alpha=0.1):
        """Return a PathPatch approximating the coverage sector.

        The sector points along *rot*, spans *arc* radians out to *radius*,
        and is approximated by ``nr_segments`` chords.
        """
        assert numeric_real(alpha)
        assert alpha > 0 and alpha <= 1
        # namespace shortcut for the codes below
        Path = mpath.Path
        # build the camera coverage patch
        nr_segments = 20
        points = [(0,0)]
        for i in numpy.arange(0, nr_segments+1):
            theta = -arc/2. + i*arc/float(nr_segments)
            points.append((math.cos(theta), math.sin(theta)))
        points.append((0,0))
        points = numpy.array(points)
        # rotate the cone patch points and translate them to the camera origin
        verts = transform_points(radius*points, rot, self._orig.array())
        codes = [Path.MOVETO] + [Path.LINETO]*(nr_segments+1) + [Path.CLOSEPOLY]
        return mpatches.PathPatch(mpath.Path(verts, codes), alpha=alpha, facecolor=color, linewidth=0.5)

    def origin(self):
        """Return the camera origin as a Point2D."""
        return self._orig

    def _angle_to_areaid(self, angle):
        """Map a (normalized, global) angle to a coverage sub-area id.

        Sub-areas are delimited by the bisecting lines; ids are the camera
        id plus a letter suffix ('a', 'b', ...) or '[n]' past 'y'.
        """
        assert numeric_real(angle)
        assert angle == normalize_angle(angle)
        # determine the exact coverage sub-area id
        area_id = self._id
        if len(self._bangles):
            anglel = self._rotl
            area_idx = 0
            for i in xrange(0, len(self._bangles)):
                angleh = self._bangles[i]
                if normalized_angle_in_range(angle, anglel, angleh):
                    area_idx = i + 1
                    break
                anglel = angleh
            if not area_idx:
                # if we haven't found it yet then it must be in the last area
                area_idx = len(self._bangles) + 1
            # use letters if possible, use numbers otherwise
            if area_idx < 26:
                area_id += chr(ord('a') + area_idx - 1)
            else:
                area_id += '[%d]' % area_idx
        return area_id

    def detection_data(self):
        """Return the TargetData records of the current step."""
        return self._targetdata.values()

    def plot(self, axis):
        """Draw the camera (fov lines, subclass patches, badge, label) onto
        the given matplotlib *axis*."""
        assert self._badge
        assert self._fullcoverage_patch
        # field-of-view boundary lines
        for p in self._fovpoints:
            axis.add_line(matplotlib.lines.Line2D([self._orig.x, p.x], [self._orig.y, p.y], color=(0.25,0.25,0.25,0.5), zorder=-100))
        # patches supplied by the subclass (coverage cones, detections, ...)
        for patch in self._custom_patches():
            axis.add_patch(patch)
        axis.add_patch(self._badge)
        if self._id:
            axis.text(self._orig.x, self._orig.y, self._id,
                      verticalalignment='center', horizontalalignment='center', family='sans-serif',
                      color='black', fontsize=15)
        return

    def _track_target(self, targetdata):
        """Store *targetdata* for this step, estimating its velocity from the
        position recorded at the previous step (if any).

        FIX: the original read the nonexistent ``targetdata.x``/``.y``
        (positions live in ``targetdata.pos``) and a bare ``except:``
        silently swallowed the AttributeError, so velocity was never
        updated.  It now reads ``pos`` and only treats a missing previous
        record (first sighting) as the no-op case.
        """
        assert isinstance(targetdata, TargetData)
        _id = targetdata.id
        try:
            oldpos = self._last_targetdata[_id].pos
        except KeyError:
            # first sighting of this target: no previous position to diff
            pass
        else:
            if self._dt:
                targetdata.vel.x = (targetdata.pos.x - oldpos.x) / self._dt
                targetdata.vel.y = (targetdata.pos.y - oldpos.y) / self._dt
        assert _id not in self._targetdata
        self._targetdata[_id] = targetdata
class CameraFixed(Camera):
def __init__(self, orig, rot, radius, fov, camid="", bpoints=[], fovpoints=[]):
Camera.__init__(self, orig, rot, radius, fov, camid, bpoints, fovpoints)
self._compute_blines()
self._dt = None
def _custom_patches(self):
return []
def step(self, time, dt):
self._dt = dt
self._last_targetdata = self._targetdata
self._targetdata = {}
return
def detect(self, target, walls):
assert isinstance(target, Target)
assert walls
pos = target.pos()
line = Line2D(self._orig, pos)
# random detection artifacts
# pre-refactoring <- this code must be checked for good
if 0:#numpy.random.randint(50) == 0:
area_id = self._id
if len(self._bangles):
area_idx = numpy.random.randint(1,len(self._bangles)+1)
# use letters for up to 9 bisecting lines, use numbers otherwise
if area_idx < 10:
area_id += chr(ord('a') + area_idx -1)
else:
area_id += '.%d' % area_idx
#print 'area_id', area_id
self._detected.append(TargetData(target.id(), pos, Point2D(0,0), 0, area_id))
return
if line.norm2() > self._radius2:
# the target is more distant than | |
__str__(self):
return str(self.id)
def clean(self):
return
# Permissions
@staticmethod
@allow_staff_or_superuser
@authenticated_users
def has_read_permission(request):
return True
@allow_staff_or_superuser
@authenticated_users
def has_object_read_permission(self, request):
return True
@staticmethod
@allow_staff_or_superuser
@authenticated_users
def has_write_permission(request):
return any([
request.user.person.officers.filter(
office__gt=300,
status__gt=0,
)
])
@allow_staff_or_superuser
@authenticated_users
def has_object_write_permission(self, request):
return any([
all([
self.group.officers.filter(
person__user=request.user,
status__gt=0,
),
self.mc_pk == None,
]),
])
# Transitions
@fsm_log_by
@fsm_log_description
@transition(field=status, source='*', target=STATUS.active)
def activate(self, description=None, *args, **kwargs):
"""Activate the Member."""
return
@fsm_log_by
@fsm_log_description
@transition(field=status, source='*', target=STATUS.inactive)
def deactivate(self, description=None, *args, **kwargs):
"""Deactivate the Member."""
return
class Officer(TimeStampedModel):
    """An office (role) held by a Person within a Group.

    Rows mirrored from the membership system carry ``mc_pk``; such rows
    are treated as externally owned and refuse local object writes.
    """

    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
    )
    STATUS = Choices(
        (-10, 'inactive', 'Inactive',),
        (0, 'new', 'New',),
        (10, 'active', 'Active',),
    )
    status = FSMIntegerField(
        help_text="""DO NOT CHANGE MANUALLY unless correcting a mistake.  Use the buttons to change state.""",
        choices=STATUS,
        default=STATUS.new,
    )
    # Offices are grouped by organizational level; the numeric ranges are
    # meaningful: permission checks below compare against them
    # (e.g. office__lt=200 selects International offices).
    OFFICE = Choices(
        ('International', [
            (100, 'scjc_chair', "SCJC Chair"),
            (110, 'scjc_past', "SCJC Chair Past"),
            (120, 'scjc_ca', "SCJC CA"),
            (130, 'scjc_mus', "SCJC MUS"),
            (140, 'scjc_per', "SCJC PER"),
            (150, 'scjc_sng', "SCJC SNG"),
            (160, 'scjc_chart', "SCJC Chart"),
            (170, 'scjc_admin', "SCJC Admin"),
            (230, 'judge_ca', "JUDGE CA"),
            (240, 'judge_mus', "JUDGE MUS"),
            (250, 'judge_per', "JUDGE PER"),
            (260, 'judge_sng', "JUDGE SNG"),
            (270, 'candidate_ca', "CANDIDATE CA"),
            (280, 'candidate_mus', "CANDIDATE MUS"),
            (290, 'candidate_per', "CANDIDATE PER"),
            (295, 'candidate_sng', "CANDIDATE SNG"),
        ]),
        ('District', [
            (210, 'drcj', "DRCJ"),
            (220, 'drcj_asst', "DRCJ Assistant"),
        ]),
        ('Chapter', [
            (310, 'chapter_pres', "CPRES"),
            (320, 'chapter_sec', "CSEC"),
        ]),
        ('Chorus', [
            (330, 'chorus_dir', "KDIR"),
            (340, 'chorus_asst', "KASS"),
            (350, 'chorus_man', "KMAN"),
        ]),
        ('Quartet', [
            (410, 'quartet_admin', "QADM"),
        ]),
    )
    office = models.IntegerField(
        choices=OFFICE,
    )
    start_date = models.DateField(
        null=True,
        blank=True,
    )
    end_date = models.DateField(
        null=True,
        blank=True,
    )
    # Primary key of the corresponding record in the external membership
    # system; non-null means the row is mirrored, not locally owned.
    mc_pk = models.CharField(
        null=True,
        blank=True,
        max_length=36,
        unique=False,
        db_index=True,
    )

    # FKs
    person = models.ForeignKey(
        'bhs.person',
        related_name='officers',
        on_delete=models.CASCADE,
    )
    group = models.ForeignKey(
        'bhs.group',
        related_name='officers',
        on_delete=models.CASCADE,
    )

    # Relations
    statelogs = GenericRelation(
        StateLog,
        related_query_name='officers',
    )

    objects = OfficerManager()

    # Properties
    @cached_property
    def is_mc(self):
        """True when this row is mirrored from the membership system."""
        return bool(self.mc_pk)

    # Internals
    class Meta:
        unique_together = (
            ('group', 'person', 'office'),
        )
        verbose_name_plural = 'Officers'

    class JSONAPIMeta:
        resource_name = "officer"

    def __str__(self):
        return str(self.id)

    def clean(self):
        pass
        # NOTE(review): parked validation, kept for reference.
        # if self.group.kind != self.group.KIND.vlq:
        #     if self.office.kind != self.group.kind:
        #         raise ValidationError({
        #             'office': 'Office does not match Group Type.',
        #         })
        # else:
        #     if self.office.code != self.office.CODE.chorus_man:
        #         raise ValidationError({
        #             'office': 'VLQ officers must be Chorus Managers.',
        #         })

    # Permissions
    @staticmethod
    @allow_staff_or_superuser
    @authenticated_users
    def has_read_permission(request):
        return True

    @allow_staff_or_superuser
    @authenticated_users
    def has_object_read_permission(self, request):
        return True

    @staticmethod
    @allow_staff_or_superuser
    @authenticated_users
    def has_write_permission(request):
        # Only holders of an active International office (< 200) may write.
        return any([
            request.user.person.officers.filter(
                office__lt=200,
                status__gt=0,
            )
        ])

    @allow_staff_or_superuser
    @authenticated_users
    def has_object_write_permission(self, request):
        # Writable only by active officers of the same group, and only when
        # the row is not mirrored from the membership system.
        return any([
            all([
                self.group.officers.filter(
                    person__user=request.user,
                    status__gt=0,
                ),
                self.mc_pk is None,  # FIX: was `== None` (PEP 8: use `is`)
            ]),
        ])

    # Transitions
    @fsm_log_by
    @fsm_log_description
    @transition(field=status, source='*', target=STATUS.active)
    def activate(self, description=None, *args, **kwargs):
        """Activate the Officer."""
        return

    @fsm_log_by
    @fsm_log_description
    @transition(field=status, source='*', target=STATUS.inactive)
    def deactivate(self, description=None, *args, **kwargs):
        """Deactivate the Officer."""
        return
class Chart(TimeStampedModel):
    """A song arrangement that groups can perform and add to repertories.

    Identified for display purposes by title plus arrangers (see ``nomen``
    and ``__str__``); only charts in the ``active`` state are searchable.
    """
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
    )
    # Lifecycle states; 'protected' presumably marks charts with
    # restricted distribution -- TODO(review): confirm semantics.
    STATUS = Choices(
        (-20, 'protected', 'Protected',),
        (-10, 'inactive', 'Inactive',),
        (0, 'new', 'New'),
        (10, 'active', 'Active'),
    )
    status = FSMIntegerField(
        help_text="""DO NOT CHANGE MANUALLY unless correcting a mistake.  Use the buttons to change state.""",
        choices=STATUS,
        default=STATUS.new,
    )
    title = models.CharField(
        max_length=255,
    )
    arrangers = models.CharField(
        max_length=255,
    )
    composers = models.CharField(
        max_length=255,
    )
    lyricists = models.CharField(
        max_length=255,
    )
    holders = models.TextField(
        blank=True,
    )
    description = models.TextField(
        help_text="""
        Fun or interesting facts to share about the chart (ie, 'from Disney's Lion King, first sung by <NAME>'.)""",
        blank=True,
        max_length=1000,
    )
    notes = models.TextField(
        help_text="""
        Private Notes (for internal use only).""",
        blank=True,
    )
    image = models.ImageField(
        upload_to=ImageUploadPath(),
        null=True,
        blank=True,
    )

    # Relations
    statelogs = GenericRelation(
        StateLog,
        related_query_name='charts',
    )

    @cached_property
    def nomen(self):
        """Display name: '<title> [<arrangers>]' (same format as __str__)."""
        return "{0} [{1}]".format(
            self.title,
            self.arrangers,
        )

    def is_searchable(self):
        """Return True when the chart is in the active state."""
        return bool(self.status == self.STATUS.active)

    # Internals
    objects = ChartManager()

    class Meta:
        # NOTE(review): no trailing comma after the inner tuple, so this is
        # the single-tuple form ('title', 'arrangers') -- Django accepts
        # both forms for unique_together.
        unique_together = (
            ('title', 'arrangers',)
        )

    class JSONAPIMeta:
        resource_name = "chart"

    def __str__(self):
        return "{0} [{1}]".format(
            self.title,
            self.arrangers,
        )

    # Permissions
    @staticmethod
    @allow_staff_or_superuser
    @authenticated_users
    def has_read_permission(request):
        return True

    @allow_staff_or_superuser
    @authenticated_users
    def has_object_read_permission(self, request):
        return True

    @staticmethod
    @allow_staff_or_superuser
    @authenticated_users
    def has_write_permission(request):
        # Requires an active office with code > 300 (group-level offices).
        return any([
            request.user.person.officers.filter(
                office__gt=300,
                status__gt=0,
            )
        ])

    @allow_staff_or_superuser
    @authenticated_users
    def has_object_write_permission(self, request):
        # Only the active SCJC Chart officer (office code 160) may edit.
        return any([
            request.user.person.officers.filter(
                office=160,
                status__gt=0,
            )
        ])

    # Transitions
    @fsm_log_by
    @transition(field=status, source='*', target=STATUS.active)
    def activate(self, *args, **kwargs):
        """Activate the Chart."""
        return

    @fsm_log_by
    @transition(field=status, source='*', target=STATUS.inactive)
    def deactivate(self, *args, **kwargs):
        """Deactivate the Chart."""
        return

    @fsm_log_by
    @transition(field=status, source='*', target=STATUS.protected)
    def protect(self, *args, **kwargs):
        """Protect the Chart."""
        return
class Repertory(TimeStampedModel):
    """Join model linking a Group to a Chart it performs.

    Each (group, chart) pair is unique; new entries default to active.
    """
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
    )
    STATUS = Choices(
        (-10, 'inactive', 'Inactive',),
        (0, 'new', 'New'),
        (10, 'active', 'Active'),
    )
    status = FSMIntegerField(
        help_text="""DO NOT CHANGE MANUALLY unless correcting a mistake.  Use the buttons to change state.""",
        choices=STATUS,
        # unlike sibling models, repertories start out active
        default=STATUS.active,
    )

    # FKs
    group = models.ForeignKey(
        'bhs.group',
        related_name='repertories',
        on_delete=models.CASCADE,
    )
    chart = models.ForeignKey(
        'bhs.chart',
        related_name='repertories',
        on_delete=models.CASCADE,
    )

    # Relations
    statelogs = GenericRelation(
        StateLog,
        related_query_name='repertories',
    )

    # Internals
    class Meta:
        verbose_name_plural = 'repertories'
        unique_together = (
            ('group', 'chart',),
        )

    class JSONAPIMeta:
        resource_name = "repertory"

    def __str__(self):
        return str(self.id)

    # Permissions
    @staticmethod
    @allow_staff_or_superuser
    @authenticated_users
    def has_read_permission(request):
        return True

    @allow_staff_or_superuser
    @authenticated_users
    def has_object_read_permission(self, request):
        # Readable by the group's active officers or members, or by active
        # convention assignments at CA category or below for the group's
        # appearances.  Assignment is resolved lazily to avoid an import
        # cycle with the cmanager app.
        Assignment = apps.get_model('cmanager.assignment')
        return any([
            self.group.officers.filter(
                person__user=request.user,
                status__gt=0,
            ),
            self.group.members.filter(
                person__user=request.user,
                status__gt=0,
            ),
            self.group.appearances.filter(
                round__session__convention__assignments__person__user=request.user,
                round__session__convention__assignments__status__gt=0,
                round__session__convention__assignments__category__lte=Assignment.CATEGORY.ca,
            )
        ])

    @staticmethod
    @allow_staff_or_superuser
    @authenticated_users
    def has_write_permission(request):
        # Requires an active office with code > 300 (group-level offices).
        return any([
            request.user.person.officers.filter(
                office__gt=300,
                status__gt=0,
            )
        ])

    @allow_staff_or_superuser
    @authenticated_users
    def has_object_write_permission(self, request):
        # Writable by the group's own active officers.
        return any([
            self.group.officers.filter(
                person__user=request.user,
                status__gt=0,
            ),
        ])

    # Transitions
    @fsm_log_by
    @transition(field=status, source='*', target=STATUS.active)
    def activate(self, *args, **kwargs):
        """Activate the Repertory."""
        return

    @fsm_log_by
    @transition(field=status, source='*', target=STATUS.inactive)
    def deactivate(self, *args, **kwargs):
        """Deactivate the Repertory."""
        return
class Human(models.Model):
    """Read-only mapping onto the external membership view ``vwMembers``
    (``managed = False``; every field is non-editable and most map to legacy
    column names via ``db_column``)."""
    id = models.CharField(
        primary_key=True,
        max_length=255,
        editable=False,
    )
    first_name = NoPunctuationCharField(
        max_length=255,
        editable=False,
    )
    middle_name = NoPunctuationCharField(
        max_length=255,
        editable=False,
        db_column='middle_initial',
    )
    last_name = NoPunctuationCharField(
        max_length=255,
        editable=False,
    )
    nick_name = NoPunctuationCharField(
        max_length=255,
        editable=False,
        db_column='preferred_name',
    )
    # NOTE(review): the email is stored in the legacy 'username' column.
    email = LowerEmailField(
        editable=False,
        null=True,
        db_column='username',
    )
    birth_date = ReasonableBirthDate(
        editable=False,
        null=True,
        db_column='birthday'
    )
    home_phone = ValidatedPhoneField(
        max_length=255,
        editable=False,
        db_column='phone'
    )
    cell_phone = ValidatedPhoneField(
        max_length=255,
        editable=False,
    )
    work_phone = ValidatedPhoneField(
        max_length=255,
        editable=False,
    )
    bhs_id = models.IntegerField(
        editable=False,
        unique=True,
        null=True,
        db_column='legacy_id',
    )
    GENDER = Choices(
        ('male', 'Male'),
        ('female', 'Female'),
    )
    gender = models.CharField(
        max_length=255,
        editable=False,
        choices=GENDER,
        db_column='sex',
    )
    PART = Choices(
        ('tenor', 'Tenor'),
        ('lead', 'Lead'),
        ('baritone', 'Baritone'),
        ('bass', 'Bass'),
    )
    part = VoicePartField(
        max_length=255,
        editable=False,
        choices=PART,
        db_column='primary_voice_part',
    )
    # presumably a membership tenure metric ('trusted_mon') -- TODO confirm
    mon = models.IntegerField(
        editable=False,
        db_column='trusted_mon',
    )
    is_deceased = models.BooleanField(
        editable=False,
    )
    is_honorary = models.BooleanField(
        editable=False,
        db_column='honorary_member',
    )
    is_suspended = models.BooleanField(
        editable=False,
    )
    is_expelled = models.BooleanField(
        editable=False,
    )
    # id of the record this human was merged into, when applicable
    merged_id = models.CharField(
        max_length=255,
        null=True,
        editable=False,
        db_column='merged_into',
    )
    deleted_id = models.CharField(
        max_length=255,
        null=True,
        editable=False,
        db_column='deleted_by_id',
    )
    created = models.DateTimeField(
        db_column='created',
        null=True,
        editable=False,
    )
    modified = models.DateTimeField(
        db_column='updated',
        null=True,
        editable=False,
    )

    objects = HumanManager()

    # Internals
    def __str__(self):
        """Display name: preferred (nick) name when set, else first name,
        followed by the last name."""
        if self.nick_name:
            first = self.nick_name
        else:
            first = self.first_name
        return " ".join([
            first,
            self.last_name,
        ])

    # Methods
    class Meta:
        managed=False
        db_table = 'vwMembers'
class Structure(models.Model):
id = models.CharField(
primary_key=True,
max_length=255,
editable=False,
)
name = models.CharField(
max_length=255,
editable=False,
)
KIND = Choices(
('organization', 'Organization'),
('district', 'District'),
('group', 'Group'),
('chapter', 'Chapter'),
('chorus', 'Chorus'),
('quartet', 'Quartet'),
)
kind = models.CharField(
max_length=255,
editable=False,
choices=KIND,
db_column='object_type',
)
GENDER = Choices(
('men', 'Male'),
('women', 'Female'),
('mixed', 'Mixed'),
)
gender = models.CharField(
max_length=255,
editable=False,
choices=GENDER,
db_column='category'
)
DIVISION = Choices(
('EVG', [
'EVG Division I',
'EVG Division II',
'EVG Division III',
'EVG Division IV',
'EVG Division V',
]),
('FWD', [
'FWD Arizona',
'FWD Northeast',
'FWD Northwest',
'FWD Southeast',
'FWD Southwest',
]),
('LOL', [
'LOL 10000 Lakes',
'LOL Division One',
'LOL Northern Plains',
'LOL Packerland',
'LOL Southwest',
]),
('MAD', [
# (160, 'madatl', 'MAD Atlantic'),
'MAD Central',
'MAD Northern',
'MAD Southern',
# (200, 'madwst', 'MAD Western'),
]),
('NED', [
'NED Granite and Pine',
'NED Mountain',
'NED Patriot',
'NED Sunrise',
'NED Yankee',
]),
('SWD', [
'SWD Northeast',
'SWD Northwest',
'SWD Southeast',
'SWD Southwest',
]),
)
division = models.CharField(
max_length=255,
editable=False,
null=True,
db_column='division',
choices=DIVISION,
)
bhs_id = models.IntegerField(
editable=False,
unique=True,
null=True,
db_column='legacy_id',
)
chapter_code = models.CharField(
max_length=255,
editable=False,
db_column='legacy_code',
)
website = models.CharField(
max_length=255,
editable=False,
)
email = models.CharField(
max_length=255,
editable=False,
)
chorus_name = models.CharField(
max_length=255,
editable=False,
)
phone = models.CharField(
max_length=255,
editable=False,
)
fax = models.CharField(
max_length=255,
editable=False,
)
facebook = models.CharField(
max_length=255,
editable=False,
)
twitter = models.CharField(
max_length=255,
editable=False,
)
youtube = models.CharField(
max_length=255,
editable=False,
)
pinterest = models.CharField(
max_length=255,
editable=False,
)
flickr = models.CharField(
max_length=255,
editable=False,
)
instagram = models.CharField(
max_length=255,
editable=False,
)
soundcloud | |
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ECO_FOOTPRINT_PLUS1: 'CommonGameTag' = 2306
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ECO_FOOTPRINT_PLUS2: 'CommonGameTag' = 2307
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ENVIRONMENT_SCORE_MINUS1: 'CommonGameTag' = 2296
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ENVIRONMENT_SCORE_MINUS2: 'CommonGameTag' = 2297
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ENVIRONMENT_SCORE_PLUS1: 'CommonGameTag' = 2294
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_ENVIRONMENT_SCORE_PLUS2: 'CommonGameTag' = 2295
BUILD_BB_GAMEPLAY_EFFECT_FLOOR_PATTERN_INCREASE_BILLS: 'CommonGameTag' = 2328
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_DECREASE_BILLS: 'CommonGameTag' = 2327
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ECO_FOOTPRINT_MINUS1: 'CommonGameTag' = 2300
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ECO_FOOTPRINT_MINUS2: 'CommonGameTag' = 2301
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ECO_FOOTPRINT_PLUS1: 'CommonGameTag' = 2298
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ECO_FOOTPRINT_PLUS2: 'CommonGameTag' = 2299
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ECO_FOOTPRINT_PLUS_PARK: 'CommonGameTag' = 2444
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ENVIRONMENT_SCORE_MINUS1: 'CommonGameTag' = 2288
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ENVIRONMENT_SCORE_MINUS2: 'CommonGameTag' = 2289
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ENVIRONMENT_SCORE_PLUS1: 'CommonGameTag' = 2286
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_ENVIRONMENT_SCORE_PLUS2: 'CommonGameTag' = 2287
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_INCREASE_BILLS: 'CommonGameTag' = 2326
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_POWER_CONSUMER: 'CommonGameTag' = 2314
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_POWER_PRODUCER: 'CommonGameTag' = 2316
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_WATER_CONSUMER: 'CommonGameTag' = 2315
BUILD_BB_GAMEPLAY_EFFECT_OBJECT_WATER_PRODUCER: 'CommonGameTag' = 2317
BUILD_BB_GAMEPLAY_EFFECT_POOL_SURFACE_POWER_CONSUMER: 'CommonGameTag' = 2322
BUILD_BB_GAMEPLAY_EFFECT_POOL_SURFACE_POWER_PRODUCER: 'CommonGameTag' = 2324
BUILD_BB_GAMEPLAY_EFFECT_POOL_SURFACE_WATER_CONSUMER: 'CommonGameTag' = 2323
BUILD_BB_GAMEPLAY_EFFECT_POOL_SURFACE_WATER_PRODUCER: 'CommonGameTag' = 2325
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_DECREASE_BILLS: 'CommonGameTag' = 2333
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_ECO_FOOTPRINT_MINUS1: 'CommonGameTag' = 2312
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_ECO_FOOTPRINT_MINUS2: 'CommonGameTag' = 2313
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_ECO_FOOTPRINT_PLUS1: 'CommonGameTag' = 2310
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_ECO_FOOTPRINT_PLUS2: 'CommonGameTag' = 2311
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_ENVIRONMENT_SCORE_MINUS1: 'CommonGameTag' = 2319
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_ENVIRONMENT_SCORE_PLUS1: 'CommonGameTag' = 2318
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_INCREASE_BILLS: 'CommonGameTag' = 2332
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_POWER_PRODUCER: 'CommonGameTag' = 2320
BUILD_BB_GAMEPLAY_EFFECT_ROOF_MATERIAL_WATER_PRODUCER: 'CommonGameTag' = 2321
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_DECREASE_BILLS: 'CommonGameTag' = 2331
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ECO_FOOTPRINT_MINUS1: 'CommonGameTag' = 2304
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ECO_FOOTPRINT_MINUS2: 'CommonGameTag' = 2305
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ECO_FOOTPRINT_PLUS1: 'CommonGameTag' = 2302
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ECO_FOOTPRINT_PLUS2: 'CommonGameTag' = 2303
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ENVIRONMENT_SCORE_MINUS1: 'CommonGameTag' = 2292
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ENVIRONMENT_SCORE_MINUS2: 'CommonGameTag' = 2293
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ENVIRONMENT_SCORE_PLUS1: 'CommonGameTag' = 2290
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_ENVIRONMENT_SCORE_PLUS2: 'CommonGameTag' = 2291
BUILD_BB_GAMEPLAY_EFFECT_WALL_PATTERN_INCREASE_BILLS: 'CommonGameTag' = 2330
BUILD_BLOCK: 'CommonGameTag' = 548
BUILD_BLOCK_BASEMENT: 'CommonGameTag' = 242
BUILD_BLOCK_DECK: 'CommonGameTag' = 1062
BUILD_BLOCK_DIAGONAL: 'CommonGameTag' = 1070
BUILD_BLOCK_FOUNTAIN: 'CommonGameTag' = 232
BUILD_BLOCK_FOUNTAIN_TOOL: 'CommonGameTag' = 233
BUILD_BLOCK_NO_WALLS: 'CommonGameTag' = 1064
BUILD_BLOCK_PLATFORM: 'CommonGameTag' = 2491
BUILD_BLOCK_PLATFORM_TOOL: 'CommonGameTag' = 2492
BUILD_BLOCK_POOL: 'CommonGameTag' = 1226
BUILD_BLOCK_POOL_TOOL: 'CommonGameTag' = 1227
BUILD_BLOCK_WALL_TOOL: 'CommonGameTag' = 653
BUILD_BLOCK_WITH_WALLS: 'CommonGameTag' = 1063
BUILD_BUY_AUTONOMY_MARKER_ATTRACTOR: 'CommonGameTag' = 1638
BUILD_BUY_NO_AUTONOMY_LIGHTS: 'CommonGameTag' = 1637
BUILD_BUY_NO_AUTONOMY_PLANTS: 'CommonGameTag' = 1636
BUILD_BUY_NO_AUTONOMY_RUGS: 'CommonGameTag' = 1639
BUILD_BUY_NO_AUTONOMY_SCULPTURES: 'CommonGameTag' = 1634
BUILD_BUY_WORLD_OBJECTS: 'CommonGameTag' = 787
BUILD_COLUMN: 'CommonGameTag' = 538
BUILD_DOOR: 'CommonGameTag' = 535
BUILD_DOOR_DOUBLE: 'CommonGameTag' = 918
BUILD_DOOR_SINGLE: 'CommonGameTag' = 974
BUILD_ELEVATOR: 'CommonGameTag' = 1611
BUILD_FENCE: 'CommonGameTag' = 544
BUILD_FLOOR_PATTERN: 'CommonGameTag' = 541
BUILD_FLOOR_TRIM: 'CommonGameTag' = 554
BUILD_FLOWER: 'CommonGameTag' = 556
BUILD_FLOWER_BUSH: 'CommonGameTag' = 1068
BUILD_FLOWER_GROUND_COVER: 'CommonGameTag' = 1067
BUILD_FLOWER_MISC: 'CommonGameTag' = 1069
BUILD_FOUNDATION: 'CommonGameTag' = 552
BUILD_FOUNTAIN_TRIM: 'CommonGameTag' = 1081
BUILD_FRIEZE: 'CommonGameTag' = 550
BUILD_GATE: 'CommonGameTag' = 537
BUILD_GATE_DOUBLE: 'CommonGameTag' = 915
BUILD_GATE_SINGLE: 'CommonGameTag' = 976
BUILD_GENERIC: 'CommonGameTag' = 1596
BUILD_HALF_WALL: 'CommonGameTag' = 1441
BUILD_HALF_WALL_TRIM: 'CommonGameTag' = 1442
BUILD_IS_SHELL_BUILDING: 'CommonGameTag' = 1574
BUILD_LADDER: 'CommonGameTag' = 2425
BUILD_PLATFORM_TRIM: 'CommonGameTag' = 2483
BUILD_POOL_STYLES: 'CommonGameTag' = 251
BUILD_POOL_TRIM: 'CommonGameTag' = 250
BUILD_POST: 'CommonGameTag' = 782
BUILD_RAILING: 'CommonGameTag' = 547
BUILD_ROCK: 'CommonGameTag' = 560
BUILD_ROOF: 'CommonGameTag' = 540
BUILD_ROOF_ATTACHMENT: 'CommonGameTag' = 539
BUILD_ROOF_ATTACHMENT_MISC: 'CommonGameTag' = 975
BUILD_ROOF_CHIMNEY: 'CommonGameTag' = 919
BUILD_ROOF_DIAGONAL: 'CommonGameTag' = 906
BUILD_ROOF_ORTHOGONAL: 'CommonGameTag' = 977
BUILD_ROOF_PATTERN: 'CommonGameTag' = 543
BUILD_ROOF_TRIM: 'CommonGameTag' = 551
BUILD_RUG: 'CommonGameTag' = 559
BUILD_SHRUB: 'CommonGameTag' = 557
BUILD_SHRUB_BUSH: 'CommonGameTag' = 1065
BUILD_SHRUB_CACTUS: 'CommonGameTag' = 1066
BUILD_SPANDREL: 'CommonGameTag' = 545
BUILD_STAIR: 'CommonGameTag' = 546
BUILD_STYLE: 'CommonGameTag' = 549
BUILD_TREE: 'CommonGameTag' = 558
BUILD_WALL_ATTACHMENT: 'CommonGameTag' = 555
BUILD_WALL_PATTERN: 'CommonGameTag' = 542
BUILD_WEDDING_ARCH: 'CommonGameTag' = 981
BUILD_WINDOW: 'CommonGameTag' = 536
BUY_CAT_CLEAN_POWER: 'CommonGameTag' = 67591
BUY_CAT_COLLECTION_ALIEN: 'CommonGameTag' = 1044
BUY_CAT_COLLECTION_ALL: 'CommonGameTag' = 1053
BUY_CAT_COLLECTION_CAPSULE: 'CommonGameTag' = 69729
BUY_CAT_COLLECTION_CITY_POSTER: 'CommonGameTag' = 55378
BUY_CAT_COLLECTION_CRYSTAL: 'CommonGameTag' = 1041
BUY_CAT_COLLECTION_ELEMENT: 'CommonGameTag' = 1042
BUY_CAT_COLLECTION_FISH: 'CommonGameTag' = 1051
BUY_CAT_COLLECTION_FOSSIL: 'CommonGameTag' = 1043
BUY_CAT_COLLECTION_FROG: 'CommonGameTag' = 1052
BUY_CAT_COLLECTION_GACHAPON: 'CommonGameTag' = 69728
BUY_CAT_COLLECTION_GARDENING: 'CommonGameTag' = 1159
BUY_CAT_COLLECTION_METAL: 'CommonGameTag' = 1045
BUY_CAT_COLLECTION_MY_SIM: 'CommonGameTag' = 1046
BUY_CAT_COLLECTION_POSTCARD: 'CommonGameTag' = 1049
BUY_CAT_COLLECTION_SLIDE: 'CommonGameTag' = 1048
BUY_CAT_COLLECTION_SNOW_GLOBE: 'CommonGameTag' = 55377
BUY_CAT_COLLECTION_SPACE_PRINT: 'CommonGameTag' = 1047
BUY_CAT_COLLECTION_SPACE_ROCK: 'CommonGameTag' = 1050
BUY_CAT_COLLECTION_TREASURE: 'CommonGameTag' = 2043
BUY_CAT_COLUMNS: 'CommonGameTag' = 429
BUY_CAT_COMMUNITY: 'CommonGameTag' = 1352
BUY_CAT_EASEL: 'CommonGameTag' = 440
BUY_CAT_EE_ACTIVE_ACTIVITY: 'CommonGameTag' = 970
BUY_CAT_EE_ALARM: 'CommonGameTag' = 169
BUY_CAT_EE_AUDIO: 'CommonGameTag' = 163
BUY_CAT_EE_BAR: 'CommonGameTag' = 176
BUY_CAT_EE_BASKETBALL: 'CommonGameTag' = 456
BUY_CAT_EE_CHESS_TABLE: 'CommonGameTag' = 457
BUY_CAT_EE_CLOCK: 'CommonGameTag' = 171
BUY_CAT_EE_COMPUTER: 'CommonGameTag' = 162
BUY_CAT_EE_CREATIVE_ACTIVITY: 'CommonGameTag' = 968
BUY_CAT_EE_GARDENING: 'CommonGameTag' = 2075
BUY_CAT_EE_HOBBY_SKILL: 'CommonGameTag' = 165
BUY_CAT_EE_INDOOR_ACTIVITY: 'CommonGameTag' = 173
BUY_CAT_EE_KID_ACTIVITY: 'CommonGameTag' = 174
BUY_CAT_EE_KID_FURNITURE: 'CommonGameTag' = 167
BUY_CAT_EE_KID_TOY: 'CommonGameTag' = 168
BUY_CAT_EE_KNOWLEDGE_ACTIVITY: 'CommonGameTag' = 969
BUY_CAT_EE_MISC_ELECTRONICS: 'CommonGameTag' = 177
BUY_CAT_EE_MISC_ENTERTAINMENT: 'CommonGameTag' = 178
BUY_CAT_EE_MISC_KIDS: 'CommonGameTag' = 179
BUY_CAT_EE_MONKEY_BARS: 'CommonGameTag' = 458
BUY_CAT_EE_OUTDOOR_ACTIVITY: 'CommonGameTag' = 175
BUY_CAT_EE_PARTY: 'CommonGameTag' = 166
BUY_CAT_EE_PET_ACTIVITY_TOYS: 'CommonGameTag' = 2014
BUY_CAT_EE_PET_MISC: 'CommonGameTag' = 1948
BUY_CAT_EE_PET_TOYS: 'CommonGameTag' = 1944
BUY_CAT_EE_PET_VET: 'CommonGameTag' = 1947
BUY_CAT_EE_TODDLERS: 'CommonGameTag' = 172
BUY_CAT_EE_TRANSPORTATION: 'CommonGameTag' = 2237
BUY_CAT_EE_TV: 'CommonGameTag' = 161
BUY_CAT_EE_TV_SETS: 'CommonGameTag' = 164
BUY_CAT_EE_TV_STAND: 'CommonGameTag' = 1122
BUY_CAT_EE_VIDEO_GAME_CONSOLE: 'CommonGameTag' = 55356
BUY_CAT_HOLIDAY_ALL: 'CommonGameTag' = 2084
BUY_CAT_HOLIDAY_DECOR_ALL: 'CommonGameTag' = 2085
BUY_CAT_INSTRUMENT: 'CommonGameTag' = 441
BUY_CAT_LD_AWNING: 'CommonGameTag' = 979
BUY_CAT_LD_BATHROOM_ACCENT: 'CommonGameTag' = 194
BUY_CAT_LD_CEILING_DECORATION: 'CommonGameTag' = 2188
BUY_CAT_LD_CEILING_LIGHT: 'CommonGameTag' = 205
BUY_CAT_LD_CLUTTER: 'CommonGameTag' = 823
BUY_CAT_LD_CURTAIN_BLIND: 'CommonGameTag' = 978
BUY_CAT_LD_FIREPLACE: 'CommonGameTag' = 785
BUY_CAT_LD_FLOOR_LAMP: 'CommonGameTag' = 204
BUY_CAT_LD_FOUNTAIN_DECORATION: 'CommonGameTag' = 199
BUY_CAT_LD_FOUNTAIN_EMITTER: 'CommonGameTag' = 231
BUY_CAT_LD_FOUNTAIN_OBJECTS: 'CommonGameTag' = 252
BUY_CAT_LD_KID_DECORATION: 'CommonGameTag' = 196
BUY_CAT_LD_LAWN_ORNAMENT: 'CommonGameTag' = 195
BUY_CAT_LD_MIRROR: 'CommonGameTag' = 207
BUY_CAT_LD_MIRROR_FREESTANDING: 'CommonGameTag' = 965
BUY_CAT_LD_MIRROR_WALL: 'CommonGameTag' = 964
BUY_CAT_LD_MISC_DECORATION: 'CommonGameTag' = 209
BUY_CAT_LD_MISC_LIGHT: 'CommonGameTag' = 208
BUY_CAT_LD_NIGHT_LIGHT: 'CommonGameTag' = 1718
BUY_CAT_LD_OUTDOOR_LIGHT: 'CommonGameTag' = 206
BUY_CAT_LD_PLANT: 'CommonGameTag' = 202
BUY_CAT_LD_POOL_DECORATIONS: 'CommonGameTag' = 1246
BUY_CAT_LD_POOL_OBJECTS: 'CommonGameTag' = 1228
BUY_CAT_LD_POOL_OBJECTS_INVENTORYABLE: 'CommonGameTag' = 2211
BUY_CAT_LD_RUG: 'CommonGameTag' = 198
BUY_CAT_LD_RUG_MANAGED: 'CommonGameTag' = 1496
BUY_CAT_LD_SCULPTURE: 'CommonGameTag' = 200
BUY_CAT_LD_TABLE_LAMP: 'CommonGameTag' = 203
BUY_CAT_LD_WALL_DECORATION: 'CommonGameTag' = 201
BUY_CAT_LD_WALL_LIGHT: 'CommonGameTag' = 310
BUY_CAT_LD_WALL_SCULPTURE: 'CommonGameTag' = 824
BUY_CAT_LD_WINDOW_TREATMENT: 'CommonGameTag' = 197
BUY_CAT_LOT_REQ_ELEVATOR: 'CommonGameTag' = 55374
BUY_CAT_LOT_REQ_ELEVATOR_BG: 'CommonGameTag' = 2240
BUY_CAT_LOT_REQ_MAILBOX: 'CommonGameTag' = 55375
BUY_CAT_LOT_REQ_MAILBOX_BG: 'CommonGameTag' = 2241
BUY_CAT_LOT_REQ_TRASH_CHUTE: 'CommonGameTag' = 55376
BUY_CAT_LOT_REQ_TRASH_CHUTE_BG: 'CommonGameTag' = 2242
BUY_CAT_MAG_BATHROOM: 'CommonGameTag' = 271
BUY_CAT_MAG_BEDROOM: 'CommonGameTag' = 272
BUY_CAT_MAG_CAREER: 'CommonGameTag' = 468
BUY_CAT_MAG_DINING_ROOM: 'CommonGameTag' = 273
BUY_CAT_MAG_KIDS: 'CommonGameTag' = 864
BUY_CAT_MAG_KITCHEN: 'CommonGameTag' = 274
BUY_CAT_MAG_LIVING_ROOM: 'CommonGameTag' = 270
BUY_CAT_MAG_MISC: 'CommonGameTag' = 407
BUY_CAT_MAG_OUTDOOR: 'CommonGameTag' = 275
BUY_CAT_MAG_STUDY: 'CommonGameTag' = 276
BUY_CAT_OTG_APPLIANCES: 'CommonGameTag' = 2380
BUY_CAT_OTG_CRAFTING: 'CommonGameTag' = 2381
BUY_CAT_OTG_LIGHTING: 'CommonGameTag' = 2382
BUY_CAT_OTG_MISC: 'CommonGameTag' = 2383
BUY_CAT_OTG_OUTDOOR_ACTIVITIES: 'CommonGameTag' = 2384
BUY_CAT_OTG_PLUMBING: 'CommonGameTag' = 2385
BUY_CAT_PA_COFFEE_MAKER: 'CommonGameTag' = 966
BUY_CAT_PA_DISPOSABLE: 'CommonGameTag' = 188
BUY_CAT_PA_DISPOSAL_INDOOR: 'CommonGameTag' = 972
BUY_CAT_PA_DISPOSAL_OUTDOOR: 'CommonGameTag' = 973
BUY_CAT_PA_LARGE_APPLIANCE: 'CommonGameTag' = 185
BUY_CAT_PA_LITTER_BOX: 'CommonGameTag' = 1978
BUY_CAT_PA_MICROWAVE: 'CommonGameTag' = 967
BUY_CAT_PA_MISC_APPLIANCE: 'CommonGameTag' = 193
BUY_CAT_PA_MISC_PLUMBING: 'CommonGameTag' = 192
BUY_CAT_PA_MISC_SMALL_APPLIANCE: 'CommonGameTag' = 191
BUY_CAT_PA_OUTDOOR_COOKING: 'CommonGameTag' = 190
BUY_CAT_PA_PET_CARE: 'CommonGameTag' = 1945
BUY_CAT_PA_PET_FOOD: 'CommonGameTag' = 1976
BUY_CAT_PA_PUBLIC_RESTROOM: 'CommonGameTag' = 2042
BUY_CAT_PA_REFRIGERATOR: 'CommonGameTag' = 189
BUY_CAT_PA_SHOWER: 'CommonGameTag' = 183
BUY_CAT_PA_SINK: 'CommonGameTag' = 180
BUY_CAT_PA_SINK_COUNTER: 'CommonGameTag' = 920
BUY_CAT_PA_SINK_FREESTANDING: 'CommonGameTag' = 182
BUY_CAT_PA_SMALL_APPLIANCE: 'CommonGameTag' = 186
BUY_CAT_PA_STOVE: 'CommonGameTag' = 187
BUY_CAT_PA_STOVE_HOOD: 'CommonGameTag' = 913
BUY_CAT_PA_TOILET: 'CommonGameTag' = 181
BUY_CAT_PA_TUB: 'CommonGameTag' = 184
BUY_CAT_PAINTING: 'CommonGameTag' = 446
BUY_CAT_SHAREABLE: 'CommonGameTag' = 1261
BUY_CAT_SPANDRELS_FRIEZES_TRIM: 'CommonGameTag' = 430
BUY_CAT_SS_ACCENT_TABLE: 'CommonGameTag' = 1123
BUY_CAT_SS_BARSTOOL: 'CommonGameTag' = 224
BUY_CAT_SS_BED: 'CommonGameTag' = 225
BUY_CAT_SS_BED_DOUBLE: 'CommonGameTag' = 914
BUY_CAT_SS_BED_SINGLE: 'CommonGameTag' = 971
BUY_CAT_SS_BOOKSHELF: 'CommonGameTag' = 226
BUY_CAT_SS_CABINET: 'CommonGameTag' = 211
BUY_CAT_SS_COFFEE_TABLE: 'CommonGameTag' = 214
BUY_CAT_SS_COUNTER: 'CommonGameTag' = 210
BUY_CAT_SS_DESK: 'CommonGameTag' = 215
BUY_CAT_SS_DESK_CHAIR: 'CommonGameTag' = 222
BUY_CAT_SS_DINING_CHAIR: 'CommonGameTag' = 217
BUY_CAT_SS_DINING_TABLE: 'CommonGameTag' = 212
BUY_CAT_SS_DINING_TABLE_LONG: 'CommonGameTag' = 963
BUY_CAT_SS_DINING_TABLE_SHORT: 'CommonGameTag' = 962
BUY_CAT_SS_DISPLAY: 'CommonGameTag' = 216
BUY_CAT_SS_DRESSER: 'CommonGameTag' = 227
BUY_CAT_SS_ELEMENT_DISPLAY: 'CommonGameTag' = 1072
BUY_CAT_SS_END_TABLE: 'CommonGameTag' = 213
BUY_CAT_SS_HALLWAY_TABLE: 'CommonGameTag' = 1126
BUY_CAT_SS_LIVING_CHAIR: 'CommonGameTag' = 221
BUY_CAT_SS_LOVE_SEAT: 'CommonGameTag' = 219
BUY_CAT_SS_MISC_COMFORT: 'CommonGameTag' = 229
BUY_CAT_SS_MISC_STORAGE: 'CommonGameTag' = 230
BUY_CAT_SS_MISC_SURFACE: 'CommonGameTag' = 228
BUY_CAT_SS_OUTDOOR_BENCH: 'CommonGameTag' = 916
BUY_CAT_SS_OUTDOOR_CHAIR: 'CommonGameTag' = 220
BUY_CAT_SS_OUTDOOR_SEATING: 'CommonGameTag' = 223
BUY_CAT_SS_OUTDOOR_TABLE: 'CommonGameTag' = 917
BUY_CAT_SS_PET_BED: 'CommonGameTag' = 1977
BUY_CAT_SS_PET_FURNITURE: 'CommonGameTag' = 1946
BUY_CAT_SS_POSTCARD_BOARD: 'CommonGameTag' = 1071
BUY_CAT_SS_SCRATCHING_POST: 'CommonGameTag' = 1979
BUY_CAT_SS_SOFA: 'CommonGameTag' = 218
BUY_CAT_VENUE_ARTS_CENTER: 'CommonGameTag' = 1604
BUY_CAT_VENUE_ARTS_COMMONS: 'CommonGameTag' = 2273
BUY_CAT_VENUE_BAR: 'CommonGameTag' = 1353
BUY_CAT_VENUE_BEACH: 'CommonGameTag' = 2199
BUY_CAT_VENUE_BLUFFS: 'CommonGameTag' = 24612
BUY_CAT_VENUE_CAFE: 'CommonGameTag' = 24578
BUY_CAT_VENUE_CHALET: 'CommonGameTag' = 24611
BUY_CAT_VENUE_CLUB: 'CommonGameTag' = 1354
BUY_CAT_VENUE_COMMUNITY_SPACE_DEFAULT: 'CommonGameTag' = 2438
BUY_CAT_VENUE_COMMUNITY_SPACE_GARDEN: 'CommonGameTag' = 2440
BUY_CAT_VENUE_COMMUNITY_SPACE_MAKER_SPACE: 'CommonGameTag' = 2439
BUY_CAT_VENUE_COMMUNITY_SPACE_MARKETPLACE: 'CommonGameTag' = 2441
BUY_CAT_VENUE_DOCTOR_CLINIC: 'CommonGameTag' = 1362
BUY_CAT_VENUE_FOREST_PARK: 'CommonGameTag' = 1355
BUY_CAT_VENUE_GYM: 'CommonGameTag' = 1356
BUY_CAT_VENUE_KARAOKE: 'CommonGameTag' = 1579
BUY_CAT_VENUE_LIBRARY: 'CommonGameTag' = 1357
BUY_CAT_VENUE_LOUNGE: 'CommonGameTag' = 1358
BUY_CAT_VENUE_MUSEUM: 'CommonGameTag' = 1359
BUY_CAT_VENUE_ONSEN: 'CommonGameTag' = 69662
BUY_CAT_VENUE_PARK: 'CommonGameTag' = 1360
BUY_CAT_VENUE_PENTHOUSE: 'CommonGameTag' = 55373
BUY_CAT_VENUE_PENTHOUSE_BG: 'CommonGameTag' = 2239
BUY_CAT_VENUE_POLICE_STATION: 'CommonGameTag' = 1363
BUY_CAT_VENUE_POOL: 'CommonGameTag' = 1459
BUY_CAT_VENUE_RELAXATION_CENTER: 'CommonGameTag' = 18436
BUY_CAT_VENUE_RESTAURANT: 'CommonGameTag' = 26625
BUY_CAT_VENUE_RETAIL: 'CommonGameTag' = 1361
BUY_CAT_VENUE_RUINS: 'CommonGameTag' = 24613
BUY_CAT_VENUE_SCIENCE_COMMONS: 'CommonGameTag' = 2272
BUY_CAT_VENUE_SCIENTIST_LAB: 'CommonGameTag' = 1364
BUY_CAT_VENUE_STAR_GARDEN: 'CommonGameTag' = 1580
BUY_CAT_VENUE_UNIVERSITY_HOUSING: 'CommonGameTag' = 2229
| |
<gh_stars>0
import os, sys, math, time
import matplotlib.pyplot as plt
from itertools import cycle
# Colored screen output for print via ANSI escape sequences.
class c:
    """ANSI escape codes for colorized terminal output.

    Use the constants as string prefixes, e.g.
    ``print(c.RED + 'text' + c.END)``; ``END`` resets all attributes.
    """
    PURPLE = '\033[95m'
    PURPLEBOLD = '\033[95m\033[1m'
    CYAN = '\033[96m'
    CYANBOLD = '\033[96m\033[1m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    BLUEBOLD = '\033[94m\033[1m'
    GREEN = '\033[92m'
    GREENBOLD = '\033[92m\033[1m'
    YELLOW ='\033[93m'
    YELLBOLD = '\033[1m\033[93m'
    RED = '\033[91m'
    REDBOLD = '\033[1m\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
    ITALIC = '\33[3m'
# Iterated digit sum (digital root).
def quersumme(value):
    """Return the digital root of *value* as a one-character string.

    The decimal digits are summed repeatedly until a single digit
    remains.
    """
    digits = str(value)
    while len(digits) > 1:
        digits = str(sum(map(int, digits)))
    return digits
# Render an integer with thousands separators.
def format_int(i, sep='.'):
    """Return *i* as a string with *sep* between groups of three digits.

    Negative numbers are handled: no separator is ever attached to the
    sign (e.g. ``format_int(-1234)`` -> ``'-1.234'``).
    """
    text = str(i)
    last = len(text) - 1
    chunks = []
    for pos, ch in enumerate(reversed(text)):
        # A separator goes before every third digit (counted from the
        # right), except in front of the leftmost character.
        needs_sep = pos % 3 == 2 and pos != last
        chunks.append((sep if needs_sep else '') + ch)
    chunks.reverse()
    return ''.join(chunks)
# Produce leading blanks instead of zeros, similar in spirit to zfill(x).
def fuell_auf(a, b):
    """Return the run of spaces needed to left-pad string *a* to width *b*.

    Returns the empty string when *a* already has *b* or more characters.
    """
    # A negative multiplier yields '' in Python, which matches the old
    # range()-based loop for len(a) >= b.
    return ' ' * (b - len(a))
# Is the given object an integer?
def is_int(mytest):
    """Return True when *mytest* is an int (bools pass, being int
    subclasses -- this matches the original isinstance check)."""
    return isinstance(mytest, int)
# Print the results of the prime-number run.
def ausgabe_ergebnis():
    """Print the final statistics of a sieve run.

    Reads the module globals ``hoechstwert`` (upper bound), ``menge``
    (prime counter, printed as ``menge - 1``), ``blockgroesse`` (output
    block size), and ``startzeit``/``endezeit`` (timestamps).
    """
    ergebnis = c.CYAN
    ohnefarbe = c.END
    printf('\n\nListe der natuerlichen Zahlen bis: \t ' + ergebnis + '%s\n' + ohnefarbe, format_int(hoechstwert))
    printf('Gefundene Primzahlen: \t\t\t ' + ergebnis + '%d\n' + ohnefarbe, (menge - 1))
    # BUGFIX: was "%2f", which is a minimum *width* of 2 with full default
    # precision (e.g. '6.907755'); "%.2f" limits the output to two decimal
    # places, consistent with the '%.4f' used for the duration below.
    printf('Es ist jede \t\t\t\t ' + ergebnis + "%.2f" + ohnefarbe + ' Zahl eine Primzahl\n', float(hoechstwert) / float(menge))
    printf('Blockgroesse: \t\t\t\t ' + ergebnis + '%d\n' + ohnefarbe, (blockgroesse))
    printf('Dauer der Untersuchung: \t\t ' + ergebnis + '%.4f ' + ohnefarbe + 'Sekunden\n', float(endezeit - startzeit))
# Clear the terminal screen.
def clear():
    """Clear the console: 'cls' on Windows (os.name == 'nt'), 'clear'
    elsewhere.

    The original wrapped the Windows branch in a pointless lambda; both
    branches now share a single os.system call.
    """
    os.system('cls' if os.name == 'nt' else 'clear')
# printf-style formatted output.
def printf(format, *args):
    """Write ``format % args`` to stdout without appending a newline."""
    print(format % args, end='')
def printprime():
    """Print the program's ASCII-art logo in alternating colors."""
    printf(c.CYAN)
    printf(" ##### \n")
    printf(c.GREEN)
    printf(" #### ##*\n")
    printf(c.CYAN)
    printf(" # ## ##\n")
    printf(c.GREEN)
    printf(" ## ##\n")
    printf(c.CYAN)
    printf(" ## ##\n")
    printf(c.GREEN)
    printf(" ## ## +#\n")
    printf(c.CYAN)
    printf(" ## ###* #+\n")
    printf(c.GREEN)
    printf(" #####\n")
    printf(c.CYAN)
    printf(" ## *##### ## -## ## ##* *####*\n")
    printf(c.GREEN)
    printf(" ## ###++## ## ### ## ## ## ##\n")
    printf(c.CYAN)
    printf(" ## ## ## ## ## ## ######*\n")
    printf(c.GREEN)
    printf(" ## ## ## ## ## ## ##\n")
    printf(c.CYAN)
    printf(" ## ## ## ## ## ## ##\n")
    printf(c.GREEN)
    printf(" ## ## ## ## ## ## *##**-\n\n\n")
    # Reset the terminal color so subsequent output is unaffected.
    printf(c.END)
def abbruch():
    """Print the usage/help text: logo, the two run modes, and a worked
    command-line example with an explanation of every argument.

    (German output strings are user-facing and left untouched.)
    """
    printprime()
    printf(c.CYANBOLD)
    printf("primzahl.py")
    printf(c.END)
    printf(" ist ein Hilfsprogramm zur Untersuchung von Primzahlen.\n")
    printf("Das Programm kann in zwei Modi verwendet werden:\n\n")
    printf("\t- interaktiv")
    printf(c.BOLD)
    printf(" und\n")
    printf(c.END)
    printf("\t- ueber die Kommandozeile\n\n")
    printf("Um im den interaktiven Modus zu gelangen, genuegt die Eingabe von:\n")
    printf(c.CYAN)
    printf("\tpython primzahl.py\n\n\n")
    printf(c.END)
    printf(c.BOLD)
    printf("Beispiel fuer den Kommandozeilenmodus:\n\n")
    printf(c.END)
    printf(c.CYAN)
    printf("python primzahl.py 1000 0 1 \"|\" 10 1 1 0 0 \n")
    printf(c.END)
    printf(c.ITALIC)
    printf(" 1000 Anzahl der Primzahlen bis n [pi(n)]\n")
    printf(" 0 = Keine grafische Ausgabe\n")
    printf(" 1 = Anzeigen aller ermittelter Primzahlen\n")
    printf(" | = Trennzeichen\n")
    printf(" 10 = Anzahl der Ausgabespalten\n")
    printf(" 1 = Anzeige fuehrender Nullen\n")
    printf(" 1 = Leerzeichen (0 = Nullen)\n")
    printf(" 0: Bildschirmausgabe\n")
    printf(" 1: primzahlen.dat im akt. Verz. anlegen\n")
    printf(" 0 = Quersumme nicht anzeigen (1 = anzeigen)\n")
    printf(c.END)
# Los geht's
def main():
global blockgroesse, menge, hoechstwert, startzeit, endezeit, dauer
menge = 1
querSum = ''
schreibindatei = False
zeigQuersumme = False
trennzeichen = ''
blockgroesse = 10
# -h ist das erste Argument, dann will der Anwender eine Hilfe angezeigt bekommen
hilfe = str(sys.argv[1:])
# Anstatt alle Parameter zu uebergeben, benoetigt der Anwender Hilfe
if (hilfe.find('-h') > 0) or (hilfe.find('--h') > 0):
clear()
abbruch()
sys.exit(0)
# Es werden genuegend Parameter uebergeben
if len(sys.argv) == 10:
try:
hoechstwert = int(sys.argv[1]) # Zahl
zeigegrafik = int(sys.argv[2]) # Bool
if zeigegrafik == 1:
zeigegrafik = True
else:
zeigegrafik = False
zeigeprim = int(sys.argv[3]) # Bool
if zeigeprim == 1:
zeigeprim = True
else:
zeigeprim = False
trennzeichen = sys.argv[4] # Zeichen
blockgroesse = int(sys.argv[5]) # Zahl
zeigenullen = int(sys.argv[6]) # Bool
if zeigenullen == 1:
zeigenullen = True
else:
zeigenullen = False
zeigleer = int(sys.argv[7]) # Bool
if zeigleer == 1:
zeigleer = True
else:
zeigleer = False
schreibindatei = int(sys.argv[8]) # Bool
if schreibindatei == 1:
schreibindatei = True
zeigeprim = True
else:
schreibindatei = False
zeigQuersumme = int(sys.argv[9]) # Bool
if zeigQuersumme == 1:
zeigQuersumme = True
else:
zeigQuersumme = False
# Den eingegebenen Hoechstwert in eine Zeichenkette umwandeln und die Laenge des Strings merken
hoechstwertlaenge = len(str(hoechstwert))
# Wenn alles schief geht ...
except Exception:
clear()
abbruch()
sys.exit(0)
# Keine -h fuer Hilfe uebergeben und auch nicht die Anzahl der benoetigten Parameter, also startet das Programm den interaktiven Modus
else:
################################################################################################
# los geht's mit der Abfrage ...
################################################################################################
if len(sys.argv) > 1:
clear()
print(c.REDBOLD)
print("Fehler!")
print(c.END)
printf("Anzahl der Parameter ist fehlerhaft. Aufruf der Hilfe mit \"python primzahl.py -h\". ")
repeat = input("Start im interaktiven Modus (J/n)?")
if repeat.upper() == "N":
sys.exit()
clear()
menge = 1 # Bis zum Zeitpunkt x gefundene Primzahlen
hoechstwertlaenge = 0 # Die Anzahl Zeichen des, vom Anwender eingegebenen Hoechstwert speichern
nullen = 'j' # Fuehrende Nullen anzeigen 'j' als Defaultwert
schreiben = 'n' # Nicht in Datei schreiben per Default
printf( c.BOLD + c.CYAN)
printf('Primzahlen berechnen\n')
printf('--------------------\n')
printf( c.END)
# Frage nach einem Hoechstwert bis zu dem gesucht werden soll
try:
hoechstwert = int(input('\nBis zu welcher natuerlichen Zahl sollen die Primzahlen errechnet werden (default: 1000)? '))
# gibt der Anwender Muell ein, wird der Defaultwert von 1000 verwendet - nicht elegant, funktioniert aber
except ValueError:
hoechstwert = 1000
# Soll eine grafische Anzeige der gefundenen Primzahlen erfolgen? Das dauert aber zigfach laenger.
info = input('\nGrafik anzeigen? Das Programm arbeitet durch die grafische Ausgabe deutlich laenger. (j/N)?')
if info in ['j', 'J', 'y', 'Y']:
zeigegrafik = True
else:
zeigegrafik = False
# Soll nur das Ergebnis angezeigt werden, oder jede gefundene zahl?
info = input('\nJede gefundene Primzahl ausgeben? (J/n)?')
if (info in ['j', 'J']) or (len(info) == 0):
zeigeprim = True
# Mit welchem Zeichen sollen die ermittelten Primzahlen getrennt werden?
trennzeichen = input('\nWelches Trennzeichen soll verwendet werden (default: |)?')
if len(trennzeichen) > 0:
trennzeichen = trennzeichen[0]
else:
trennzeichen = '|'
# Sollen die gefundenen Primzahlen in Bloecken zu x Zeichen ausgegeben werden?
blockgroesse = input('\nNach wievielen Primzahlen soll ein Zeilenumbruch erfolgen (default: 10)?')
try:
blockgroesse = int(blockgroesse)
if blockgroesse == 0:
blockgroesse = 1
except:
blockgroesse = 10
nullen = input('\nFuehrende Nullen ausgeben? (J/n)?')
if nullen in ['n', 'N']:
zeigenullen = False
else:
zeigenullen = True
null_leer = input('\nTatsaechlich fuehrende Nullen oder lieber ein Leerzeichen ausgeben? (J=0/n=Leerzeichen)?')
if null_leer in ['n', 'N']:
zeigleer = True
else:
zeigleer = False
# Quersumme anzeigen?
myQuersumme = input('\nQuersumme mit ausgeben? (J/n)?')
if myQuersumme in ['n', 'N']:
zeigQuersumme = False
else:
zeigQuersumme = True
schreiben = input('\nDie Primzahlen in eine Datei schreiben statt auf dem Bildschirm auszugeben? (j/N)?')
if schreiben in ['j', 'j', 'y', 'Y']:
schreibindatei = True
else:
schreibindatei = False
# Den eingegebenen Hoechstwert in eine Zeichenkette umwandeln und die Laenge des Strings merken
hoechstwertlaenge = len(str(hoechstwert))
else:
zeigeprim = False
################################################################################################
# Ende des Abfrageblocks
################################################################################################
# Bildschirm leeren - sieht schicker aus
clear()
# Falls das Ergebnis in eine Datei geschrieben werden soll
if schreibindatei == True:
f = open("primzahlen.dat","w")
# Einige Vorarbeiten, wenn die grafische Ausgabe gewuenscht wird
if zeigegrafik == True:
# Fenstertitel anzeigen
fig = plt.figure()
# ... und beschriften
fig.canvas.set_window_title('Primzahlen mit Python berechnen und anzeigen ...')
# Subplot fuer die Beschriftung
ax = fig.add_subplot(111)
# Titel der Primzahlberechnung
plt.title('Primzahlberechnungen', fontsize=24, color='black')
# Bildschirm 'leeren' und Startzeit merken
clear()
startzeit = time.time()
printf( c.BOLD + c.CYAN)
printf('Ergebnis der Primzahlberechnungen\n')
printf('---------------------------------\n')
printf( c.END)
################################################################################################
# Ab hier beginnt die eigentliche Berechnung
################################################################################################
# Jede Zahl zwischen 1 und hoechstwert wird zuerst als Primzahl angenommen
# und der Wert mit True vorbelegt. Da zahlen[0] die 0 und zahlen[1] die 1 repraesentiert
# und somit per se keine Primzahlen sind, werden die mit False vorbelegt
zahlen = [True]*(hoechstwert+1)
| |
loc=None, correct_key=True):
"""Evaluates and sets the specified option value in
environment `loc`. Many options need ``N`` to be defined in
`loc`, some need `popsize`.
Details
-------
Keys that contain 'filename' are not evaluated.
For `loc` is None, the self-dict is used as environment
:See: `evalall()`, `__call__`
"""
# TODO: try: loc['dim'] = loc['N'] etc
if correct_key:
# in_key = key # for debugging only
key = self.corrected_key(key)
self[key] = self(key, default, loc)
return self[key]
    def evalall(self, loc=None, defaults=None):
        """Evaluates all option values in environment `loc`.

        When `defaults` is None, the shipped default options from
        ``cma_default_options_()`` are used.  Evaluation only happens if
        `loc` contains the key ``'N'`` (the problem dimension); the
        instance is locked against further setting afterwards.

        NOTE(review): `loc` is dereferenced unconditionally (``'N' in
        loc``), so the default ``loc=None`` raises TypeError -- callers
        must pass a mapping.

        :See: `eval()`
        """
        self.check()
        if defaults is None:
            defaults = cma_default_options_()
        # TODO: this needs rather the parameter N instead of loc
        if 'N' in loc:  # TODO: __init__ of CMA can be simplified
            popsize = self('popsize', defaults['popsize'], loc)
            # Re-evaluate every stored option against the defaults in a
            # minimal environment of just N and popsize.
            for k in list(self.keys()):
                k = self.corrected_key(k)
                self.eval(k, defaults[k],
                          {'N': loc['N'], 'popsize': popsize})
        self._lock_setting = True
        return self
def match(self, s=''):
"""return all options that match, in the name or the description,
with string `s`, case is disregarded.
Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
options.
"""
match = s.lower()
res = {}
for k in sorted(self):
s = str(k) + '=\'' + str(self[k]) + '\''
if match in s.lower():
res[k] = self[k]
return CMAOptions(res)
    @property
    def to_namedtuple(self):
        """return options as const attributes of the returned object,
        only useful for inspection. """
        # Intentionally disabled; the commented code below sketches the
        # intended implementation.
        raise NotImplementedError
        # return collections.namedtuple('CMAOptionsNamedTuple',
        #                               self.keys())(**self)
    def from_namedtuple(self, t):
        """update options from a `collections.namedtuple`.

        Returns whatever ``self.update`` returns.

        :See also: `to_namedtuple`
        """
        return self.update(t._asdict())
def pprint(self, linebreak=80):
for i in sorted(self.items()):
s = str(i[0]) + "='" + str(i[1]) + "'"
a = s.split(' ')
# print s in chunks
l = '' # start entire to the left
while a:
while a and len(l) + len(a[0]) < linebreak:
l += ' ' + a.pop(0)
print(l)
l = ' ' # tab for subsequent lines
# Define `CMAEvolutionStrategyResult` only when `collections.namedtuple`
# is available; otherwise the plain-tuple fallback
# `_CMAEvolutionStrategyResult` below remains the only result type.
try:
    collections.namedtuple
except:
    pass
else:
    class CMAEvolutionStrategyResult(collections.namedtuple(
        'CMAEvolutionStrategyResult', [
            'xbest',
            'fbest',
            'evals_best',
            'evaluations',
            'iterations',
            'xfavorite',
            'stds',
            'stop',
        ])):
        """A results tuple from `CMAEvolutionStrategy` property ``result``.

        This tuple contains in the given position and as attribute

        - 0 ``xbest`` best solution evaluated
        - 1 ``fbest`` objective function value of best solution
        - 2 ``evals_best`` evaluation count when ``xbest`` was evaluated
        - 3 ``evaluations`` evaluations overall done
        - 4 ``iterations``
        - 5 ``xfavorite`` distribution mean in "phenotype" space, to be
          considered as current best estimate of the optimum
        - 6 ``stds`` effective standard deviations, can be used to
          compute a lower bound on the expected coordinate-wise distance
          to the true optimum, which is (very) approximately stds[i] *
          dimension**0.5 / min(mueff, dimension) / 1.5 / 5 ~ std_i *
          dimension**0.5 / min(popsize / 2, dimension) / 5, where
          dimension = CMAEvolutionStrategy.N and mueff =
          CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.
        - 7 ``stop`` termination conditions in a dictionary

        The penalized best solution of the last completed iteration can be
        accessed via attribute ``pop_sorted[0]`` of `CMAEvolutionStrategy`
        and the respective objective function value via ``fit.fit[0]``.

        Details:

        - This class is of purely declarative nature and for providing
          this docstring. It does not provide any further functionality.
        - ``list(fit.fit).find(0)`` is the index of the first sampled
          solution of the last completed iteration in ``pop_sorted``.
        """
# Module-level `CMAOptions` instance built from the defaults dictionary.
cma_default_options = CMAOptions(cma_default_options_())
class _CMAEvolutionStrategyResult(tuple):
    """A results tuple from `CMAEvolutionStrategy` property ``result``.

    Tuple layout (by position):

    - 0 best solution evaluated, ``xbest``
    - 1 objective function value of best solution, ``f(xbest)``
    - 2 evaluation count when ``xbest`` was evaluated
    - 3 evaluations overall done
    - 4 iterations
    - 5 distribution mean in "phenotype" space, to be considered as
      current best estimate of the optimum
    - 6 effective standard deviations, give a lower bound on the expected
      coordinate-wise distance to the true optimum of (very) approximately
      std_i * dimension**0.5 / min(mueff, dimension) / 1.2 / 5
      ~ std_i * dimension**0.5 / min(popsize / 0.4, dimension) / 5, where
      mueff = CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.

    The penalized best solution of the last completed iteration can be
    accessed via attribute ``pop_sorted[0]`` of `CMAEvolutionStrategy`
    and the respective objective function value via ``fit.fit[0]``.

    Details:

    - This class is of purely declarative nature and for providing this
      docstring. It does not provide any further functionality.
    - ``list(fit.fit).find(0)`` is the index of the first sampled solution
      of the last completed iteration in ``pop_sorted``.
    """
    # a tuple is immutable, so the payload cannot be assigned in __init__;
    # `_generate` plays the role of the constructor instead (inheriting
    # from `list` would have allowed in-place initialization).
    @staticmethod
    def _generate(self):
        """Assemble and return a `_CMAEvolutionStrategyResult` for the
        strategy instance `self`.

        Surrogate for ``__init__``, which cannot be used to initialize
        the immutable `tuple` super class.
        """
        head = self.best.get()  # an (x, f, evals) triple
        mean_pheno = self.gp.pheno(self.mean,
                                   into_bounds=self.boundary_handler.repair)
        stds = (self.gp.scales * self.sigma * self.sigma_vec.scaling *
                self.dC**0.5)
        return _CMAEvolutionStrategyResult(
            head + (self.countevals, self.countiter, mean_pheno, stds))
class CMAEvolutionStrategy(interfaces.OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
Calling Sequences
=================
- ``es = CMAEvolutionStrategy(x0, sigma0)``
- ``es = CMAEvolutionStrategy(x0, sigma0, opts)``
- ``es = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct)``
- ::
res = CMAEvolutionStrategy(x0, sigma0,
opts).optimize(objective_fct).result
Arguments
=========
`x0`
initial solution, starting point. `x0` is given as "phenotype"
which means, if::
opts = {'transformation': [transform, inverse]}
is given and ``inverse is None``, the initial mean is not
consistent with `x0` in that ``transform(mean)`` does not
equal to `x0` unless ``transform(mean)`` equals ``mean``.
`sigma0`
initial standard deviation. The problem variables should
have been scaled, such that a single standard deviation
on all variables is useful and the optimum is expected to
lie within about `x0` +- ``3*sigma0``. Often one wants to
check for solutions close to the initial point. This allows,
for example, for an easier check of consistency of the
objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `CMAOptions`.
Main interface / usage
======================
The interface is inherited from the generic `OOOptimizer`
class (see also there). An object instance is generated from::
es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2)
The least verbose interface is via the optimize method::
es.optimize(objective_func)
res = es.result
More verbosely, the optimization is done using the
methods `stop`, `ask`, and `tell`::
while not es.stop():
solutions = es.ask()
es.tell(solutions, [cma.ff.rosen(s) for s in solutions])
es.disp()
es.result_pretty()
where `ask` delivers new candidate solutions and `tell` updates
the `optim` instance by passing the respective function values
(the objective function `cma.ff.rosen` can be replaced by any
properly defined objective function, see `cma.ff` for more
examples).
To change an option, for example a termination condition to
continue the optimization, call::
es.opts.set({'tolfacupx': 1e4})
The class `CMAEvolutionStrategy` also provides::
(solutions, func_values) = es.ask_and_eval(objective_func)
and an entire optimization can also be written like::
while not es.stop():
es.tell(*es.ask_and_eval(objective_func))
Besides for termination criteria, in CMA-ES only the ranks of the
`func_values` are relevant.
Attributes and Properties
=========================
- `inputargs`: passed input arguments
- `inopts`: passed options
- `opts`: actually used options, some of them can be changed any
time via ``opts.set``, see class `CMAOptions`
- `popsize`: population size lambda, number of candidate
solutions returned by `ask` ()
- `logger`: a `CMADataLogger` instance utilized by `optimize`
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
... # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234...)
and optimize the ellipsoid function
>>> es.optimize(cma.ff.elli, verb_disp=1) # doctest: +ELLIPSIS
Iterat #Fevals function value axis ratio sigma min&max std t[m:s]
1 8 2.09...
>>> assert len(es.result) == 8
>>> assert es.result[1] < 1e-9
The optimization loop can also be written explicitly:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1) # doctest: +ELLIPSIS
(4_w,8)-aCMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=...
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.ff.elli(x) for x in X])
... es.disp() # doctest: +ELLIPSIS
Iterat #Fevals function value axis ratio sigma min&max std t[m:s]
1 8 ...
achieving the same result as above.
An | |
# <gh_stars>0
import os
import re
import copy
import math
import time
import glob
import shutil
import pickle
import pathlib
import warnings
import functools
import importlib
import itertools
from ..utils import _get_fn_name, prod, progbar
from .combo_runner import (
nan_like_result,
combo_runner_core,
combo_runner_to_ds,
)
from .case_runner import (
case_runner,
)
from .prepare import (
parse_combos,
parse_constants,
parse_attrs,
parse_fn_args,
parse_cases,
)
from .farming import Runner, Harvester, Sampler, XYZError
# Filename templates for a crop's on-disk artifacts; "{}" is the batch number.
BTCH_NM = "xyz-batch-{}.jbdmp"
RSLT_NM = "xyz-result-{}.jbdmp"
# The cloudpickled target function and the crop settings use fixed names.
FNCT_NM = "xyz-function.clpkl"
INFO_NM = "xyz-settings.jbdmp"
def write_to_disk(obj, fname):
    """Pickle *obj* and write it to the file *fname*."""
    with open(fname, 'wb') as fh:
        fh.write(pickle.dumps(obj))
def read_from_disk(fname):
    """Unpickle and return the object stored in the file *fname*."""
    with open(fname, 'rb') as fh:
        return pickle.load(fh)
@functools.lru_cache(8)
def get_picklelib(picklelib='joblib.externals.cloudpickle'):
    """Import (and memoize) the module providing ``dumps``/``loads``."""
    module = importlib.import_module(picklelib)
    return module
def to_pickle(obj, picklelib='joblib.externals.cloudpickle'):
    """Serialize *obj* to a bytestring using the configured pickle library."""
    return get_picklelib(picklelib).dumps(obj)
def from_pickle(s, picklelib='joblib.externals.cloudpickle'):
    """Deserialize the bytestring *s* using the configured pickle library."""
    return get_picklelib(picklelib).loads(s)
# --------------------------------- parsing --------------------------------- #
def parse_crop_details(fn, crop_name, crop_parent):
    """Work out how to structure the sowed data.

    Parameters
    ----------
    fn : callable, optional
        Function to infer name crop_name from, if not given.
    crop_name : str, optional
        Specific name to give this set of runs.
    crop_parent : str, optional
        Specific directory to put the ".xyz-{crop_name}/" folder in
        with all the cases and results.

    Returns
    -------
    crop_location : str
        Full path to the crop-folder.
    crop_name : str
        Name of the crop.
    crop_parent : str
        Parent folder of the crop.

    Raises
    ------
    ValueError
        If neither ``fn`` nor ``crop_name`` is supplied.
    """
    if crop_name is None:
        if fn is None:
            # fixed typo in the error message ("must be give.")
            raise ValueError("Either `fn` or `crop_name` must be given.")
        crop_name = _get_fn_name(fn)
    # default to the current working directory as the parent folder
    crop_parent = crop_parent if crop_parent is not None else os.getcwd()
    crop_location = os.path.join(crop_parent, ".xyz-{}".format(crop_name))
    return crop_location, crop_name, crop_parent
def parse_fn_farmer(fn, farmer):
    """Resolve which function to use: a supplied farmer's own ``fn`` takes
    precedence over an explicitly given *fn* (warning if both appear).

    Returns the ``(fn, farmer)`` pair actually to be used.
    """
    if farmer is None:
        return fn, farmer
    if fn is not None:
        warnings.warn("'fn' is ignored if a 'Runner', 'Harvester', or "
                      "'Sampler' is supplied as the 'farmer' kwarg.")
    return farmer.fn, farmer
def calc_clean_up_default_res(crop, clean_up, allow_incomplete):
    """Decide whether to automatically clean up a crop after reaping, and
    what (if any) the default all-nan result should be.

    When ``clean_up`` is None it defaults to the opposite of
    ``allow_incomplete``; a default result is only needed when incomplete
    reaps are allowed.
    """
    if clean_up is None:
        clean_up = not allow_incomplete
    default_result = crop.all_nan_result if allow_incomplete else None
    return clean_up, default_result
def check_ready_to_reap(crop, allow_incomplete, wait):
    """Raise an ``XYZError`` if *crop* still has missing results and the
    caller has neither opted to wait nor to accept incomplete batches.
    """
    if allow_incomplete or wait:
        return
    if crop.is_ready_to_reap():
        return
    raise XYZError("This crop is not ready to reap yet - results are "
                   "missing. You can reap only finished batches by setting"
                   " ``allow_incomplete=True``, but be aware this will "
                   "represent all missing batches with ``np.nan`` and thus"
                   " might effect data-types.")
class Crop(object):
"""Encapsulates all the details describing a single 'crop', that is,
its location, name, and batch size/number. Also allows tracking of
crop's progress, and experimentally, automatic submission of
workers to grid engine to complete un-grown cases. Can also be instantiated
directly from a :class:`~xyzpy.Runner` or :class:`~xyzpy.Harvester` or
:class:`~Sampler.Crop` instance.
Parameters
----------
fn : callable, optional
Target function - Crop `name` will be inferred from this if
not given explicitly. If given, `Sower` will also default
to saving a version of `fn` to disk for `cropping.grow` to use.
name : str, optional
Custom name for this set of runs - must be given if `fn`
is not.
parent_dir : str, optional
If given, alternative directory to put the ".xyz-{name}/"
folder in with all the cases and results.
save_fn : bool, optional
Whether to save the function to disk for `cropping.grow` to use.
Will default to True if `fn` is given.
batchsize : int, optional
How many cases to group into a single batch per worker.
By default, batchsize=1. Cannot be specified if `num_batches`
is.
num_batches : int, optional
How many total batches to aim for, cannot be specified if
`batchsize` is.
farmer : {xyzpy.Runner, xyzpy.Harvester, xyzpy.Sampler}, optional
A Runner, Harvester or Sampler, instance, from which the `fn` can be
inferred and which can also allow the Crop to reap itself straight to a
dataset or dataframe.
autoload : bool, optional
If True, check for the existence of a Crop written to disk
with the same location, and if found, load it.
See Also
--------
Runner.Crop, Harvester.Crop, Sampler.Crop
"""
    def __init__(
        self, *,
        fn=None,
        name=None,
        parent_dir=None,
        save_fn=None,
        batchsize=None,
        num_batches=None,
        shuffle=False,
        farmer=None,
        autoload=True
    ):
        # Resolve fn vs farmer - a supplied farmer's own fn wins (warns if
        # both were given).
        self._fn, self.farmer = parse_fn_farmer(fn, farmer)
        self.name = name
        self.parent_dir = parent_dir
        # NOTE(review): this assignment is unconditionally overwritten by
        # ``self.save_fn = save_fn is not False`` at the end of __init__ -
        # confirm it is redundant before removing.
        self.save_fn = save_fn
        self.batchsize = batchsize
        self.num_batches = num_batches
        self.shuffle = shuffle
        self._batch_remainder = None
        self._all_nan_result = None
        # Work out the full directory for the crop
        self.location, self.name, self.parent_dir = \
            parse_crop_details(self._fn, self.name, self.parent_dir)
        # try loading crop information if it exists
        if autoload and self.is_prepared():
            self._sync_info_from_disk()
        # Save function so it can be automatically loaded with all deps?
        if (fn is None) and (save_fn is True):
            raise ValueError("Must specify a function for it to be saved!")
        # default: save the function unless explicitly disabled
        self.save_fn = save_fn is not False
@property
def runner(self):
if isinstance(self.farmer, Runner):
return self.farmer
elif isinstance(self.farmer, (Harvester, Sampler)):
return self.farmer.runner
else:
return None
# ------------------------------- methods ------------------------------- #
def choose_batch_settings(self, *, combos=None, cases=None):
"""Work out how to divide all cases into batches, i.e. ensure
that ``batchsize * num_batches >= num_cases``.
"""
if combos:
n_combos = prod(len(x) for _, x in combos)
else:
n_combos = 1
if cases:
n_cases = len(cases)
else:
n_cases = 1
# for each case every combination is run
n = n_cases * n_combos
if (self.batchsize is not None) and (self.num_batches is not None):
# Check that they are set correctly
pos_tot = self.batchsize * self.num_batches
if self._batch_remainder is not None:
pos_tot += self._batch_remainder
if not (n <= pos_tot < n + self.batchsize):
raise ValueError("`batchsize` and `num_batches` cannot both"
"be specified if they do not not multiply"
"to the correct number of total cases.")
# Decide based on batchsize
elif self.num_batches is None:
if self.batchsize is None:
self.batchsize = 1
if not isinstance(self.batchsize, int):
raise TypeError("`batchsize` must be an integer.")
if self.batchsize < 1:
raise ValueError("`batchsize` must be >= 1.")
self.num_batches = math.ceil(n / self.batchsize)
self._batch_remainder = 0
# Decide based on num_batches:
else:
# cap at the total number of cases
self.num_batches = min(n, self.num_batches)
if not isinstance(self.num_batches, int):
raise TypeError("`num_batches` must be an integer.")
if self.num_batches < 1:
raise ValueError("`num_batches` must be >= 1.")
self.batchsize, self._batch_remainder = divmod(n, self.num_batches)
def ensure_dirs_exists(self):
"""Make sure the directory structure for this crop exists.
"""
os.makedirs(os.path.join(self.location, "batches"), exist_ok=True)
os.makedirs(os.path.join(self.location, "results"), exist_ok=True)
def save_info(self, combos=None, cases=None, fn_args=None):
"""Save information about the sowed cases.
"""
# If saving Harvester or Runner, strip out function information so
# as just to use pickle.
if self.farmer is not None:
farmer_copy = copy.deepcopy(self.farmer)
farmer_copy.fn = None
farmer_pkl = to_pickle(farmer_copy)
else:
farmer_pkl = None
write_to_disk({
'combos': combos,
'cases': cases,
'fn_args': fn_args,
'batchsize': self.batchsize,
'num_batches': self.num_batches,
'_batch_remainder': self._batch_remainder,
'shuffle': self.shuffle,
'farmer': farmer_pkl,
}, os.path.join(self.location, INFO_NM))
def load_info(self):
"""Load the full settings from disk.
"""
sfile = os.path.join(self.location, INFO_NM)
if not os.path.isfile(sfile):
raise XYZError("Settings can't be found at {}.".format(sfile))
else:
return read_from_disk(sfile)
def _sync_info_from_disk(self, only_missing=True):
"""Load information about the saved cases.
"""
settings = self.load_info()
self.batchsize = settings['batchsize']
self.num_batches = settings['num_batches']
self._batch_remainder = settings['_batch_remainder']
farmer_pkl = settings['farmer']
farmer = (
None if farmer_pkl is None else
from_pickle(farmer_pkl)
)
fn, farmer = parse_fn_farmer(None, farmer)
# if crop already has a harvester/runner. (e.g. was instantiated from
# one) by default don't overwrite from disk
if (self.farmer) is None or (not only_missing):
self.farmer = farmer
if self.fn is None:
self.load_function()
def save_function_to_disk(self):
"""Save the base function to disk using cloudpickle
"""
write_to_disk(to_pickle(self._fn),
os.path.join(self.location, FNCT_NM))
def load_function(self):
"""Load the saved function from disk, and try to re-insert it back into
Harvester or Runner if present.
"""
self._fn = from_pickle(read_from_disk(
os.path.join(self.location, FNCT_NM)))
if self.farmer is not None:
if self.farmer.fn is None:
self.farmer.fn = self._fn
else:
# TODO: check equality?
raise XYZError("Trying to load this Crop's function, {}, from "
"disk but its farmer already has a function "
"set: {}.".format(self._fn, self.farmer.fn))
    def prepare(self, combos=None, cases=None, fn_args=None):
        """Write information about this crop and the supplied combos to disk.
        Typically done at start of sow, not when Crop instantiated.
        """
        # create the batches/results folder structure first
        self.ensure_dirs_exists()
        if self.save_fn:
            self.save_function_to_disk()
        # record combos/cases and batching settings for later growing/reaping
        self.save_info(combos=combos, cases=cases, fn_args=fn_args)
def is_prepared(self):
"""Check whether this crop has been written to disk.
"""
return os.path.exists(os.path.join(self.location, INFO_NM))
def calc_progress(self):
"""Calculate how much progressed has been made in growing the batches.
"""
if self.is_prepared():
self._sync_info_from_disk()
self._num_sown_batches = len(glob.glob(
os.path.join(self.location, "batches", BTCH_NM.format("*"))))
self._num_results = len(glob.glob(
os.path.join(self.location, "results", | |
# <reponame>DataCanvasIO/YLearn
from copy import deepcopy
from collections import defaultdict
import numpy as np
from sklearn import clone
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from .base_models import BaseEstModel
from .utils import (convert2array, convert4onehot, nd_kron,
get_wv, cartesian, get_tr_ctrl)
from ylearn.utils import logging
logger = logging.get_logger(__name__)
#
# class DoubleML(BaseEstModel):
# r"""
# Double machine learning has two stages:
# In stage I, we
# 1. fit a model (y_model) to predict outcome (y) from confounders (w) to
# get the predicted outcome (py);
# 2. fit a model (x_model) to predict treatment (x) from confounders (w)
# to get the predicted treatement (px).
# In stage II, we
# fit a final model (yx_model) to predict y - py from x - px.
#
# See https://arxiv.org/pdf/1608.00060.pdf for reference.
#
# Attributes
# ----------
#
# Methods
# ----------
# _prepare4est(data, outcome, treatment, adjustment, individual=None)
# Prepare (fit the model) for estimating various quantities including
# ATE, CATE, ITE, and CITE.
# estimate(data, outcome, treatment, adjustment, quantity='ATE',
# condition_set=None, condition=None, individual=None)
# Integrate estimations for various quantities into a single method.
# estimate_ate(self, data, outcome, treatment, adjustment)
# estimate_cate(self, data, outcome, treatment, adjustment,
# condition_set, condition)
# estimate_ite(self, data, outcome, treatment, adjustment, individual)
# estimate_cite(self, data, outcome, treatment, adjustment,
# condition_set, condition, individual)
# """
# # TODO: support more final models, e.g., non-parametric models.
#
# def __init__(self, y_model, x_model, yx_model):
# super().__init__()
# if type(y_model) is str:
# y_model = self.ml_model_dic[y_model]
# if type(x_model) is str:
# x_model = deepcopy(self.ml_model_dic[x_model])
# if type(yx_model) is str:
# yx_model = deepcopy(self.ml_model_dic[yx_model])
#
# self.y_model = y_model
# self.x_model = x_model
# self.yx_model = yx_model
#
# def _prepare4est(self, data, outcome, treatment, adjustment, individual=None):
# self.y_model.fit(data[adjustment], data[outcome])
# self.x_model.fit(data[adjustment], data[treatment])
#
# py = self.y_model.predict(data[adjustment])
# px = self.x_model.predict(data[adjustment])
#
# self.yx_model.fit(data[treatment] - px, data[outcome] - py)
# # TODO: support cate, now only support ate
# result = self.yx_model.coef_
# return result
#
# def estimate_cate(self, data, outcome, treatment, adjustment,
# condition_set, condition):
# raise NotImplementedError
class DML4CATE(BaseEstModel):
r"""Double machine learning for estimating CATE.
# TODO: convert the einstein notations in this section to the usual ones.
# TODO: expand fij to higher orders of v.
# TODO: add intercept to the final linear regression model
(- Skip this if you are only interested in the implementation.)
A typical double machine learning for CATE solves the following treatment
effect estimation (note that we use the einstein notation here):
y^i = f^i_j(v^k) x^j + g^i(v^k, w^l) + \epsilon
x^j = h^j(v^k, w^l) + \eta
where f^i_j(v^k) is the CATE conditional on V=v and takes the form
f^i_j(v^k) = F^i_{j, k} \rho^k
with \rho^k: v \to R being v^k in the simplest case. Thus we have
y^i = F^i_{j, k} \rho^k x^j + g^i(v^k, w^l) + \epsilon.
The coefficients F_j^i_k can be estimated from the newly-formed data
(\rho^k x^j, y^i) with linear regression where F^i_{j, k} are just
coefficients of every feature in {1, 2, ..., k*j}. For a simple example, if
    both y and x only have one dimension, then the CATE for an input with
covariate (v^1, v^2, v^3) will be F_1v^1, F_2v^2, and F_3v^3. #TODO:
However, note that letting \rho^k simply be v^k actually implicitly assumes
that the value of v^k is small thus is a good approximation of \rho^k.
(- Start of the implementation.)
We implement a complicated version of the double machine learning same as
the algorithm described in the [1]:
1. Let k (cf_folds in our class) be an int. Form a k-fold random
partition {..., (train_data_i, test_data_i), ...,
(train_data_k, test_data_k)}.
2. For each i, train y_model and x_model on train_data_i, then evaluate
their performances in test_data_i whoes results will be saved as
(y_hat_k, x_hat_k). All (y_hat_k, x_hat_k) will be the form
(y_hat, x_hat).
3. Define the differences
y_prime = y - y_hat,
x_prime = (x - x_hat) \otimes v.
Then form the new dataset (y_prime, x_prime).
    4. Perform linear regression on the dataset (y_prime, x_prime) whose
coefficients will be saved in a vector F. The estimated CATE given V=v
will just be
F \dot v.
On the other hand, the ATE can be simply estimated by taking average
of F \dot v over the original data.
Attributes
----------
_is_fitted : bool
        True if the model is fitted otherwise False.
x_model : estimator
Machine learning models for fitting x. Any such models should implement
the fit and predict (also predict_proba if x is discrete) methods
y_model : estimator
Machine learning models for fitting y.
yx_model : estimator
Machine learning models for fitting the residual of y on residual of x.
Currently this should be a linear regression model.
adjustment_transformer : transformer
Transformer for adjustment variables, by default None.
covariate_transformer : transformer
Transformer for covariate variables, by default None.
is_discrete_treatment : bool
categories : str or list
random_state : int
cf_fold : int
The number of folds for performing cross fit, by default 1
treat : float or ndarray
In the case of single discrete treatment, treat should be an int or
str in one of all possible treatment values which indicates the
value of the intended treatment;
in the case of multiple discrete treatment, treat should be a list
or a ndarray where treat[i] indicates the value of the i-th intended
treatment;
in the case of continuous treatment, treat should be a float or a
ndarray, by default None
_v : np.array
Covariate variables in the training set.
_y_d : int
Dimension of the outcome.
_x_d : int
Dimension of the treatment.
ord_transformer : OrdinalEncoder
Ordinal transformer of the discrete treament.
oh_transformer : OneHotEncoder
One hot encoder of the discrete treatment. Note that the total transformer
is combined by the ord_transformer and oh_transformer. See comp_transformer
for detail.
label_dict : dict
x_hat_dict : defaultdict(list)
Cached values when fitting the treatment model.
y_hat_dict : defaultdict(list)
Cached values when fitting the outcome model.
Methods
----------
fit(data, outcome, treatment, adjustment, covariate)
Fit the DML4CATE estimator model.
estimate(data, treat, control, quantity)
Estimate the causal effect.
comp_transformer(x, categories='auto')
Transform the discrete treatment into one-hot vectors.
_cross_fit(model)
Fit x_model and y_model in a cross fitting manner.
_fit_first_stage(x_model, y_model, y, x, wv, folds)
Fit the first stage of the double machine learning.
_fit_second_stage(yx_model, y_prime, x_prime)
Fit the second stage of the DML.
_prepare4est(data)
Reference
----------
[1] <NAME>, et al. Double Machine Learning for Treatment and
Causal Parameters. arXiv:1608.00060.
"""
def __init__(
self,
x_model,
y_model,
yx_model=None,
cf_fold=1,
adjustment_transformer=None,
covariate_transformer=None,
random_state=2022,
is_discrete_treatment=False,
categories='auto',
):
"""
Parameters
----------
x_model : estimator
Machine learning models for fitting x. Any such models should implement
the fit and predict (also predict_proba if x is discrete) methods
y_model : estimator
Machine learning models for fitting y.
yx_model : estimator, optional
Machine learning models for fitting the residual of y on residual of x.
cf_fold : int, optional
The number of folds for performing cross fit, by default 1
adjustment_transformer : transformer, optional
Transformer for adjustment variables, by default None
covariate_transformer : transformer, optional
Transformer for covariate variables, by default None
random_state : int, optional
Random seed, by default 2022
is_discrete_treatment : bool, optional
If the treatment variables are discrete, set this as True, by default False
categories : str, optional
"""
self.cf_fold = cf_fold
self.x_model = clone(x_model)
self.y_model = clone(y_model)
if yx_model is None:
self.yx_model = LinearRegression()
else:
self.yx_model = yx_model
self.adjustment_transformer = adjustment_transformer
self.covariate_transformer = covariate_transformer
self.x_hat_dict = defaultdict(list)
self.y_hat_dict = defaultdict(list)
self.x_hat_dict['is_fitted'].append(False)
self.y_hat_dict['is_fitted'].append(False)
super().__init__(
random_state=random_state,
is_discrete_treatment=is_discrete_treatment,
categories=categories,
)
# TODO:could add a decorator in this place
def fit(
self,
data,
outcome,
treatment,
adjustment=None,
covariate=None,
**kwargs,
):
"""Fit the DML4CATE estimator model.
Parameters
----------
data : pandas.DataFrame
The dataset used for training the model
outcome : str or list of str, optional
Names of the outcome variables
treatment : str or list of str
Names of the treatment variables
adjustment : str or list of str, optional
Names of the adjustment variables, by default None
covariate : str or list of str, optional
| |
config.config['private_config'].get('ssl_key_file', 'server.key') )
self.ssc_file_prefix = self.getfilepath('server')
self.req_file_prefix = self.getfilepath('request')
def getfilepath(self, name):
path = os.getcwd() + '/' + name
path = path.replace('\\', '/')
log.debug(path)
return path
def json(self, selfsigned):
crt, csr = None, None
mycertinfo = {}
mycert = None
try:
if selfsigned:
mycert = X509.load_cert(self.ssc_file_prefix + '.pem')
crt = open(self.ssc_file_prefix + '.pem').read()
else:
mycert = X509.load_request(self.req_file_prefix + '.pem')
csr = open(self.req_file_prefix + '.pem').read()
mycertinfo['C'] = mycert.get_subject().C
mycertinfo['ST'] = mycert.get_subject().ST
mycertinfo['L'] = mycert.get_subject().L
mycertinfo['O'] = mycert.get_subject().O
mycertinfo['CN'] = mycert.get_subject().CN
mycertinfo['OU'] = mycert.get_subject().OU
mycertinfo['Email'] = mycert.get_subject().Email
mycertinfo['SN'] = mycert.get_subject().SN
mycertinfo['GN'] = mycert.get_subject().GN
except:
log.debug('Error in parsing cert')
log.debug(mycertinfo)
log.debug(mycert)
obj = dict(
certinfo = mycertinfo,
cert = crt,
csr = csr,
selfsigned = True if crt != None else False
)
log.debug('In Certificate.json...')
log.debug(obj)
return obj
def GET(self, service=None):
val = None
if service == 'json':
webinput = web.input()
web.header('Content-Type', 'text/javascript')
web.http.expires(0)
if webinput and webinput.get('type', '') == 'csr':
val = json.dumps(self.json(False))
else:
val = json.dumps(self.json(True))
return val
def POST(self, service):
if service != 'json':
return None
error = None
data = {}
cmd = json.loads(web.data())
if cmd['method'] == 'generate':
if cmd['selfsigned']:
crt = cert.Certificate()
else:
crt = cert.Request()
info = cmd['certinfo']
for k,v in info.items():
crt.update(k, v)
if cmd['selfsigned']:
crt.save(self.ssc_file_prefix)
else:
log.debug(self.req_file_prefix)
crt.save(self.req_file_prefix)
data = self.json(cmd['selfsigned'])
elif cmd['method'] == 'upload':
temppath = self.getfilepath('/temp.pem')
f = open(temppath, 'w')
f.write(cmd['cert'])
f.close()
try:
if cert.check(temppath, self.req_file_prefix + '.key'):
os.unlink(self.crt_file)
os.unlink(self.key_file)
os.rename(temppath, self.crt_file)
os.rename(self.req_file_prefix + '.key', self.key_file)
os.unlink(self.req_file_prefix + '.pem')
error = "Success"
else:
error = "Certificate does not match the Certificate Request"
except Exception, e:
error = "The certificate is bad or is in the wrong format"
log.exception("Error loading the certificate")
data['error'] = error
web.header('Content-Type', 'text/javascript')
return json.dumps(data)
class PostInstallConfig:
    # Redirects an authorized admin to the post-install configuration page,
    # granting a short-lived auth cookie on the way.
    def GET(self):
        # raises if the caller is not authorized
        auth.check_authorization()
        # 300-second cookie proving the post-install flow was authenticated
        authvalue = str_encrypt('ic4vc_authenticated')
        web.setcookie('ic4vc-auth', authvalue, 300)
        raise web.seeother('/static/ic4vc-postconfig.html', '')
class Credentials:
    """Proxy credential CRUD requests to the configured credentials
    provider, mirroring upstream headers and status codes.
    """
    def GET(self):
        """Forward a credential query upstream and relay the response.

        Fix: removed an unused ``webdata = web.data()`` local.
        """
        url = config.config['public_config']['providers']['credentials']['credential']['url'] + web.ctx.get('query')
        hdr = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
        r = requests.get(url, headers=hdr)
        for hdr in r.headers:
            web.header(hdr, r.headers[hdr])
        web.ctx.status = str(r.status_code)
        return r.content

    def POST(self):
        """Sanitize and forward a credential add/edit/delete request.

        Fix: ``payload.has_key('action')`` (removed in Python 3) replaced
        with ``payload.get('action')``.
        """
        url = config.config['public_config']['providers']['credentials']['credential']['url']
        log.debug("Credentials POST URL = %s", url)
        log.debug("Credentials POST web data = %s", web.data())
        hdr = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
        webdata = web.data()
        payload = json.loads(webdata)
        # only whitelisted keys may be forwarded upstream
        valids = ["username", "ip", "type", "action"]
        # only an 'add' action may carry a password through
        if payload.get('action') == 'add':
            valids.append("password")
        for k in list(set(payload.keys()) - set(valids)):
            del payload[k]
        webdata = json.dumps(payload)
        r = requests.post(url, data=webdata, headers=hdr)
        for hdr in r.headers:
            web.header(hdr, r.headers[hdr])
        web.ctx.status = str(r.status_code)
        result = r.content
        past_tense = lambda action: action + ('ed' if (action.lower() == 'add') else 'd')
        if r.status_code == 200:
            result = 'Successfully %s Credentials' % (past_tense(payload['action'].capitalize()))
        elif r.status_code == 404:
            result = 'Cannot %s Credentials due to invalid host.' % (payload['action'].capitalize())
        return result
class HPOneView:
    """Proxy GET/POST calls to the HP OneView provider endpoints defined
    under ``providers.hponeview`` in the configuration.
    """
    def GET(self, action=None):
        """Forward a GET for *action* upstream, mirroring response headers
        and status back to the client.

        Fix: removed a ``try: ... except: raise`` wrapper that only
        re-raised every exception (dead code).
        """
        url = config.config['public_config']['providers']['hponeview'][action]['url'] + web.ctx.get('query')
        log.debug("HP OneView GET %s", action)
        log.debug("HP OneView GET URL = %s", url)
        log.debug("HP OneView GET web data = %s", web.data())
        hdr = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
        r = requests.get(url, headers=hdr)
        for hdr in r.headers:
            web.header(hdr, r.headers[hdr])
        web.ctx.status = str(r.status_code)
        return r.content

    def POST(self, action=None):
        """Forward a POST for *action* upstream. A login POST (no action)
        additionally parses the returned auth token; parse errors
        propagate to the caller as before.

        Fix: removed the no-op ``except: raise`` handler (a bare except
        that immediately re-raised).
        """
        #auth.check_authorization()
        log.debug("HP OneView POST %s", action)
        url = config.config['public_config']['providers']['hponeview'][action]['url']
        log.debug("HP OneView POST URL = %s", url)
        log.debug("HP OneView POST web data = %s", web.data())
        hdr = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
        webdata = web.data()
        r = requests.post(url, data=webdata, headers=hdr)
        for hdr in r.headers:
            web.header(hdr, r.headers[hdr])
        if not action:
            if r.status_code == 200:
                # validate the login response / payload shape; credential
                # caching is currently disabled but kept for reference
                auth_token = r.json()['auth']
                #_cred = credential.get_credential()
                tiup = json.loads(webdata)
                #_cred.add(ip = tiup['ip'], credtype = 'hponeview',
                #          credusername = tiup['username'],
                #          credpassword = tiup['password'])
            else:
                web.ctx.status = str(r.status_code)
        return r.content
class UserInterfaceManager :
    @staticmethod
    def registerWithVCenter(vc_config) :
        """Register the UIM .Net and NGC (web-client) plugins with a single
        vCenter.

        vc_config is a dict with at least 'ip', 'username' and 'password'
        (the password may have been encrypted with str_encrypt).

        Side effects: writes static/plugin_config.xml and appends the
        connected VCClient to vmware.vcenters.  Registration failures are
        logged, never raised to the caller.
        """
        # Build url to vcenter wsdl file
        vcenter_wsdl_file = 'file:///' + os.getcwd() + '/share/vmware/wsdl41/vimService.wsdl'
        # Normalize Windows path separators so the file:// URL is valid.
        vcenter_wsdl_file = vcenter_wsdl_file.replace('\\', '/')
        log.debug('Using wsdl file %s', vcenter_wsdl_file)
        # Get the server thumb print for NGC plugin registration
        server_thumbprint = ''
        try:
            # Only an https uim_root has a certificate to fingerprint.
            if config.config['private_config']['web_roots']['uim_root'].lower().startswith('https://') :
                cert = X509.load_cert(config.config['private_config']['ssl_cert_file'])
                s = cert.get_fingerprint('sha1')
                log.debug('Raw Server Thumbprint: %s', s)
                # A SHA-1 fingerprint is 160 bits = 40 hex digits; restore
                # leading zeros the hex conversion may have dropped.
                if len(s) < 40 :
                    s = ('0' * (40 - len(s))) + s
                    log.debug('Padding Server Thumbprint to 160 bits: %s', s)
                # Render as colon-separated byte pairs: "aa:bb:cc:...".
                server_thumbprint = ':'.join(a+b for a,b in zip(s[::2], s[1::2]))
                log.debug('Server Thumbprint: %s', server_thumbprint)
        except:
            # Best-effort: registration proceeds with an empty thumbprint.
            log.exception("Error generating ssl server thumbprint")
        # Setup the vSphere client plugin configuration file
        plugin_key = '<KEY>'
        ngc_plugin_key = ngc_plugin_name
        plugin_version = version
        plugin_zip_name = ngc_plugin_name + '.zip'
        plugin_config_url = '%s/static/plugin_config.xml' % config.config['private_config']['web_roots']['uim_root']
        ngc_plugin_package_url = '%s/static/vsphere-web-client/' % config.config['private_config']['web_roots']['uim_root'] + plugin_zip_name
        log.debug("NGC plugin package config url %s", ngc_plugin_package_url)
        plugin_description = 'Integrates HP ProLiant manageability into vCenter'
        plugin_name = 'HP Insight Management'
        plugin_vendor = 'Hewlett-Packard Company'
        # Convert web_roots and paths to urls
        UserInterfaceManager.buildExtensionURLs(config.config['private_config']['extensions'], config.config['private_config']['web_roots'])
        # Create the .Net plugin object
        plugin_config = plugin.PluginConfig(version = plugin_version, key = plugin_key,
            extensions = config.config['private_config']['extensions'],
            description = plugin_description,
            name = plugin_name,
            vendor = plugin_vendor, multiVCSupported=True, supportNonSecureCommunication=True)
        # Write the .Net plugin xml file that is read by vCenter
        f = open('static/plugin_config.xml', 'w')
        f.write(plugin_config.toxml())
        f.close()
        # Login to VCenter and Register Plugin
        try :
            try :
                password = str_decrypt(vc_config['password'])
            except :
                # Fall back to treating the stored password as plain text.
                log.exception("Error decrypting password for vCenter %s", vc_config['ip'])
                log.warning("Assuming password is not encrypted for vCenter %s", vc_config['ip'])
                password = vc_config['password']
            log.info('Connecting to vCenter: %s', vc_config['ip'])
            vc = VCClient(server=vc_config['ip'], username=vc_config['username'], password=password, wsdl_location=vcenter_wsdl_file)
            log.info('Registering .Net plugin on vCenter: %s', vc_config['ip'])
            vc.register_plugin(key = plugin_key, config_url = plugin_config_url, version = plugin_version, description=plugin_description,
                name = plugin_name, admin_email='<EMAIL>',
                company=plugin_vendor,
                resources = config.config['private_config'].get('resources', [])
                )
            log.info('Registering NGC plugin on vCenter: %s', vc_config['ip'])
            vc.register_ngc_plugin(key=ngc_plugin_key, package_url=ngc_plugin_package_url, version=plugin_version,
                admin_email='<EMAIL>', company=plugin_vendor, name=plugin_name,
                description=plugin_description + ' Web Client',
                server_thumbprint=server_thumbprint)
            # Keep the vcenter instance for looking up cluster info and other things.
            vmware.vcenters.append(vc)
            vc.create_alarms()
        except :
            # Never abort startup because one vCenter failed to register.
            log.exception("Exception setting up vCenter %s", vc_config['ip'])
@staticmethod
def registerWithVCenters() :
vmware.vcenters = []
for vc_config in config.config['private_config']['vcenters'] :
UserInterfaceManager.registerWithVCenter(vc_config)
@staticmethod
def buildExtensionURLs(extensions, web_roots) :
for ext in extensions :
# Fill in format sting with hostname & port
if 'web_root' in ext['url'] : # make sure this conversion wasn't already done
ext['url'] = {'url':web_roots[ext['url']['web_root']] + ext['url']['path']}
if 'extensions' in ext : # Extensions can have nested extensions
UserInterfaceManager.buildExtensionURLs(ext['extensions'], web_roots)
if 'icon' in ext :
if isinstance(ext['icon'], dict) : # make sure this conversion wasn't already done
ext['icon'] = web_roots[ext['icon']['web_root']] + ext['icon']['path']
@staticmethod
def loadConfig() :
# Load and merge config files
config.config = {}
for config_file in ['config.json', 'server_config.json', 'storage_config.json', 'network_config.json'] :
try :
config.config = merge_dict_list(config.config, json.load(open(config_file)))
except IOError, e :
if e.errno == 2 and config_file != 'config.json' : #OK - plugin just not installed
log.debug("Config file '%s' not found", config_file)
else :
log.error("Unable to load config file '%s': %s", config_file, os.strerror(e.errno))
sys.exit(1)
except ValueError, e:
log.exception("Unable to parse '%s': %s", config_file, e)
sys.exit(1)
except :
log.exception("Error processing config file '%s'", config_file)
raise
# Create the URLs for the provider services from web_root and path
for provider in config.config['public_config']['providers'].values() :
for service in provider.values() :
service['url'] = config.config['private_config']['web_roots'][service['web_root']] + service['path']
# Create the URLs for the action menus
for page in config.config['public_config'].get('pages', {}).values() :
for amenu in page.get('action_menu_items', [] ) :
if 'path' and 'web_root' in amenu :
amenu['url'] = config.config['private_config']['web_roots'][amenu['web_root']] + amenu['path']
log.debug('Converting Action Menu Items to URL %s', amenu['url'])
for setting in page.get('settings', [] ) :
if 'path' and 'web_root' in setting :
setting['url'] = config.config['private_config']['web_roots'][setting['web_root']] + setting['path']
log.debug('Converting Settings Menu Items to URL %s', setting['url'])
# Create the URLs for the configuration pages
for item in config.config['public_config'].get('configurationPages', []) :
if 'url' and 'web_root' in item :
item['url'] = config.config['private_config']['web_roots'][item['web_root']] + item['url']
log.debug('Converting configurationPages Items to URL %s', item['url'])
def start(self) :
log.info('Starting Insight Control for VMware vCenter UIM version %s', version)
soap.setup()
UserInterfaceManager.loadConfig()
# Login to VCenter | |
import pandas as pd
import pytest
from feature_engine.encoding import OneHotEncoder
def test_encode_categories_in_k_binary_plus_select_vars_automatically(df_enc_big):
    """With defaults, every categorical variable is detected automatically
    and each of its k categories becomes one binary dummy column."""
    # test case 1: encode all categories into k binary variables, select variables
    # automatically
    encoder = OneHotEncoder(top_categories=None, variables=None, drop_last=False)
    X = encoder.fit_transform(df_enc_big)
    # test init params
    assert encoder.top_categories is None
    assert encoder.variables is None
    assert encoder.drop_last is False
    # test fit attr
    # Expected per-dummy-column sums, i.e. category frequencies in df_enc_big.
    transf = {
        "var_A_A": 6,
        "var_A_B": 10,
        "var_A_C": 4,
        "var_A_D": 10,
        "var_A_E": 2,
        "var_A_F": 2,
        "var_A_G": 6,
        "var_B_A": 10,
        "var_B_B": 6,
        "var_B_C": 4,
        "var_B_D": 10,
        "var_B_E": 2,
        "var_B_F": 2,
        "var_B_G": 6,
        "var_C_A": 4,
        "var_C_B": 6,
        "var_C_C": 10,
        "var_C_D": 10,
        "var_C_E": 2,
        "var_C_F": 2,
        "var_C_G": 6,
    }
    assert encoder.variables_ == ["var_A", "var_B", "var_C"]
    assert encoder.variables_binary_ == []
    assert encoder.n_features_in_ == 3
    assert encoder.encoder_dict_ == {
        "var_A": ["A", "B", "C", "D", "E", "F", "G"],
        "var_B": ["A", "B", "C", "D", "E", "F", "G"],
        "var_C": ["A", "B", "C", "D", "E", "F", "G"],
    }
    # test transform output
    assert X.sum().to_dict() == transf
    # The original categorical column is replaced by its dummies.
    assert "var_A" not in X.columns
def test_encode_categories_in_k_minus_1_binary_plus_list_of_variables(df_enc_big):
    """With drop_last=True and an explicit variable list, only the listed
    variables are encoded, each into k-1 dummies (last category dropped)."""
    # test case 2: encode all categories into k-1 binary variables,
    # pass list of variables
    encoder = OneHotEncoder(
        top_categories=None, variables=["var_A", "var_B"], drop_last=True
    )
    X = encoder.fit_transform(df_enc_big)
    # test init params
    assert encoder.top_categories is None
    assert encoder.variables == ["var_A", "var_B"]
    assert encoder.drop_last is True
    # test fit attr
    # Expected per-dummy-column sums; note category "G" is dropped.
    transf = {
        "var_A_A": 6,
        "var_A_B": 10,
        "var_A_C": 4,
        "var_A_D": 10,
        "var_A_E": 2,
        "var_A_F": 2,
        "var_B_A": 10,
        "var_B_B": 6,
        "var_B_C": 4,
        "var_B_D": 10,
        "var_B_E": 2,
        "var_B_F": 2,
    }
    assert encoder.variables_ == ["var_A", "var_B"]
    assert encoder.variables_binary_ == []
    assert encoder.n_features_in_ == 3
    assert encoder.encoder_dict_ == {
        "var_A": ["A", "B", "C", "D", "E", "F"],
        "var_B": ["A", "B", "C", "D", "E", "F"],
    }
    # test transform output
    for col in transf.keys():
        assert X[col].sum() == transf[col]
    # Encoded column replaced, dropped category absent, unlisted column kept.
    assert "var_B" not in X.columns
    assert "var_B_G" not in X.columns
    assert "var_C" in X.columns
def test_encode_top_categories():
    """With top_categories=4, only the four most frequent categories of each
    variable are one-hot encoded; the rest produce no dummy columns."""
    # test case 3: encode only the most popular categories
    df = pd.DataFrame(
        {
            "var_A": ["A"] * 5
            + ["B"] * 11
            + ["C"] * 4
            + ["D"] * 9
            + ["E"] * 2
            + ["F"] * 2
            + ["G"] * 7,
            "var_B": ["A"] * 11
            + ["B"] * 7
            + ["C"] * 4
            + ["D"] * 9
            + ["E"] * 2
            + ["F"] * 2
            + ["G"] * 5,
            "var_C": ["A"] * 4
            + ["B"] * 5
            + ["C"] * 11
            + ["D"] * 9
            + ["E"] * 2
            + ["F"] * 2
            + ["G"] * 7,
        }
    )
    encoder = OneHotEncoder(top_categories=4, variables=None, drop_last=False)
    X = encoder.fit_transform(df)
    # test init params
    assert encoder.top_categories == 4
    # test fit attr
    # Expected sums for the dummies of the 4 most frequent categories only.
    transf = {
        "var_A_D": 9,
        "var_A_B": 11,
        "var_A_A": 5,
        "var_A_G": 7,
        "var_B_A": 11,
        "var_B_D": 9,
        "var_B_G": 5,
        "var_B_B": 7,
        "var_C_D": 9,
        "var_C_C": 11,
        "var_C_G": 7,
        "var_C_B": 5,
    }
    # test fit attr
    assert encoder.variables_ == ["var_A", "var_B", "var_C"]
    assert encoder.variables_binary_ == []
    assert encoder.n_features_in_ == 3
    assert encoder.encoder_dict_ == {
        "var_A": ["B", "D", "G", "A"],
        "var_B": ["A", "D", "B", "G"],
        "var_C": ["C", "D", "G", "B"],
    }
    # test transform output
    for col in transf.keys():
        assert X[col].sum() == transf[col]
    # Original column replaced; infrequent category "F" got no dummy.
    assert "var_B" not in X.columns
    assert "var_B_F" not in X.columns
def test_error_if_top_categories_not_integer():
    """A non-integer top_categories must be rejected at construction time."""
    bad_value = 0.5
    with pytest.raises(ValueError):
        OneHotEncoder(top_categories=bad_value)
def test_error_if_drop_last_not_bool():
    """A non-boolean drop_last must be rejected at construction time."""
    bad_value = 0.5
    with pytest.raises(ValueError):
        OneHotEncoder(drop_last=bad_value)
def test_raises_error_if_df_contains_na(df_enc_big, df_enc_big_na):
    """Both fit() and transform() must reject data containing NA values."""
    # fit() on NA-containing data
    with pytest.raises(ValueError):
        OneHotEncoder().fit(df_enc_big_na)
    # transform() on NA-containing data after fitting on the clean frame
    with pytest.raises(ValueError):
        enc = OneHotEncoder()
        enc.fit(df_enc_big)
        enc.transform(df_enc_big_na)
def test_encode_numerical_variables(df_enc_numeric):
    """With ignore_format=True, numeric columns are treated as categorical
    and one-hot encoded like any other variable."""
    encoder = OneHotEncoder(
        top_categories=None,
        variables=None,
        drop_last=False,
        ignore_format=True,
    )
    X = encoder.fit_transform(df_enc_numeric[["var_A", "var_B"]])
    # test fit attr
    # Expected full dummy matrix for the 20-row numeric fixture.
    transf = {
        "var_A_1": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_A_2": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        "var_A_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
        "var_B_1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_B_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        "var_B_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
    }
    # Cast both frames to a common dtype so the comparison is dtype-stable.
    transf = pd.DataFrame(transf).astype("int32")
    X = pd.DataFrame(X).astype("int32")
    assert encoder.variables_ == ["var_A", "var_B"]
    assert encoder.variables_binary_ == []
    assert encoder.n_features_in_ == 2
    assert encoder.encoder_dict_ == {"var_A": [1, 2, 3], "var_B": [1, 2, 3]}
    # test transform output
    pd.testing.assert_frame_equal(X, transf)
def test_variables_cast_as_category(df_enc_numeric):
    """Columns with pandas 'category' dtype are encoded identically to their
    plain (numeric/object) counterparts."""
    encoder = OneHotEncoder(
        top_categories=None,
        variables=None,
        drop_last=False,
        ignore_format=True,
    )
    # Copy so the module-scoped fixture is not mutated by the dtype change.
    df = df_enc_numeric.copy()
    df[["var_A", "var_B"]] = df[["var_A", "var_B"]].astype("category")
    X = encoder.fit_transform(df[["var_A", "var_B"]])
    # test fit attr
    # Same expected dummy matrix as the plain-dtype test above.
    transf = {
        "var_A_1": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_A_2": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        "var_A_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
        "var_B_1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_B_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        "var_B_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
    }
    transf = pd.DataFrame(transf).astype("int32")
    X = pd.DataFrame(X).astype("int32")
    assert encoder.variables_ == ["var_A", "var_B"]
    assert encoder.n_features_in_ == 2
    assert encoder.encoder_dict_ == {"var_A": [1, 2, 3], "var_B": [1, 2, 3]}
    # test transform output
    pd.testing.assert_frame_equal(X, transf)
@pytest.fixture(scope="module")
def df_enc_binary():
    """20-row frame: two 3-category variables (var_A, var_B), two binary
    categorical variables (var_C, var_D) and one numeric column (var_num)."""
    df = {
        "var_A": ["A"] * 6 + ["B"] * 10 + ["C"] * 4,
        "var_B": ["A"] * 10 + ["B"] * 6 + ["C"] * 4,
        "var_C": ["AHA"] * 12 + ["UHU"] * 8,
        "var_D": ["OHO"] * 5 + ["EHE"] * 15,
        "var_num": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0],
    }
    df = pd.DataFrame(df)
    return df
def test_encode_into_k_dummy_plus_drop_binary(df_enc_binary):
    """With drop_last_binary=True, binary variables get a single dummy while
    multi-category variables keep all k dummies (drop_last=False)."""
    encoder = OneHotEncoder(
        top_categories=None, variables=None, drop_last=False, drop_last_binary=True
    )
    X = encoder.fit_transform(df_enc_binary)
    # Cast for a dtype-stable frame comparison below.
    X = X.astype("int32")
    # test fit attr
    # Expected output: var_num passes through; var_A/var_B expand to 3 dummies
    # each; binary var_C/var_D keep only their first category's dummy.
    transf = {
        "var_num": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0],
        "var_A_A": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_A_B": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        "var_A_C": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
        "var_B_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_B_B": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        "var_B_C": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
        "var_C_AHA": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        "var_D_OHO": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    }
    transf = pd.DataFrame(transf).astype("int32")
    assert encoder.variables_ == ["var_A", "var_B", "var_C", "var_D"]
    assert encoder.variables_binary_ == ["var_C", "var_D"]
    assert encoder.n_features_in_ == 5
    assert encoder.encoder_dict_ == {
        "var_A": ["A", "B", "C"],
        "var_B": ["A", "B", "C"],
        "var_C": ["AHA"],
        "var_D": ["OHO"],
    }
    # test transform output
    pd.testing.assert_frame_equal(X, transf)
    assert "var_C_B" not in X.columns
def test_encode_into_kminus1_dummyy_plus_drop_binary(df_enc_binary):
encoder = OneHotEncoder(
top_categories=None, variables=None, drop_last=True, drop_last_binary=True
)
X = encoder.fit_transform(df_enc_binary)
X = X.astype("int32")
# test fit attr
transf = {
"var_num": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, | |
<reponame>Valisback/hiring-engineers<filename>code/venv/lib/python3.8/site-packages/datadog_api_client/v2/api/users_api.py<gh_stars>0
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.api_client import ApiClient, Endpoint as _Endpoint
from datadog_api_client.v2.model.permissions_response import PermissionsResponse
from datadog_api_client.v2.model.query_sort_order import QuerySortOrder
from datadog_api_client.v2.model.service_account_create_request import ServiceAccountCreateRequest
from datadog_api_client.v2.model.user_create_request import UserCreateRequest
from datadog_api_client.v2.model.user_invitation_response import UserInvitationResponse
from datadog_api_client.v2.model.user_invitations_request import UserInvitationsRequest
from datadog_api_client.v2.model.user_invitations_response import UserInvitationsResponse
from datadog_api_client.v2.model.user_response import UserResponse
from datadog_api_client.v2.model.user_update_request import UserUpdateRequest
from datadog_api_client.v2.model.users_response import UsersResponse
class UsersApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        """Build the endpoint table for the Users API.

        :param api_client: shared ApiClient instance; a default client is
            created when None is passed.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # POST /api/v2/service_accounts -> UserResponse
        self._create_service_account_endpoint = _Endpoint(
            settings={
                "response_type": (UserResponse,),
                "auth": ["apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/service_accounts",
                "operation_id": "create_service_account",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "body": {
                    "required": True,
                    "openapi_types": (ServiceAccountCreateRequest,),
                    "location": "body",
                },
            },
            headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
            api_client=api_client,
        )
        # POST /api/v2/users -> UserResponse
        self._create_user_endpoint = _Endpoint(
            settings={
                "response_type": (UserResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users",
                "operation_id": "create_user",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "body": {
                    "required": True,
                    "openapi_types": (UserCreateRequest,),
                    "location": "body",
                },
            },
            headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
            api_client=api_client,
        )
        # DELETE /api/v2/users/{user_id} -> no body
        self._disable_user_endpoint = _Endpoint(
            settings={
                "response_type": None,
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users/{user_id}",
                "operation_id": "disable_user",
                "http_method": "DELETE",
                "servers": None,
            },
            params_map={
                "user_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "user_id",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/user_invitations/{user_invitation_uuid} -> UserInvitationResponse
        self._get_invitation_endpoint = _Endpoint(
            settings={
                "response_type": (UserInvitationResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/user_invitations/{user_invitation_uuid}",
                "operation_id": "get_invitation",
                "http_method": "GET",
                "servers": None,
            },
            params_map={
                "user_invitation_uuid": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "user_invitation_uuid",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/users/{user_id} -> UserResponse
        self._get_user_endpoint = _Endpoint(
            settings={
                "response_type": (UserResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users/{user_id}",
                "operation_id": "get_user",
                "http_method": "GET",
                "servers": None,
            },
            params_map={
                "user_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "user_id",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/users/{user_id}/orgs -> UserResponse
        self._list_user_organizations_endpoint = _Endpoint(
            settings={
                "response_type": (UserResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users/{user_id}/orgs",
                "operation_id": "list_user_organizations",
                "http_method": "GET",
                "servers": None,
            },
            params_map={
                "user_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "user_id",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/users/{user_id}/permissions -> PermissionsResponse
        self._list_user_permissions_endpoint = _Endpoint(
            settings={
                "response_type": (PermissionsResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users/{user_id}/permissions",
                "operation_id": "list_user_permissions",
                "http_method": "GET",
                "servers": None,
            },
            params_map={
                "user_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "user_id",
                    "location": "path",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # GET /api/v2/users -> UsersResponse (paginated/filterable)
        self._list_users_endpoint = _Endpoint(
            settings={
                "response_type": (UsersResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users",
                "operation_id": "list_users",
                "http_method": "GET",
                "servers": None,
            },
            params_map={
                "page_size": {
                    "openapi_types": (int,),
                    "attribute": "page[size]",
                    "location": "query",
                },
                "page_number": {
                    "openapi_types": (int,),
                    "attribute": "page[number]",
                    "location": "query",
                },
                "sort": {
                    "openapi_types": (str,),
                    "attribute": "sort",
                    "location": "query",
                },
                "sort_dir": {
                    "openapi_types": (QuerySortOrder,),
                    "attribute": "sort_dir",
                    "location": "query",
                },
                "filter": {
                    "openapi_types": (str,),
                    "attribute": "filter",
                    "location": "query",
                },
                "filter_status": {
                    "openapi_types": (str,),
                    "attribute": "filter[status]",
                    "location": "query",
                },
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": [],
            },
            api_client=api_client,
        )
        # POST /api/v2/user_invitations -> UserInvitationsResponse
        self._send_invitations_endpoint = _Endpoint(
            settings={
                "response_type": (UserInvitationsResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/user_invitations",
                "operation_id": "send_invitations",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "body": {
                    "required": True,
                    "openapi_types": (UserInvitationsRequest,),
                    "location": "body",
                },
            },
            headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
            api_client=api_client,
        )
        # PATCH /api/v2/users/{user_id} -> UserResponse
        self._update_user_endpoint = _Endpoint(
            settings={
                "response_type": (UserResponse,),
                "auth": ["AuthZ", "apiKeyAuth", "appKeyAuth"],
                "endpoint_path": "/api/v2/users/{user_id}",
                "operation_id": "update_user",
                "http_method": "PATCH",
                "servers": None,
            },
            params_map={
                "user_id": {
                    "required": True,
                    "openapi_types": (str,),
                    "attribute": "user_id",
                    "location": "path",
                },
                "body": {
                    "required": True,
                    "openapi_types": (UserUpdateRequest,),
                    "location": "body",
                },
            },
            headers_map={"accept": ["application/json"], "content_type": ["application/json"]},
            api_client=api_client,
        )
def create_service_account(self, body, **kwargs):
"""Create a service account
Create a service account for your organization.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True.
>>> thread = api.create_service_account(body, async_req=True)
>>> result = thread.get()
Args:
body (ServiceAccountCreateRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UserResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._create_service_account_endpoint.default_arguments(kwargs)
kwargs["body"] = body
return self._create_service_account_endpoint.call_with_http_info(**kwargs)
def create_user(self, body, **kwargs):
"""Create a user
Create a user for your organization.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True.
>>> thread = api.create_user(body, async_req=True)
>>> result = thread.get()
Args:
body (UserCreateRequest):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UserResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._create_user_endpoint.default_arguments(kwargs)
kwargs["body"] = body
return self._create_user_endpoint.call_with_http_info(**kwargs)
def disable_user(self, user_id, **kwargs):
"""Disable a user
Disable a user. Can only be used with an application key belonging to an administrator user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True.
>>> thread = api.disable_user(user_id, async_req=True)
>>> result = thread.get()
Args:
user_id (str): The ID of the user.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs = self._disable_user_endpoint.default_arguments(kwargs)
kwargs["user_id"] = user_id
return self._disable_user_endpoint.call_with_http_info(**kwargs)
def get_invitation(self, user_invitation_uuid, **kwargs):
"""Get a user invitation
Returns a single user invitation by its UUID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True.
>>> thread = api.get_invitation(user_invitation_uuid, async_req=True)
>>> result = thread.get()
Args:
user_invitation_uuid (str): The UUID of the user invitation.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we | |
<gh_stars>0
"""Tracer code to set up training pipeline."""
import glob
import os
import logging
import multiprocessing
import pathlib
import pickle
import sqlite3
import subprocess
import sys
from osgeo import osr
from osgeo import gdal
import numpy
import pygeoprocessing
import png
import retrying
import rtree
import shapely.geometry
import taskgraph
"""
git pull && docker build dockerfile-dir -f dockerfile-dir/docker-cpu -t natcap/dam-inference-server-cpu:0.0.1 && docker run -it --rm -v `pwd`:/usr/local/natgeo_dams natcap/dam-inference-server-cpu:0.0.1 python "./training_set_generator_retinet.py"
"""
WORKSPACE_DIR = 'training_set_workspace'
ECOSHARD_DIR = os.path.join(WORKSPACE_DIR, 'ecoshard')
CHURN_DIR = os.path.join(WORKSPACE_DIR, 'churn')
ANNOTATIONS_CSV_PATH = os.path.join('.', 'annotations.csv')
CLASSES_CSV_PATH = os.path.join('.', 'classes.csv')
TRAINING_IMAGERY_DIR = os.path.join(WORKSPACE_DIR, 'training_imagery')
NOT_A_DAM_DIR = 'not_a_dam_images'
PLANET_QUAD_DAMS_DATABASE_URI = (
'gs://natgeo-dams-data/databases/'
'quad_database_md5_12866cf27da2575f33652d197beb05d3.db')
COUNTRY_BORDER_VECTOR_URI = (
'gs://natgeo-dams-data/ecoshards/'
'countries_iso3_md5_6fb2431e911401992e6e56ddf0a9bcda.gpkg')
STATUS_DATABASE_PATH = os.path.join(CHURN_DIR, 'work_status.db')
logging.basicConfig(
filename='log.txt',
level=logging.DEBUG,
format=(
'%(asctime)s (%(relativeCreated)d) %(processName)s %(levelname)s '
'%(name)s [%(funcName)s:%(lineno)d] %(message)s'))
LOGGER = logging.getLogger(__name__)
logging.getLogger('taskgraph').setLevel(logging.INFO)
TRAINING_IMAGE_DIMS = (419, 419)
def create_status_database(quads_database_path, target_status_database_path):
    """Create a runtime status database, replacing any existing one.

    Parameters:
        quads_database_path (str): path to existing database of quads.
        target_status_database_path (str): path to database to create.

    Returns:
        None.
    """
    LOGGER.debug('launching create_status_database')
    # Pull every (quad_id, quad_uri, bounding_box) row from the source db.
    quad_rows = _execute_sqlite(
        '''
        SELECT
            bounding_box_to_mosaic.quad_id,
            quad_id_to_uri.quad_uri,
            bounding_box
        FROM bounding_box_to_mosaic
        INNER JOIN quad_id_to_uri ON
            bounding_box_to_mosaic.quad_id = quad_id_to_uri.quad_id
        ''', quads_database_path, argument_list=[], fetch='all')
    # One argument row per distinct quad id, for the processing-status table.
    distinct_quad_ids = {row[0] for row in quad_rows}
    quad_id_args = [[quad_id] for quad_id in distinct_quad_ids]
    # processed quads table
    # annotations
    schema_sql = (
        """
        CREATE TABLE quad_bounding_box_uri_table (
            quad_id TEXT NOT NULL,
            quad_uri TEXT NOT NULL,
            bounding_box BLOB NOT NULL);
        CREATE INDEX quad_bounding_box_uri_table_quad_id
        ON quad_bounding_box_uri_table (quad_id);
        CREATE TABLE quad_processing_status (
            quad_id TEXT PRIMARY KEY,
            processed INT NOT NULL);
        CREATE TABLE annotation_table (
            record TEXT PRIMARY KEY);
        """)
    # Always start from a fresh database file.
    if os.path.exists(target_status_database_path):
        os.remove(target_status_database_path)
    connection = sqlite3.connect(target_status_database_path)
    connection.executescript(schema_sql)
    connection.commit()
    connection.close()
    _execute_sqlite(
        "INSERT INTO "
        "quad_bounding_box_uri_table (quad_id, quad_uri, bounding_box) "
        "VALUES (?, ?, ?);",
        target_status_database_path,
        argument_list=quad_rows, mode='modify',
        execute='many')
    _execute_sqlite(
        "INSERT INTO "
        "quad_processing_status (quad_id, processed) "
        "VALUES (?, 0);",
        target_status_database_path,
        argument_list=quad_id_args, mode='modify', execute='many')
@retrying.retry(wait_exponential_multiplier=1000, wait_exponential_max=5000)
def _execute_sqlite(
        sqlite_command, database_path, argument_list=None,
        mode='read_only', execute='execute', fetch=None):
    """Execute SQLite command and attempt retries on a failure.

    Parameters:
        sqlite_command (str): a well formatted SQLite command.
        database_path (str): path to the SQLite database to operate on.
        argument_list (list): `execute == 'execute` then this list is passed to
            the internal sqlite3 `execute` call.
        mode (str): must be either 'read_only' or 'modify'.
        execute (str): must be either 'execute', 'many', or 'script'.
        fetch (str): if not `None` can be either 'all' or 'one'.
            If not None the result of a fetch will be returned by this
            function.

    Returns:
        result of fetch if `fetch` is not None.
    """
    cursor = None
    connection = None
    try:
        if mode == 'read_only':
            # Open with a mode=ro URI so reads cannot lock/modify the db.
            ro_uri = r'%s?mode=ro' % pathlib.Path(
                os.path.abspath(database_path)).as_uri()
            LOGGER.debug(
                '%s exists: %s', ro_uri, os.path.exists(os.path.abspath(
                    database_path)))
            connection = sqlite3.connect(ro_uri, uri=True)
        elif mode == 'modify':
            connection = sqlite3.connect(database_path)
        else:
            raise ValueError('Unknown mode: %s' % mode)
        if execute == 'execute':
            cursor = connection.execute(sqlite_command, argument_list)
        elif execute == 'many':
            cursor = connection.executemany(sqlite_command, argument_list)
        elif execute == 'script':
            cursor = connection.executescript(sqlite_command)
        else:
            raise ValueError('Unknown execute mode: %s' % execute)
        result = None
        payload = None
        if fetch == 'all':
            payload = (cursor.fetchall())
        elif fetch == 'one':
            payload = (cursor.fetchone())
        elif fetch is not None:
            # NOTE: an invalid `fetch` is only detected after the command
            # already ran.
            raise ValueError('Unknown fetch mode: %s' % fetch)
        if payload is not None:
            result = list(payload)
        cursor.close()
        connection.commit()
        connection.close()
        return result
    except Exception:
        # Log, clean up, and re-raise so the @retrying.retry decorator can
        # back off (1-5 s, exponential) and try again.
        LOGGER.exception('Exception on _execute_sqlite: %s', sqlite_command)
        if cursor is not None:
            cursor.close()
        if connection is not None:
            # NOTE(review): committing in the failure path may persist a
            # partial modification before the retry -- confirm intent.
            connection.commit()
            connection.close()
        raise
def make_training_data(task_graph, dams_database_path, imagery_dir):
    """Make training data by fetching imagery and building CSVs.

    Schedules one `process_quad` task per unprocessed quad recorded in
    the database, then waits for all of them to complete.

    Parameters:
        task_graph (taskgraph.Taskgraph): TaskGraph object to help with
            scheduling downloads.
        dams_database_path (str): path to database containing a
            "bounding_box_to_mosaic" table.
        imagery_dir (str): path to directory to store images.
            NOTE(review): not referenced in this body -- `process_quad`
            writes into the module-level TRAINING_IMAGERY_DIR instead;
            the parameter is kept for interface compatibility.

    Returns:
        None
    """
    # get all the (quad_id, quad_uri) pairs that have not been processed
    quad_id_uris_to_process = _execute_sqlite(
        '''
        SELECT
        quad_bounding_box_uri_table.quad_id,
        quad_bounding_box_uri_table.quad_uri
        FROM quad_processing_status
        INNER JOIN quad_bounding_box_uri_table ON
        quad_processing_status.quad_id=quad_bounding_box_uri_table.quad_id
        WHERE processed=0
        GROUP BY quad_bounding_box_uri_table.quad_id, quad_uri
        ''', dams_database_path, argument_list=[], fetch='all')
    # (dropped the unused `enumerate` index and unused `_` task binding)
    for quad_id, quad_uri in quad_id_uris_to_process:
        # transient_run/ignore_path_list are taskgraph scheduling hints;
        # presumably they keep the database file out of task
        # fingerprinting -- confirm against taskgraph documentation.
        task_graph.add_task(
            func=process_quad,
            args=(quad_uri, quad_id, dams_database_path),
            transient_run=True,
            ignore_path_list=[dams_database_path],
            task_name='process quad %s' % quad_id)
    task_graph.close()
    task_graph.join()
def process_quad(quad_uri, quad_id, dams_database_path):
    """Process quad into bounding box annotated chunks.

    Downloads the quad raster, projects every dam bounding box stored for
    this quad into the quad's pixel space, merges overlapping boxes,
    slices the quad into TRAINING_IMAGE_DIMS-sized PNG tiles, records one
    annotation row per (tile, box) pair, then marks the quad processed
    and deletes the downloaded raster.

    Parameters:
        quad_uri (str): gs:// path to quad to download.
        quad_id (str): ID in the database so work can be updated.
        dams_database_path (str): path to the database that can be
            updated to include the processing state complete and the
            quad processed.

    Returns:
        None.  (The original docstring claimed True; there is no return
        statement in the body.)
    """
    # local TaskGraph used only to schedule this quad's download
    task_graph = taskgraph.TaskGraph(WORKSPACE_DIR, -1)
    quad_raster_path = os.path.join(
        TRAINING_IMAGERY_DIR, os.path.basename(quad_uri))
    download_quad_task = task_graph.add_task(
        func=copy_from_gs,
        args=(quad_uri, quad_raster_path),
        target_path_list=[quad_raster_path],
        task_name='download %s' % quad_uri)
    # block until the raster is on disk before inspecting it
    download_quad_task.join()
    quad_info = pygeoprocessing.get_raster_info(quad_raster_path)
    n_cols, n_rows = quad_info['raster_size']
    # extract the bounding boxes (stored as pickled WGS84 lat/lng boxes)
    bb_srs = osr.SpatialReference()
    bb_srs.ImportFromEPSG(4326)
    bounding_box_blob_list = _execute_sqlite(
        '''
        SELECT bounding_box
        FROM quad_bounding_box_uri_table
        WHERE quad_id=?
        ''', dams_database_path, argument_list=[quad_id], fetch='all')
    working_dam_bb_list = []  # will be used to collapse duplicates later
    for index, (bounding_box_blob,) in enumerate(bounding_box_blob_list):
        # NOTE(review): pickle.loads on database blobs is only safe while
        # the database is produced by this pipeline itself.
        bounding_box = pickle.loads(bounding_box_blob)
        LOGGER.debug('%s: %s', quad_uri, bounding_box)
        # reproject the lat/lng box into the quad's coordinate system
        local_bb = pygeoprocessing.transform_bounding_box(
            bounding_box, bb_srs.ExportToWkt(),
            quad_info['projection'], edge_samples=11)
        # invert the geotransform so projected coords map to pixel indices
        inv_gt = gdal.InvGeoTransform(quad_info['geotransform'])
        ul_i, ul_j = [int(x) for x in gdal.ApplyGeoTransform(
            inv_gt, local_bb[0], local_bb[1])]
        lr_i, lr_j = [int(x) for x in gdal.ApplyGeoTransform(
            inv_gt, local_bb[2], local_bb[3])]
        # sort so upper-left <= lower-right regardless of axis orientation
        ul_i, lr_i = sorted([ul_i, lr_i])
        ul_j, lr_j = sorted([ul_j, lr_j])
        # possible the dam may lie outside of the quad, if so clip to the
        # edge of the quad
        if ul_j < 0:
            ul_j = 0
        if ul_i < 0:
            ul_i = 0
        if lr_i >= n_cols:
            lr_i = n_cols - 1
        if lr_j >= n_rows:
            lr_j = n_rows - 1
        # if aspect ratio < 0.5, pad the short side (by at least 2 px) so
        # the box is no thinner than a 1:2 rectangle
        bb_xsize = max(1, lr_i-ul_i)
        bb_ysize = max(1, lr_j-ul_j)
        if bb_xsize / bb_ysize < 0.5:
            delta_xsize = max(2, 0.5*bb_ysize-bb_xsize)
            # NOTE(review): these /2 adjustments make the pixel coords
            # floats; the CSV writer below formats them with %d --
            # confirm truncation toward zero is acceptable.
            ul_i -= delta_xsize/2
            lr_i += delta_xsize/2
        elif bb_ysize / bb_xsize < 0.5:
            delta_ysize = max(2, 0.5*bb_xsize-bb_ysize)
            ul_j -= delta_ysize/2
            lr_j += delta_ysize/2
        dam_bb = [ul_i, ul_j, lr_i, lr_j]
        # this is a sanity check
        if ul_i >= n_cols or ul_j >= n_rows or lr_i < 0 or lr_j < 0:
            raise ValueError(
                'transformed coordinates outside of raster bounds: '
                'lat/lng: %s\nlocal: %sraster_bb: %s\ntransformed: %s' % (
                    bounding_box, local_bb, quad_info['bounding_box'], dam_bb))
        working_dam_bb_list.append(dam_bb)
    # merge mutually intersecting boxes into single unioned boxes and
    # index them in an rtree for fast tile-intersection queries below
    bounding_box_rtree = rtree.index.Index()
    index_to_bb_list = []
    while working_dam_bb_list:
        current_bb = shapely.geometry.box(*working_dam_bb_list.pop())
        # iterate backwards so deleting by index stays valid
        for index in range(len(working_dam_bb_list)-1, -1, -1):
            test_bb = shapely.geometry.box(*working_dam_bb_list[index])
            if current_bb.intersects(test_bb):
                current_bb = current_bb.union(test_bb)
                del working_dam_bb_list[index]
        LOGGER.debug(
            'going to insert this: %s',
            str((len(index_to_bb_list), current_bb.bounds)))
        bounding_box_rtree.insert(len(index_to_bb_list), current_bb.bounds)
        index_to_bb_list.append(current_bb.bounds)
    # walk the quad in TRAINING_IMAGE_DIMS steps; the last tile in each
    # direction is shifted back so it stays inside the raster
    quad_slice_index = 0
    annotation_string_list = []
    for xoff in range(0, n_cols, TRAINING_IMAGE_DIMS[0]):
        win_xsize = TRAINING_IMAGE_DIMS[0]
        if xoff + win_xsize >= n_cols:
            # NOTE(review): goes negative if the quad is narrower than one
            # tile -- assumes quads are at least TRAINING_IMAGE_DIMS wide.
            xoff = n_cols-win_xsize-1
        for yoff in range(0, n_rows, TRAINING_IMAGE_DIMS[1]):
            win_ysize = TRAINING_IMAGE_DIMS[1]
            if yoff + win_ysize >= n_rows:
                yoff = n_rows-win_ysize-1
            bb_indexes = list(bounding_box_rtree.intersection(
                (xoff, yoff, xoff+win_xsize, yoff+win_ysize)))
            # see if any of the bounding boxes intersect in which case make
            # a single big one
            if bb_indexes:
                LOGGER.debug(
                    'these local bbs at %d %d: %s', xoff, yoff,
                    str(bb_indexes))
                # clip out the png and name after number of bbs per image
                quad_png_path = os.path.join(
                    TRAINING_IMAGERY_DIR, '%d_%s_%d.png' % (
                        len(bb_indexes), quad_id, quad_slice_index))
                quad_slice_index += 1
                try:
                    make_quad_png(
                        quad_raster_path, quad_png_path,
                        xoff, yoff, win_xsize, win_ysize)
                    # transform local bbs so they're relative to the png
                    for bb_index in bb_indexes:
                        base_bb = list(index_to_bb_list[bb_index])
                        # if the centroid is out of bounds, go with the other
                        # quad that contains it
                        bb_xcentroid = base_bb[0]+(base_bb[2]-base_bb[0])/2
                        bb_ycentroid = base_bb[1]+(base_bb[3]-base_bb[1])/2
                        if (bb_xcentroid-xoff < 0 or
                            bb_xcentroid-xoff >= TRAINING_IMAGE_DIMS[0] or
                            bb_ycentroid-yoff < 0 or
                            bb_ycentroid-yoff >= TRAINING_IMAGE_DIMS[1]):
                            continue
                        # make sure it's not tiny: pad to at least 16 px
                        # on each axis
                        if base_bb[2]-base_bb[0] < 16:
                            delta = 16-(base_bb[2]-base_bb[0])
                            base_bb[0] -= delta//2
                            base_bb[2] += delta//2
                        if base_bb[3]-base_bb[1] < 16:
                            delta = 16-(base_bb[3]-base_bb[1])
                            base_bb[1] -= delta//2
                            base_bb[3] += delta//2
                        # shift into tile-relative coords and clamp to tile
                        base_bb[0] = max(0, base_bb[0]-xoff)
                        base_bb[1] = max(0, base_bb[1]-yoff)
                        base_bb[2] = \
                            min(TRAINING_IMAGE_DIMS[0], base_bb[2]-xoff)
                        base_bb[3] = \
                            min(TRAINING_IMAGE_DIMS[1], base_bb[3]-yoff)
                        # CSV record: path,xmin,ymin,xmax,ymax,label
                        annotation_string_list.append(
                            ['%s,%d,%d,%d,%d,dam' % (
                                quad_png_path, base_bb[0], base_bb[1],
                                base_bb[2], base_bb[3])])
                except Exception:
                    # best-effort: a failed tile is logged and skipped so
                    # the rest of the quad still gets processed
                    LOGGER.exception('skipping %s' % quad_raster_path)
    LOGGER.debug(
        'updating annotation table with this: %s', str(annotation_string_list))
    _execute_sqlite(
        '''
        INSERT OR REPLACE INTO annotation_table
        (record)
        VALUES (?);
        ''', dams_database_path,
        argument_list=annotation_string_list, execute='many', mode='modify')
    _execute_sqlite(
        '''
        UPDATE quad_processing_status
        SET processed=1
        WHERE quad_id=?
        ''', dams_database_path, argument_list=[quad_id], mode='modify')
    task_graph.join()
    task_graph.close()
    # the raster is no longer needed once the tiles are written
    os.remove(quad_raster_path)
def copy_from_gs(gs_uri, target_path):
    """Copy a GS object to `target_path`.

    Parameters:
        gs_uri (str): gs:// URI of the object to download.
        target_path (str): local path to copy the object to; missing
            parent directories are created.

    Returns:
        None
    """
    dirpath = os.path.dirname(target_path)
    if dirpath:
        # exist_ok replaces the previous blanket try/except, which also
        # hid real failures such as permission errors
        os.makedirs(dirpath, exist_ok=True)
    # pass an argument list with the default shell=False so paths with
    # spaces or shell metacharacters cannot break or inject into the
    # command (the old version interpolated into a shell string)
    subprocess.run(['gsutil', 'cp', gs_uri, target_path])
def main():
"""Entry point."""
for dir_path in [
WORKSPACE_DIR, ECOSHARD_DIR, CHURN_DIR, TRAINING_IMAGERY_DIR]:
try:
os.makedirs(dir_path)
except OSError:
pass
task_graph = taskgraph.TaskGraph(
WORKSPACE_DIR, multiprocessing.cpu_count(), 5.0)
planet_quad_dams_database_path = os.path.join(
ECOSHARD_DIR, os.path.basename(PLANET_QUAD_DAMS_DATABASE_URI))
quad_db_dl_task | |
newly created copy of the source node (i.e. the destination
node). See :meth:`.Node._f_copy` for further details on the
semantics of copying nodes.
"""
obj = self.get_node(where, name=name)
if obj._v_depth == 0 and newparent and not newname:
npobj = self.get_node(newparent)
if obj._v_file is not npobj._v_file:
# Special case for copying file1:/ --> file2:/path
self.root._f_copy_children(npobj, overwrite=overwrite,
recursive=recursive, **kwargs)
return npobj
else:
raise OSError(
"You cannot copy a root group over the same file")
return obj._f_copy(newparent, newname,
overwrite, recursive, createparents, **kwargs)
def remove_node(self, where, name=None, recursive=False):
"""Remove the object node *name* under *where* location.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
recursive : bool
If not supplied or false, the node will be removed
only if it has no children; if it does, a
NodeError will be raised. If supplied
with a true value, the node and all its descendants will be
completely removed.
"""
obj = self.get_node(where, name=name)
obj._f_remove(recursive)
def get_node_attr(self, where, attrname, name=None):
"""Get a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to retrieve. If the named
attribute does not exist, an AttributeError is raised.
"""
obj = self.get_node(where, name=name)
return obj._f_getattr(attrname)
def set_node_attr(self, where, attrname, attrvalue, name=None):
"""Set a PyTables attribute for the given node.
Parameters
----------
where, name
These arguments work as in
:meth:`File.get_node`, referencing the node to be acted upon.
attrname
The name of the attribute to set.
attrvalue
The value of the attribute to set. Any kind of Python
object (like strings, ints, floats, lists, tuples, dicts,
small NumPy objects ...) can be stored as an attribute.
However, if necessary, pickle is automatically used so as
to serialize objects that you might want to save.
See the :class:`AttributeSet` class for details.
Notes
-----
If the node already has a large number of attributes, a
PerformanceWarning is issued.
"""
obj = self.get_node(where, name=name)
obj._f_setattr(attrname, attrvalue)
def del_node_attr(self, where, attrname, name=None):
"""Delete a PyTables attribute from the given node.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
attrname
The name of the attribute to delete. If the named
attribute does not exist, an AttributeError is raised.
"""
obj = self.get_node(where, name=name)
obj._f_delattr(attrname)
def copy_node_attrs(self, where, dstnode, name=None):
"""Copy PyTables attributes from one node to another.
Parameters
----------
where, name
These arguments work as in :meth:`File.get_node`, referencing the
node to be acted upon.
dstnode
The destination node where the attributes will be copied to. It can
be a path string or a Node instance (see :ref:`NodeClassDescr`).
"""
srcobject = self.get_node(where, name=name)
dstobject = self.get_node(dstnode)
srcobject._v_attrs._f_copy(dstobject)
def copy_children(self, srcgroup, dstgroup,
overwrite=False, recursive=False,
createparents=False, **kwargs):
"""Copy the children of a group into another group.
Parameters
----------
srcgroup : str
The group to copy from.
dstgroup : str
The destination group.
overwrite : bool, optional
If True, the destination group will be overwritten if it already
exists. Defaults to False.
recursive : bool, optional
If True, all descendant nodes of srcgroup are recursively copied.
Defaults to False.
createparents : bool, optional
If True, any necessary parents of dstgroup will be created.
Defaults to False.
kwargs : dict
Additional keyword arguments can be used to customize the copying
process. See the documentation of :meth:`Group._f_copy_children`
for a description of those arguments.
"""
srcgroup = self.get_node(srcgroup) # Does the source node exist?
self._check_group(srcgroup) # Is it a group?
srcgroup._f_copy_children(
dstgroup, overwrite, recursive, createparents, **kwargs)
def copy_file(self, dstfilename, overwrite=False, **kwargs):
"""Copy the contents of this file to dstfilename.
Parameters
----------
dstfilename : str
A path string indicating the name of the destination file. If
it already exists, the copy will fail with an IOError, unless
the overwrite argument is true.
overwrite : bool, optional
If true, the destination file will be overwritten if it already
exists. In this case, the destination file must be closed, or
errors will occur. Defaults to False.
kwargs
Additional keyword arguments discussed below.
Notes
-----
Additional keyword arguments may be passed to customize the
copying process. For instance, title and filters may be changed,
user attributes may be or may not be copied, data may be
sub-sampled, stats may be collected, etc. Arguments unknown to
nodes are simply ignored. Check the documentation for copying
operations of nodes to see which options they support.
In addition, it recognizes the names of parameters present in
:file:`tables/parameters.py` as additional keyword arguments.
See :ref:`parameter_files` for a detailed info on the supported
parameters.
Copying a file usually has the beneficial side effect of
creating a more compact and cleaner version of the original
file.
"""
self._check_open()
# Check that we are not treading our own shoes
if Path(self.filename).resolve() == Path(dstfilename).resolve():
raise OSError("You cannot copy a file over itself")
# Compute default arguments.
# These are *not* passed on.
filters = kwargs.pop('filters', None)
if filters is None:
# By checking the HDF5 attribute, we avoid setting filters
# in the destination file if not explicitly set in the
# source file. Just by assigning ``self.filters`` we would
# not be able to tell.
filters = getattr(self.root._v_attrs, 'FILTERS', None)
copyuserattrs = kwargs.get('copyuserattrs', True)
title = kwargs.pop('title', self.title)
if Path(dstfilename).is_file() and not overwrite:
raise OSError(
f"file ``{dstfilename}`` already exists; you may want to "
f"use the ``overwrite`` argument"
)
# Create destination file, overwriting it.
dstfileh = open_file(
dstfilename, mode="w", title=title, filters=filters, **kwargs)
try:
# Maybe copy the user attributes of the root group.
if copyuserattrs:
self.root._v_attrs._f_copy(dstfileh.root)
# Copy the rest of the hierarchy.
self.root._f_copy_children(dstfileh.root, recursive=True, **kwargs)
finally:
dstfileh.close()
def list_nodes(self, where, classname=None):
"""Return a *list* with children nodes hanging from where.
This is a list-returning version of :meth:`File.iter_nodes`.
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_list_nodes(classname)
def iter_nodes(self, where, classname=None):
"""Iterate over children nodes hanging from where.
Parameters
----------
where
This argument works as in :meth:`File.get_node`, referencing the
node to be acted upon.
classname
If the name of a class derived from
Node (see :ref:`NodeClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
The returned nodes are alphanumerically sorted by their name.
This is an iterator version of :meth:`File.list_nodes`.
"""
group = self.get_node(where) # Does the parent exist?
self._check_group(group) # Is it a group?
return group._f_iter_nodes(classname)
def __contains__(self, path):
"""Is there a node with that path?
Returns True if the file has a node with the given path (a
string), False otherwise.
"""
try:
self.get_node(path)
except NoSuchNodeError:
return False
else:
return True
    def __iter__(self):
        """Recursively iterate over the nodes in the tree.

        This is equivalent to calling :meth:`File.walk_nodes` with no
        arguments.

        Examples
        --------
        ::

            # Recursively list all the nodes in the object tree.
            h5file = tables.open_file('vlarray1.h5')
            print("All nodes in the object tree:")
            for node in h5file:
                print(node)
        """
        return self.walk_nodes('/')
def walk_nodes(self, where="/", classname=None):
"""Recursively iterate over nodes hanging from where.
Parameters
----------
where : str or Group, optional
If supplied, the iteration starts from (and includes)
this group. It can be a path string or a
Group instance (see :ref:`GroupClassDescr`).
classname
If the name of a class derived from
Node (see :ref:`GroupClassDescr`) is supplied, only instances of
that class (or subclasses of it) will be returned.
Notes
-----
This version iterates over the leaves in the same group in order
to avoid having a list referencing to them and thus, preventing
the LRU cache to remove them after their use.
Examples
--------
::
# Recursively print all the nodes hanging from '/detector'.
print("Nodes hanging from group '/detector':")
for node in h5file.walk_nodes('/detector', classname='EArray'):
print(node)
"""
class_ = get_class_by_name(classname)
if class_ is Group: # only groups
yield from self.walk_groups(where)
elif class_ is Node: # all nodes
yield self.get_node(where)
for group in | |
30.34 C
ATOM 11 N ALA A 23 11.017 19.283 9.723 1.00 27.76 N
ATOM 12 CA ALA A 23 11.998 19.805 10.666 1.00 26.26 C
ATOM 13 C ALA A 23 11.854 19.144 12.033 1.00 23.38 C
ATOM 14 O ALA A 23 11.027 18.251 12.217 1.00 25.31 O
ATOM 15 CB ALA A 23 11.863 21.315 10.790 1.00 27.05 C
ATOM 16 N ALA A 24 12.663 19.590 12.988 1.00 19.15 N
ATOM 17 CA ALA A 24 12.633 19.039 14.337 1.00 17.40 C
ATOM 18 C ALA A 24 11.922 19.980 15.304 1.00 15.24 C
ATOM 19 O ALA A 24 12.393 21.086 15.567 1.00 15.15 O
ATOM 20 CB ALA A 24 14.044 18.750 14.822 1.00 16.09 C
ATOM 21 N ALA A 25 10.786 19.533 15.829 1.00 13.99 N
ATOM 22 CA ALA A 25 10.015 20.329 16.776 1.00 13.94 C
ATOM 23 C ALA A 25 10.239 19.843 18.204 1.00 12.79 C
ATOM 24 O ALA A 25 10.365 18.643 18.449 1.00 11.77 O
ATOM 25 CB ALA A 25 8.536 20.287 16.426 1.00 14.72 C
ATOM 26 N ALA A 26 10.287 20.782 19.143 1.00 11.97 N
ATOM 27 CA ALA A 26 10.504 20.451 20.547 1.00 12.04 C
ATOM 28 C ALA A 26 9.246 19.858 21.172 1.00 10.98 C
ATOM 29 O ALA A 26 8.158 20.424 21.059 1.00 12.65 O
ATOM 30 CB ALA A 26 10.949 21.684 21.319 1.00 11.63 C
ATOM 31 N ALA A 27 9.402 18.715 21.831 1.00 10.67 N
ATOM 32 CA ALA A 27 8.280 18.044 22.478 1.00 12.41 C
ATOM 33 C ALA A 27 8.017 18.628 23.862 1.00 13.93 C
ATOM 34 O ALA A 27 8.706 19.549 24.300 1.00 14.04 O
ATOM 35 CB ALA A 27 8.541 16.549 22.574 1.00 12.92 C
ATOM 36 N ALA A 28 7.014 18.086 24.546 1.00 15.45 N
ATOM 37 CA ALA A 28 6.660 18.549 25.882 1.00 17.17 C
ATOM 38 C ALA A 28 7.601 17.969 26.932 1.00 16.56 C
ATOM 39 O ALA A 28 7.858 16.765 26.952 1.00 18.23 O
ATOM 40 CB ALA A 28 5.218 18.186 26.203 1.00 17.15 C
ATOM 41 N GLY A 29 8.113 18.833 27.803 1.00 18.72 N
ATOM 42 CA GLY A 29 9.016 18.408 28.856 1.00 18.52 C
ATOM 43 C GLY A 29 10.462 18.764 28.570 1.00 18.48 C
ATOM 44 O GLY A 29 11.301 18.760 29.470 1.00 19.65 O
ATOM 45 N ALA A 30 10.753 19.073 27.311 1.00 17.32 N
ATOM 46 CA ALA A 30 12.107 19.433 26.905 1.00 16.80 C
ATOM 47 C ALA A 30 12.217 20.928 26.624 1.00 16.52 C
ATOM 48 O ALA A 30 11.285 21.541 26.102 1.00 17.70 O
ATOM 49 CB ALA A 30 12.525 18.631 25.683 1.00 15.92 C
ATOM 50 N ALA A 31 13.360 21.509 26.972 1.00 17.28 N
ATOM 51 CA ALA A 31 13.591 22.933 26.761 1.00 16.91 C
ATOM 52 C ALA A 31 14.726 23.168 25.770 1.00 16.03 C
ATOM 53 O ALA A 31 15.327 22.221 25.263 1.00 14.98 O
ATOM 54 CB ALA A 31 13.890 23.623 28.083 1.00 17.76 C
ATOM 55 N ALA A 32 15.014 24.438 25.499 1.00 16.22 N
ATOM 56 CA ALA A 32 16.077 24.804 24.571 1.00 16.19 C
ATOM 57 C ALA A 32 17.454 24.603 25.197 1.00 14.39 C
ATOM 58 O ALA A 32 17.594 24.584 26.420 1.00 15.02 O
ATOM 59 CB ALA A 32 15.908 26.246 24.116 1.00 18.18 C
ATOM 60 N ALA A 33 18.466 24.453 24.349 1.00 13.72 N
ATOM 61 CA ALA A 33 19.836 24.268 24.814 1.00 13.64 C
ATOM 62 C ALA A 33 20.441 25.587 25.284 1.00 12.50 C
ATOM 63 O ALA A 33 20.243 26.629 24.660 1.00 12.22 O
ATOM 64 CB ALA A 33 20.690 23.656 23.714 1.00 14.15 C
ATOM 65 N ALA A 34 21.178 25.534 26.389 1.00 12.12 N
ATOM 66 CA ALA A 34 21.807 26.725 26.950 1.00 12.41 C
ATOM 67 C ALA A 34 23.098 27.068 26.213 1.00 13.60 C
ATOM 68 O ALA A 34 24.003 26.240 26.107 1.00 15.68 O
ATOM 69 CB ALA A 34 22.080 26.530 28.433 1.00 12.66 C
ATOM 70 N ALA A 35 23.176 28.295 25.706 1.00 14.17 N
ATOM 71 CA ALA A 35 24.355 28.754 24.981 1.00 16.12 C
ATOM 72 C ALA A 35 25.475 29.145 25.941 1.00 15.11 C
ATOM 73 O ALA A 35 25.219 29.625 27.045 1.00 16.45 O
ATOM 74 CB ALA A 35 23.998 29.925 24.078 1.00 16.47 C
ATOM 75 N GLY A 36 26.715 28.936 25.512 1.00 14.18 N
ATOM 76 CA GLY A 36 27.869 29.252 26.333 1.00 13.93 C
ATOM 77 C GLY A 36 28.258 30.716 26.267 1.00 13.16 C
ATOM 78 O GLY A 36 27.771 31.461 25.417 1.00 12.39 O
ATOM 79 N ALA A 37 29.142 31.128 27.171 1.00 14.69 N
ATOM 80 CA ALA A 37 29.593 32.514 27.230 1.00 16.56 C
ATOM 81 C ALA A 37 30.896 32.713 26.461 1.00 18.09 C
ATOM 82 O ALA A 37 31.452 33.812 26.441 1.00 19.82 O
ATOM 83 CB ALA A 37 29.761 32.952 28.677 1.00 17.45 C
ATOM 84 N ALA A 38 31.380 31.645 25.835 1.00 20.32 N
ATOM 85 CA ALA A 38 32.634 31.691 25.089 1.00 19.19 C
ATOM 86 C ALA A 38 32.523 32.591 23.861 1.00 19.61 C
ATOM 87 O ALA A 38 31.430 32.814 23.340 1.00 18.87 O
ATOM 88 CB ALA A 38 33.057 30.289 24.680 1.00 20.89 C
ATOM 89 N ALA A 39 33.662 33.103 23.405 1.00 18.72 N
ATOM 90 CA ALA A 39 33.693 33.988 22.245 1.00 19.33 C
ATOM 91 C ALA A 39 33.919 33.202 20.957 1.00 18.66 C
ATOM 92 O ALA A 39 33.897 33.765 19.863 1.00 21.79 O
ATOM 93 CB ALA A 39 34.772 35.046 22.416 1.00 20.01 C
ATOM 94 N ALA A 40 34.135 31.898 21.097 1.00 17.11 N
ATOM 95 CA ALA A 40 34.309 31.020 19.946 1.00 17.00 C
ATOM 96 C ALA A 40 33.079 30.140 19.765 1.00 16.01 C
ATOM 97 O ALA A 40 32.433 29.760 20.742 1.00 16.18 O
ATOM 98 CB ALA A 40 35.558 30.168 20.108 1.00 17.26 C
ATOM 99 N ALA A 41 32.753 29.817 18.518 1.00 15.99 N
ATOM 100 CA ALA A 41 31.569 29.015 18.241 1.00 16.37 C
ATOM 101 C ALA A 41 31.823 27.542 18.541 1.00 15.52 C
ATOM 102 O ALA A 41 32.953 27.155 18.845 1.00 18.11 O
ATOM 103 CB ALA A 41 31.133 29.194 16.795 1.00 17.06 C
ATOM 104 N ALA A 42 30.774 26.729 18.414 1.00 13.73 N
ATOM 105 CA ALA A 42 30.833 25.293 18.719 1.00 13.40 C
ATOM 106 C ALA A 42 29.475 24.631 18.468 1.00 12.31 C
ATOM 107 O ALA A 42 28.435 25.294 18.493 1.00 13.00 O
ATOM 108 CB ALA A 42 31.299 25.047 20.147 1.00 14.91 C
ATOM 109 N ALA A 43 29.490 23.325 18.212 1.00 12.74 N
ATOM 110 CA ALA A 43 28.299 22.611 17.753 1.00 12.74 C
ATOM 111 C ALA A 43 27.575 21.788 18.816 1.00 11.86 C
ATOM 112 O ALA A 43 28.147 20.872 19.408 1.00 12.94 O
ATOM 113 CB ALA A 43 28.672 21.710 16.582 1.00 13.72 C
ATOM 114 N ALA A 44 26.306 22.127 19.048 1.00 9.87 N
ATOM 115 CA ALA A 44 25.485 21.399 20.010 1.00 9.48 C
ATOM 116 C ALA A 44 24.361 20.643 19.306 1.00 9.29 C
ATOM 117 O ALA A 44 23.451 21.248 18.741 1.00 9.63 O
ATOM 118 CB ALA A 44 24.911 22.348 21.050 1.00 11.24 C
ATOM 119 N ALA A 45 24.424 19.317 19.347 1.00 8.42 N
ATOM 120 CA ALA A 45 23.416 18.491 18.692 1.00 9.05 C
ATOM 121 C ALA A 45 22.579 17.737 19.718 1.00 9.11 C
ATOM 122 O ALA A 45 23.069 16.837 20.399 1.00 10.48 O
ATOM 123 CB ALA A 45 24.073 17.520 17.725 0.50 9.47 C
ATOM 125 N ALA A 46 21.309 18.115 19.818 1.00 9.29 N
ATOM 126 CA ALA A 46 20.391 17.499 20.768 1.00 9.25 C
ATOM 127 C ALA A 46 19.256 16.778 20.049 1.00 9.68 C
ATOM 128 O | |
<reponame>annac-dima/NLP_Analysis_HarryPotterBooks
#!/usr/bin/env python
# coding: utf-8
# # Harry Potter Books NLP Analysis
#
#
# ***Author: <NAME>***
#
#
# ### Table of Contents
# [1. Data and Preprocessing](#preprocessing)
# [2. Analysis](#analysis)
# [2.1 Word Embeddings](#word-embeddings)
# [2.2 Analysis of most important terms: TF-IDF-TFIDF](#tfidf)
# [2.3 Pointwise Mutual Information (PMI)](#PMI)
# [2.4 Language Generation using HP Books](#language-generation)
# [2.5 Topic Modeling](#topic)
# [2.5.1 LDA Topic Modeling](#LDA)
# [2.5.2 Dynamic Topic Modeling](#dynamic)
# [2.6 Matrix Factorization](#matrix)
# [2.7 Clustering](#clustering)
# [3. Visualization](#visualization)
# [3.1 Word Embeddings Visualization](#word-visua)
# [3.2 TSNE Word2Vec Visualization](#tsne)
# [3.3 Word2Vec Book1 Book7 Visualization](#book1book7)
# [3.4 PCA Words Embeddings Visualization](#pcavisua)
# [3.5 Unigrams TF-IDF-TFIDF Visualization scaled by TF](#tf-scaled)
# [3.6 Bigrams TF-IDF-TFIDF Visualization scaled by TF](#bigrams-scaled)
# [3.7 Harry - Ron - Hermione Occurrences Visualization](#harry-ron-co-occ)
# [3.8 Topic Modeling Visualization](#topic-visua)
# [3.9 Clustering Visualization](#clust-visua)
# In[1]:
'''Import the relevant libraries for the analysis'''
import pandas as pd
import numpy as np
import spacy
import nltk
import gensim
nlp = spacy.load('en_core_web_sm', disable=['ner'])
nlp.max_length = 2000000
import re
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.decomposition import NMF
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import silhouette_score
from nltk.collocations import BigramCollocationFinder, BigramAssocMeasures
from nltk.corpus import stopwords
from collections import Counter
from collections import defaultdict
from gensim import models
from gensim.models import Phrases
from gensim.models import phrases
from gensim.models.phrases import Phraser
from gensim.models import Word2Vec
from gensim.models.word2vec import FAST_VERSION
from gensim.models import LdaMulticore, TfidfModel, CoherenceModel
from gensim.corpora import Dictionary
from gensim.models import ldaseqmodel
from gensim.matutils import hellinger
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn
import seaborn as sns
import time
import multiprocessing
import pyLDAvis
import pyLDAvis.gensim
import os
# set the base directory
# NOTE(review): machine-specific placeholder path -- point this at the
# directory containing the HP<n>.txt book files before running.
b_dir = 'C:/Users/anna9/Desktop/.../'
# ## 1. Data and Preprocessing
# <a id="preprocessing"></a>
# ###### Description of the content and type of the dataset
# For this project I downloaded the 7 Harry Potter Books in .txt format from the http://www.glozman.com/textpages.html website. In the end, I had 7 .txt files, each containing one Harry Potter book.
# ##### PREPROCESSING
#
# The first preprocessing step was to read each of the txt files, transform it into a unique string and then split it into sentences (first splitting the documents, and then applying nlp .sents method).
# Then I used RegEx to remove any numbers and special characters appearing in the text. After that, I took the sentences and expanded all the contractions. Once the sentences were expanded, I tokenized them, put them into lower case and retrieved only the lemma forms of the tokens. I also removed punctuation, kept only the content words ('NOUN','VERB','PROPN', 'ADJ','ADV') and removed stopwords. This significantly reduces noise and retains only the informative part of the text.
# I also created an 'Instances' object, which simply contains the grouped tokens for each sentence.
# Lastly, I created a DataFrame containing the book number, the tokens and the instances.
# The initial input was a set of .txt files, whereas the output is a DataFrame containing the tokenized sentences and the instances. The number of final instances is smaller than the number of original sentences because of the noise reduction.
#
#
#
#
# | Book | Length Original Text | N^ Original Senteces | N^ Preprocesses Sentences
# | --- | --- | --- | --- |
# | 1 | 442066 | 7542 | 6647 |
# | 2 | 489397 | 7931 | 6837 |
# | 3 | 612445 | 10792 | 9345 |
# | 4 | 1106719 | 17804 | 15609 |
# | 5 | 1481713 | 21609 | 18664 |
# | 6 | 984950 | 13939 | 12309 |
# | 7 | 1132501 | 16926 | 14971 |
# In[2]:
'''Defining functions to perform preprocessing'''
def clean_documents(raw_documents):
    '''
    Strip digits and special characters from each sentence.

    Each sentence has its digits removed first, then every run of
    characters other than letters, digits, spaces and apostrophes, and
    is finally lower-cased.
    '''
    digits_re = re.compile('[0-9]')  # drops numbers
    special_re = re.compile("[-+]?[^A-Za-z0-9 ']+")  # drops special chars
    cleaned = []
    for raw_sentence in raw_documents:
        without_digits = digits_re.sub('', raw_sentence)
        without_special = special_re.sub('', without_digits)
        cleaned.append(without_special.lower())
    return cleaned
def tokenizer(documents):
    '''
    Tokenize the documents into lists of lemma strings.

    Each sentence is run through the spaCy ``nlp`` pipeline and reduced
    to the lower-cased lemmas of its content words only
    (NOUN, VERB, PROPN, ADJ, ADV), dropping punctuation and stopwords.
    Sentences left empty after filtering are discarded.
    '''
    # Fix: the original used ``token.pos_ is not 'PUNCT'`` -- identity
    # comparison against a string literal is fragile (and a
    # SyntaxWarning on modern CPython); equality is the correct test.
    # The check is also redundant with the content-word set below, so
    # behavior is unchanged either way.
    text = [[token.lemma_.lower() for token in nlp(sentence)
            if token.pos_ != 'PUNCT'
            and token.pos_ in {'NOUN', 'VERB', 'PROPN', 'ADJ', 'ADV'}
            and not token.is_stop] for sentence in documents]
    # drop sentences that lost every token in the filtering step
    text = [token for token in text if token != []]
    return text
def Instances(text):
    '''
    Join each token list back into a single space-separated string.

    Input: list of token lists (one per sentence).
    Output: list of clean, preprocessed sentence strings.
    '''
    # Fix: the original guarded the return with ``if instances != ''``,
    # which always held (a list never equals a string), so the list is
    # now returned unconditionally -- behavior is unchanged.
    return [' '.join(sentence) for sentence in text]
# In[3]:
'''
Creating a dictionary with the most frequent english contractions from
https://en.wikipedia.org/wiki/Wikipedia:List_of_English_contractions
'''
contraction_dict = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "i would",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there had",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'alls": "you alls",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you you will",
"you'll've": "you you will have",
"you're": "you are",
"you've": "you have"
}
contr = re.compile('(%s)' % '|'.join(contraction_dict.keys()))
def expandContractions(text, contr=contr):
    '''
    Expand contractions in the given text.

    Takes an iterable of sentences and returns a list in which every
    contraction known to `contraction_dict` has been replaced by its
    expanded form.
    '''
    def _expand(match):
        # Look the matched contraction up in the module-level mapping.
        return contraction_dict[match.group(0)]
    # The replacement callback is the same for every sentence, so it is
    # hoisted out of the per-sentence work.
    return [contr.sub(_expand, sentence) for sentence in text]
# In[4]:
'''
Creating a DataFrame with all the 7 HP Books. The DataFrame will contain the Book number, the tokenized sentences and instances
'''
corpustot = pd.DataFrame() #Initializing the final corpus as an empty DataFrame
bookn = [1,2,3,4,5,6,7]
for book in bookn:
file = open(os.path.join(b_dir) + "HP%s.txt" %book, "r",encoding="cp437")
text = file.read() #Open the book .txt file as a unique string
clean_text = text.rstrip()
clean_text = clean_text.replace("\n", " ") #Remove the \n symbols
#print(clean_text)
| |
# Repository: smarie/python-spawner
import multiprocessing as mp
import os
from logging import Logger
import sys
from pickle import PicklingError
from types import FunctionType
from six import with_metaclass, raise_from
try: # python 3.5+
from typing import Union, Any, List, Dict, Tuple, Type, Iterable, Callable
except SyntaxError:
# strange error on some python 3.7 distributions ?!!
pass
except ImportError:
pass
from spawny.main_remotes_and_defs import InstanceDefinition, ScriptDefinition, ModuleDefinition, Definition
from spawny.utils_logging import default_logger
from spawny.utils_object_proxy import ProxifyDunderMeta, replace_all_dundermethods_with_getattr
# True when running under Python 2 (used by the __cmp__ rich-comparison shim below).
PY2 = sys.version_info < (3, 0)
# def init_mp_context():
# """ NOT SUPPORTED IN PYTHON 2
# multiprocessing toolbox setup
# :return:
# """
# mp.set_start_method('spawn')
def run_script(script_str,  # type: str
               python_exe=None,  # type: str
               logger=default_logger  # type: Logger
               ):
    # type: (...) -> ObjectProxy
    """
    Runs `script_str` in a freshly spawned subprocess, inside a dynamically
    created module.

    The returned `ObjectProxy` stands in for that remote module: every
    attribute access or call on it is forwarded over the inter-process
    communication channel owned by the underlying `DaemonProxy`.

    :param script_str: source code of the script to execute remotely
    :param python_exe: optional python executable used to spawn the daemon
    :param logger: optional logger (defaults to `default_logger`)
    :return: an `ObjectProxy` representing the remote module
    """
    daemon = DaemonProxy(ScriptDefinition(script_str), python_exe=python_exe, logger=logger)
    return daemon.obj_proxy
def run_module(module_name,  # type: str
               module_path=None,  # type: str
               python_exe=None,  # type: str
               logger=default_logger  # type: Logger
               ):
    # type: (...) -> ObjectProxy
    """
    Runs the module named `module_name` (optionally located at `module_path`)
    in a spawned subprocess.

    The returned `ObjectProxy` stands in for the remote module: every
    attribute access or call on it is forwarded over the inter-process
    communication channel owned by the underlying `DaemonProxy`.

    :param module_name: name of the module to execute remotely
    :param module_path: optional filesystem path where the module lives
    :param python_exe: optional python executable used to spawn the daemon
    :param logger: optional logger (defaults to `default_logger`)
    :return: an `ObjectProxy` representing the remote module
    """
    daemon = DaemonProxy(ModuleDefinition(module_name, module_path=module_path),
                         python_exe=python_exe, logger=logger)
    return daemon.obj_proxy
def run_object(
        object_instance_or_definition,  # type: Union[Any, Definition]
        python_exe=None,  # type: str
        logger=default_logger  # type: Logger
):
    # type: (...) -> ObjectProxy
    """
    Runs the given object (or the object built from the given `Definition`)
    in a subprocess, and returns an `ObjectProxy` delegating all interactions
    to it through the daemon's inter-process communication channel.

    :param object_instance_or_definition: the instance to ship to the daemon,
        or a `Definition` that the daemon will instantiate on its side
    :param python_exe: optional python executable used to spawn the daemon
    :param logger: optional logger (defaults to `default_logger`)
    :return: an `ObjectProxy` representing the remote object
    """
    d = DaemonProxy(object_instance_or_definition, python_exe=python_exe, logger=logger)
    return d.obj_proxy
# 'protocol' constants: flags and command codes exchanged over the IPC pipe
OK_FLAG = True    # reply marker (daemon side not visible here — TODO confirm semantics)
ERR_FLAG = False  # reply marker (daemon side not visible here — TODO confirm semantics)
START_CMD = -1    # command code (daemon side not visible here — TODO confirm semantics)
EXIT_CMD = 0      # command code (daemon side not visible here — TODO confirm semantics)
EXEC_CMD = 1 # this will send a function to execute (see remote_call_using_pipe call sites)
# --------- all the functions that will be pickled so as to be remotely executed
def get_object(o,
               names
               ):
    """
    Resolve the attribute chain `o.names[0].names[1]...` and return the final
    attribute (returns `o` itself when `names` is empty).

    Kept at module level so it can be pickled and shipped to the daemon
    process for remote execution.

    :param o: root object
    :param names: sequence of attribute names to follow, outermost first
    :return: the resolved attribute
    """
    target = o
    for attr_name in names:
        target = getattr(target, attr_name)
    return target
def is_function(o,
                names):
    """
    Tell whether `o.<names...>` resolves to something function-like: a plain
    function, or an object exposing `im_self` (python 2 bound method) or
    `__self__` (python 3 bound method / builtin). Module-level so it can be
    pickled for remote execution.
    """
    resolved = get_object(o, names)
    return (isinstance(resolved, FunctionType)
            or hasattr(resolved, 'im_self')
            or hasattr(resolved, '__self__'))
def call_method_on_object(o,
                          *args,
                          **kwargs):
    """
    Resolve the attribute chain given by the mandatory `names` keyword
    argument on `o`, then call the result with the remaining positional and
    keyword arguments. Module-level so it can be pickled for remote execution.
    """
    # `names` travels inside kwargs because python 2 has no keyword-only args.
    attr_chain = kwargs.pop('names')
    return get_object(o, attr_chain)(*args, **kwargs)
def call_method_using_cmp_py2(o,
                              *args,
                              **kwargs):
    """
    In python 2 some objects (int...) do not implement rich comparison.
    The problem is that the proxy that we create for them do implement it.
    So we have to redirect the implementation to the object's `__cmp__` and
    translate its sign into the requested rich-comparison result.
    See https://portingguide.readthedocs.io/en/latest/comparisons.html#rich-comparisons

    :param o: root object
    :param args: arguments for the comparison call
    :param kwargs: must contain `names` (attribute chain) and
        `method_to_replace` (the rich-comparison dunder being emulated)
    :return: the boolean result of the emulated rich comparison
    """
    names = kwargs.pop('names')
    rich_name = kwargs.pop('method_to_replace')
    cmp_result = get_object(o, names + ['__cmp__'])(*args, **kwargs)
    # Translate the three-way __cmp__ sign into each rich-comparison outcome.
    outcomes = {
        '__eq__': cmp_result == 0,
        '__ne__': cmp_result != 0,
        '__lt__': cmp_result < 0,
        '__le__': cmp_result <= 0,
        '__gt__': cmp_result > 0,
        '__ge__': cmp_result >= 0,
    }
    if rich_name not in outcomes:
        raise ValueError("invalid method: %s" % rich_name)
    return outcomes[rich_name]
# ---------- end of picklable functions
class ObjectProxy(with_metaclass(ProxifyDunderMeta, object)):
    """
    Represents a proxy to an object. It relies on a daemon proxy to communicate.
    Thanks to the `ProxifyDunderMeta` metaclass, all dunder methods are redirected to __getattr__. See https://stackoverflow.com/questions/9057669/how-can-i-intercept-calls-to-pythons-magic-methods-in-new-style-classes
    """
    # Dunder names that must remain local (identity/introspection) and are
    # never proxied to the remote object.
    __ignore__ = "class mro new init setattr getattr getattribute dict del dir doc name qualname module"
    # Attributes physically stored on the proxy itself; anything else that is
    # accessed goes through the remote-communication path in __getattr__.
    __myslots__ = 'daemon', 'is_multi_object', 'child_names'  # 'instance_type',
    def __init__(self,
                 daemon,  # type: DaemonProxy
                 is_multi_object,  # type: bool
                 instance_type=None,  # type: Type[Any]
                 #attr_methods=None,  # type: List[str]
                 child_names=None  # type: List[str]
                 ):
        """
        :param daemon: the `DaemonProxy` owning the IPC channel used for all
            remote interactions
        :param is_multi_object: when True, attribute access returns nested
            `ObjectProxy` instances instead of bringing values over the pipe
        :param instance_type: optional type of the proxied object, used to
            mirror its dunder methods onto this instance
        :param child_names: attribute chain from the daemon's root object down
            to the object this proxy represents (None for the root itself)
        """
        to_ignore = set("__%s__" % n for n in ObjectProxy.__ignore__.split())
        # replace all methods dynamically: actually this seems to be useless since if we do not do it
        # at class creation that's not taken into account by python.
        if instance_type is not None:
            # if attr_methods is not None:
            #     raise ValueError("only one of instance_type or attr_methods must be set")
            replace_all_dundermethods_with_getattr(ignore=to_ignore, from_cls=instance_type, to_cls_or_inst=self,
                                                   is_class=False)
        # else:
        #     if attr_methods is None:
        #         raise ValueError("one of instance_type or attr_methods must be set")
        #     replace_all_methods_with_getattr(ignore=to_ignore, from_cls=attr_methods, to_cls_or_inst=self,
        #                                      is_class=False)
        self.daemon = daemon
        # self.instance_type = instance_type
        self.is_multi_object = is_multi_object
        self.child_names = child_names
    def __getattr__(self, item):
        """Route attribute access: local slots stay local, selected daemon
        attributes are forwarded, everything else goes over the pipe."""
        if item in ObjectProxy.__myslots__:
            # real local attributes
            return super(ObjectProxy, self).__getattribute__(item)
        elif item in ('terminate_daemon', ):
            # real daemon attributes
            return getattr(self.daemon, item)
        else:
            # remote communication
            # Full attribute chain from the daemon's root object to `item`.
            if self.child_names is not None:
                names = self.child_names + [item]
            else:
                names = [item]
            # first let's check what kind of object this is so that we can determine what to do
            try:
                is_func = self.daemon.remote_call_using_pipe(EXEC_CMD, is_function, names=names, log_errors=False)
            except AttributeError as e:
                # Rich comparison operators might be missing
                # (python 2 objects implementing only __cmp__): emulate them
                # remotely through call_method_using_cmp_py2.
                if PY2 and item in ('__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__'):
                    def remote_method_proxy(*args, **kwargs):
                        return self.daemon.remote_call_using_pipe(EXEC_CMD, call_method_using_cmp_py2,
                                                                  to_execute_args=args, names=names[0:-1],
                                                                  method_to_replace=item, **kwargs)
                    return remote_method_proxy
                else:
                    raise_from(e, e)
            if is_func:
                # a function (not a callable object ): generate a remote method proxy with that name
                def remote_method_proxy(*args, **kwargs):
                    return self.daemon.remote_call_using_pipe(EXEC_CMD, call_method_on_object,
                                                              to_execute_args=args, names=names, **kwargs)
                return remote_method_proxy
            else:
                # an object
                try:
                    # Fetch the remote type so the child proxy can mirror its
                    # dunder methods (see __init__).
                    typ = self.daemon.remote_call_using_pipe(EXEC_CMD, get_object, names=names + ['__class__'],
                                                             log_errors=False)
                    if self.is_multi_object:
                        # create a new DaemonProxy for that object
                        return ObjectProxy(self.daemon, instance_type=typ, is_multi_object=False, child_names=names)
                    else:
                        # bring back the attribute value over the pipe
                        return self.daemon.remote_call_using_pipe(EXEC_CMD, get_object, names=names)
                except DaemonCouldNotSendMsgError as pe:
                    if isinstance(pe.exc, PicklingError):
                        # the object type is not known or cant be decoded locally. Not important, we can still create a
                        # proxy
                        # TODO get the list of methods ?
                        return ObjectProxy(self.daemon, instance_type=None, is_multi_object=False, child_names=names)
                    else:
                        raise
    # TODO
    # def __setattr__(self, key, value):
    #     if not self.is_started():
    #         return super(DaemonProxy, self).__setattr__(key, value)
    #     else:
    #         return setattr(self.obj_proxy, key, value)
    def __call__(self, *args, **kwargs):
        """Call the remote object this proxy represents (callable objects)."""
        return self.daemon.remote_call_using_pipe(EXEC_CMD, call_method_on_object, names=self.child_names,
                                                  to_execute_args=args, **kwargs)
class CommChannel(object):
    """Thin slotted holder for one end of a multiprocessing pipe.

    Wrapping the connection lets owners drop the reference explicitly on
    teardown: `__del__` clears the stored connection object.
    """
    __slots__ = ('conn',)

    def __init__(self, conn):
        # The underlying multiprocessing Connection object.
        self.conn = conn

    def __del__(self):
        # Release our reference so the connection can be collected.
        self.conn = None
class DaemonProxy(object):
"""
A proxy that spawns (or TODO conects to)
a separate process and delegates the methods to it, through an `ObjectProxy`.
"""
def __init__(self,
obj_instance_or_definition, # type: Union[Any, Definition]
python_exe=None, # type: str
logger=default_logger # type: Logger
):
# type: (...) -> DaemonProxy
"""
Creates a daemon running the provided object instance, and inits this Proxy to be able to delegate the calls to
the daemon. Users may either provide the object instance, or a definition of instance to create. In that case
the instance will be created in the daemon.
:param obj_instance_or_definition: the object instance to use in the daemon, or the definition that the daemon
should follow to create the object instance
:param python_exe: the optional python executable to use to launch the daemon. By default the same executable
than this process will be used. Note that a non-None value is not supported on python 2 if the system is
not windows
:param logger: an optional custom logger. By default a logger that prints to stdout will be used.
"""
self.started = False
self.logger = logger or default_logger
# --proxify all dunder methods from the instance type
# unfortunately this does not help much since for new-style classes, special methods are only looked up on the
# class not the instance. That's why we try to register as much special methods as possible in ProxifyDunderMeta
if isinstance(obj_instance_or_definition, Definition):
instance_type = obj_instance_or_definition.get_type()
is_multi_object = obj_instance_or_definition.is_multi_object()
else:
instance_type = obj_instance_or_definition.__class__
is_multi_object = False
self.obj_proxy = ObjectProxy(daemon=self, instance_type=instance_type, is_multi_object=is_multi_object)
# --set executable (actually there is no way to ensure that this is atomic with mp.Process(), too bad !
if python_exe is not None:
if sys.version_info < (3, 0) and not sys.platform.startswith('win'):
raise ValueError("`python_exe` can only be set on windows under python 2. See "
"https://docs.python.org/2/library/multiprocessing.html#multiprocessing.")
else:
mp.set_executable(python_exe)
# --init the multiprocess communication queue/pipe
parent_conn, child_conn = mp.Pipe()
self.parent_conn = CommChannel(parent_conn)
# self.logger.info('Object proxy created an interprocess communication channel')
# --spawn an independent process
self.logger.info('[DaemonProxy] spawning child process...')
self.p = mp.Process(target=daemon, args=(child_conn, obj_instance_or_definition),
name=python_exe or 'python' + '-' | |
# GitHub stars: 10-100
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import io
import cv2
import json
import logging
import numpy as np
import tensorflow as tf
from typing import List
from scipy.spatial.transform import Rotation as R
from .. import data as g_data
from .base import BaseDataGen, ProcessPipeline, AxisAlignBoundingBox
from GraphicsDL.graphicsutils import g_io, g_cfg, g_math, g_str, g_perf
class SemanticsPlane(g_cfg.DictRecursive):
    """Semantic annotation record attached to a group of planes."""
    def __init__(self):
        super().__init__()
        self.ID: int = 0               # semantic entity id
        self.planeID: List[int] = [0]  # ids of the member planes
        self.type: str = ''            # semantic type label
class Junction(g_cfg.DictRecursive):
    """A 3D corner point of the scene annotation."""
    def __init__(self):
        super().__init__()
        self.ID: int = 0
        # xyz position; downstream code scales by 1/1000 (presumably mm -> m)
        self.coordinate: List[float] = [0.0]
class Line(g_cfg.DictRecursive):
    """A 3D line given by a point on it and a direction vector."""
    def __init__(self):
        super().__init__()
        self.ID: int = 0
        self.point: List[float] = [0.0]      # a point on the line
        self.direction: List[float] = [0.0]  # direction vector
class Plane(g_cfg.DictRecursive):
    """A 3D plane described by a normal vector and an offset."""
    def __init__(self):
        super().__init__()
        self.offset: float = 0.0
        self.type: str = ''
        self.ID: int = 0
        self.normal: List[float] = [0.0]
class Annotation3D(g_cfg.DictRecursive):
    """Structural annotation of one scene: junctions, lines, planes, their
    incidence matrices, and per-room semantics."""
    def __init__(self):
        super().__init__()
        self.junctions: List[Junction] = [Junction()]
        self.lines: List[Line] = [Line()]
        self.planes: List[Plane] = [Plane()]
        self.planeLineMatrix: List[List[int]] = [[0]]
        self.lineJunctionMatrix: List[List[int]] = [[0]]
        self.semantics: List[SemanticsPlane] = [SemanticsPlane()]

    def get_semantics_by_room_id(self, room_id):
        """Return the semantics entry whose ID equals `room_id`, or None."""
        return next((sem for sem in self.semantics if sem.ID == room_id), None)

    def get_rooms_by_type(self, r_type) -> List[int]:
        """Return the IDs of every room whose semantic type is `r_type`."""
        return [sem.ID for sem in self.semantics if sem.type == r_type]

    def get_semantic_bounding_box(self, room_id) -> AxisAlignBoundingBox:
        """Axis-aligned bounding box of one room, gathered by walking
        room -> planes -> lines -> junctions through the incidence matrices."""
        room_plane_ids = self.get_semantics_by_room_id(int(room_id)).planeID
        room_planes = [p for p in self.planes if p.ID in room_plane_ids]
        plane_line_m = np.asarray(self.planeLineMatrix)
        line_ids = [np.argwhere(plane_line_m[p.ID])[..., 0] for p in room_planes]
        line_ids = np.unique(np.concatenate(line_ids))
        line_junction_m = np.asarray(self.lineJunctionMatrix)
        junction_ids = [np.argwhere(line_junction_m[l_])[..., 0] for l_ in line_ids]
        junction_ids = np.unique(np.concatenate(junction_ids))
        corner_pts = [j.coordinate for j in self.junctions if j.ID in junction_ids]
        box = AxisAlignBoundingBox()
        box.assign_box_size(np.max(corner_pts, axis=0).tolist(),
                            np.min(corner_pts, axis=0).tolist())
        return box
class S3DUtilize(object):
    """Stateless numpy helpers shared by the Structured3D pipeline."""

    @staticmethod
    def get_fov_normal(image_size, cam_focal):
        """Per-pixel unit view-ray directions for a pinhole camera.

        :param image_size: (width, height) in pixels
        :param cam_focal: (fx, fy) focal lengths in pixels
        :return: (height, width, 3) array of unit vectors
        """
        u2x = (np.arange(1, image_size[0] + 1) - image_size[0] / 2) / cam_focal[0]
        v2y = (np.arange(1, image_size[1] + 1) - image_size[1] / 2) / cam_focal[1]
        m_u2x = np.tile([u2x], (image_size[1], 1))
        m_v2y = np.tile(v2y[:, np.newaxis], (1, image_size[0]))
        m_depth = np.ones(image_size).T
        # Ray layout: (depth, -y, x), then normalize each ray to unit length.
        rays = np.stack((m_depth, -1 * m_v2y, m_u2x), axis=-1)
        return rays / np.sqrt(np.sum(np.square(rays), axis=-1, keepdims=True))

    @staticmethod
    def cast_perspective_to_local_coord(depth_img: np.ndarray, fov_normal):
        """Back-project a depth image by scaling each view ray by its depth."""
        return np.expand_dims(depth_img, axis=-1) * fov_normal

    @staticmethod
    def cast_points_to_voxel(points, labels, room_size=(6.4, 3.2, 6.4), room_stride=0.2):
        """Scatter labelled points into a dense uint8 label volume.

        Points outside [0, room_size) are dropped; on collisions within one
        voxel the last written label wins.
        """
        resolution = (np.asarray(room_size) / room_stride).astype(np.int32)
        indices = np.floor(points / room_stride).astype(np.int32)
        inside = np.logical_and(np.all(indices < resolution, axis=1),
                                np.all(indices >= 0, axis=1))
        x, y, z = [axis[..., 0] for axis in np.split(indices[inside], 3, axis=-1)]
        volume = np.zeros(resolution, dtype=np.uint8)
        volume[x, y, z] = labels[inside]
        return volume

    @staticmethod
    def get_rotation_matrix_from_tu(cam_front, cam_up):
        """float32 rotation matrix whose columns are (front, up, front x up)."""
        cam_n = np.cross(cam_front, cam_up)
        return np.stack((cam_front, cam_up, cam_n), axis=1).astype(np.float32)
class Structured3DDataGen(BaseDataGen):
    """Data generator turning raw Structured3D zips into voxelized room samples."""

    def __init__(self, data_dir, out_dir, process_pipelines, cfg=None, **kargs):
        """
        :param data_dir: directory holding the Structured3D zip archives
        :param out_dir: output directory for assembled samples
        :param process_pipelines: processing pipeline configs; the first one is
            used as `cfg` when none is given explicitly
        :param cfg: optional explicit config providing room size/stride/labels
        """
        super().__init__(data_dir, out_dir, process_pipelines, **kargs)
        cfg = cfg if cfg is not None else process_pipelines[0]
        # Insert the vertical extent at axis 1 -> (x, height, z) room size.
        room_size = np.insert(cfg.room_size, 1, cfg.room_height)
        self.room_size, self.room_stride = np.array(room_size), cfg.room_stride
        # Horizontal centre of the room at floor level (y component is 0).
        self.room_center = self.room_size * [0.5, 0, 0.5]
        self.vox_size = (self.room_size / self.room_stride).astype(np.int32)
        self.label_type = cfg.label_type
        self.data_label = getattr(g_data, self.label_type.upper())()
        self.label_list = self.data_label.label_id_map_arr()
        # Extra black palette entry appended for "no label" rendering.
        self.color_map = np.concatenate([self.data_label.color_map_arr(), [[0, 0, 0]]], axis=0)
        self.fov_n = None
        self.select_nyu_label_id, self.label_mapping, self.category_mapping = None, None, None
        self.init_config()
    def init_config(self):
        """Build the NYU40 -> local label mapping and the per-pixel view rays."""
        nyu40_label = g_data.NYU40().label_id_map_arr()
        # For "living" label sets, 'desk' is also selected and folded into
        # the 'table' class below.
        select_nyu_label = self.label_list + ['desk'] if 'living' in self.label_type else self.label_list
        self.select_nyu_label_id = [nyu40_label.index(s_l) for s_l in select_nyu_label]
        # category_mapping: NYU40 id -> index in self.label_list (0 otherwise).
        self.category_mapping = np.zeros(len(g_data.NYU40().label_id_map_arr()), dtype=np.uint8)
        for s_i, s_l in enumerate(self.select_nyu_label_id):
            self.category_mapping[s_l] = s_i
        if 'living' in self.label_type:
            self.category_mapping[nyu40_label.index('desk')] = self.label_list.index('table')
        # Fixed camera intrinsics: 1280x720 images with the half-FOV below
        # (radians); the same values are asserted in load_camera_and_image.
        image_size = np.array([1280, 720], np.int32)
        cam_half_fov = np.array([0.698132, 0.440992])
        self.fov_n = S3DUtilize.get_fov_normal(image_size, image_size / 2 / np.tan(cam_half_fov))
def load_zips(self, filter_regex='Structured3D') -> g_io.GroupZipIO:
ctx_files = [f for f in os.listdir(self.data_dir) if filter_regex in f]
zip_reader = g_io.GroupZipIO([os.path.join(self.data_dir, f) for f in ctx_files])
return zip_reader
@staticmethod
def read_file_from_zip(zip_reader, scene_id, file_, filter_regex='Structured3D'):
ctx = zip_reader.read('/'.join((filter_regex, scene_id, file_)))
return io.BytesIO(ctx)
def load_scene_anno_from_zip(self, zip_reader, scene_id: str):
anno_3d = Annotation3D()
anno_3d.load(json.load(self.read_file_from_zip(zip_reader, scene_id, 'annotation_3d.json')))
return anno_3d
def get_room_box_from_zip(self, zip_reader, scene_id: str, room_id: str):
scene_anno = self.load_scene_anno_from_zip(zip_reader, scene_id)
room_box = scene_anno.get_semantic_bounding_box(room_id)
room_box.scale(1 / 1000)
room_box.rotation(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
return room_box
    def assemble_semantic_points_from_img(self, depth_img, semantic_pano, cos_threshold=0.15):
        """Back-project one depth image and keep labelled, well-observed points.

        :param depth_img: depth map; the value 65535 marks invalid pixels
        :param semantic_pano: per-pixel label image (0 = unlabelled)
        :param cos_threshold: minimum |cos| between view ray and estimated
            surface normal; filters grazing-angle points with unreliable depth
        :return: (points, labels) for the pixels passing both filters
        """
        points = S3DUtilize.cast_perspective_to_local_coord(depth_img, self.fov_n)
        points_normal = g_math.normal_from_cross_product(points)
        # Clamp the view distance to avoid division by zero at the origin.
        view_dist = np.maximum(np.linalg.norm(points, axis=-1, keepdims=True), float(10e-5))
        cosine_dist = np.sum((points * points_normal / view_dist), axis=-1)
        cosine_dist = np.abs(cosine_dist)
        point_valid = np.logical_and(cosine_dist > cos_threshold, depth_img < 65535)
        label_valid = semantic_pano > 0
        all_valid = np.logical_and(point_valid, label_valid)
        return points[all_valid], semantic_pano[all_valid]
    def get_all_rooms_by_type(self, room_type):
        """Return `scene/2D_rendering/room` paths for every room of `room_type`.

        The result is cached as a text file under the assemble directory; the
        zips are only scanned (and the cache written) on the first call.
        """
        room_list_path = os.path.join(g_str.mkdir_automated(self.out_assemble_dir), f'{room_type}_list')
        if os.path.exists(room_list_path):
            # Cached: one room path per line.
            with open(room_list_path, 'r') as fp:
                room_list = [f.rstrip() for f in fp.readlines()]
        else:
            room_list = list()
            data_zip_meta = self.load_zips()
            # Each scene is identified by the presence of its annotation file.
            scene_list = [c.split('/')[1] for c in data_zip_meta.namelist() if 'annotation_3d.json' in c]
            for scene_id in scene_list:
                scene_anno = self.load_scene_anno_from_zip(data_zip_meta, scene_id)
                room_ids = scene_anno.get_rooms_by_type(room_type)
                room_list.extend([f'{scene_id}/2D_rendering/{r_i}' for r_i in room_ids])
            with open(room_list_path, 'w') as fp:
                fp.writelines('\n'.join(room_list))
            data_zip_meta.close()
        return room_list
    def load_camera_and_image(self, zip_meta, cam_path):
        """Load one view: camera pose plus depth and semantic label images.

        :param zip_meta: grouped zip reader
        :param cam_path: path of the view's `camera_pose.txt` inside the zip
        :return: (cam_r, cam_t, depth_img, label_img); rotation/translation are
            permuted into the generator's axis order and the translation is
            scaled by 1/1000 (presumably mm -> m)
        """
        # load camera_pose
        camera_para = io.BytesIO(zip_meta.read(cam_path)).read().decode('utf-8').split(' ')
        # Sanity check: the file's half-FOV must match the hard-coded values
        # used to precompute self.fov_n in init_config.
        assert np.all(np.array(camera_para[-3:-1]) == np.array(['0.698132', '0.440992']))
        camera_para = np.asarray([float(i_) for i_ in camera_para], np.float32)
        cam_r = S3DUtilize.get_rotation_matrix_from_tu(camera_para[3:6], camera_para[6:9])
        # Axis permutation (same matrix as used in get_room_box_from_zip).
        cam_r = np.matmul(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]), cam_r)
        cam_t = np.matmul(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]), camera_para[:3] / 1000)
        # load depth image; 0 means "no reading" and is remapped to the 65535 sentinel
        depth_path = cam_path.replace('camera_pose.txt', 'depth.png')
        depth_img_data = np.frombuffer(io.BytesIO(zip_meta.read(depth_path)).read(), np.uint8)
        depth_img = cv2.imdecode(depth_img_data, cv2.IMREAD_UNCHANGED)
        depth_img[depth_img == 0] = 65535
        # load semantic image (BGR -> RGB), then map palette colors to NYU40 ids
        semantic_path = cam_path.replace('camera_pose.txt', 'semantic.png')
        semantic_img_data = np.frombuffer(io.BytesIO(zip_meta.read(semantic_path)).read(), np.uint8)
        semantic_img = cv2.imdecode(semantic_img_data, cv2.IMREAD_UNCHANGED)[..., ::-1]
        semantic_id_img = np.zeros(semantic_img.shape[:2], dtype=np.uint8)
        for l_id in self.select_nyu_label_id:
            color = np.asarray(g_data.NYU40().color_map(l_id), dtype=np.uint8)
            semantic_id_img[np.all(semantic_img == color, axis=-1)] = l_id
        # Compress NYU40 ids into this generator's label space.
        label_img = np.take(self.category_mapping, semantic_id_img)
        return cam_r, cam_t, depth_img, label_img
@staticmethod
def filter_far_view(cam_t, cam_r):
cam_r_rotvec = R.from_dcm(cam_r).as_rotvec()
if cam_t[0] <= 0.5 and cam_t[2] <= 0.5:
return -np.pi / 2 < cam_r_rotvec[1] < 0
elif cam_t[0] > 0.5 and cam_t[2] <= 0.5:
return -np.pi < cam_r_rotvec[1] < -np.pi / 2
elif cam_t[0] > 0.5 and cam_t[2] > 0.5:
return np.pi / 2 < cam_r_rotvec[1] < np.pi
elif cam_t[0] <= 0.5 and cam_t[2] > 0.5:
return 0 < cam_r_rotvec[1] < np.pi / 2
else:
raise NotImplementedError
def visualize_image(self, depth_image, label_image, vis_path):
depth_img_vis = (np.where(depth_image < 65535, depth_image, 0) / 1000 / 6.4 * 255).astype(np.uint8)
cv2.imwrite(vis_path + '_depth.png', depth_img_vis)
label_image_vis = self.color_map[label_image][..., ::-1]
cv2.imwrite(vis_path + '_category.jpg', label_image_vis)
    def visualize_surface_voxel(self, surface_voxel, cam_t_list, vis_path):
        """Dump the label volume as a PLY file with camera positions marked.

        Camera voxels get the extra palette index appended below, rendering
        them black.
        """
        voxel_room_cam = surface_voxel.copy()
        cam_t_vox_list = cam_t_list / self.room_stride
        # NOTE(review): uint8 caps voxel indices at 255 — fine for the small
        # grids used here, but verify if grid resolutions grow.
        voxel_room_cam[tuple(np.split(cam_t_vox_list.astype(np.uint8), 3, axis=-1))] = len(self.color_map)
        color_map = np.concatenate([self.color_map, [[0, 0, 0]]])
        g_io.PlyIO().dump_vox(vis_path, voxel_room_cam, vox_scale=self.room_stride, colors_map=color_map)
def single_thread_perspective_vol(self, room_list, room_type, _, worker_id):
vis_dir = g_str.mkdir_automated(os.path.join(self.out_dir, f'vis_{room_type}'))
tmp_dir = g_str.mkdir_automated(os.path.join(self.out_dir, f'tmp_{room_type}'))
assemble_zip_path = os.path.join(tmp_dir, f'assemble_worker{worker_id}.zip')
if os.path.exists(assemble_zip_path):
logging.info(f'Skip {assemble_zip_path} generation')
return
train_samples = list()
vol_zip = g_io.ZipIO(assemble_zip_path, 'w')
zip_meta = self.load_zips()
for r_i, room_path in enumerate(room_list):
if r_i % 100 == 0:
logging.info(f'{worker_id} {r_i}th/{len(room_list)}')
cam_path_list = [c for c in zip_meta.namelist() if room_path in c and 'camera_pose.txt' in c]
if len(cam_path_list) == 0:
continue
scene_id, _, room_id = room_path.split('/')
room_box = self.get_room_box_from_zip(zip_meta, scene_id, room_id)
if np.any(room_box.box_size() > np.asarray(self.room_size)):
continue
room_samples, point_list, label_list, cam_t_list, cam_r_list = list(), list(), list(), list(), list()
for cam_path in cam_path_list:
_, scene_id, _, room_id, _, _, view_id, _ = cam_path.split('/')
room_view_id = '%s-room_%s-view_%03d' % (scene_id, room_id, int(view_id))
cam_r, cam_t, depth_img, label_img = self.load_camera_and_image(zip_meta, cam_path)
r_points, r_labels = self.assemble_semantic_points_from_img(depth_img, label_img)
if len(r_points) == 0:
continue
r_points = np.matmul(r_points / 1000, cam_r.T).astype(np.float32) + cam_t
# remove wrong label
if 'sofa' in self.label_list and self.label_list.index('sofa') in r_labels:
p_valid = np.logical_or(r_points[..., 1] < 2.0, r_labels != self.label_list.index('sofa'))
if not np.all(p_valid):
r_points, r_labels = r_points[p_valid], r_labels[p_valid]
if len(r_points) == 0:
continue
[arr.append(d) for arr, d in zip([room_samples, point_list, label_list, cam_t_list, cam_r_list],
[room_view_id, r_points, r_labels, cam_t, cam_r])]
if r_i < 20 and worker_id == 0:
self.visualize_image(depth_img, label_img, os.path.join(vis_dir, f'{room_view_id}'))
if len(point_list) == 0:
continue
# remove outside points
point_list, label_list = np.concatenate(point_list), np.concatenate(label_list)
p_valid = np.logical_and(np.all(point_list < room_box.max, -1), np.all(point_list > room_box.min, -1))
point_list, label_list = point_list[p_valid], label_list[p_valid]
if len(point_list) == 0:
continue
point_floor_center = room_box.center_floor()
point_list = point_list - point_floor_center + np.asarray(self.room_center)
surface_voxel = S3DUtilize.cast_points_to_voxel(point_list, label_list, self.room_size, self.room_stride)
if len(np.unique(surface_voxel)) - 1 == 0:
continue
# camera
cam_t_list, cam_r_list = np.asarray(cam_t_list), np.asarray(cam_r_list)
cam_t_list = cam_t_list - point_floor_center + np.asarray(self.room_center)
valid_c = np.logical_and(np.all(cam_t_list < self.room_size, axis=1), np.all(cam_t_list >= 0, axis=1))
cam_t_list, cam_r_list = cam_t_list[valid_c], cam_r_list[valid_c]
if len(cam_t_list) == 0:
continue
room_samples = np.asarray(room_samples)[valid_c].tolist()
cam_t_norm_list = cam_t_list / self.room_size[0]
npz_meta = io.BytesIO()
np.savez_compressed(npz_meta, camera_id=room_samples, label=surface_voxel, room_size=room_box.box_size(),
cam_t=cam_t_norm_list, cam_r=cam_r_list)
npz_meta.seek(0)
room_out_name = '%s-room_%s' % (scene_id, room_id)
vol_zip.writestr(f'{room_out_name}.npz', npz_meta.read())
train_samples.extend(room_samples)
if r_i < 20 | |
from datetime import timedelta
from django.urls import reverse
from rest_framework import status
from internal.tests.base_test import UserModel
from library.models import Artist, Song, SongTag, SongWorkLink, Work
from library.tests.base_test import LibraryAPITestCase
class SongListViewTestCase(LibraryAPITestCase):
    """Tests of the song list endpoint: listing, search queries and auth."""

    # Endpoint under test.
    url = reverse("library-song-list")
    def setUp(self):
        """Create one plain user, one library manager, and the song fixtures."""
        # create a user without any rights
        self.user = self.create_user("TestUser")
        # create a manager
        self.manager = self.create_user("TestManager", library_level=UserModel.MANAGER)
        # create test data
        self.create_test_data()
    def test_get_song_list(self):
        """A plain user can list all songs; results are sorted by title."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["count"], 2)
        self.assertEqual(len(response.data["results"]), 2)
        # Songs are sorted by title
        self.check_song_json(response.data["results"][0], self.song1)
        self.check_song_json(response.data["results"][1], self.song2)
    def test_get_song_long_lyrics(self):
        """Lyrics longer than the preview limit come back truncated."""
        # Login as simple user
        self.authenticate(self.user)
        # add lyrics to one song
        self.song2.lyrics = """Mary had a little lamb
Little lamb, little lamb
Mary had a little lamb
Its fleece was white as snow
And everywhere that Mary went
Mary went, Mary."""
        self.song2.save()
        # Get songs list
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["count"], 2)
        self.assertEqual(len(response.data["results"]), 2)
        # check lyrics: only the leading lines are returned and the
        # truncation flag is set
        self.assertDictEqual(
            response.data["results"][1]["lyrics_preview"],
            {
                "text": """Mary had a little lamb
Little lamb, little lamb
Mary had a little lamb
Its fleece was white as snow
And everywhere that Mary went""",
                "truncated": True,
            },
        )
    def test_get_song_list_forbidden(self):
        """An unauthenticated request to the song list is rejected with 401."""
        # Attempt to get songs list without logging in
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_get_song_list_with_query(self):
        """A plain-text query matches title, artist and work names."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "ong1"
        # Should only return song1
        self.song_query_test("ong1", [self.song1])
        # Get songs list with query = "tist1"
        # Should only return song2 which has Artist1 as artist
        self.song_query_test("tist1", [self.song2])
        # Get songs list with query = "ork1"
        # Should only return song2 which is linked to Work1
        self.song_query_test("ork1", [self.song2])
    def test_get_song_list_with_query_empty(self):
        """An empty query string returns every song."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = ""
        # Should return all songs
        self.song_query_test("", [self.song1, self.song2])
    def test_get_song_list_with_query_detail(self):
        """A query also matches version and detail fields of a song."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "ersion2" (substring of "Version2")
        # Should only return song2
        self.song_query_test("ersion2", [self.song2])
        # Get songs list with query = "etail2" (substring of "Detail2")
        # Should only return song2
        self.song_query_test("etail2", [self.song2])
        # Get songs list with query = "etail_Video2" (substring of "Detail_Video2")
        # Should only return song2
        self.song_query_test("etail_Video2", [self.song2])
    def test_get_song_list_with_query_tag(self):
        """`#TAG` queries match songs by their tags."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "#TAG1"
        # Should only return song2
        self.song_query_test("#TAG1", [self.song2])
        # Get songs list with query = "#TAG2"
        # Should not return any result
        self.song_query_test("#TAG2", [])
    def test_get_song_list_with_query_artist(self):
        """`artist:` queries match artists; doubled quotes force exact match."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "artist:1"
        # Should only return song2
        self.song_query_test("artist:1", [self.song2])
        # Get songs list with query = "artist:k"
        # Should not return any result
        self.song_query_test("artist:k", [])
        # Get songs list with query = "artist:""Artist1""" (exact match)
        # Should only return song2
        self.song_query_test('artist:""Artist1""', [self.song2])
        # Get songs list with query = "artist:""tist1""" (exact match on a substring)
        # Should not return any result
        self.song_query_test('artist:""tist1""', [])
    def test_get_song_list_with_query_work(self):
        """Work-type keyword queries (e.g. `wt1:`) only match works of that type."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "wt1:Work1"
        # Should only return song2
        self.song_query_test("wt1:Work1", [self.song2])
        # Get songs list with query = "wt1:""Work1"""
        # Should only return song2
        self.song_query_test("""wt1:""Work1"" """, [self.song2])
        # Get songs list with query = "wt2:Work1"
        # Should not return any result since Work1 is not of type workType2
        self.song_query_test("wt2:Work1", [])
    def test_get_song_list_with_query_work_alternative_title(self):
        """Work queries also match works through their alternative titles."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "work:AltTitle1"
        # Should only return song2
        self.song_query_test("work:AltTitle1", [self.song2])
        # Get songs list with query = "work:""AltTitle1"""
        # Should only return song2
        self.song_query_test("""work:""AltTitle1"" """, [self.song2])
        # Get songs list with query = "wt1:AltTitle1"
        # Should only return song2
        self.song_query_test("wt1:AltTitle1", [self.song2])
        # Get songs list with query = "wt1:""AltTitle1"""
        # Should only return song2
        self.song_query_test("""wt1:""AltTitle1"" """, [self.song2])
        # Get songs list with query = "AltTitle1"
        # Should only return song2
        self.song_query_test("AltTitle1", [self.song2])
        # Get songs list with query = "wt2:AltTitle1"
        # Should not return any result since Work1 is not of type workType2
        self.song_query_test("wt2:AltTitle1", [])
    def test_get_song_list_with_query_title(self):
        """`title:` queries only match against song titles."""
        # Login as simple user
        self.authenticate(self.user)
        # Get songs list with query = "title:1"
        # Should only return song1
        self.song_query_test("title:1", [self.song1])
        # Get songs list with query = "title:""Song1""" (exact match)
        # Should only return song1
        self.song_query_test(""" title:""Song1"" """, [self.song1])
        # Get songs list with query = "title:Artist"
        # Should not return any result
        self.song_query_test("title:Artist", [])
def test_get_song_list_with_query_multiple(self):
    """Test to verify song list with a query combining several criteria.

    A query containing both an artist and a title criterion must only
    return songs that satisfy all criteria at once.
    """
    # Login as simple user
    self.authenticate(self.user)
    # Get songs list with query = "artist:Artist1 title:1"
    # No song satisfies both criteria simultaneously, so the result
    # should be empty
    self.song_query_test("artist:Artist1 title:1", [])
def test_get_song_list_with_query_complex(self):
    """Test to verify parsed query is returned.

    The query matches no song; the test checks the structure the server
    reports back in ``response.data["query"]`` after parsing.
    """
    # Login as simple user
    self.authenticate(self.user)
    # Complex query mixing bare keywords, a tag, quoted exact values, an
    # escaped space and an unrecognized keyword prefix ("wt3:")
    query = (
        """hey artist: me work:you wt1:workName title: test\\ Test """
        """remain stuff #tagg wt3:test artist:"my artist" work:""exact """
        """Work"" i """
    )
    # Get song list with a complex query
    # should not return any song, but we'll check returned parsed query
    response = self.client.get(self.url, {"query": query})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data["count"], 0)
    results = response.data["results"]
    self.assertEqual(len(results), 0)
    query = response.data["query"]
    # unparsed terms, including the unknown "wt3:test" prefix, are kept
    # in "remaining"
    self.assertCountEqual(
        query["remaining"], ["remain", "stuff", "hey", "i", "wt3:test"]
    )
    # tags are upper-cased by the parser (#tagg -> TAGG)
    self.assertCountEqual(query["tag"], ["TAGG"])
    # the escaped space in "test\ Test" yields a single contains term
    self.assertCountEqual(query["title"]["contains"], ["test Test"])
    self.assertCountEqual(query["title"]["exact"], [])
    self.assertCountEqual(query["artist"]["contains"], ["me", "my artist"])
    self.assertCountEqual(query["artist"]["exact"], [])
    self.assertCountEqual(query["work"]["contains"], ["you"])
    # double-double-quoted values are parsed as exact matches
    self.assertCountEqual(query["work"]["exact"], ["exact Work"])
    self.assertCountEqual(query["work_type"].keys(), ["wt1"])
    self.assertCountEqual(query["work_type"]["wt1"]["contains"], ["workName"])
    self.assertCountEqual(query["work_type"]["wt1"]["exact"], [])
def song_query_test(self, query, expected_songs):
    """Request the song list with a query and check the results.

    The returned songs must be exactly expected_songs, in the same order.
    """
    # TODO This only works when there is only one page of songs
    response = self.client.get(self.url, {"query": query})
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data["count"], len(expected_songs))
    results = response.data["results"]
    self.assertEqual(len(results), len(expected_songs))
    # compare the returned ids with the expected ids, order included
    returned_ids = [song["id"] for song in results]
    expected_ids = [song.id for song in expected_songs]
    self.assertEqual(returned_ids, expected_ids)
def test_get_song_list_disabled_tag(self):
    """Test to verify songs with a disabled tag for a simple user.

    For a simple user, song list does not include disabled songs with tags.
    """
    # Login as simple user
    self.authenticate(self.user)
    # Set tag1 disabled
    self.tag1.disabled = True
    self.tag1.save()
    # Get songs list, there should be only one song
    response = self.client.get(self.url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data["count"], 1)
    self.assertEqual(len(response.data["results"]), 1)
    # The remaining song is Song1
    # Song2 has been excluded from the list
    self.check_song_json(response.data["results"][0], self.song1)
def test_get_song_list_disabled_tag_manager(self):
    """Test to verify songs with disabled tags for manager.

    For a manager, song list includes disabled songs with tags.
    """
    # Login as manager
    self.authenticate(self.manager)
    # Set tag1 disabled
    self.tag1.disabled = True
    self.tag1.save()
    # Get songs list: both songs should be present, since a manager can
    # see songs whose tags are disabled
    response = self.client.get(self.url)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(response.data["count"], 2)
    self.assertEqual(len(response.data["results"]), 2)
    # Both Song1 and Song2 are listed
    self.check_song_json(response.data["results"][0], self.song1)
    self.check_song_json(response.data["results"][1], self.song2)
def test_post_song_simple(self):
    """Test to create a song without nested artists, tags nor works."""
    # authenticate with manager privileges
    self.authenticate(self.manager)
    # check the initial amount of songs
    self.assertEqual(Song.objects.count(), 2)
    # request the creation of a new song
    payload = {
        "title": "Song3",
        "filename": "song3",
        "directory": "directory",
        "duration": 0,
        "lyrics": "mary had a little lamb",
        "version": "version 1",
        "detail": "test",
        "detail_video": "here",
    }
    response = self.client.post(self.url, payload)
    # the creation must succeed
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    # the stored song must carry every submitted field
    created = Song.objects.get(title="Song3")
    self.assertIsNotNone(created)
    expected_fields = {
        "filename": "song3",
        "directory": "directory",
        "duration": timedelta(0),
        "lyrics": "mary had a little lamb",
        "version": "version 1",
        "detail": "test",
        "detail_video": "here",
    }
    for field, value in expected_fields.items():
        self.assertEqual(getattr(created, field), value)
def test_post_song_with_tag(self):
"""Test to create a song with nested tags."""
# login as manager
self.authenticate(self.manager)
# pre assert the amount of songs
self.assertEqual(Song.objects.count(), 2)
self.assertEqual(SongTag.objects.count(), 2)
# pre assert
self.assertNotEqual(self.tag1.color_hue, 256)
# create a new song
song = {
"title": "Song3",
"filename": "song3",
"directory": "directory",
"duration": 0,
"tags": [
{"name": "TAG3", "color_hue": 134},
{"name": self.tag1.name, "color_hue": 256},
],
}
response = self.client.post(self.url, song)
# assert the response
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# assert tag3 was created with the given color hue
self.assertEqual(SongTag.objects.count(), 3)
tag3 = SongTag.objects.get(name="TAG3")
self.assertIsNotNone(tag3)
self.assertEqual(tag3.color_hue, 134)
# assert | |
# repository: lgbouma/cdips-pipeline
# -*- coding: utf-8 -*-
'''
run
$ python TESS_ETE6_reduction.py --help
'''
from __future__ import division, print_function
import os, time
import matplotlib as mpl
mpl.use('AGG')
import numpy as np, pandas as pd, matplotlib.pyplot as plt
import aperturephot as ap, shared_variables as sv, autoimagesub as ais, \
imagesubphot as ism, tessutils as tu, lcstatistics as lcs, \
imageutils as iu
from glob import glob
from tqdm import tqdm
from astropy.io import fits
from astropy import units as units, constants as constants
from datetime import datetime
import argparse
from parse import parse, search
np.random.seed(42)
def get_files_needed_before_image_subtraction(
        fitsdir, fitsglob, outdir, initccdextent, ccdgain, zeropoint, exptime,
        ra_nom, dec_nom,
        catra, catdec, ccd_fov,
        catalog, catalog_file, reformed_cat_file,
        fnamestr='*-1-1-0016_cal_img.fits', anetfluxthreshold=20000,
        fistarglob='*.fistar',
        width=13, anettweak=6, anetradius=30, xpix=2048, ypix=2048, cols=(2,3),
        brightrmag=6.0, faintrmag=13.0,
        fiphotfluxthreshold=1000,
        aperturelist='1.45:7.0:6.0,1.95:7.0:6.0,2.45:7.0:6.0',
        nworkers=20,
        useastrometrydotnet=True,
        useimagenotfistar=True,
        extractsources=True
    ):
    '''
    get .fistar, .fiphot, and .wcs files needed before image subtraction

    1. run parallel_extract_sources on all frames with threshold ~ 10000 to get
    bright stars for astrometry.

    2. run parallel_anet (or parallel_astrometrydotnet) to get precise WCS
    headers for all frames.

    3. run make_fov_catalog to get a FOV source catalog for the field.

    4. run reform_fov_catalog to cut this down to the columns needed for magfit
    only.

    5. run parallel_fitsdir_photometry for photometry on all frames (via
    fistar)

    Kwargs of note:
        useastrometrydotnet (bool): if True, solve astrometry with
            astrometry.net; otherwise fall back to anet.
        extractsources (bool): if True, photometer at extracted source
            positions (fistar columns 7,8); otherwise at projected catalog
            positions (columns 13,14).
    '''
    # Step 1: extract bright sources from every calibrated frame.
    ap.parallel_extract_sources(fitsdir, outdir, ccdextent=initccdextent,
                                ccdgain=ccdgain,
                                fluxthreshold=anetfluxthreshold,
                                zeropoint=zeropoint, exptime=exptime,
                                tailstr='.fits',
                                fnamestr=fnamestr)

    # Step 2: solve the astrometry for every frame.
    if useastrometrydotnet:
        ap.fistardir_to_xy(fitsdir, fistarglob=fistarglob)

        ap.parallel_astrometrydotnet(
            fitsdir, outdir, ra_nom, dec_nom,
            fistarfitsxyglob=fistarglob.replace('.fistar','.fistar-fits-xy'),
            tweakorder=anettweak, radius=anetradius, xpix=xpix, ypix=ypix,
            nworkers=nworkers, scalelow=10, scalehigh=30,
            scaleunits='arcsecperpix', nobjs=200, xcolname='ximage',
            ycolname='yimage', useimagenotfistar=useimagenotfistar,
            downsample=4
        )
    else:
        ap.parallel_anet(fitsdir, outdir, ra_nom, dec_nom,
                         fistarglob=fistarglob,
                         infofromframe=False, width=width, tweak=anettweak,
                         radius=anetradius,
                         xpix=xpix, ypix=ypix,
                         cols=cols  # columns with x,y in fistar file.
                         )

    # Step 3: get all the sources in the field of view of the frame, given
    # its central pointing coordinates and plate-scale from 2MASS. This
    # catalog file is then used as input to make_source_list below.
    _ = ap.make_fov_catalog(ra=catra, dec=catdec, size=ccd_fov,
                            brightrmag=brightrmag, faintrmag=faintrmag,
                            fits=None, outfile=None, outdir=outdir,
                            catalog=catalog, catalogpath=None,
                            columns=None, observatory='tess')

    # Step 4: convert the full output catalog from 2massread, etc. to the
    # format required for magfit. Also useful for general reforming of the
    # columns.
    ap.reform_fov_catalog(catalog_file, reformed_cat_file)

    # Step 5: initial (pre-subtraction) aperture photometry on all frames.
    # Source x,y positions come either from the extracted sources or the
    # projected catalog (see docstring).
    if extractsources:
        fiphot_xycols = '7,8'
    else:
        fiphot_xycols = '13,14'
    ap.parallel_fitsdir_photometry(fitsdir, outdir, reformed_cat_file,
                                   fluxthreshold=fiphotfluxthreshold,
                                   ccdextent={'x':[0.,2048.],'y':[0.,2048.]},
                                   pixborders=0.0,
                                   aperturelist=aperturelist,
                                   removesourcetemp=True,
                                   removesourcelist=False, binaryoutput=False,
                                   nworkers=nworkers, maxtasksperworker=1000,
                                   saveresults=True, rejectbadframes=True,
                                   minsrcbgv=200.0, maxmadbgv=150.0,
                                   maxframebgv=2000.0, minnstars=500,
                                   fitsglob=fitsglob, ccdgain=ccdgain,
                                   ccdexptime=exptime, zeropoint=zeropoint,
                                   extractsources=extractsources,
                                   fovcat_xycols=(12,13),
                                   projcat_xycols=(24,25),
                                   fiphot_xycols=fiphot_xycols,
                                   observatory='tess'
                                   )
def initial_wcs_worked_well_enough(outdir, fitsglob):
    """Return True if initial astrometry converged on at least half the frames.

    Args:
        outdir (str): directory containing the frames; must end with a path
            separator because it is concatenated directly with the glob.
        fitsglob (str): glob pattern of the calibrated FITS frames. The WCS
            products are counted by substituting '.fits' with '.wcs'.

    Returns:
        bool: True when at least half of the FITS frames have a matching
        .wcs solution (vacuously True when no FITS frames are found).
    """
    n_fitsfiles = len(glob(outdir + fitsglob))
    n_wcsfiles = len(glob(outdir + fitsglob.replace('.fits', '.wcs')))

    # anet "worked well enough" when no more than half the frames failed.
    return n_wcsfiles >= n_fitsfiles / 2
def is_presubtraction_complete(outdir, fitsglob, lcdir, percentage_required=95,
                               extractsources=False):
    '''
    require at least e.g., 95% of the initial astrometry, photometry, etc to
    exist to return True. in that case, or if any stats_files products are
    found, move on to image subtraction. else, returns False.

    Args:
        outdir (str): directory of the frame products; must end with a path
            separator (it is concatenated directly with the glob pattern).
        fitsglob (str): glob pattern of the calibrated frames; each product
            type is counted by substituting the '.fits' extension.
        lcdir (str): lightcurve directory, probed for stats_files products
            that indicate this stage already completed.
        percentage_required (int): required completion percentage.
        extractsources (bool): if True, .sourcelist files are also required.

    Returns:
        bool: True when presubtraction products are sufficiently complete.
    '''
    # count every intermediate product type by extension substitution
    N_fitsfiles = len(glob(outdir+fitsglob))
    N_fistarfiles = len(glob(outdir+fitsglob.replace('.fits','.fistar')))
    N_wcsfiles = len(glob(outdir+fitsglob.replace('.fits','.wcs')))
    N_projcatalog = len(glob(outdir+fitsglob.replace('.fits','.projcatalog')))
    N_sourcelistfiles = len(glob(outdir+fitsglob.replace('.fits','.sourcelist')))
    N_fiphotfiles = len(glob(outdir+fitsglob.replace('.fits','.fiphot')))

    if extractsources:
        N_files = [N_fitsfiles, N_fistarfiles, N_wcsfiles, N_fiphotfiles,
                   N_sourcelistfiles, N_projcatalog]
    else:
        N_files = [N_fitsfiles, N_fistarfiles, N_wcsfiles, N_fiphotfiles,
                   N_projcatalog]

    # NOTE(review): this glob pattern is 'stats_files*' (no separator), so it
    # also matches the stats_files directory itself even when empty -- confirm
    # that is intended.
    statsdir = lcdir+'stats_files'
    N_statsfiles_products = len(glob(statsdir+"*"))
    if N_statsfiles_products >= 1:
        # stats files are produced downstream of image subtraction, so their
        # presence means this whole stage already ran
        print('found stats_files product. skipping to detrending.')
        return True
    # Completion criterion: adjacent product counts may differ by at most
    # (100 - percentage_required)% of the FITS frame count, and no product
    # type may be missing entirely.
    # NOTE(review): np.diff only compares *adjacent* entries of N_files, and
    # the division emits inf/nan warnings when N_fitsfiles is 0 -- confirm
    # this is the intended completeness test.
    elif (np.any( np.abs(np.diff(N_files)/N_fitsfiles)>(1-percentage_required/100) )
          or
          np.any( np.array(N_files)==0 )
    ):
        print('did not find {:d}% completed source extraction, astrometry, '.
              format(percentage_required)+
              'and initial photometry')
        return False
    else:
        print('found {:d}% completed source extraction, astrometry, '.
              format(percentage_required)+
              'and initial photometry. skipping presubtraction steps.')
        return True
def is_imagesubtraction_complete(fitsdir, fitsglob, lcdir):
    """Return True when image-subtraction products already exist.

    Counts the subtracted frames, photometry, kernels, diagnostic JPEGs and
    collected lightcurves. If any stats_files products exist, the stage is
    considered done regardless. Otherwise all product counts must agree and
    be non-zero.
    """
    def _count(pattern):
        # number of files matching a glob pattern
        return len(glob(pattern))

    rsub_prefix = fitsdir + 'rsub-????????-'
    n_subfits = _count(rsub_prefix + fitsglob.replace('.fits', '-xtrns.fits'))
    n_iphot = _count(rsub_prefix + fitsglob.replace('.fits', '.iphot'))
    n_kernel = _count(rsub_prefix + fitsglob.replace('.fits', '-xtrns.fits-kernel'))
    n_subconvjpg = _count(fitsdir + 'JPEG-SUBTRACTEDCONV-rsub-*-tess*.jpg')
    n_lcs = _count(lcdir + '*.grcollectilc')

    # stats files are produced downstream; their presence means this whole
    # stage already ran
    if _count(lcdir + 'stats_files/' + "*") >= 1:
        print('found stats_files product. skipping to detrending.')
        return True

    frame_counts = [n_subfits, n_iphot, n_kernel, n_subconvjpg]
    all_counts = frame_counts + [n_lcs]

    # every per-frame product count must match, and nothing may be missing
    if np.any(np.diff(frame_counts)) or np.any(np.array(all_counts) == 0):
        print('did not find completed image-subtracted photometry products '
              'including photometry, images, and lightcurves.')
        return False

    return True
def run_imagesubtraction(fitsdir, fitsglob, fieldinfo, photparams, fits_list,
                         photreftype, dbtype, reformed_cat_file, xtrnsglob,
                         iphotpattern, lcdirectory, kernelspec='b/4;i/4;d=4/4',
                         refdir=sv.REFBASEDIR, nworkers=1,
                         aperturelist='1.95:7.0:6.0,2.45:7.0:6.0,2.95:7.0:6.0',
                         photdisjointradius=2, colorscheme='bwr',
                         photreffluxthreshold=30000, extractsources=True):
    '''
    Run the image-subtraction photometry sequence (steps ISP0 through
    ISP9+10): register frames to the database, pick an astrometric
    reference, shift frames onto it, build a photometric reference,
    convolve+subtract every frame, photometer the subtracted frames, and
    dump lightcurves with grcollect.

    Note: refdir is now actually passed through to dbgen_get_astromref
    (previously the parameter was accepted but the call hardcoded
    sv.REFBASEDIR).
    '''
    ccdgain = photparams['ccdgain']
    exptime = photparams['ccdexptime']
    zeropoint = photparams['zeropoint']

    # Step ISP0: register the calibrated frames in the database.
    _ = ais.parallel_frames_to_database(fitsdir, 'calibratedframes',
                                        observatory='tess', fitsglob=fitsglob,
                                        overwrite=False,
                                        badframetag='badframes',
                                        nonwcsframes_are_ok=False,
                                        nworkers=nworkers, maxworkertasks=1000)

    # Step ISP1: select the astrometric reference frame.
    # fix: honor the refdir argument instead of hardcoding sv.REFBASEDIR.
    arefinfo = ais.dbgen_get_astromref(fieldinfo, makeactive=True,
                                       observatory='tess', overwrite=False,
                                       refdir=refdir, database=None)
    # fix: identity comparison with None (was "== None")
    if arefinfo is None:
        raise AssertionError('you need an astrometric reference!!')

    # Step ISP2. Takes ~1 sec per 20-30 frames.
    _ = ism.get_smoothed_xysdk_coeffs(fitsdir, fistarglob='*.fistar',
                                      nworkers=nworkers, maxworkertasks=1000)

    # Step ISP3. ~600 in 5 minutes --> 2 frames per second, running over 20 workers.
    # If called with warpcheck=True and warpthreshold=2000, many things will be
    # moved to badframes that are not in fact badframes. The warpthreshold is a bad
    # empirical thing that should be avoided.
    _ = ais.framelist_make_xtrnsfits(fits_list, fitsdir, fitsglob, outdir=None,
                                     refinfo='foobar', warpcheck=False,
                                     warpthreshold=15000.0, warpmargins=100,
                                     nworkers=nworkers, observatory='tess',
                                     maxworkertasks=1000, fieldinfo=fieldinfo)

    # # Optional Step ISP3.5: parallelized move of astrometry ref shifted frames to database
    # out = ais.parallel_frames_to_database(fitsdir, 'arefshifted_frames',
    #                                       fitsglob='1-???????_?-xtrns.fits',
    #                                       network='HP', overwrite=False,
    #                                       badframetag='badframes',
    #                                       nonwcsframes_are_ok=False, nworkers=nworkers,
    #                                       maxworkertasks=1000)
    #

    # Step ISP4: the next thing to do is to select a bunch of frames that can serve
    # as photometric reference frames (photrefs).
    xtrnsfiles = glob(fitsdir+xtrnsglob)
    photrefinfo = ais.generate_photref_candidates_from_xtrns(
        xtrnsfiles, minframes=50, observatory='tess',
        maxbackgroundstdevpctile=100., maxbackgroundmedianpctile=70.,
        minngoodobjectpctile=70., forcecollectinfo=False, nworkers=nworkers,
        maxworkertasks=1000)

    # Optional Step ISP5: amend the list, if needed.
    # photrefinfo = ais.amend_candidate_photrefs(photrefinfo)

    # Step ISP6: make a photometric reference frame
    _ = ais.generate_combined_photref(
        photrefinfo, photreftype, dbtype,
        photref_reformedfovcat=reformed_cat_file, makeactive=True, field=None,
        ccd=None, projectid=None, combinemethod='median',
        kernelspec=kernelspec, ccdgain=ccdgain, zeropoint=zeropoint,
        ccdexptime=exptime, extractsources=extractsources,
        apertures=aperturelist, framewidth=None, searchradius=8.0,
        nworkers=nworkers, maxworkertasks=1000, observatory='tess',
        fieldinfo=fieldinfo, photreffluxthreshold=photreffluxthreshold)

    # Step ISP7: convolve and subtract all FITS files in the xtrnsfits list from the
    # photometric reference. With 30 workers, at best process ~few frames per
    # second.
    _ = ais.parallel_xtrnsfits_convsub(
        xtrnsfiles, photreftype, fitsdir=fitsdir, fitsglob=fitsglob,
        outdir=None, observatory='tess', fieldinfo=fieldinfo,
        reversesubtract=True, kernelspec=kernelspec, nworkers=nworkers,
        maxworkertasks=1000, colorscheme=colorscheme)

    # Step ISP8: do photometry on your subtracted frames to produce .iphot files.
    # With 30 workers, at best process ~few frames per second.
    subfitslist = glob(fitsdir+'rsub-????????-'+
                       fitsglob.replace('.fits','-xtrns.fits'))
    _ = ais.parallel_convsubfits_staticphot(
        subfitslist, fitsdir=fitsdir, fitsglob=fitsglob,
        photreftype=photreftype, kernelspec=kernelspec,
        lcapertures=aperturelist, photdisjointradius=photdisjointradius,
        outdir=None, fieldinfo=fieldinfo, observatory='tess',
        nworkers=nworkers, maxworkertasks=1000, photparams=photparams)

    # Step ISP9 + 10 : dump lightcurves.
    ism.dump_lightcurves_with_grcollect(
        iphotpattern, lcdirectory, '4g', lcextension='grcollectilc',
        objectidcol=3, observatory='tess')

    # # Alternative Step 9 + 10: add the .iphot files to postgres database. Collect
    # # LCs from postgres, and then make difference image lightcurve (*.ilc) files in
    # # lcdirectory. AKA "light curve dump", or "the transposition problem".
    # # Surprisingly computationally expensive. An alternative approach is
    # # fitsh's `grcollect`, which skips the database architecture entirely.
    #
    # print('beginning insert_phots_into_database')
    # ais.insert_phots_into_database(sv.REDPATH, frameglob='rsub-*-xtrns.fits',
    #                                photdir=None, photglob='rsub-*-%s.iphot',
    #                                maxframes=None, overwrite=False, database=None)
    #
    # hatidlist = ais.get_hatidlist_from_cmrawphot(projectid, field, ccd, photreftype)
    #
    # print('beginning lightcurve dump')
    # ais.parallel_dbphot_lightcurves_hatidlist(hatidlist, lcdirectory)
def run_detrending(epdstatfile, tfastatfile, lcdirectory, epdlcglob,
reformed_cat_file, statsdir, field, epdsmooth=11,
epdsigclip=10, nworkers=10, binlightcurves=False):
'''
Step ISP11: do EPD on all the LCs, and collect stats on the results.
for ISP LCs, use lcmagcols=([27,28,29],[30,],[30,],[30,])
Step ISP12: do TFA on all the LCs. First, choose TFA template stars using the
.epdlc stats. Then run TFA, to get .tfalc.TF{1,2,3} files. Turn them into
single .tfalc files. Then collect statistics.
'''
if not os.path.exists(epdstatfile):
_ = ism.parallel_run_epd_imagesub(lcdirectory,
ilcglob='*.grcollectilc',
outdir=None, smooth=epdsmooth,
sigmaclip=epdsigclip, nworkers=nworkers,
maxworkertasks=1000, minndet=100)
ap.parallel_lc_statistics(lcdirectory, epdlcglob,
reformed_cat_file, tfalcrequired=False,
fovcatcols=(0,9), # objectid, magcol to use
fovcatmaglabel='r', outfile=epdstatfile,
nworkers=nworkers,
workerntasks=500, rmcols=[14,19,24],
epcols=[27,28,29], tfcols=[30,31,32],
rfcols=None, correctioncoeffs=None,
sigclip=5.0)
else:
print('already made EPD LC stats file')
epdmadplot = glob(os.path.join(statsdir, '*median-EP1-vs-mad-*png'))
if not epdmadplot:
ap.plot_stats_file(epdstatfile, statsdir, field, binned=False,
logy=True, logx=False, correctmagsafter=None,
rangex=(5.9,16), observatory='tess')
else:
print('already made EPD LC plots')
if not os.path.exists(lcdirectory+'aperture-1-tfa-template.list'):
_ = ap.choose_tfa_template(epdstatfile, reformed_cat_file, lcdirectory,
ignoretfamin=False, fovcat_idcol=0,
fovcat_xicol=3, fovcat_etacol=4,
fovcat_magcol=9, min_ndet=100,
min_nstars=50, max_nstars=1000,
brightest_mag=8.5, faintest_mag=13.0,
max_rms=0.1, max_sigma_above_rmscurve=5.0,
outprefix=statsdir, tfastage1=True)
if not os.path.exists(tfastatfile):
templatefiles = glob(lcdirectory+'aperture-?-tfa-template.list')
ism.parallel_run_tfa(lcdirectory, templatefiles, epdlc_glob='*.epdlc',
epdlc_jdcol=0, epdlc_magcol=(27,28,29),
template_sigclip=5.0, epdlc_sigclip=5.0, nworkers=nworkers,
workerntasks=1000)
ap.parallel_lc_statistics(lcdirectory, '*.epdlc', reformed_cat_file,
tfalcrequired=True,
fovcatcols=(0,9), # objectid, magcol from fovcat
fovcatmaglabel='r',
outfile=tfastatfile,
nworkers=nworkers,
workerntasks=500,
rmcols=[14,19,24],
epcols=[27,28,29],
tfcols=[30,31,32],
rfcols=None,
correctioncoeffs=None,
sigclip=5.0)
else:
print('already made TFA LC stats file')
tfaboolstatusfile = os.path.join(statsdir,'are_tfa_plots_done.txt')
if not os.path.exists(tfaboolstatusfile):
ap.plot_stats_file(tfastatfile, statsdir, field, binned=False,
logy=True, logx=False, correctmagsafter=None,
rangex=(5.9,16), observatory='tess')
with open(tfaboolstatusfile+'','w') as f:
f.write('1\n')
else:
print('found done TFA plots (unbinned) through {:s}. continuing.'.
format(tfaboolstatusfile))
if binlightcurves:
binsizes = [3600,21600]
binnedlcfiles = | |
look into the distribution of attribution scores for each token across all layers and attribution matrices for each head in all layers in Bert model.
# We do that using one of the layer attribution algorithms, namely, layer conductance. However, we encourage you to try out and compare the results with other algorithms as well.
#
#
# Let's configure `InterpretableEmbeddingsBase` again, in this case in order to interpret the layers of our model.
# In[ ]:
# Wrap the model's word-embedding layer so precomputed embeddings can be fed
# to the model directly during layer attribution.
interpretable_embedding = configure_interpretable_embedding_layer(
    model, "bert.embeddings.word_embeddings"
)
# Let's iterate over all layers and compute the attributions w.r.t. all tokens in the input and attention matrices.
#
# Note: Since below code is iterating over all layers it can take over 5 seconds. Please be patient!
# In[21]:
# Accumulators: per-layer token attributions and per-layer attention-matrix
# attributions, for the start- and end-position predictions respectively.
layer_attrs_start = []
layer_attrs_end = []

layer_attn_mat_start = []
layer_attn_mat_end = []

# Build the input and baseline (reference) embeddings once; they are reused
# for every layer's conductance computation.
input_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(
    input_ids,
    ref_input_ids,
    token_type_ids=token_type_ids,
    ref_token_type_ids=ref_token_type_ids,
    position_ids=position_ids,
    ref_position_ids=ref_position_ids,
)

for i in range(model.config.num_hidden_layers):
    lc = LayerConductance(squad_pos_forward_func, model.bert.encoder.layer[i])
    # the final element of additional_forward_args selects the prediction
    # target: 0 -> start position, 1 -> end position
    layer_attributions_start = lc.attribute(
        inputs=input_embeddings,
        baselines=ref_input_embeddings,
        additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),
    )
    layer_attributions_end = lc.attribute(
        inputs=input_embeddings,
        baselines=ref_input_embeddings,
        additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),
    )
    # element [0] holds attributions of the layer output (summarized per
    # token); element [1] holds attributions of the attention matrices
    layer_attrs_start.append(summarize_attributions(layer_attributions_start[0]))
    layer_attrs_end.append(summarize_attributions(layer_attributions_end[0]))

    layer_attn_mat_start.append(layer_attributions_start[1])
    layer_attn_mat_end.append(layer_attributions_end[1])

# In[22]:

# stack the per-layer results into single tensors
# layer x seq_len
layer_attrs_start = torch.stack(layer_attrs_start)
# layer x seq_len
layer_attrs_end = torch.stack(layer_attrs_end)
# layer x batch x head x seq_len x seq_len
layer_attn_mat_start = torch.stack(layer_attn_mat_start)
# layer x batch x head x seq_len x seq_len
layer_attn_mat_end = torch.stack(layer_attn_mat_end)
# As a reminder of Part 1 we visualize the heatmaps of the attributions for the outputs of all 12 layers in the plots below. The outputs of 12 layers are also known as context layer which represents the dot product between the attribution matrices and value vector.
#
# The plot below represents a heatmap of attributions across all layers and tokens for the start position prediction.
#
# Note that here we do not have information about different heads. Heads related information will be examined separately when we visualize the attribution scores of the attention matrices w.r.t. the start or end position predictions.
#
# It is interesting to observe that the question word `what` gains increasingly high attribution from layer one to ten. In the last two layers that importance is slowly diminishing.
# In contrary to `what` token, many other tokens have negative or close to zero attribution in the first 6 layers.
#
# We start seeing slightly higher attribution in tokens `important`, `us` and `to`. Interestingly token `important` is also assigned high attribution score which is remarkably high in the fifth and sixth layers.
#
# Lastly, our correctly predicted token `to` gains increasingly high positive attribution especially in the last two layers.
#
# In[23]:
# Heatmap of token attributions for the START position: one row per layer,
# one column per token.
fig, ax = plt.subplots(figsize=(15, 5))

xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(
    layer_attrs_start.cpu().detach().numpy(),
    xticklabels=xticklabels,
    yticklabels=yticklabels,
    linewidth=0.2,
)
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
# Now let's examine the heat map of the attributions for the end position prediction. In the case of end position prediction we again observe high attribution scores for the token `what` in the last 11 layers.
# Correctly predicted end token `kinds` has positive attribution across all layers and it is especially prominent in the last two layers. It's also interesting to observe that `humans` token also has relatively high attribution score in the last two layers.
# In[24]:
# Heatmap of token attributions for the END position: one row per layer,
# one column per token.
fig, ax = plt.subplots(figsize=(15, 5))

xticklabels = all_tokens
yticklabels = list(range(1, 13))
ax = sns.heatmap(
    layer_attrs_end.cpu().detach().numpy(),
    xticklabels=xticklabels,
    yticklabels=yticklabels,
    linewidth=0.2,
)  # , annot=True
plt.xlabel("Tokens")
plt.ylabel("Layers")
plt.show()
# It is interesting to note that when we compare the heat maps of start and end position, overall the colors for start position prediction on the map have darker intensities. This implies that there are less tokens that attribute positively to the start position prediction and there are more tokens which are negative indicators or signals of start position prediction.
# # Interpreting Attribution Scores for Attention Matrices
# In this section we visualize the attribution scores of start and end position predictions w.r.t. attention matrices.
# Note that each layer has 12 heads, hence attention matrices. We will first visualize for a specific layer and head, later we will summarize across all heads in order to gain a bigger picture.
#
# Below we visualize the attribution scores of 12 heads for selected layer `layer` for start position prediction.
# In[25]:
# Attention-matrix attributions for the START position in the selected
# `layer`, one panel per head.
visualize_token2token_scores(
    layer_attn_mat_start[layer].squeeze().cpu().detach().numpy()
)
# As we can see from the visualizations above, in contrary to attention scores the attributions of specific target w.r.t. to those scores are more meaningful and most importantly, they do not attend to `[SEP]` token or show diagonal patterns. We observe that heads 4, 9, 12 and 2 show strong relationship between `what` and `it` tokens when predicting start position, head 10 and 11 between `it` and `it`, heads 8 between `important` and `to` and head 1 between `to` and `what`. Note that `to` token is the start position of the answer token. It is also important to mention that these observations are for a selected `layer`. We can change the index of selected `layer` and examine interesting relationships in other layers.
# In the cell below we visualize the attention attribution scores normalized across the head axis.
# In[26]:
# Start-position attention attributions normalized across the head axis
# (dim=2), one panel per layer.
visualize_token2token_scores(
    norm_fn(layer_attn_mat_start, dim=2).squeeze().detach().cpu().numpy(),
    x_label_name="Layer",
)
# By looking at the visualizations above we can see that the model pays attention to very specific handpicked relationships when making a sprediction for start position. Most notably in the layers 10, 7, 11 and 4 it focuses more on the relationships between `it` and `is`, `important` and `to`.
# Now let's run the same experiments for the end position prediction. Below we visualize the attribution scorese of attention matrices for the end position prediction for the selected `layer`.
# In[27]:
# Attention-matrix attributions for the END position in the selected `layer`.
visualize_token2token_scores(layer_attn_mat_end[layer].squeeze().cpu().detach().numpy())
# As we can see from the visualizations above that for the end position prediction we have stronger attention towards the end of the answer token `kinds`. Here we can see stronger connection between `humans` and `kinds` in the 11th head, `it` and `em`, `power`, `and` in the 5th, 6th and 8th heads. The connections between `it` and `what` are also strong in first couple and 10th heads.
# Similar to start position let's visualize the norm across all heads for each layer.
# In[28]:
# End-position attention attributions normalized across the head axis
# (dim=2), one panel per layer.
visualize_token2token_scores(
    norm_fn(layer_attn_mat_end, dim=2).squeeze().detach().cpu().numpy(),
    x_label_name="Layer",
)
# As we can see from the visualizations above for the end position prediction there is a relation learnt between `[SEP]` and `.` in first and second layers. Also we observe that `it` token is strongly related to `what`, `important` and `to`.
# # Computing and Visualizing Vector Norms
# In this section of the tutorial we will compute Vector norms for activation layers such as ||f(x)||, ||α * f(x)|| and ||Σαf(x)|| as also described in the: https://arxiv.org/pdf/2004.10102.pdf
#
# As also shown in the paper mentioned above, normalized activations are better indicators of importance scores than the attention scores however they aren't as indicative as the attribution scores. This is because normalized activations ||f(x)|| and ||α * f(x)|| aren't attributed to a specific output prediction. From our results we can also see that according to those normalized scores `[SEP]` tokens are insignificant.
# Below we define / extract all parameters that we need to computation vector norms.
# In[29]:
# Dimensions needed for the vector-norm computations below.
output_attentions_all_shape = output_attentions_all.shape

batch = output_attentions_all_shape[1]
num_heads = output_attentions_all_shape[2]
head_size = 64
all_head_size = 768

# In order to compute above mentioned norms we need to get access to dense
# layer's weights and value vector of the self attention layer.

# #### Getting Access to Value Activations

# Let's define the list of all layers for which we would like to access Value Activations.

# In[30]:

layers = [
    model.bert.encoder.layer[layer].attention.self.value
    for layer in range(len(model.bert.encoder.layer))
]

# We use `Captum`'s LayerActivation algorithm to access the outputs of all `layers`.

# In[31]:

la = LayerActivation(squad_pos_forward_func, layers)

value_layer_acts = la.attribute(
    input_embeddings,
    additional_forward_args=(token_type_ids, position_ids, attention_mask),
)
# shape -> layer x batch x seq_len x all_head_size
value_layer_acts = torch.stack(value_layer_acts)

# In the cell below we perform several transformations with the value layer
# activations and bring it to the shape so that we can compute different norms.
# The transformations are done the same way as it is described in the original
# paper and corresponding github implementation.

# In[32]:

# split all_head_size into (num_heads, head_size)
new_x_shape = value_layer_acts.size()[:-1] + (num_heads, head_size)
value_layer_acts = value_layer_acts.view(*new_x_shape)

# Bring the head axis forward: layer x batch x num_heads x seq_len x head_size.
# fix: the original applied permute(0, 1, 3, 2, 4) twice in a row; swapping
# axes 2 and 3 twice cancels out, leaving the tensor un-permuted and
# contradicting the intended head-major layout. A single permute followed by
# .contiguous() produces the documented shape.
value_layer_acts = value_layer_acts.permute(0, 1, 3, 2, 4).contiguous()

value_layer_acts_shape = value_layer_acts.size()
# layer x batch x seq_length x num_heads x 1 x | |
yax].fill_between(
sl, ev_mean - ev_std, ev_mean + ev_std, alpha=0.3, color=colour
)
major_ticks = np.arange(0, 101, 20)
minor_ticks = np.arange(0, 101, 5)
axs[xax, yax].set_xticks(minor_ticks / 100.0, minor=True)
axs[xax, yax].set_yticks(major_ticks / 100.0)
axs[xax, yax].set_yticks(minor_ticks / 100.0, minor=True)
axs[xax, yax].grid(which="minor", linewidth=0.5)
axs[xax, yax].grid(
which="major", linewidth=1.5
)
axs[xax, yax].set_title(endpoint, fontsize=16)
axs[xax, yax].set_xlabel("significance")
axs[xax, yax].set_ylabel("error rate")
par1 = axs[xax, yax].twinx()
par1.set_ylabel("efficiency (SCP)")
xax += 1
if xax == n_rows:
xax = 0
yax += 1
else:
axs[yax].plot([0, 1], [0, 1], "--", linewidth=1, color="black")
sl = eval_df["significance_level"]
for ev, colour in zip(evaluation_measures, colours):
ev_mean = eval_df[f"{ev} mean"]
ev_std = eval_df[f"{ev} std"]
axs[yax].plot(sl, ev_mean, label=True, c=colour)
axs[yax].fill_between(
sl, ev_mean - ev_std, ev_mean + ev_std, alpha=0.3, color=colour
)
major_ticks = np.arange(0, 101, 20)
minor_ticks = np.arange(0, 101, 5)
axs[yax].set_xticks(minor_ticks / 100.0, minor=True)
axs[yax].set_yticks(major_ticks / 100.0)
axs[yax].set_yticks(minor_ticks / 100.0, minor=True)
axs[yax].grid(which="minor", linewidth=0.5)
axs[yax].grid(which="major", linewidth=1.5)
axs[yax].set_title(endpoint, fontsize=16)
axs[yax].set_xlabel("significance")
axs[yax].set_ylabel("error rate")
par1 = axs[yax].twinx()
par1.set_ylabel("efficiency (SCP)")
yax += 1
lgd = fig.legend(eval_legend, loc="center left", bbox_to_anchor=(1, 0.47))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
fig.suptitle(strat, fontsize=20)
return plt, lgd
# Font sizes (points) shared by the plotting helpers below.
# NOTE(review): medium_size equals small_size -- confirm this is intentional.
mini_size = 8
small_size = 10
medium_size = 10
def boxplot_val_eff_acc_quer(
        evaluation_dfs,
        measures,
        significance_level,
        map_labels=False, fig_height=8.5, fig_width=17):
    """
    Make same plot but with the subplots next to each other (one subplot
    per measure) instead of underneath.

    Parameters
    ----------
    evaluation_dfs : dict
        Maps experiment name -> list of per-endpoint evaluation dataframes,
        each with a "significance_level" column and "<measure> mean" columns.
    measures : list of str
        Evaluation measures; one boxplot subplot is drawn per measure.
    significance_level : float
        Significance level at which the measure means are extracted.
    map_labels : bool
        If True, map the experiment keys to nicer x-tick labels.
    fig_height, fig_width : float
        Figure size in cm (converted via cm2inch).

    Returns
    -------
    The pyplot module with the populated current figure.
    """
    # Gather, per experiment and measure, the per-endpoint means at the
    # requested significance level.
    measure_dict_sl = {}
    for exp, dfs in evaluation_dfs.items():
        measure_dict_sl[exp] = {measure: np.array([]) for measure in measures}
        for ep_df in dfs:
            ep_df_sl = ep_df[ep_df["significance_level"] == significance_level]
            for measure in measures:
                measure_dict_sl[exp][measure] = np.append(
                    measure_dict_sl[exp][measure],
                    ep_df_sl[f"{measure} mean"].values)
    experiments = evaluation_dfs.keys()
    if map_labels:
        labels_map_dict = {"cv_original": "cv",
                           "original": "cal_original",  # pred_holdout\n
                           "update": "iii",
                           "update1": "cal_update1",  # pred_holdout\n
                           "update2": "cal_update2",  # pred_holdout\n
                           "update12": "cal_update1_and_2",  # pred_holdout\n
                           }
        labels = [labels_map_dict[k] for k in experiments]
    else:
        labels = experiments
    # Shared boxplot styling.
    boxprops = dict(linewidth=2)
    whiskerprops = dict(linewidth=2)
    capprops = dict(linewidth=2)
    medianprops = dict(linewidth=2, color='darkred')
    flierprops = dict(linewidth=2, markeredgecolor='darkred', markeredgewidth=2)
    plt.clf()
    # One subplot per measure. (The original hard-coded ncols=3, which
    # raised an IndexError for any other number of measures.)
    fig, axs = plt.subplots(ncols=len(measures) or 3)
    axs = np.atleast_1d(axs)  # a single column yields a bare Axes object
    fig.set_figheight(cm2inch(fig_height))
    fig.set_figwidth(cm2inch(fig_width))
    # NOTE(review): rc settings are applied after the figure is created —
    # confirm they still take effect where intended.
    plt.rc('xtick', labelsize=small_size)
    plt.rc('ytick', labelsize=small_size)
    plt.rc('legend', fontsize=small_size)
    yax = 0
    # Human-readable subplot titles for the known measure keys.
    measures_map_dict = {"validity_bal": "Balanced validity", "efficiency_bal": "Balanced efficiency",
                         "accuracy_bal": "Balanced accuracy",
                         "validity_0": "Validity\n inactive class", "efficiency_0": "Efficiency\n inactive class",
                         "accuracy_0": "Accuracy\n inactive class", "validity_1": "Validity\n active class",
                         "efficiency_1": "Efficiency\n active class", "accuracy_1": "Accuracy\n active class"}
    for measure in measures:
        # Dashed reference line at 0.8 across all experiments.
        axs[yax].hlines(0.8, 1, len(experiments), linestyle="dashed")
        axs[yax].boxplot(
            [measure_dict_sl[exp][measure] for exp in experiments], labels=labels,
            widths=0.75, boxprops=boxprops,
            whiskerprops=whiskerprops, capprops=capprops, medianprops=medianprops, flierprops=flierprops,
        )
        axs[yax].set_xticklabels(labels, rotation=30, fontsize=small_size, ha="right")
        axs[yax].set_ylim(0.0, 1.0)
        axs[yax].set_title(measures_map_dict.get(measure, measure), fontsize=small_size)
        yax += 1
    plt.tight_layout(h_pad=1, w_pad=1)
    return plt
def boxplot_and_df_for_eval_measure(
    evaluation_dfs,
    measure,
    descriptors,
    significance_level,
    data_results_path,
    datasets,
    name_spec=None,
):
    """
    Create boxplots for a selected evaluation measure (e.g. validity,
    efficiency, rmsd) over all endpoints, save the figure as a png and the
    underlying per-strategy values as a csv.

    Parameters
    ----------
    evaluation_dfs : dict
        Maps strategy name -> list of per-endpoint evaluation dataframes.
    measure : str
        Evaluation measure; the dataframes must have a "<measure> mean" column.
    descriptors : str
        Descriptor-set name; used in the plot title and output file names.
    significance_level : float
        Significance level at which to extract the measure.
    data_results_path : str
        Directory the png and csv are written to.
    datasets : list
        Dataset names, used as the index of the csv.
    name_spec : str, optional
        Extra tag appended to the output file names.

    Returns
    -------
    None
    """
    # Gather, per strategy, the measure means of all endpoints at the
    # requested significance level.
    measure_dict_sl = {}
    for strategy, dfs in evaluation_dfs.items():
        measure_dict_sl[strategy] = np.array([])
        for ep_df in dfs:
            ep_df_sl = ep_df[ep_df["significance_level"] == significance_level]
            measure_dict_sl[strategy] = np.append(
                measure_dict_sl[strategy], ep_df_sl[f"{measure} mean"].values
            )
    plt.clf()
    strategies = evaluation_dfs.keys()
    # Dashed reference line at 0.5.
    plt.hlines(0.5, 0, 4, linestyle="dashed")
    plt.boxplot(
        [measure_dict_sl[strategy] for strategy in strategies], labels=strategies
    )
    plt.xticks(rotation="vertical")
    plt.gca().set_ylim(0.0, 1.0)
    plt.title(f"{measure} over all endpoints, {descriptors} descriptors, chembl")
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    # e.g. significance_level 0.2 -> "sl02" in the file names.
    suffix = f"_{name_spec}" if name_spec else ""
    plt.savefig(
        f"{data_results_path}/{measure}_{descriptors}_sl0{str(significance_level)[-1]}{suffix}_chembl.png"
    )
    measure_dict_sl["dataset"] = datasets
    measure_df = pd.DataFrame.from_dict(measure_dict_sl)
    # set_index returns a new frame; the original discarded the result,
    # so the csv was indexed by row number instead of dataset.
    measure_df = measure_df.set_index("dataset")
    measure_df.to_csv(
        f"{data_results_path}/{measure}_df_{descriptors}_sl0{str(significance_level)[-1]}{suffix}_chembl.csv"
    )
def draw_scatter_plot_more_datasets(
    evaluation_dfs_per_dataset,
    datasets,
    evaluation_measures,
    colours=("navy", "green", "plum"),
    marker_styles=("o", "x", "+", "v"),
    figsize=(7.5, 7.5),
    significance_level=0.2,
):
    """
    Create a scatter plot in form of subplots for multiple datasets, with a
    selected list of evaluation measures plotted per subplot/per dataset.

    Parameters
    ----------
    evaluation_dfs_per_dataset : dict
        Maps dataset name -> dict mapping strategy/experiment name -> list
        whose first entry is the corresponding evaluation dataframe.
    datasets : list
        Dataset names (also the subplot order).
    evaluation_measures : list
        Evaluation measures to include in each subplot.
    colours : tuple
        Colours, one per evaluation measure.
    marker_styles : tuple
        Marker styles, one per evaluation measure.
    figsize : tuple
        (height, width) of the complete plot in cm.
    significance_level : float
        Significance level at which the evaluation is plotted.

    Returns
    -------
    (plt, lgd)
        The pyplot module with the populated figure, plus the figure legend
        (useful as an extra artist when saving).
    """
    n_cols, n_rows = calc_n_cols_n_rows(len(datasets))
    plt.clf()
    plt.rc("xtick", labelsize=12)
    plt.rc("ytick", labelsize=12)
    plt.rc("legend", fontsize=12)
    fig, axs = plt.subplots(ncols=n_cols, nrows=n_rows)
    fig.set_figheight(cm2inch(figsize[0]))
    fig.set_figwidth(cm2inch(figsize[1]))
    xax = 0
    yax = 0
    # Human-readable legend entries for the known measure keys.
    measures_map_dict = {"validity_bal": "balanced validity", "efficiency_bal": "balanced efficiency",
                         "accuracy_bal": "balanced accuracy",
                         "validity_0": "validity\n inactive class", "efficiency_0": "efficiency\n inactive class",
                         "accuracy_0": "accuracy\n inactive class", "validity_1": "validity\n active class",
                         "efficiency_1": "efficiency\n active class", "accuracy_1": "accuracy\n active"}
    eval_legend = [measures_map_dict.get(em, em) for em in evaluation_measures]
    for i, dataset in enumerate(datasets):
        dataset_evaluation_dfs = evaluation_dfs_per_dataset[dataset]
        # Prepare data for the plot, i.e. each eval measure per strategy.
        measure_dict_sl = {"strategies": []}
        for meas in evaluation_measures:
            measure_dict_sl[meas] = []
        for strategy, df in dataset_evaluation_dfs.items():
            measure_dict_sl["strategies"].append(strategy)
            df = df[0]
            df_sl = df[df["significance_level"] == significance_level]
            for meas in evaluation_measures:
                measure_dict_sl[meas].append(df_sl[f"{meas} mean"].values)
        strategies = measure_dict_sl["strategies"]
        # Nicer x-tick labels for the known strategy keys.
        labels_map_dict = {"cv_original": "cv",
                           "original": "cal_original",  # pred_holdout\n
                           "update": "iii",
                           "update1": "cal_update1",  # pred_holdout\n
                           "update2": "cal_update2",  # pred_holdout\n
                           "update12": "cal_update1_and_2",  # pred_holdout\n
                           }
        labels = [labels_map_dict[strategy] for strategy in strategies]
        if n_rows > 1:
            # Grid of subplots: index with (row, column).
            for m, meas in enumerate(evaluation_measures):
                axs[xax, yax].scatter(
                    strategies,
                    measure_dict_sl[meas],
                    color=colours[m],
                    marker=marker_styles[m]
                )
            axs[xax, yax].hlines(0.8, 0, 4, linestyle="dashed")
            axs[xax, yax].set_xticklabels(labels, rotation=30, ha="right")
            axs[xax, yax].set_ylim(-0.05, 1.05)
            axs[xax, yax].set_title(f"{dataset}", fontsize=14)
            xax += 1
            if xax == n_rows:
                xax = 0
                yax += 1
        else:
            # Single row of subplots: index with the column only.
            axs[yax].hlines(0.8, 0, 9, linestyle="dashed")
            for m, meas in enumerate(evaluation_measures):
                axs[yax].scatter(
                    strategies,
                    measure_dict_sl[meas],
                    color=colours[m],
                    marker=marker_styles[m],
                )
            axs[yax].set_xticklabels(labels, rotation=30, ha="right")
            axs[yax].set_ylim(-0.05, 1.05)
            axs[yax].set_title(f"{dataset}", fontsize=14)
            yax += 1
    lgd = fig.legend(eval_legend, loc='upper center', bbox_to_anchor=(0.45, 0.05), ncol=4, columnspacing=0.5,
                     numpoints=3)
    plt.tight_layout(rect=[0, 0.03, 0.9, 0.95])
    fig.suptitle(
        f"{', '.join(evaluation_measures)}", fontsize=16
    )
    return plt, lgd
def format_dataset_dict(endpoint_dict, evaluation_dfs, strategies):
    """Regroup per-strategy evaluation dataframes by dataset.

    ``evaluation_dfs[strategy]`` is assumed to hold one dataframe per
    dataset, in the same order as the keys of ``endpoint_dict``.

    Returns a tuple ``(datasets, evaluation_dfs_per_dataset)`` where the
    second element maps dataset -> strategy -> single-element list with the
    corresponding dataframe.
    """
    datasets = list(endpoint_dict)
    evaluation_dfs_per_dataset = {}
    for position, dataset in enumerate(datasets):
        per_strategy = {strategy: [] for strategy in strategies}
        for strategy in strategies:
            per_strategy[strategy].append(evaluation_dfs[strategy][position])
        evaluation_dfs_per_dataset[dataset] = per_strategy
    return datasets, evaluation_dfs_per_dataset
# -----------------------------------------------
# Define threshold for train/update/test datasets
# -----------------------------------------------
def smaller_starting_year(year, starting_year):
    """Clamp *year* so it is never earlier than *starting_year*.

    Years older than the counting window's start are mapped onto the
    start year. This is purely internal bookkeeping for the counting
    code and does not change the underlying data.
    """
    return max(year, starting_year)
def count_nof_actives_inactives(df, starting_year, name=None, end_year=2021):
    """
    Count the number of actives and inactives per year.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a "year" column plus the activity column
        ("{name}_bioactivity" if *name* is given, else "binary_value"),
        with values 1 (active) / 0 (inactive).
    starting_year : int
        First year of the counting window; older entries are counted in
        this year.
    name : str, optional
        Target name selecting the "{name}_bioactivity" column.
    end_year : int, optional
        Exclusive upper bound of the year range. Defaults to 2021 (the
        original hard-coded value), i.e. the last counted year is 2020.

    Returns
    -------
    (range, list, list)
        The year range and the per-year counts of actives and inactives.
    """
    value_col = f"{name}_bioactivity" if name else "binary_value"
    # Work on a copy so the caller's dataframe is untouched.
    df_years = df[["year", value_col]].copy()
    # Entries older than the window start are folded into the first year.
    df_years["year"] = df_years["year"].clip(lower=starting_year)
    years = range(starting_year, end_year)
    actives = []
    inactives = []
    for year in years:
        year_df = df_years[df_years["year"] == year]
        actives.append(int((year_df[value_col] == 1).sum()))
        inactives.append(int((year_df[value_col] == 0).sum()))
    return years, actives, inactives
def count_actives_inactives_all_datasets(
dataset_dict, orig_path, general_df_name, starting_year=2000
):
"""
Create a dict with the years, number of actives and inactives per dataset. This dict can later be used
to plot the number of actives and inactives available per year.
Parameters
----------
dataset_dict :
dict with dataset names as key
orig_path :
Path where dataframes are saved
general_df_name :
end name how descriptor df was called, e.g. "chembio.csv"
starting_year :
year from which to start counting. This is only required to save computational cost. We don't need to start
iterating from year 1900 or even earlier since no data was published/added to chembl by then. Since even in
2000 only very few data points were available, 2000 was used as a `starting_year`, i.e. to start iterating
over years
Returns
-------
"""
count_all_data_dict = {
"target": [],
"#standardised": [],
"actives": [],
"inactives": [],
}
count_dict = copy.copy(dataset_dict)
for k, v in dataset_dict.items():
# Define target_chembl_id
# Load compounds
bioact_df = pd.read_csv(os.path.join(orig_path, f"{k}_{general_df_name}"))
# Get info about number of compounds available per target
count_all_data_dict["target"].append(k)
count_all_data_dict["#standardised"].append(bioact_df.shape[0])
count_all_data_dict["actives"].append(
bioact_df[bioact_df[f"{k}_bioactivity"] == 1].shape[0]
)
count_all_data_dict["inactives"].append(
bioact_df[bioact_df[f"{k}_bioactivity"] == 0].shape[0]
)
# Count # actives and # inactives per year
| |
<reponame>dcslagel/welly<filename>welly/curve.py
"""
Defines log curves.
:copyright: 2021 Agile Scientific
:license: Apache 2.0
"""
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import PathPatch
import warnings
from . import utils
class CurveError(Exception):
    """Generic error raised for curve-related problems."""
class Curve(np.ndarray):
"""
A fancy ndarray. Gives some utility functions, plotting, etc, for curve
data.
"""
    def __new__(cls, data, basis=None, params=None):
        """
        Construct a Curve from array-like data, following the numpy guide
        for subclassing ndarray.

        Args:
            data (array-like): The curve samples.
            basis (array-like): Optional depth/time basis. Only the first
                two entries are used, to derive ``start`` and ``step``; a
                descending basis is flipped first so start < stop.
            params (dict): Optional attributes (mnemonic, units, null, ...)
                set directly on the new object.
        """
        # Copy so the new Curve does not share memory with `data`.
        obj = np.asarray(data).view(cls).copy()
        params = params or {}
        for k, v in params.items():
            setattr(obj, k, v)
        if basis is not None:
            if basis[0] > basis[1]:
                basis = np.flipud(basis)
            # Only start and step are stored; the full basis is recomputed
            # on demand (see the `basis` property).
            setattr(obj, 'start', basis[0])
            setattr(obj, 'step', basis[1]-basis[0])
        return obj
    def __array_finalize__(self, obj):
        """
        Propagate curve metadata when numpy creates new instances from
        this one (views, slices, ufunc results); numpy subclassing
        boilerplate.
        """
        # obj is None for explicit construction; __new__ handles that case.
        if obj is None:
            return
        if obj.size == 1:
            # NOTE(review): numpy ignores __array_finalize__'s return value,
            # so this float() is discarded; the practical effect is only that
            # the default attributes below are skipped for size-1 sources —
            # confirm this is intended.
            return float(obj)
        # Copy metadata from the source object, falling back to defaults.
        self.start = getattr(obj, 'start', 0)
        self.step = getattr(obj, 'step', 1)
        self.mnemonic = getattr(obj, 'mnemonic', None)
        self.units = getattr(obj, 'units', None)
        self.run = getattr(obj, 'run', 0)
        self.null = getattr(obj, 'null', -999.25)
        self.service_company = getattr(obj, 'service_company', None)
        self.date = getattr(obj, 'date', None)
        self.code = getattr(obj, 'code', None)
        self.basis_units = getattr(obj, 'basis_units', None)
    def __getitem__(self, items):
        """
        Update the basis when a Curve is sliced.

        For slice indexing, ``start`` is moved to the depth of the slice's
        first sample and ``step`` is scaled by the slice's stride. Other
        index types (ints, boolean masks, fancy indexing) are passed
        through without any basis adjustment.
        """
        newarr = self.copy()
        if isinstance(items, slice):
            # Read the new start depth from the *old* basis before the step
            # is changed, since `basis` is derived from start/step.
            if (items.start is not None) and (items.start > 0):
                newarr.start = newarr.basis.copy()[items.start]
            if items.step is not None:
                newarr.step = newarr.step * items.step
        return np.ndarray.__getitem__(newarr, items)
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
    def _repr_html_(self):
        """
        Jupyter Notebook magic repr function: renders the curve as an HTML
        table of metadata, stats, and a head/tail preview of the samples.
        """
        if self.size < 10:
            # Tiny curves: the plain ndarray repr is informative enough.
            return np.ndarray.__repr__(self)
        attribs = self.__dict__.copy()
        # Header. row1 uses doubled braces so the units placeholder survives
        # the first format() and is filled by the second.
        row1 = '<tr><th style="text-align:center;" colspan="2">{} [{{}}]</th></tr>'
        rows = row1.format(attribs.pop('mnemonic'))
        rows = rows.format(attribs.pop('units', '–'))
        row2 = '<tr><td style="text-align:center;" colspan="2">{:.4f} : {:.4f} : {:.4f}</td></tr>'
        rows += row2.format(attribs.pop('start'), self.stop, attribs.pop('step'))
        # Curve attributes: whatever metadata remains after the pops above.
        s = '<tr><td><strong>{k}</strong></td><td>{v}</td></tr>'
        for k, v in attribs.items():
            rows += s.format(k=k, v=v)
        # Curve stats.
        rows += '<tr><th style="border-top: 2px solid #000; text-align:center;" colspan="2"><strong>Stats</strong></th></tr>'
        stats = self.get_stats()
        s = '<tr><td><strong>samples (NaNs)</strong></td><td>{samples} ({nulls})</td></tr>'
        s += '<tr><td><strong><sub>min</sub> mean <sup>max</sup></strong></td>'
        s += '<td><sub>{min:.2f}</sub> {mean:.3f} <sup>{max:.2f}</sup></td></tr>'
        rows += s.format(**stats)
        # Curve preview: first and last three depth/value pairs.
        s = '<tr><th style="border-top: 2px solid #000;">Depth</th><th style="border-top: 2px solid #000;">Value</th></tr>'
        # NOTE(review): `s` has no format placeholders here, so the arguments
        # are ignored — this format() call is effectively a no-op.
        rows += s.format(self.start, self[0])
        s = '<tr><td>{:.4f}</td><td>{:.4f}</td></tr>'
        for depth, value in zip(self.basis[:3], self[:3]):
            rows += s.format(depth, value)
        rows += '<tr><td>⋮</td><td>⋮</td></tr>'
        for depth, value in zip(self.basis[-3:], self[-3:]):
            rows += s.format(depth, value)
        # Footer.
        # ...
        # End.
        html = '<table>{}</table>'.format(rows)
        return html
    @property
    def values(self):
        """The curve data as a plain ndarray (a copy, metadata stripped)."""
        return np.array(self)
    @property
    def stop(self):
        """
        The stop depth. Computed on the fly from the start,
        the step, and the length of the curve, so it always
        matches the last sample's depth.
        """
        return self.start + (self.shape[0] - 1) * self.step
    @property
    def basis(self):
        """
        The depth or time basis of the curve's points. Computed
        on the fly from the start, stop and step (only start and
        step are stored on the object).

        Returns:
            ndarray. The array, the same length as the curve.
        """
        return np.linspace(self.start, self.stop, self.shape[0], endpoint=True)
def describe(self):
"""
Return basic statistics about the curve.
"""
stats = {}
stats['samples'] = self.shape[0]
stats['nulls'] = self[np.isnan(self)].shape[0]
stats['mean'] = float(np.nanmean(self.real))
stats['min'] = float(np.nanmin(self.real))
stats['max'] = float(np.nanmax(self.real))
return stats
get_stats = describe
    @classmethod
    def from_lasio_curve(cls, curve,
                         depth=None,
                         basis=None,
                         start=None,
                         stop=None,
                         step=0.1524,
                         run=-1,
                         null=-999.25,
                         service_company=None,
                         date=None,
                         basis_units=None):
        """
        Makes a curve object from a lasio curve object and either a depth
        basis or start and step information.

        Args:
            curve (ndarray)
            depth (ndarray)
            basis (ndarray)
            start (float)
            stop (float)
            step (float): default: 0.1524. Pass 0 to have the step derived
                from ``start``, ``stop`` and the sample count.
            run (int): default: -1
            null (float): default: -999.25
            service_company (str): Optional.
            date (str): Optional.
            basis_units (str): the units of the basis.

        Returns:
            Curve. An instance of the class.

        Raises:
            CurveError: if neither a basis nor a start depth can be
                determined, or if step is 0 and no stop depth is given.
        """
        data = curve.data
        unit = curve.unit
        # See if we have uneven sampling.
        if depth is not None:
            d = np.diff(depth)
            if not np.allclose(d - np.mean(d), np.zeros_like(d)):
                # Sampling is uneven.
                m = "Irregular sampling in depth is not supported. "
                m += "Interpolating to regular basis."
                warnings.warn(m)
                # Resample onto a regular grid with the median spacing.
                step = np.nanmedian(d)
                start, stop = depth[0], depth[-1]+0.00001 # adjustment
                basis = np.arange(start, stop, step)
                # NOTE(review): np.interp assumes `depth` is increasing —
                # confirm callers never pass a decreasing depth array.
                data = np.interp(basis, depth, data)
            else:
                # Regular sampling: take the spacing and start as given.
                step = np.nanmedian(d)
                start = depth[0]
        # Carry on with easier situations.
        if start is None:
            if basis is not None:
                start = basis[0]
                step = basis[1] - basis[0]
            else:
                raise CurveError("You must provide a basis or a start depth.")
        if step == 0:
            # step == 0 means "derive the step from start/stop and length".
            if stop is None:
                raise CurveError("You must provide a step or a stop depth.")
            else:
                step = (stop - start) / (curve.data.shape[0] - 1)
        # Interpolate into this.
        params = {}
        params['mnemonic'] = curve.mnemonic
        params['description'] = curve.descr
        params['start'] = start
        params['step'] = step
        params['units'] = unit
        params['run'] = run
        params['null'] = null
        params['service_company'] = service_company
        params['date'] = date
        params['code'] = curve.API_code
        params['basis_units'] = basis_units
        return cls(data, params=params)
def get_alias(self, alias):
"""
Given a mnemonic, get the alias name(s) it falls under. If there aren't
any, you get an empty list.
"""
alias = alias or {}
return [k for k, v in alias.items() if self.mnemonic in v]
def plot_2d(self, ax=None,
width=None,
aspect=60,
cmap=None,
curve=False,
ticks=(1, 10),
return_fig=False,
**kwargs,
):
"""
Plot a 2D curve.
Args:
ax (ax): A matplotlib axis.
width (int): The width of the image.
aspect (int): The aspect ratio (not quantitative at all).
cmap (str): The colourmap to use.
curve (bool): Whether to plot the curve as well.
ticks (tuple): The tick interval on the y-axis.
return_fig (bool): whether to return the matplotlib figure.
Default False.
Returns:
ax. If you passed in an ax, otherwise None.
"""
# Set up the figure.
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
# Set up the data.
cmap = cmap or 'viridis'
default = int(self.shape[0] / aspect)
if self.ndim == 1:
a = np.expand_dims(self, axis=1)
a = np.repeat(a, width or default, axis=1)
elif self.ndim == 2:
a = self[:, :width] if width < self.shape[1] else self
elif self.ndim == 3:
if 2 < self.shape[-1] < 5:
# Interpret as RGB or RGBA.
a = utils.normalize(np.copy(self))
cmap = None # Actually doesn't matter.
else:
# Take first slice.
a = self[:, :width, 0] if width < self.shape[1] else self[..., 0]
else:
raise NotImplementedError("Can only handle up to 3 dimensions.")
# At this point, a is either a 2D array, or a 2D (rgb) array.
extent = [0, width or default, self.stop, self.start]
im = ax.imshow(a, cmap=cmap, extent=extent)
if curve:
paths = ax.fill_betweenx(self.basis, self, np.nanmin(self),
facecolor='none',
**kwargs,
)
# Make the 'fill' mask and clip the background image with it.
patch = PathPatch(paths._paths[0], visible=False)
ax.add_artist(patch)
im.set_clip_path(patch)
ax.set_xticks([])
# Rely on interval order.
lower, upper = self.stop, self.start
rng = abs(upper - lower)
ax.set_ylim([lower, upper])
# Make sure ticks is a tuple.
try:
ticks = tuple(ticks)
except TypeError:
ticks = (1, ticks)
# Avoid MAXTICKS error.
while rng/ticks[0] > 250:
mi, ma = 10*ticks[0], ticks[1]
if ma <= mi:
ma = 10 * mi
ticks = (mi, ma)
# Carry on plotting...
minorLocator = mpl.ticker.MultipleLocator(ticks[0])
ax.yaxis.set_minor_locator(minorLocator)
majorLocator = mpl.ticker.MultipleLocator(ticks[1])
majorFormatter = mpl.ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_ticks_position('left')
ax.get_yaxis().set_tick_params(which='both', direction='out')
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
def plot(self, ax=None, legend=None, return_fig=False, **kwargs):
"""
Plot a curve.
Args:
ax (ax): A matplotlib axis.
legend (striplog.legend): A legend. Optional.
return_fig (bool): whether to return the matplotlib figure.
Default False.
kwargs: Arguments for ``ax.set()``
Returns:
ax. If you passed in an ax, otherwise None.
"""
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
d = None
if legend is not None:
try:
d = legend.get_decor(self)
except:
pass
if d is not None:
kwargs['color'] = d.colour
kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)
kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')
# Attempt to get axis parameters from decor.
axkwargs = {}
xlim = getattr(d, 'xlim', None)
if xlim is not None:
axkwargs['xlim'] = list(map(float, xlim.split(',')))
xticks = getattr(d, 'xticks', None)
if xticks is not None:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.