repo_name
stringlengths 5
92
| path
stringlengths 4
232
| copies
stringclasses 19
values | size
stringlengths 4
7
| content
stringlengths 721
1.04M
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 15
997
| alpha_frac
float64 0.25
0.97
| autogenerated
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|
mvpossum/machine-learning
|
tp4/plot_table.py
|
1
|
1620
|
#! /usr/bin/env python
"""Plot a whitespace-separated table file as line series.

Usage: plot_table.py DATA_FILE LABEL [LABEL ...]

Column 0 of DATA_FILE is the x axis; each remaining column is one
series, labelled by the corresponding LABEL. The figure is saved next
to DATA_FILE with a .png extension and also shown interactively.
"""
import sys
import os
from sys import argv
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

FILE = argv[1]
PLOT_FILE = os.path.splitext(FILE)[0] + '.png'
# Heuristic: files whose name contains "er" hold error rates (%).
ERROR = 'er' in FILE.lower()
legend = argv[2:]
cols = len(legend)

# Paired dashed/solid styles when there are many series (series come in
# related pairs sharing a color); simple palettes otherwise.
if cols >= 4:
    linestyles = ['--', '-', '--', '-', '--', '-', '--', '-', '--', '-', '--', '-']
    colors = ['r', 'r', 'b', 'b', 'g', 'g', 'orange', 'orange',
              'purple', 'purple', 'y', 'y', 'gray', 'gray']
elif cols == 3:
    linestyles = ['-', '-', '-']
    colors = ['b', 'g', 'r']
else:
    linestyles = ['-', '-']
    colors = ['r', 'b']

# Parse the table: first value per non-blank line is x, the rest are the
# y values of each series. `split()` handles any run of whitespace.
x = []
y = [[] for _ in range(cols)]
with open(FILE) as fh:  # bugfix: close the data file deterministically
    for line in fh:
        if line.strip():
            values = [float(s) for s in line.split() if s.strip()]
            x.append(values[0])
            for j in range(cols):
                y[j].append(values[j + 1])

fig, ax = plt.subplots()
ax = plt.subplot(111)
FONT_SIZE = 16
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
    label.set_fontsize(FONT_SIZE)
for yv in range(cols):
    ax.plot(x, y[yv], label=legend[yv], linestyle=linestyles[yv], color=colors[yv])

# Shrink the axes horizontally so the legend fits to the right of the plot.
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.62, box.height])
ax.legend(prop={'size': FONT_SIZE}, bbox_to_anchor=(1, 1.0))
plt.xlabel('Dimensiones', size=FONT_SIZE)
ylabel = 'Error (%)' if ERROR else 'Cantidad de nodos del árbol'
plt.ylabel(ylabel, size=FONT_SIZE)
plt.savefig(PLOT_FILE)
plt.show()
|
mit
| -6,190,329,077,399,839,000
| 24.296875
| 109
| 0.57937
| false
|
zanardob/django-pizza
|
pizzeria/pizzeria/settings.py
|
1
|
2661
|
"""
Django settings for pizzeria project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i!%!frm&u7pf5bqmev#n*dp%vovkwbb33s1n@gycfr1su_c9bl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pizza'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pizzeria.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pizzeria.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDUA_ROOT = os.path.join(BASE_DIR, 'media')
|
cc0-1.0
| -1,323,239,971,014,439,400
| 24.834951
| 71
| 0.699737
| false
|
tombstone/models
|
official/nlp/bert/run_squad_helper.py
|
1
|
19349
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for running BERT family models on SQuAD 1.1/2.0 in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from absl import flags
from absl import logging
import tensorflow as tf
from official.modeling import performance
from official.nlp import optimization
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.nlp.bert import model_training_utils
from official.nlp.bert import squad_evaluate_v1_1
from official.nlp.bert import squad_evaluate_v2_0
from official.nlp.data import squad_lib_sp
from official.utils.misc import keras_utils
def define_common_squad_flags():
    """Defines common flags used by SQuAD tasks."""
    # Run mode selector: which phases (train / eval / predict / export) run.
    flags.DEFINE_enum(
        'mode', 'train_and_eval',
        ['train_and_eval', 'train_and_predict',
         'train', 'eval', 'predict', 'export_only'],
        'One of {"train_and_eval", "train_and_predict", '
        '"train", "eval", "predict", "export_only"}. '
        '`train_and_eval`: train & predict to json files & compute eval metrics. '
        '`train_and_predict`: train & predict to json files. '
        '`train`: only trains the model. '
        '`eval`: predict answers from squad json file & compute eval metrics. '
        '`predict`: predict answers from the squad json file. '
        '`export_only`: will take the latest checkpoint inside '
        'model_dir and export a `SavedModel`.')
    flags.DEFINE_string('train_data_path', '',
                        'Training data path with train tfrecords.')
    flags.DEFINE_string(
        'input_meta_data_path', None,
        'Path to file that contains meta data about input '
        'to be used for training and evaluation.')
    # Model training specific flags.
    flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.')
    # Predict processing related.
    flags.DEFINE_string('predict_file', None,
                        'SQuAD prediction json file path. '
                        '`predict` mode supports multiple files: one can use '
                        'wildcard to specify multiple files and it can also be '
                        'multiple file patterns separated by comma. Note that '
                        '`eval` mode only supports a single predict file.')
    flags.DEFINE_bool(
        'do_lower_case', True,
        'Whether to lower case the input text. Should be True for uncased '
        'models and False for cased models.')
    flags.DEFINE_float(
        'null_score_diff_threshold', 0.0,
        'If null_score - best_non_null is greater than the threshold, '
        'predict null. This is only used for SQuAD v2.')
    flags.DEFINE_bool(
        'verbose_logging', False,
        'If true, all of the warnings related to data processing will be '
        'printed. A number of warnings are expected for a normal SQuAD '
        'evaluation.')
    flags.DEFINE_integer('predict_batch_size', 8,
                         'Total batch size for prediction.')
    flags.DEFINE_integer(
        'n_best_size', 20,
        'The total number of n-best predictions to generate in the '
        'nbest_predictions.json output file.')
    flags.DEFINE_integer(
        'max_answer_length', 30,
        'The maximum length of an answer that can be generated. This is needed '
        'because the start and end predictions are not conditioned on one '
        'another.')
    # Pull in the flags shared by every BERT task (model dir, lr, etc.).
    common_flags.define_common_bert_flags()
# Module-level handle to the absl flag values defined above.
FLAGS = flags.FLAGS
def squad_loss_fn(start_positions,
                  end_positions,
                  start_logits,
                  end_logits):
    """Returns sparse categorical crossentropy for start/end logits."""
    loss_start = tf.keras.losses.sparse_categorical_crossentropy(
        start_positions, start_logits, from_logits=True)
    loss_end = tf.keras.losses.sparse_categorical_crossentropy(
        end_positions, end_logits, from_logits=True)
    # Average the two head losses so the total stays on the scale of one head.
    return (tf.reduce_mean(loss_start) + tf.reduce_mean(loss_end)) / 2
def get_loss_fn():
    """Gets a loss function for squad task."""

    def _loss_fn(labels, model_outputs):
        # Unpack the label dict and the (start, end) logits pair, then
        # delegate to the shared SQuAD loss.
        start_logits, end_logits = model_outputs
        return squad_loss_fn(labels['start_positions'],
                             labels['end_positions'],
                             start_logits,
                             end_logits)

    return _loss_fn
# Per-example prediction record: the feature's unique id plus the raw
# start/end position logits produced by the model.
RawResult = collections.namedtuple('RawResult',
                                   ['unique_id', 'start_logits', 'end_logits'])
def get_raw_results(predictions):
    """Converts multi-replica predictions to RawResult."""
    replica_triples = zip(predictions['unique_ids'],
                          predictions['start_logits'],
                          predictions['end_logits'])
    for unique_ids, start_logits, end_logits in replica_triples:
        # Each replica tensor holds one row per example; emit them one by
        # one as plain Python values.
        for uid, start, end in zip(unique_ids.numpy(), start_logits.numpy(),
                                   end_logits.numpy()):
            yield RawResult(
                unique_id=uid,
                start_logits=start.tolist(),
                end_logits=end.tolist())
def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size,
                   is_training):
    """Gets a closure to create a dataset.."""

    def _dataset_fn(ctx=None):
        """Returns tf.data.Dataset for distributed BERT pretraining."""
        # Under a distribution context, each replica gets its share of the
        # global batch; otherwise use the global batch size directly.
        if ctx:
            batch_size = ctx.get_per_replica_batch_size(global_batch_size)
        else:
            batch_size = global_batch_size
        return input_pipeline.create_squad_dataset(
            input_file_pattern,
            max_seq_length,
            batch_size,
            is_training=is_training,
            input_pipeline_context=ctx)

    return _dataset_fn
def get_squad_model_to_predict(strategy, bert_config, checkpoint_path,
                               input_meta_data):
    """Gets a squad model to make predictions."""
    with strategy.scope():
        # Prediction always uses float32, even if training uses mixed precision.
        tf.keras.mixed_precision.experimental.set_policy('float32')
        squad_model, _ = bert_models.squad_model(
            bert_config,
            input_meta_data['max_seq_length'],
            hub_module_url=FLAGS.hub_module_url)
    if checkpoint_path is None:
        # No explicit checkpoint: fall back to the newest one in model_dir.
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    logging.info('Restoring checkpoints from %s', checkpoint_path)
    checkpoint = tf.train.Checkpoint(model=squad_model)
    # expect_partial(): the checkpoint may also contain optimizer slots
    # that the inference-only model does not use.
    checkpoint.restore(checkpoint_path).expect_partial()
    return squad_model
def predict_squad_customized(strategy,
                             input_meta_data,
                             predict_tfrecord_path,
                             num_steps,
                             squad_model):
    """Make predictions using a Bert-based squad model."""
    predict_dataset_fn = get_dataset_fn(
        predict_tfrecord_path,
        input_meta_data['max_seq_length'],
        FLAGS.predict_batch_size,
        is_training=False)
    predict_iterator = iter(
        strategy.experimental_distribute_datasets_from_function(
            predict_dataset_fn))

    @tf.function
    def predict_step(iterator):
        """Predicts on distributed devices."""

        def _replicated_step(inputs):
            """Replicated prediction calculation."""
            x, _ = inputs
            # unique_ids identify examples for postprocessing; they are not
            # a model input, so pop them before the forward pass.
            unique_ids = x.pop('unique_ids')
            start_logits, end_logits = squad_model(x, training=False)
            return dict(
                unique_ids=unique_ids,
                start_logits=start_logits,
                end_logits=end_logits)

        outputs = strategy.run(_replicated_step, args=(next(iterator),))
        # Collapse per-replica values into local (per-host) results.
        return tf.nest.map_structure(strategy.experimental_local_results,
                                     outputs)

    all_results = []
    for _ in range(num_steps):
        predictions = predict_step(predict_iterator)
        for result in get_raw_results(predictions):
            all_results.append(result)
        if len(all_results) % 100 == 0:
            logging.info('Made predictions for %d records.', len(all_results))
    return all_results
def train_squad(strategy,
                input_meta_data,
                bert_config,
                custom_callbacks=None,
                run_eagerly=False,
                init_checkpoint=None,
                sub_model_export_name=None):
    """Run bert squad training."""
    if strategy:
        logging.info('Training using customized training loop with distribution'
                     ' strategy.')
    # Enables XLA in Session Config. Should not be set for TPU.
    keras_utils.set_session_config(FLAGS.enable_xla)
    performance.set_mixed_precision_policy(common_flags.dtype())

    epochs = FLAGS.num_train_epochs
    num_train_examples = input_meta_data['train_data_size']
    max_seq_length = input_meta_data['max_seq_length']
    steps_per_epoch = int(num_train_examples / FLAGS.train_batch_size)
    # Warm up over the first 10% of one epoch's worth of examples.
    warmup_steps = int(epochs * num_train_examples * 0.1 / FLAGS.train_batch_size)
    train_input_fn = get_dataset_fn(
        FLAGS.train_data_path,
        max_seq_length,
        FLAGS.train_batch_size,
        is_training=True)

    def _get_squad_model():
        """Get Squad model and optimizer."""
        squad_model, core_model = bert_models.squad_model(
            bert_config,
            max_seq_length,
            hub_module_url=FLAGS.hub_module_url,
            hub_module_trainable=FLAGS.hub_module_trainable)
        optimizer = optimization.create_optimizer(FLAGS.learning_rate,
                                                  steps_per_epoch * epochs,
                                                  warmup_steps,
                                                  FLAGS.end_lr,
                                                  FLAGS.optimizer_type)
        squad_model.optimizer = performance.configure_optimizer(
            optimizer,
            use_float16=common_flags.use_float16(),
            use_graph_rewrite=common_flags.use_graph_rewrite())
        return squad_model, core_model

    # If explicit_allreduce = True, apply_gradients() no longer implicitly
    # allreduce gradients, users manually allreduce gradient and pass the
    # allreduced grads_and_vars to apply_gradients(). clip_by_global_norm will be
    # applied to allreduced gradients.
    def clip_by_global_norm_callback(grads_and_vars):
        grads, variables = zip(*grads_and_vars)
        (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
        return zip(clipped_grads, variables)

    model_training_utils.run_customized_training_loop(
        strategy=strategy,
        model_fn=_get_squad_model,
        loss_fn=get_loss_fn(),
        model_dir=FLAGS.model_dir,
        steps_per_epoch=steps_per_epoch,
        steps_per_loop=FLAGS.steps_per_loop,
        epochs=epochs,
        train_input_fn=train_input_fn,
        init_checkpoint=init_checkpoint or FLAGS.init_checkpoint,
        sub_model_export_name=sub_model_export_name,
        run_eagerly=run_eagerly,
        custom_callbacks=custom_callbacks,
        explicit_allreduce=False,
        post_allreduce_callbacks=[clip_by_global_norm_callback])
def prediction_output_squad(strategy, input_meta_data, tokenizer, squad_lib,
                            predict_file, squad_model):
    """Makes predictions for a squad dataset."""
    doc_stride = input_meta_data['doc_stride']
    max_query_length = input_meta_data['max_query_length']
    # Whether data should be in Ver 2.0 format.
    version_2_with_negative = input_meta_data.get('version_2_with_negative',
                                                  False)
    eval_examples = squad_lib.read_squad_examples(
        input_file=predict_file,
        is_training=False,
        version_2_with_negative=version_2_with_negative)
    eval_writer = squad_lib.FeatureWriter(
        filename=os.path.join(FLAGS.model_dir, 'eval.tf_record'),
        is_training=False)
    eval_features = []

    def _append_feature(feature, is_padding):
        # Padding features go into the record file (fixed batch shapes)
        # but are excluded from postprocessing.
        if not is_padding:
            eval_features.append(feature)
        eval_writer.process_feature(feature)

    # TPU requires a fixed batch size for all batches, therefore the number
    # of examples must be a multiple of the batch size, or else examples
    # will get dropped. So we pad with fake examples which are ignored
    # later on.
    kwargs = dict(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=input_meta_data['max_seq_length'],
        doc_stride=doc_stride,
        max_query_length=max_query_length,
        is_training=False,
        output_fn=_append_feature,
        batch_size=FLAGS.predict_batch_size)
    # squad_lib_sp requires one more argument 'do_lower_case'.
    if squad_lib == squad_lib_sp:
        kwargs['do_lower_case'] = FLAGS.do_lower_case
    dataset_size = squad_lib.convert_examples_to_features(**kwargs)
    eval_writer.close()

    logging.info('***** Running predictions *****')
    logging.info(' Num orig examples = %d', len(eval_examples))
    logging.info(' Num split examples = %d', len(eval_features))
    logging.info(' Batch size = %d', FLAGS.predict_batch_size)

    num_steps = int(dataset_size / FLAGS.predict_batch_size)
    all_results = predict_squad_customized(
        strategy, input_meta_data, eval_writer.filename, num_steps, squad_model)
    all_predictions, all_nbest_json, scores_diff_json = (
        squad_lib.postprocess_output(
            eval_examples,
            eval_features,
            all_results,
            FLAGS.n_best_size,
            FLAGS.max_answer_length,
            FLAGS.do_lower_case,
            version_2_with_negative=version_2_with_negative,
            null_score_diff_threshold=FLAGS.null_score_diff_threshold,
            verbose=FLAGS.verbose_logging))
    return all_predictions, all_nbest_json, scores_diff_json
def dump_to_files(all_predictions, all_nbest_json, scores_diff_json,
                  squad_lib, version_2_with_negative, file_prefix=''):
    """Save output to json files.

    Args:
      all_predictions: mapping of example id -> best answer text.
      all_nbest_json: mapping of example id -> n-best candidate list.
      scores_diff_json: mapping of example id -> null-vs-best score diff
        (only written for SQuAD v2).
      squad_lib: squad library module providing `write_to_json_files`.
      version_2_with_negative: whether the dataset is SQuAD v2 (writes the
        null-odds file too).
      file_prefix: optional prefix for the output filenames, used when
        predicting over multiple input files.
    """
    output_prediction_file = os.path.join(FLAGS.model_dir,
                                          '%spredictions.json' % file_prefix)
    output_nbest_file = os.path.join(FLAGS.model_dir,
                                     '%snbest_predictions.json' % file_prefix)
    # Bug fix: this path previously inserted `file_prefix` as an extra
    # directory component (model_dir/<prefix>/<prefix>null_odds.json),
    # pointing into a directory that is never created. Keep it consistent
    # with the two output files above.
    output_null_log_odds_file = os.path.join(FLAGS.model_dir,
                                             '%snull_odds.json' % file_prefix)
    logging.info('Writing predictions to: %s', (output_prediction_file))
    logging.info('Writing nbest to: %s', (output_nbest_file))
    squad_lib.write_to_json_files(all_predictions, output_prediction_file)
    squad_lib.write_to_json_files(all_nbest_json, output_nbest_file)
    if version_2_with_negative:
        squad_lib.write_to_json_files(scores_diff_json, output_null_log_odds_file)
def _get_matched_files(input_path):
"""Returns all files that matches the input_path."""
input_patterns = input_path.strip().split(',')
all_matched_files = []
for input_pattern in input_patterns:
input_pattern = input_pattern.strip()
if not input_pattern:
continue
matched_files = tf.io.gfile.glob(input_pattern)
if not matched_files:
raise ValueError('%s does not match any files.' % input_pattern)
else:
all_matched_files.extend(matched_files)
return sorted(all_matched_files)
def predict_squad(strategy,
                  input_meta_data,
                  tokenizer,
                  bert_config,
                  squad_lib,
                  init_checkpoint=None):
    """Get prediction results and evaluate them to hard drive."""
    if init_checkpoint is None:
        init_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
    all_predict_files = _get_matched_files(FLAGS.predict_file)
    squad_model = get_squad_model_to_predict(strategy, bert_config,
                                             init_checkpoint, input_meta_data)
    multiple_inputs = len(all_predict_files) > 1
    for predict_file in all_predict_files:
        all_predictions, all_nbest_json, scores_diff_json = prediction_output_squad(
            strategy, input_meta_data, tokenizer, squad_lib, predict_file,
            squad_model)
        if multiple_inputs:
            # With several input files, prefix each output set with the
            # input's basename, e.g. /path/xquad.ar.json -> "xquad.ar-".
            file_prefix = '%s-' % os.path.splitext(
                os.path.basename(predict_file))[0]
        else:
            file_prefix = ''
        dump_to_files(all_predictions, all_nbest_json, scores_diff_json,
                      squad_lib,
                      input_meta_data.get('version_2_with_negative', False),
                      file_prefix)
def eval_squad(strategy,
               input_meta_data,
               tokenizer,
               bert_config,
               squad_lib,
               init_checkpoint=None):
    """Get prediction results and evaluate them against ground truth."""
    if init_checkpoint is None:
        init_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
    all_predict_files = _get_matched_files(FLAGS.predict_file)
    if len(all_predict_files) != 1:
        raise ValueError('`eval_squad` only supports one predict file, '
                         'but got %s' % all_predict_files)
    squad_model = get_squad_model_to_predict(strategy, bert_config,
                                             init_checkpoint, input_meta_data)
    all_predictions, all_nbest_json, scores_diff_json = prediction_output_squad(
        strategy, input_meta_data, tokenizer, squad_lib, all_predict_files[0],
        squad_model)
    # Persist predictions alongside the eval so they can be inspected later.
    dump_to_files(all_predictions, all_nbest_json, scores_diff_json, squad_lib,
                  input_meta_data.get('version_2_with_negative', False))

    with tf.io.gfile.GFile(FLAGS.predict_file, 'r') as reader:
        dataset_json = json.load(reader)
    pred_dataset = dataset_json['data']
    # Score against ground truth with the version-matching official script.
    if input_meta_data.get('version_2_with_negative', False):
        eval_metrics = squad_evaluate_v2_0.evaluate(pred_dataset,
                                                    all_predictions,
                                                    scores_diff_json)
    else:
        eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
    return eval_metrics
def export_squad(model_export_path, input_meta_data, bert_config):
    """Exports a trained model as a `SavedModel` for inference.

    Args:
      model_export_path: a string specifying the path to the SavedModel
        directory.
      input_meta_data: dictionary containing meta data about input and model.
      bert_config: Bert configuration file to define core bert layers.

    Raises:
      ValueError: export path is not specified, got an empty string or None.
    """
    if not model_export_path:
        raise ValueError('Export path is not specified: %s' % model_export_path)
    # Export uses float32 for now, even if training uses mixed precision.
    tf.keras.mixed_precision.experimental.set_policy('float32')
    squad_model, _ = bert_models.squad_model(bert_config,
                                             input_meta_data['max_seq_length'])
    model_saving_utils.export_bert_model(
        model_export_path, model=squad_model, checkpoint_dir=FLAGS.model_dir)
|
apache-2.0
| -358,132,179,482,600,700
| 39.226611
| 80
| 0.64577
| false
|
cinemapub/bright-response
|
scripts/lib/foursquare/foursquare/tests/test_events.py
|
1
|
1068
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2013 Mike Lewis
import logging; log = logging.getLogger(__name__)
from . import BaseAuthenticatedEndpointTestCase, BaseUserlessEndpointTestCase
class EventsEndpointTestCase(BaseAuthenticatedEndpointTestCase):
    """
    General
    """

    def test_event(self):
        """A single-event lookup returns an 'event' payload."""
        res = self.api.events(self.default_eventid)
        assert 'event' in res

    def test_categories(self):
        """The category listing returns a 'categories' payload."""
        res = self.api.events.categories()
        assert 'categories' in res

    def test_search(self):
        """Searching by third-party domain/id returns an 'events' payload."""
        res = self.api.events.search({'domain': u'songkick.com', 'eventId': u'8183976'})
        assert 'events' in res
class EventsUserlessEndpointTestCase(BaseUserlessEndpointTestCase):
    """
    General
    """

    def test_categories(self):
        """Category listing works without a user token."""
        res = self.api.events.categories()
        assert 'categories' in res

    def test_search(self):
        """Event search works without a user token."""
        res = self.api.events.search({'domain': u'songkick.com', 'eventId': u'8183976'})
        assert 'events' in res
|
mit
| 7,469,222,720,811,461,000
| 25.04878
| 93
| 0.667603
| false
|
jaor/bigmler
|
bigmler/options/externalconnector.py
|
1
|
3449
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer external connector processing
"""
def get_external_connector_options(defaults=None):
    """external connector-related options

    Returns a dict mapping option strings to the keyword arguments used
    to register them with argparse. Each option's default is looked up
    in `defaults` under the option's `dest` name.
    """
    if defaults is None:
        defaults = {}

    def _store(dest, help_text, **extra):
        """Build a standard `store` option definition backed by `defaults`."""
        definition = {'action': 'store',
                      'dest': dest,
                      'default': defaults.get(dest, None),
                      'help': help_text}
        definition.update(extra)
        return definition

    return {
        # The path to a file containing external connector attributes.
        '--connector-attributes': _store(
            'connector_attributes',
            "Path to a json file describing connector attributes."),
        # The ID of an existing connector.
        '--external-connector-id': _store(
            'external_connector_id',
            "ID of an existing external connector."),
        # The kind of database manager (note: dest is 'source').
        '--engine': _store(
            'source',
            "Database manager engine.",
            choices=["mysql", "postgresql", "elasticsearch", "sqlserver"]),
        # The host where the database manager is.
        '--host': _store('host', "Name of the database manager host."),
        # The list of hosts for Elasticsearch.
        '--hosts': _store(
            'hosts',
            "Comma-separated list of hosts (elasticsearch only)."),
        # The port that the database listens to.
        '--port': _store('port', "Port number to connect to."),
        # The database name.
        '--database': _store('database', "Name of the database."),
        # The username.
        '--user': _store('user', "Database user name."),
        # The password.
        '--password': _store('password', "Database user password."),
        # JSON file containing the connection info.
        '--connection-json': _store(
            'connection_json',
            "JSON file describing the connection arguments.")}
|
apache-2.0
| 884,515,083,397,673,700
| 31.847619
| 77
| 0.544506
| false
|
msakai/pyubcsat
|
ubcsat.py
|
1
|
2406
|
import re
import subprocess
import sys
class Solver():
    """MAX-SAT solver front-end driving the external `ubcsat` binary.

    Hard clauses must all be satisfied; each soft clause carries a weight
    and the solver minimizes the total weight of violated soft clauses.
    Variables are positive 1-based integers; literals are signed ints.
    """

    def __init__(self, ubcsat="ubcsat"):
        self._ubcsat = ubcsat          # path/name of the ubcsat executable
        self._nvar = 0                 # highest variable index allocated
        self._clauses = []             # hard clauses: lists of literals
        self._soft_clauses = []        # (weight, clause) pairs

    def newvar(self):
        """Allocate and return a fresh variable index (1-based)."""
        self._nvar += 1
        return self._nvar

    def add_clause(self, clause):
        """Add a hard clause (list of signed literals)."""
        self._clauses.append(clause)

    def add_soft_clause(self, clause, weight=1):
        """Add a soft clause with the given weight (default 1)."""
        self._soft_clauses.append((weight, clause))

    def _write_wcnf(self, file):
        """Write the problem to `file` in DIMACS WCNF format.

        Returns the `top` weight used to mark hard clauses: one more than
        the total soft weight, so violating any hard clause always costs
        more than violating every soft clause.
        """
        top = sum(w for w, _ in self._soft_clauses) + 1
        file.write("p wcnf %d %d %d\n" % (self._nvar, len(self._clauses) + len(self._soft_clauses), top))
        for clause in self._clauses:
            file.write(str(top))
            for lit in clause:
                file.write(" ")
                file.write(str(lit))
            file.write(" 0\n")
        for w, clause in self._soft_clauses:
            file.write(str(w))
            for lit in clause:
                file.write(" ")
                file.write(str(lit))
            file.write(" 0\n")
        file.flush()
        return top

    def run(self):
        """Run ubcsat, yielding (objective, model) pairs for each solution.

        `model` is 1-indexed: model[v] is True/False for variable v and
        model[0] is None. Only solutions that satisfy all hard clauses
        (objective below `top`) are yielded.
        """
        cmd = [self._ubcsat, "-w", "-alg", "irots", "-seed", "0", "-runs", "10", "-solve", "-r", "bestsol"]
        # Bug fix: open the pipes in text mode. Without it (Python 3) the
        # child's stdout yields bytes, which breaks the str regex below and
        # the writes to sys.stdout, and _write_wcnf's str writes to stdin.
        popen = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 universal_newlines=True)
        top = self._write_wcnf(popen.stdin)
        try:
            for line in popen.stdout:
                sys.stdout.write(line)
                sys.stdout.flush()
                # bestsol lines look like: "<run> <solved> <obj> <bits>"
                m = re.match(r"^\d+ [01] (\d+) ([01]+)$", line)
                if m:
                    obj, model = m.groups()
                    obj = int(obj)
                    if obj < top:
                        model = [None] + [c == '1' for c in model]
                        yield (obj, model)
        finally:
            popen.terminate()

    def optimize(self):
        """Exhaust run() and return the best (objective, model) found,
        or (None, None) if the solver produced no usable solution."""
        bestobj = None
        bestmodel = None
        for (obj, model) in self.run():
            if bestobj is None or obj < bestobj:
                bestobj, bestmodel = obj, model
        return bestobj, bestmodel
if __name__ == '__main__':
    # Smoke-test instance: 4 variables, 2 hard + 3 soft clauses.
    solver = Solver()
    for i in range(4):  # bugfix: `xrange` does not exist in Python 3
        solver.newvar()
    solver.add_clause([1, -2, 4])
    solver.add_clause([-1, -2, 3])
    solver.add_soft_clause([-2, -4], 8)
    solver.add_soft_clause([-3, 2], 4)
    solver.add_soft_clause([3, 1], 3)
    print(solver.optimize())
|
bsd-3-clause
| -7,839,600,136,086,166,000
| 29.075
| 107
| 0.489194
| false
|
openqt/algorithms
|
leetcode/python/lc688-knight-probability-in-chessboard.py
|
1
|
1796
|
# coding=utf-8
import unittest
"""688. Knight Probability in Chessboard
https://leetcode.com/problems/knight-probability-in-chessboard/description/
On an `N`x`N` chessboard, a knight starts at the `r`-th row and `c`-th column
and attempts to make exactly `K` moves. The rows and columns are 0 indexed, so
the top-left square is `(0, 0)`, and the bottom-right square is `(N-1, N-1)`.
A chess knight has 8 possible moves it can make, as illustrated below. Each
move is two squares in a cardinal direction, then one square in an orthogonal
direction.

Each time the knight is to move, it chooses one of eight possible moves
uniformly at random (even if the piece would go off the chessboard) and moves
there.
The knight continues moving until it has made exactly `K` moves or has moved
off the chessboard. Return the probability that the knight remains on the
board after it has stopped moving.
**Example:**
**Input:** 3, 2, 0, 0
**Output:** 0.0625
**Explanation:** There are two moves (to (1,2), (2,1)) that will keep the knight on the board.
From each of those positions, there are also two moves that will keep the knight on the board.
The total probability the knight stays on the board is 0.0625.
**Note:**
* `N` will be between 1 and 25.
* `K` will be between 0 and 100.
* The knight always initially starts on the board.
Similar Questions:
Out of Boundary Paths (out-of-boundary-paths)
"""
class Solution(object):
    def knightProbability(self, N, K, r, c):
        """Return the probability the knight is still on the N x N board
        after exactly K random moves starting from (r, c).

        :type N: int
        :type K: int
        :type r: int
        :type c: int
        :rtype: float

        Dynamic programming over probability mass per square: each step
        spreads every square's mass to its (at most 8) knight destinations
        with weight 1/8; mass that would leave the board is dropped.
        O(K * N^2) time, O(N^2) space.
        """
        moves = ((1, 2), (2, 1), (2, -1), (1, -2),
                 (-1, -2), (-2, -1), (-2, 1), (-1, 2))
        prob = [[0.0] * N for _ in range(N)]
        prob[r][c] = 1.0
        for _ in range(K):
            nxt = [[0.0] * N for _ in range(N)]
            for i in range(N):
                for j in range(N):
                    mass = prob[i][j]
                    if not mass:
                        continue
                    share = mass / 8.0
                    for di, dj in moves:
                        ni, nj = i + di, j + dj
                        if 0 <= ni < N and 0 <= nj < N:
                            nxt[ni][nj] += share
            prob = nxt
        # Whatever mass is still on the board is the survival probability.
        return sum(map(sum, prob))

    def test(self):
        pass


if __name__ == "__main__":
    unittest.main()
|
gpl-3.0
| 8,180,465,002,098,857,000
| 26.828125
| 98
| 0.667038
| false
|
FAANG/faang-methylation
|
workflowbs/src/jflow/server.py
|
1
|
24010
|
#
# Copyright (C) 2015 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import cherrypy
import cgi
import tempfile
import json
import sys
import datetime
from functools import wraps
import time
import os
import argparse
from argparse import ArgumentTypeError
from .workflows_manager import WorkflowsManager
from .config_reader import JFlowConfigReader
from .workflow import Workflow
from .parameter import browsefile, localfile, urlfile, inputfile, create_test_function, MiltipleAction, MiltipleAppendAction, MultipleParameters
from workflows.types import *
from . import utils
from cctools.util import time_format
from .utils import get_octet_string_representation
# function in charge to upload large files
class UploadFieldStorage(cgi.FieldStorage):
    """FieldStorage variant that buffers uploads in a *named* temporary
    file instead of the default anonymous one. Keeping the file named
    allows creating a second link after the upload completes, avoiding a
    full copy to the destination filename."""

    def get_tmp_directory(self):
        # Resolve the configured temporary directory lazily on each call.
        return JFlowConfigReader().get_tmp_directory()

    def get_file_name(self):
        self.tmpfile = None
        # A real file object already lives on disk: return its path.
        if hasattr(self.file, 'name'):
            return self.file.name
        # Otherwise the payload sits in an in-memory buffer
        # (cStringIO.StringO): persist it under the upload's filename.
        tmp_folder = self.get_tmp_directory()
        if not os.path.exists(tmp_folder):
            try:
                os.mkdir(tmp_folder)
            except:
                pass
        out = open(os.path.join(tmp_folder, self.filename), "wb+")
        out_name = out.name
        out.write(self.file.getvalue())
        out.close()
        return out_name

    def __del__(self):
        # Best-effort cleanup: close the buffer and drop the temp copy.
        try:
            self.file.close()
        except AttributeError:
            pass
        try:
            os.remove(os.path.join(self.get_tmp_directory(), self.filename))
        except:
            pass

    def make_file(self, binary=None):
        # Called by FieldStorage when it needs a spool file for the upload.
        tmp_folder = self.get_tmp_directory()
        if not os.path.exists(tmp_folder):
            try:
                os.mkdir(tmp_folder)
            except:
                pass
        return tempfile.NamedTemporaryFile(dir=tmp_folder)
def noBodyProcess():
    """Disable cherrypy's default request-body processing so we control
    where the upload goes. By default cherrypy buffers the body in
    memory; we direct it to disk instead."""
    cherrypy.request.process_request_body = False


# Register as a tool that fires before the request body is read.
cherrypy.tools.noBodyProcess = cherrypy.Tool('before_request_body', noBodyProcess)
# define functions in charge to handle cross domain calls
def CORS():
    """Attach permissive CORS headers to every response so browsers on
    other origins may call this API."""
    headers = cherrypy.response.headers
    headers['Access-Control-Allow-Origin'] = '*'
    headers['Access-Control-Allow-Methods'] = 'OPTIONS, GET, POST'
    headers['Access-Control-Allow-Headers'] = 'Content-Type, Content-Range, Content-Disposition'


# Register as a tool that fires just before the response is finalized.
cherrypy.tools.CORS = cherrypy.Tool('before_finalize', CORS)
class JFlowJSONEncoder(json.JSONEncoder):
    """JSON encoder rendering date/datetime values with the
    project-configured date format."""

    def default(self, obj):
        # anything that is not a date falls back to the stock encoder
        # (which raises TypeError for unsupported types)
        if not isinstance(obj, (datetime.date, datetime.datetime)):
            return super(JFlowJSONEncoder, self).default(obj)
        return obj.strftime(JFlowConfigReader().get_date_format())
class JFlowServer (object):
    """CherryPy application exposing the jflow workflow engine as a JSON web service."""

    # separator used to encode MultipleParameters sub-parameter names in form field keys
    MULTIPLE_TYPE_SPLITER = "___"
    # separator used to pack several "append" values into a single form field
    APPEND_PARAM_SPLITER = "::-::"
    # URL prefix under which the jflow work directory is statically served
    JFLOW_WDATA = "data"

    def __init__(self):
        # Create a workflow manager to get access to our workflows
        self.wfmanager = WorkflowsManager()
        self.jflow_config_reader = JFlowConfigReader()
@staticmethod
def quickstart(server_class, config=None, daemon=False):
# daemonize the server if asked to
if daemon:
from cherrypy.process.plugins import Daemonizer
Daemonizer(cherrypy.engine).subscribe()
# define the socket host and port
jflowconf = JFlowConfigReader()
socket_opts = jflowconf.get_socket_options()
# add the result directory
if config is None or not '/' in config:
config['/'] = {'tools.staticdir.root': jflowconf.get_work_directory()}
else:
link = os.path.join(config['/']['tools.staticdir.root'], "data")
if not os.path.islink(link):
os.symlink(jflowconf.get_work_directory(), link)
config[os.path.join('/', JFlowServer.JFLOW_WDATA)] = {'tools.staticdir.on' : True,
'tools.staticdir.dir' : jflowconf.get_work_directory()}
# remove any limit on the request body size; cherrypy's default is 100MB
# (maybe we should just increase it ?)
cherrypy.server.max_request_body_size = 0
# increase server socket timeout to 60s; we are more tolerant of bad
# quality client-server connections (cherrypy's default is 10s)
cherrypy.server.socket_timeout = 60
cherrypy.config.update({'server.socket_host': socket_opts[0],
'server.socket_port': socket_opts[1]})
# start the server
cherrypy.quickstart(server_class(), config=config)
def jsonify(func):
'''JSON and JSONP decorator for CherryPy'''
@wraps(func)
def wrapper(*args, **kw):
value = func(*args, **kw)
cherrypy.response.headers["Content-Type"] = "application/json"
# if JSONP request
if "callback" in kw:
return ('%s(%s)' % (kw["callback"], json.dumps(value, cls=JFlowJSONEncoder))).encode('utf8')
# else return the JSON
else: return json.dumps(value, cls=JFlowJSONEncoder).encode('utf8')
return wrapper
def jsonify_workflow_status(self, workflow, init_to_zero=False):
if workflow.start_time: start_time = time.asctime(time.localtime(workflow.start_time))
else: start_time = "-"
if workflow.start_time and workflow.end_time: elapsed_time = str(workflow.end_time-workflow.start_time)
elif workflow.start_time: elapsed_time = str(time.time()-workflow.start_time)
else: elapsed_time = "-"
if workflow.end_time: end_time = time.asctime(time.localtime(workflow.end_time))
else: end_time = "-"
if init_to_zero:
return {"id":utils.get_nb_string(workflow.id),
"name": workflow.name,
"status": Workflow.STATUS_STARTED,
"elapsed_time": str(elapsed_time),
"start_time": start_time,
"end_time": end_time,
"components": []}
else:
components = []
components_status = workflow.get_components_status()
for i, component in enumerate(workflow.get_components_nameid()):
status_info = components_status[component]
try: perc_waiting = (status_info["waiting"]*100.0)/status_info["tasks"]
except: perc_waiting = 0
try: perc_running = (status_info["running"]*100.0)/status_info["tasks"]
except: perc_running = 0
try: perc_failed = (status_info["failed"]*100.0)/status_info["tasks"]
except: perc_failed = 0
try: perc_aborted = (status_info["aborted"]*100.0)/status_info["tasks"]
except: perc_aborted = 0
try: perc_completed = (status_info["completed"]*100.0)/status_info["tasks"]
except: perc_completed = 0
components.append({"name": component,
"elapsed_time": time_format(status_info["time"]),
"total": status_info["tasks"],
"waiting": status_info["waiting"],
"failed": status_info["failed"],
"running": status_info["running"],
"aborted": status_info["aborted"],
"completed": status_info["completed"]})
status = {"id":utils.get_nb_string(workflow.id),
"errors": workflow.get_errors(),
"name": workflow.name,
"metadata": workflow.metadata,
"status": workflow.get_status(),
"elapsed_time": "-" if elapsed_time == "-" else str(datetime.timedelta(seconds=int(str(elapsed_time).split(".")[0]))),
"start_time": start_time,
"end_time": end_time,
"components": components}
return status
@cherrypy.expose
@jsonify
def get_available_workflows(self, **kwargs):
workflows = []
filter_groups = None
select = False
if 'filter_groups' in kwargs : filter_groups = kwargs['filter_groups'].split(',')
if 'select' in kwargs : select = kwargs['select'] in ['True', 'true', '1', 1]
wf_instances, wf_methodes = self.wfmanager.get_available_workflows(filter_groups = filter_groups , select = select)
for instance in wf_instances:
parameters, parameters_per_groups, ordered_groups = [], {}, ["default"]
for param in instance.get_parameters():
# if it's a multiple action change the action by the name
if param.action == MiltipleAction:
action = "MiltipleAction"
elif param.action == MiltipleAppendAction:
action = "MiltipleAppendAction"
else:
action = param.action
try:
cparam_help = param.global_help
except:
cparam_help = param.help
hash_param = {"help": cparam_help,
"required": param.required,
"default": param.default,
"choices": param.choices,
"action": action,
"type": param.get_type(),
"name": param.name,
"display_name": param.display_name,
"group": param.group}
if hash_param["type"] == "date":
hash_param["format"] = self.jflow_config_reader.get_date_format()
if hash_param["format"] == '%d/%m/%Y':
hash_param["format"] = 'dd/mm/yyyy'
elif hash_param["format"] == '%d/%m/%y':
hash_param["format"] = 'dd/mm/yy'
elif hash_param["format"] == '%Y/%m/%d':
hash_param["format"] = 'yyyy/mm/dd'
elif hash_param["format"] == '%y/%m/%d':
hash_param["format"] = 'yy/mm/dd'
# if it's a multiple type add sub parameters
if type(param.type) == MultipleParameters:
hash_param["sub_parameters"] = []
for sub_param in param.sub_parameters:
hash_param["sub_parameters"].append({"help": sub_param.help,
"required": sub_param.required,
"default": sub_param.default,
"choices": sub_param.choices,
"action": sub_param.action,
"type": sub_param.get_type(),
"name": param.name + JFlowServer.MULTIPLE_TYPE_SPLITER + sub_param.flag,
"display_name": sub_param.display_name,
"group": sub_param.group})
if hash_param["sub_parameters"][-1]["type"] == "date":
hash_param["sub_parameters"][-1]["format"] = self.jflow_config_reader.get_date_format()
if hash_param["sub_parameters"][-1]["format"] == '%d/%m/%Y':
hash_param["sub_parameters"][-1]["format"] = 'dd/mm/yyyy'
elif hash_param["sub_parameters"][-1]["format"] == '%d/%m/%y':
hash_param["sub_parameters"][-1]["format"] = 'dd/mm/yy'
elif hash_param["sub_parameters"][-1]["format"] == '%Y/%m/%d':
hash_param["sub_parameters"][-1]["format"] = 'yyyy/mm/dd'
elif hash_param["sub_parameters"][-1]["format"] == '%y/%m/%d':
hash_param["sub_parameters"][-1]["format"] = 'yy/mm/dd'
parameters.append(hash_param)
if param.group in parameters_per_groups:
parameters_per_groups[param.group].append(hash_param)
else: parameters_per_groups[param.group] = [hash_param]
if param.group not in ordered_groups:
ordered_groups.append(param.group)
workflows.append({"name": instance.name,
"help": instance.description,
"class": instance.__class__.__name__,
"parameters": parameters,
"parameters_per_groups": parameters_per_groups,
"groups": ordered_groups})
return workflows
    @cherrypy.expose
    @jsonify
    def run_workflow(self, **kwargs):
        """Launch a workflow from submitted form values.

        Form keys may encode MultiParameter values ("name___flag") and
        MultiParameterList values ("name___flag___index"); they are decoded
        back into the nested structures the workflow manager expects.
        Returns {"status": 0, "content": <initial status>} on success,
        {"status": 1, "content": <error text>} on failure.
        """
        try:
            kwargs_modified = {}
            # handle MultiParameterList
            multi_sub_params = {}
            for key in list(kwargs.keys()):
                parts = key.split(JFlowServer.MULTIPLE_TYPE_SPLITER)
                if len(parts) == 3:
                    # parts = (parameter name, sub-parameter flag, occurrence index)
                    if not parts[0] in kwargs_modified:
                        kwargs_modified[parts[0]] = []
                        multi_sub_params[parts[0]] = {}
                    if parts[2] in multi_sub_params[parts[0]]:
                        multi_sub_params[parts[0]][parts[2]].append((parts[1], kwargs[key]))
                    else:
                        multi_sub_params[parts[0]][parts[2]] = [(parts[1], kwargs[key])]
            for key in list(kwargs.keys()):
                parts = key.split(JFlowServer.MULTIPLE_TYPE_SPLITER)
                # split append values
                new_values = kwargs[key].split(JFlowServer.APPEND_PARAM_SPLITER)
                if len(new_values) == 1:
                    new_values = new_values[0]
                # if this is a classic Parameter
                if len(parts) == 1:
                    kwargs_modified[key] = new_values
                # if this is a MultiParameter
                elif len(parts) == 2:
                    if parts[0] in kwargs_modified:
                        kwargs_modified[parts[0]].append((parts[1], new_values))
                    else:
                        kwargs_modified[parts[0]] = [(parts[1], new_values)]
            # handle MultiParameterList: one list entry per occurrence index
            for param in multi_sub_params:
                kwargs_modified[param] = []
                for sub_param in multi_sub_params[param]:
                    kwargs_modified[param].append(multi_sub_params[param][sub_param])
            workflow = self.wfmanager.run_workflow(kwargs_modified["workflow_class"], kwargs_modified)
            return { "status" : 0, "content" : self.jsonify_workflow_status(workflow, True) }
        except Exception as err:
            return { "status" : 1, "content" : str(err) }
    @cherrypy.expose
    @jsonify
    def delete_workflow(self, **kwargs):
        """Delete the workflow identified by kwargs["workflow_id"]."""
        self.wfmanager.delete_workflow(kwargs["workflow_id"])

    @cherrypy.expose
    @jsonify
    def rerun_workflow(self, **kwargs):
        """Restart the workflow identified by kwargs["workflow_id"] and return its status."""
        workflow = self.wfmanager.rerun_workflow(kwargs["workflow_id"])
        return self.jsonify_workflow_status(workflow)

    @cherrypy.expose
    @jsonify
    def reset_workflow_component(self, **kwargs):
        """Reset one component (kwargs["component_name"]) of a workflow and return the new status."""
        workflow = self.wfmanager.reset_workflow_component(kwargs["workflow_id"], kwargs["component_name"])
        return self.jsonify_workflow_status(workflow)
@cherrypy.expose
def upload_light(self, **kwargs):
uniq_directory = ""
for key in list(kwargs.keys()):
if key == "uniq_directory":
uniq_directory = kwargs['uniq_directory']
else:
file_param = key
# the file transfer can take a long time; by default cherrypy
# limits responses to 300s; we increase it to 1h
cherrypy.response.timeout = 3600
# upload file by chunks
file_dir = os.path.join( self.jflow_config_reader.get_tmp_directory(), uniq_directory )
os.mkdir( file_dir )
if isinstance(kwargs[file_param], list):
for cfile in kwargs[file_param]:
FH_sever_file = open(os.path.join(file_dir, cfile.filename), "w")
while True:
data = cfile.file.read(8192)
if not data:
break
FH_sever_file.write(data)
FH_sever_file.close()
else:
FH_sever_file = open(os.path.join(file_dir, kwargs[file_param].filename), "w")
while True:
data = kwargs[file_param].file.read(8192)
if not data:
break
FH_sever_file.write(data)
FH_sever_file.close()
    @cherrypy.expose
    @cherrypy.tools.noBodyProcess()
    @cherrypy.tools.CORS()
    def upload(self):
        """Receive a large upload, spooling it straight to disk.

        Body processing is disabled (noBodyProcess tool), so the raw
        request stream is parsed here with UploadFieldStorage; the spooled
        temporary file is then hard-linked into <tmp>/<uniq_directory>/
        under the submitted filename.
        """
        # the file transfer can take a long time; by default cherrypy
        # limits responses to 300s; we increase it to 1h
        cherrypy.response.timeout = 3600
        # convert the header keys to lower case
        lcHDRS = {}
        for key, val in cherrypy.request.headers.items():
            lcHDRS[key.lower()] = val
        # at this point we could limit the upload on content-length...
        # incomingBytes = int(lcHDRS['content-length'])
        # create our version of cgi.FieldStorage to parse the MIME encoded
        # form data where the file is contained
        formFields = UploadFieldStorage(fp=cherrypy.request.rfile,
                                        headers=lcHDRS,
                                        environ={'REQUEST_METHOD':'POST'},
                                        keep_blank_values=True)
        # we now create a link to the file, using the submitted
        # filename; if we renamed, there would be a failure because
        # the NamedTemporaryFile, used by our version of cgi.FieldStorage,
        # explicitly deletes the original filename
        for current in list(formFields.keys()):
            if current != 'uniq_directory':
                currentFile = formFields[current]
                fileDir = os.path.join(self.jflow_config_reader.get_tmp_directory(), formFields.getvalue("uniq_directory"))
                os.mkdir(fileDir)
                if isinstance(currentFile, list):
                    # several files submitted under the same field name
                    for cfile in currentFile:
                        os.link(
                            cfile.get_file_name(),
                            os.path.join(fileDir, cfile.filename)
                        )
                else:
                    os.link(
                        currentFile.get_file_name(),
                        os.path.join(fileDir, currentFile.filename)
                    )
@cherrypy.expose
@jsonify
def get_workflows_status(self, **kwargs):
status = []
workflows = self.wfmanager.get_workflows(use_cache=True)
for workflow in workflows:
if "metadata_filter" in kwargs:
is_ok = False
for wf_meta in workflow.metadata:
for metadata in kwargs["metadata_filter"].split(","):
if wf_meta == metadata:
is_ok = True
break
if is_ok: break
if is_ok: status.append(self.jsonify_workflow_status(workflow))
else:
status.append(self.jsonify_workflow_status(workflow))
return status
    @cherrypy.expose
    @jsonify
    def get_workflow_status(self, **kwargs):
        """Return the status of one workflow.

        kwargs["display"] selects the shape: "list" returns the flat status
        dict, "graph" additionally embeds the execution graph nodes/edges.
        """
        workflow = self.wfmanager.get_workflow(kwargs["workflow_id"])
        if kwargs["display"] == "list":
            return self.jsonify_workflow_status(workflow)
        elif kwargs["display"] == "graph":
            g = workflow.get_execution_graph()
            status = self.jsonify_workflow_status(workflow)
            nodes = []
            # NOTE(review): node_attributes(node)[1] appears to be the node's
            # display name while the *_GRAPH_LABEL constant marks its kind --
            # confirm against the Workflow class
            for node in g.nodes():
                if Workflow.INPUTFILE_GRAPH_LABEL in g.node_attributes(node):
                    nodes.append({"name": node, "display_name": g.node_attributes(node)[1], "type": "inputfile"})
                elif Workflow.INPUTFILES_GRAPH_LABEL in g.node_attributes(node):
                    nodes.append({"name": node, "display_name": g.node_attributes(node)[1], "type": "inputfiles"})
                elif Workflow.INPUTDIRECTORY_GRAPH_LABEL in g.node_attributes(node):
                    nodes.append({"name": node, "display_name": g.node_attributes(node)[1], "type": "inputdirectory"})
                elif Workflow.COMPONENT_GRAPH_LABEL in g.node_attributes(node):
                    nodes.append({"name": node, "display_name": g.node_attributes(node)[1], "type": "component"})
            status["nodes"] = nodes
            status["edges"] = g.edges()
            return status
def _webify_outputs(self, web_path, path):
work_dir = self.jflow_config_reader.get_work_directory()
if work_dir.endswith("/"): work_dir = work_dir[:-1]
socket_opt = self.jflow_config_reader.get_socket_options()
return {
'url':'http://' + socket_opt[0] + ':' + str(socket_opt[1]) + '/' + path.replace(work_dir, web_path),
'size': get_octet_string_representation(os.path.getsize(os.path.abspath(path))),
'extension': os.path.splitext(path)[1]
}
@cherrypy.expose
@jsonify
def get_workflow_outputs(self, **kwargs):
on_disk_outputs, on_web_outputs = self.wfmanager.get_workflow_outputs(kwargs["workflow_id"]), {}
for cpt_name in list(on_disk_outputs.keys()):
on_web_outputs[cpt_name] = {}
for outf in on_disk_outputs[cpt_name]:
on_web_outputs[cpt_name][outf] = self._webify_outputs(JFlowServer.JFLOW_WDATA, on_disk_outputs[cpt_name][outf])
return on_web_outputs
@cherrypy.expose
@jsonify
def validate_field(self, **kwargs):
try:
value_key = None
for key in list(kwargs.keys()):
if key != "type" and key != "callback" and key != "_" and key != "action":
value_key = key
break
# if it's an append parameter, let's check each value
if kwargs["action"] == "append":
for cval in kwargs[value_key].split("\n"):
create_test_function(kwargs["type"])(cval)
else:
create_test_function(kwargs["type"])(kwargs[value_key])
return True
except Exception as e:
return str(e)
|
apache-2.0
| 6,862,293,120,437,390,000
| 44.910134
| 144
| 0.546606
| false
|
ajiwo/xiboside
|
xlf.py
|
1
|
3207
|
from xml.etree import ElementTree
import logging
# module-level logger used by parse_file() error reporting
log = logging.getLogger('xiboside.xlf')
def parse_file(path):
    """Parse the .xlf layout file at ``path``.

    :return: a copy of the parsed layout dict, or None when the file
      cannot be read or does not contain valid XML (errors are logged).
    """
    layout = None
    try:
        _xlf = Xlf(path)
    except ElementTree.ParseError as err:  # "as" syntax: valid on Python 2.6+ and 3
        log.error(str(err))  # str(err): ParseError has no .message on Python 3
        return None
    except IOError as err:
        log.error("%s: %s" % (err.strerror, err.filename))
        return None

    if _xlf.layout:
        layout = dict(_xlf.layout)
    # drop the parser eagerly; only the plain dict copy is returned
    del _xlf
    return layout
class Xlf:
    """Parser for Xibo layout (.xlf) XML files.

    ``parse()`` fills :attr:`layout` with the layout geometry, its regions
    and their media; :attr:`region` / :attr:`media` keep the most recently
    parsed region / media dict.
    """
    def __init__(self, path=None):
        self.layout = None
        self.region = None
        self.media = None
        if path:
            self.parse(path)

    def parse(self, path):
        """Parse the file at ``path``; fill and return the layout dict
        (None when the document root is not <layout>)."""
        layout = {
            'width': '',
            'height': '',
            'bgcolor': '',
            'background': '',
            'regions': [],
            'tags': []
        }
        tree = ElementTree.parse(path)
        root = tree.getroot()
        if 'layout' != root.tag:
            self.layout = None
            return None

        # keep only the attributes declared in the template above
        # (.items() works on Python 2 and 3; iteritems() was Python-2 only)
        for k, v in root.attrib.items():
            if k in layout:
                layout[k] = v

        for child in root:
            if 'region' == child.tag:
                region = self.__parse_region(child)
                if region:
                    layout['regions'].append(region)
            elif 'tags' == child.tag:
                for tag in child:
                    layout['tags'].append(tag.text)

        self.layout = layout
        return layout

    def __parse_region(self, node):
        """Build the dict describing one <region> element (None for None input)."""
        if node is None:
            self.region = None
            return None
        region = {
            'id': '',
            'width': '',
            'height': '',
            'left': '',
            'top': '',
            'userId': '',
            'zindex': '0',
            'media': [],
            'options': {}
        }
        for k, v in node.attrib.items():
            if k in region:
                region[k] = v

        for child in node:
            if 'media' == child.tag:
                media = self.__parse_media(child)
                if media:
                    region['media'].append(media)
            elif 'options' == child.tag:
                for option in child:
                    if option.text:
                        region['options'][option.tag] = option.text

        self.region = region
        return region

    def __parse_media(self, node):
        """Build the dict describing one <media> element (None for None input)."""
        if node is None:
            self.media = None
            return None
        media = {
            'id': '',
            'type': '',
            'duration': '',
            'render': '',
            'options': {},
            'raws': {}
        }
        for k, v in node.attrib.items():
            if k in media:
                media[k] = v

        for child in node:
            if 'options' == child.tag:
                for option in child:
                    if option.text:
                        media['options'][option.tag] = option.text
            elif 'raw' == child.tag:
                for raw in child:
                    if raw.text:
                        media['raws'][raw.tag] = raw.text

        self.media = media
        return media
|
agpl-3.0
| -4,592,609,816,481,283,000
| 24.862903
| 67
| 0.428438
| false
|
Azure/azure-sdk-for-python
|
sdk/security/azure-mgmt-security/azure/mgmt/security/operations/_topology_operations.py
|
1
|
11513
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar

    T = TypeVar('T')
    # signature of the optional ``cls`` response-transformation callback
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TopologyOperations(object):
    """TopologyOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.security.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # NOTE: AutoRest-generated code (see file header) -- manual edits
        # will be lost when the client is regenerated.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.TopologyList"]
        """Gets a list that allows to build a topology view of a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TopologyList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.security.models.TopologyList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopologyList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the first-page request (service URL + query) or the
            # follow-up request for a server-provided next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, item iterator).
            deserialized = self._deserialize('TopologyList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors onto ARM exceptions.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/topologies'}  # type: ignore

    def list_by_home_region(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.TopologyList"]
        """Gets a list that allows to build a topology view of a subscription and location.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TopologyList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.security.models.TopologyList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopologyList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Same pagination scheme as list(), with the configured
            # ascLocation added to the path.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_home_region.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
                    'ascLocation': self._serialize.url("self._config.asc_location", self._config.asc_location, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, item iterator).
            deserialized = self._deserialize('TopologyList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors onto ARM exceptions.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_home_region.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/locations/{ascLocation}/topologies'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        topology_resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.TopologyResource"
        """Gets a specific topology component.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param topology_resource_name: Name of a topology resources collection.
        :type topology_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TopologyResource, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.TopologyResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopologyResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'ascLocation': self._serialize.url("self._config.asc_location", self._config.asc_location, 'str'),
            'topologyResourceName': self._serialize.url("topology_resource_name", topology_resource_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('TopologyResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/locations/{ascLocation}/topologies/{topologyResourceName}'}  # type: ignore
|
mit
| 6,028,255,393,832,436,000
| 46.57438
| 199
| 0.625901
| false
|
jonathanchu/django-statusboard
|
statusboard/statusboard/settings/base.py
|
1
|
7617
|
"""Common settings and globals."""
import os
import sys
from unipath import Path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
PROJECT_ROOT = Path(__file__).ancestor(3)
# DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
# SITE_ROOT = dirname(DJANGO_ROOT)
SITE_ROOT = os.path.dirname(PROJECT_ROOT)
# Site name:
# SITE_NAME = basename(DJANGO_ROOT)
SITE_NAME = os.path.basename(PROJECT_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
# path.append(DJANGO_ROOT)
sys.path.append(PROJECT_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/New_York'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
# MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
MEDIA_ROOT = PROJECT_ROOT.child('media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
# STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
STATIC_ROOT = PROJECT_ROOT.child('static')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
PROJECT_ROOT.child('assets'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = os.environ['SECRET_KEY']
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
# FIXTURE_DIRS = (
# normpath(join(SITE_ROOT, 'fixtures')),
# )
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
PROJECT_ROOT.child('templates'),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'statusboard.urls'
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin panel and documentation:
'grappelli',
# grappelli needs to go before django.contrib.admin
'django.contrib.admin',
# 'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
'compressor',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'accounts',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'statusboard.wsgi.application'
########## END WSGI CONFIGURATION
########## COMPRESSOR CONFIGURATION
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_ROOT = PROJECT_ROOT.child('assets')
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_CSS_FILTERS = [
'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_JS_FILTERS = [
'compressor.filters.jsmin.JSMinFilter'
]
########## END COMPRESSOR CONFIGURATION
########## AUTHENTICATION CONFIGURATION
AUTH_USER_MODEL = 'accounts.CustomUser'
########## END AUTHENTICATION CONFIGURATION
|
mit
| 1,131,830,989,008,572,000
| 28.638132
| 98
| 0.694237
| false
|
gltn/stdm
|
stdm/third_party/sqlalchemy/inspection.py
|
1
|
3030
|
# sqlalchemy/inspect.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The inspection module provides the :func:`_sa.inspect` function,
which delivers runtime information about a wide variety
of SQLAlchemy objects, both within the Core as well as the
ORM.
The :func:`_sa.inspect` function is the entry point to SQLAlchemy's
public API for viewing the configuration and construction
of in-memory objects. Depending on the type of object
passed to :func:`_sa.inspect`, the return value will either be
a related object which provides a known interface, or in many
cases it will return the object itself.
The rationale for :func:`_sa.inspect` is twofold. One is that
it replaces the need to be aware of a large variety of "information
getting" functions in SQLAlchemy, such as
:meth:`_reflection.Inspector.from_engine`,
:func:`.orm.attributes.instance_state`, :func:`_orm.class_mapper`,
and others. The other is that the return value of :func:`_sa.inspect`
is guaranteed to obey a documented API, thus allowing third party
tools which build on top of SQLAlchemy configurations to be constructed
in a forwards-compatible way.
"""
from . import exc
from . import util
# Maps an inspected type to either ``True`` (the object inspects as itself)
# or a callable that produces the inspection object for a subject.
_registrars = util.defaultdict(list)


def inspect(subject, raiseerr=True):
    """Produce an inspection object for the given target.

    Walks the MRO of ``subject`` looking for a registered inspector.  In
    some cases the returned value is the same object as the one given
    (when the type was registered as self-inspecting); in other cases it
    is whatever the registered inspection callable produces.

    :param subject: the subject to be inspected.
    :param raiseerr: When ``True``, raise
        :class:`sqlalchemy.exc.NoInspectionAvailable` if the subject does
        not correspond to a known inspected type.  When ``False``,
        ``None`` is returned instead.
    """
    registrar = result = None
    for klass in type(subject).__mro__:
        if klass not in _registrars:
            continue
        registrar = _registrars[klass]
        if registrar is True:
            # Type is self-inspecting: hand back the subject itself.
            return subject
        result = registrar(subject)
        if result is not None:
            break
    else:
        # Exhausted the MRO without a registrar producing a value.
        registrar = result = None
    if raiseerr and (registrar is None or result is None):
        raise exc.NoInspectionAvailable(
            "No inspection system is "
            "available for object of type %s" % type(subject)
        )
    return result
def _inspects(*types):
    """Return a decorator that registers its target as the inspection
    hook for each type in *types*.

    Raises ``AssertionError`` if any of the types already has a hook.
    """
    def decorate(hook):
        for target in types:
            if target in _registrars:
                raise AssertionError(
                    "Type %s is already registered" % target
                )
            _registrars[target] = hook
        return hook

    return decorate
def _self_inspects(cls):
    """Class decorator: mark *cls* so that ``inspect(obj)`` returns the
    object itself for instances of *cls*."""
    register = _inspects(cls)
    register(True)
    return cls
|
gpl-2.0
| -4,353,928,173,663,887,400
| 31.580645
| 72
| 0.671617
| false
|
eJon/enjoy
|
main.py
|
1
|
1373
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python
__author__ = 'Leo'
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
from game.game_server import GameApplication
# Command-line options, parsed by tornado.options at startup.
define("port", default=8000, type=int, metavar="SERVER PORT", help="Run on the given port")
define("config", default="conf/server.conf", type=str, metavar="CONFIG FILE", help="Server configuration file")
define("data", default="./data/default", type=str, metavar="DATA FILE", help="Server data file")
define("user_group", type=str, metavar="USER GROUP", help="User Group")
# database config
define("database", default="", type=str, metavar="DATABASE", help="Server database")
define("db_host", default="127.0.0.1", type=str, metavar="HOST", help="Server database host")
define("db_user", default="root", type=str, metavar="USER", help="Server database user")
define("db_password", default="123456", type=str, metavar="PASSWORD", help="Server database password")
define("db_connect_num", default=5, type=int, metavar="NUM", help="Connect DB Number")


def main():
    # Build and prepare the game application, then serve it on the
    # configured port with tornado's single-threaded event loop.
    # NOTE: `print` statement below means this file targets Python 2.
    app = GameApplication()
    app.prepare_application()
    print "Running ..."
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
|
gpl-3.0
| -1,949,856,449,309,619,000
| 31.690476
| 111
| 0.706482
| false
|
metinkilicse/pyTurEng
|
pyTurEng/pyTurEng.py
|
1
|
1308
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import absolute_import
from TurEng import TurEng
import sys
import os
# Command-line entry point.  Two usage modes:
#   python pyTurEng.py <dict> <lang> <query>                    - single lookup
#   python pyTurEng.py <dict> <lang> <input_file> <output_file> - batch lookup
args = sys.argv
if len(args)==4:
    # Single-word lookup mode.
    dic = args[1]
    lang = args[2]
    query = args[3]
    dic_obj = TurEng()
    dic_obj.change_url(dic)
    # The second argument selects the result-table CSS class to scrape
    # ("tr ts" for EN->TR results, "en tm" for TR->EN).
    if lang == "en":
        result = dic_obj.get_meaning(query,"tr ts")
    else:
        result = dic_obj.get_meaning(query,"en tm")
    types, meaning = result[0],result[1]
    if len(meaning)==5:
        # Exactly five meanings: print them all with their word types.
        for i in range(5):
            print("{} : {}".format(types[i].text,meaning[i].text))
    else:
        if len(meaning)==0:
            print("No Result")
        else:
            # NOTE(review): uses types[1] with meaning[0] - assumes at least
            # two entries in 'types'; confirm against TurEng.get_meaning().
            print("{} : {}".format(types[1].text,meaning[0].text))
elif len(args)==5:
    # Batch mode: translate every word of input_file into output_file.
    dic = args[1]
    lang = args[2]
    input_file = args[3]
    output_file = args[4]
    try:
        dic_obj = TurEng()
        dic_obj.change_url(dic)
        if os.path.exists(input_file):
            if lang == "en":
                dic_obj.search_from_file(input_file,output_file,"tr ts")
            else:
                dic_obj.search_from_file(input_file,output_file,"en tm")
        else:
            print("File Does Not Exist")
    except Exception as e:
        # Report the error together with the line number it came from.
        print("Error : {} : line : {}".format(e,sys.exc_info()[2].tb_lineno))
        exit()
else:
    # Wrong argument count: print usage examples.
    print("Use as :\n'python pyTurEng.py tren tr merhaba\nor\n"
          "python pyTurEng.py tren en \"go away\"\nor\n"
          "python pyTurEng.py tren en wordlist.txt outlist.txt")
|
gpl-3.0
| 5,442,897,099,161,561,000
| 23.679245
| 71
| 0.626147
| false
|
scylladb/seastar
|
scripts/perftune.py
|
1
|
61933
|
#!/usr/bin/env python3
import abc
import argparse
import distutils.util
import enum
import functools
import glob
import itertools
import logging
import multiprocessing
import os
import pathlib
import pyudev
import re
import shutil
import subprocess
import sys
import urllib.request
import yaml
import platform
import shlex
# Global toggle: when True, commands/writes are printed instead of executed.
dry_run_mode = False


def perftune_print(log_msg, *args, **kwargs):
    """Print *log_msg*; in dry-run mode prefix it with '# ' so the output
    stays a valid shell script (log lines become comments)."""
    prefix = "# " if dry_run_mode else ""
    print(prefix + log_msg, *args, **kwargs)
def __run_one_command(prog_args, stderr=None, check=True):
    """Run *prog_args* and return its decoded stdout.

    :param stderr: passed through to subprocess.Popen.
    :param check: when True, raise CalledProcessError on a non-zero exit.
    """
    proc = subprocess.Popen(prog_args, stdout=subprocess.PIPE, stderr=stderr)
    stdout_data, stderr_data = proc.communicate()
    stdout_text = str(stdout_data, 'utf-8')
    if check and proc.returncode != 0:
        raise subprocess.CalledProcessError(returncode=proc.returncode,
                                            cmd=" ".join(prog_args),
                                            output=stdout_text,
                                            stderr=stderr_data)
    return stdout_text


def run_one_command(prog_args, stderr=None, check=True):
    """Execute a possibly state-changing command; in dry-run mode only
    print the shell-quoted command line instead of running it."""
    if dry_run_mode:
        print(" ".join(shlex.quote(arg) for arg in prog_args))
    else:
        __run_one_command(prog_args, stderr=stderr, check=check)


def run_read_only_command(prog_args, stderr=None, check=True):
    """Execute a command that does not modify system state; runs even in
    dry-run mode and returns its stdout."""
    return __run_one_command(prog_args, stderr=stderr, check=check)
def run_hwloc_distrib(prog_args):
    """Run hwloc-distrib with *prog_args*; return its output as a list of
    lines (one CPU mask per line)."""
    output = run_read_only_command(['hwloc-distrib'] + prog_args)
    return output.splitlines()


def run_hwloc_calc(prog_args):
    """Run hwloc-calc with *prog_args*; return its output as one stripped
    string (a single CPU mask)."""
    output = run_read_only_command(['hwloc-calc'] + prog_args)
    return output.rstrip()


def run_ethtool(prog_args):
    """Run ethtool with *prog_args*; return its output as a list of lines."""
    output = run_read_only_command(['ethtool'] + prog_args)
    return output.splitlines()
def fwriteln(fname, line, log_message, log_errors=True):
    """Write *line* into file *fname* (best-effort, never raises).

    In dry-run mode only echoes the equivalent shell command.  On success
    prints *log_message*; on failure prints an error unless *log_errors*
    is False.
    """
    try:
        if dry_run_mode:
            print("echo {} > {}".format(line, fname))
            return
        else:
            with open(fname, 'w') as f:
                f.write(line)
        print(log_message)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; any real I/O failure keeps the original
        # best-effort behavior.
        if log_errors:
            print("{}: failed to write into {}: {}".format(log_message, fname, sys.exc_info()))
def readlines(fname):
    """Return the list of lines of *fname*; on any error log it and
    return an empty list (best-effort, never raises)."""
    try:
        with open(fname, 'r') as f:
            return f.readlines()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate while keeping the best-effort contract.
        print("Failed to read {}: {}".format(fname, sys.exc_info()))
        return []
def fwriteln_and_log(fname, line, log_errors=True):
    """Write *line* to *fname* via fwriteln(), auto-generating the
    success log message."""
    fwriteln(fname, line,
             log_message="Writing '{}' to {}".format(line, fname),
             log_errors=log_errors)
# Two adjacent commas in a CPU mask mean an omitted (zero) 32-bit word.
double_commas_pattern = re.compile(',,')


def set_one_mask(conf_file, mask, log_errors=True):
    """Normalize the CPU *mask* and write it into *conf_file*.

    Normalization strips '0x' prefixes and expands omitted mask words
    (",,") into explicit zeros (",0,"), as expected by smp_affinity files.
    """
    if not os.path.exists(conf_file):
        raise Exception("Configure file to set mask doesn't exist: {}".format(conf_file))
    normalized = re.sub('0x', '', mask)
    while double_commas_pattern.search(normalized):
        normalized = double_commas_pattern.sub(',0,', normalized)
    fwriteln(conf_file, normalized,
             log_message="Setting mask {} in {}".format(normalized, conf_file),
             log_errors=log_errors)
def distribute_irqs(irqs, cpu_mask, log_errors=True):
    """Distribute *irqs* one-per-CPU across *cpu_mask* using hwloc-distrib."""
    # Nothing to distribute - bail out.
    if not irqs:
        return
    masks = run_hwloc_distrib(["{}".format(len(irqs)), '--single', '--restrict', cpu_mask])
    for i, mask in enumerate(masks):
        set_one_mask("/proc/irq/{}/smp_affinity".format(irqs[i]), mask, log_errors=log_errors)
def is_process_running(name):
    """Return True if at least one live (non-defunct) process called
    *name* is currently running."""
    ps_lines = run_read_only_command(['ps', '--no-headers', '-C', name], check=False).splitlines()
    live_lines = [line for line in ps_lines if not re.search('<defunct>', line)]
    return len(live_lines) > 0
def restart_irqbalance(banned_irqs):
    """
    Restart irqbalance if it's running and ban it from moving the IRQs from the
    given list.

    :param banned_irqs: iterable of IRQ numbers (presumably strings - they are
        joined into text below; TODO confirm against callers) to ban.
    """
    config_file = '/etc/default/irqbalance'
    options_key = 'OPTIONS'
    systemd = False
    banned_irqs_list = list(banned_irqs)
    # If there is nothing to ban - quit
    if not banned_irqs_list:
        return
    # return early if irqbalance is not running
    if not is_process_running('irqbalance'):
        perftune_print("irqbalance is not running")
        return
    # If this file exists - this a "new (systemd) style" irqbalance packaging.
    # This type of packaging uses IRQBALANCE_ARGS as an option key name, "old (init.d) style"
    # packaging uses an OPTION key.
    if os.path.exists('/lib/systemd/system/irqbalance.service'):
        options_key = 'IRQBALANCE_ARGS'
        systemd = True
    if not os.path.exists(config_file):
        # Probe the distribution-specific config locations.
        if os.path.exists('/etc/sysconfig/irqbalance'):
            config_file = '/etc/sysconfig/irqbalance'
        elif os.path.exists('/etc/conf.d/irqbalance'):
            config_file = '/etc/conf.d/irqbalance'
            options_key = 'IRQBALANCE_OPTS'
            # Detect systemd by checking PID 1's command name.
            with open('/proc/1/comm', 'r') as comm:
                systemd = 'systemd' in comm.read()
        else:
            perftune_print("Unknown system configuration - not restarting irqbalance!")
            perftune_print("You have to prevent it from moving IRQs {} manually!".format(banned_irqs_list))
            return
    orig_file = "{}.scylla.orig".format(config_file)
    # Save the original file
    if not dry_run_mode:
        if not os.path.exists(orig_file):
            print("Saving the original irqbalance configuration is in {}".format(orig_file))
            shutil.copyfile(config_file, orig_file)
        else:
            print("File {} already exists - not overwriting.".format(orig_file))
    # Read the config file lines
    cfile_lines = open(config_file, 'r').readlines()
    # Build the new config_file contents with the new options configuration
    perftune_print("Restarting irqbalance: going to ban the following IRQ numbers: {} ...".format(", ".join(banned_irqs_list)))
    # Search for the original options line
    opt_lines = list(filter(lambda line : re.search("^\s*{}".format(options_key), line), cfile_lines))
    if not opt_lines:
        new_options = "{}=\"".format(options_key)
    elif len(opt_lines) == 1:
        # cut the last "
        new_options = re.sub("\"\s*$", "", opt_lines[0].rstrip())
        # NOTE: opt_lines is repurposed here from a list to a string; below
        # it is only used for truthiness (did an options line exist?).
        opt_lines = opt_lines[0].strip()
    else:
        raise Exception("Invalid format in {}: more than one lines with {} key".format(config_file, options_key))
    for irq in banned_irqs_list:
        # prevent duplicate "ban" entries for the same IRQ
        patt_str = "\-\-banirq\={}\Z|\-\-banirq\={}\s".format(irq, irq)
        if not re.search(patt_str, new_options):
            new_options += " --banirq={}".format(irq)
    new_options += "\""
    if dry_run_mode:
        # Emit equivalent shell commands instead of editing the file.
        if opt_lines:
            print("sed -i 's/^{}/#{}/g' {}".format(options_key, options_key, config_file))
        print("echo {} | tee -a {}".format(new_options, config_file))
    else:
        # Rewrite the config: keep every non-options line, then append the
        # rebuilt options line at the end.
        with open(config_file, 'w') as cfile:
            for line in cfile_lines:
                if not re.search("^\s*{}".format(options_key), line):
                    cfile.write(line)
            cfile.write(new_options + "\n")
    if systemd:
        perftune_print("Restarting irqbalance via systemctl...")
        run_one_command(['systemctl', 'try-restart', 'irqbalance'])
    else:
        perftune_print("Restarting irqbalance directly (init.d)...")
        run_one_command(['/etc/init.d/irqbalance', 'restart'])
def learn_irqs_from_proc_interrupts(pattern, irq2procline):
    """Return the IRQ numbers whose /proc/interrupts line matches *pattern*.

    :param pattern: regular expression searched in each line.
    :param irq2procline: map of IRQ number -> its /proc/interrupts line.
    """
    return [irq for irq, proc_line in irq2procline.items()
            if re.search(pattern, proc_line)]
def learn_all_irqs_one(irq_conf_dir, irq2procline, xen_dev_name):
    """
    Returns a list of IRQs of a single device.

    irq_conf_dir: a /sys/... directory with the IRQ information for the given device
    irq2procline: a map of IRQs to the corresponding lines in the /proc/interrupts
    xen_dev_name: a device name pattern as it appears in the /proc/interrupts on Xen systems
    """
    msi_irqs_dir_name = os.path.join(irq_conf_dir, 'msi_irqs')
    # Device uses MSI IRQs: each entry in msi_irqs/ is an IRQ number.
    if os.path.exists(msi_irqs_dir_name):
        return os.listdir(msi_irqs_dir_name)
    irq_file_name = os.path.join(irq_conf_dir, 'irq')
    # Device uses INT#x: the 'irq' file holds the IRQ number(s).
    if os.path.exists(irq_file_name):
        return [ line.lstrip().rstrip() for line in open(irq_file_name, 'r').readlines() ]
    # No irq file detected - fall back to identifying the device via modalias.
    modalias = open(os.path.join(irq_conf_dir, 'modalias'), 'r').readline()
    # virtio case: match virtio-named subdirectories under 'driver' against
    # /proc/interrupts lines and collect every matching IRQ.
    if re.search("^virtio", modalias):
        return list(itertools.chain.from_iterable(
            map(lambda dirname : learn_irqs_from_proc_interrupts(dirname, irq2procline),
                filter(lambda dirname : re.search('virtio', dirname),
                       itertools.chain.from_iterable([ dirnames for dirpath, dirnames, filenames in os.walk(os.path.join(irq_conf_dir, 'driver')) ])))))
    # xen case: look the device up in /proc/interrupts by its Xen name.
    if re.search("^xen:", modalias):
        return learn_irqs_from_proc_interrupts(xen_dev_name, irq2procline)
    return []
def get_irqs2procline_map():
    """Return a map of IRQ number (string) -> its full /proc/interrupts line."""
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open('/proc/interrupts', 'r') as f:
        return { line.split(':')[0].lstrip().rstrip() : line for line in f.readlines() }
################################################################################
class PerfTunerBase(metaclass=abc.ABCMeta):
    """Abstract base class for subsystem tuners.

    Owns the tuning "mode" and the two CPU masks derived from it:
      * compute_cpu_mask - CPUs left for the application threads;
      * irqs_cpu_mask    - CPUs that are going to service IRQs.
    Subclasses implement tune(), _get_def_mode() and _get_irqs().
    """
    def __init__(self, args):
        self.__args = args
        # Restrict the user-supplied CPU mask to CPUs that actually exist.
        self.__args.cpu_mask = run_hwloc_calc(['--restrict', self.__args.cpu_mask, 'all'])
        self.__mode = None
        self.__irq_cpu_mask = args.irq_cpu_mask
        if self.__irq_cpu_mask:
            # An explicit IRQ mask was given: compute CPUs are everything else.
            self.__compute_cpu_mask = run_hwloc_calc([self.__args.cpu_mask, "~{}".format(self.__irq_cpu_mask)])
        else:
            # Masks will be derived lazily from the mode
            # (see __set_mode_and_masks()).
            self.__compute_cpu_mask = None
        self.__is_aws_i3_nonmetal_instance = None

    #### Public methods ##########################
    class CPUMaskIsZeroException(Exception):
        """Thrown if CPU mask turns out to be zero"""
        pass

    class SupportedModes(enum.IntEnum):
        """
        Modes are ordered from the one that cuts the biggest number of CPUs
        from the compute CPUs' set to the one that takes the smallest ('mq' doesn't
        cut any CPU from the compute set).

        This fact is used when we calculate the 'common quotient' mode out of a
        given set of modes (e.g. default modes of different Tuners) - this would
        be the smallest among the given modes.
        """
        sq_split = 0
        sq = 1
        mq = 2

        # Note: no_irq_restrictions should always have the greatest value in the enum since it's the least restricting mode.
        no_irq_restrictions = 9999

        @staticmethod
        def names():
            # All valid mode names, e.g. for argparse 'choices'.
            return PerfTunerBase.SupportedModes.__members__.keys()

        @staticmethod
        def combine(modes):
            """
            :param modes: a set of modes of the PerfTunerBase.SupportedModes type
            :return: the mode that is the "common ground" for a given set of modes.
            """
            # Perform an explicit cast in order to verify that the values in the 'modes' are compatible with the
            # expected PerfTunerBase.SupportedModes type.
            return min([PerfTunerBase.SupportedModes(m) for m in modes])

    @staticmethod
    def cpu_mask_is_zero(cpu_mask):
        """
        The irqs_cpu_mask is a coma-separated list of 32-bit hex values, e.g. 0xffff,0x0,0xffff
        We want to estimate if the whole mask is all-zeros.
        :param cpu_mask: hwloc-calc generated CPU mask
        :return: True if mask is zero, False otherwise
        """
        for cur_irqs_cpu_mask in cpu_mask.split(','):
            if int(cur_irqs_cpu_mask, 16) != 0:
                return False
        return True

    @staticmethod
    def compute_cpu_mask_for_mode(mq_mode, cpu_mask):
        """Return the compute (application) CPU mask for the given mode.

        Raises CPUMaskIsZeroException if the result would leave no CPU for
        the application.
        """
        mq_mode = PerfTunerBase.SupportedModes(mq_mode)
        # NOTE: despite its name, this local holds the *compute* mask here.
        irqs_cpu_mask = 0
        if mq_mode == PerfTunerBase.SupportedModes.sq:
            # all but CPU0
            irqs_cpu_mask = run_hwloc_calc([cpu_mask, '~PU:0'])
        elif mq_mode == PerfTunerBase.SupportedModes.sq_split:
            # all but CPU0 and its HT siblings
            irqs_cpu_mask = run_hwloc_calc([cpu_mask, '~core:0'])
        elif mq_mode == PerfTunerBase.SupportedModes.mq:
            # all available cores
            irqs_cpu_mask = cpu_mask
        elif mq_mode == PerfTunerBase.SupportedModes.no_irq_restrictions:
            # all available cores
            irqs_cpu_mask = cpu_mask
        else:
            raise Exception("Unsupported mode: {}".format(mq_mode))
        if PerfTunerBase.cpu_mask_is_zero(irqs_cpu_mask):
            raise PerfTunerBase.CPUMaskIsZeroException("Bad configuration mode ({}) and cpu-mask value ({}): this results in a zero-mask for compute".format(mq_mode.name, cpu_mask))
        return irqs_cpu_mask

    @staticmethod
    def irqs_cpu_mask_for_mode(mq_mode, cpu_mask):
        """Return the IRQ-serving CPU mask for the given mode (the
        complement of the compute mask within cpu_mask, except for the
        unrestricted modes)."""
        mq_mode = PerfTunerBase.SupportedModes(mq_mode)
        irqs_cpu_mask = 0
        if mq_mode != PerfTunerBase.SupportedModes.mq and mq_mode != PerfTunerBase.SupportedModes.no_irq_restrictions:
            irqs_cpu_mask = run_hwloc_calc([cpu_mask, "~{}".format(PerfTunerBase.compute_cpu_mask_for_mode(mq_mode, cpu_mask))])
        else: # mq_mode == PerfTunerBase.SupportedModes.mq or mq_mode == PerfTunerBase.SupportedModes.no_irq_restrictions
            # distribute equally between all available cores
            irqs_cpu_mask = cpu_mask
        if PerfTunerBase.cpu_mask_is_zero(irqs_cpu_mask):
            raise PerfTunerBase.CPUMaskIsZeroException("Bad configuration mode ({}) and cpu-mask value ({}): this results in a zero-mask for IRQs".format(mq_mode.name, cpu_mask))
        return irqs_cpu_mask

    @property
    def mode(self):
        """
        Return the configuration mode
        """
        # Make sure the configuration mode is set (see the __set_mode_and_masks() description).
        if self.__mode is None:
            self.__set_mode_and_masks()
        return self.__mode

    @mode.setter
    def mode(self, new_mode):
        """
        Set the new configuration mode and recalculate the corresponding masks.
        """
        # Make sure the new_mode is of PerfTunerBase.AllowedModes type
        self.__mode = PerfTunerBase.SupportedModes(new_mode)
        self.__compute_cpu_mask = PerfTunerBase.compute_cpu_mask_for_mode(self.__mode, self.__args.cpu_mask)
        self.__irq_cpu_mask = PerfTunerBase.irqs_cpu_mask_for_mode(self.__mode, self.__args.cpu_mask)

    @property
    def compute_cpu_mask(self):
        """
        Return the CPU mask to use for seastar application binding.
        """
        # see the __set_mode_and_masks() description
        if self.__compute_cpu_mask is None:
            self.__set_mode_and_masks()
        return self.__compute_cpu_mask

    @property
    def irqs_cpu_mask(self):
        """
        Return the mask of CPUs used for IRQs distribution.
        """
        # see the __set_mode_and_masks() description
        if self.__irq_cpu_mask is None:
            self.__set_mode_and_masks()
        return self.__irq_cpu_mask

    @property
    def is_aws_i3_non_metal_instance(self):
        """
        :return: True if we are running on the AWS i3.nonmetal instance, e.g. i3.4xlarge
        """
        # Lazily detected on first use (queries the AWS metadata server).
        if self.__is_aws_i3_nonmetal_instance is None:
            self.__check_host_type()
        return self.__is_aws_i3_nonmetal_instance

    @property
    def args(self):
        # The parsed command-line arguments this tuner was created with.
        return self.__args

    @property
    def irqs(self):
        # All IRQs this tuner is going to configure (subclass-provided).
        return self._get_irqs()

    #### "Protected"/Public (pure virtual) methods ###########
    @abc.abstractmethod
    def tune(self):
        """Apply the tuning to the system (subclass-specific)."""
        pass

    @abc.abstractmethod
    def _get_def_mode(self):
        """
        Return a default configuration mode.
        """
        pass

    @abc.abstractmethod
    def _get_irqs(self):
        """
        Return the iteratable value with all IRQs to be configured.
        """
        pass

    #### Private methods ############################
    def __set_mode_and_masks(self):
        """
        Sets the configuration mode and the corresponding CPU masks. We can't
        initialize them in the constructor because the default mode may depend
        on the child-specific values that are set in its constructor.

        That's why we postpone the mode's and the corresponding masks'
        initialization till after the child instance creation.
        """
        if self.__args.mode:
            self.mode = PerfTunerBase.SupportedModes[self.__args.mode]
        else:
            self.mode = self._get_def_mode()

    def __check_host_type(self):
        """
        Check if we are running on the AWS i3 nonmetal instance.
        If yes, set self.__is_aws_i3_nonmetal_instance to True, and to False otherwise.
        """
        try:
            # Query the AWS instance metadata service with a short timeout so
            # non-AWS hosts fail fast.
            aws_instance_type = urllib.request.urlopen("http://169.254.169.254/latest/meta-data/instance-type", timeout=0.1).read().decode()
            if re.match(r'^i3\.((?!metal)\w)+$', aws_instance_type):
                self.__is_aws_i3_nonmetal_instance = True
            else:
                self.__is_aws_i3_nonmetal_instance = False
            return
        except (urllib.error.URLError, ConnectionError, TimeoutError):
            # Non-AWS case
            pass
        except:
            logging.warning("Unexpected exception while attempting to access AWS meta server: {}".format(sys.exc_info()[0]))
        self.__is_aws_i3_nonmetal_instance = False
#################################################
class NetPerfTuner(PerfTunerBase):
def __init__(self, args):
super().__init__(args)
self.nics=args.nics
self.__nic_is_bond_iface = self.__check_dev_is_bond_iface()
self.__slaves = self.__learn_slaves()
# check that self.nics contain a HW device or a bonding interface
self.__check_nics()
self.__irqs2procline = get_irqs2procline_map()
self.__nic2irqs = self.__learn_irqs()
#### Public methods ############################
def tune(self):
"""
Tune the networking server configuration.
"""
for nic in self.nics:
if self.nic_is_hw_iface(nic):
perftune_print("Setting a physical interface {}...".format(nic))
self.__setup_one_hw_iface(nic)
else:
perftune_print("Setting {} bonding interface...".format(nic))
self.__setup_bonding_iface(nic)
# Increase the socket listen() backlog
fwriteln_and_log('/proc/sys/net/core/somaxconn', '4096')
# Increase the maximum number of remembered connection requests, which are still
# did not receive an acknowledgment from connecting client.
fwriteln_and_log('/proc/sys/net/ipv4/tcp_max_syn_backlog', '4096')
def nic_is_bond_iface(self, nic):
return self.__nic_is_bond_iface[nic]
def nic_exists(self, nic):
return self.__iface_exists(nic)
def nic_is_hw_iface(self, nic):
return self.__dev_is_hw_iface(nic)
def slaves(self, nic):
"""
Returns an iterator for all slaves of the nic.
If agrs.nic is not a bonding interface an attempt to use the returned iterator
will immediately raise a StopIteration exception - use __dev_is_bond_iface() check to avoid this.
"""
return iter(self.__slaves[nic])
#### Protected methods ##########################
def _get_def_mode(self):
mode=PerfTunerBase.SupportedModes.no_irq_restrictions
for nic in self.nics:
if self.nic_is_bond_iface(nic):
mode = min(mode, min(map(self.__get_hw_iface_def_mode, filter(self.__dev_is_hw_iface, self.slaves(nic)))))
else:
mode = min(mode, self.__get_hw_iface_def_mode(nic))
return mode
def _get_irqs(self):
"""
Returns the iterator for all IRQs that are going to be configured (according to args.nics parameter).
For instance, for a bonding interface that's going to include IRQs of all its slaves.
"""
return itertools.chain.from_iterable(self.__nic2irqs.values())
#### Private methods ############################
@property
def __rfs_table_size(self):
return 32768
def __check_nics(self):
"""
Checks that self.nics are supported interfaces
"""
for nic in self.nics:
if not self.nic_exists(nic):
raise Exception("Device {} does not exist".format(nic))
if not self.nic_is_hw_iface(nic) and not self.nic_is_bond_iface(nic):
raise Exception("Not supported virtual device {}".format(nic))
def __get_irqs_one(self, iface):
"""
Returns the list of IRQ numbers for the given interface.
"""
return self.__nic2irqs[iface]
def __setup_rfs(self, iface):
rps_limits = glob.glob("/sys/class/net/{}/queues/*/rps_flow_cnt".format(iface))
one_q_limit = int(self.__rfs_table_size / len(rps_limits))
# If RFS feature is not present - get out
try:
run_one_command(['sysctl', 'net.core.rps_sock_flow_entries'])
except:
return
# Enable RFS
perftune_print("Setting net.core.rps_sock_flow_entries to {}".format(self.__rfs_table_size))
run_one_command(['sysctl', '-w', 'net.core.rps_sock_flow_entries={}'.format(self.__rfs_table_size)])
# Set each RPS queue limit
for rfs_limit_cnt in rps_limits:
msg = "Setting limit {} in {}".format(one_q_limit, rfs_limit_cnt)
fwriteln(rfs_limit_cnt, "{}".format(one_q_limit), log_message=msg)
# Enable ntuple filtering HW offload on the NIC
ethtool_msg = "Enable ntuple filtering HW offload for {}...".format(iface)
if dry_run_mode:
perftune_print(ethtool_msg)
run_one_command(['ethtool','-K', iface, 'ntuple', 'on'], stderr=subprocess.DEVNULL)
else:
try:
print("Trying to enable ntuple filtering HW offload for {}...".format(iface), end='')
run_one_command(['ethtool','-K', iface, 'ntuple', 'on'], stderr=subprocess.DEVNULL)
print("ok")
except:
print("not supported")
def __setup_rps(self, iface, mask):
for one_rps_cpus in self.__get_rps_cpus(iface):
set_one_mask(one_rps_cpus, mask)
self.__setup_rfs(iface)
def __setup_xps(self, iface):
xps_cpus_list = glob.glob("/sys/class/net/{}/queues/*/xps_cpus".format(iface))
masks = run_hwloc_distrib(["{}".format(len(xps_cpus_list))])
for i, mask in enumerate(masks):
set_one_mask(xps_cpus_list[i], mask)
def __iface_exists(self, iface):
if len(iface) == 0:
return False
return os.path.exists("/sys/class/net/{}".format(iface))
def __dev_is_hw_iface(self, iface):
return os.path.exists("/sys/class/net/{}/device".format(iface))
def __check_dev_is_bond_iface(self):
bond_dict = {}
if not os.path.exists('/sys/class/net/bonding_masters'):
for nic in self.nics:
bond_dict[nic] = False
#return False for every nic
return bond_dict
for nic in self.nics:
bond_dict[nic] = any([re.search(nic, line) for line in open('/sys/class/net/bonding_masters', 'r').readlines()])
return bond_dict
def __learn_slaves(self):
slaves_list_per_nic = {}
for nic in self.nics:
if self.nic_is_bond_iface(nic):
slaves_list_per_nic[nic] = list(itertools.chain.from_iterable([line.split() for line in open("/sys/class/net/{}/bonding/slaves".format(nic), 'r').readlines()]))
return slaves_list_per_nic
def __intel_irq_to_queue_idx(self, irq):
"""
Return the HW queue index for a given IRQ for Intel NICs in order to sort the IRQs' list by this index.
Intel's fast path IRQs have the following name convention:
<bla-bla>-TxRx-<queue index>
Intel NICs also have the IRQ for Flow Director (which is not a regular fast path IRQ) which name looks like
this:
<bla-bla>:fdir-TxRx-<index>
We want to put the Flow Director's IRQ at the end of the sorted list of IRQs.
:param irq: IRQ number
:return: HW queue index for Intel NICs and 0 for all other NICs
"""
intel_fp_irq_re = re.compile("\-TxRx\-(\d+)")
fdir_re = re.compile("fdir\-TxRx\-\d+")
m = intel_fp_irq_re.search(self.__irqs2procline[irq])
m1 = fdir_re.search(self.__irqs2procline[irq])
if m and not m1:
return int(m.group(1))
else:
return sys.maxsize
def __mlx_irq_to_queue_idx(self, irq):
"""
Return the HW queue index for a given IRQ for Mellanox NICs in order to sort the IRQs' list by this index.
Mellanox NICs have the IRQ which name looks like
this:
mlx5_comp23
mlx5_comp<index>
or this:
mlx4-6
mlx4-<index>
:param irq: IRQ number
:return: HW queue index for Mellanox NICs and 0 for all other NICs
"""
mlx5_fp_irq_re = re.compile("mlx5_comp(\d+)")
mlx4_fp_irq_re = re.compile("mlx4\-(\d+)")
m5 = mlx5_fp_irq_re.search(self.__irqs2procline[irq])
if m5:
return int(m5.group(1))
else:
m4 = mlx4_fp_irq_re.search(self.__irqs2procline[irq])
if m4:
return int(m4.group(1))
return sys.maxsize
def __get_driver_name(self, iface):
"""
:param iface: Interface to check
:return: driver name from ethtool
"""
driver_name = ''
ethtool_i_lines = run_ethtool(['-i', iface])
driver_re = re.compile("driver:")
driver_lines = list(filter(lambda one_line: driver_re.search(one_line), ethtool_i_lines))
if driver_lines:
if len(driver_lines) > 1:
raise Exception("More than one 'driver:' entries in the 'ethtool -i {}' output. Unable to continue.".format(iface))
driver_name = driver_lines[0].split()[1].strip()
return driver_name
def __learn_irqs_one(self, iface):
"""
This is a slow method that is going to read from the system files. Never
use it outside the initialization code. Use __get_irqs_one() instead.
Filter the fast path queues IRQs from the __get_all_irqs_one() result according to the known
patterns.
Right now we know about the following naming convention of the fast path queues vectors:
- Intel: <bla-bla>-TxRx-<bla-bla>
- Broadcom: <bla-bla>-fp-<bla-bla>
- ena: <bla-bla>-Tx-Rx-<bla-bla>
- Mellanox: for mlx4
mlx4-<queue idx>@<bla-bla>
or for mlx5
mlx5_comp<queue idx>@<bla-bla>
So, we will try to filter the etries in /proc/interrupts for IRQs we've got from get_all_irqs_one()
according to the patterns above.
If as a result all IRQs are filtered out (if there are no IRQs with the names from the patterns above) then
this means that the given NIC uses a different IRQs naming pattern. In this case we won't filter any IRQ.
Otherwise, we will use only IRQs which names fit one of the patterns above.
For NICs with a limited number of Rx queues the IRQs that handle Rx are going to be at the beginning of the
list.
"""
# filter 'all_irqs' to only reference valid keys from 'irqs2procline' and avoid an IndexError on the 'irqs' search below
all_irqs = set(learn_all_irqs_one("/sys/class/net/{}/device".format(iface), self.__irqs2procline, iface)).intersection(self.__irqs2procline.keys())
fp_irqs_re = re.compile("\-TxRx\-|\-fp\-|\-Tx\-Rx\-|mlx4-\d+@|mlx5_comp\d+@")
irqs = list(filter(lambda irq : fp_irqs_re.search(self.__irqs2procline[irq]), all_irqs))
if irqs:
driver_name = self.__get_driver_name(iface)
if (driver_name.startswith("mlx")):
irqs.sort(key=self.__mlx_irq_to_queue_idx)
else:
irqs.sort(key=self.__intel_irq_to_queue_idx)
return irqs
else:
return list(all_irqs)
def __learn_irqs(self):
"""
This is a slow method that is going to read from the system files. Never
use it outside the initialization code.
"""
nic_irq_dict={}
for nic in self.nics:
if self.nic_is_bond_iface(nic):
for slave in filter(self.__dev_is_hw_iface, self.slaves(nic)):
nic_irq_dict[slave] = self.__learn_irqs_one(slave)
else:
nic_irq_dict[nic] = self.__learn_irqs_one(nic)
return nic_irq_dict
def __get_rps_cpus(self, iface):
"""
Prints all rps_cpus files names for the given HW interface.
There is a single rps_cpus file for each RPS queue and there is a single RPS
queue for each HW Rx queue. Each HW Rx queue should have an IRQ.
Therefore the number of these files is equal to the number of fast path Rx IRQs for this interface.
"""
return glob.glob("/sys/class/net/{}/queues/*/rps_cpus".format(iface))
def __setup_one_hw_iface(self, iface):
max_num_rx_queues = self.__max_rx_queue_count(iface)
all_irqs = self.__get_irqs_one(iface)
# Bind the NIC's IRQs according to the configuration mode
#
# If this NIC has a limited number of Rx queues then we want to distribute their IRQs separately.
# For such NICs we've sorted IRQs list so that IRQs that handle Rx are all at the head of the list.
if max_num_rx_queues < len(all_irqs):
num_rx_queues = self.__get_rx_queue_count(iface)
perftune_print("Distributing IRQs handling Rx:")
distribute_irqs(all_irqs[0:num_rx_queues], self.irqs_cpu_mask)
perftune_print("Distributing the rest of IRQs")
distribute_irqs(all_irqs[num_rx_queues:], self.irqs_cpu_mask)
else:
perftune_print("Distributing all IRQs")
distribute_irqs(all_irqs, self.irqs_cpu_mask)
self.__setup_rps(iface, self.compute_cpu_mask)
self.__setup_xps(iface)
def __setup_bonding_iface(self, nic):
for slave in self.slaves(nic):
if self.__dev_is_hw_iface(slave):
perftune_print("Setting up {}...".format(slave))
self.__setup_one_hw_iface(slave)
else:
perftune_print("Skipping {} (not a physical slave device?)".format(slave))
def __max_rx_queue_count(self, iface):
"""
:param iface: Interface to check
:return: The maximum number of RSS queues for the given interface if there is known limitation and sys.maxsize
otherwise.
Networking drivers serving HW with the known maximum RSS queue limitation (due to lack of RSS bits):
ixgbe: PF NICs support up to 16 RSS queues.
ixgbevf: VF NICs support up to 4 RSS queues.
i40e: PF NICs support up to 64 RSS queues.
i40evf: VF NICs support up to 16 RSS queues.
"""
driver_to_max_rss = {'ixgbe': 16, 'ixgbevf': 4, 'i40e': 64, 'i40evf': 16}
driver_name = self.__get_driver_name(iface)
return driver_to_max_rss.get(driver_name, sys.maxsize)
def __get_rx_queue_count(self, iface):
"""
:return: the RSS Rx queues count for the given interface.
"""
num_irqs = len(self.__get_irqs_one(iface))
rx_queues_count = len(self.__get_rps_cpus(iface))
if rx_queues_count == 0:
rx_queues_count = num_irqs
return min(self.__max_rx_queue_count(iface), rx_queues_count)
    def __get_hw_iface_def_mode(self, iface):
        """
        Returns the default configuration mode for the given interface.

        Heuristic: very small machines (<= 4 PUs) or NICs with one Rx queue
        per PU get 'mq'; machines with few physical cores get 'sq'; everything
        else gets 'sq_split'.
        """
        rx_queues_count = self.__get_rx_queue_count(iface)

        num_cores = int(run_hwloc_calc(['--number-of', 'core', 'machine:0', '--restrict', self.args.cpu_mask]))
        num_PUs = int(run_hwloc_calc(['--number-of', 'PU', 'machine:0', '--restrict', self.args.cpu_mask]))

        if num_PUs <= 4 or rx_queues_count == num_PUs:
            return PerfTunerBase.SupportedModes.mq
        elif num_cores <= 4:
            return PerfTunerBase.SupportedModes.sq
        else:
            return PerfTunerBase.SupportedModes.sq_split
class ClocksourceManager:
    """
    Checks and (optionally) enforces the preferred system clocksource via the
    sysfs clocksource0 interface.

    Preferred sources: 'tsc' on bare-metal x86_64, 'kvm-clock' under KVM.
    """
    class PreferredClockSourceNotAvailableException(Exception):
        pass

    def __init__(self, args):
        self.__args = args
        # arch/virtualization identifier -> preferred clocksource name
        self._preferred = {"x86_64": "tsc", "kvm": "kvm-clock"}
        self._arch = self._get_arch()
        self._available_clocksources_file = "/sys/devices/system/clocksource/clocksource0/available_clocksource"
        self._current_clocksource_file = "/sys/devices/system/clocksource/clocksource0/current_clocksource"
        # Fixed typo in the x86_64 message: "withe" -> "with"
        self._recommendation_if_unavailable = { "x86_64": "The tsc clocksource is not available. Consider using a hardware platform where the tsc clocksource is available, or try forcing it with the tsc=reliable boot option", "kvm": "kvm-clock is not available" }

    def _available_clocksources(self):
        """Return the list of clocksources the kernel reports as available."""
        # 'with' closes the sysfs file promptly instead of leaking the handle.
        with open(self._available_clocksources_file) as f:
            return f.readline().split()

    def _current_clocksource(self):
        """Return the name of the currently selected clocksource."""
        with open(self._current_clocksource_file) as f:
            return f.readline().strip()

    def _get_arch(self):
        """
        Return 'kvm' when running under KVM, otherwise the machine
        architecture string (e.g. 'x86_64').
        """
        try:
            virt = run_read_only_command(['systemd-detect-virt']).strip()
            if virt == "kvm":
                return virt
        except Exception:
            # systemd-detect-virt may be missing or fail - fall back to the
            # raw architecture. (Was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt.)
            pass
        return platform.machine()

    def enforce_preferred_clocksource(self):
        """Write the preferred clocksource for this arch into sysfs."""
        fwriteln(self._current_clocksource_file, self._preferred[self._arch], "Setting clocksource to {}".format(self._preferred[self._arch]))

    def preferred(self):
        """Preferred clocksource name for the detected architecture."""
        return self._preferred[self._arch]

    def setting_available(self):
        """True if a preferred clocksource is known for this architecture."""
        return self._arch in self._preferred

    def preferred_clocksource_available(self):
        """True if the kernel lists the preferred clocksource as available."""
        return self._preferred[self._arch] in self._available_clocksources()

    def recommendation_if_unavailable(self):
        """Human-readable advice when the preferred clocksource is missing."""
        return self._recommendation_if_unavailable[self._arch]
class SystemPerfTuner(PerfTunerBase):
    """
    Tunes system-wide settings. Currently only handles the clocksource
    (enabled via --tune-clock).
    """
    def __init__(self, args):
        super().__init__(args)
        self._clocksource_manager = ClocksourceManager(args)

    def tune(self):
        # Only acts when --tune-clock was requested; otherwise this is a no-op.
        if self.args.tune_clock:
            if not self._clocksource_manager.setting_available():
                perftune_print("Clocksource setting not available or not needed for this architecture. Not tuning");
            elif not self._clocksource_manager.preferred_clocksource_available():
                perftune_print(self._clocksource_manager.recommendation_if_unavailable())
            else:
                self._clocksource_manager.enforce_preferred_clocksource()

#### Protected methods ##########################
    def _get_def_mode(self):
        """
        This tuner doesn't apply any restriction to the final tune mode for now.
        """
        return PerfTunerBase.SupportedModes.no_irq_restrictions

    def _get_irqs(self):
        # This tuner does not manage any IRQs directly.
        return []
class DiskPerfTuner(PerfTunerBase):
    """
    Tunes disks and their IRQs: distributes IRQs of NVMe and non-NVMe devices,
    sets the I/O scheduler, 'nomerges' and (optionally) the write cache mode.

    Disks are discovered from the directories (--dir) and devices (--dev)
    given on the command line, resolved down to physical block devices via
    udev/sysfs.
    """
    class SupportedDiskTypes(enum.IntEnum):
        nvme = 0
        non_nvme = 1

    def __init__(self, args):
        super().__init__(args)

        if not (self.args.dirs or self.args.devs):
            raise Exception("'disks' tuning was requested but neither directories nor storage devices were given")

        self.__pyudev_ctx = pyudev.Context()
        self.__dir2disks = self.__learn_directories()
        self.__irqs2procline = get_irqs2procline_map()
        self.__disk2irqs = self.__learn_irqs()
        self.__type2diskinfo = self.__group_disks_info_by_type()

        # sets of devices that have already been tuned
        self.__io_scheduler_tuned_devs = set()
        self.__nomerges_tuned_devs = set()
        self.__write_back_cache_tuned_devs = set()

#### Public methods #############################
    def tune(self):
        """
        Distribute IRQs according to the requested mode (args.mode):
           - Distribute NVMe disks' IRQs equally among all available CPUs.
           - Distribute non-NVMe disks' IRQs equally among designated CPUs or among
             all available CPUs in the 'mq' mode.
        """
        mode_cpu_mask = PerfTunerBase.irqs_cpu_mask_for_mode(self.mode, self.args.cpu_mask)

        non_nvme_disks, non_nvme_irqs = self.__disks_info_by_type(DiskPerfTuner.SupportedDiskTypes.non_nvme)
        if non_nvme_disks:
            perftune_print("Setting non-NVMe disks: {}...".format(", ".join(non_nvme_disks)))
            distribute_irqs(non_nvme_irqs, mode_cpu_mask)
            self.__tune_disks(non_nvme_disks)
        else:
            perftune_print("No non-NVMe disks to tune")

        nvme_disks, nvme_irqs = self.__disks_info_by_type(DiskPerfTuner.SupportedDiskTypes.nvme)
        if nvme_disks:
            # Linux kernel is going to use IRQD_AFFINITY_MANAGED mode for NVMe IRQs
            # on most systems (currently only AWS i3 non-metal are known to have a
            # different configuration). SMP affinity of an IRQ in this mode may not be
            # changed and an attempt to modify it is going to fail. However right now
            # the only way to determine that IRQD_AFFINITY_MANAGED mode has been used
            # is to attempt to modify IRQ SMP affinity (and fail) therefore we prefer
            # to always do it.
            #
            # What we don't want however is to see annoying errors every time we
            # detect that IRQD_AFFINITY_MANAGED was actually used. Therefore we will only log
            # them in the "verbose" mode or when we run on an i3.nonmetal AWS instance.
            perftune_print("Setting NVMe disks: {}...".format(", ".join(nvme_disks)))
            distribute_irqs(nvme_irqs, self.args.cpu_mask,
                            log_errors=(self.is_aws_i3_non_metal_instance or self.args.verbose))
            self.__tune_disks(nvme_disks)
        else:
            perftune_print("No NVMe disks to tune")

#### Protected methods ##########################
    def _get_def_mode(self):
        """
        Return a default configuration mode.
        """
        # if the only disks we are tuning are NVMe disks - return the MQ mode
        non_nvme_disks, non_nvme_irqs = self.__disks_info_by_type(DiskPerfTuner.SupportedDiskTypes.non_nvme)
        if not non_nvme_disks:
            return PerfTunerBase.SupportedModes.mq

        num_cores = int(run_hwloc_calc(['--number-of', 'core', 'machine:0', '--restrict', self.args.cpu_mask]))
        num_PUs = int(run_hwloc_calc(['--number-of', 'PU', 'machine:0', '--restrict', self.args.cpu_mask]))
        if num_PUs <= 4:
            return PerfTunerBase.SupportedModes.mq
        elif num_cores <= 4:
            return PerfTunerBase.SupportedModes.sq
        else:
            return PerfTunerBase.SupportedModes.sq_split

    def _get_irqs(self):
        # All IRQs of all discovered disks, flattened.
        return itertools.chain.from_iterable(irqs for disks, irqs in self.__type2diskinfo.values())

#### Private methods ############################
    @property
    def __io_schedulers(self):
        """
        :return: An ordered list of IO schedulers that we want to configure. Schedulers are ordered by their priority
        from the highest (left most) to the lowest.
        """
        return ["none", "noop"]

    @property
    def __nomerges(self):
        # sysfs value '2' disables all merge attempts on the request queue.
        return '2'

    @property
    def __write_cache_config(self):
        """
        :return: None - if write cache mode configuration is not requested or the corresponding write cache
        configuration value string
        """
        if self.args.set_write_back is None:
            return None

        return "write back" if self.args.set_write_back else "write through"

    def __disks_info_by_type(self, disks_type):
        """
        Returns a tuple ( [<disks>], [<irqs>] ) for the given disks type.
        IRQs numbers in the second list are promised to be unique.
        """
        return self.__type2diskinfo[DiskPerfTuner.SupportedDiskTypes(disks_type)]

    def __nvme_fast_path_irq_filter(self, irq):
        """
        Return True for fast path NVMe IRQs.
        For NVMe device only queues 1-<number of CPUs> are going to do fast path work.

        NVMe IRQs have the following name convention:
             nvme<device index>q<queue index>, e.g. nvme0q7

        :param irq: IRQ number
        :return: True if this IRQ is an IRQ of a FP NVMe queue.
        """
        nvme_irq_re = re.compile(r'(\s|^)nvme\d+q(\d+)(\s|$)')

        # There may be more than an single HW queue bound to the same IRQ. In this case queue names are going to be
        # coma separated
        split_line = self.__irqs2procline[irq].split(",")

        for line in split_line:
            m = nvme_irq_re.search(line)
            # Queue 0 is the admin queue - only queues 1..num_cpus do fast path work.
            if m and 0 < int(m.group(2)) <= multiprocessing.cpu_count():
                return True

        return False

    def __group_disks_info_by_type(self):
        """
        Return a map of tuples ( [<disks>], [<irqs>] ), where "disks" are all disks of the specific type
        and "irqs" are the corresponding IRQs.

        It's promised that every element is "disks" and "irqs" is unique.

        The disk types are 'nvme' and 'non-nvme'
        """
        disks_info_by_type = {}
        nvme_disks = set()
        nvme_irqs = set()
        non_nvme_disks = set()
        non_nvme_irqs = set()
        nvme_disk_name_pattern = re.compile('^nvme')

        for disk, irqs in self.__disk2irqs.items():
            if nvme_disk_name_pattern.search(disk):
                nvme_disks.add(disk)
                for irq in irqs:
                    nvme_irqs.add(irq)
            else:
                non_nvme_disks.add(disk)
                for irq in irqs:
                    non_nvme_irqs.add(irq)

        if not (nvme_disks or non_nvme_disks):
            raise Exception("'disks' tuning was requested but no disks were found")

        nvme_irqs = list(nvme_irqs)

        # There is a known issue with Xen hypervisor that exposes itself on AWS i3 instances where nvme module
        # over-allocates HW queues and uses only queues 1,2,3,..., <up to number of CPUs> for data transfer.
        # On these instances we will distribute only these queues.
        if self.is_aws_i3_non_metal_instance:
            nvme_irqs = list(filter(self.__nvme_fast_path_irq_filter, nvme_irqs))

        # Sort IRQs for easier verification
        nvme_irqs.sort(key=lambda irq_num_str: int(irq_num_str))

        disks_info_by_type[DiskPerfTuner.SupportedDiskTypes.nvme] = (list(nvme_disks), nvme_irqs)
        disks_info_by_type[DiskPerfTuner.SupportedDiskTypes.non_nvme] = ( list(non_nvme_disks), list(non_nvme_irqs) )

        return disks_info_by_type

    def __learn_directories(self):
        # Map each requested directory to the physical disks backing it.
        return { directory : self.__learn_directory(directory) for directory in self.args.dirs }

    def __learn_directory(self, directory, recur=False):
        """
        Returns a list of disks the given directory is mounted on (there will be more than one if
        the mount point is on the RAID volume)
        """
        if not os.path.exists(directory):
            if not recur:
                perftune_print("{} doesn't exist - skipping".format(directory))

            return []

        try:
            udev_obj = pyudev.Devices.from_device_number(self.__pyudev_ctx, 'block', os.stat(directory).st_dev)
            return self.__get_phys_devices(udev_obj)
        except:
            # handle cases like ecryptfs where the directory is mounted to another directory and not to some block device
            filesystem = run_read_only_command(['df', '-P', directory]).splitlines()[-1].split()[0].strip()
            if not re.search(r'^/dev/', filesystem):
                # Recurse on the backing mount point instead of a device node.
                devs = self.__learn_directory(filesystem, True)
            else:
                raise Exception("Logic error: failed to create a udev device while 'df -P' {} returns a {}".format(directory, filesystem))

            # log error only for the original directory
            if not recur and not devs:
                perftune_print("Can't get a block device for {} - skipping".format(directory))

            return devs

    def __get_phys_devices(self, udev_obj):
        # if device is a virtual device - the underlying physical devices are going to be its slaves
        if re.search(r'virtual', udev_obj.sys_path):
            slaves = os.listdir(os.path.join(udev_obj.sys_path, 'slaves'))

            # If the device is virtual but doesn't have slaves (e.g. as nvm-subsystem virtual devices) handle it
            # as a regular device.
            if slaves:
                return list(itertools.chain.from_iterable([ self.__get_phys_devices(pyudev.Devices.from_device_file(self.__pyudev_ctx, "/dev/{}".format(slave))) for slave in slaves ]))

        # device node is something like /dev/sda1 - we need only the part without /dev/
        return [ re.match(r'/dev/(\S+\d*)', udev_obj.device_node).group(1) ]

    def __learn_irqs(self):
        # Map every discovered device to the IRQs of its controller.
        disk2irqs = {}

        for devices in list(self.__dir2disks.values()) + [ self.args.devs ]:
            for device in devices:
                # There could be that some of the given directories are on the same disk.
                # There is no need to rediscover IRQs of the disk we've already handled.
                if device in disk2irqs.keys():
                    continue

                udev_obj = pyudev.Devices.from_device_file(self.__pyudev_ctx, "/dev/{}".format(device))
                dev_sys_path = udev_obj.sys_path

                # If the device is a virtual NVMe device it's sys file name goes as follows:
                # /sys/devices/virtual/nvme-subsystem/nvme-subsys0/nvme0n1
                #
                # and then there is this symlink:
                # /sys/devices/virtual/nvme-subsystem/nvme-subsys0/nvme0n1/device/nvme0 -> ../../../pci0000:85/0000:85:01.0/0000:87:00.0/nvme/nvme0
                #
                # So, the "main device" is a "nvme\d+" prefix of the actual device name.
                if re.search(r'virtual', udev_obj.sys_path):
                    m = re.match(r'(nvme\d+)\S*', device)
                    if m:
                        dev_sys_path = "{}/device/{}".format(udev_obj.sys_path, m.group(1))

                split_sys_path = list(pathlib.PurePath(pathlib.Path(dev_sys_path).resolve()).parts)

                # first part is always /sys/devices/pciXXX ...
                controller_path_parts = split_sys_path[0:4]

                # ...then there is a chain of one or more "domain:bus:device.function" followed by the storage device enumeration crap
                # e.g. /sys/devices/pci0000:00/0000:00:1f.2/ata2/host1/target1:0:0/1:0:0:0/block/sda/sda3 or
                #      /sys/devices/pci0000:00/0000:00:02.0/0000:02:00.0/host6/target6:2:0/6:2:0:0/block/sda/sda1
                # We want only the path till the last BDF including - it contains the IRQs information.
                patt = re.compile("^[0-9ABCDEFabcdef]{4}\:[0-9ABCDEFabcdef]{2}\:[0-9ABCDEFabcdef]{2}\.[0-9ABCDEFabcdef]$")
                for split_sys_path_branch in split_sys_path[4:]:
                    if patt.search(split_sys_path_branch):
                        controller_path_parts.append(split_sys_path_branch)
                    else:
                        break

                controler_path_str = functools.reduce(lambda x, y : os.path.join(x, y), controller_path_parts)
                disk2irqs[device] = learn_all_irqs_one(controler_path_str, self.__irqs2procline, 'blkif')

        return disk2irqs

    def __get_feature_file(self, dev_node, path_creator):
        """
        Find the closest ancestor with the given feature and return its ('feature file', 'device node') tuple.

        If there isn't such an ancestor - return (None, None) tuple.

        :param dev_node Device node file name, e.g. /dev/sda1
        :param path_creator A functor that creates a feature file name given a device system file name
        """
        # Sanity check
        if dev_node is None or path_creator is None:
            return None, None

        udev = pyudev.Devices.from_device_file(pyudev.Context(), dev_node)
        feature_file = path_creator(udev.sys_path)

        if os.path.exists(feature_file):
            return feature_file, dev_node
        elif udev.parent is not None:
            # Walk up the device tree (e.g. partition -> whole disk).
            return self.__get_feature_file(udev.parent.device_node, path_creator)
        else:
            return None, None

    def __tune_one_feature(self, dev_node, path_creator, value, tuned_devs_set):
        """
        Find the closest ancestor that has the given feature, configure it and
        return True.

        If there isn't such ancestor - return False.

        :param dev_node Device node file name, e.g. /dev/sda1
        :param path_creator A functor that creates a feature file name given a device system file name
        """
        feature_file, feature_node = self.__get_feature_file(dev_node, path_creator)

        if feature_file is None:
            return False

        # Only write once per device node.
        if feature_node not in tuned_devs_set:
            fwriteln_and_log(feature_file, value)
            tuned_devs_set.add(feature_node)

        return True

    def __tune_io_scheduler(self, dev_node, io_scheduler):
        return self.__tune_one_feature(dev_node, lambda p : os.path.join(p, 'queue', 'scheduler'), io_scheduler, self.__io_scheduler_tuned_devs)

    def __tune_nomerges(self, dev_node):
        return self.__tune_one_feature(dev_node, lambda p : os.path.join(p, 'queue', 'nomerges'), self.__nomerges, self.__nomerges_tuned_devs)

    # If write cache configuration is not requested - return True immediately
    def __tune_write_back_cache(self, dev_node):
        if self.__write_cache_config is None:
            return True

        return self.__tune_one_feature(dev_node, lambda p : os.path.join(p, 'queue', 'write_cache'), self.__write_cache_config, self.__write_back_cache_tuned_devs)

    def __get_io_scheduler(self, dev_node):
        """
        Return a supported scheduler that is also present in the required schedulers list (__io_schedulers).

        If there isn't such a supported scheduler - return None.
        """
        feature_file, feature_node = self.__get_feature_file(dev_node, lambda p : os.path.join(p, 'queue', 'scheduler'))

        lines = readlines(feature_file)
        if not lines:
            return None

        # Supported schedulers appear in the config file as a single line as follows:
        #
        # sched1 [sched2] sched3
        #
        # ...with one or more schedulers where currently selected scheduler is the one in brackets.
        #
        # Return the scheduler with the highest priority among those that are supported for the current device.
        supported_schedulers = frozenset([scheduler.lstrip("[").rstrip("]").rstrip("\n") for scheduler in lines[0].split(" ")])
        return next((scheduler for scheduler in self.__io_schedulers if scheduler in supported_schedulers), None)

    def __tune_disk(self, device):
        # Tune a single physical device: scheduler, nomerges, write cache.
        dev_node = "/dev/{}".format(device)

        io_scheduler = self.__get_io_scheduler(dev_node)
        if not io_scheduler:
            perftune_print("Not setting I/O Scheduler for {} - required schedulers ({}) are not supported".format(device, list(self.__io_schedulers)))
        elif not self.__tune_io_scheduler(dev_node, io_scheduler):
            perftune_print("Not setting I/O Scheduler for {} - feature not present".format(device))

        if not self.__tune_nomerges(dev_node):
            perftune_print("Not setting 'nomerges' for {} - feature not present".format(device))

        if not self.__tune_write_back_cache(dev_node):
            perftune_print("Not setting 'write_cache' for {} - feature not present".format(device))

    def __tune_disks(self, disks):
        for disk in disks:
            self.__tune_disk(disk)
################################################################################
class TuneModes(enum.Enum):
    """Tunable subsystems selectable via the --tune option."""
    disks = 0
    net = 1
    system = 2

    @staticmethod
    def names():
        """Return the member names in declaration order."""
        return [member.name for member in TuneModes]
argp = argparse.ArgumentParser(description = 'Configure various system parameters in order to improve the seastar application performance.', formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=
'''
This script will:
- Ban relevant IRQs from being moved by irqbalance.
- Configure various system parameters in /proc/sys.
- Distribute the IRQs (using SMP affinity configuration) among CPUs according to the configuration mode (see below).
As a result some of the CPUs may be destined to only handle the IRQs and taken out of the CPU set
that should be used to run the seastar application ("compute CPU set").
Modes description:
sq - set all IRQs of a given NIC to CPU0 and configure RPS
to spreads NAPIs' handling between other CPUs.
sq_split - divide all IRQs of a given NIC between CPU0 and its HT siblings and configure RPS
to spreads NAPIs' handling between other CPUs.
mq - distribute NIC's IRQs among all CPUs instead of binding
them all to CPU0. In this mode RPS is always enabled to
spreads NAPIs' handling between all CPUs.
If there isn't any mode given script will use a default mode:
- If number of physical CPU cores per Rx HW queue is greater than 4 - use the 'sq-split' mode.
- Otherwise, if number of hyperthreads per Rx HW queue is greater than 4 - use the 'sq' mode.
- Otherwise use the 'mq' mode.
Default values:
--nic NIC - default: eth0
--cpu-mask MASK - default: all available cores mask
--tune-clock - default: false
''')
argp.add_argument('--mode', choices=PerfTunerBase.SupportedModes.names(), help='configuration mode')
argp.add_argument('--nic', action='append', help='network interface name(s), by default uses \'eth0\' (may appear more than once)', dest='nics', default=[])
argp.add_argument('--tune-clock', action='store_true', help='Force tuning of the system clocksource')
argp.add_argument('--get-cpu-mask', action='store_true', help="print the CPU mask to be used for compute")
argp.add_argument('--get-cpu-mask-quiet', action='store_true', help="print the CPU mask to be used for compute, print the zero CPU set if that's what it turns out to be")
argp.add_argument('--verbose', action='store_true', help="be more verbose about operations and their result")
argp.add_argument('--tune', choices=TuneModes.names(), help="components to configure (may be given more than once)", action='append', default=[])
argp.add_argument('--cpu-mask', help="mask of cores to use, by default use all available cores", metavar='MASK')
argp.add_argument('--irq-cpu-mask', help="mask of cores to use for IRQs binding", metavar='MASK')
argp.add_argument('--dir', help="directory to optimize (may appear more than once)", action='append', dest='dirs', default=[])
argp.add_argument('--dev', help="device to optimize (may appear more than once), e.g. sda1", action='append', dest='devs', default=[])
argp.add_argument('--options-file', help="configuration YAML file")
argp.add_argument('--dump-options-file', action='store_true', help="Print the configuration YAML file containing the current configuration")
argp.add_argument('--dry-run', action='store_true', help="Don't take any action, just recommend what to do.")
argp.add_argument('--write-back-cache', help="Enable/Disable \'write back\' write cache mode.", dest="set_write_back")
def parse_cpu_mask_from_yaml(y, field_name, fname):
    """
    Validate the CPU mask stored under *field_name* in the parsed YAML mapping
    *y* and return it unchanged.

    A valid mask is one or more comma separated 32-bit hex words
    (hwloc style), where inner words may be empty, e.g. '0xff,,0x1'.

    :param y: parsed YAML mapping
    :param field_name: key holding the mask
    :param fname: options file name (for the error message only)
    :raises Exception: when the value does not look like a CPU mask
    """
    hex_32bit_pattern='0x[0-9a-fA-F]{1,8}'
    mask_pattern = re.compile('^{}((,({})?)*,{})*$'.format(hex_32bit_pattern, hex_32bit_pattern, hex_32bit_pattern))

    # Guard clause: reject malformed values up front.
    if not mask_pattern.match(str(y[field_name])):
        raise Exception("Bad '{}' value in {}: {}".format(field_name, fname, str(y[field_name])))

    return y[field_name]
def extend_and_unique(orig_list, iterable):
    """
    Extend orig_list with iterable (in place, preserving the original side
    effect) and return a de-duplicated copy.

    The previous implementation returned list(set(...)), which produced a
    nondeterministic ordering. We now keep the first-occurrence order so
    user-supplied ordering (e.g. the NIC list) survives a config round trip.

    :param orig_list: list to extend (mutated)
    :param iterable: list of items to append
    :return: a new list with duplicates removed, first occurrence wins
    """
    assert(isinstance(orig_list, list))
    assert(isinstance(iterable, list))
    orig_list.extend(iterable)
    # dict preserves insertion order (Python 3.7+) - stable de-duplication
    return list(dict.fromkeys(orig_list))
def parse_options_file(prog_args):
    """
    Merge settings from the --options-file YAML into prog_args.

    Command line values win for scalar options (mode, masks, tune_clock);
    list options (nic, tune, dir, dev) are merged and de-duplicated.
    """
    if not prog_args.options_file:
        return

    y = yaml.safe_load(open(prog_args.options_file))
    if y is None:
        return

    if 'mode' in y and not prog_args.mode:
        if not y['mode'] in PerfTunerBase.SupportedModes.names():
            raise Exception("Bad 'mode' value in {}: {}".format(prog_args.options_file, y['mode']))
        prog_args.mode = y['mode']

    if 'nic' in y:
        # Multiple nics was supported by commit a2fc9d72c31b97840bc75ae49dbd6f4b6d394e25
        # `nic' option dumped to config file will be list after this change, but the `nic'
        # option in old config file is still string, which was generated before this change.
        # So here convert the string option to list.
        if not isinstance(y['nic'], list):
            y['nic'] = [y['nic']]
        prog_args.nics = extend_and_unique(prog_args.nics, y['nic'])

    if 'tune_clock' in y and not prog_args.tune_clock:
        prog_args.tune_clock= y['tune_clock']

    if 'tune' in y:
        if set(y['tune']) <= set(TuneModes.names()):
            prog_args.tune = extend_and_unique(prog_args.tune, y['tune'])
        else:
            raise Exception("Bad 'tune' value in {}: {}".format(prog_args.options_file, y['tune']))

    if 'cpu_mask' in y and not prog_args.cpu_mask:
        prog_args.cpu_mask = parse_cpu_mask_from_yaml(y, 'cpu_mask', prog_args.options_file)

    if 'irq_cpu_mask' in y and not prog_args.irq_cpu_mask:
        prog_args.irq_cpu_mask = parse_cpu_mask_from_yaml(y, 'irq_cpu_mask', prog_args.options_file)

    if 'dir' in y:
        prog_args.dirs = extend_and_unique(prog_args.dirs, y['dir'])

    if 'dev' in y:
        prog_args.devs = extend_and_unique(prog_args.devs, y['dev'])

    if 'write_back_cache' in y:
        # Accept YAML booleans as well as "yes"/"no" style strings.
        prog_args.set_write_back = distutils.util.strtobool("{}".format(y['write_back_cache']))
def dump_config(prog_args):
    """
    Print the current settings as an options-file YAML document
    (used by --dump-options-file). Unset options are omitted.
    """
    # (YAML key, current value) pairs; a key is emitted only when its value
    # is truthy, matching the original per-option 'if' chain.
    candidates = [
        ('mode', prog_args.mode),
        ('nic', prog_args.nics),
        ('tune_clock', prog_args.tune_clock),
        ('tune', prog_args.tune),
        ('cpu_mask', prog_args.cpu_mask),
        ('irq_cpu_mask', prog_args.irq_cpu_mask),
        ('dir', prog_args.dirs),
        ('dev', prog_args.devs),
    ]
    prog_options = {key: value for key, value in candidates if value}

    # write_back_cache is tri-state: omit only when it was never requested.
    if prog_args.set_write_back is not None:
        prog_options['write_back_cache'] = prog_args.set_write_back

    perftune_print(yaml.dump(prog_options, default_flow_style=False))
################################################################################
args = argp.parse_args()

# Sanity check: --write-back-cache must parse as a boolean-like string.
try:
    if args.set_write_back:
        args.set_write_back = distutils.util.strtobool(args.set_write_back)
except:
    sys.exit("Invalid --write-back-cache value: should be boolean but given: {}".format(args.set_write_back))

dry_run_mode = args.dry_run
parse_options_file(args)

# if nothing needs to be configured - quit
if not args.tune:
    sys.exit("ERROR: At least one tune mode MUST be given.")

# There must be either 'mode' or an explicit 'irq_cpu_mask' given - not both
if args.mode and args.irq_cpu_mask:
    sys.exit("ERROR: Provide either tune mode or IRQs CPU mask - not both.")

# set default values #####################
if not args.nics:
    args.nics = ['eth0']

if not args.cpu_mask:
    args.cpu_mask = run_hwloc_calc(['all'])
##########################################

# Sanity: irq_cpu_mask should be a subset of cpu_mask
# (OR-ing the two masks must not add bits to cpu_mask)
if args.irq_cpu_mask and run_hwloc_calc([args.cpu_mask]) != run_hwloc_calc([args.cpu_mask, args.irq_cpu_mask]):
    sys.exit("ERROR: IRQ CPU mask({}) must be a subset of CPU mask({})".format(args.irq_cpu_mask, args.cpu_mask))

if args.dump_options_file:
    dump_config(args)
    sys.exit(0)

try:
    # Instantiate one tuner per requested subsystem.
    tuners = []

    if TuneModes.disks.name in args.tune:
        tuners.append(DiskPerfTuner(args))

    if TuneModes.net.name in args.tune:
        tuners.append(NetPerfTuner(args))

    if TuneModes.system.name in args.tune:
        tuners.append(SystemPerfTuner(args))

    # Set the minimum mode among all tuners
    if not args.irq_cpu_mask:
        mode = PerfTunerBase.SupportedModes.combine([tuner.mode for tuner in tuners])
        for tuner in tuners:
            tuner.mode = mode

    if args.get_cpu_mask or args.get_cpu_mask_quiet:
        # Print the compute mask from the first tuner - it's going to be the same in all of them
        perftune_print(tuners[0].compute_cpu_mask)
    else:
        # Tune the system
        restart_irqbalance(itertools.chain.from_iterable([ tuner.irqs for tuner in tuners ]))

        for tuner in tuners:
            tuner.tune()
except PerfTunerBase.CPUMaskIsZeroException as e:
    # Print a zero CPU set if --get-cpu-mask-quiet was requested.
    if args.get_cpu_mask_quiet:
        perftune_print("0x0")
    else:
        sys.exit("ERROR: {}. Your system can't be tuned until the issue is fixed.".format(e))
except Exception as e:
    sys.exit("ERROR: {}. Your system can't be tuned until the issue is fixed.".format(e))
|
apache-2.0
| -1,003,424,227,751,248,800
| 39.852902
| 264
| 0.611516
| false
|
onejgordon/cloud-memory
|
api.py
|
1
|
11636
|
import os, logging
from datetime import datetime,timedelta
import webapp2
from google.appengine.ext import db, blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import images, taskqueue, users, mail, search, urlfetch
import logging
from models import *
import tools
import services
import messages
import authorized
from errors import APIError
import json
import handlers
from apiclient import discovery
import httplib2
from oauth2client import client
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class PublicAPI(handlers.JsonRequestHandler):
    """Endpoints available without an authenticated session (Python 2 / GAE)."""

    @authorized.role()
    def forgot_password(self, email_or_phone, d):
        """Reset a user's password and email the new one to them."""
        success = False
        override_sitename = self.request.get('override_sitename')
        if email_or_phone:
            user = User.FuzzyGet(email_or_phone)
            if user:
                if user.email:
                    # setPass() generates and stores a fresh password.
                    new_password = user.setPass()
                    user.put()
                    success = True
                    if tools.on_dev_server():
                        # Dev-only: surface the new password in the logs.
                        logging.debug(new_password)
                    message = "Password reset successful - check your email"
                    prefix = EMAIL_PREFIX if not override_sitename else "[ %s ] " % override_sitename
                    deferred.defer(mail.send_mail, SENDER_EMAIL, user.email, prefix + "Password Reset", "Your password has been reset: %s. You can change this upon signing in." % new_password)
                else:
                    message = "No email address on file for that user. Please contact support."
            else:
                message = "User not found..."
        else:
            message = "Email or phone required"
        self.json_out(success=success, message=message)
class UserAPI(handlers.JsonRequestHandler):
    """
    CRUD endpoints for User entities (list / update-or-create / delete).
    """
    @authorized.role('api')
    def list(self, d):
        """Return up to 100 users."""
        message = None
        users = User.query().fetch(100)
        success = True
        data = {
            'users': [user.json() for user in users]
        }
        self.json_out(data, success=success, message=message)

    @authorized.role('api')
    def update(self, d):
        """Update an existing user (by id) or create a new one."""
        success = False
        message = None
        missing_scopes = []
        id = self.request.get_range('id')
        params = tools.gets(self, strings=['name','password','phone','email','location_text','currency'],
            integers=['level'], lists=['services_enabled'], json=['service_settings'], ignoreMissing=True)
        user = None
        isSelf = False
        if id:
            user = User.get_by_id(id)
        else:
            # No id given - create a fresh account from email/phone params.
            user = User.Create(email=params.get('email'), phone=params.get('phone'))
        if user:
            isSelf = user.key == self.user.key
            user.Update(**params)
            # OAuth scopes the user still has to grant for enabled services.
            missing_scopes = user.check_available_scopes()
            user.put()
            success = True
        if user:
            if isSelf:
                # Keep the session copy of the user in sync with the update.
                self.session['user'] = user
                message = "Profile saved"
            else:
                message = "User saved"
        else:
            message = "Problem creating user"
        data = {
            'user': user.json() if user else None,
        }
        if user and missing_scopes:
            # Frontend redirects here so the user can grant the extra scopes.
            data['oauth_uri'] = user.get_auth_uri(scope=' '.join(missing_scopes))
        self.json_out(data, success=success, message=message)

    @authorized.role('api')
    def delete(self, d):
        """Delete a user by id - admin only."""
        id = self.request.get_range('id')
        success = False
        if id:
            u = User.get_by_id(id)
            if u and self.user.is_admin():
                u.clean_delete()
                success = True
        self.json_out(success=success)
class APILogAPI(handlers.JsonRequestHandler):
    """Read-only access to recent API request logs."""

    @authorized.role('api')
    def list(self, d):
        """Return the most recent API log records (capped at 500)."""
        success = False
        message = None
        _max = self.request.get_range('max', max_value=500, default=100)
        apilogs = APILog.Recent(_max=_max)
        success = True
        data = {
            'logs': [r.json() for r in apilogs]
        }
        self.json_out(data, success=success, message=message)
class UploadMedia(handlers.BaseUploadHandler):
    """
    Blobstore/GCS upload callback: attaches uploaded file object names to a
    Topic entity property. Responds with JSON on success, 500 + message on
    any failure.
    """
    def post(self):
        try:
            tid = self.request.get_range('tid')
            prop = self.request.get('prop')
            file_infos = self.get_file_infos()
            user = self.session.get('user')
            dbp = []   # entities to persist
            urls = []
            if tid and user:
                t = Topic.get_by_id(tid)
                if t:
                    if len(file_infos):
                        for fi in file_infos:
                            if fi and fi.gs_object_name:
                                # Store the GCS object name on the requested property.
                                params = {};
                                params[prop] = fi.gs_object_name;
                                t.Update(**params)
                                dbp.append(t)
                            else: raise Exception("Malformed 2")
                    else: raise Exception("No file data found")
                else: raise Exception("Topic not found with ID %s. User: %s" % (tid, user))
                if dbp:
                    ndb.put_multi(dbp)
            else: raise Exception("Malformed")
        except Exception, e:
            logging.error(e)
            self.response.out.write("Error: %s" % e)
            self.response.set_status(500)
        else:
            # Runs only when no exception was raised above.
            if dbp:
                self.response.out.write(json.dumps({'media': [p.json() for p in dbp]}))
            else:
                self.response.out.write("OK")
class Logout(handlers.JsonRequestHandler):
    """Clear the whole session to log the user out."""
    def post(self):
        if self.session.has_key('user'):
            # Python 2: keys() returns a list, so deleting while iterating
            # over it is safe here.
            for key in self.session.keys():
                del self.session[key]
        self.json_out({'success': True})
class AuthenticateAPI(handlers.BaseRequestHandler):
    """
    Google OAuth2 login flow: 'login' returns the authorization URI,
    'oauth2callback' exchanges the code for credentials and signs the
    user in (creating the account on first login).
    """
    @authorized.role()
    def action(self, action, d):
        base = "http://localhost:8080" if tools.on_dev_server() else BASE
        if action == 'login':
            scope = "email profile"
            flow = User.get_auth_flow(scope=scope)
            # Request a refresh token and keep previously granted scopes.
            flow.params['access_type'] = 'offline'
            flow.params['include_granted_scopes'] = 'true'
            auth_uri = flow.step1_get_authorize_url(state=scope)
            self.json_out({'auth_uri': auth_uri}, success=True, debug=True)
        elif action == 'oauth2callback':
            error = self.request.get('error')
            code = self.request.get('code')
            scope = self.request.get('scope')
            state_scopes = self.request.get('state')
            if code:
                CLIENT_SECRET_FILE = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
                # Exchange the authorization code for OAuth2 credentials.
                credentials = client.credentials_from_clientsecrets_and_code(
                    CLIENT_SECRET_FILE,
                    scope.split(' '),
                    code,
                    redirect_uri=base + "/api/auth/oauth2callback")
                user = self.user
                if not user:
                    email = credentials.id_token['email']
                    user = User.GetByEmail(email)
                    if not user:
                        # Create account
                        user = User.Create(email=email)
                if user:
                    user.save_credentials(credentials)
                    user.put()
                    self.session['user'] = user
            elif error:
                logging.error(error)
            self.redirect("/app/settings")
def background_service_fetch(uid, mckeys=None, limit=20):
    '''Fetch data from all requested services and store to memcache -- may be slow.

    Each mckey has the form "<service>:<ISO date>"; a matching
    ServiceFetcher_<service> class from the services module is used to fetch
    the items, and the result (or the error) is cached per key.
    '''
    user = User.get_by_id(int(uid))
    if user and mckeys:
        http_auth = user.get_http_auth()
        if http_auth:
            to_cache = {}
            for mckey in mckeys:
                to_cache[mckey] = {
                    'items': [],
                    'status': SERVICE.LOADING,
                    'issue': None
                }
            # Set loading status
            memcache.set_multi(to_cache)
            for mckey in mckeys:
                svc, date = mckey.split(':')
                date_dt = tools.fromISODate(date)
                next_date_dt = date_dt + timedelta(days=1)
                items = []
                issue = None
                try:
                    # Resolve the fetcher class for this service by name.
                    fetcher_class = getattr(services, 'ServiceFetcher_%s' % svc)
                    if issubclass(fetcher_class, services.ServiceFetcher):
                        fetcher = fetcher_class(user=user, date_dt=date_dt, next_date_dt=next_date_dt, http_auth=http_auth, limit=limit)
                        items = fetcher.fetch()
                        success = True  # NOTE(review): this flag is never read
                    else:
                        logging.error("Failed to get fetcher_class for %s" % svc)
                except Exception, e:
                    issue = "Error fetching from %s - %s" % (svc, e)
                # Overwrite the LOADING placeholder with the final result.
                to_cache = {
                    'items': items,
                    'status': SERVICE.LOADED if not issue else SERVICE.ERROR,
                    'issue': issue
                }
                memcache.set(mckey, to_cache, time=MEMCACHE_EXPIRE_SECS)
                if date:
                    # Log search
                    DaySearch.Increment(user=user, date=date)
class FetchAPI(handlers.BaseRequestHandler):
    """API handler that serves cached service data, deferring a background
    fetch for any requested keys that are not in memcache yet."""

    @authorized.role('api')
    def fetch(self, d):
        ok = False
        msg = None
        requested = self.request.get('mckeys').split(',')
        date = self.request.get('date')
        limit = self.request.get_range('limit', max_value=100)
        cached = memcache.get_multi(requested)
        # Anything missing from memcache needs a (deferred) refetch.
        missing = [key for key in requested if key not in cached]
        if date and missing:
            deferred.defer(background_service_fetch,
                           uid=self.user.key.id(),
                           mckeys=missing, limit=limit)
            msg = "Beginning fetch..."
            ok = True
        self.json_out(cached, success=ok, message=msg)
# TODO: Timezone
class ServiceConfigureAPI(handlers.BaseRequestHandler):
    """API handler that runs a service's one-off configuration routine.

    Looks up a module-level function named config_<svc_key> in the
    services module and calls it with the current user's authorized
    HTTP client.
    """

    @authorized.role('api')
    def configure(self, svc_key, d):
        success = False
        message = None
        http_auth = self.user.get_http_auth()
        results = {}
        if http_auth:
            try:
                config_fn = getattr(services, 'config_%s' % svc_key)
                if callable(config_fn):
                    results = config_fn(self.user, http_auth)
                    success = True
            # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
            except Exception as e:
                message = "Error configuring %s - %s" % (svc_key, e)
        self.json_out(results, success=success, message=message)
class SearchesAPI(handlers.BaseRequestHandler):
    """API handlers for starring a day search and listing starred ones."""

    @authorized.role('api')
    def star(self, d):
        message = None
        date = self.request.get('date')
        # star=1 stars the date; star=0 removes the star.
        starring = self.request.get_range('star', default=1) == 1
        success, ds = DaySearch.Star(user=self.user, date=date,
                                     do_star=starring)
        starred = ds.starred if ds else False
        self.json_out({
            'date': date,
            'starred': starred
        }, success=success, message=message)

    @authorized.role('api')
    def starred(self, d):
        message = None
        starred_searches = DaySearch.Starred(user=self.user)
        payload = {'searches': [s.json() for s in starred_searches]}
        self.json_out(payload, success=True, message=message)
|
mit
| 4,608,389,611,228,572,700
| 33.838323
| 192
| 0.528618
| false
|
flant/dapp
|
pkg/build/builder/ansible/callback/werf.py
|
1
|
2679
|
# -*- coding: utf-8 -*-
# (c) 2018, Ivan Mikheykin <ivan.mikheykin@flant.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: werf
type: stdout
short_description: Print related werf config section in case of task failure
version_added: "2.4"
description:
- Solo mode with live stdout for raw and script tasks
- Werf specific error messages
requirements:
- set as stdout callback in configuration
'''
from callback.live import CallbackModule as CallbackModule_live
from callback.live import vt100, lColor
from ansible import constants as C
from ansible.utils.color import stringc
import os
import json
class CallbackModule(CallbackModule_live):
    """Live stdout callback extended with werf-specific failure output.

    On task failure, prints the werf config section related to the failed
    task (looked up via the task's last tag) together with the doc text
    that werf dumped into the file named by WERF_DUMP_CONFIG_DOC_PATH.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'werf'

    def __init__(self):
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.super_ref.v2_runner_on_failed(result, ignore_errors)
        # get config sections from werf
        # task config text is in a last tag
        # doctext is in a file WERF_DUMP_CONFIG_DOC_PATH
        self._display_werf_config(result._task)

    def _read_dump_config_doc(self):
        """Read the JSON dump from WERF_DUMP_CONFIG_DOC_PATH.

        Returns an empty dict when the variable is unset or the file is
        missing, unreadable, or not valid JSON.
        """
        if 'WERF_DUMP_CONFIG_DOC_PATH' not in os.environ:
            return {}
        dump_path = os.environ['WERF_DUMP_CONFIG_DOC_PATH']
        try:
            # The with-statement guarantees the handle is closed even if
            # json parsing fails (the previous version leaked it then).
            with open(dump_path, 'r') as fh:
                return json.load(fh)
        except (EnvironmentError, ValueError):
            # EnvironmentError covers IOError/OSError (both Py2 and Py3);
            # ValueError covers malformed JSON (JSONDecodeError subclasses it).
            return {}

    # werf_stage_name commented for consistency with werffile-yml behaviour
    def _display_werf_config(self, task):
        """Print the config section keyed by the failed task's last tag."""
        tags = task.tags
        if not tags or len(tags) == 0:
            return
        # last tag is a key to a section dump in dump_config
        dump_config_section_key = tags[-1]
        dump_config = self._read_dump_config_doc()
        dump_config_doc = dump_config.get('dump_config_doc', '')
        dump_config_sections = dump_config.get('dump_config_sections', {})
        dump_config_section = dump_config_sections.get(dump_config_section_key, '')
        self.LogArgs(
            u"\n",
            lColor.COLOR_DEBUG, u"Failed task configuration:\n\n", vt100.reset,
            stringc(dump_config_section, C.COLOR_DEBUG),
            u"\n",
            stringc(dump_config_doc, C.COLOR_DEBUG),
            u"\n")
|
apache-2.0
| -6,994,132,084,483,275,000
| 31.670732
| 92
| 0.628593
| false
|
gatsinski/kindergarten-management-system
|
kindergarten_management_system/kms/contrib/cms_carousel/migrations/0001_initial.py
|
1
|
1898
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.image
class Migration(migrations.Migration):
    # Initial (auto-generated) migration for the cms_carousel app.  It
    # registers two django-cms plugin models: a carousel container and the
    # images shown inside it.

    dependencies = [
        # Both models subclass cms.CMSPlugin, and the image model holds a
        # FK to filer.Image, so those apps must be migrated first.
        ('cms', '0016_auto_20160608_1535'),
        ('filer', '0007_auto_20161016_1055'),
    ]

    operations = [
        migrations.CreateModel(
            # Container plugin: one per carousel, identified by title/slug.
            name='CarouselContainerPluginModel',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(related_name='cms_carousel_carouselcontainerpluginmodel', parent_link=True, auto_created=True, serialize=False, primary_key=True, to='cms.CMSPlugin')),
                ('title', models.CharField(max_length=254, verbose_name='Title')),
                ('slug', models.SlugField(max_length=254, verbose_name='Slug')),
            ],
            options={
                'verbose_name_plural': 'Carousels',
                'verbose_name': 'Carousel',
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.CreateModel(
            # Image plugin: a single slide with optional caption text.
            # NOTE(review): verbose_name 'Text' on the title field looks
            # like a copy-paste slip, but editing an applied migration
            # would desync migration history -- fix it in the model plus a
            # new migration instead.
            name='CarouselImagePluginModel',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(related_name='cms_carousel_carouselimagepluginmodel', parent_link=True, auto_created=True, serialize=False, primary_key=True, to='cms.CMSPlugin')),
                ('title', models.CharField(max_length=254, verbose_name='Text', blank=True)),
                ('text', models.TextField(max_length=1000, verbose_name='Text', blank=True)),
                ('image', filer.fields.image.FilerImageField(related_name='carousel_images', on_delete=django.db.models.deletion.PROTECT, verbose_name='Image', to='filer.Image')),
            ],
            options={
                'verbose_name_plural': 'Carousel images',
                'verbose_name': 'Carousel image',
            },
            bases=('cms.cmsplugin',),
        ),
    ]
|
gpl-3.0
| -9,153,950,609,728,425,000
| 42.136364
| 206
| 0.591149
| false
|
yousrabk/mne-python
|
mne/viz/ica.py
|
1
|
30591
|
"""Functions to plot ICA specific data (besides topographies)
"""
from __future__ import print_function
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
from functools import partial
import numpy as np
from .utils import (tight_layout, _prepare_trellis, _select_bads,
_layout_figure, _plot_raw_onscroll, _mouse_click,
_helper_raw_resize, _plot_raw_onkey, plt_show)
from .raw import _prepare_mne_browse_raw, _plot_raw_traces
from .epochs import _prepare_mne_browse_epochs
from .evoked import _butterfly_on_button_press, _butterfly_onpick
from .topomap import _prepare_topo_plot, plot_topomap
from ..utils import logger
from ..defaults import _handle_default
from ..io.meas_info import create_info
from ..io.pick import pick_types
from ..externals.six import string_types
def _ica_plot_sources_onpick_(event, sources=None, ylims=None):
    """Onpick callback for plot_ica_panel: show the picked source alone."""
    # Only react to genuine left-button clicks inside an axes; this keeps
    # the OS-X swipe gesture from opening a pile of figures.
    mouse = event.mouseevent
    if mouse.inaxes is None or mouse.button != 1:
        return
    picked = event.artist
    try:
        import matplotlib.pyplot as plt
        plt.figure()
        src_idx = picked._mne_src_idx
        component = picked._mne_component
        trace_color = 'r' if picked._mne_is_bad else 'k'
        plt.plot(sources[src_idx], trace_color)
        plt.ylim(ylims)
        plt.grid(linestyle='-', color='gray', linewidth=.25)
        plt.title('ICA #%i' % component)
    except Exception as err:
        # matplotlib silently swallows exceptions raised in event
        # handlers, so make the failure visible before re-raising.
        print(err)
        raise err
def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
                     stop=None, show=True, title=None, block=False):
    """Plot estimated latent sources given the unmixing matrix.

    Typical usecases:

    1. plot evolution of latent sources over time based on (Raw input)
    2. plot latent source around event related time windows (Epochs input)
    3. plot time-locking in ICA space (Evoked input)

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA solution.
    inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
        The object to plot the sources from.
    picks : int | array_like of int | None.
        The components to be displayed. If None, plot will show the
        sources in the order as fitted.
    exclude : array_like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    start : int
        X-axis start index. If None, from the beginning.
    stop : int
        X-axis stop index. If None, next 20 are shown, in case of evoked to the
        end.
    show : bool
        Show figure if True.
    title : str | None
        The figure title. If None a default is provided.
    block : bool
        Whether to halt program execution until the figure is closed.
        Useful for interactive selection of components in raw and epoch
        plotter. For evoked, this parameter has no effect. Defaults to False.

    Returns
    -------
    fig : instance of pyplot.Figure
        The figure.

    Notes
    -----
    For raw and epoch instances, it is possible to select components for
    exclusion by clicking on the line. The selected components are added to
    ``ica.exclude`` on close.

    .. versionadded:: 0.10.0
    """
    from ..io.base import _BaseRaw
    from ..evoked import Evoked
    from ..epochs import _BaseEpochs

    if exclude is None:
        exclude = ica.exclude
    elif len(ica.exclude) > 0:
        # merge caller-supplied exclusions with those stored on the ICA
        exclude = np.union1d(ica.exclude, exclude)
    if isinstance(inst, _BaseRaw):
        fig = _plot_sources_raw(ica, inst, picks, exclude, start=start,
                                stop=stop, show=show, title=title,
                                block=block)
    elif isinstance(inst, _BaseEpochs):
        fig = _plot_sources_epochs(ica, inst, picks, exclude, start=start,
                                   stop=stop, show=show, title=title,
                                   block=block)
    elif isinstance(inst, Evoked):
        sources = ica.get_sources(inst)
        if start is not None or stop is not None:
            inst = inst.crop(start, stop, copy=True)
        fig = _plot_ica_sources_evoked(
            evoked=sources, picks=picks, exclude=exclude, title=title,
            labels=getattr(ica, 'labels_', None), show=show)
    else:
        # BUG FIX: the message previously omitted Evoked even though an
        # Evoked input is supported (handled in the branch above).
        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
    return fig
def _plot_ica_grid(sources, start, stop,
                   source_idx, ncol, exclude,
                   title, show):
    """Create panel plots of ICA sources.

    Clicking on the plot of an individual source opens a new figure showing
    the source.

    Parameters
    ----------
    sources : ndarray
        Sources as drawn from ica.get_sources.
    start : int
        x-axis start index. If None from the beginning.
        NOTE(review): currently unused -- the full range is always shown.
    stop : int
        x-axis stop index. If None to the end.
        NOTE(review): currently unused -- the full range is always shown.
    source_idx : array-like
        Indices for subsetting the sources.
    ncol : int
        Number of panel-columns.
    exclude : array-like
        Components marked for exclusion (drawn in red).
    title : str
        The figure title. If None a default is provided.
    show : bool
        If True, all open plots will be shown.

    Returns
    -------
    fig : instance of matplotlib figure
    """
    import matplotlib.pyplot as plt

    if source_idx is None:
        source_idx = np.arange(len(sources))
    elif isinstance(source_idx, list):
        source_idx = np.array(source_idx)
    if exclude is None:
        exclude = []
    n_components = len(sources)
    # common axis limits across all panels for comparability
    ylims = sources.min(), sources.max()
    xlims = np.arange(sources.shape[-1])[[0, -1]]
    fig, axes = _prepare_trellis(n_components, ncol)
    if title is None:
        fig.suptitle('Reconstructed latent sources', size=16)
    elif title:
        fig.suptitle(title, size=16)
    plt.subplots_adjust(wspace=0.05, hspace=0.05)
    my_iter = enumerate(zip(source_idx, axes, sources))
    for i_source, (i_selection, ax, source) in my_iter:
        component = '[%i]' % i_selection
        # plot and embed idx and comp. name to use in the pick callback
        color = 'r' if i_selection in exclude else 'k'
        # very large picker tolerance: a click anywhere on the axes picks the line
        line = ax.plot(source, linewidth=0.5, color=color, picker=1e9)[0]
        # stash metadata on the artist for _ica_plot_sources_onpick_
        vars(line)['_mne_src_idx'] = i_source
        vars(line)['_mne_component'] = i_selection
        vars(line)['_mne_is_bad'] = i_selection in exclude
        ax.set_xlim(xlims)
        ax.set_ylim(ylims)
        ax.text(0.05, .95, component, transform=ax.transAxes,
                verticalalignment='top')
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.setp(ax.get_yticklabels(), visible=False)
    # register callback
    callback = partial(_ica_plot_sources_onpick_, sources=sources, ylims=ylims)
    fig.canvas.mpl_connect('pick_event', callback)
    plt_show(show)
    return fig
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, labels=None):
    """Plot average over epochs in ICA space.

    Parameters
    ----------
    evoked : instance of mne.Evoked
        The Evoked to be used.
    picks : int | array_like of int | None.
        The components to be displayed. If None, plot will show the
        sources in the order as fitted.
    exclude : array_like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    title : str
        The figure title.
    show : bool
        Show figure if True.
    labels : None | dict
        The ICA labels attribute.

    Returns
    -------
    fig : instance of matplotlib figure
    """
    import matplotlib.pyplot as plt
    if title is None:
        title = 'Reconstructed latent sources, time-locked'
    fig, axes = plt.subplots(1)
    ax = axes
    axes = [axes]
    idxs = [0]
    times = evoked.times * 1e3  # display in milliseconds
    # plot unclassified sources and label excluded ones
    lines = list()
    texts = list()
    if picks is None:
        picks = np.arange(evoked.data.shape[0])
    picks = np.sort(picks)
    idxs = [picks]
    color = 'r'
    if labels is not None:
        # keep only top-level labels ('/' marks channel-specific sublabels)
        labels_used = [k for k in labels if '/' not in k]
    # Build one legend entry per excluded component; None for kept ones.
    exclude_labels = list()
    for ii in picks:
        if ii in exclude:
            line_label = 'ICA %03d' % (ii + 1)
            if labels is not None:
                annot = list()
                for this_label in labels_used:
                    indices = labels[this_label]
                    if ii in indices:
                        annot.append(this_label)
                line_label += (' - ' + ', '.join(annot))
            exclude_labels.append(line_label)
        else:
            exclude_labels.append(None)
    if labels is not None:
        # one rainbow color per distinct label annotation
        unique_labels = set([k.split(' - ')[1] for k in exclude_labels if k])
        label_colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_labels)))
        label_colors = dict(zip(unique_labels, label_colors))
    else:
        label_colors = dict((k, 'red') for k in exclude_labels)
    for exc_label, ii in zip(exclude_labels, picks):
        if exc_label is not None:
            # use the annotation part of the label as the color key
            if labels is not None:
                exc_label = exc_label.split(' - ')[1]
            color = label_colors[exc_label]
            # excluded traces drawn on top (zorder=1), colored and labeled
            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
                                 zorder=1, color=color, label=exc_label))
        else:
            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
                                 color='k', zorder=0))
    ax.set_title(title)
    ax.set_xlim(times[[0, -1]])
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('(NA)')
    if len(exclude) > 0:
        plt.legend(loc='best')
    tight_layout(fig=fig)
    # for old matplotlib, we actually need this to have a bounding
    # box (!), so we have to put some valid text here, change
    # alpha and path effects later
    texts.append(ax.text(0, 0, 'blank', zorder=2,
                         verticalalignment='baseline',
                         horizontalalignment='left',
                         fontweight='bold', alpha=0))
    # this is done to give the structure of a list of lists of a group of lines
    # in each subplot
    lines = [lines]
    ch_names = evoked.ch_names
    from matplotlib import patheffects
    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
                                           alpha=0.75)]
    # butterfly-style interaction: clicking a trace reveals its name
    params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
                  ch_names=ch_names, need_draw=False,
                  path_effects=path_effects)
    fig.canvas.mpl_connect('pick_event',
                           partial(_butterfly_onpick, params=params))
    fig.canvas.mpl_connect('button_press_event',
                           partial(_butterfly_on_button_press,
                                   params=params))
    plt_show(show)
    return fig
def plot_ica_scores(ica, scores,
                    exclude=None, labels=None,
                    axhline=None,
                    title='ICA component scores',
                    figsize=(12, 6), show=True):
    """Plot scores related to detected components.

    Use this function to asses how well your score describes outlier
    sources and how well you were detecting them.

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA object.
    scores : array_like of float, shape (n ica components) | list of arrays
        Scores based on arbitrary metric to characterize ICA components.
    exclude : array_like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    labels : str | list | 'ecg' | 'eog' | None
        The labels to consider for the axes tests. Defaults to None.
        If list, should match the outer shape of `scores`.
        If 'ecg' or 'eog', the labels_ attributes will be looked up.
        Note that '/' is used internally for sublabels specifying ECG and
        EOG channels.
    axhline : float
        Draw horizontal line to e.g. visualize rejection threshold.
    title : str
        The figure title.
    figsize : tuple of int
        The figure size. Defaults to (12, 6).
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of matplotlib.pyplot.Figure
        The figure object
    """
    import matplotlib.pyplot as plt
    my_range = np.arange(ica.n_components_)
    if exclude is None:
        exclude = ica.exclude
    exclude = np.unique(exclude)
    # a single flat score array becomes a one-element list of arrays
    if not isinstance(scores[0], (list, np.ndarray)):
        scores = [scores]
    n_rows = len(scores)
    figsize = (12, 6) if figsize is None else figsize
    fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
    if isinstance(axes, np.ndarray):
        axes = axes.flatten()
    else:
        axes = [axes]
    plt.suptitle(title)
    if labels == 'ecg':
        labels = [l for l in ica.labels_ if l.startswith('ecg/')]
    elif labels == 'eog':
        labels = [l for l in ica.labels_ if l.startswith('eog/')]
        labels.sort(key=lambda l: l.split('/')[1])  # sort by index
    elif isinstance(labels, string_types):
        if len(axes) > 1:
            raise ValueError('Need as many labels as axes (%i)' % len(axes))
        labels = [labels]
    elif isinstance(labels, (tuple, list)):
        if len(labels) != len(axes):
            raise ValueError('Need as many labels as axes (%i)' % len(axes))
    elif labels is None:
        # BUG FIX: this used to be the 2-tuple (None, None), which silently
        # truncated the zip below to two axes when more than two score
        # arrays were passed without labels.
        labels = [None] * n_rows
    for label, this_scores, ax in zip(labels, scores, axes):
        if len(my_range) != len(this_scores):
            raise ValueError('The length of `scores` must equal the '
                             'number of ICA components.')
        ax.bar(my_range, this_scores, color='w')
        # highlight excluded components in red
        for excl in exclude:
            ax.bar(my_range[excl], this_scores[excl], color='r')
        if axhline is not None:
            if np.isscalar(axhline):
                axhline = [axhline]
            for axl in axhline:
                ax.axhline(axl, color='r', linestyle='--')
        ax.set_ylabel('score')
        if label is not None:
            # compact the internal 'kind/idx/ch_name' label for display
            if 'eog/' in label:
                split = label.split('/')
                label = ', '.join([split[0], split[2]])
            elif '/' in label:
                label = ', '.join(label.split('/'))
            ax.set_title('(%s)' % label)
        ax.set_xlabel('ICA components')
        ax.set_xlim(0, len(this_scores))
    tight_layout(fig=fig)
    if len(axes) > 1:
        plt.subplots_adjust(top=0.9)
    plt_show(show)
    return fig
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
                     stop=None, title=None, show=True):
    """Overlay of raw and cleaned signals given the unmixing matrix.

    This method helps visualizing signal quality and artifact rejection.

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA object.
    inst : instance of mne.io.Raw or mne.Evoked
        The signals to be compared given the ICA solution. If Raw input,
        The raw data are displayed before and after cleaning. In a second
        panel the cross channel average will be displayed. Since dipolar
        sources will be canceled out this display is sensitive to
        artifacts. If evoked input, butterfly plots for clean and raw
        signals will be superimposed.
    exclude : array_like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    picks : array-like of int | None (default)
        Indices of channels to include (if None, all channels
        are used that were included on fitting).
    start : int
        X-axis start index. If None from the beginning.
    stop : int
        X-axis stop index. If None to the end.
    title : str
        The figure title.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of pyplot.Figure
        The figure.
    """
    # avoid circular imports
    from ..io.base import _BaseRaw
    from ..evoked import Evoked
    from ..preprocessing.ica import _check_start_stop

    if not isinstance(inst, (_BaseRaw, Evoked)):
        raise ValueError('Data input must be of Raw or Evoked type')
    if title is None:
        title = 'Signals before (red) and after (black) cleaning'
    if picks is None:
        # default to the channels the ICA was fitted on
        picks = [inst.ch_names.index(k) for k in ica.ch_names]
    if exclude is None:
        exclude = ica.exclude
    if isinstance(inst, _BaseRaw):
        if start is None:
            start = 0.0
        if stop is None:
            stop = 3.0
        ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
        # convert start/stop in seconds to sample indices for slicing
        start_compare, stop_compare = _check_start_stop(inst, start, stop)
        data, times = inst[picks, start_compare:stop_compare]
        # apply on a copy so the caller's raw stays untouched
        raw_cln = ica.apply(inst, exclude=exclude, start=start, stop=stop,
                            copy=True)
        data_cln, _ = raw_cln[picks, start_compare:stop_compare]
        fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
                                    times=times * 1e3, title=title,
                                    ch_types_used=ch_types_used, show=show)
    elif isinstance(inst, Evoked):
        if start is not None and stop is not None:
            inst = inst.crop(start, stop, copy=True)
        if picks is not None:
            inst.pick_channels([inst.ch_names[p] for p in picks])
        evoked_cln = ica.apply(inst, exclude=exclude, copy=True)
        fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
                                       title=title, show=show)

    return fig
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
    """Plot raw data before and after ICA cleaning.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)
        Data before cleaning.
    data_cln : ndarray, shape (n_channels, n_times)
        Data after cleaning.
    times : ndarray
        Time points in milliseconds (the caller passes ``times * 1e3``).
    title : str
        The figure title.
    ch_types_used : list of str
        Channel types present ('mag', 'grad', 'eeg'); shown in the
        average-panel title.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of pyplot.Figure
    """
    import matplotlib.pyplot as plt
    # Top panel: the traces before (red) and after (black) cleaning.
    # Bottom panel: the cross-channel average, which is sensitive to
    # artifacts because dipolar sources cancel out in the average.
    assert data.shape == data_cln.shape
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    plt.suptitle(title)
    ax1.plot(times, data.T, color='r')
    ax1.plot(times, data_cln.T, color='k')
    # BUG FIX: the label said 'time (s)' although times are in ms; also
    # removed a duplicated set_xlim call.
    ax1.set_xlabel('time (ms)')
    ax1.set_xlim(times[0], times[-1])
    ax1.set_title('Raw data')
    _ch_types = {'mag': 'Magnetometers',
                 'grad': 'Gradiometers',
                 'eeg': 'EEG'}
    ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
    ax2.set_title('Average across channels ({0})'.format(ch_types))
    ax2.plot(times, data.mean(0), color='r')
    ax2.plot(times, data_cln.mean(0), color='k')
    # BUG FIX: removed a dead 'ax2.set_xlim(100, 106)' that was
    # immediately overridden by the full-range limit below.
    ax2.set_xlabel('time (ms)')
    ax2.set_xlim(times[0], times[-1])
    tight_layout(fig=fig)
    fig.subplots_adjust(top=0.90)
    fig.canvas.draw()
    plt_show(show)
    return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
    """Plot evoked data before and after ICA cleaning.

    Parameters
    ----------
    evoked : instance of mne.Evoked
        The evoked data before cleaning.
    evoked_cln : instance of mne.Evoked
        The evoked data after cleaning.
    title : str
        The figure title.
        NOTE(review): currently unused -- the suptitle below is hard-coded.
    show : bool
        If True, all open plots will be shown.

    Returns
    -------
    fig : instance of pyplot.Figure
    """
    import matplotlib.pyplot as plt
    ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
    n_rows = len(ch_types_used)
    ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
                         c in evoked_cln]
    if len(ch_types_used) != len(ch_types_used_cln):
        raise ValueError('Raw and clean evokeds must match. '
                         'Found different channels.')
    fig, axes = plt.subplots(n_rows, 1)
    fig.suptitle('Average signal before (red) and after (black) ICA')
    axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
    # Plot the uncleaned evoked first, then recolor all of its lines red
    # so the cleaned (black) traces drawn on top stand out.
    evoked.plot(axes=axes, show=show)
    for ax in fig.axes:
        for l in ax.get_lines():
            l.set_color('r')
    fig.canvas.draw()
    evoked_cln.plot(axes=axes, show=show)
    tight_layout(fig=fig)
    fig.subplots_adjust(top=0.90)
    fig.canvas.draw()
    plt_show(show)
    return fig
def _plot_sources_raw(ica, raw, picks, exclude, start, stop, show, title,
                      block):
    """Function for plotting the ICA components as raw array.

    Builds a raw-browser figure whose "channels" are the ICA sources,
    with any EOG/ECG channels appended for reference.  Components
    clicked bad are written back to ``ica.exclude`` on window close.
    """
    color = _handle_default('color', (0., 0., 0.))
    # NOTE(review): the 0.2 factor appears to be a display scaling for the
    # source traces -- confirm against the browser's 'misc' scalings.
    orig_data = ica._transform_raw(raw, 0, len(raw.times)) * 0.2
    if picks is None:
        picks = range(len(orig_data))
    types = ['misc' for _ in picks]
    picks = list(sorted(picks))
    # append EOG/ECG channels (if any) as reference traces
    eog_chs = pick_types(raw.info, meg=False, eog=True, ref_meg=False)
    ecg_chs = pick_types(raw.info, meg=False, ecg=True, ref_meg=False)
    data = [orig_data[pick] for pick in picks]
    c_names = ['ICA %03d' % x for x in range(len(orig_data))]
    for eog_idx in eog_chs:
        c_names.append(raw.ch_names[eog_idx])
        types.append('eog')
    for ecg_idx in ecg_chs:
        c_names.append(raw.ch_names[ecg_idx])
        types.append('ecg')
    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
    if len(extra_picks) > 0:
        eog_ecg_data, _ = raw[extra_picks, :]
        for idx in range(len(eog_ecg_data)):
            if idx < len(eog_chs):
                eog_ecg_data[idx] /= 150e-6  # scaling for eog
            else:
                eog_ecg_data[idx] /= 5e-4  # scaling for ecg
        data = np.append(data, eog_ecg_data, axis=0)
    # the extra channels get indices just past the ICA components
    for idx in range(len(extra_picks)):
        picks = np.append(picks, ica.n_components_ + idx)
    if title is None:
        title = 'ICA components'
    info = create_info([c_names[x] for x in picks], raw.info['sfreq'])
    # excluded components appear as "bads" in the browser
    info['bads'] = [c_names[x] for x in exclude]
    if start is None:
        start = 0
    if stop is None:
        stop = start + 20
        stop = min(stop, raw.times[-1])
    duration = stop - start
    if duration <= 0:
        raise RuntimeError('Stop must be larger than start.')
    t_end = int(duration * raw.info['sfreq'])
    times = raw.times[0:t_end]
    bad_color = (1., 0., 0.)
    inds = list(range(len(picks)))
    data = np.array(data)
    n_channels = min([20, len(picks)])
    params = dict(raw=raw, orig_data=data, data=data[:, 0:t_end],
                  ch_start=0, t_start=start, info=info, duration=duration,
                  ica=ica, n_channels=n_channels, times=times, types=types,
                  n_times=raw.n_times, bad_color=bad_color, picks=picks)
    _prepare_mne_browse_raw(params, title, 'w', color, bad_color, inds,
                            n_channels)
    params['scale_factor'] = 1.0
    params['plot_fun'] = partial(_plot_raw_traces, params=params, inds=inds,
                                 color=color, bad_color=bad_color)
    params['update_fun'] = partial(_update_data, params)
    params['pick_bads_fun'] = partial(_pick_bads, params=params)
    params['label_click_fun'] = partial(_label_clicked, params=params)
    _layout_figure(params)
    # callbacks
    callback_key = partial(_plot_raw_onkey, params=params)
    params['fig'].canvas.mpl_connect('key_press_event', callback_key)
    callback_scroll = partial(_plot_raw_onscroll, params=params)
    params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
    callback_pick = partial(_mouse_click, params=params)
    params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
    callback_resize = partial(_helper_raw_resize, params=params)
    params['fig'].canvas.mpl_connect('resize_event', callback_resize)
    # selections are written back to ica.exclude on window close
    callback_close = partial(_close_event, params=params)
    params['fig'].canvas.mpl_connect('close_event', callback_close)
    params['fig_proj'] = None
    params['event_times'] = None
    params['update_fun']()
    params['plot_fun']()
    try:
        plt_show(show, block=block)
    except TypeError:  # not all versions have this
        plt_show(show)
    return params['fig']
def _update_data(params):
"""Function for preparing the data on horizontal shift of the viewport."""
sfreq = params['info']['sfreq']
start = int(params['t_start'] * sfreq)
end = int((params['t_start'] + params['duration']) * sfreq)
params['data'] = params['orig_data'][:, start:end]
params['times'] = params['raw'].times[start:end]
def _pick_bads(event, params):
    """Toggle a clicked component's bad status and redraw."""
    current_bads = params['info']['bads']
    # _select_bads returns the updated bad list after this click
    params['info']['bads'] = _select_bads(event, params, current_bads)
    params['update_fun']()
    params['plot_fun']()
def _close_event(events, params):
"""Function for excluding the selected components on close."""
info = params['info']
c_names = ['ICA %03d' % x for x in range(params['ica'].n_components_)]
exclude = [c_names.index(x) for x in info['bads'] if x.startswith('ICA')]
params['ica'].exclude = exclude
def _plot_sources_epochs(ica, epochs, picks, exclude, start, stop, show,
                         title, block):
    """Function for plotting the components as epochs.

    Uses the epochs-browser machinery; ICA sources appear as 'misc'
    channels, with any EOG/ECG channels appended for reference.
    Components marked bad are written back to ``ica.exclude`` on close.
    """
    data = ica._transform_epochs(epochs, concatenate=True)
    eog_chs = pick_types(epochs.info, meg=False, eog=True, ref_meg=False)
    ecg_chs = pick_types(epochs.info, meg=False, ecg=True, ref_meg=False)
    c_names = ['ICA %03d' % x for x in range(ica.n_components_)]
    ch_types = np.repeat('misc', ica.n_components_)
    for eog_idx in eog_chs:
        c_names.append(epochs.ch_names[eog_idx])
        ch_types = np.append(ch_types, 'eog')
    for ecg_idx in ecg_chs:
        c_names.append(epochs.ch_names[ecg_idx])
        ch_types = np.append(ch_types, 'ecg')
    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
    if len(extra_picks) > 0:
        # concatenate epochs along time so the extra channels line up with
        # the concatenated ICA sources
        eog_ecg_data = np.concatenate(epochs.get_data()[:, extra_picks],
                                      axis=1)
        data = np.append(data, eog_ecg_data, axis=0)
    scalings = _handle_default('scalings_plot_raw')
    scalings['misc'] = 5.0
    info = create_info(ch_names=c_names, sfreq=epochs.info['sfreq'],
                       ch_types=ch_types)
    info['projs'] = list()
    # excluded components appear as "bads" in the browser
    info['bads'] = [c_names[x] for x in exclude]
    if title is None:
        title = 'ICA components'
    if picks is None:
        picks = list(range(ica.n_components_))
    if start is None:
        start = 0
    if stop is None:
        stop = start + 20
        stop = min(stop, len(epochs.events))
    for idx in range(len(extra_picks)):
        picks = np.append(picks, ica.n_components_ + idx)
    n_epochs = stop - start
    if n_epochs <= 0:
        raise RuntimeError('Stop must be larger than start.')
    # NOTE(review): keys such as 'n_epochs', 'inds', 'data', 'types' and
    # 'scalings' (used later by _update_epoch_data) are presumably filled
    # in by _prepare_mne_browse_epochs -- confirm.
    params = {'ica': ica,
              'epochs': epochs,
              'info': info,
              'orig_data': data,
              'bads': list(),
              'bad_color': (1., 0., 0.),
              't_start': start * len(epochs.times)}
    params['label_click_fun'] = partial(_label_clicked, params=params)
    _prepare_mne_browse_epochs(params, projs=list(), n_channels=20,
                               n_epochs=n_epochs, scalings=scalings,
                               title=title, picks=picks,
                               order=['misc', 'eog', 'ecg'])
    params['plot_update_proj_callback'] = _update_epoch_data
    _update_epoch_data(params)
    params['hsel_patch'].set_x(params['t_start'])
    # selections are written back to ica.exclude on window close
    callback_close = partial(_close_epochs_event, params=params)
    params['fig'].canvas.mpl_connect('close_event', callback_close)
    try:
        plt_show(show, block=block)
    except TypeError:  # not all versions have this
        plt_show(show)
    return params['fig']
def _update_epoch_data(params):
"""Function for preparing the data on horizontal shift."""
start = params['t_start']
n_epochs = params['n_epochs']
end = start + n_epochs * len(params['epochs'].times)
data = params['orig_data'][:, start:end]
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _close_epochs_event(events, params):
"""Function for excluding the selected components on close."""
info = params['info']
exclude = [info['ch_names'].index(x) for x in info['bads']
if x.startswith('ICA')]
params['ica'].exclude = exclude
def _label_clicked(pos, params):
    """Function for plotting independent components on click to label.

    Opens a topomap figure for the component whose trace sits at the
    clicked y-position, with one subplot per available channel type.
    """
    import matplotlib.pyplot as plt
    # map the click's y coordinate onto a plotted line index
    offsets = np.array(params['offsets']) + params['offsets'][0]
    line_idx = np.searchsorted(offsets, pos[1]) + params['ch_start']
    if line_idx >= len(params['picks']):
        return
    ic_idx = [params['picks'][line_idx]]
    types = list()
    info = params['ica'].info
    if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
        types.append('eeg')
    if len(pick_types(info, meg='mag', ref_meg=False)) > 0:
        types.append('mag')
    if len(pick_types(info, meg='grad', ref_meg=False)) > 0:
        types.append('grad')
    ica = params['ica']
    # back-project the component weights into sensor space
    data = np.dot(ica.mixing_matrix_[:, ic_idx].T,
                  ica.pca_components_[:ica.n_components_])
    data = np.atleast_2d(data)
    fig, axes = _prepare_trellis(len(types), max_col=3)
    for ch_idx, ch_type in enumerate(types):
        try:
            data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica,
                                                                    ch_type,
                                                                    None)
        except Exception as exc:
            # topomap preparation can fail (e.g. missing layout);
            # warn and close the half-built figure
            logger.warning(exc)
            plt.close(fig)
            return
        this_data = data[:, data_picks]
        ax = axes[ch_idx]
        if merge_grads:
            from ..channels.layout import _merge_grad_data
        for ii, data_ in zip(ic_idx, this_data):
            ax.set_title('IC #%03d ' % ii + ch_type, fontsize=12)
            # gradiometer pairs are merged (RMS) before plotting
            data_ = _merge_grad_data(data_) if merge_grads else data_
            plot_topomap(data_.flatten(), pos, axis=ax, show=False)
            ax.set_yticks([])
            ax.set_xticks([])
            ax.set_frame_on(False)
    tight_layout(fig=fig)
    fig.subplots_adjust(top=0.95)
    fig.canvas.draw()
    plt_show(True)
|
bsd-3-clause
| 7,818,122,220,882,212,000
| 36.397311
| 79
| 0.593475
| false
|
kernsuite-debian/lofar
|
MAC/Deployment/data/Coordinates/calc_coordinates.py
|
1
|
5787
|
#!/usr/bin/env python
# coding: iso-8859-15
import sys
import pgdb
import pg
from copy import deepcopy
from optparse import OptionParser
import getpass
from database import getDBname, getDBhost, getDBport, getDBuser
INTRO = """
Conversion between ETRS89 and ITRS2000 coordinates based on
Memo : Specifications for reference frame fixing in the analysis of a
EUREF GPS campaign
By Claude Boucher and Zuheir Altamimi
which is available from EUREF
In this utility I use the translational coefficients obtained by method "A" in
section 4 and the rotational coefficients in section 5, both for the 2000 (00)
reference frame.
"""
def subtract(a, b):
    """Return the element-wise difference a - b (truncated to the shorter input)."""
    result = []
    for left, right in zip(a, b):
        result.append(left - right)
    return result
def print_help():
    """Print command-line usage information for this utility."""
    usage_lines = (
        "Usage: calc_coordinates <stationname> <objecttype> date",
        " <objecttype>: LBA|HBA|HBA0|HBA1|marker",
        " <date> : yyyy.yy e.g. 2008.75 for Oct 1st 2008",
    )
    for line in usage_lines:
        print(line)
def solve(m, y):
    """
    Solve the linear system M x = y by Gauss-Jordan elimination.

    No pivoting is performed, which is acceptable here because the
    transformation matrices built in convert() are diagonally dominant.

    Parameters
    ----------
    m : list of list of float
        Square coefficient matrix.
    y : list of float
        Right-hand side vector, same length as m.

    Returns
    -------
    list of float
        The solution vector x.

    Raises
    ------
    ValueError
        If m is not square or its size does not match y.
    """
    dim = len(y)
    a = deepcopy(m)
    sol = deepcopy(y)
    if (len(a) != len(a[0])) or len(a[0]) != len(y):
        # BUG FIX: the original raised a plain string, which is a TypeError
        # at runtime in modern Python; raise a real exception instead.
        raise ValueError('Incompatible dimensions')
    for row in range(dim):
        # Normalise the pivot row so a[row][row] becomes 1.
        scale = 1. / float(a[row][row])
        a[row] = [x * scale for x in a[row]]
        sol[row] = scale * float(sol[row])
        # Eliminate this column from every other row.
        for ix in range(dim):
            if ix != row:
                factor = float(a[ix][row])
                # element-wise a[ix] -= factor * a[row] (inlined so the
                # routine is self-contained)
                a[ix] = [p - factor * float(q) for p, q in zip(a[ix], a[row])]
                a[ix][row] = 0.0
                sol[ix] -= factor * float(sol[row])
    return sol
def convert(xetrs, date_years, trans):
    """
    Transform an ETRS89 position into the ITRS2000 frame.

    Solves the equation
        /X\Etrs   /T0\   [[ 1     , -R2*dt,  R1*dt]  /X\Itrs2000
        |Y|     - |T1| =  [ R2*dt ,  1    , -R0*dt]  |Y|
        \Z/       \T2/    [ -R1*dt,  R0*dt,  1    ]] \Z/
    for the ITRS2000 coordinates.

    trans layout (one row from get_transformation_info):
        trans[0]   reference frame name
        trans[1:4] translation Tx, Ty, Tz in metres
        trans[4]   scale factor offset (matrix diagonal is trans[4] + 1)
        trans[5:8] rotation rates Rx, Ry, Rz in mas
    """
    translation = [float(t) for t in trans[1:4]]   # metres
    rotation = [float(t) for t in trans[5:8]]      # mas
    dt = date_years - 1989.0
    sf = float(trans[4]) + 1.
    matrix = [
        [sf, -rotation[2] * dt, rotation[1] * dt],
        [rotation[2] * dt, sf, -rotation[0] * dt],
        [-rotation[1] * dt, rotation[0] * dt, sf],
    ]
    shifted = subtract(xetrs, translation)
    return solve(matrix, shifted)
#
# MAIN
#
if __name__ == '__main__':
    parser = OptionParser("""Usage: %prog [options] <stationname> <objecttype> date
<objecttype>: LBA|HBA|HBA0|HBA1|marker
<date> : yyyy.yy e.g. 2008.75 for Oct 1st 2008""")
    parser.add_option("-D", "--database",
                      dest="dbName",
                      type="string",
                      default=getDBname(),
                      help="Name of StationCoordinates database to use")
    parser.add_option("-H", "--host",
                      dest="dbHost",
                      type="string",
                      default=getDBhost(),
                      help="Hostname of StationCoordinates database")
    parser.add_option("-P", "--port",
                      dest="dbPort",
                      type="int",
                      default=getDBport(),
                      help="Port of StationCoordinates database")
    parser.add_option("-U", "--user",
                      dest="dbUser",
                      type="string",
                      default=getDBuser(),
                      help="Username of StationCoordinates database")

    # parse arguments
    (options, args) = parser.parse_args()
    dbName = options.dbName
    dbHost = options.dbHost
    dbPort = options.dbPort
    dbUser = options.dbUser

    if len(args) != 3:
        parser.print_help()
        sys.exit(1)

    station_name = str(args[0]).upper()
    object_type = str(args[1]).upper()
    date_years = float(args[2])

    dbPassword = getpass.getpass()

    host = "{}:{}".format(dbHost, dbPort)
    db1 = pgdb.connect(user=dbUser, host=host, database=dbName, password=dbPassword)
    cursor = db1.cursor()
    # calling stored procedures only works from the pg module for some reason.
    db2 = pg.connect(user=dbUser, host=dbHost, dbname=dbName, port=dbPort, passwd=dbPassword)

    cursor.execute("select * from get_transformation_info('ITRF2005')")
    trans = cursor.fetchone()

    # BUG FIX: the original re-read sys.argv[1..3] here, which breaks as soon
    # as any -D/-H/-P/-U option is passed (the positionals shift). Use the
    # values already derived from the parsed positional arguments instead.
    cursor.execute("select * from get_ref_objects(%s, %s)", (station_name, object_type))
    print("\n%s %s %8.3f" % (station_name, object_type, date_years))

    while True:
        record = cursor.fetchone()
        if record is None:
            print('record even = None')
            break
        XEtrs = [float(record[4]),
                 float(record[5]),
                 float(record[6])]
        XItrs2000 = convert(XEtrs, date_years, trans)
        print("%s %d %14.6f %14.6f %14.6f" % (str(record[1]), record[2],
                                              XItrs2000[0], XItrs2000[1], XItrs2000[2]))
        db2.query("select * from add_gen_coord('%s','%s',%s,%s,%s,%s,%s,'%s')" %
                  (record[0], record[1], record[2],
                   XItrs2000[0], XItrs2000[1], XItrs2000[2], date_years, 'ITRF2005'))

    db1.close()
    db2.close()
    sys.exit(0)
|
gpl-3.0
| -5,707,530,155,973,449,000
| 31.880682
| 122
| 0.544496
| false
|
zoni/pushover-cli
|
pushover_cli.py
|
1
|
2324
|
#!/usr/bin/env python
# Copyright (c) 2013 Nick Groenen <nick@groenen.me>
import argparse
import chump
def main():
    """Parse command-line arguments and send a single Pushover notification."""
    parser = argparse.ArgumentParser(description="Simple pushover client")
    parser.add_argument('--token', required=True, help="your application's API token")
    parser.add_argument('--user', required=True, help="the user/group key (not e-mail address) of your user (or you)")
    parser.add_argument('--message', required=True, help="your message")
    parser.add_argument('--title', default=None, help="your message's title, otherwise your app's name is used")
    parser.add_argument('--url', default=None, help="a supplementary URL to show with your message")
    parser.add_argument('--url-title', default=None, help="a title for your supplementary URL, otherwise just the URL is shown")
    parser.add_argument('--device', default=None, help="your user's device name to send the message directly to that device, rather than all of the user's devices")
    parser.add_argument('--priority', default=0, help="send as -1 to always send as a quiet notification, 1 to display as high-priority and bypass the user's quiet hours, or 2 to also require confirmation from the user")
    parser.add_argument('--callback', default=None, help="a publicly-accessible URL the Pushover servers will send a request to when the user has acknowledged your notification")
    parser.add_argument('--retry', default=30, help="how often (in seconds) to repeat the notification to the user in case of an emergency priority")
    parser.add_argument('--expire', default=86400, help="how many seconds your notification will continue to be retried for (every retry seconds) in case of an emergency priority")
    parser.add_argument('--sound', default=None, help="the name of one of the sounds supported by device clients to override the user's default sound choice")
    parsed = parser.parse_args()

    application = chump.Application(parsed.token)
    recipient = application.get_user(parsed.user)
    delivery_options = dict(
        title=parsed.title,
        url=parsed.url,
        url_title=parsed.url_title,
        device=parsed.device,
        priority=parsed.priority,
        callback=parsed.callback,
        retry=parsed.retry,
        expire=parsed.expire,
        sound=parsed.sound,
    )
    recipient.send_message(parsed.message, **delivery_options)
# Only send when executed as a script; importing exposes main() without side effects.
if __name__ == "__main__":
    main()
|
mit
| -6,797,070,299,818,647,000
| 53.046512
| 220
| 0.70568
| false
|
aminhp93/learning_python
|
src/comments/migrations/0001_initial.py
|
1
|
1264
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 16:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Comment model with a generic relation.

    Auto-generated by Django 1.11.4; the schema operations below should not
    be edited by hand.
    """

    # First migration of the comments app.
    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                # object_id + content_type form the generic foreign key target.
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                # self-reference: allows threaded (nested) comments
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment')),
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
mit
| 909,350,399,107,684,600
| 38.5
| 137
| 0.631329
| false
|
jhoenicke/mempool
|
eth/txpool_parse.py
|
1
|
2468
|
#!/usr/bin/env python3
import json
import sys
import time
from subprocess import PIPE, Popen
FEELIMIT = [0.0001, 1, 2, 3, 4, 5, 6, 7, 8, 10,
12, 14, 17, 20, 25, 30, 40, 50, 60, 70, 80, 100,
120, 140, 170, 200, 250, 300, 400, 500, 600, 700, 800, 1000,
1200, 1400, 1700, 2000, 2500, 3000, 4000, 5000, 6000, 7000, 8000, 10000]
sizes = [0] * len(FEELIMIT)
count = [0] * len(FEELIMIT)
fees = [0] * len(FEELIMIT)
found = False
lastfrom = ""
lastgprice = 0
def parse_txdata(obj):
    """json.load object_hook: fold per-account tx maps into the fee histograms.

    Consumes any JSON object whose values carry a "gasPrice" field (a
    per-account map of nonce -> transaction), accumulating into the
    module-level sizes/count/fees histograms, and returns None so json
    drops it. All other objects are returned unchanged.

    NOTE(review): lastfrom/lastgprice are declared global but never used in
    this function — confirm they are leftovers.
    """
    global sizes, count, fees, found, lastfrom, lastgprice
    try:
        firstval = next(iter(obj.values()));
        if "gasPrice" in firstval:
            # effective gas price is the gas that miners use
            # to determine if to mine a transaction. It is the
            # minimum of the gas price and the effective gas price
            # of the previous unconfirmed transaction with a smaller
            # nonce. We set effgprice to a very large value initially,
            # so that it doesn't effect the gas price of the first
            # trnasaction.
            effgprice = 1e18;
            # sort the txes by nonce
            for k in sorted(obj.keys(), key=int):
                tx = obj[k]
                gprice = int(tx["gasPrice"], 0)
                gas = int(tx["gas"], 0)
                size = gas
                # convert wei -> gwei
                gprice = gprice / 1000000000
                effgprice = min(effgprice, gprice)
                found = True
                # bucket by the FEELIMIT band containing effgprice
                for i, limit in enumerate(FEELIMIT):
                    if (effgprice >= limit and
                            (i == len(FEELIMIT) - 1 or effgprice < FEELIMIT[i+1])):
                        sizes[i] += size
                        count[i] += 1
                        # Fees in ETH
                        fees[i] += round(gprice * gas)
                        break
            return None
        return obj
    except:
        # best-effort: any malformed object is passed through untouched
        return obj
def dump_data(timestamp, sizes, count, fees):
    """Print one sample line: [timestamp,[counts],[sizes],[fees]], (JSON-array fragment)."""
    def joined(values):
        return ",".join(str(v) for v in values)

    print("[{:d},[{}],[{}],[{}]],"
          .format(timestamp, joined(count), joined(sizes), joined(fees)))
def main():
    """Read a txpool JSON dump from stdin and emit one histogram sample line.

    parse_txdata (installed as the object_hook) fills the module-level
    sizes/count/fees histograms as a side effect of parsing; dump_data is
    only called when at least one transaction was seen.
    """
    global sizes, count, fees, found
    timestamp = int(time.time())
    try:
        output = json.load(sys.stdin, object_hook=parse_txdata)
    except:
        # best-effort: a truncated/invalid dump simply yields no sample
        pass
    if found:
        dump_data(timestamp, sizes, count, fees)
# Runs unconditionally on import/execution (no __main__ guard in the original).
main()
|
agpl-3.0
| 7,091,031,645,175,694,000
| 32.808219
| 84
| 0.522285
| false
|
bdarnell/plop
|
plop/test/collector_test.py
|
1
|
3976
|
import ast
import logging
import threading
import time
import unittest
import six
from plop.collector import Collector, PlopFormatter
class CollectorTest(unittest.TestCase):
    """Timing-based tests for plop's sampling Collector.

    The tests run known busy-wait call trees for fixed durations and check
    that the sampled stack counts are roughly proportional to the time spent
    in each frame.
    """

    def filter_stacks(self, collector):
        """Return {tuple of function names: sample count} for frames in this file."""
        # Kind of hacky, but this is the simplest way to keep the tests
        # working after the internals of the collector changed to support
        # multiple formatters.
        stack_counts = ast.literal_eval(PlopFormatter().format(collector))
        counts = {}
        for stack, count in six.iteritems(stack_counts):
            # frame[0] is the filename, frame[2] the function name
            filtered_stack = [frame[2] for frame in stack
                              if frame[0].endswith('collector_test.py')]
            if filtered_stack:
                counts[tuple(filtered_stack)] = count
        return counts

    def check_counts(self, counts, expected):
        """Assert each expected stack was sampled within ~25% of its expected count."""
        failed = False
        output = []
        for stack, count in six.iteritems(expected):
            # every expected frame should appear in the data, but
            # the inverse is not true if the signal catches us between
            # calls.
            self.assertTrue(stack in counts)
            ratio = float(counts[stack]) / float(count)
            output.append('%s: expected %s, got %s (%s)' %
                          (stack, count, counts[stack], ratio))
            # sampling jitter tolerance
            if not (0.70 <= ratio <= 1.25):
                failed = True
        if failed:
            for line in output:
                logging.warning(line)
            for key in set(counts.keys()) - set(expected.keys()):
                logging.warning('unexpected key: %s: got %s' % (key, counts[key]))
            self.fail("collected data did not meet expectations")

    def test_collector(self):
        """Sample a single-threaded call tree with known per-frame durations."""
        start = time.time()

        # a and b each busy-wait, then call c for a further 0.1s
        def a(end):
            while time.time() < end: pass
            c(time.time() + 0.1)

        def b(end):
            while time.time() < end: pass
            c(time.time() + 0.1)

        def c(end):
            while time.time() < end: pass

        collector = Collector(interval=0.01, mode='prof')
        collector.start()
        a(time.time() + 0.1)
        b(time.time() + 0.2)
        c(time.time() + 0.3)
        end = time.time()
        collector.stop()
        elapsed = end - start
        # total busy time: 0.1+0.1 (a) + 0.2+0.1 (b) + 0.3 (c) = 0.8s
        self.assertTrue(0.8 < elapsed < 0.9, elapsed)
        counts = self.filter_stacks(collector)
        # expected sample counts at a 0.01s interval
        expected = {
            ('a', 'test_collector'): 10,
            ('c', 'a', 'test_collector'): 10,
            ('b', 'test_collector'): 20,
            ('c', 'b', 'test_collector'): 10,
            ('c', 'test_collector'): 30,
        }
        self.check_counts(counts, expected)
        # cost depends on stack depth; for this tiny test I see 40-80usec
        time_per_sample = float(collector.sample_time) / collector.samples_taken
        self.assertTrue(time_per_sample < 0.000100, time_per_sample)

    # TODO: any way to make this test not flaky?
    def disabled_test_collect_threads(self):
        """Disabled: same idea as test_collector but across three threads."""
        start = time.time()

        def a(end):
            while time.time() < end: pass

        def thread1_func():
            a(time.time() + 0.2)

        def thread2_func():
            a(time.time() + 0.3)

        collector = Collector(interval=0.01, mode='prof')
        collector.start()
        thread1 = threading.Thread(target=thread1_func)
        thread2 = threading.Thread(target=thread2_func)
        thread1.start()
        thread2.start()
        a(time.time() + 0.1)
        while thread1.isAlive(): pass
        while thread2.isAlive(): pass
        thread1.join()
        thread2.join()
        end = time.time()
        collector.stop()
        elapsed = end - start
        self.assertTrue(0.3 < elapsed < 0.4, elapsed)
        counts = self.filter_stacks(collector)
        expected = {
            ('a', 'test_collect_threads'): 10,
            ('a', 'thread1_func'): 20,
            ('a', 'thread2_func'): 30,
        }
        self.check_counts(counts, expected)
|
mit
| -2,761,755,829,914,873,300
| 34.81982
| 82
| 0.544266
| false
|
zegra1989/pytree
|
bplustree.py
|
1
|
17009
|
# -*- coding:utf-8 -*-
# 使用 UTF-8
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class NameNode(object):
    """Index (non-data) node of the B+ tree: a fixed-capacity array of keys
    plus parallel child pointers."""

    def __init__(self, degree, optimize=3):
        super(NameNode, self).__init__()
        self.num = 0                       # number of slots currently in use
        self.degree = degree
        self.threshold = degree * 2        # maximum number of keys/children
        self.keys = [None] * self.threshold
        self.pnodes = [None] * self.threshold
        self.isleaf = True                 # True => pnodes hold DataNode objects

    def pointer(self):
        """Return a reference to this node (hook for on-disk implementations)."""
        return self

    def __str__(self):
        used_keys = self.keys[:self.num]
        return "num:{0} keys:{1}".format(self.num, used_keys)
class DataNode(object):
    """Abstract leaf-level data node of the B+ tree.

    Tracks insert direction (increasing/decreasing) so that the tree can
    choose an optimised split strategy for sequential inserts, and keeps
    prev/next links to form a doubly linked list of leaves.
    """
    # split modes: which end to peel off during an optimised split
    F_INCREASE = 0
    F_DECREASE = 1

    def __init__(self, max_length=10, optimize=3):
        super(DataNode, self).__init__()
        self.data = None
        self.max_length = max_length
        # min_length = ceil(max_length / 2)
        base, mode = divmod(self.max_length, 2)
        if mode > 0:
            base += 1
        self.min_length = base
        self.num = 0
        # key of the most recent insert
        self.last_insert_pos = None
        # flag: inserts are monotonically increasing
        self.is_increase = None
        # count of consecutive inserts in the same direction
        self.n_directions = 0
        # the split optimisation only kicks in after n_optimize consecutive
        # same-direction inserts
        self.n_optimize = optimize
        self.prev = None
        self.next = None

    def link(self, prev_node=None, next_node=None):
        """Splice prev_node and/or next_node into the leaf linked list
        adjacent to this node."""
        if prev_node is not None:
            tmp = self.prev
            self.prev = prev_node
            prev_node.prev = tmp
            prev_node.next = self
            if prev_node.prev is not None:
                prev_node.prev.next = prev_node
        if next_node is not None:
            tmp = self.next
            self.next = next_node
            next_node.next = tmp
            next_node.prev = self
            if next_node.next is not None:
                next_node.next.prev = next_node

    def insert(self, data, doc):
        """Insert a key/document pair. Subclass responsibility."""
        raise NotImplementedError()

    def update(self, data, doc):
        """Append a document under an existing key. Subclass responsibility."""
        raise NotImplementedError()

    def pop(self, num=1):
        """Remove and return the largest key. Subclass responsibility."""
        raise NotImplementedError()

    def isfull(self):
        """True when the node holds max_length keys. Subclass responsibility."""
        raise NotImplementedError()

    def isguarenteed(self):
        """True when the node can lose a key and stay >= min_length. Subclass responsibility."""
        raise NotImplementedError()

    def split(self, mode=None):
        """Split this node, returning the new sibling. Subclass responsibility."""
        raise NotImplementedError()

    def merge(self, datanode):
        """Absorb the given node's contents. Subclass responsibility."""
        raise NotImplementedError()

    @property
    def low_key(self):
        # smallest key currently stored; maintained by subclasses
        return self._low_key
class BPlusTree(object):
    """Abstract B+ tree keyed on comparable keys.

    Index structure is made of NameNode objects; leaves point at DataNode
    objects. Storage allocation (memory, disk, ...) is delegated to the
    allocate_*/deallocate_*/load_docs hooks implemented by subclasses.
    """

    def __init__(self, degree):
        super(BPlusTree, self).__init__()
        self.degree = degree
        # maximum keys per index node
        self.threshold = degree*2
        self.root = self.allocate_namenode()

    def allocate_namenode(self):
        """Create a new index node. Subclass responsibility."""
        raise NotImplementedError()

    def deallocate_namenode(self, node):
        """Release an index node. Subclass responsibility."""
        raise NotImplementedError()

    def allocate_datanode(self):
        """Create a new leaf data node. Subclass responsibility."""
        raise NotImplementedError()

    def deallocate_datanode(self, node):
        """Release a leaf data node. Subclass responsibility."""
        raise NotImplementedError()

    def save_docs(self, metanode):
        """Persist a data node. Subclass responsibility."""
        raise NotImplementedError()

    def load_docs(self, metanode, ipos):
        """Materialise documents for a leaf entry. Subclass responsibility.

        NOTE(review): search() calls this with a single argument — the
        subclass signatures disagree with this one; verify.
        """
        raise NotImplementedError()

    def remove(self, key):
        """Remove key from the tree, shrinking the root if it became trivial."""
        res = self.remove_key(self.root, key)
        self.shrink()
        return res

    def shrink(self):
        """Collapse a one-child non-leaf root down one level."""
        if self.root.num == 1 and self.root.isleaf is False:
            old_root = self.root
            self.root = old_root.pnodes[0]
            self.deallocate_namenode(old_root)

    def update(self, key, doc):
        """Insert doc under key, appending if the key already exists.

        NOTE(review): the missing-key branch calls self.insert2 (not defined
        anywhere in this file) and uses unbound node/ipos afterwards — this
        path looks broken; verify against the intended API.
        """
        docs = self.search(key)
        if docs is None:
            node, ipos = self.insert2(key, doc)
            return 0
        docs = self.load_docs(node, ipos)
        docs.update(key, doc)
        return 1

    def select(self, key):
        """Return the documents stored under key, or None if absent."""
        node = self.search(key)
        if node is None:
            return None
        return node

    def search(self, key, node=None):
        """Descend from node (default: root) to the leaf entry covering key."""
        if node is None:
            node = self.root
        ipos = node.num-1
        while ipos >= 0 and key < node.keys[ipos]:
            ipos -= 1
        # if ipos < 0 the key is smaller than every key: not found
        if ipos < 0:
            return None
        if node.isleaf is True:
            return self.load_docs(node.pnodes[ipos])
        return self.search(key, node.pnodes[ipos])

    def split(self, parent, ipos, node):
        """Split parent's child `node` (at slot ipos), inserting the new
        sibling into parent. Handles both index-node and leaf splits."""
        if parent.isleaf is False:
            # index-node split: move the upper half of keys/children out
            new_node = self.allocate_namenode()
            new_node.isleaf = node.isleaf
            for i in xrange(0, self.degree):
                new_node.keys[i] = node.keys[i+self.degree]
                new_node.pnodes[i] = node.pnodes[i+self.degree]
            new_node.num = node.num = self.degree
            # shift parent entries right to open slot ipos+1
            for i in xrange(parent.num-1, ipos-1, -1):
                parent.keys[i+1] = parent.keys[i]
                parent.pnodes[i+1] = parent.pnodes[i]
            parent.keys[ipos+1] = new_node.keys[0]
            parent.pnodes[ipos+1] = new_node.pointer()
            parent.num += 1
            return None
        # leaf split: first open a slot in the parent
        for i in xrange(parent.num-1, ipos-1, -1):
            # no overflow here: insert() guarantees the parent has room
            parent.keys[i+1] = parent.keys[i]
            parent.pnodes[i+1] = parent.pnodes[i]
        # optimised split path for sequential workloads
        if node.n_directions > node.n_optimize:
            # avoids the pathological sequential-insert splits of MySQL Bug #67718
            if node.is_increase is True:
                # consecutive increasing inserts: peel off the top key only
                new_node = node.split(mode=DataNode.F_INCREASE)
                ipos += 1
                node.link(next_node=new_node)
            else:
                # consecutive decreasing inserts: peel off the bottom key only
                new_node = node.split(mode=DataNode.F_DECREASE)
                parent.keys[ipos+1] = node.low_key
                node.link(prev_node=new_node)
        else:
            # basic split: move the upper half to the new sibling
            new_node = node.split()
            ipos += 1
            node.link(next_node=new_node)
        parent.keys[ipos] = new_node.low_key
        parent.pnodes[ipos] = new_node
        parent.num += 1
        return None

    def insert_nonfull(self, node, key, doc):
        """Insert into the subtree rooted at node, which is known not full."""
        ipos = node.num-1
        while ipos >= 0 and key < node.keys[ipos]:
            ipos -= 1
        # if ipos < 0 the new key is smaller than every key in this node:
        # it becomes the node's new low key
        if ipos < 0:
            node.keys[0] = key
            ipos = 0
        if node.isleaf is True:
            datanode = node.pnodes[ipos]
            if datanode is None:
                # very first insert into an empty tree
                datanode = self.allocate_datanode()
                node.keys[ipos] = key
                node.pnodes[ipos] = datanode
                node.num += 1
                # no need to link the DataNode list here; this branch only
                # runs once at initialisation
            if datanode.isfull() is True:
                # direction bookkeeping: a reversal resets the streak
                if datanode.is_increase is True and datanode.last_insert_pos > key:
                    datanode.is_increase = False
                    datanode.n_directions = 1
                elif datanode.is_increase is False and datanode.last_insert_pos < key:
                    datanode.is_increase = True
                    datanode.n_directions = 1
                self.split(node, ipos, datanode)
                # after the split the key may belong in the new right sibling
                if node.keys[ipos+1] < key:
                    ipos += 1
                datanode = node.pnodes[ipos]
            datanode.insert(key, doc)
            node.keys[ipos] = datanode.low_key
            return None
        child = node.pnodes[ipos]
        if child.num == self.threshold:
            self.split(node, ipos, child)
            if node.keys[ipos+1] is not None and node.keys[ipos+1] < key:
                child = node.pnodes[ipos+1]
        return self.insert_nonfull(child, key, doc)

    def insert(self, key, doc):
        """Insert key/doc, splitting a full root first so descent never
        meets a full node."""
        if self.root.num != self.threshold:
            return self.insert_nonfull(self.root, key, doc)
        old_root = self.root
        new_root = self.allocate_namenode()
        new_root.isleaf = False
        new_root.keys[0] = old_root.keys[0]
        new_root.pnodes[0] = old_root.pointer()
        new_root.num += 1
        self.root = new_root
        self.split(new_root, 0, old_root)
        return self.insert_nonfull(new_root, key, doc)

    def merge(self, node, ipos):
        """
        Merge the child at node.keys[ipos] with its left/right sibling.
        ipos is the position of the key within node.keys.
        """
        # if the child has no right sibling, merge with the left one instead
        if ipos == node.num-1:
            ipos -= 1
        child = node.pnodes[ipos]
        rchild = node.pnodes[ipos+1]
        if node.isleaf is True:
            child.merge(rchild)
            self.deallocate_datanode(rchild)
        else:
            # move all of rchild's entries into child
            irpos = 0
            while irpos < rchild.num:
                child.keys[child.num+irpos] = rchild.keys[irpos]
                child.pnodes[child.num+irpos] = rchild.pnodes[irpos]
                irpos += 1
            child.num += rchild.num
            self.deallocate_namenode(rchild)
        # close the gap left in the parent
        inpos = ipos+1
        while inpos < node.num-1:
            node.keys[inpos] = node.keys[inpos+1]
            node.pnodes[inpos] = node.pnodes[inpos+1]
            inpos += 1
        node.num -= 1
        return ipos

    def guarantee(self, node, ipos):
        """
        Ensure node.pnodes[ipos] holds at least degree keys, borrowing from
        a sibling or merging if necessary. Returns the (possibly shifted)
        child slot index.
        """
        child = node.pnodes[ipos]
        if child.num > self.degree:
            return ipos
        # if ipos == 0 the child has no left sibling
        if ipos > 0:
            lbrother = node.pnodes[ipos-1]
            if lbrother.num > self.degree:
                # borrow the left sibling's largest entry
                icpos = child.num
                while icpos > 0:
                    child.keys[icpos] = child.keys[icpos-1]
                    child.pnodes[icpos] = child.pnodes[icpos-1]
                    icpos -= 1
                child.keys[0] = lbrother.keys[lbrother.num-1]
                child.pnodes[0] = lbrother.pnodes[lbrother.num-1]
                child.num += 1
                node.keys[ipos] = child.keys[0]
                lbrother.num -= 1
                return ipos
        # if ipos == node.num-1 the child has no right sibling
        if ipos < node.num-1:
            rbrother = node.pnodes[ipos+1]
            if rbrother.num > self.degree:
                # borrow the right sibling's smallest entry
                child.keys[child.num] = rbrother.keys[0]
                child.pnodes[child.num] = rbrother.pnodes[0]
                child.num += 1
                irpos = 0
                while irpos < rbrother.num-1:
                    rbrother.keys[irpos] = rbrother.keys[irpos+1]
                    rbrother.pnodes[irpos] = rbrother.pnodes[irpos+1]
                    irpos += 1
                node.keys[ipos+1] = rbrother.keys[0]
                rbrother.num -= 1
                return ipos
        # neither sibling can spare an entry: merge
        return self.merge(node, ipos)

    def remove_key(self, node, key):
        """Remove key from the subtree rooted at node, rebalancing on the
        way down so leaves never underflow."""
        ipos = node.num-1
        while ipos >= 0 and key < node.keys[ipos]:
            ipos -= 1
        # if ipos < 0 the key is not present
        if ipos < 0:
            return None
        if node.isleaf is False:
            icpos = self.guarantee(node, ipos)
            child = node.pnodes[icpos]
            self.remove_key(child, key)
            node.keys[icpos] = node.pnodes[icpos].keys[0]
            return 0
        datanode = node.pnodes[ipos]
        if datanode.isguarenteed() is True:
            # leaf can lose a key without underflowing
            datanode.remove(key)
            node.keys[ipos] = datanode.low_key
            return datanode.low_key
        if node.num == 1:
            # only one leaf left: allow it to empty out entirely
            datanode.remove(key)
            if datanode.num > 0:
                node.keys[ipos] = datanode.low_key
            else:
                node.num = 0
                node.pnodes[0] = None
                self.deallocate_datanode(datanode)
            return 0
        if ipos > 0:
            # try borrowing from the left leaf sibling
            lbrother = node.pnodes[ipos-1]
            if lbrother.isguarenteed() is True:
                lkey, ldoc = lbrother.pop()
                datanode.insert(lkey, ldoc)
                node.keys[ipos] = lkey
                datanode.remove(key)
                node.keys[ipos] = datanode.low_key
                return datanode.low_key
        if ipos < node.num-1:
            # try borrowing from the right leaf sibling
            rbrother = node.pnodes[ipos+1]
            if rbrother.isguarenteed() is True:
                rkey, rdoc = rbrother.shift()
                datanode.insert(rkey, rdoc)
                node.keys[ipos+1] = rbrother.low_key
                datanode.remove(key)
                node.keys[ipos] = datanode.low_key
                return datanode.low_key
        # no sibling can lend: merge, then remove from the merged leaf
        ipos = self.merge(node, ipos)
        datanode = node.pnodes[ipos]
        datanode.remove(key)
        node.keys[ipos] = datanode.low_key
        return datanode.low_key

    def traverse(self, callback, node=None):
        # not implemented in the base class
        pass

    def print_node(self, node, string, depth=0):
        # not implemented in the base class; see MemBPlusTree
        pass

    def __str__(self):
        strings = ["*****************************"]
        self.print_node(self.root, strings)
        return "\n".join(strings).strip() + "\n*****************************\n"
################################################
class MemDataNode(DataNode):
    """In-memory leaf node: stores key -> [doc, ...] in a plain dict."""

    def __init__(self, max_length=4):
        super(MemDataNode, self).__init__(max_length)
        self.data = {}

    def insert(self, key, doc):
        """Store doc under key (overwriting any previous docs for that key)
        and update the direction-streak bookkeeping."""
        # unwrap a single-element list (e.g. a value handed back by split)
        if isinstance(doc, list,) is True and len(doc) == 1:
            doc = doc[0]
        self.data[key] = [doc]
        self._low_key = min(self.data.keys())
        # track consecutive same-direction inserts for the split optimisation
        if self.is_increase is True:
            if self.last_insert_pos < key:
                self.n_directions += 1
            else:
                self.is_increase = False
                self.n_directions = 1
        else:
            if self.last_insert_pos > key:
                self.n_directions += 1
            else:
                self.is_increase = True
                self.n_directions = 1
        self.last_insert_pos = key
        self.num += 1

    def update(self, key, doc):
        """Append doc under key, creating the key if needed."""
        docs = self.data.get(key, None)
        if docs is not None:
            docs.append(doc)
        else:
            self.data[key] = [doc]
            self.num += 1
        self._low_key = min(self.data.keys())

    def remove(self, key):
        """Delete key and its documents; raises KeyError if absent."""
        del self.data[key]
        self.num -= 1
        if len(self.data) > 0:
            self._low_key = min(self.data.keys())
        else:
            self._low_key = None

    def isfull(self):
        return self.num == self.max_length

    def isguarenteed(self):
        # can lose one key and still satisfy the minimum occupancy
        return self.num > self.min_length

    def pop(self):
        """Remove and return (largest key, its docs)."""
        key = sorted(self.data)[-1]
        doc = self.data.pop(key)
        if len(self.data) == 0:
            self._low_key = None
        self.num -= 1
        return key, doc

    def shift(self):
        """Remove and return (smallest key, its docs)."""
        key = sorted(self.data)[0]
        doc = self.data.pop(key)
        if len(self.data) == 0:
            self._low_key = None
        else:
            self._low_key = min(self.data.keys())
        self.num -= 1
        return key, doc

    def split(self, mode=None):
        """Split off a new sibling node; mode selects which end to peel.

        NOTE(review): pop()/shift() and the nested insert already adjust
        self.num / new_node.num, so the extra `self.num -= 1` statements
        here look like double counting — verify the invariant num == len(data).
        """
        new_node = MemDataNode(self.max_length)
        if mode is DataNode.F_INCREASE:
            key, doc = self.pop()
            new_node.insert(key, doc)
            self.num -= 1
        elif mode is DataNode.F_DECREASE:
            key, doc = self.shift()
            new_node.insert(key, doc)
            self.num -= 1
        else:
            # default: move the upper half of the keys to the new node
            for key in sorted(self.data)[self.min_length:]:
                new_node.insert(key, self.data.pop(key))
                self.num -= 1
        return new_node

    def merge(self, datanode):
        """Absorb another MemDataNode's contents."""
        self.data.update(datanode.data)
        self.num = len(self.data)

    def __str__(self):
        keys = sorted(self.data.keys())
        values = map(lambda x: self.data[x], keys)
        return "num:{0} keys:{1} docs:{2}, increase:{3}".format(
            len(self.data), keys, values, self.n_directions)
class MemBPlusTree(BPlusTree):
    """Concrete B+ tree held entirely in memory (NameNode / MemDataNode)."""

    def __init__(self, degree):
        super(MemBPlusTree, self).__init__(degree)

    def allocate_namenode(self):
        return NameNode(self.degree)

    def deallocate_namenode(self, node):
        # garbage collector handles reclamation for in-memory nodes
        pass

    def allocate_datanode(self):
        return MemDataNode()

    def deallocate_datanode(self, node):
        # garbage collector handles reclamation for in-memory nodes
        pass

    def load_docs(self, datanode):
        # in-memory leaves are already materialised
        return datanode

    def print_node(self, node, strings, depth=0):
        """Append an indented dump of the subtree rooted at node to strings."""
        if node is None:
            return
        strings.append(">"*depth + str(node))
        if node.isleaf is False:
            strings.append("")
            for ipos in xrange(node.num):
                self.print_node(node.pnodes[ipos], strings, depth+1)
            strings.append("")
        else:
            for ipos in xrange(node.num):
                strings.append(">"*(depth+1) + str(node.pnodes[ipos]))

    def __str__(self):
        strings = ["*****************************"]
        self.print_node(self.root, strings)
        return "\n".join(strings).strip() + "\n*****************************\n"
|
mit
| 5,075,025,887,707,922,000
| 28.057895
| 86
| 0.513796
| false
|
julienmalard/Tinamit
|
tinamit/mod/var.py
|
1
|
3075
|
import numpy as np
import xarray as xr
from tinamit.config import _
class Variable(object):
    """The most general class for model variables in Tinamït."""

    def __init__(símismo, nombre, unid, ingr, egr, inic=0, líms=None, info=''):
        """
        Parameters
        ----------
        nombre: str
            The variable's name.
        unid: str or None
            The variable's units.
        ingr: bool
            Whether it is a model input.
        egr: bool
            Whether it is a model output.
        inic: int or float or np.ndarray
            The initial value.
        líms: tuple
            (lower, upper) bounds; None entries mean unbounded.
        info: str
            Detailed description of the variable.

        Raises
        ------
        ValueError
            If the variable is neither input nor output.
        """
        if not (ingr or egr):
            raise ValueError(_('Si no es variable ingreso, debe ser egreso.'))

        símismo.nombre = nombre
        símismo.unid = unid
        símismo.ingr = ingr
        símismo.egr = egr
        símismo.inic = _a_np(inic)
        símismo.dims = símismo.inic.shape
        símismo.líms = _proc_líms(líms)
        símismo.info = info
        # working value; float copy so in-place updates never touch `inic`
        símismo._val = símismo.inic.astype(float)

    def poner_val(símismo, val):
        """
        Set the variable's value.

        NaN entries in `val` are treated as "missing" and leave the
        corresponding current values unchanged.

        Parameters
        ----------
        val: int or float or np.ndarray
            The new value.
        """
        if isinstance(val, np.ndarray) and val.size == 1:
            val = val[0]
        if isinstance(val, np.ndarray):
            existen = np.invert(np.isnan(val))  # don't overwrite with missing values
            símismo._val[existen] = val[existen]
        elif not np.isnan(val):
            símismo._val[:] = val

    def obt_val(símismo):
        """
        Return the variable's current value.
        """
        return símismo._val  # to discourage direct modification of `símismo._val`

    def reinic(símismo):
        """
        Reset the variable to its pre-simulation value.
        """
        símismo._val[:] = símismo.inic

    # In-place operators. BUG FIX: every augmented-assignment method must
    # return `símismo`; the originals of __imul__/__imod__/__ifloordiv__/
    # __ipow__ returned None, so `v *= x` rebound the name to None.
    def __iadd__(símismo, otro):
        símismo.poner_val(símismo._val + otro)
        return símismo

    def __imul__(símismo, otro):
        símismo.poner_val(símismo._val * otro)
        return símismo

    def __imod__(símismo, otro):
        símismo.poner_val(símismo._val % otro)
        return símismo

    def __ifloordiv__(símismo, otro):
        símismo.poner_val(símismo._val // otro)
        return símismo

    def __ipow__(símismo, otro):
        símismo.poner_val(símismo._val ** otro)
        return símismo

    def __str__(símismo):
        return símismo.nombre
def _a_np(val):
    """Coerce `val` to a numpy array of at least one dimension.

    xarray DataArrays are unwrapped to their underlying ndarray; scalars
    (including 0-d arrays) become 1-element arrays.
    """
    if isinstance(val, xr.DataArray):
        val = val.values
    if isinstance(val, np.ndarray):
        return val if val.shape else np.array([val])
    if isinstance(val, (int, float, np.number)):
        return np.array([val])
    return np.array(val)
def _proc_líms(líms):
if líms is None:
return -np.inf, np.inf
else:
return -np.inf if líms[0] is None else líms[0], np.inf if líms[1] is None else líms[1]
|
gpl-3.0
| -3,288,465,236,079,383,000
| 26.198198
| 94
| 0.556476
| false
|
InfoAgeTech/django-umanage
|
umanage/accounts/views.py
|
1
|
3423
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django_core.utils.loading import get_class_from_settings_full_path
from django_core.views.mixins.auth import LoginRequiredViewMixin
from ..exceptions import UManageSettingImproperlyConfigured
from .forms import UserAccountForm
from inspect import isfunction
class AccountView(LoginRequiredViewMixin, TemplateView):
    """Read-only display of the authenticated user's account fields.

    Which fields are shown is driven by the
    UMANAGE_USER_ACCOUNT_DISPLAY_FIELDS setting: a tuple/list whose entries
    are either a field name or a (label, field name) pair.
    """
    template_name = 'umanage/accounts/account_view.html'

    def get_context_data(self, **kwargs):
        """Build `fields_to_display` as a list of (label, value) pairs."""
        context = super(AccountView, self).get_context_data(**kwargs)
        user = self.request.user
        settings_key = 'UMANAGE_USER_ACCOUNT_DISPLAY_FIELDS'
        user_fields_to_display = getattr(settings,
                                         settings_key,
                                         ('first_name', 'last_name', 'email'))

        if not isinstance(user_fields_to_display, (tuple, list)):
            raise UManageSettingImproperlyConfigured(settings_key)

        fields_to_display = []

        for field_name in user_fields_to_display:
            label = None
            # (label, field_name) pair form
            if isinstance(field_name, (list, tuple)):
                label = field_name[0]
                field_name = field_name[1]

            try:
                val = getattr(user, field_name)

                if isfunction(val):
                    # it's a function, call the function and get the results
                    val = val()

                if not label:
                    # fall back to the model field's verbose name as the label
                    field = user._meta.get_field(field_name)
                    label = field.verbose_name
            except AttributeError:
                raise UManageSettingImproperlyConfigured(
                    settings_key,
                    message=_('"{0}" is not a valid field on the User model. '
                              'Check the "{1}" config '
                              'setting.').format(field_name, settings_key)
                )

            fields_to_display.append((label.title(), val))

        context['fields_to_display'] = fields_to_display
        return context
class AccountEditView(LoginRequiredViewMixin, FormView):
    """Edit the authenticated user's account.

    The form class can be overridden via the UMANAGE_USER_ACCOUNT_EDIT_FORM
    setting (a full dotted path to a form class).
    """
    template_name = 'umanage/accounts/account_edit.html'
    form_class = UserAccountForm

    def dispatch(self, *args, **kwargs):
        """Resolve a custom form class from settings before dispatching."""
        settings_key = 'UMANAGE_USER_ACCOUNT_EDIT_FORM'
        if hasattr(settings, settings_key):
            try:
                self.form_class = get_class_from_settings_full_path(settings_key)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; re-raise a configuration error
                # for any import/lookup failure instead.
                msg = _('{0} setting path is either incorrect or the app is '
                        'not installed. Please check the '
                        'configuration.').format(settings_key)
                raise UManageSettingImproperlyConfigured(settings_key, msg)

        return super(AccountEditView, self).dispatch(*args, **kwargs)

    def get_form_kwargs(self):
        """Pass the current user (as `instance`) and the request to the form."""
        kwargs = super(AccountEditView, self).get_form_kwargs()
        kwargs['instance'] = self.request.user
        kwargs['request'] = self.request
        return kwargs

    def form_valid(self, form):
        form.save()
        return super(AccountEditView, self).form_valid(form)

    def get_success_url(self):
        return reverse('umanage_account_view')
|
mit
| -5,431,782,645,010,916,000
| 36.206522
| 81
| 0.603856
| false
|
ict-felix/stack
|
vt_manager_kvm/src/python/vt_manager_kvm/communication/sfaCommunication.py
|
1
|
6246
|
from django.http import *
import os, sys, logging
from vt_manager_kvm.common.rpc4django import rpcmethod
from vt_manager_kvm.common.rpc4django import *
from vt_manager_kvm.communication.sfa.util.xrn import urn_to_hrn
from vt_manager_kvm.communication.sfa.util.faults import SfaInvalidArgument
from vt_manager_kvm.communication.sfa.util.version import version_core
from vt_manager_kvm.communication.sfa.util.xrn import Xrn
from vt_manager_kvm.communication.sfa.methods.permission_manager import PermissionManager
from vt_manager_kvm.communication.sfa.managers.AggregateManager import AggregateManager
#from vt_manager_kvm.communication.sfa.drivers.VTSfaDriver import VTSfaDriver
from vt_manager_kvm.communication.sfa.sfa_config import config as CONFIG
# Parameter Types
CREDENTIALS_TYPE = 'array' # of strings
OPTIONS_TYPE = 'struct'
RSPEC_TYPE = 'string'
VERSION_TYPE = 'struct'
URN_TYPE = 'string'
SUCCESS_TYPE = 'boolean'
STATUS_TYPE = 'struct'
TIME_TYPE = 'string'
#driver = VTSfaDriver(None)
aggregate = AggregateManager()
pm = PermissionManager()
@rpcmethod(signature=['string', 'string'], url_name="sfa")
def ping(challenge):
    """XML-RPC liveness check: echo the challenge string back unchanged."""
    return challenge
@rpcmethod(signature=[VERSION_TYPE], url_name="sfa")
def GetVersion(api=None, options={}):
    """Return the GENI AM API GetVersion structure for this aggregate.

    All values come from the static sfa_config module; nothing is computed
    per request. NOTE(review): the mutable default ``options={}`` is never
    mutated here so it is harmless, but ``options=None`` would be safer.
    """
    version = {'output': '',
               'geni_api': 2,
               'code': {'am_type': 'sfa',
                        'geni_code': 0
                        },
               'value': {'urn': CONFIG.URN,
                         'hostname': CONFIG.HOSTNAME,
                         'code_tag': CONFIG.CODE_TAG,
                         'hrn': CONFIG.HRN,
                         'testbed': CONFIG.TESTBED,
                         'geni_api_versions': CONFIG.GENI_API_VERSIONS,
                         'interface': CONFIG.INTERFACE,
                         'geni_api': int(CONFIG.GENI_API_VERSION),
                         'geni_ad_rspec_versions': CONFIG.GENI_AD_RSPEC_VERSIONS,
                         'code_url': CONFIG.CODE_URL,
                         'geni_request_rspec_versions': CONFIG.GENI_REQUEST_RSPEC_VERSIONS,
                         'sfa': int(CONFIG.SFA_VERSION),
                         # F4F required params
                         'f4f_describe_testbed': CONFIG.DESCRIBE_TESTBED,
                         'f4f_testbed_homepage': CONFIG.TESTBED_HOMEPAGE,
                         'f4f_testbed_picture': CONFIG.TESTBED_PICTURE,
                         'f4f_endorsed_tools': CONFIG.ENDORSED_TOOLS,
                         },
               }
    return version
@rpcmethod(signature=[RSPEC_TYPE, CREDENTIALS_TYPE, OPTIONS_TYPE], url_name="sfa")
def ListResources(credentials, options, **kwargs):
    """Return the advertisement RSpec wrapped in the GENI AM v2 envelope."""
    #pm.check_permissions('ListResources',locals())
    advertisement = aggregate.ListResources(options, **kwargs)
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': advertisement}
@rpcmethod(signature=[CREDENTIALS_TYPE, OPTIONS_TYPE], url_name="sfa")
def ListSlices(self, creds, options):
    # NOTE(review): this module-level function takes ``self`` as its first
    # parameter, unlike its siblings — presumably to match the dispatcher's
    # calling convention; confirm before changing the signature.
    #TODO: SFAException??
    #XXX: should this method list vms?
    # Deliberately returns an empty result: slice listing is not implemented.
    return ""
@rpcmethod(signature=[RSPEC_TYPE, URN_TYPE, CREDENTIALS_TYPE, OPTIONS_TYPE], url_name="sfa")
def CreateSliver(slice_xrn, creds, rspec, users, options):
    """Create the sliver described by *rspec* under *slice_xrn*.

    Delegates to the aggregate manager (note the argument order there is
    ``slice_xrn, rspec, users, creds, options``) and returns the resulting
    manifest RSpec in the GENI AM v2 envelope.
    """
    #pm.check_permissions('CreateSliver',locals())
    manifest = aggregate.CreateSliver(slice_xrn, rspec, users, creds, options)
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': manifest}
@rpcmethod(signature=[SUCCESS_TYPE, URN_TYPE, CREDENTIALS_TYPE], url_name="sfa")
def DeleteSliver(xrn, creds, options={}, **kwargs):
    """Tear down the sliver identified by *xrn* and report a success flag."""
    #pm.check_permissions('DeleteSliver',locals())
    success = aggregate.DeleteSliver(xrn, options)
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': success}
@rpcmethod(signature=[STATUS_TYPE, URN_TYPE, CREDENTIALS_TYPE], url_name="sfa")
def SliverStatus(slice_xrn, creds, options):
    """Report per-resource status for the slice identified by *slice_xrn*.

    The overall slice status is always reported as ``'ready'``; only the
    per-resource entries come from the aggregate manager.
    """
    #pm.check_permissions('SliverStatus',locals())
    resources = aggregate.SliverStatus(slice_xrn, options)
    status = {'geni_resources': resources,
              'geni_urn': slice_xrn,
              'geni_status': 'ready'}
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': status}
@rpcmethod(signature=[SUCCESS_TYPE, URN_TYPE, CREDENTIALS_TYPE, TIME_TYPE], url_name="sfa")
def RenewSliver(slice_xrn, creds, expiration_time, **kwargs):
    """Accept any renewal request; no expiration is actually tracked here."""
    #pm.check_permissions('RenewSliver',locals())
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': True}
@rpcmethod(signature=[SUCCESS_TYPE, URN_TYPE, CREDENTIALS_TYPE], url_name="sfa")
def Shutdown(slice_xrn, creds, **kwargs):
    # NOTE(review): unlike every sibling handler, the permission check here is
    # active (not commented out) — confirm this asymmetry is intentional.
    pm.check_permissions('ShutDown',locals())
    # No actual shutdown is performed; unconditionally reports success.
    return {'output': '', 'geni_api': 2, 'code': {'am_type': 'sfa', 'geni_code': 0}, 'value': True}
@rpcmethod(signature=[SUCCESS_TYPE, URN_TYPE, CREDENTIALS_TYPE], url_name="sfa")
def Start(xrn, creds, **kwargs):
    """Delegate starting of the slice identified by *xrn* to the aggregate."""
    #pm.check_permissions('Start',locals())
    result = aggregate.start_slice(xrn)
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': result}
@rpcmethod(signature=[SUCCESS_TYPE, URN_TYPE, CREDENTIALS_TYPE], url_name="sfa")
def Stop(xrn, creds):
    """Delegate stopping of the slice identified by *xrn* to the aggregate."""
    #pm.check_permissions('Stop',locals())
    result = aggregate.stop_slice(xrn)
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': result}
@rpcmethod(signature=[SUCCESS_TYPE, URN_TYPE], url_name="sfa")
def reset_slice(xrn):
    """Delegate a reset of the slice identified by *xrn* to the aggregate."""
    result = aggregate.reset_slice(xrn)
    return {'output': '',
            'geni_api': 2,
            'code': {'am_type': 'sfa', 'geni_code': 0},
            'value': result}
|
apache-2.0
| 6,046,672,735,316,768,000
| 48.181102
| 107
| 0.641691
| false
|
erdc/proteus
|
proteus/mprans/Dissipation.py
|
1
|
70664
|
from __future__ import division
from builtins import range
from past.utils import old_div
import proteus
from proteus.mprans.cDissipation import *
from proteus.mprans.cDissipation2D import *
import numpy as np
from proteus import Profiling as prof
from proteus import cfemIntegrals
from . import cArgumentsDict
"""
NOTES:
Hardwired Numerics include:
lagging all terms from Navier-Stokes, Kappa equations
same solution space for velocity from Navier-Stokes and Dissipation
equations
This can be removed by saving gradient calculations in N-S and lagging
rather than passing degrees of freedom between models
"""
class SubgridError(proteus.SubgridError.SGE_base):
    """No-op subgrid error estimator: every hook is a stub."""

    def __init__(self, coefficients, nd):
        # Lagging is always disabled for this model.
        proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, lag=False)

    def initializeElementQuadrature(self, mesh, t, cq):
        """Nothing to set up."""
        pass

    def updateSubgridErrorHistory(self, initializationPhase=False):
        """Nothing to update."""
        pass

    def calculateSubgridError(self, q):
        """No subgrid error is computed."""
        pass
class ShockCapturing(proteus.ShockCapturing.ShockCapturing_base):
    """Shock capturing with optionally lagged numerical diffusion.

    Lagging cannot be active on the very first step, so a requested lag is
    converted into a one-step delay and enabled afterwards.
    """

    def __init__(self, coefficients, nd, shockCapturingFactor=0.25, lag=True, nStepsToDelay=None):
        proteus.ShockCapturing.ShockCapturing_base.__init__(self, coefficients, nd, shockCapturingFactor, lag)
        self.nStepsToDelay = nStepsToDelay
        self.nSteps = 0
        if self.lag:
            # Cannot lag on step one: postpone lagging by a single step instead.
            prof.logEvent("Kappa.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying")
            self.nStepsToDelay = 1
            self.lag = False

    def initializeElementQuadrature(self, mesh, t, cq):
        self.mesh = mesh
        # Until lagging switches on, current and lagged diffusion share storage.
        self.numDiff = [cq[('numDiff', ci, ci)] for ci in range(self.nc)]
        self.numDiff_last = list(self.numDiff)

    def updateShockCapturingHistory(self):
        self.nSteps += 1
        if self.lag:
            for ci in range(self.nc):
                self.numDiff_last[ci][:] = self.numDiff[ci]
        if (not self.lag) and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:
            prof.logEvent("Dissipation.ShockCapturing: switched to lagged shock capturing")
            self.lag = True
            # From now on the lagged values live in their own arrays.
            self.numDiff_last = [diff.copy() for diff in self.numDiff]
        prof.logEvent("Dissipation: max numDiff %e" % (proteus.Comm.globalMax(self.numDiff_last[0].max()),))
class NumericalFlux(proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior):
    """IIPG exterior flux; thin wrapper that forwards construction to the base."""

    def __init__(self, vt, getPointwiseBoundaryConditions,
                 getAdvectiveFluxBoundaryConditions,
                 getDiffusiveFluxBoundaryConditions):
        base = proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior
        base.__init__(self, vt,
                      getPointwiseBoundaryConditions,
                      getAdvectiveFluxBoundaryConditions,
                      getDiffusiveFluxBoundaryConditions)
class Coefficients(proteus.TransportCoefficients.TC_base):
"""Basic k-epsilon model for incompressible flow from Hutter etal
Chaper 11 or k-omega (Wilcox 1998).
"""
# Solves for just dissipation variable (epsilon, or omega) assuming
# kappa (intensity) computed independently and lagged in time
# \bar{\vec v} = <\vec v> Reynolds-averaged (mean) velocity
# \vec v^{'} = turbulent fluctuation
# assume \vec v = <\vec v> + \vec v^{'}, with <\vec v^{'}> = 0
# Reynolds averaged NS equations
# \deld \bar{\vec v} = 0
# \pd{\bar{\vec v}}{t} + \deld \left(\bar{\vec v} \outer \bar{\vec v}\right)
# -\nu \deld \ten \bar{D} + \frac{1}{\rho}\grad \bar p
# - \frac{1}{rho}\deld \ten{R} = 0
# Reynolds stress term
# \ten R = -\rho <\vec v^{'}\outer \vec v^{'}>
# \frac{1}{\rho}\ten{R} = 2 \nu_t \bar{D} - \frac{2}{3}k\ten{I}
# D_{ij}(\vec v) = \frac{1}{2} \left( \pd{v_i}{x_j} + \pd{v_j}{x_i})
# \ten D \bar{\ten D} = D(<\vec v>), \ten D^{'} = \ten D(\vec v^{'})
# k-epsilon tranport equations
# \pd{k}{t} + \deld (k\bar{\vec v})
# - \deld\left[\left(\frac{\nu_t}{\sigma_k} + \nu\right)\grad k \right]
# - 4\nu_t \Pi_{D} + \epsilon = 0
# \pd{\varepsilon}{t} + \deld (\varepsilon \bar{\vec v})
# - \deld\left[\left(\frac{\nu_t}{\sigma_\varepsilon} + \nu\right)\grad \varepsilon \right]
# - 4c_1 k \Pi_{D} + c_2 \frac{\epsilon^2}{k} = 0
# k -- turbulent kinetic energy = <\vec v^{'}\dot \vec v^{'}>
# \varepsilon -- turbulent dissipation rate = 4 \nu <\Pi_{D^{'}}>
# \nu -- kinematic viscosity (\mu/\rho)
# \nu_t -- turbulent viscosity = c_mu \frac{k^2}{\varepsilon}
# \Pi_{\ten A} = \frac{1}{2}tr(\ten A^2) = 1/2 \ten A\cdot \ten A
# \ten D \cdot \ten D = \frac{1}{4}\left[ (4 u_x^2 + 4 v_y^2 +
# 1/2 (u_y + v_x)^2 \right]
# 4 \Pi_{D} = 2 \frac{1}{4}\left[ (4 u_x^2 + 4 v_y^2 +
# 1/2 (u_y + v_x)^2 \right]
# = \left[ (2 u_x^2 + 2 v_y^2 + (u_y + v_x)^2 \right]
# \sigma_k -- Prandtl number \approx 1
# \sigma_e -- c_{\mu}/c_e
# c_{\mu} = 0.09, c_1 = 0.126, c_2 = 1.92, c_{\varepsilon} = 0.07
# """
from proteus.ctransportCoefficients import kEpsilon_k_3D_Evaluate_sd
from proteus.ctransportCoefficients import kEpsilon_k_2D_Evaluate_sd
    def __init__(self,
                 VOS_model=None,  # Solid model
                 V_model=None,  # Fluid model
                 LS_model=None,
                 RD_model=None,
                 kappa_model=None,
                 ME_model=None,
                 SED_model=None,
                 dissipation_model_flag=1,  # default K-Epsilon, 2 --> K-Omega 1998, 3 --> K-Omega 1988
                 c_mu=0.09,
                 c_1=0.126,
                 c_2=1.92,
                 c_e=0.07,
                 sigma_e=1.29,
                 rho_0=998.2,
                 nu_0=1.004e-6,
                 rho_1=1.205,
                 nu_1=1.500e-5,
                 g=[0.0, -9.8],  # NOTE(review): mutable default; never mutated here, but confirm
                 nd=3,
                 epsFact=0.01,
                 useMetrics=0.0,
                 sc_uref=1.0,
                 sc_beta=1.0,
                 default_kappa=1.0e-3,
                 closure=None,
                 nullSpace='NoNullSpace',
                 initialize=True):
        """Store model indices and physical constants for the dissipation model.

        The ``*_model`` arguments are integer indices into the solver's model
        list (wired up later in :meth:`attachModels`).  ``dissipation_model_flag``
        selects epsilon (1) vs omega (>= 2) as the solved variable.  ``closure``
        is an optional sediment closure object whose parameters are unpacked in
        :meth:`initialize`.  When ``initialize`` is True the two-phase setup is
        completed immediately.
        """
        self.useMetrics = useMetrics
        self.dissipation_model_flag = dissipation_model_flag  # default K-Epsilon, 2 ==> K-Omega 1998, 3 --> K-Omega 1988
        self.variableNames = ['epsilon']
        self.nd = nd
        # Two-phase fluid properties (phase 0 = water-like, phase 1 = air-like).
        self.rho_0 = rho_0
        self.nu_0 = nu_0
        self.rho_1 = rho_1
        self.rho = rho_0
        self.nu_1 = nu_1
        # k-epsilon model constants.
        self.c_mu = c_mu
        self.c_1 = c_1
        self.c_2 = c_2
        self.c_e = c_e
        self.sigma_e = sigma_e
        self.g = g
        self.epsFact = epsFact
        # Companion-model indices, resolved to objects in attachModels().
        self.flowModelIndex = V_model
        self.modelIndex = ME_model
        self.RD_modelIndex = RD_model
        self.LS_modelIndex = LS_model
        self.VOS_modelIndex = VOS_model
        self.SED_modelIndex = SED_model
        self.kappa_modelIndex = kappa_model
        self.sc_uref = sc_uref
        self.sc_beta = sc_beta
        self.nullSpace = nullSpace
        # for debugging model
        self.default_kappa = default_kappa
        self.closure = closure
        if initialize:
            self.initialize()
def initialize(self):
if self.dissipation_model_flag >= 2:
self.variableNames = ['omega']
#
nc = 1
mass = {0: {0: 'linear'}}
advection = {0: {0: 'linear'}}
hamiltonian = {}
potential = {0: {0: 'u'}}
diffusion = {0: {0: {0: 'nonlinear', }}}
reaction = {0: {0: 'nonlinear'}}
if self.nd == 2:
sdInfo = {(0, 0): (np.array([0, 1, 2], dtype='i'),
np.array([0, 1], dtype='i'))}
else:
sdInfo = {(0, 0): (np.array([0, 1, 2, 3], dtype='i'),
np.array([0, 1, 2], dtype='i'))}
proteus.TransportCoefficients.TC_base.__init__(self,
nc,
mass,
advection,
diffusion,
potential,
reaction,
hamiltonian,
self.variableNames,
sparseDiffusionTensors=sdInfo)
closure = self.closure
try:
self.aDarcy=closure.aDarcy
self.betaForch=closure.betaForch
self.grain=closure.grain
self.packFraction=closure.packFraction
self.packMargin=closure.packMargin
self.maxFraction=closure.maxFraction
self.frFraction=closure.frFraction
self.sigmaC=closure.sigmaC
self.C3e=closure.C3e
self.C4e=closure.C4e
self.eR=closure.eR
self.fContact=closure.fContact
self.mContact=closure.mContact
self.nContact=closure.nContact
self.angFriction=closure.angFriction
self.vos_limiter = closure.vos_limiter
self.mu_fr_limiter = closure.mu_fr_limiter
self.sedFlag = 1
prof.logEvent("INFO: Loading parameters for sediment closure",2)
except:
self.aDarcy=-1.
self.betaForch=-1.
self.grain=-1.
self.packFraction=-1.
self.packMargin=-1.
self.maxFraction=-1.
self.frFraction=-1.
self.sigmaC=-1.
self.C3e=-1.
self.C4e=-1.
self.eR=-1.
self.fContact=-1.
self.mContact=-1.
self.nContact=-1.
self.angFriction=-1.
self.vos_limiter = -1.
self.mu_fr_limiter = -1.
self.sedFlag=0
assert self.VOS_modelIndex == None
assert self.SED_modelIndex == None
prof.logEvent("Sediment module is off. Loading dummy parameters",2)
def initializeMesh(self, mesh):
self.eps = self.epsFact * mesh.h
def attachModels(self, modelList):
assert self.modelIndex is not None and self.modelIndex < len(
modelList), "Dissipation: invalid index for self model allowed range: [0,%s]" % len(modelList)
# self
self.model = modelList[self.modelIndex]
# redistanced level set
if self.RD_modelIndex is not None:
self.rdModel = modelList[self.RD_modelIndex]
# level set
if self.LS_modelIndex is not None:
self.lsModel = modelList[self.LS_modelIndex]
self.q_phi = modelList[self.LS_modelIndex].q[('u', 0)]
self.ebqe_phi = modelList[self.LS_modelIndex].ebqe[('u', 0)]
if ('u', 0) in modelList[self.LS_modelIndex].ebq:
self.ebq_phi = modelList[self.LS_modelIndex].ebq[('u', 0)]
else:
self.ebq_phi = None
else:
self.q_phi =-np.ones( modelList[self.kappa_modelIndex].q[('u', 0)].shape, 'd')
#self.ebq_phi =-np.ones( modelList[self.dissipation_modelIndex].ebq[('u', 0)].shape, 'd')
self.ebqe_phi = -np.ones( modelList[self.kappa_modelIndex].ebqe[('u', 0)].shape, 'd')
# flow model
self.u_old_dof = np.copy(self.model.u[0].dof)
assert self.flowModelIndex is not None, "Dissipation: invalid index for flow model allowed range: [0,%s]" % len(modelList)
# print "flow model index------------",self.flowModelIndex,modelList[self.flowModelIndex].q.has_key(('velocity',0))
if self.flowModelIndex is not None: # keep for debugging for now
self.model.ebqe['n'][:] = modelList[self.flowModelIndex].ebqe['n']
if ('velocity', 0) in modelList[self.flowModelIndex].q:
self.q_v = modelList[self.flowModelIndex].q[('velocity', 0)]
self.ebqe_v = modelList[self.flowModelIndex].ebqe[('velocity', 0)]
else:
self.q_v = modelList[self.flowModelIndex].q[('f', 0)]
self.ebqe_v = modelList[self.flowModelIndex].ebqe[('f', 0)]
if ('velocity', 0) in modelList[self.flowModelIndex].ebq:
self.ebq_v = modelList[self.flowModelIndex].ebq[('velocity', 0)]
else:
if ('f', 0) in modelList[self.flowModelIndex].ebq:
self.ebq_v = modelList[self.flowModelIndex].ebq[('f', 0)]
#
import copy
self.q_grad_u = modelList[self.flowModelIndex].q[('grad(u)', 1)]
self.q_grad_v = modelList[self.flowModelIndex].q[('grad(u)', 2)]
#
self.ebqe_grad_u = modelList[self.flowModelIndex].ebqe[('grad(u)', 1)]
self.ebqe_grad_v = modelList[self.flowModelIndex].ebqe[('grad(u)', 2)]
if ('grad(u)', 1) in modelList[self.flowModelIndex].ebq:
self.ebq_grad_u = modelList[self.flowModelIndex].ebq[('grad(u)', 1)]
if ('grad(u)', 2) in modelList[self.flowModelIndex].ebq:
self.ebq_grad_v = modelList[self.flowModelIndex].ebq[('grad(u)', 2)]
#
# now allocate the 3D variables
if self.nd == 2:
self.q_grad_w = self.q_grad_v.copy()
self.ebqe_grad_w = self.ebqe_grad_v.copy()
if ('grad(u)', 2) in modelList[self.flowModelIndex].ebq:
self.ebq_grad_w = self.ebq_grad_v.copy()
else:
self.q_grad_w = modelList[self.flowModelIndex].q[('grad(u)', 3)]
self.ebqe_grad_w = modelList[self.flowModelIndex].ebqe[('grad(u)', 3)]
if ('grad(u)', 3) in modelList[self.flowModelIndex].ebq:
self.ebq_grad_w = modelList[self.flowModelIndex].ebq[('grad(u)', 3)]
#
self.velocity_dof_u = modelList[self.flowModelIndex].u[1].dof
self.velocity_dof_v = modelList[self.flowModelIndex].u[2].dof
if self.nd == 2:
self.velocity_dof_w = self.velocity_dof_v.copy()
else:
self.velocity_dof_w = modelList[self.flowModelIndex].u[3].dof
if hasattr(modelList[self.flowModelIndex].coefficients, 'q_porosity'):
self.q_porosity = modelList[self.flowModelIndex].coefficients.q_porosity
else:
self.q_porosity = np.ones(self.q[('u', 0)].shape, 'd')
if hasattr(modelList[self.flowModelIndex].coefficients, 'ebqe_porosity'):
self.ebqe_porosity = modelList[self.flowModelIndex].coefficients.ebqe_porosity
else:
self.ebqe_porosity = np.ones( modelList[self.flowModelIndex].ebqe[('velocity', 0)].shape, 'd')
else:
self.velocity_dof_u = np.zeros(self.model.u[0].dof.shape, 'd')
self.velocity_dof_v = np.zeros(self.model.u[0].dof.shape, 'd')
if self.nd == 2:
self.velocity_dof_w = self.velocity_dof_v.copy()
else:
self.velocity_dof_w = np.zeros(self.model.u[0].dof.shape, 'd')
self.q_porosity = np.ones(self.q[('u', 0)].shape, 'd')
self.ebqe_porosity = np.ones(self.ebqe[('u', 0)].shape, 'd')
#
#assert self.kappa_modelIndex is not None and self.kappa_modelIndex < len(modelList), "Dissipation: invalid index for dissipation model allowed range: [0,%s]" % len(modelList)
if self.kappa_modelIndex is not None: # keep for debugging for now
# assume have q,ebqe always
self.q_kappa = modelList[self.kappa_modelIndex].q[('u', 0)]
self.ebqe_kappa = modelList[self.kappa_modelIndex].ebqe[('u', 0)]
self.q_grad_kappa = modelList[self.kappa_modelIndex].q[('grad(u)', 0)]
if ('u', 0) in modelList[self.kappa_modelIndex].ebq:
self.ebq_kappa = modelList[self.kappa_modelIndex].ebq[('u', 0)]
else:
self.q_kappa = np.zeros(self.model.q[('u', 0)].shape, 'd')
self.q_kappa.fill(self.default_kappa)
self.ebqe_kappa = np.zeros(self.model.ebqe[('u', 0)].shape, 'd')
self.ebqe_kappa.fill(self.default_kappa)
self.q_grad_kappa = np.zeros(self.model.q[('grad(u)', 0)].shape, 'd')
if ('u', 0) in self.model.ebq:
self.ebq_kappa = np.zeros(self.model.ebq[('u', 0)].shape, 'd')
self.ebq_kappa.fill(self.default_kappa)
#
if self.VOS_modelIndex is not None:
self.vosModel = model[self.VOS_modelIndex ]
self.q_vos = modelList[self.VOS_modelIndex].q[('u', 0)]
self.grad_vos = modelList[self.VOS_modelIndex].q[('grad(u)', 0)]
self.ebqe_vos = modelList[self.VOS_modelIndex].ebqe[('u', 0)]
self.ebqe_grad_vos = modelList[self.VOS_modelIndex].ebqe[('grad(u)', 0)]
else:
self.q_vos = self.model.q[('u', 0)]
self.grad_vos = self.model.q[('u', 0)]
self.ebqe_vos = self.model.ebqe[('u', 0)]
self.ebqe_grad_vos = self.model.ebqe[('u', 0)]
if self.SED_modelIndex is not None:
self.rho_s=modelList[self.SED_modelIndex].coefficients.rho_s
self.vs=modelList[self.SED_modelIndex].q[('u', 0)]
self.ebqe_vs=modelList[self.SED_modelIndex].ebqe[('u', 0)]
else:
self.rho_s=self.rho_0
self.vs=self.q_v
self.ebqe_vs=self.ebqe_v
#
def initializeElementQuadrature(self, t, cq):
if self.flowModelIndex is None:
self.q_v = np.ones(cq[('f', 0)].shape, 'd')
self.q_grad_u = np.ones(cq[('grad(u)', 0)].shape, 'd')
self.q_grad_v = np.ones(cq[('grad(u)', 0)].shape, 'd')
if self.nd == 2:
self.q_grad_w = self.q_grad_v.copy()
else:
self.q_grad_w = np.ones(cq[('grad(u)', 0)].shape, 'd')
if self.kappa_modelIndex is None:
self.q_kappa = np.ones(cq[('u', 0)].shape, 'd')
self.q_kappa.fill(self.default_kappa)
self.q_grad_kappa = np.zeros(cq[('grad(u)', 0)].shape, 'd')
def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):
if self.flowModelIndex is None:
self.ebq_v = np.ones(cebq[('f', 0)].shape, 'd')
self.ebq_grad_u = np.ones(cebq[('grad(u)', 0)].shape, 'd')
self.ebq_grad_v = np.ones(cebq[('grad(u)', 0)].shape, 'd')
if self.nd == 2:
self.ebq_grad_w = self.ebq_grad_v.copy()
else:
self.ebq_grad_w = np.ones(cebq[('grad(u)', 0)].shape, 'd')
if self.kappa_modelIndex is None:
self.ebq_kappa = np.ones(cebq[('u', 0)].shape, 'd')
self.ebq_kappa.fill(self.default_kappa)
def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):
if self.flowModelIndex is None:
self.ebqe_v = np.ones(cebqe[('f', 0)].shape, 'd')
self.ebqe_grad_u = np.ones(cebqe[('grad(u)', 0)].shape, 'd')
self.ebqe_grad_v = np.ones(cebqe[('grad(u)', 0)].shape, 'd')
self.ebqe_grad_w = np.ones(cebqe[('grad(u)', 0)].shape, 'd')
if self.kappa_modelIndex is None:
self.ebqe_kappa = np.ones(cebqe[('u', 0)].shape, 'd')
self.ebqe_kappa.fill(self.default_kappa)
def preStep(self, t, firstStep=False):
copyInstructions = {}
return copyInstructions
def postStep(self, t, firstStep=False):
self.u_old_dof = np.copy(self.model.u[0].dof)
for eN in range(self.model.q[('u',0)].shape[0]):
for k in range(self.model.q[('u',0)].shape[1]):
self.model.q[('u',0)][eN,k] = max( self.model.q[('u',0)][eN,k], 1e-10)
if ('u', 0) in self.model.ebq:
for eN in range(self.model.ebq[('u',0)].shape[0]):
for k in range(self.model.ebq[('u',0)].shape[1]):
for l in range(len(self.model.ebq[('u',0)][eN,k])):
self.model.ebq[('u',0)][eN,k,l] = max( self.model.ebq[('u',0)][eN,k,l], 1e-10)
for eN in range(self.model.ebqe[('u',0)].shape[0]):
for k in range(self.model.ebqe[('u',0)].shape[1]):
self.model.ebqe[('u',0)][eN,k] = max( self.model.ebqe[('u',0)][eN,k], 1e-10)
copyInstructions = {}
return copyInstructions
def updateToMovingDomain(self, t, c):
# in a moving domain simulation the velocity coming in is already for the moving domain
pass
def evaluate(self, t, c):
# mwf debug
# print "Dissipationcoeficients eval t=%s " % t
if c[('f', 0)].shape == self.q_v.shape:
v = self.q_v
phi = self.q_phi
grad_u = self.q_grad_u
grad_v = self.q_grad_v
grad_w = self.q_grad_w
kappa = self.q_kappa
elif c[('f', 0)].shape == self.ebqe_v.shape:
v = self.ebqe_v
phi = self.ebqe_phi
grad_u = self.ebqe_grad_u
grad_v = self.ebqe_grad_v
grad_w = self.ebqe_grad_w
kappa = self.ebqe_kappa
elif ((self.ebq_v is not None and self.ebq_phi is not None and self.ebq_grad_u is not None and self.ebq_grad_v is not None and self.ebq_grad_w is not None and self.ebq_kappa is not None) and c[('f', 0)].shape == self.ebq_v.shape):
v = self.ebq_v
phi = self.ebq_phi
grad_u = self.ebq_grad_u
grad_v = self.ebq_grad_v
grad_w = self.ebqe_grad_w
kappa = self.ebq_kappa
else:
v = None
phi = None
grad_u = None
grad_v = None
grad_w = None
if v is not None:
if self.nd == 2:
self.kEpsilon_epsilon_2D_Evaluate_sd(self.sigma_e,
self.c_1,
self.c_2,
self.c_mu,
self.c_e,
self.nu,
velocity,
gradu,
gradv,
c[('u', 0)],
kappa,
c[('m', 0)],
c[('dm', 0, 0)],
c[('f', 0)],
c[('df', 0, 0)],
c[('a', 0, 0)],
c[('da', 0, 0, 0)],
c[('r', 0)],
c[('dr', 0, 0)])
else:
self.kEpsilon_epsilon_3D_Evaluate_sd(self.sigma_e,
self.c_1,
self.c_2,
self.c_mu,
self.c_e,
self.nu,
velocity,
gradu,
gradv,
gradw,
c[('u', 0)],
kappa,
c[('m', 0)],
c[('dm', 0, 0)],
c[('f', 0)],
c[('df', 0, 0)],
c[('a', 0, 0)],
c[('da', 0, 0, 0)],
c[('r', 0)],
c[('dr', 0, 0)])
class LevelModel(proteus.Transport.OneLevelTransport):
nCalls = 0
def __init__(self,
uDict,
phiDict,
testSpaceDict,
matType,
dofBoundaryConditionsDict,
dofBoundaryConditionsSetterDict,
coefficients,
elementQuadrature,
elementBoundaryQuadrature,
fluxBoundaryConditionsDict=None,
advectiveFluxBoundaryConditionsSetterDict=None,
diffusiveFluxBoundaryConditionsSetterDictDict=None,
stressTraceBoundaryConditionsSetterDict=None,
stabilization=None,
shockCapturing=None,
conservativeFluxDict=None,
numericalFluxType=None,
TimeIntegrationClass=None,
massLumping=False,
reactionLumping=False,
options=None,
name='defaultName',
reuse_trial_and_test_quadrature=True,
sd = True,
movingDomain=False,
bdyNullSpace=False):
#
# set the objects describing the method and boundary conditions
#
self.bdyNullSpace=bdyNullSpace
self.movingDomain=movingDomain
self.tLast_mesh=None
#
self.name = name
self.sd = sd
self.Hess = False
self.lowmem = True
self.timeTerm = True # allow turning off the time derivative
# self.lowmem=False
self.testIsTrial = True
self.phiTrialIsTrial = True
self.u = uDict
self.ua = {} # analytical solutions
self.phi = phiDict
self.dphi = {}
self.matType = matType
# try to reuse test and trial information across components if spaces are the same
self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature # True#False
if self.reuse_test_trial_quadrature:
for ci in range(1, coefficients.nc):
assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, "to reuse_test_trial_quad all femSpaces must be the same!"
# Simplicial Mesh
self.mesh = self.u[0].femSpace.mesh # assume the same mesh for all components for now
self.testSpace = testSpaceDict
self.dirichletConditions = dofBoundaryConditionsDict
self.dirichletNodeSetList = None # explicit Dirichlet conditions for now, no Dirichlet BC constraints
self.coefficients = coefficients
self.coefficients.initializeMesh(self.mesh)
self.nc = self.coefficients.nc
self.stabilization = stabilization
self.shockCapturing = shockCapturing
self.conservativeFlux = conservativeFluxDict # no velocity post-processing for now
self.fluxBoundaryConditions = fluxBoundaryConditionsDict
self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict
self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict
# determine whether the stabilization term is nonlinear
self.stabilizationIsNonlinear = False
# cek come back
if self.stabilization is not None:
for ci in range(self.nc):
if ci in coefficients.mass:
for flag in list(coefficients.mass[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.advection:
for flag in list(coefficients.advection[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.diffusion:
for diffusionDict in list(coefficients.diffusion[ci].values()):
for flag in list(diffusionDict.values()):
if flag != 'constant':
self.stabilizationIsNonlinear = True
if ci in coefficients.potential:
for flag in list(coefficients.potential[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.reaction:
for flag in list(coefficients.reaction[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
if ci in coefficients.hamiltonian:
for flag in list(coefficients.hamiltonian[ci].values()):
if flag == 'nonlinear':
self.stabilizationIsNonlinear = True
# determine if we need element boundary storage
self.elementBoundaryIntegrals = {}
for ci in range(self.nc):
self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or
(numericalFluxType is not None) or
(self.fluxBoundaryConditions[ci] == 'outFlow') or
(self.fluxBoundaryConditions[ci] == 'mixedFlow') or
(self.fluxBoundaryConditions[ci] == 'setFlow'))
#
# calculate some dimensions
#
self.nSpace_global = self.u[0].femSpace.nSpace_global # assume same space dim for all variables
self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]
self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]
self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]
self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]
self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]
self.nVDOF_element = sum(self.nDOF_trial_element)
self.nFreeVDOF_global = sum(self.nFreeDOF_global)
#
proteus.NonlinearSolvers.NonlinearEquation.__init__(self, self.nFreeVDOF_global)
#
# build the quadrature point dictionaries from the input (this
# is just for convenience so that the input doesn't have to be
# complete)
#
elementQuadratureDict = {}
elemQuadIsDict = isinstance(elementQuadrature, dict)
if elemQuadIsDict: # set terms manually
for I in self.coefficients.elementIntegralKeys:
if I in elementQuadrature:
elementQuadratureDict[I] = elementQuadrature[I]
else:
elementQuadratureDict[I] = elementQuadrature['default']
else:
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[I] = elementQuadrature
if self.stabilization is not None:
for I in self.coefficients.elementIntegralKeys:
if elemQuadIsDict:
if I in elementQuadrature:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature[I]
else:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature['default']
else:
elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature
if self.shockCapturing is not None:
for ci in self.shockCapturing.components:
if elemQuadIsDict:
if ('numDiff', ci, ci) in elementQuadrature:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[('numDiff', ci, ci)]
else:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature['default']
else:
elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature
if massLumping:
for ci in list(self.coefficients.mass.keys()):
elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
if reactionLumping:
for ci in list(self.coefficients.mass.keys()):
elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
elementBoundaryQuadratureDict = {}
if isinstance(elementBoundaryQuadrature, dict): # set terms manually
for I in self.coefficients.elementBoundaryIntegralKeys:
if I in elementBoundaryQuadrature:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]
else:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']
else:
for I in self.coefficients.elementBoundaryIntegralKeys:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature
#
# find the union of all element quadrature points and
# build a quadrature rule for each integral that has a
# weight at each point in the union
# mwf include tag telling me which indices are which quadrature rule?
(self.elementQuadraturePoints, self.elementQuadratureWeights,
self.elementQuadratureRuleIndeces) = proteus.Quadrature.buildUnion(elementQuadratureDict)
self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]
self.nQuadraturePoints_global = self.nQuadraturePoints_element * self.mesh.nElements_global
#
# Repeat the same thing for the element boundary quadrature
#
(self.elementBoundaryQuadraturePoints,
self.elementBoundaryQuadratureWeights,
self.elementBoundaryQuadratureRuleIndeces) = proteus.Quadrature.buildUnion(elementBoundaryQuadratureDict)
self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]
self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global *
self.mesh.nElementBoundaries_element *
self.nElementBoundaryQuadraturePoints_elementBoundary)
# if isinstance(self.u[0].femSpace,C0_AffineLinearOnSimplexWithNodalBasis):
# print self.nQuadraturePoints_element
# if self.nSpace_global == 3:
# assert(self.nQuadraturePoints_element == 5)
# elif self.nSpace_global == 2:
# assert(self.nQuadraturePoints_element == 6)
# elif self.nSpace_global == 1:
# assert(self.nQuadraturePoints_element == 3)
#
# print self.nElementBoundaryQuadraturePoints_elementBoundary
# if self.nSpace_global == 3:
# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)
# elif self.nSpace_global == 2:
# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)
# elif self.nSpace_global == 1:
# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 1)
#
# storage dictionaries
self.scalars_element = set()
#
# simplified allocations for test==trial and also check if space is mixed or not
#
self.q = {}
self.ebq = {}
self.ebq_global = {}
self.ebqe = {}
self.phi_ip = {}
# mesh
#self.q['x'] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')
self.ebqe['x'] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary, 3), 'd')
self.ebqe['n'] = np.zeros(
(self.mesh.nExteriorElementBoundaries_global,
self.nElementBoundaryQuadraturePoints_elementBoundary,
self.nSpace_global),
'd')
self.q[('u', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('grad(u)', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
#diffusion, isotropic
self.q[('a', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
self.q[('da', 0, 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
# linear potential
self.q[('phi', 0)] = self.q[('u', 0)]
self.q[('grad(phi)', 0)] = self.q[('grad(u)', 0)]
self.q[('dphi', 0, 0)] = np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
# mass
self.q[('m', 0)] = self.q[('u', 0)]
self.q[('m_last', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('m_tmp', 0)] = self.q[('u', 0)]
self.q[('cfl', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.q[('numDiff', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
self.ebqe[('u', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('grad(u)', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,
self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')
self.ebqe[('advectiveFlux_bc_flag', 0)] = np.zeros(
(self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'i')
self.ebqe[('advectiveFlux_bc', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('advectiveFlux', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('diffusiveFlux_bc_flag', 0, 0)] = np.zeros(
(self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'i')
self.ebqe[('diffusiveFlux_bc', 0, 0)] = np.zeros(
(self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.ebqe[('penalty')] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.points_elementBoundaryQuadrature = set()
self.scalars_elementBoundaryQuadrature = set([('u', ci) for ci in range(self.nc)])
self.vectors_elementBoundaryQuadrature = set()
self.tensors_elementBoundaryQuadrature = set()
self.inflowBoundaryBC = {}
self.inflowBoundaryBC_values = {}
self.inflowFlux = {}
for cj in range(self.nc):
self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,), 'i')
self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')
self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
self.internalNodes = set(range(self.mesh.nNodes_global))
# identify the internal nodes this is ought to be in mesh
# \todo move this to mesh
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]
ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN, 0]
for i in range(self.mesh.nNodes_element):
if i != ebN_element:
I = self.mesh.elementNodesArray[eN_global, i]
self.internalNodes -= set([I])
self.nNodes_internal = len(self.internalNodes)
self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')
for nI, n in enumerate(self.internalNodes):
self.internalNodesArray[nI] = n
#
del self.internalNodes
self.internalNodes = None
prof.logEvent("Updating local to global mappings", 2)
self.updateLocal2Global()
prof.logEvent("Building time integration object", 2)
prof.logEvent(prof.memory("inflowBC, internalNodes,updateLocal2Global", "OneLevelTransport"), level=4)
# mwf for interpolating subgrid error for gradients etc
if self.stabilization and self.stabilization.usesGradientStabilization:
self.timeIntegration = TimeIntegrationClass(self, integrateInterpolationPoints=True)
else:
self.timeIntegration = TimeIntegrationClass(self)
if options is not None:
self.timeIntegration.setFromOptions(options)
prof.logEvent(prof.memory("TimeIntegration", "OneLevelTransport"), level=4)
prof.logEvent("Calculating numerical quadrature formulas", 2)
self.calculateQuadrature()
self.setupFieldStrides()
comm = proteus.Comm.get()
self.comm = comm
if comm.size() > 1:
assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, "You must use a numerical flux to apply weak boundary conditions for parallel runs"
prof.logEvent(prof.memory("stride+offset", "OneLevelTransport"), level=4)
if numericalFluxType is not None:
if options is None or options.periodicDirichletConditions is None:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict)
else:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict,
options.periodicDirichletConditions)
else:
self.numericalFlux = None
# set penalty terms
# cek todo move into numerical flux initialization
if 'penalty' in self.ebq_global:
for ebN in range(self.mesh.nElementBoundaries_global):
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant, \
(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))
# penalty term
# cek move to Numerical flux initialization
if 'penalty' in self.ebqe:
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant, \
self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)
prof.logEvent(prof.memory("numericalFlux", "OneLevelTransport"), level=4)
self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray
# use post processing tools to get conservative fluxes, None by default
from proteus import PostProcessingTools
self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)
prof.logEvent(prof.memory("velocity postprocessor", "OneLevelTransport"), level=4)
# helper for writing out data storage
from proteus import Archiver
self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
# TODO get rid of this
# mwf can I use the numericalFlux's flag information?
for ci, fbcObject in list(self.fluxBoundaryConditionsObjectsDict.items()):
self.ebqe[('advectiveFlux_bc_flag', ci)] = np.zeros(self.ebqe[('advectiveFlux_bc', ci)].shape, 'i')
for t, g in list(fbcObject.advectiveFluxBoundaryConditionsDict.items()):
if ci in self.coefficients.advection:
self.ebqe[('advectiveFlux_bc', ci)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('advectiveFlux_bc_flag', ci)][t[0], t[1]] = 1
for ck, diffusiveFluxBoundaryConditionsDict in list(fbcObject.diffusiveFluxBoundaryConditionsDictDict.items()):
self.ebqe[('diffusiveFlux_bc_flag', ck, ci)] = np.zeros(self.ebqe[('diffusiveFlux_bc', ck, ci)].shape, 'i')
for t, g in list(diffusiveFluxBoundaryConditionsDict.items()):
self.ebqe[('diffusiveFlux_bc', ck, ci)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('diffusiveFlux_bc_flag', ck, ci)][t[0], t[1]] = 1
if hasattr(self.numericalFlux, 'setDirichletValues'):
self.numericalFlux.setDirichletValues(self.ebqe)
if not hasattr(self.numericalFlux, 'isDOFBoundary'):
self.numericalFlux.isDOFBoundary = {0: np.zeros(self.ebqe[('u', 0)].shape, 'i')}
if not hasattr(self.numericalFlux, 'ebqe'):
self.numericalFlux.ebqe = {('u', 0): np.zeros(self.ebqe[('u', 0)].shape, 'd')}
# TODO how to handle redistancing calls for calculateCoefficients,calculateElementResidual etc
self.globalResidualDummy = None
compKernelFlag = 0
if self.nSpace_global == 2:
self.dissipation = cDissipation2D_base(self.nSpace_global,
self.nQuadraturePoints_element,
self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
self.nElementBoundaryQuadraturePoints_elementBoundary,
compKernelFlag,
self.coefficients.aDarcy,
self.coefficients.betaForch,
self.coefficients.grain,
self.coefficients.packFraction,
self.coefficients.packMargin,
self.coefficients.maxFraction,
self.coefficients.frFraction,
self.coefficients.sigmaC,
self.coefficients.C3e,
self.coefficients.C4e,
self.coefficients.eR,
self.coefficients.fContact,
self.coefficients.mContact,
self.coefficients.nContact,
self.coefficients.angFriction,
self.coefficients.vos_limiter,
self.coefficients.mu_fr_limiter)
else:
self.dissipation = cDissipation_base(self.nSpace_global,
self.nQuadraturePoints_element,
self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
self.nElementBoundaryQuadraturePoints_elementBoundary,
compKernelFlag,
self.coefficients.aDarcy,
self.coefficients.betaForch,
self.coefficients.grain,
self.coefficients.packFraction,
self.coefficients.packMargin,
self.coefficients.maxFraction,
self.coefficients.frFraction,
self.coefficients.sigmaC,
self.coefficients.C3e,
self.coefficients.C4e,
self.coefficients.eR,
self.coefficients.fContact,
self.coefficients.mContact,
self.coefficients.nContact,
self.coefficients.angFriction,
self.coefficients.vos_limiter,
self.coefficients.mu_fr_limiter)
self.forceStrongConditions = False
if self.forceStrongConditions:
self.dirichletConditionsForceDOF = DOFBoundaryConditions(self.u[0].femSpace, dofBoundaryConditionsSetterDict[0], weakDirichletConditions=False)
if self.movingDomain:
self.MOVING_DOMAIN = 1.0
else:
self.MOVING_DOMAIN = 0.0
# cek hack
self.movingDomain = False
self.MOVING_DOMAIN = 0.0
if self.mesh.nodeVelocityArray is None:
self.mesh.nodeVelocityArray = np.zeros(self.mesh.nodeArray.shape, 'd')
# mwf these are getting called by redistancing classes,
def calculateCoefficients(self):
pass
def calculateElementResidual(self):
if self.globalResidualDummy is not None:
self.getResidual(self.u[0].dof, self.globalResidualDummy)
def getResidual(self, u, r):
import pdb
import copy
"""
Calculate the element residuals and add in to the global residual
"""
r.fill(0.0)
# Load the unknowns into the finite element dof
self.timeIntegration.calculateCoefs()
# print "***************max/min(m_last)*********************",max(self.timeIntegration.m_last[0].flat[:]),min(self.timeIntegration.m_last[0].flat[:])
# print "***************max/min(m_last)*********************",max(-self.timeIntegration.dt*self.timeIntegration.beta_bdf[0].flat[:]),min(-self.timeIntegration.dt*self.timeIntegration.beta_bdf[0].flat[:]),
self.timeIntegration.calculateU(u)
self.setUnknowns(self.timeIntegration.u)
# cek can put in logic to skip of BC's don't depend on t or u
# Dirichlet boundary conditions
# if hasattr(self.numericalFlux,'setDirichletValues'):
self.numericalFlux.setDirichletValues(self.ebqe)
# flux boundary conditions
for t, g in list(self.fluxBoundaryConditionsObjectsDict[0].advectiveFluxBoundaryConditionsDict.items()):
self.ebqe[('advectiveFlux_bc', 0)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('advectiveFlux_bc_flag', 0)][t[0], t[1]] = 1
for ck, diffusiveFluxBoundaryConditionsDict in list(self.fluxBoundaryConditionsObjectsDict[0].diffusiveFluxBoundaryConditionsDictDict.items()):
for t, g in list(diffusiveFluxBoundaryConditionsDict.items()):
self.ebqe[('diffusiveFlux_bc', ck, 0)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('diffusiveFlux_bc_flag', ck, 0)][t[0], t[1]] = 1
# self.shockCapturing.lag=True
if self.forceStrongConditions:
for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):
self.u[0].dof[dofN] = g(self.dirichletConditionsForceDOF.DOFBoundaryPointDict[dofN], self.timeIntegration.t)
#
# mwf debug
#import pdb
# pdb.set_trace()
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_velocity_dof"] = self.mesh.nodeVelocityArray
argsDict["MOVING_DOMAIN"] = self.MOVING_DOMAIN
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["nu_0"] = self.coefficients.nu_0
argsDict["nu_1"] = self.coefficients.nu_1
argsDict["sigma_e"] = self.coefficients.sigma_e
argsDict["c_mu"] = self.coefficients.c_mu
argsDict["c_1"] = self.coefficients.c_1
argsDict["c_2"] = self.coefficients.c_2
argsDict["c_e"] = self.coefficients.c_e
argsDict["rho_0"] = self.coefficients.rho_0
argsDict["rho_1"] = self.coefficients.rho_1
argsDict["sedFlag"] = self.coefficients.sedFlag
argsDict["q_vos"] = self.coefficients.q_vos
argsDict["q_vos_gradc"] = self.coefficients.grad_vos
argsDict["ebqe_q_vos"] = self.coefficients.ebqe_vos
argsDict["ebqe_q_vos_gradc"] = self.coefficients.ebqe_grad_vos
argsDict["rho_f"] = self.coefficients.rho_0
argsDict["rho_s"] = self.coefficients.rho_s
argsDict["vs"] = self.coefficients.vs
argsDict["ebqe_vs"] = self.coefficients.ebqe_vs
argsDict["g"] = self.coefficients.g
argsDict["dissipation_model_flag"] = self.coefficients.dissipation_model_flag
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["alphaBDF"] = self.timeIntegration.alpha_bdf
argsDict["lag_shockCapturing"] = self.shockCapturing.lag
argsDict["shockCapturingDiffusion"] = self.shockCapturing.shockCapturingFactor
argsDict["sc_uref"] = self.coefficients.sc_uref
argsDict["sc_alpha"] = self.coefficients.sc_beta
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.mesh.elementDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["u_dof_old"] = self.coefficients.u_old_dof
argsDict["velocity"] = self.coefficients.q_v
argsDict["phi_ls"] = self.coefficients.q_phi
argsDict["q_kappa"] = self.coefficients.q_kappa
argsDict["q_grad_kappa"] = self.coefficients.q_grad_kappa
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["velocity_dof_u"] = self.coefficients.velocity_dof_u
argsDict["velocity_dof_v"] = self.coefficients.velocity_dof_v
argsDict["velocity_dof_w"] = self.coefficients.velocity_dof_w
argsDict["q_m"] = self.timeIntegration.m_tmp[0]
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_grad_u"] = self.q[('grad(u)', 0)]
argsDict["q_m_betaBDF"] = self.timeIntegration.beta_bdf[0]
argsDict["cfl"] = self.q[('cfl', 0)]
argsDict["q_numDiff_u"] = self.shockCapturing.numDiff[0]
argsDict["q_numDiff_u_last"] = self.shockCapturing.numDiff_last[0]
argsDict["ebqe_penalty_ext"] = self.ebqe['penalty']
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = r
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["ebqe_velocity_ext"] = self.coefficients.ebqe_v
argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u', 0)]
argsDict["isAdvectiveFluxBoundary_u"] = self.ebqe[('advectiveFlux_bc_flag', 0)]
argsDict["ebqe_bc_advectiveFlux_u_ext"] = self.ebqe[('advectiveFlux_bc', 0)]
argsDict["isDiffusiveFluxBoundary_u"] = self.ebqe[('diffusiveFlux_bc_flag', 0, 0)]
argsDict["ebqe_bc_diffusiveFlux_u_ext"] = self.ebqe[('diffusiveFlux_bc', 0, 0)]
argsDict["ebqe_phi"] = self.coefficients.ebqe_phi
argsDict["epsFact"] = self.coefficients.epsFact
argsDict["ebqe_kappa"] = self.coefficients.ebqe_kappa
argsDict["ebqe_porosity"] = self.coefficients.ebqe_porosity
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_flux"] = self.ebqe[('advectiveFlux', 0)]
self.dissipation.calculateResidual(argsDict)
if self.forceStrongConditions:
for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):
r[dofN] = 0
if self.stabilization:
self.stabilization.accumulateSubgridMassHistory(self.q)
prof.logEvent("Global residual", level=9, data=r)
# mwf decide if this is reasonable for keeping solver statistics
self.nonlinear_function_evaluations += 1
if self.globalResidualDummy is None:
self.globalResidualDummy = np.zeros(r.shape, 'd')
    def getJacobian(self, jacobian):
        """
        Evaluate the global Jacobian of the residual at the current solution.

        Parameters
        ----------
        jacobian : sparse matrix (CSR)
            Global Jacobian; its nonzero values are zeroed here and then filled
            in-place by the compiled Jacobian kernel.

        Returns
        -------
        The same ``jacobian`` object, filled in-place.
        """
        cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,
                                       jacobian)
        # Pack every array/scalar the compiled Jacobian kernel needs.
        argsDict = cArgumentsDict.ArgumentsDict()
        # -- reference element / mesh data
        argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
        argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
        argsDict["mesh_dof"] = self.mesh.nodeArray
        argsDict["mesh_velocity_dof"] = self.mesh.nodeVelocityArray
        argsDict["MOVING_DOMAIN"] = self.MOVING_DOMAIN
        argsDict["mesh_l2g"] = self.mesh.elementNodesArray
        argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
        argsDict["u_trial_ref"] = self.u[0].femSpace.psi
        argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
        argsDict["u_test_ref"] = self.u[0].femSpace.psi
        argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
        argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
        argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
        argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
        argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
        argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
        argsDict["nElements_global"] = self.mesh.nElements_global
        # -- physical coefficients
        argsDict["nu_0"] = self.coefficients.nu_0
        argsDict["nu_1"] = self.coefficients.nu_1
        argsDict["sigma_e"] = self.coefficients.sigma_e
        argsDict["c_mu"] = self.coefficients.c_mu
        argsDict["c_1"] = self.coefficients.c_1
        argsDict["c_2"] = self.coefficients.c_2
        argsDict["c_e"] = self.coefficients.c_e
        argsDict["rho_0"] = self.coefficients.rho_0
        argsDict["rho_1"] = self.coefficients.rho_1
        # -- time integration / stabilization parameters
        argsDict["dissipation_model_flag"] = self.coefficients.dissipation_model_flag
        argsDict["useMetrics"] = self.coefficients.useMetrics
        argsDict["alphaBDF"] = self.timeIntegration.alpha_bdf
        argsDict["lag_shockCapturing"] = self.shockCapturing.lag
        argsDict["shockCapturingDiffusion"] = self.shockCapturing.shockCapturingFactor
        # -- solution data
        argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
        argsDict["elementDiameter"] = self.mesh.elementDiametersArray
        argsDict["u_dof"] = self.u[0].dof
        argsDict["u_dof_old"] = self.coefficients.u_old_dof
        argsDict["velocity"] = self.coefficients.q_v
        argsDict["phi_ls"] = self.coefficients.q_phi
        argsDict["q_kappa"] = self.coefficients.q_kappa
        argsDict["q_grad_kappa"] = self.coefficients.q_grad_kappa
        argsDict["q_porosity"] = self.coefficients.q_porosity
        # -- sediment / VRANS data
        argsDict["sedFlag"] = self.coefficients.sedFlag
        argsDict["q_vos"] = self.coefficients.q_vos
        argsDict["q_vos_gradc"] = self.coefficients.grad_vos
        argsDict["ebqe_q_vos"] = self.coefficients.ebqe_vos
        argsDict["ebqe_q_vos_gradc"] = self.coefficients.ebqe_grad_vos
        argsDict["rho_f"] = self.coefficients.rho_0
        argsDict["rho_s"] = self.coefficients.rho_s
        argsDict["vs"] = self.coefficients.vs
        argsDict["ebqe_vs"] = self.coefficients.ebqe_vs
        argsDict["g"] = self.coefficients.g
        argsDict["velocity_dof_u"] = self.coefficients.velocity_dof_u
        argsDict["velocity_dof_v"] = self.coefficients.velocity_dof_v
        argsDict["velocity_dof_w"] = self.coefficients.velocity_dof_w
        argsDict["q_m_betaBDF"] = self.timeIntegration.beta_bdf[0]
        argsDict["cfl"] = self.q[('cfl', 0)]
        argsDict["q_numDiff_u_last"] = self.shockCapturing.numDiff_last[0]
        argsDict["ebqe_penalty_ext"] = self.ebqe['penalty']
        # -- sparse matrix layout and output
        argsDict["csrRowIndeces_u_u"] = self.csrRowIndeces[(0, 0)]
        argsDict["csrColumnOffsets_u_u"] = self.csrColumnOffsets[(0, 0)]
        argsDict["globalJacobian"] = jacobian.getCSRrepresentation()[2]
        # -- exterior boundary data
        argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
        argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
        argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
        argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
        argsDict["ebqe_velocity_ext"] = self.coefficients.ebqe_v
        argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
        argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u', 0)]
        argsDict["isAdvectiveFluxBoundary_u"] = self.ebqe[('advectiveFlux_bc_flag', 0)]
        argsDict["ebqe_bc_advectiveFlux_u_ext"] = self.ebqe[('advectiveFlux_bc', 0)]
        argsDict["isDiffusiveFluxBoundary_u"] = self.ebqe[('diffusiveFlux_bc_flag', 0, 0)]
        argsDict["ebqe_bc_diffusiveFlux_u_ext"] = self.ebqe[('diffusiveFlux_bc', 0, 0)]
        argsDict["csrColumnOffsets_eb_u_u"] = self.csrColumnOffsets_eb[(0, 0)]
        argsDict["ebqe_phi"] = self.coefficients.ebqe_phi
        argsDict["epsFact"] = self.coefficients.epsFact
        argsDict["ebqe_kappa"] = self.coefficients.ebqe_kappa
        argsDict["ebqe_porosity"] = self.coefficients.ebqe_porosity
        self.dissipation.calculateJacobian(argsDict)  # VRANS
        # Apply strongly-enforced Dirichlet conditions directly to the Jacobian:
        # put `scaling` on the diagonal entry of each forced row and zero the rest.
        if self.forceStrongConditions:
            scaling = 1.0  # probably want to add some scaling to match non-dirichlet diagonals in linear system
            for dofN in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.keys()):
                global_dofN = dofN
                for i in range(self.rowptr[global_dofN], self.rowptr[global_dofN + 1]):
                    if (self.colind[i] == global_dofN):
                        # print "RBLES forcing residual cj = %s dofN= %s global_dofN= %s was self.nzval[i]= %s now =%s " % (cj,dofN,global_dofN,self.nzval[i],scaling)
                        self.nzval[i] = scaling
                    else:
                        self.nzval[i] = 0.0
                        # print "RBLES zeroing residual cj = %s dofN= %s global_dofN= %s " % (cj,dofN,global_dofN)
        prof.logEvent("Jacobian ", level=10, data=jacobian)
        # mwf decide if this is reasonable for solver statistics
        self.nonlinear_function_jacobian_evaluations += 1
        return jacobian
def calculateElementQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points.
This function should be called only when the mesh changes.
"""
# self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,
# self.q['x'])
self.u[0].femSpace.elementMaps.getBasisValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)
self.coefficients.initializeElementQuadrature(self.timeIntegration.t, self.q)
if self.stabilization is not None:
self.stabilization.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)
self.stabilization.initializeTimeIntegration(self.timeIntegration)
if self.shockCapturing is not None:
self.shockCapturing.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)
def calculateElementBoundaryQuadrature(self):
pass
def calculateExteriorElementBoundaryQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points on global element boundaries.
This function should be called only when the mesh changes.
"""
#
# get physical locations of element boundary quadrature points
#
# assume all components live on the same mesh
self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,
self.ebqe['x'])
self.fluxBoundaryConditionsObjectsDict = dict([(cj, proteus.FemTools.FluxBoundaryConditions(self.mesh,
self.nElementBoundaryQuadraturePoints_elementBoundary,
self.ebqe[('x')],
getAdvectiveFluxBoundaryConditions=self.advectiveFluxBoundaryConditionsSetterDict[cj],
getDiffusiveFluxBoundaryConditions=self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))
for cj in list(self.advectiveFluxBoundaryConditionsSetterDict.keys())])
self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(self.timeIntegration.t, self.ebqe)
def estimate_mt(self):
pass
def calculateSolutionAtQuadrature(self):
pass
def calculateAuxiliaryQuantitiesAfterStep(self):
pass
|
mit
| -5,397,842,456,893,396,000
| 53.566795
| 238
| 0.570205
| false
|
jaekor91/xd-elg-scripts
|
produce-DECaLS-DR3-Tractor-DEEP2f234.py
|
1
|
4558
|
# Loading modules
import numpy as np
from os import listdir
from os.path import isfile, join
from astropy.io import ascii, fits
from astropy.wcs import WCS
import numpy.lib.recfunctions as rec
from xd_elg_utils import *
import sys
# Sentinel value — presumably used as a fill value for missing entries by the
# xd_elg_utils helpers; TODO confirm against that module.
large_random_constant = -999119283571
# Conversion factor: degrees -> arcseconds.
deg2arcsec=3600

# Directory containing the input catalogs and Tractor file subdirectories.
data_directory = "./"

# True if tractor files have been already downloaded.
tractor_file_downloaded = True
##############################################################################
# Step 1: either generate wget scripts for the relevant Tractor bricks (and
# exit so the user can run them), or proceed with already-downloaded files.
if not tractor_file_downloaded: # If the tractor files are not downloaded.
    print("1. Generate download scripts for relevant Tractor files.")
    print("This step generates three files that the user can use to download\n\
the relevant tractor files.")

    print("To identify relevant bricks use survey-bricks-dr3.fits which the user\n\
should have downloaded. Approximate field ranges.\n\
\n\
Field 2\n\
RA bounds: [251.3, 253.7]\n\
DEC bounds: [34.6, 35.3]\n\
\n\
Field 3\n\
RA bounds: [351.25, 353.8]\n\
DEC bounds: [-.2, .5]\n\
\n\
Field 4\n\
RA bounds: [36.4, 38]\n\
DEC bounds: [.3, 1.0]\n\
")
    # Brick list for DR3 with per-brick centers (ra, dec) and names.
    fits_bricks = fits.open(data_directory+"survey-bricks-dr3.fits")[1].data
    ra = fits_bricks['ra'][:]
    dec = fits_bricks['dec'][:]
    br_name = fits_bricks['brickname'][:]

    # Getting the brick names near the ranges specified below.
    tol = 0.25
    f2_bricks = return_bricknames(ra, dec, br_name,[251.3, 253.7],[34.6, 35.3],tol)
    f3_bricks = return_bricknames(ra, dec, br_name,[351.25, 353.8],[-.2, .5],tol)
    f4_bricks = return_bricknames(ra, dec, br_name,[36.4,38.],[.3, 1.0],tol)
    bricks = [f2_bricks, f3_bricks, f4_bricks]

    print("Generating download scripts. DR3-DEEP2f**-tractor-download.sh")
    portal_address = "http://portal.nersc.gov/project/cosmo/data/legacysurvey/dr3/tractor/"
    postfix = ".fits\n"
    prefix = "wget "
    for i in range(3):
        # BUGFIX: use a context manager so the script file is closed even if a
        # write fails (previously open()/close() with no protection).
        with open("DR3-DEEP2f%d-tractor-download.sh"%(i+2),"w") as f:
            for brick in bricks[i]:
                # Tractor files are grouped in subdirectories named after the
                # first three characters of the brick name.
                tractor_directory = brick[:3]
                brick_address = tractor_directory+"/tractor-"+brick+postfix
                download_command = prefix + portal_address + brick_address
                f.write(download_command)
    print("Completed")
    print("Exiting the program. Please download the necessary files using the script\n\
and re-run the program with tractor_file_downloaded=True.")
    sys.exit()
else:
    print("Proceeding using the downloaded tractor files.")
    print("Within data_directory, Tractor files should be \n\
saved in directories in \DR3-f**\.")
##############################################################################
print("2. Combine all Tractor files by field, append Tycho-2 stellar mask column, \n\
and mask objects using DEEP2 window funtions.")
print("2a. Combining the tractor files: Impose mask conditions (brick_primary==True\n\
and flux inverse variance positive).")
# combine_tractor comes from xd_elg_utils; it concatenates all Tractor bricks
# found in the given directory, keeping only rows that pass its mask cuts.
# Field 2
DR3f2 = combine_tractor(data_directory+"DR3-f2/")
# Field 3
DR3f3 = combine_tractor(data_directory+"DR3-f3/")
# Field 4
DR3f4 = combine_tractor(data_directory+"DR3-f4/")
print("Completed.")
print("2b. Append Tycho2 stark mask field.")
# apply_tycho (xd_elg_utils) appends a Tycho-2 star-mask column to each
# catalog using the "ELG" geometry.  NOTE(review): "stark" in the message
# above is presumably a typo for "star" — left as-is since it is runtime text.
# Field 2
DR3f2 = apply_tycho(DR3f2,"tycho2.fits",galtype="ELG")
# Field 3
DR3f3 = apply_tycho(DR3f3,"tycho2.fits",galtype="ELG")
# Field 4
DR3f4 = apply_tycho(DR3f4,"tycho2.fits",galtype="ELG")
print("Completed.")
print("2c. Impose DEEP2 window functions.")
# Keep only objects inside the DEEP2 photometric window functions; each field
# is covered by two or three window files, combined with logical OR.
# Field 2
idx = np.logical_or(window_mask(DR3f2["ra"], DR3f2["dec"], "windowf.21.fits"), window_mask(DR3f2["ra"], DR3f2["dec"], "windowf.22.fits"))
DR3f2_trimmed = DR3f2[idx]
# Field 3
idx = np.logical_or.reduce((window_mask(DR3f3["ra"], DR3f3["dec"], "windowf.31.fits"), window_mask(DR3f3["ra"], DR3f3["dec"], "windowf.32.fits"),window_mask(DR3f3["ra"], DR3f3["dec"], "windowf.33.fits")))
DR3f3_trimmed = DR3f3[idx]
# Field 4
# CONSISTENCY FIX: Field 4 previously wrapped the selection in np.copy();
# boolean-mask (advanced) indexing already returns a new array, so the extra
# copy was redundant and inconsistent with Fields 2 and 3.
idx = np.logical_or(window_mask(DR3f4["ra"], DR3f4["dec"], "windowf.41.fits"), window_mask(DR3f4["ra"], DR3f4["dec"], "windowf.42.fits"))
DR3f4_trimmed = DR3f4[idx]
print("Completed.")
##############################################################################
print("3. Save the trimmed catalogs.")
# Write each trimmed record array out as a FITS binary table.
# NOTE(review): 'clobber' was deprecated in astropy 1.3 in favor of
# 'overwrite' — verify which astropy version this project pins before
# changing it.
# Field 2
cols = fits.ColDefs(DR3f2_trimmed)
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto('DECaLS-DR3-Tractor-DEEP2f2.fits', clobber=True)
# Field 3
cols = fits.ColDefs(DR3f3_trimmed)
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto('DECaLS-DR3-Tractor-DEEP2f3.fits', clobber=True)
# Field 4
cols = fits.ColDefs(DR3f4_trimmed)
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto('DECaLS-DR3-Tractor-DEEP2f4.fits', clobber=True)
print("Completed.")
|
gpl-3.0
| -1,165,579,882,566,895,900
| 33.793893
| 204
| 0.667617
| false
|
FishyByte/SushiRNG
|
NIST/bitStreamTesting.py
|
1
|
1588
|
# Copyright (c) 2016 Christopher Asakawa, Nicholas McHale, Matthew O'Brien, Corey Aing
# This code is available under the "MIT License".
# Please see the file COPYING in this distribution
# for license terms.
# Python script to run NIST tests against a bitstream found in a textfile
import sys
from subprocess import Popen, PIPE
def main():
    # argv[1] = bitstream length, argv[2] = path to the ASCII bitstream file.
    stream_length = sys.argv[1]
    stream_path = sys.argv[2]
    runNist(stream_length, stream_path)
    showResults()
def runNist(bitStreamLength, path):
# Results and statistics .txt files are found in
# ./experiments/AlgorithmTesting/__respective test directory__/stats.txt
# Final Analysis Report will be shown automatically after running this script
# The number following the ./assess argument states the size of the bitstream
try:
p = Popen(["./assess", bitStreamLength], stdin=PIPE, stdout=PIPE)
p.stdin.write("0\n") # indicates that we want to select a file
p.stdin.write(path + "\n") # indicates the file path
p.stdin.write("1\n") # apply all tests
p.stdin.write("0\n") # indicates using default parameters
p.stdin.write("1\n") # indicates how many repeated tests against the bitstream size
# as long as the .txt file has enough bits
p.stdin.write("0\n") # indicates its an ASCII binary file consisting of 0's and 1's
print "Tests ran successfully"
except:
print "Script failed to execute"
def showResults():
try:
f = open("./experiments/AlgorithmTesting/finalAnalysisReport.txt", 'r')
print f.read()
except:
print "Results failed to show"
main()
|
mit
| -2,803,145,593,131,312,600
| 34.288889
| 92
| 0.68073
| false
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/managers.py
|
1
|
10952
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import sys
import logging
import os
from django.db import models
from django.conf import settings
from awx.main.utils.filters import SmartFilter
from awx.main.utils.pglock import advisory_lock
___all__ = ['HostManager', 'InstanceManager', 'InstanceGroupManager']
logger = logging.getLogger('awx.main.managers')
class HostManager(models.Manager):
    """Custom manager class for Hosts model."""
    def active_count(self):
        """Return count of active, unique hosts for licensing.
        Construction of query involves:
        - remove any ordering specified in model's Meta
        - Exclude hosts sourced from another Tower
        - Restrict the query to only return the name column
        - Only consider results that are unique
        - Return the count of this query
        """
        return self.order_by().exclude(inventory_sources__source='tower').values('name').distinct().count()
    def org_active_count(self, org_id):
        """Return count of active, unique hosts used by an organization.
        Construction of query involves:
        - remove any ordering specified in model's Meta
        - Exclude hosts sourced from another Tower
        - Consider only hosts where the canonical inventory is owned by the organization
        - Restrict the query to only return the name column
        - Only consider results that are unique
        - Return the count of this query
        """
        return self.order_by().exclude(
            inventory_sources__source='tower'
        ).filter(inventory__organization=org_id).values('name').distinct().count()
    def get_queryset(self):
        """When the parent instance of the host query set has a `kind=smart` and a `host_filter`
        set. Use the `host_filter` to generate the queryset for the hosts.
        """
        qs = super(HostManager, self).get_queryset()
        # ``self.instance`` only exists when this manager is accessed as a
        # related manager (e.g. ``inventory.hosts``); guard each attribute.
        if (hasattr(self, 'instance') and
                hasattr(self.instance, 'host_filter') and
                hasattr(self.instance, 'kind')):
            if self.instance.kind == 'smart' and self.instance.host_filter is not None:
                q = SmartFilter.query_from_string(self.instance.host_filter)
                if self.instance.organization_id:
                    q = q.filter(inventory__organization=self.instance.organization_id)
                # If we are using host_filters, disable the core_filters, this allows
                # us to access all of the available Host entries, not just the ones associated
                # with a specific FK/relation.
                #
                # If we don't disable this, a filter of {'inventory': self.instance} gets automatically
                # injected by the related object mapper.
                self.core_filters = {}
                qs = qs & q
                # Hosts with the same name can exist in multiple inventories;
                # collapse them so each name appears once.
                return qs.order_by('name', 'pk').distinct('name')
        return qs
def get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping):
    """Map each instance group name to the set of group names it overlaps.

    Two groups overlap when they share at least one instance.  Every group
    is included in its own set: for groups with instances this falls out of
    the instance mapping (each instance maps back to its groups), and for
    empty groups it is added explicitly so the group at least contains
    itself.

    The original implementation used a ``for``/``else`` whose ``else``
    branch always ran (the loop has no ``break``), with a comment implying
    it only ran for empty groups -- the unconditional ``add`` below makes
    the actual behavior explicit.

    Args:
        ig_instance_mapping (dict): group name -> set of instance hostnames.
        instance_ig_mapping (dict): instance hostname -> set of group names.

    Returns:
        dict: group name -> set of overlapping group names (including self).
    """
    ig_ig_mapping = {}
    for group_name, hostnames in ig_instance_mapping.items():
        # Union of all groups that this group's instances belong to.
        ig_ig_set = set()
        for instance_hostname in hostnames:
            ig_ig_set |= instance_ig_mapping[instance_hostname]
        ig_ig_set.add(group_name)  # always include self (no-op when present)
        ig_ig_mapping[group_name] = ig_ig_set
    return ig_ig_mapping
class InstanceManager(models.Manager):
    """A custom manager class for the Instance model.
    Provides "table-level" methods including getting the currently active
    instance or role.
    """
    def me(self):
        """Return the currently active instance."""
        # If we are running unit tests, return a stub record.
        if settings.IS_TESTING(sys.argv) or hasattr(sys, '_called_from_test'):
            return self.model(id=1,
                              hostname='localhost',
                              uuid='00000000-0000-0000-0000-000000000000')
        node = self.filter(hostname=settings.CLUSTER_HOST_ID)
        if node.exists():
            return node[0]
        raise RuntimeError("No instance found with the current cluster host id")
    def register(self, uuid=None, hostname=None, ip_address=None):
        """Create or refresh this node's Instance record.

        Returns a ``(changed, instance)`` tuple; ``changed`` is True when a
        record was created or its ip_address updated.  The advisory lock
        serializes concurrent registrations of the same hostname.
        """
        if not uuid:
            uuid = settings.SYSTEM_UUID
        if not hostname:
            hostname = settings.CLUSTER_HOST_ID
        with advisory_lock('instance_registration_%s' % hostname):
            if settings.AWX_AUTO_DEPROVISION_INSTANCES:
                # detect any instances with the same IP address.
                # if one exists, set it to None
                inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
                if inst_conflicting_ip.exists():
                    for other_inst in inst_conflicting_ip:
                        other_hostname = other_inst.hostname
                        other_inst.ip_address = None
                        other_inst.save(update_fields=['ip_address'])
                        logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
            instance = self.filter(hostname=hostname)
            if instance.exists():
                instance = instance.get()
                if instance.ip_address != ip_address:
                    instance.ip_address = ip_address
                    instance.save(update_fields=['ip_address'])
                    return (True, instance)
                else:
                    return (False, instance)
            # No existing record: create one with zero capacity; capacity is
            # computed elsewhere.
            instance = self.create(uuid=uuid,
                                   hostname=hostname,
                                   ip_address=ip_address,
                                   capacity=0)
        return (True, instance)
    def get_or_register(self):
        """Register this node when auto-deprovision is enabled, otherwise
        just look it up.  Same ``(changed, instance)`` shape as register().
        """
        if settings.AWX_AUTO_DEPROVISION_INSTANCES:
            # Imported lazily to avoid a circular import at module load.
            from awx.main.management.commands.register_queue import RegisterQueue
            pod_ip = os.environ.get('MY_POD_IP')
            registered = self.register(ip_address=pod_ip)
            RegisterQueue('tower', None, 100, 0, []).register()
            return registered
        else:
            return (False, self.me())
    def active_count(self):
        """Return count of active Tower nodes for licensing."""
        return self.all().count()
    def my_role(self):
        # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
        return "tower"
    def all_non_isolated(self):
        """Exclude instances belonging to isolated (controller-managed) groups."""
        return self.exclude(rampart_groups__controller__isnull=False)
class InstanceGroupManager(models.Manager):
    """A custom manager class for the Instance model.
    Used for global capacity calculations
    """
    def capacity_mapping(self, qs=None):
        """
        Another entry-point to Instance manager method by same name

        Returns ``(instance_ig_mapping, ig_ig_mapping)``: hostname -> groups
        it belongs to, and group -> groups it shares instances with.
        Zero-capacity instances are ignored on both sides.
        """
        if qs is None:
            qs = self.all().prefetch_related('instances')
        instance_ig_mapping = {}
        ig_instance_mapping = {}
        # Create dictionaries that represent basic m2m memberships
        for group in qs:
            ig_instance_mapping[group.name] = set(
                instance.hostname for instance in group.instances.all() if
                instance.capacity != 0
            )
            for inst in group.instances.all():
                if inst.capacity == 0:
                    continue
                instance_ig_mapping.setdefault(inst.hostname, set())
                instance_ig_mapping[inst.hostname].add(group.name)
        # Get IG capacity overlap mapping
        ig_ig_mapping = get_ig_ig_mapping(ig_instance_mapping, instance_ig_mapping)
        return instance_ig_mapping, ig_ig_mapping
    @staticmethod
    def zero_out_group(graph, name, breakdown):
        # Initialize (or reset) a group's counters in the capacity graph.
        if name not in graph:
            graph[name] = {}
        graph[name]['consumed_capacity'] = 0
        if breakdown:
            graph[name]['committed_capacity'] = 0
            graph[name]['running_capacity'] = 0
    def capacity_values(self, qs=None, tasks=None, breakdown=False, graph=None):
        """
        Returns a dictionary of capacity values for all IGs

        Waiting tasks dock capacity from every group that shares instances
        with their group; running tasks dock capacity from every group that
        contains their execution node.
        """
        if qs is None:  # Optionally BYOQS - bring your own queryset
            qs = self.all().prefetch_related('instances')
        instance_ig_mapping, ig_ig_mapping = self.capacity_mapping(qs=qs)
        if tasks is None:
            tasks = self.model.unifiedjob_set.related.related_model.objects.filter(
                status__in=('running', 'waiting'))
        if graph is None:
            graph = {group.name: {} for group in qs}
            for group_name in graph:
                self.zero_out_group(graph, group_name, breakdown)
        for t in tasks:
            # TODO: dock capacity for isolated job management tasks running in queue
            impact = t.task_impact
            if t.status == 'waiting' or not t.execution_node:
                # Subtract capacity from any peer groups that share instances
                if not t.instance_group:
                    impacted_groups = []
                elif t.instance_group.name not in ig_ig_mapping:
                    # Waiting job in group with 0 capacity has no collateral impact
                    impacted_groups = [t.instance_group.name]
                else:
                    impacted_groups = ig_ig_mapping[t.instance_group.name]
                for group_name in impacted_groups:
                    if group_name not in graph:
                        self.zero_out_group(graph, group_name, breakdown)
                    graph[group_name]['consumed_capacity'] += impact
                    if breakdown:
                        graph[group_name]['committed_capacity'] += impact
            elif t.status == 'running':
                # Subtract capacity from all groups that contain the instance
                if t.execution_node not in instance_ig_mapping:
                    if not t.is_containerized:
                        logger.warning('Detected %s running inside lost instance, '
                                       'may still be waiting for reaper.', t.log_format)
                    if t.instance_group:
                        impacted_groups = [t.instance_group.name]
                    else:
                        impacted_groups = []
                else:
                    impacted_groups = instance_ig_mapping[t.execution_node]
                for group_name in impacted_groups:
                    if group_name not in graph:
                        self.zero_out_group(graph, group_name, breakdown)
                    graph[group_name]['consumed_capacity'] += impact
                    if breakdown:
                        graph[group_name]['running_capacity'] += impact
            else:
                logger.error('Programming error, %s not in ["running", "waiting"]', t.log_format)
        return graph
|
apache-2.0
| 6,251,329,385,971,334,000
| 42.633466
| 141
| 0.582816
| false
|
mferenca/HMS-ecommerce
|
ecommerce/extensions/basket/app.py
|
1
|
1028
|
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from oscar.apps.basket import app
from oscar.core.loading import get_class
class BasketApplication(app.BasketApplication):
    """Oscar basket application wired up with this project's basket views."""

    single_item_view = get_class('basket.views', 'BasketSingleItemView')
    summary_view = get_class('basket.views', 'BasketSummaryView')

    def get_urls(self):
        """Return the basket URL patterns, run through Oscar's post-processing."""
        patterns = [
            url(r'^$', self.summary_view.as_view(), name='summary'),
            url(r'^add/(?P<pk>\d+)/$', self.add_view.as_view(), name='add'),
            url(r'^vouchers/add/$', self.add_voucher_view.as_view(), name='vouchers-add'),
            url(r'^vouchers/(?P<pk>\d+)/remove/$', self.remove_voucher_view.as_view(), name='vouchers-remove'),
            url(r'^saved/$', login_required(self.saved_view.as_view()), name='saved'),
            url(r'^single-item/$', login_required(self.single_item_view.as_view()), name='single-item'),
        ]
        return self.post_process_urls(patterns)


application = BasketApplication()
|
agpl-3.0
| 4,891,847,817,165,289,000
| 43.695652
| 111
| 0.644942
| false
|
azoft-dev-team/imagrium
|
src/pages/bottom_navigation.py
|
1
|
1266
|
from src.core.page import ResourceLoader, Page
from src.core.r import Resource
from src.pages.explore import Explore
from src.pages.me.me import Me
class BottomNavigation(Page):
    """Page object for the app's bottom navigation bar.

    Each tab icon has an active and an inactive resource variant; tapping
    one returns the destination screen's page object.
    """
    meNavIconInactive = ResourceLoader(Resource.meNavIconInactive)
    meNavIconActive = ResourceLoader(Resource.meNavIconActive)
    exploreNavIconInactive = ResourceLoader(Resource.exploreNavIconInactive)
    exploreNavIconActive = ResourceLoader(Resource.exploreNavIconActive)
    def __init__(self, box, settings):
        # NOTE(review): super(Page, self) skips Page's own __init__ and calls
        # Page's base class instead -- confirm this is intentional.
        super(Page, self).__init__(box, settings)
        self.box = box
        self.settings = settings
        # It is necessary to assign a search area to all class fields
        # NOTE(review): only the "me" icons are rebound to the search box
        # here; the "explore" icons are not -- looks like an oversight.
        self.meNavIconInactive = self.box
        self.meNavIconActive = self.box
    def actionGoMe(self, inactive=True):
        """Tap the Me tab (inactive or active icon) and return the Me page."""
        if inactive:
            self.meNavIconInactive.click()
        else:
            self.meNavIconActive.click()
        return Me.load(self.box, self.settings)
    def actionGoExplore(self, inactive=True):
        """Tap the Explore tab and return the Explore page."""
        if inactive:
            self.exploreNavIconInactive.click()
        else:
            self.exploreNavIconActive.click()
        return Explore.load(self.box, self.settings)
class BottomNavigationiOS(BottomNavigation):
    """iOS variant; currently identical to the base implementation."""
    pass
|
mit
| 1,978,720,015,273,471,700
| 30.65
| 76
| 0.691943
| false
|
cdent/tiddlywebplugins.policyfilter
|
test/test_filter.py
|
1
|
2381
|
from tiddlyweb.filters import FilterError, recursive_filter, parse_for_filters
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.store import Store
from tiddlywebplugins.policyfilter import init
from tiddlyweb.config import config
import pytest
def setup_module(module):
    """pytest hook: initialize the plugin and a shared Store for all tests.

    Publishes ``store`` and ``environ`` as module-level globals used by the
    tests and the filter() helper below.
    """
    init(config)
    environ = {
        'tiddlyweb.config': config,
        'tiddlyweb.usersign': {'name': 'cdent', 'roles': ['COW', 'MOO']}
    }
    module.store = Store(config['server_store'][0],
                         config['server_store'][1],
                         environ)
    environ['tiddlyweb.store'] = module.store
    module.environ = environ
def test_filtering_bags():
    """select=policy:create keeps only bags the current usersign may create in."""
    bag1 = Bag('bag1')
    bag1.policy.create = ['cdent']      # matches by username
    bag2 = Bag('bag2')
    bag2.policy.create = ['R:COW']      # matches by role
    bag3 = Bag('bag3')
    bag3.policy.create = []             # open policy: everyone passes
    bag4 = Bag('bag4')
    bag4.policy.create = ['NONE']       # nobody passes
    bags = [bag1, bag2, bag3, bag4]
    for bag in bags:
        store.put(bag)
    found_bags = list(filter('select=policy:create', bags))
    assert len(found_bags) == 3
    names = [bag.name for bag in found_bags]
    assert 'bag1' in names
    assert 'bag2' in names
    assert 'bag3' in names
    assert 'bag4' not in names
def test_filter_recipes():
    """Same policy filter applied to recipes instead of bags."""
    recipe1 = Recipe('recipe1')
    recipe1.policy.create = ['cdent']
    recipe2 = Recipe('recipe2')
    recipe2.policy.create = ['R:COW']
    recipe3 = Recipe('recipe3')
    recipe3.policy.create = []
    recipe4 = Recipe('recipe4')
    recipe4.policy.create = ['NONE']
    recipes = [recipe1, recipe2, recipe3, recipe4]
    for recipe in recipes:
        store.put(recipe)
    found_recipes = list(filter('select=policy:create', recipes))
    assert len(found_recipes) == 3
    names = [recipe.name for recipe in found_recipes]
    assert 'recipe1' in names
    assert 'recipe2' in names
    assert 'recipe3' in names
    assert 'recipe4' not in names
def test_filter_tiddlers():
    """
    This should error.
    """
    tiddler1 = Tiddler('tiddler1', 'bag1')
    tiddler1.text = 'foo'
    store.put(tiddler1)
    # Tiddlers have no .policy attribute, so the filter blows up.
    with pytest.raises(AttributeError):
        found_tiddlers = list(filter('select=policy:create', [tiddler1]))
def filter(filter_string, entities):
    # NOTE: intentionally shadows the builtin filter() within this module.
    return recursive_filter(parse_for_filters(
        filter_string, environ)[0], entities)
|
bsd-3-clause
| -800,240,816,916,698,600
| 25.164835
| 78
| 0.649727
| false
|
mjonescase/flask-truss
|
flask_truss/manage.py
|
1
|
1920
|
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from flask_truss.factory import create_app
from flask_truss.conf.app import Config
from flask_truss.async.base import celery_instance
from flask_truss.models.base import db
# Module-level wiring: importing this module builds the app, the script
# manager, and the migration command set.
config = Config()
app = create_app(config)
manager = Manager(app)
migrate = Migrate(app, db)
@manager.shell
def make_shell_context():
    """IPython session with app loaded"""
    return dict(app=app)
@manager.option('-n', '--nose_arguments', dest='nose_arguments', required=False,
                help="List of arguments to pass to nose. First argument MUST be ''",
                default=['', '--with-coverage', '--cover-package=flask_truss'])
def test(nose_arguments):
    """Run nosetests with the given arguments and report coverage"""
    # nose.main expects argv[0] to be a (dummy) program name.
    assert nose_arguments[0] == ''
    import nose
    from nose.plugins.cover import Coverage
    nose.main(argv=nose_arguments, addplugins=[Coverage()])
@manager.command
def runserver():
    """Run the Flask development server with the config's settings"""
    app.run(port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)
@manager.option('-Q', '--queues', dest='queues', required=False, default='celery',
                help="Comma separated names of queues")
@manager.option('-c', '--concurrency', dest='concurrency', required=False, type=int, default=0,
                help="Number of processes/threads the worker uses")
@manager.option('-l', '--loglevel', dest='loglevel', required=False, default='INFO',
                help="DEBUG, INFO, WARN, ERROR, CRITICAL, FATAL")
def worker(queues, concurrency, loglevel=None):
    """Run a celery worker process locally"""
    # NOTE: the local name shadows this function object; harmless here.
    worker = celery_instance.Worker(queues=queues, concurrency=concurrency, loglevel=loglevel, **app.config)
    worker.start()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
    manager.run()
|
mit
| -6,636,964,529,650,362,000
| 33.285714
| 108
| 0.690625
| false
|
slongfield/StereoCensus
|
verilog/census/argmin_gen.py
|
1
|
3581
|
# argmin_gen.py
#
# Takes in a single argument, the number of inputs, and generates a verilog
# armin tree, using the argmin_helper.
#
# Copyright (c) 2016, Stephen Longfield, stephenlongfield.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
# Header is a format string, expecting number of inputs as an argument.
_HEADER = """
`ifndef CENSUS_ARGMIN_{0}_V_
`define CENSUS_ARGMIN_{0}_V_
module argmin_{0}#(
parameter WIDTH=1
) (
input wire clk,
input wire rst,
input wire [WIDTH*{0}-1:0] inp,
output wire [WIDTH-1:0] outp,
output wire [$clog2({0})-1:0] outp_addr
);
localparam ADDR_WIDTH = $clog2({0});
"""
_FOOTER = """
endmodule
`endif // CENSUS_ARGMIN_V_
"""
_STAGE = """
argmin_helper#(.WIDTH(WIDTH), .ADDR_WIDTH(ADDR_WIDTH), .NUM_INP({num_inp}),
.NUM_OUTP({num_outp}), .STAGE({stage}))
ah_{stage}(clk, rst, {inp}, {inp_addr}, {outp}, {outp_addr});
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--num_inputs",
help="number of inputs in the generated argmin",
type=int,
required=True)
def get_args():
    """Parse command-line arguments.

    Returns:
        int: the ``--num_inputs`` value supplied on the command line.
    """
    return parser.parse_args().num_inputs
def generate_argmin(num_inputs):
    """Generate Verilog source for an argmin tree over *num_inputs* values.

    Stages of pairwise argmin_helper reductions are emitted until a single
    value (and its address) remains.

    Args:
        num_inputs (int): number of WIDTH-bit inputs to the argmin tree.

    Returns:
        str: the Verilog module text.
    """
    parts = [_HEADER.format(num_inputs)]

    # Treat the module inputs as the outputs of an imaginary stage -1.
    prev_data = "inp"
    prev_addr = "0"
    stage = 0
    remaining = num_inputs
    while remaining > 1:
        # Each stage halves the fan-in, rounding up for an odd count.
        reduced = remaining // 2 + remaining % 2
        data_name = "data_{}".format(stage)
        addr_name = "addr_{}".format(stage)
        # Declare the wires carrying this stage's results.
        parts.append("  wire [WIDTH*{}-1:0] {};".format(reduced,
                                                        data_name))
        parts.append("  wire [ADDR_WIDTH*{}-1:0] {};".format(reduced,
                                                             addr_name))
        parts.append(_STAGE.format(num_inp=remaining, num_outp=reduced,
                                   stage=stage, inp=prev_data,
                                   inp_addr=prev_addr,
                                   outp=data_name, outp_addr=addr_name))
        stage += 1
        remaining = reduced
        prev_data = data_name
        prev_addr = addr_name

    # Hook the final stage up to the module outputs.
    parts.append("  assign outp = {};".format(prev_data))
    parts.append("  assign outp_addr = {};".format(prev_addr))
    parts.append(_FOOTER)
    return "\n".join(parts)
def run():
    """Script entry point: read --num_inputs and print the generated Verilog."""
    num_inputs = get_args()
    print(generate_argmin(num_inputs))
if __name__ == '__main__':
    run()
|
gpl-3.0
| -7,825,781,145,096,044,000
| 27.879032
| 78
| 0.60458
| false
|
croscon/fleaker
|
tests/test_exceptions.py
|
1
|
15483
|
# ~*~ coding: utf-8 ~*~
"""
tests.test_exceptions
~~~~~~~~~~~~~~~~~
Provides tests for the custom Exceptions Fleaker implements.
:copyright: (c) 2016 by Croscon Consulting, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import pytest
from flask import redirect, request, session, url_for
from fleaker import App, AppException, DEFAULT_DICT, MISSING, exceptions
from fleaker._compat import to_bytes, urlencode
from fleaker.exceptions import ErrorAwareApp
from tests._compat import mock
SERVER_NAME = 'localhost'
# standard response Flask returns if you raise an uncaught exception
STANDARD_FLASK_500 = to_bytes("""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<title>500 Internal Server Error</title>
<h1>Internal Server Error</h1>
<p>The server encountered an internal error and was unable to complete your \
request. Either the server is overloaded or there is an error in the \
application.</p>
""")
def _create_app(register_error_handlers=True):
    """Create a small app for help in testing.

    Each /..._exc route raises one exception type; query-string args become
    constructor kwargs (``redirect_args`` is JSON-decoded first).
    """
    app = App(__name__)
    app.config['SECRET_KEY'] = 'ITSASECRET'
    app.config['SERVER_NAME'] = SERVER_NAME
    # This is needed to make these tests pass. As of v0.4.0, we register
    # a global 500 errorhandler so that it can be logged. These tests were
    # written by a nut job :)
    app.error_handlers = {}
    @app.route('/app_exc')
    def app_exception():
        """Raise an AppException with some custom parameters."""
        request_args = request.args.to_dict()
        args = request_args.pop('redirect_args', '{}')
        args = json.loads(args)
        raise exceptions.AppException("Testing App exception",
                                      redirect_args=args, **request_args)
    @app.route('/fleaker_exc')
    def fleaker_exception():
        """Raise a FleakerException with some custom parameters."""
        request_args = request.args.to_dict()
        args = request_args.pop('redirect_args', '{}')
        args = json.loads(args)
        raise exceptions.FleakerException("Testing Fleaker exception",
                                          redirect_args=args, **request_args)
    @app.route('/base_exc')
    def base_exception():
        """Raise a FleakerBaseException with some custom parameters."""
        request_args = request.args.to_dict()
        args = request_args.pop('redirect_args', '{}')
        args = json.loads(args)
        raise exceptions.FleakerBaseException("Testing base exception",
                                              redirect_args=args, **request_args)
    @app.route('/redirected')
    def redir_method():
        """Small method to redirect to."""
        return 'OK', 200
    if register_error_handlers:
        # this is more old-Flask friendly than using ``add_errorhandler``.
        app.errorhandler(exceptions.AppException)(
            exceptions.AppException.errorhandler_callback)
        app.errorhandler(exceptions.FleakerException)(
            exceptions.FleakerException.errorhandler_callback)
        app.errorhandler(exceptions.FleakerBaseException)(
            exceptions.FleakerBaseException.errorhandler_callback)
    return app
def _redir_url(fragment, query_args=''):
    """Build an absolute URL for *fragment* under this module's SERVER_NAME.

    Args:
        fragment (str): Path fragment to expand into a full URL, e.g.
            ``/foo``.
        query_args (str): Optional JSON-encoded mapping appended as a query
            string.
    """
    path = fragment[1:] if fragment.startswith('/') else fragment
    query = ''
    if query_args:
        # @TODO (test): Ewwww.... fix this
        query = '?' + urlencode(json.loads(query_args))
    return "http://{}/{}{}".format(SERVER_NAME, path, query)
# please list all custom exceptions here for a quick test
@pytest.mark.parametrize('spec', [
(exceptions.FleakerBaseException, 'Base Exc', 401),
(exceptions.FleakerException, 'Fleaker Exc', 402),
(exceptions.AppException, 'App Exc', 403),
])
def test_exceptions_basic_args(spec):
"""Ensure we can raise Exceptions with a status code and message, or no
args.
"""
exc_type, msg, code = spec
# message and status code should work
with pytest.raises(exc_type) as exc:
raise exc_type(msg, status_code=code)
assert type(exc.value) is exc_type
assert exc.value.message == msg
assert exc.value.status_code == code
# and no args should also work
with pytest.raises(exc_type) as exc:
raise exc_type()
assert type(exc.value) is exc_type
assert exc.value.message == ''
assert exc.value.status_code is None
assert exc.value.redirect is MISSING
assert exc.value.redirect_args is DEFAULT_DICT
assert exc.value.prevent_rollback is False
assert exc.value.flash_message is False
assert exc.value.flash_level == 'danger'
# @TODO (test): Combine with the flash instantiation test
@pytest.mark.parametrize('spec', [
(exceptions.FleakerBaseException, '/foo', {'bar': 1},),
(exceptions.FleakerException, '/bar', {'baz': 1},),
(exceptions.AppException, '/baz', {'qux': 1},),
])
def test_exception_create_with_redirect(spec):
"""Ensure we can create an exception setup to redirect."""
exc_type, redirect, redirect_args = spec
with pytest.raises(exc_type) as exc:
raise exc_type(redirect=redirect, redirect_args=redirect_args)
assert exc.value.redirect == redirect
assert exc.value.redirect_args == redirect_args
@pytest.mark.parametrize('spec', [
(exceptions.FleakerBaseException, '/base_exc',),
(exceptions.FleakerException, '/fleaker_exc',),
(exceptions.AppException, '/app_exc',),
])
def test_exception_handler_auto_redirect(spec):
"""Ensure that handled exceptions properly redirect."""
app = _create_app()
exc_type, route = spec
with app.test_client() as client:
redirect_args = json.dumps({"test": "redirarg"})
query_args = {
'redirect': 'redir_method',
'redirect_args': redirect_args
}
resp = client.get(route, query_string=query_args)
assert resp.status_code == 302
assert resp.location == _redir_url('/redirected',
query_args=redirect_args)
assert "test=redirarg" in resp.location
# now let's try with a properly defined named route to test url_for
assert resp.status_code == 302
@pytest.mark.parametrize('spec', [
    (exceptions.FleakerBaseException, 'Joy!', 'success',),
    (exceptions.FleakerException, 'Sorrow!', 'danger',),
    (exceptions.AppException, 'Mixed feelings!', 'warning',),
])
def test_exception_create_with_flash(spec):
    """Ensure we can create a custom exception with flash info."""
    exc_type, msg, level = spec
    stock_message = "foo"
    with pytest.raises(exc_type) as exc:
        raise exc_type(stock_message, flash_message=msg, flash_level=level)
    assert exc.value.message == stock_message
    assert exc.value.flash_message == msg
    assert exc.value.flash_level == level
@pytest.mark.parametrize('spec', [
    (exceptions.FleakerBaseException, '/base_exc', 'Joy!', 'success',),
    (exceptions.FleakerException, '/fleaker_exc', 'Sorrow!', 'danger',),
    (exceptions.AppException, '/app_exc', 'Mixed feelings!', 'warning',),
])
def test_exception_handler_auto_flash(spec):
    """Ensure that we automatically flash the environment when needed."""
    app = _create_app()
    exc_type, route, flash_msg, flash_level = spec
    with app.test_client() as client:
        # NOTE(review): ``resp`` is unused; only the session side effect is
        # asserted.
        resp = client.get(
            route,
            query_string={
                'flash_message': flash_msg,
                'flash_level': flash_level
            }
        )
        assert '_flashes' in session
        assert session['_flashes'].pop() == (flash_level, flash_msg)
@pytest.mark.parametrize('spec', [
    (exceptions.FleakerBaseException, '/base_exc', 'Joy', 'success',),
    (exceptions.FleakerException, '/fleaker_exc', 'Sorrow', 'danger',),
    (exceptions.AppException, '/app_exc', 'Mixed!', 'warning',),
])
def test_exception_handler_redirect_with_flash(spec):
    """Ensure flashing and redirecting together works fine."""
    app = _create_app()
    exc_type, route, flash_msg, flash_level = spec
    redirect_args = json.dumps({'redir': 'tested'})
    with app.test_client() as client:
        resp = client.get(
            route,
            query_string={
                'redirect': 'redir_method',
                'flash_message': flash_msg,
                'flash_level': flash_level,
                'redirect_args': redirect_args,
            }
        )
        assert resp.status_code == 302
        assert resp.location == _redir_url('/redirected',
                                           query_args=redirect_args)
        assert '_flashes' in session
        assert session['_flashes'].pop() == (flash_level, flash_msg)
@pytest.mark.skip(reason="While the ORM is finished, this needs implementing")
def test_exception_handler_auto_rollback():
    """Ensure we automatically roll back any open transactions."""
@pytest.mark.parametrize('exc_type', [
    exceptions.FleakerBaseException,
    exceptions.FleakerException,
    exceptions.AppException,
])
def test_exception_handler_registration(exc_type):
    """Ensure we can easily register the exception handler."""
    app = _create_app(register_error_handlers=False)
    assert not app.error_handlers.keys()
    exc_type.register_errorhandler(app)
    # error_handlers[None] holds app-wide (non-blueprint) handlers.
    assert app.error_handlers[None][exc_type] == exc_type.errorhandler_callback
def test_exception_handler_overridden():
    """Ensure an AppException can be overridden and its handler still
    works.
    """
    app = _create_app(register_error_handlers=False)
    code = 403
    content = b"My error page."
    class TestException(exceptions.AppException):
        """Simple testing exception."""
        def error_page(self):
            """Return custom error page."""
            return content
    @app.route('/test')
    def throw_error():
        """Throw an error for testing."""
        raise TestException(flash_message='Flashed', status_code=code)
    AppException.register_errorhandler(app)
    with app.test_client() as client:
        resp = client.get('/test')
        assert resp.data == content
        assert resp.status_code == code
def test_exception_handler_chained():
    """Ensure a chain of error handlers with no error page works fine."""
    app = _create_app(register_error_handlers=False)
    content = b'Success!'
    @app.route('/test')
    def throw_error():
        """Just throw an exc for me."""
        raise AppException('Something suddenly came up!')
    def custom_errorhandler(exc):
        """Return actual content."""
        return content
    AppException.register_errorhandler(app)
    # The later registration provides the actual response body.
    app.errorhandler(AppException)(custom_errorhandler)
    with app.test_client() as client:
        resp = client.get('/test')
        assert resp.data == content
def test_exception_auto_handler_registration():
    """Ensure that the exception mixin automatically registers handlers."""
    app = ErrorAwareApp.create_app('tests')
    app.config['SECRET_KEY'] = 'ITSASECRET'
    # error handlers should be registered by default
    expected_handler = AppException.errorhandler_callback
    assert app.error_handlers[None][AppException] == expected_handler
    msg = 'foo'
    level = 'danger'
    @app.route('/test_exc')
    def test_exc():
        """Throw a simple exception for me."""
        raise AppException(flash_message=msg, flash_level=level)
    with app.test_client() as client:
        res = client.get('/test_exc')
        # Handler flashes but returns nothing, so Flask falls back to the
        # stock 500 page.
        assert res.data == STANDARD_FLASK_500
        assert '_flashes' in session
        assert session['_flashes'].pop() == (level, msg)
def test_exception_auto_handler_explicit_registration():
    """Ensure that the exception mixin doesn't register handlers when told not
    to.
    """
    app = ErrorAwareApp.create_app('tests', register_errorhandler=False)
    assert app.error_handlers == {}
def test_exception_error_handler_callback():
    """Ensure the error handler callback works on its own."""
    app = _create_app(register_error_handlers=False)
    class ErrorPageException(AppException):
        """Implements a small error page for testing."""
        def error_page(self):
            return self.message
    msg = 'foo'
    level = 'danger'
    code = 451
    with app.test_client() as client:
        # just give me a request context to work with
        _ = client.get('/redirected')
        # flash-only exception: callback flashes and returns nothing
        exc = AppException(flash_message=msg, flash_level=level)
        res = AppException.errorhandler_callback(exc)
        assert res is None
        assert '_flashes' in session
        assert session['_flashes'].pop() == (level, msg)
        # redirect exception: callback produces a redirect response
        exc = AppException(redirect='redir_method',
                           redirect_args={'foo': 'bar'})
        res = AppException.errorhandler_callback(exc)
        expected = redirect(url_for('redir_method', foo='bar'))
        assert res.headers == expected.headers
        assert res.data == expected.data
        assert res.status_code == expected.status_code
        # error-page exception: callback returns (body, status) tuple
        exc = ErrorPageException(msg, status_code=code)
        res = AppException.errorhandler_callback(exc)
        assert res == (msg, code)
def test_exception_error_handler_custom_callback():
    """Ensure a custom callback gets installed correctly."""
    app = _create_app(register_error_handlers=False)
    content = b'test'
    flash_level = 'danger'
    class TestException(AppException):
        """Reimplement the errorhandler_callback."""
        @classmethod
        def errorhandler_callback(cls, exc):
            """Return static data, please."""
            return content
    # Wrap the subclass callback so call counts can be asserted below.
    handler_mock = mock.patch.object(TestException,
                                     'errorhandler_callback',
                                     wraps=TestException.errorhandler_callback)
    @app.route('/trigger_test')
    def trigger():
        """Trigger the exception we're testing."""
        raise TestException(flash_message=content)
    @app.route('/trigger_app')
    def trigger_stock():
        """Trigger the stock exception to ensure it doesn't cause a run."""
        raise AppException(flash_message=content, flash_level=flash_level)
    @app.route('/trigger_exc')
    def trigger_exc():
        """Raise a standard exception."""
        raise Exception("Fail")
    try:
        errorhandler_cb = handler_mock.start()
        TestException.register_errorhandler(app)
        AppException.register_errorhandler(app)
        with app.test_client() as client:
            assert errorhandler_cb.call_count == 0
            res = client.get('/trigger_test')
            assert res.data == content
            assert errorhandler_cb.call_count == 1
            # ensure no flashes were added even though the route passes
            # ``flash_message``
            assert '_flashes' not in session
            res = client.get('/trigger_app')
            assert res.data != content
            assert '_flashes' in session
            assert session['_flashes'].pop() == (flash_level, content)
            assert errorhandler_cb.call_count == 1
            res = client.get('/trigger_exc')
            assert res.status_code == 500
            assert res.data == STANDARD_FLASK_500
    finally:
        # Always unpatch, even if an assertion above fails.
        handler_mock.stop()
|
bsd-3-clause
| -3,060,763,833,395,387,400
| 32.296774
| 82
| 0.634567
| false
|
yaukwankiu/armor
|
geometry/fractal.py
|
1
|
1840
|
import time
import numpy as np
from .. import defaultParameters as dp
def hausdorffDim(a, epsilon=2):
    """Estimate the box-counting (Minkowski-Bouligand) dimension of a.matrix.

    #codes from
    # hausdorffDimensionTest.py
    # http://en.wikipedia.org/wiki/Hausdorff_dimension
    # http://en.wikipedia.org/wiki/Minkowski-Bouligand_dimension

    The matrix is thresholded at zero and covered with boxes of side
    ``epsilon``; the dimension is log(occupied boxes) / log(grid scale).

    Args:
        a: object with a 2-D numpy array attribute ``matrix``.
        epsilon (int): box side length in pixels.

    Returns:
        float: the estimated dimension.

    Fixes over the original: the unused ``dims`` local is removed, and the
    boolean accumulation uses an explicit ``|=`` instead of ``+=`` (for
    boolean arrays the two are equivalent, but OR states the intent).
    """
    occupied = (a.matrix > 0)  # binarise; any positive value counts
    height, width = occupied.shape
    # One cell per epsilon-by-epsilon box; a box is occupied when any of its
    # pixels is set, so OR together all epsilon*epsilon shifted sub-grids.
    boxes = occupied[::epsilon, ::epsilon].copy()
    for i in range(epsilon):
        for j in range(epsilon):
            h, w = occupied[i::epsilon, j::epsilon].shape
            boxes[0:h, 0:w] |= occupied[i::epsilon, j::epsilon]
    dimH = np.log(boxes.sum()) / np.log((height * width) ** .5 / epsilon)
    return dimH
def hausdorffDimLocal(a, epsilon=1, I=50, J=50, display=True, imagePath=""):
    """Compute a per-window Hausdorff dimension map of ``a``.

    The matrix of ``a`` is split into I x J windows; the box-counting
    dimension of each window is computed and painted back into a copy of
    ``a`` so the result can be displayed as a heat map.

    Args:
        a: the source object; assumed to expose .matrix, .name,
           .hausdorffDim(), .getWindow(), .copy(), .show() and
           .saveImage() -- TODO confirm against armor's pattern class.
        epsilon: box size passed on to hausdorffDim (one value for now).
        I, J: window height and width in cells.
        display: when True, show the resulting map interactively.
        imagePath: when non-empty, save the map image (path itself is
            derived internally, not from this argument -- see below).

    Returns:
        dict with 'a2' (the painted copy) and 'dimLocal'
        ({(i, j): dimension} keyed by window indices).
    """
    height, width = a.matrix.shape
    dimLocal = {}
    # NOTE(review): a.hausdorffDim() is a method of the project object and
    # appears to return a dict holding a preprocessed copy under 'a1'.
    a1 = a.hausdorffDim(epsilon)['a1']
    for i in range(height//I):
        for j in range(width//J):
            aa1 = a1.getWindow(i*I, j*J, I, J)
            # one epsilon for now, may extend to a list later 2014-07-29
            dimH = hausdorffDim(aa1, epsilon)
            aa1.name = str(dimH)
            #aa1.show()
            #time.sleep(1)
            dimLocal[(i,j)] = dimH
            #print dimH #debug
    a2 = a.copy()
    # float dtype so fractional dimensions can be stored in the matrix
    a2.matrix= a2.matrix.astype(float)
    #a2.show() # debug
    #time.sleep(5)
    a2.name = "Local Hausdorff Dimensions for\n" + a.name
    # timestamped output path; only the last 19 chars of the name are kept
    a2.imagePath = 'testing/' + str(time.time()) + '_local_hausdorff_dim_' + a.name[-19:] + '.png'
    # paint each window with its computed dimension
    for i in range(height//I):
        for j in range(width//J):
            a2.matrix[i*I:(i+1)*I, j*J:(j+1)*J] = dimLocal[(i,j)]
    # display range 0..2: dimensions of planar patterns fall in this band
    a2.vmax=2
    a2.vmin=0
    a2.cmap='jet'
    if imagePath !="":
        a2.saveImage()
    if display:
        a2.show()
    return {'a2': a2, 'dimLocal': dimLocal}
|
cc0-1.0
| -3,036,818,565,936,485,000
| 33.716981
| 98
| 0.561957
| false
|
GuessWhatGame/generic
|
preprocess_data/extract_img_features.py
|
1
|
3564
|
#!/usr/bin/env python
import numpy
import os
import tensorflow as tf
from multiprocessing import Pool
from tqdm import tqdm
import numpy as np
import h5py
from generic.data_provider.nlp_utils import DummyTokenizer
from generic.data_provider.iterator import Iterator
def extract_features(
        img_input,
        ft_output,
        network_ckpt,
        dataset_cstor,
        dataset_args,
        batchifier_cstor,
        out_dir,
        set_type,
        batch_size,
        no_threads,
        gpu_ratio):
    """Run every unique image of each dataset split through the network
    and dump the resulting features into an HDF5 file per split.

    Args:
        img_input: input tensor fed with image batches.
        ft_output: tensor whose evaluated values are stored as features.
        network_ckpt: TensorFlow checkpoint restored into the session.
        dataset_cstor: dataset constructor, called with ``dataset_args``
            (its ``which_set`` entry is overwritten per split).
        dataset_args: kwargs dict for ``dataset_cstor``.
        batchifier_cstor: batchifier constructor (tokenizer + sources).
        out_dir: directory receiving ``<set>_features.h5`` files.
        set_type: iterable of split names ("train", ..., or "all").
        batch_size: number of images per session run.
        no_threads: worker processes used to prepare batches.
        gpu_ratio: fraction of GPU memory to reserve.
    """
    # CPU/GPU option
    cpu_pool = Pool(no_threads, maxtasksperchild=1000)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, network_ckpt)
        for one_set in set_type:
            print("Load dataset -> set: {}".format(one_set))
            dataset_args["which_set"] = one_set
            dataset = dataset_cstor(**dataset_args)
            # hack dataset to only keep one game by image
            image_id_set = {}
            games = []
            for game in dataset.games:
                if game.image.id not in image_id_set:
                    games.append(game)
                    image_id_set[game.image.id] = 1
            dataset.games = games
            no_images = len(games)
            #TODO find a more generic approach
            # image ids may be ints or strings; pick a matching h5 dtype
            if type(dataset.games[0].image.id) is int:
                image_id_type = np.int64
            else:
                image_id_type = h5py.special_dtype(vlen=type(dataset.games[0].image.id))
            # tensor names end with ':0'; strip the suffix to get the source key
            source_name = os.path.basename(img_input.name[:-2])
            dummy_tokenizer = DummyTokenizer()
            batchifier = batchifier_cstor(tokenizer=dummy_tokenizer, sources=[source_name])
            iterator = Iterator(dataset,
                                batch_size=batch_size,
                                pool=cpu_pool,
                                batchifier=batchifier)
            ############################
            #  CREATE FEATURES
            ############################
            print("Start computing image features...")
            if one_set == "all":
                filepath = os.path.join(out_dir, "features.h5")
            else:
                filepath = os.path.join(out_dir, "{}_features.h5".format(one_set))
            with h5py.File(filepath, 'w') as f:
                ft_shape = [int(dim) for dim in ft_output.get_shape()[1:]]
                ft_dataset = f.create_dataset('features', shape=[no_images] + ft_shape, dtype=np.float32)
                idx2img = f.create_dataset('idx2img', shape=[no_images], dtype=image_id_type)
                pt_hd5 = 0
                i = 0
                for batch in tqdm(iterator):
                    i += 1
                    feat = sess.run(ft_output, feed_dict={img_input: numpy.array(batch[source_name])})
                    # Store dataset
                    batch_size = len(batch["raw"])
                    ft_dataset[pt_hd5: pt_hd5 + batch_size] = feat
                    # Store idx to image.id
                    # NOTE(review): this inner ``i`` shadows the batch counter
                    # above; harmless since the counter is never read, but it
                    # deserves a rename.
                    for i, game in enumerate(batch["raw"]):
                        idx2img[pt_hd5 + i] = game.image.id
                    # update hd5 pointer
                    pt_hd5 += batch_size
            print("Start dumping file: {}".format(filepath))
            print("Finished dumping file: {}".format(filepath))
    print("Done!")
|
apache-2.0
| 6,149,952,111,916,122,000
| 33.941176
| 105
| 0.518799
| false
|
pfalcon/micropython
|
tests/micropython/heapalloc_exc_compressed.py
|
1
|
1200
|
# Check that the set type is available (some ports build without it).
try:
    set
except NameError:
    print("SKIP")
    raise SystemExit
import micropython
# Tests both code paths for built-in exception raising.
# mp_obj_new_exception_msg_varg (exception requires decompression at raise-time to format)
# mp_obj_new_exception_msg (decompression can be deferred)
# NameError uses mp_obj_new_exception_msg_varg for NameError("name '%q' isn't defined")
# set.pop uses mp_obj_new_exception_msg for KeyError("pop from an empty set")
# Tests that deferred decompression works both via print(e) and accessing the message directly via e.args.
a = set()
# First test the regular case (can use heap for allocating the decompression buffer).
try:
    name()
except NameError as e:
    print(type(e).__name__, e)
try:
    a.pop()
except KeyError as e:
    print(type(e).__name__, e)
# Access the message through e.args to force the deferred decompression path.
try:
    name()
except NameError as e:
    print(e.args[0])
try:
    a.pop()
except KeyError as e:
    print(e.args[0])
# Then test that it still works when the heap is locked (i.e. in ISR context).
micropython.heap_lock()
try:
    name()
except NameError as e:
    print(type(e).__name__)
try:
    a.pop()
except KeyError as e:
    print(type(e).__name__)
micropython.heap_unlock()
|
mit
| -2,314,301,840,580,059,600
| 21.222222
| 106
| 0.691667
| false
|
dmr/Ldtools
|
ldtools/cli.py
|
1
|
5600
|
from __future__ import print_function
import logging
import pprint
import datetime
import sys
import argparse
from ldtools.utils import (
is_valid_url,
get_slash_url,
get_rdflib_uriref,
urllib2,
)
from ldtools.helpers import set_colored_logger
from ldtools.backends import __version__
from ldtools.origin import Origin
from ldtools.resource import Resource
logger = logging.getLogger("ldtools.cli")
def get_parser():
    """Build and return the argparse parser for the ldtools CLI."""
    def check_uri(url):
        # Validate positional URL arguments at parse time.
        if not is_valid_url(url):
            raise argparse.ArgumentTypeError("%r is not a valid URL" % url)
        return url
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--version', action='version', version='%(prog)s ' + __version__,
        help="Print current version")
    parser.add_argument(
        '-v', '--verbosity', action="store", type=int, default=2,
        help='Adjust verbosity. 1 for every detail, 5 for silent')
    parser.add_argument(
        '-d', '--depth', action="store", type=int, default=0,
        help="Crawl discovered Origins x times")
    # --follow-all and --follow-uris are mutually exclusive
    follow_group = parser.add_mutually_exclusive_group()
    follow_group.add_argument(
        '--follow-all', action="store_true",
        help="Follow all URIs discovered")
    follow_group.add_argument(
        '--follow-uris', action="append", dest='follow_uris', default=[],
        help="Follow the URIs specified")
    print_group = parser.add_mutually_exclusive_group()
    print_group.add_argument(
        '--only-print-uris', action="store_true",
        help='Only prints a short representation of Resources')
    parser.add_argument(
        '--only-print-uri-content', action="store_true",
        help='Only prints data retrieved from URIs and exists')
    parser.add_argument(
        '--socket-timeout', action="store", type=int,
        help="Set the socket timeout")
    parser.add_argument(
        '-o', '--only-negotiate', action="store_true",
        help='Only do content negotiation for given URIs and print the '
             'response headers')
    parser.add_argument(
        '--GRAPH_SIZE_LIMIT', action="store", type=int,
        help="Set maximum graph size that will be processed")
    parser.add_argument('--print-all-resources', action="store_true")
    parser.add_argument(
        'origin_urls', action="store", nargs='+', type=check_uri,
        help="Pass a list of URIs. ldtools will crawl them one by one")
    return parser
def execute_ldtools(
        verbosity,
        origin_urls,
        depth,
        follow_all,
        follow_uris,
        socket_timeout,
        GRAPH_SIZE_LIMIT,
        print_all_resources,
        only_print_uris,
        only_print_uri_content,
        only_negotiate
):
    """Crawl the given origin URLs and print the discovered resources.

    Parameters mirror the CLI flags produced by get_parser(); this is
    normally invoked as execute_ldtools(**parser.parse_args().__dict__).
    Exits the process early (sys.exit(0)) when only negotiating or only
    printing raw URI content.
    """
    set_colored_logger(verbosity)
    # customize Origin.objects.post_create_hook for performance reasons
    def custom_post_create_hook(origin):
        origin.timedelta = datetime.timedelta(minutes=5)
        return origin
    Origin.objects.post_create_hook = custom_post_create_hook
    url_count = len(origin_urls)
    if url_count > 1:
        logger.info("Retrieving content of %s URLs" % url_count)
    # None means "follow everything"; an empty list means "follow nothing".
    if follow_all:
        only_follow_uris = None
        # NOTE(review): uses the root logger here but module logger elsewhere
        # -- confirm whether that is intended.
        logging.info("Following all URIs")
    elif follow_uris:
        only_follow_uris = follow_uris
        logging.info("Following values matching: %s"
                     % ", ".join(only_follow_uris))
    else:
        only_follow_uris = []
    if socket_timeout:
        import socket
        logger.info("Setting socket timeout to %s" % socket_timeout)
        socket.setdefaulttimeout(socket_timeout)
    kw = dict(raise_errors=False)
    if GRAPH_SIZE_LIMIT:
        kw["GRAPH_SIZE_LIMIT"] = GRAPH_SIZE_LIMIT
    for url in origin_urls:
        # normalize to the "slash" form so equivalent URLs map to one Origin
        url = get_slash_url(url)
        origin, created = Origin.objects.get_or_create(url)
        logger.info("Retrieving content of %s" % origin.uri)
        if only_negotiate or only_print_uri_content:
            try:
                # debuglevel=1 makes urllib print the negotiation headers
                data = origin.backend.GET(
                    uri=origin.uri,
                    httphandler=urllib2.HTTPHandler(debuglevel=1))
            except Exception as exc:
                print(exc)
                continue
            if only_print_uri_content:
                print('\n', data, '\n')
        else:
            origin.GET(only_follow_uris=only_follow_uris, **kw)
    if only_negotiate or only_print_uri_content:
        sys.exit(0)
    # re-crawl every known Origin `depth` times to pick up discovered URIs
    if depth:
        for round in range(depth):
            for origin in Origin.objects.all():
                origin.GET(only_follow_uris=only_follow_uris, **kw)
    for orig_url in origin_urls:
        url = get_slash_url(orig_url)
        origin = Origin.objects.get(url)
        for r in origin.get_resources():
            if r._uri == get_rdflib_uriref(orig_url):
                logger.info(u"Printing all available information "
                            "about {0}".format(r._uri))
                # strip internal bookkeeping attributes before dumping
                if hasattr(r, "_has_changes"):
                    delattr(r, "_has_changes")
                if hasattr(r, "pk"):
                    delattr(r, "pk")
                pprint.pprint(r.__dict__)
    if print_all_resources:
        all_resources = Resource.objects.all()
        if (only_print_uris):
            for resource in all_resources:
                print(resource)
        else:
            for r in all_resources:
                if hasattr(r, "_has_changes"):
                    delattr(r, "_has_changes")
                if hasattr(r, "pk"):
                    delattr(r, "pk")
                pprint.pprint(r.__dict__)
def main():
    """CLI entry point: parse the command line and run ldtools."""
    args = get_parser().parse_args()
    execute_ldtools(**vars(args))
|
bsd-2-clause
| -6,555,714,055,889,037,000
| 31
| 75
| 0.594286
| false
|
terceiro/squad
|
squad/core/queries.py
|
1
|
3008
|
import datetime
from squad.core import models
from django.db.models import Q, F, Sum
from django.utils import timezone
def get_metric_data(project, metrics, environments, date_start=None,
                    date_end=None):
    """Collect a time series for every requested metric.

    Note that date_start and date_end **must** be datetime objects and not
    strings, if used.  Missing bounds default to the epoch and "now".  The
    pseudo-metric ``':tests:'`` maps to the overall test pass percentage.
    """
    start = timezone.make_aware(
        date_start or datetime.datetime.fromtimestamp(0))
    end = timezone.make_aware(date_end or datetime.datetime.now())
    return {
        metric: (
            get_tests_series(project, environments, start, end)
            if metric == ':tests:'
            else get_metric_series(project, metric, environments, start, end)
        )
        for metric in metrics
    }
def get_metric_series(project, metric, environments, date_start, date_end):
    """Fetch the raw data points of one metric for each environment.

    Returns a dict keyed by environment slug; each value is a list of
    [build_timestamp, result, build_version, annotation, metric_id,
    is_outlier_as_str] entries ordered by test run time.
    """
    entry = {}
    for environment in environments:
        # One queryset per environment, restricted to the date window.
        series = models.Metric.objects.by_full_name(metric).filter(
            test_run__build__project=project,
            test_run__environment__slug=environment,
            test_run__created_at__range=(date_start, date_end)
        ).order_by(
            'test_run__datetime',
        ).values(
            'id',
            'test_run__build__datetime',
            'test_run__build__version',
            'result',
            'test_run__build__annotation__description',
            'is_outlier'
        )
        # annotation may be NULL; normalize it to an empty string
        entry[environment] = [
            [int(p['test_run__build__datetime'].timestamp()), p['result'], p['test_run__build__version'], p['test_run__build__annotation__description'] or "", p['id'], str(p['is_outlier'])] for p in series
        ]
    return entry
def get_tests_series(project, environments, date_start, date_end):
    """Compute the overall test pass percentage per build per environment.

    Returns a dict keyed by environment slug; each value is a list of
    [build_timestamp, pass_percentage, build_version, annotation] entries
    ordered by build datetime.
    """
    results = {}
    # total number of test results regardless of outcome
    tests_total = (F('tests_pass') + F('tests_skip') + F('tests_fail') + F('tests_xfail'))
    for environment in environments:
        # suite=None selects the aggregated, per-test-run status row
        # (NOTE(review): presumably the overall status -- confirm in models)
        series = models.Status.objects.filter(
            test_run__build__project=project,
            suite=None,
            test_run__environment__slug=environment,
            test_run__created_at__range=(date_start, date_end)
        ).filter(
            Q(tests_pass__gt=0) | Q(tests_skip__gt=0) | Q(tests_fail__gt=0) | Q(tests_xfail__gt=0)
        ).order_by(
            'test_run__datetime'
        ).values(
            'test_run__build_id',
            'test_run__build__datetime',
            'test_run__build__version',
            'test_run__build__annotation__description',
        ).annotate(
            pass_percentage=100 * Sum('tests_pass') / Sum(tests_total)
        ).order_by('test_run__build__datetime')
        results[environment] = [
            [int(s['test_run__build__datetime'].timestamp()), s['pass_percentage'], s['test_run__build__version'], s['test_run__build__annotation__description'] or ""]
            for s in series
        ]
    return results
|
agpl-3.0
| 5,134,490,325,207,839,000
| 37.564103
| 205
| 0.568152
| false
|
edx/edx-enterprise
|
integrated_channels/canvas/utils.py
|
1
|
8304
|
'''Collection of static util methods for various Canvas operations'''
import logging
from http import HTTPStatus
from requests.utils import quote
from integrated_channels.exceptions import ClientError
from integrated_channels.utils import generate_formatted_log
LOGGER = logging.getLogger(__name__)
class CanvasUtil:
    """
    A util to make various util functions related to Canvas easier and co-located.

    Every method in this class is static and stateless. They all need at least
    - enterprise_configuration
    - session
    plus additional relevant arguments.

    Usage example:

      canvas_api_client._create_session()  # if needed
      CanvasUtil.find_course_in_account(
          canvas_api_client.enterprise_configuration,
          canvas_api_client.session,
          course_id,
          account_id,
      )
    """

    @staticmethod
    def find_root_canvas_account(enterprise_configuration, session):
        """
        Attempts to find root account from Canvas.

        Arguments:
          - enterprise_configuration (EnterpriseCustomerPluginConfiguration)
          - session (requests.Session)

        Returns the root account dict, or None if it cannot be found.
        """
        # NOTE(review): only the first page of /accounts is inspected here;
        # assumes the root account appears on it -- confirm for large installs.
        url = "{}/api/v1/accounts".format(enterprise_configuration.canvas_base_url)
        resp = session.get(url)
        all_accounts = resp.json()
        root_account = None
        for account in all_accounts:
            # The root account is the only one without a parent.
            if account['parent_account_id'] is None:
                root_account = account
                break
        return root_account

    @staticmethod
    def find_course_in_account(enterprise_configuration, session, canvas_account_id, edx_course_id):
        """
        Search course by edx_course_id (used as integration_id in canvas) under provided account.

        It will even return courses that are in the 'deleted' state in Canvas, so we can correctly
        skip these courses in logic as needed.

        Note: we do not need to follow pagination here since it would be extremely unlikely
        that searching by a specific edx_course_id results in many records, we generally only
        expect 1 record to come back anyway.

        Arguments:
          - enterprise_configuration (EnterpriseCustomerPluginConfiguration)
          - session (requests.Session)
          - canvas_account_id (Number) : account to search courses in
          - edx_course_id (str) : edX course key

        Raises:
          ClientError: when Canvas responds with an error status code.

        Ref: https://canvas.instructure.com/doc/api/accounts.html#method.accounts.courses_api
        The `&state[]=all` is added so we can also fetch priorly 'delete'd courses as well
        """
        url = "{}/api/v1/accounts/{}/courses/?search_term={}&state[]=all".format(
            enterprise_configuration.canvas_base_url,
            canvas_account_id,
            quote(edx_course_id),
        )
        resp = session.get(url)
        all_courses_response = resp.json()

        if resp.status_code >= 400:
            message = 'Failed to find a course under Canvas account: {account_id}'.format(
                account_id=canvas_account_id
            )
            # Canvas reports failure details either as 'reason' or 'errors'.
            if 'reason' in all_courses_response:
                message = '{} : Reason = {}'.format(message, all_courses_response['reason'])
            elif 'errors' in all_courses_response:
                message = '{} : Errors = {}'.format(message, str(all_courses_response['errors']))
            raise ClientError(
                message,
                resp.status_code
            )

        # The search endpoint matches loosely; insist on an exact
        # integration_id match before accepting a course.
        course_found = None
        for course in all_courses_response:
            if course['integration_id'] == edx_course_id:
                course_found = course
                break
        return course_found

    @staticmethod
    def get_course_id_from_edx_course_id(enterprise_configuration, session, edx_course_id):
        """
        Uses the Canvas search api to find a course by edx_course_id

        Arguments:
          - enterprise_configuration (EnterpriseCustomerPluginConfiguration)
          - session (requests.Session)
          - edx_course_id (str) : edX course key

        Returns:
            canvas_course_id (string): id from Canvas

        Raises:
          ClientError: when no Canvas course matches the edX course ID.
        """
        course = CanvasUtil.find_course_by_course_id(
            enterprise_configuration,
            session,
            edx_course_id,
        )
        if not course:
            raise ClientError(
                "No Canvas courses found with associated edx course ID: {}.".format(
                    edx_course_id
                ),
                HTTPStatus.NOT_FOUND.value
            )
        return course['id']

    @staticmethod
    def find_course_by_course_id(
        enterprise_configuration,
        session,
        edx_course_id,
    ):
        """
        First attempts to find course under the configured account id.

        As fallback, to account for cases where course was priorly transmitted to a different
        account, it also searches under the root account for the course.

        Arguments:
          - enterprise_configuration (EnterpriseCustomerPluginConfiguration)
          - session (requests.Session)
          - edx_course_id (str) : edX course key

        Returns:
          - Course dict if the course found in Canvas,
          - None otherwise
        """
        course = CanvasUtil.find_course_in_account(
            enterprise_configuration,
            session,
            enterprise_configuration.canvas_account_id,
            edx_course_id,
        )
        if not course:
            # now let's try the root account instead (searches under all subaccounts)
            root_canvas_account = CanvasUtil.find_root_canvas_account(enterprise_configuration, session)
            course = CanvasUtil.find_course_in_account(
                enterprise_configuration,
                session,
                root_canvas_account['id'],
                edx_course_id,
            )
            if course:
                LOGGER.info(generate_formatted_log(
                    'canvas',
                    enterprise_configuration.enterprise_customer.uuid,
                    None,
                    edx_course_id,
                    'Found course under root Canvas account'
                ))
        return course

    @staticmethod
    def determine_next_results_page(canvas_api_response):
        """
        Determine the URL of the next page of results, if there is one.

        Canvas pagination headers come back as a string-

        'headers': {
            'Link': '<{assignment_url}?page=2&per_page=10>; rel="current",' \
                    '<{assignment_url}?page=1&per_page=10>; rel="prev",' \
                    '<{assignment_url}?page=1&per_page=10>; rel="first",' \
                    '<{assignment_url}?page=2&per_page=10>; rel="last"' \
        }

        so we have to parse out the linked list of pages and determine if we're
        at the end or if a next page exists.

        Args:
          - canvas_api_response: a requests library Response object that contains
            the pagination headers

        Returns:
          The next page URL (str), or False when already on the last page or
          when the response carries no pagination ('Link') header at all.
        """
        # Guard against responses without pagination metadata: previously this
        # raised KeyError; a missing header simply means a single page.
        link_header = canvas_api_response.headers.get('Link')
        if not link_header:
            return False
        pages = {}
        for entry in link_header.split(','):
            # Each entry looks like '<url>; rel="name"', possibly with
            # surrounding whitespace after the comma.
            entry = entry.strip()
            rel = entry.split('; rel=')[1].strip('"')
            pages[rel] = entry.split(';')[0].strip('<>')
        if pages.get('current') == pages.get('last', None):
            return False
        return pages.get('next')

    @staticmethod
    def course_create_endpoint(enterprise_configuration):
        """
        Returns endpoint to POST to for course creation
        """
        return '{}/api/v1/accounts/{}/courses'.format(
            enterprise_configuration.canvas_base_url,
            enterprise_configuration.canvas_account_id,
        )

    @staticmethod
    def course_update_endpoint(enterprise_configuration, course_id):
        """
        Returns endpoint to PUT to for course update
        """
        return '{}/api/v1/courses/{}'.format(
            enterprise_configuration.canvas_base_url,
            course_id,
        )

    @staticmethod
    def course_assignments_endpoint(enterprise_configuration, course_id):
        """
        Returns endpoint to GET to for course assignments
        """
        return '{}/api/v1/courses/{}/assignments'.format(
            enterprise_configuration.canvas_base_url,
            course_id,
        )
|
agpl-3.0
| -1,647,801,254,958,286,300
| 35.262009
| 120
| 0.591522
| false
|
shaochangbin/crosswalk
|
app/tools/android/manifest_json_parser.py
|
1
|
8310
|
#!/usr/bin/env python
# Copyright (c) 2013, 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Parse JSON-format manifest configuration file and
provide the specific fields, which have to be integrated with
packaging tool(e.g. make_apk.py) to generate xml-format manifest file.
Sample usage from shell script:
python manifest_json_parser.py --jsonfile=/path/to/manifest.json
"""
import json
import optparse
import os
import re
import sys
def HandlePermissionList(permission_list):
  """This function is used to handle the permission list and return the string
  of permissions.

  Only alphabets and '.' are allowed in a permission name; the process exits
  with status 1 on the first invalid entry.

  Args:
    permission_list: the permission list, e.g.["permission1", "permission2"].

  Returns:
    The string of permissions with ':' as separator.
    e.g. "permission1:permission2".
  """
  valid_name = re.compile(r'^[a-zA-Z\.]*$')
  names = list(permission_list)
  for name in names:
    if valid_name.match(name):
      continue
    print('\'Permissions\' field error, only alphabets and '
          '\'.\' are allowed.')
    sys.exit(1)
  return ':'.join(names)
class ManifestJsonParser(object):
  """ The class is used to parse json-format manifest file, recompose the fields
  and provide the field interfaces required by the packaging tool.

  Args:
    input_path: the full path of the json-format manifest file.
  """
  def __init__(self, input_path):
    # Read and decode the manifest; any parse or field error is fatal.
    self.input_path = input_path
    input_file = open(self.input_path)
    try:
      input_src = input_file.read()
      self.data_src = json.JSONDecoder().decode(input_src)
      self.ret_dict = self._output_items()
    except (TypeError, ValueError, IOError):
      print('There is a parser error in manifest.json file.')
      sys.exit(1)
    except KeyError:
      print('There is a field error in manifest.json file.')
      sys.exit(1)
    finally:
      input_file.close()
  def _output_items(self):
    """ The manifest field items are reorganized and returned as a
    dictionary to support single or multiple values of keys.

    Returns:
      A dictionary to the corresponding items. the dictionary keys are
      described as follows, the value is set to "" if the value of the
      key is not set.
      app_name: The application name.
      version: The version number.
      icons: An array of icons.
      app_url: The url of application, e.g. hosted app.
      description: The description of application.
      app_root: The root path of the web, this flag allows to package
                local web application as apk.
      app_local_path: The relative path of entry file based on app_root,
                      this flag should work with "--app-root" together.
      permissions: The permission list.
      required_version: The required crosswalk runtime version.
      plugin: The plug-in information.
      fullscreen: The fullscreen flag of the application.
      launch_screen: The launch screen configuration.
    """
    ret_dict = {}
    # 'name' and 'version' are the only mandatory fields.
    if 'name' not in self.data_src:
      print('Error: no \'name\' field in manifest.json file.')
      sys.exit(1)
    ret_dict['app_name'] = self.data_src['name']
    if 'version' not in self.data_src:
      print('Error: no \'version\' field in manifest.json file.')
      sys.exit(1)
    ret_dict['version'] = self.data_src['version']
    # Top-level 'launch_path' takes precedence over app.launch.local_path.
    if 'launch_path' in self.data_src:
      app_url = self.data_src['launch_path']
    elif ('app' in self.data_src and
          'launch' in self.data_src['app'] and
          'local_path' in self.data_src['app']['launch']):
      app_url = self.data_src['app']['launch']['local_path']
    else:
      app_url = ''
    # Remote http(s) URLs are kept as app_url (hosted app); anything else is
    # treated as a local path relative to the app root.
    if app_url.lower().startswith(('http://', 'https://')):
      app_local_path = ''
    else:
      app_local_path = app_url
      app_url = ''
    file_path_prefix = os.path.split(self.input_path)[0]
    if 'icons' in self.data_src:
      ret_dict['icons'] = self.data_src['icons']
    else:
      ret_dict['icons'] = {}
    # The app root defaults to the directory containing manifest.json.
    app_root = file_path_prefix
    ret_dict['description'] = ''
    if 'description' in self.data_src:
      ret_dict['description'] = self.data_src['description']
    ret_dict['app_url'] = app_url
    ret_dict['app_root'] = app_root
    ret_dict['app_local_path'] = app_local_path
    ret_dict['permissions'] = ''
    if 'permissions' in self.data_src:
      try:
        permission_list = self.data_src['permissions']
        ret_dict['permissions'] = HandlePermissionList(permission_list)
      except (TypeError, ValueError, IOError):
        print('\'Permissions\' field error in manifest.json file.')
        sys.exit(1)
    ret_dict['required_version'] = ''
    if 'required_version' in self.data_src:
      ret_dict['required_version'] = self.data_src['required_version']
    ret_dict['plugin'] = ''
    if 'plugin' in self.data_src:
      ret_dict['plugin'] = self.data_src['plugin']
    # NOTE(review): this is a presence/substring check only, so
    # "display": {"fullscreen": false} would still enable fullscreen --
    # confirm whether that is intended.
    if 'display' in self.data_src and 'fullscreen' in self.data_src['display']:
      ret_dict['fullscreen'] = 'true'
    else:
      ret_dict['fullscreen'] = ''
    ret_dict['launch_screen_img'] = ''
    if 'launch_screen' in self.data_src:
      # launch_screen requires a 'default' entry with an 'image'.
      if 'default' not in self.data_src['launch_screen']:
        print('Error: no \'default\' field for \'launch_screen\'.')
        sys.exit(1)
      default = self.data_src['launch_screen']['default']
      if 'image' not in default:
        print('Error: no \'image\' field for \'launch_screen.default\'.')
        sys.exit(1)
      ret_dict['launch_screen_img'] = default['image']
    return ret_dict
  def ShowItems(self):
    """Show the processed results, it is used for command-line
    internal debugging."""
    print("app_name: %s" % self.GetAppName())
    print("version: %s" % self.GetVersion())
    print("description: %s" % self.GetDescription())
    print("icons: %s" % self.GetIcons())
    print("app_url: %s" % self.GetAppUrl())
    print("app_root: %s" % self.GetAppRoot())
    print("app_local_path: %s" % self.GetAppLocalPath())
    print("permissions: %s" % self.GetPermissions())
    print("required_version: %s" % self.GetRequiredVersion())
    print("plugins: %s" % self.GetPlugins())
    print("fullscreen: %s" % self.GetFullScreenFlag())
    print('launch_screen.default.image: %s' % self.GetLaunchScreenImg())
  def GetAppName(self):
    """Return the application name."""
    return self.ret_dict['app_name']
  def GetVersion(self):
    """Return the version number."""
    return self.ret_dict['version']
  def GetIcons(self):
    """Return the icons."""
    return self.ret_dict['icons']
  def GetAppUrl(self):
    """Return the URL of the application."""
    return self.ret_dict['app_url']
  def GetDescription(self):
    """Return the description of the application."""
    return self.ret_dict['description']
  def GetAppRoot(self):
    """Return the root path of the local web application."""
    return self.ret_dict['app_root']
  def GetAppLocalPath(self):
    """Return the local relative path of the local web application."""
    return self.ret_dict['app_local_path']
  def GetPermissions(self):
    """Return the permissions."""
    return self.ret_dict['permissions']
  def GetRequiredVersion(self):
    """Return the required crosswalk runtime version."""
    return self.ret_dict['required_version']
  def GetPlugins(self):
    """Return the plug-in path and file name."""
    return self.ret_dict['plugin']
  def GetFullScreenFlag(self):
    """Return the set fullscreen flag of the application."""
    return self.ret_dict['fullscreen']
  def GetLaunchScreenImg(self):
    """Return the default img for launch_screen."""
    return self.ret_dict['launch_screen_img']
def main(argv):
  """Respond to command mode and show the processed field values."""
  option_parser = optparse.OptionParser()
  option_parser.add_option(
      '-j', '--jsonfile', action='store', dest='jsonfile',
      help=('The input json-format file name. Such as: '
            '--jsonfile=manifest.json'))
  options, _ = option_parser.parse_args()
  # Without any arguments there is nothing to parse; show usage instead.
  if len(argv) == 1:
    option_parser.print_help()
    return 0
  ManifestJsonParser(options.jsonfile).ShowItems()
  return 0
if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
bsd-3-clause
| -8,263,335,172,196,524,000
| 34.211864
| 80
| 0.644043
| false
|
morevnaproject/RenderChan
|
renderchan/core.py
|
1
|
56360
|
__author__ = 'Konstantin Dmitriev'
__version__ = '1.0-alpha1'
import sys
from renderchan.file import RenderChanFile
from renderchan.project import RenderChanProjectManager
from renderchan.module import RenderChanModuleManager, RenderChanModule
from renderchan.utils import mkdirs
from renderchan.utils import float_trunc
from renderchan.utils import sync
from renderchan.utils import touch
from renderchan.utils import copytree
from renderchan.utils import which
from renderchan.utils import is_true_string
import os, time
import shutil
import subprocess
import zipfile
#TODO: This class actually should be named something like RenderChanJob (this better reflects its purpose)
class RenderChan():
    def __init__(self):
        """Initialize job state, project/module managers and locate ffmpeg.

        Raises:
            Exception: if neither an ffmpeg nor an avconv binary is found.
        """
        self.datadir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "templates")
        self.available_renderfarm_engines = ("puli","afanasy")
        # empty string means "render locally" (no renderfarm)
        self.renderfarm_engine = ""
        self.renderfarm_host = "127.0.0.1"
        self.renderfarm_port = 8004
        print("RenderChan initialized.")
        self.start_time = time.time()
        self.projects = RenderChanProjectManager()
        self.modules = RenderChanModuleManager()
        self.loadedFiles = {}
        # TODO: dry_run and force shouldn't be stored in RenderChan object. It's better to pass them as arguments to submit()
        self.dry_run = False
        self.force = False
        self.track = False
        # Action. Possible values - render (default), print, pack, clean
        self.action = "render"
        # Option, which determines if RenderChan should create placeholders for missing files
        # TODO: This option is not possible to set via commandline at this moment.
        # TODO: Allow to configure how to deal with missing files: create empty placeholder (default), create warning placeholder, none or raise exception.
        self.recreateMissing = False
        self.force_proxy = False
        self.trackedFiles = {}
        self.trackedFilesStack = []
        self.graph = None # used by renderfarm
        # == taskgroups bug / commented ==
        # The following are the special taskgroups used for managing stereo rendering
        #self.taskgroupLeft = None
        #self.taskgroupRight = None
        # FIXME: The childTask is a dirty workaround, which we need because of broken taskgroups functionality (search for "taskgroups bug" string to get the commented code)
        self.childTask = None
        self.AfanasyBlockClass=None
        self.cgru_location = "/opt/cgru"
        self.snapshot_path = None
        self.post_script = None
        # prefer ffmpeg, fall back to avconv
        self.ffmpeg_binary = ''
        ffmpeg_path = RenderChanModule.findBinary(self,'ffmpeg')
        avconv_path = RenderChanModule.findBinary(self,'avconv')
        if which(ffmpeg_path) != None:
            self.ffmpeg_binary = ffmpeg_path
        elif which(avconv_path) != None:
            self.ffmpeg_binary = avconv_path
        if self.ffmpeg_binary == '':
            raise Exception('ERROR: No ffmpeg binary found. Please install ffmpeg.')
    def __del__(self):
        """Print total execution time on teardown (local rendering only)."""
        # Renderfarm jobs run remotely, so local wall time is not reported.
        if self.renderfarm_engine == "":
            t = time.time()-self.start_time
            hours = int(t/3600)
            t = t - hours*3600
            minutes = int(t/60)
            t = t - minutes*60
            seconds = int(t)
            print()
            print()
            print("Execution time: %02d:%02d:%02d " % ( hours, minutes, seconds ))
            print()
    def setHost(self, host):
        """Set the renderfarm server hostname or IP address."""
        self.renderfarm_host=host
    def setPort(self, port):
        """Set the renderfarm server port."""
        self.renderfarm_port=port
    def setStereoMode(self, mode):
        """Switch the stereo mode while keeping the current render profile."""
        self.setProfile(self.projects.profile, mode)
    def setProfile(self, profile, stereo=None):
        """Apply a render profile (and optional stereo mode) to all projects.

        :type profile: str
        :param stereo: stereo mode; defaults to the current projects.stereo
        """
        if stereo == None:
            stereo=self.projects.stereo
        if self.projects.active:
            # Update root project
            self.projects.active.config["stereo"]=stereo
            self.projects.active.loadRenderConfig(profile)
            # Update child projects
            for key in self.projects.list.keys():
                project = self.projects.list[key]
                project.config=self.projects.active.config.copy()
                # NOTE(review): children load self.projects.profile (the
                # previous profile), not the new ``profile`` -- it is only
                # updated below; confirm this ordering is intended.
                project.loadRenderConfig(self.projects.profile)
                # Reload module configuration
                loaded_modules = project.dependencies[:]
                project.dependencies = []
                for module_name in loaded_modules:
                    module = self.modules.get(module_name)
                    project.registerModule(module)
        self.projects.profile=profile
        self.projects.stereo=stereo
    def submit(self, filename, dependenciesOnly=False, allocateOnly=False, stereo=""):
        """
        Entry point: build the job graph for *filename* and execute the
        configured action.

        Depending on ``self.action`` the dependency graph is printed
        ("print"), packed into a zip archive ("pack"), or rendered
        ("render") -- locally or through a renderfarm engine
        (afanasy/puli), optionally producing stereo output.

        :param filename: path of the source file to process.
        :type filename: str
        :param dependenciesOnly: evaluate/render dependencies only.
        :param allocateOnly: only allocate a placeholder rendering.
        :param stereo: stereo mode -- "left"/"l", "right"/"r", or a merged
            mode: "vertical"/"v", "vertical-cross"/"vc", "horizontal"/"h",
            "horizontal-cross"/"hc"; "" means mono.
        :return: 1 on error, otherwise None.
        """
        taskfile = RenderChanFile(filename, self.modules, self.projects)
        self.trackFileBegin(taskfile)
        # A file can only be processed when it belongs to a renderchan project.
        if taskfile.project == None:
            print(file=sys.stderr)
            print("ERROR: Can't render a file which is not a part of renderchan project.", file=sys.stderr)
            print(file=sys.stderr)
            self.trackFileEnd()
            return 1
        # The file extension must map to a registered module.
        if not taskfile.module:
            print(file=sys.stderr)
            extension = os.path.splitext(taskfile.getPath())[1]
            if extension:
                print("ERROR: The '%s' file type was not recoginized." % extension, file=sys.stderr)
            else:
                print("ERROR: The provided file does not have an extension.", file=sys.stderr)
            print(file=sys.stderr)
            self.trackFileEnd()
            return 1
        if self.action =="print":
            # Just walk the graph and list every file it touches.
            self.addToGraph(taskfile, dependenciesOnly, allocateOnly)
            print()
            for file in self.trackedFiles.values():
                print("File: "+file["source"])
            print()
            # Close cache
            for path in self.projects.list.keys():
                self.projects.list[path].cache.close()
        elif self.action =="pack":
            # Collect every tracked file and zip them relative to their
            # common parent directory.
            self.addToGraph(taskfile, dependenciesOnly, allocateOnly)
            list = []
            for file in self.trackedFiles.values():
                list.append(file["source"])
            commonpath = os.path.commonpath(list)
            #for i,c in enumerate(list):
            #    list[i]=c[len(commonpath)+1:]
            #    print(list[i])
            print()
            zipname = os.path.basename(taskfile.getPath())+'.zip'
            if os.path.exists(os.path.join(os.getcwd(),zipname)):
                print("ERROR: File "+os.path.join(os.getcwd(),zipname)+" already exists.")
                exit()
            # Mode 'x' fails if the archive already exists (double safety).
            with zipfile.ZipFile(zipname, 'x') as myzip:
                for i,c in enumerate(list):
                    print("Zipping file: "+c)
                    myzip.write(c, c[len(commonpath)+1:])
            print("Written "+os.path.join(os.getcwd(),zipname)+".")
            print()
            # Close cache
            for path in self.projects.list.keys():
                self.projects.list[path].cache.close()
        elif self.action =="render":
            if self.renderfarm_engine=="afanasy":
                # Afanasy needs the CGRU python modules on sys.path before
                # "af" can be imported.
                if not os.path.exists(os.path.join(self.cgru_location,"afanasy")):
                    print("ERROR: Cannot render with afanasy, afanasy not found at cgru directory '%s'." % self.cgru_location, file=sys.stderr)
                    self.trackFileEnd()
                    return 1
                os.environ["CGRU_LOCATION"]=self.cgru_location
                os.environ["AF_ROOT"]=os.path.join(self.cgru_location,"afanasy")
                sys.path.insert(0, os.path.join(self.cgru_location,"lib","python"))
                sys.path.insert(0, os.path.join(self.cgru_location,"afanasy","python"))
                from af import Job as AfanasyJob
                from af import Block as AfanasyBlock
                self.AfanasyBlockClass=AfanasyBlock
                self.graph = AfanasyJob('RenderChan - %s - %s' % (taskfile.localPath, taskfile.projectPath))
            elif self.renderfarm_engine=="puli":
                from puliclient import Graph
                self.graph = Graph( 'RenderChan graph', poolName="default" )
            last_task = None
            if stereo in ("vertical","v","vertical-cross","vc","horizontal","h","horizontal-cross","hc"):
                # Merged stereo output: render both eyes, then stitch them.
                # Left eye graph
                self.setStereoMode("left")
                self.addToGraph(taskfile, dependenciesOnly, allocateOnly)
                if self.renderfarm_engine!="":
                    self.childTask = taskfile.taskPost
                # Right eye graph
                self.setStereoMode("right")
                self.addToGraph(taskfile, dependenciesOnly, allocateOnly)
                # Stitching altogether
                if self.renderfarm_engine=="":
                    self.job_merge_stereo(taskfile, stereo)
                elif self.renderfarm_engine=="afanasy":
                    name = "StereoPost - %f" % ( time.time() )
                    block = self.AfanasyBlockClass(name, 'generic')
                    block.setCommand("renderchan-job-launcher \"%s\" --action merge --profile %s --stereo %s --compare-time %f --active-project \"%s\"" % ( taskfile.getPath(), self.projects.profile, stereo, time.time(), self.projects.active.path ))
                    if taskfile.taskPost!=None:
                        block.setDependMask(taskfile.taskPost)
                    block.setNumeric(1,1,100)
                    block.setCapacity(100)
                    self.graph.blocks.append(block)
                    last_task = name
                elif self.renderfarm_engine=="puli":
                    runner = "puliclient.contrib.commandlinerunner.CommandLineRunner"
                    # Add parent task which composes results and places it into valid destination
                    command = "renderchan-job-launcher \"%s\" --action merge --profile %s --stereo %s --compare-time %f --active-project %s" % ( taskfile.getPath(), self.projects.profile, stereo, time.time(), self.projects.active.path)
                    stereoTask = self.graph.addNewTask( name="StereoPost: "+taskfile.localPath, runner=runner, arguments={ "args": command} )
                    # Dummy task
                    #decomposer = "puliclient.contrib.generic.GenericDecomposer"
                    #params={ "cmd":"echo", "start":1, "end":1, "packetSize":1, "prod":"test", "shot":"test" }
                    #dummyTask = self.graph.addNewTask( name="StereoDummy", arguments=params, decomposer=decomposer )
                    # == taskgroups bug / commented ==
                    #self.graph.addEdges( [(self.taskgroupLeft, self.taskgroupRight)] )
                    #self.graph.addEdges( [(self.taskgroupRight, stereoTask)] )
                    #self.graph.addChain( [self.taskgroupLeft, dummyTask, self.taskgroupRight, stereoTask] )
                    if taskfile.taskPost!=None:
                        self.graph.addEdges( [(taskfile.taskPost, stereoTask)] )
                    last_task = stereoTask
            else:
                # Mono or single-eye rendering.
                if stereo in ("left","l"):
                    self.setStereoMode("left")
                elif stereo in ("right","r"):
                    self.setStereoMode("right")
                self.addToGraph(taskfile, dependenciesOnly, allocateOnly)
                last_task = taskfile.taskPost
            # Post-script
            if self.post_script:
                # For merged stereo modes the script receives the stitched file.
                if stereo in ("vertical","v","horizontal","h"):
                    script_arg = os.path.splitext(taskfile.getRenderPath())[0]+"-stereo-"+stereo[0:1]+os.path.splitext(taskfile.getRenderPath())[1]
                else:
                    script_arg = taskfile.getRenderPath()
                if self.renderfarm_engine=="":
                    # NOTE(review): "commandline" below is built but unused;
                    # the subprocess call constructs its own shell string.
                    commandline=[self.post_script, script_arg]
                    subprocess.run("\"%s\" \"%s\"" % ( self.post_script, script_arg), shell=True, check=True)
                elif self.renderfarm_engine=="afanasy":
                    name = "Post Script - %f" % ( time.time() )
                    block = self.AfanasyBlockClass(name, 'generic')
                    block.setCommand("\"%s\" \"%s\"" % ( self.post_script, script_arg))
                    if last_task!=None:
                        block.setDependMask(last_task)
                    block.setNumeric(1,1,100)
                    block.setCapacity(100)
                    self.graph.blocks.append(block)
            # Snapshot
            if self.snapshot_path:
                if stereo in ("vertical","v","horizontal","h"):
                    snapshot_source = os.path.splitext(taskfile.getRenderPath())[0]+"-stereo-"+stereo[0:1]+os.path.splitext(taskfile.getRenderPath())[1]
                else:
                    snapshot_source = taskfile.getProfileRenderPath()
                if self.renderfarm_engine=="":
                    self.job_snapshot(snapshot_source, self.snapshot_path)
                elif self.renderfarm_engine=="afanasy":
                    name = "Snapshot - %f" % ( time.time() )
                    block = self.AfanasyBlockClass(name, 'generic')
                    block.setCommand("renderchan-job-launcher \"%s\" --action snapshot --target-dir %s" % ( snapshot_source, self.snapshot_path))
                    if last_task!=None:
                        block.setDependMask(last_task)
                    block.setNumeric(1,1,100)
                    block.setCapacity(50)
                    self.graph.blocks.append(block)
                elif self.renderfarm_engine=="puli":
                    runner = "puliclient.contrib.commandlinerunner.CommandLineRunner"
                    # Add parent task which composes results and places it into valid destination
                    command = "renderchan-job-launcher \"%s\" --action snapshot --target-dir %s" % ( snapshot_source, self.snapshot_path)
                    snapshotTask = self.graph.addNewTask( name="Snapshot: "+taskfile.localPath, runner=runner, arguments={ "args": command} )
                    if last_task!=None:
                        self.graph.addEdges( [(last_task, snapshotTask)] )
            # Make sure to close cache before submitting job to renderfarm
            for path in self.projects.list.keys():
                self.projects.list[path].cache.close()
            # Submit job to renderfarm
            if self.renderfarm_engine=="afanasy":
                # Wait a moment to make sure cache is closed properly
                # (this allows to avoid issues with shared nfs drives)
                time.sleep(1)
                self.graph.output()
                self.graph.send()
            elif self.renderfarm_engine=="puli":
                self.graph.submit(self.renderfarm_host, self.renderfarm_port)
            else:
                # TODO: Render our Graph
                pass
        self.trackFileEnd()
def addToGraph(self, taskfile, dependenciesOnly=False, allocateOnly=False):
"""
:type taskfile: RenderChanFile
"""
for path in self.loadedFiles.keys():
self.loadedFiles[path].isDirty=None
#self.loadedFiles={}
# == taskgroups bug / commented ==
# Prepare taskgroups if we do stereo rendering
#if self.projects.active.getConfig("stereo")=="left":
# self.taskgroupLeft = self.graph.addNewTaskGroup( name="TG Left: "+taskfile.getPath() )
#elif self.projects.active.getConfig("stereo")=="right":
# self.taskgroupRight = self.graph.addNewTaskGroup( name="TG Right: "+taskfile.getPath() )
if allocateOnly and dependenciesOnly:
if os.path.exists(taskfile.getRenderPath()):
self.parseDirectDependency(taskfile, None, self.dry_run, self.force)
else:
taskfile.endFrame = taskfile.startFrame + 2
self.parseRenderDependency(taskfile, allocateOnly, self.dry_run, self.force)
elif dependenciesOnly:
self.parseDirectDependency(taskfile, None, self.dry_run, self.force)
elif allocateOnly:
if os.path.exists(taskfile.getRenderPath()):
print("File is already allocated.")
sys.exit(0)
taskfile.dependencies=[]
taskfile.endFrame = taskfile.startFrame + 2
self.parseRenderDependency(taskfile, allocateOnly, self.dry_run, self.force)
else:
self.parseRenderDependency(taskfile, allocateOnly, self.dry_run, self.force)
self.childTask = None
def trackFileBegin(self, taskfile):
"""
:type taskfile: RenderChanFile
"""
if self.track:
key = taskfile.getPath()
if key not in self.trackedFiles:
trackedFile = {}
trackedFile["source"] = key
trackedFile["deps"] = []
trackedFile["backDeps"] = []
self.trackedFiles[key] = trackedFile;
if self.trackedFilesStack:
parentKey = self.trackedFilesStack[-1]
if parentKey != key:
if key not in self.trackedFiles[parentKey]["deps"]:
self.trackedFiles[parentKey]["deps"].append(key)
if parentKey not in self.trackedFiles[key]["backDeps"]:
self.trackedFiles[key]["backDeps"].append(parentKey)
self.trackedFilesStack.append(key)
if taskfile.project and key != taskfile.project.confPath and os.path.exists(taskfile.project.confPath):
projectKey = taskfile.project.confPath
if projectKey not in self.trackedFiles:
trackedFile = {}
trackedFile["source"] = projectKey
trackedFile["deps"] = []
trackedFile["backDeps"] = []
self.trackedFiles[projectKey] = trackedFile
if projectKey not in self.trackedFiles[key]["deps"]:
self.trackedFiles[key]["deps"].append(projectKey)
if key not in self.trackedFiles[projectKey]["backDeps"]:
self.trackedFiles[projectKey]["backDeps"].append(key)
def trackFileEnd(self):
if self.track:
self.trackedFilesStack.pop()
    def parseRenderDependency(self, taskfile, allocateOnly, dryRun = False, force = False):
        """
        Decide whether *taskfile* needs (re)rendering and schedule the jobs.

        Syncs the per-profile rendering into the common render location,
        evaluates the dirty state of the file and its dependencies, and then
        either runs the render/merge jobs locally or adds them to the
        renderfarm graph (afanasy blocks or puli tasks).

        :type taskfile: RenderChanFile
        :param allocateOnly: only allocate a short placeholder rendering.
        :param dryRun: evaluate dirtiness without running/submitting jobs.
        :param force: schedule rendering even when the target looks up to date.
        :return: True when the file was considered dirty.
        """
        # TODO: Re-implement this function in the same way as __not_used__syncProfileData() ?
        self.trackFileBegin(taskfile)
        isDirty = False
        # First, let's ensure, that we are in sync with profile data
        t=taskfile.project.switchProfile(taskfile.project.getProfileDirName())
        checkTime=None
        if os.path.exists(taskfile.getProfileRenderPath()+".sync"):
            checkFile=os.path.join(taskfile.getProjectRoot(),"render","project.conf","profile.conf")
            checkTime=float_trunc(os.path.getmtime(checkFile),1)
        if os.path.exists(taskfile.getProfileRenderPath()):
            # Copy the profile rendering (and its alpha variant) into the
            # common render location.
            source=taskfile.getProfileRenderPath()
            dest=taskfile.getRenderPath()
            sync(source,dest,checkTime)
            source=os.path.splitext(taskfile.getProfileRenderPath())[0]+"-alpha."+taskfile.getFormat()
            dest=os.path.splitext(taskfile.getRenderPath())[0]+"-alpha."+taskfile.getFormat()
            sync(source,dest,checkTime)
        else:
            isDirty = True
        t.unlock()
        if not os.path.exists(taskfile.getProfileRenderPath()):
            # If no rendering exists, then obviously rendering is required
            isDirty = True
            compareTime = None
            if os.environ.get('DEBUG'):
                print("DEBUG: Dirty = 1 (no rendering exists)")
        else:
            # Otherwise we have to check against the time of the last rendering
            compareTime = float_trunc(os.path.getmtime(taskfile.getProfileRenderPath()),1)
        # Get "dirty" status for the target file and all dependent tasks, submitted as dependencies
        (isDirtyValue, tasklist, maxTime)=self.parseDirectDependency(taskfile, compareTime, dryRun, force)
        isDirty = isDirty or isDirtyValue
        # Mark this file as already parsed and thus its "dirty" value is known
        taskfile.isDirty=isDirty
        # If rendering is requested
        if not dryRun and (isDirty or force):
            # Make sure we have all directories created
            mkdirs(os.path.dirname(taskfile.getProfileRenderPath()))
            mkdirs(os.path.dirname(taskfile.getRenderPath()))
            params = taskfile.getParams(self.force_proxy)
            # Keep track of created files to allow merging them later
            output_list = os.path.splitext( taskfile.getProfileRenderPath() )[0] + ".txt"
            output_list_alpha = os.path.splitext( taskfile.getProfileRenderPath() )[0] + "-alpha.txt"
            if taskfile.getPacketSize() > 0:
                # Chunked rendering: write the chunk lists consumed by the
                # merge step later.
                segments = self.decompose(taskfile.getStartFrame(), taskfile.getEndFrame(), taskfile.getPacketSize())
                f = open(output_list, 'w')
                fa = None
                try:
                    if "extract_alpha" in params and is_true_string(params["extract_alpha"]):
                        fa = open(output_list_alpha, 'w')
                    for range in segments:
                        start=range[0]
                        end=range[1]
                        chunk_name = taskfile.getProfileRenderPath(start,end)
                        f.write("file '%s'\n" % (chunk_name))
                        if "extract_alpha" in params and is_true_string(params["extract_alpha"]):
                            alpha_output = os.path.splitext(chunk_name)[0] + "-alpha" + os.path.splitext(chunk_name)[1]
                            fa.write("file '%s'\n" % (alpha_output))
                finally:
                    f.close()
                    if fa:
                        fa.close()
            else:
                segments=[ (None,None) ]
            if allocateOnly:
                # Make sure this file will be re-rendered next time
                compare_time=taskfile.mtime-1000
            else:
                compare_time=maxTime
            if self.renderfarm_engine=="":
                # Local rendering: process chunks and merge synchronously.
                for range in segments:
                    start=range[0]
                    end=range[1]
                    self.job_render(taskfile, taskfile.getFormat(), self.updateCompletion, start, end, compare_time)
                self.job_merge(taskfile, taskfile.getFormat(), taskfile.project.getConfig("stereo"), compare_time)
            elif self.renderfarm_engine=="afanasy":
                # Render block
                command = "renderchan-job-launcher \"%s\" --action render --format %s --profile %s --compare-time %s --active-project \"%s\"" % ( taskfile.getPath(), taskfile.getFormat(), self.projects.profile, compare_time, self.projects.active.path)
                if self.projects.stereo!="":
                    command += " --stereo %s" % (self.projects.stereo)
                if taskfile.getPacketSize()>0:
                    command += " --start @#@ --end @#@"
                if taskfile.project.path == self.projects.active.path:
                    name = "%s - %f" % ( taskfile.localPath, time.time() )
                else:
                    name = "%s - %s - %f" % ( taskfile.localPath, taskfile.projectPath, time.time() )
                # Afanasy uses his own algorythms to parse output for the modules it supports.
                # For example, it terminates rendering process if Blender complains for missing library
                # file.
                # This behaviour is not desirable, since it can confuse users : file rendered succesfully
                # with RenderChan in standalone mode, but fails to render on Renderfarm. So, I have diabled
                # blocktype assigment below.
                # Food for thought: In the future we need to think on how to handle integrity check on
                # our own.
                # Food for thought: SHould we make blocktype assigment an option?
                #
                #if taskfile.module.getName() in ("blender"):
                #    blocktype=taskfile.module.getName()
                #else:
                #    blocktype="generic"
                blocktype="generic"
                block = self.AfanasyBlockClass(name, blocktype)
                block.setCommand(command)
                block.setErrorsTaskSameHost(-2)
                if taskfile.getPacketSize()>0:
                    block.setNumeric(taskfile.getStartFrame(),taskfile.getEndFrame(),taskfile.getPacketSize())
                else:
                    block.setNumeric(1,1,100)
                # Audio encoding is cheap, Krita is heavy -- tune farm capacity.
                if taskfile.module.getName() in ("flac","mp3","vorbis"):
                    block.setCapacity(50)
                elif taskfile.module.getName() in ("krita"):
                    block.setCapacity(500)
                depend_mask=[]
                for dep_task in tasklist:
                    depend_mask.append(dep_task)
                if self.childTask!=None:
                    depend_mask.append(self.childTask)
                block.setDependMask("|".join(depend_mask))
                command = "renderchan-job-launcher \"%s\" --action merge --format %s --profile %s --compare-time %s --active-project \"%s\"" % ( taskfile.getPath(), taskfile.getFormat(), self.projects.profile, compare_time, self.projects.active.path )
                if self.projects.stereo!="":
                    command += " --stereo %s" % (self.projects.stereo)
                self.graph.blocks.append(block)
                # Post block
                if taskfile.project.path == self.projects.active.path:
                    name_post = "Post %s - %f" % ( taskfile.localPath, time.time() )
                else:
                    name_post = "Post %s - %s - %f" % ( taskfile.localPath, taskfile.projectPath, time.time() )
                taskfile.taskPost = name_post
                block = self.AfanasyBlockClass(name_post, "generic")
                block.setNumeric(1,1,100)
                block.setCommand(command)
                block.setDependMask(name)
                block.setErrorsTaskSameHost(-2)
                block.setCapacity(50)
                self.graph.blocks.append(block)
            elif self.renderfarm_engine=="puli":
                # Puli part here
                graph_destination = self.graph
                # == taskgroups bug / commented ==
                #if self.projects.active.getConfig("stereo")=="left":
                #    graph_destination = self.taskgroupLeft
                #    name+=" (L)"
                #elif self.projects.active.getConfig("stereo")=="right":
                #    graph_destination = self.taskgroupRight
                #    name+=" (R)"
                #else:
                #    graph_destination = self.graph
                runner = "puliclient.contrib.commandlinerunner.CommandLineRunner"
                # Add parent task which composes results and places it into valid destination
                command = "renderchan-job-launcher \"%s\" --action merge --format %s --profile %s --compare-time %s --active-project \"%s\"" % ( taskfile.getPath(), taskfile.getFormat(), self.projects.profile, compare_time, self.projects.active.path )
                if self.projects.stereo!="":
                    command += " --stereo %s" % (self.projects.stereo)
                taskfile.taskPost=graph_destination.addNewTask( name="Post: "+taskfile.localPath, runner=runner, arguments={ "args": command} )
                # Add rendering segments
                for range in segments:
                    start=range[0]
                    end=range[1]
                    if start!=None and end!=None:
                        segment_name = "Render: %s (%s-%s)" % (taskfile.localPath, start, end)
                        command = "renderchan-job-launcher \"%s\" --action render --format %s --profile %s --start %s --end %s --compare-time %s --active-project \"%s\"" % ( taskfile.getPath(), taskfile.getFormat(), self.projects.profile, start, end, compare_time, self.projects.active.path )
                    else:
                        segment_name = "Render: %s" % (taskfile.localPath)
                        command = "renderchan-job-launcher \"%s\" --action render --format %s --profile %s --compare-time %s --active-project \"%s\"" % ( taskfile.getPath(), taskfile.getFormat(), self.projects.profile, compare_time, self.projects.active.path )
                    if self.projects.stereo!="":
                        command += " --stereo %s" % (self.projects.stereo)
                    task=graph_destination.addNewTask( name=segment_name, runner=runner, arguments={ "args": command} )
                    self.graph.addEdges( [(task, taskfile.taskPost)] )
                    # Add edges for dependent tasks
                    for dep_task in tasklist:
                        self.graph.addEdges( [(dep_task, task)] )
                    if self.childTask!=None:
                        self.graph.addEdges( [(self.childTask, task)] )
        self.trackFileEnd()
        return isDirty
    def parseDirectDependency(self, taskfile, compareTime, dryRun = False, force = False):
        """
        Walk the direct dependencies of *taskfile* and evaluate its dirty state.

        Render-type dependencies are recursed into via
        :meth:`parseRenderDependency`; ordinary dependencies recurse into
        this method.

        :type taskfile: RenderChanFile
        :param compareTime: modification time of the existing rendering to
            compare against, or None when nothing is rendered yet.
        :param dryRun: evaluate only, do not schedule render jobs.
        :param force: treat files as dirty even when timestamps look fresh.
        :return: tuple ``(isDirty, tasklist, maxTime)`` -- dirty flag, list of
            renderfarm tasks this file must wait for, and the newest
            modification time among direct dependencies.
        """
        self.trackFileBegin(taskfile)
        isDirty=False
        tasklist=[]
        # maxTime is the maximum of modification times for all direct dependencies.
        # It allows to compare with already rendered pieces and continue rendering
        # if they are rendered AFTER the maxTime.
        #
        # But, if we have at least one INDIRECT dependency (i.e. render task) and it is submitted
        # for rendering, then we can't compare with maxTime (because dependency will be rendered
        # and thus rendering should take place no matter what).
        maxTime = taskfile.getTime()
        taskfile.pending=True # we need this to avoid circular dependencies
        if not taskfile.isFrozen() or force:
            deps = taskfile.getDependencies()
            for path in deps:
                path = os.path.abspath(path)
                if path in self.loadedFiles.keys():
                    dependency = self.loadedFiles[path]
                    if dependency.pending:
                        # Avoid circular dependencies
                        print("Warning: Circular dependency detected for %s. Skipping." % (path))
                        continue
                else:
                    dependency = RenderChanFile(path, self.modules, self.projects)
                    if not os.path.exists(dependency.getPath()):
                        if self.recreateMissing and dependency.projectPath!='':
                            # Let's look if we have a placeholder template
                            ext = os.path.splitext(path)[1]
                            placeholder = os.path.join(self.datadir, "missing", "empty" + ext)
                            if os.path.exists(placeholder):
                                print("  Creating an empty placeholder for %s..." % path)
                                mkdirs(os.path.dirname(path))
                                shutil.copy(placeholder, path)
                                # Backdate the placeholder so it never looks
                                # newer than real renderings.
                                t = time.mktime(time.strptime('01.01.1981 00:00:00', '%d.%m.%Y %H:%M:%S'))
                                os.utime(path,(t,t))
                            else:
                                # NOTE(review): unlike the branch below, this
                                # path does NOT "continue" -- the missing file
                                # is still registered; confirm intended.
                                print("  Skipping file %s..." % path)
                        else:
                            print("  Skipping file %s..." % path)
                            continue
                    self.loadedFiles[dependency.getPath()]=dependency
                    if dependency.project!=None and dependency.module!=None:
                        # Also index the rendered outputs of this dependency.
                        self.loadedFiles[dependency.getRenderPath()]=dependency
                        # Alpha
                        renderpath_alpha=os.path.splitext(dependency.getRenderPath())[0]+"-alpha."+dependency.getFormat()
                        self.loadedFiles[renderpath_alpha]=dependency
                # Check if this is a rendering dependency
                if path != dependency.getPath():
                    # We have a new task to render
                    if dependency.isDirty==None:
                        if dependency.module!=None:
                            dep_isDirty = self.parseRenderDependency(dependency, False, dryRun, force)
                        else:
                            raise Exception("No module to render file" + dependency.getPath())
                    else:
                        # The dependency was already submitted to graph
                        dep_isDirty = dependency.isDirty
                    if dep_isDirty:
                        # Let's return submitted task into tasklist
                        if dependency.taskPost not in tasklist:
                            tasklist.append(dependency.taskPost)
                        # Increase maxTime, because re-rendering of dependency will take place
                        maxTime=time.time()
                        isDirty = True
                    else:
                        # If no rendering requested, we still have to check if rendering result
                        # is newer than compareTime
                        #if os.path.exists(dependency.getRenderPath()):  -- file is obviously exists, because isDirty==0
                        timestamp=float_trunc(os.path.getmtime(dependency.getProfileRenderPath()),1)
                        if compareTime is None:
                            isDirty = True
                            if os.environ.get('DEBUG'):
                                print("DEBUG: %s:" % taskfile.getPath())
                                print("DEBUG: Dirty = 1 (no compare time)")
                                print()
                        elif timestamp > compareTime:
                            isDirty = True
                            if os.environ.get('DEBUG'):
                                print("DEBUG: %s:" % taskfile.getPath())
                                print("DEBUG: Dirty = 1 (dependency timestamp is higher)")
                                print("DEBUG: compareTime = %f" % (compareTime))
                                print("DEBUG: dependency time = %f" % (timestamp))
                                print()
                        if timestamp>maxTime:
                            maxTime=timestamp
                else:
                    # No, this is an ordinary dependency
                    (dep_isDirty, dep_tasklist, dep_maxTime) = self.parseDirectDependency(dependency, compareTime, dryRun, force)
                    isDirty = isDirty or dep_isDirty
                    maxTime = max(maxTime, dep_maxTime)
                    for task in dep_tasklist:
                        if task not in tasklist:
                            tasklist.append(task)
            if not isDirty and not force:
                # The source file itself may be newer than the rendering.
                timestamp = float_trunc(taskfile.getTime(), 1)
                if compareTime is None:
                    if os.environ.get('DEBUG'):
                        print("DEBUG: %s:" % taskfile.getPath())
                        print("DEBUG: Dirty = 1 (no compare time)")
                        print()
                    isDirty = True
                elif timestamp > compareTime:
                    isDirty = True
                    if os.environ.get('DEBUG'):
                        print("DEBUG: %s:" % taskfile.getPath())
                        print("DEBUG: Dirty = 1 (source timestamp is higher)")
                        print("DEBUG: compareTime = %f" % (compareTime))
                        print("DEBUG: source time = %f" % (timestamp))
                        print()
                if timestamp>maxTime:
                    maxTime=timestamp
        # Parse pack.lst and FILENAME.pack.lst files
        if taskfile.projectPath:
            deps = []
            if self.action == "pack":
                # pack.lst
                check_path = os.path.dirname(taskfile.getPath())
                while len(check_path) >= len(taskfile.projectPath):
                    path = os.path.join(check_path,"pack.lst")
                    if os.path.exists(path) and not path in self.loadedFiles.keys():
                        deps.append(path)
                    check_path = os.path.dirname(check_path)
                # FILENAME.pack.lst
                path = taskfile.getPath()+".pack.lst"
                if os.path.exists(path) and not path in self.loadedFiles.keys():
                    deps.append(path)
            for path in deps:
                dependency = RenderChanFile(path, self.modules, self.projects)
                self.loadedFiles[dependency.getPath()]=dependency
                # NOTE: We don't need to modify dirty state of our taskfile, because
                # packed data shouldn't trigger additional rendering. This is also why
                # we don't store any returned values from parseDirectDependency().
                # We still need to call parseDirectDependency() to make sure the
                # dependencies of pack.lst will get added to self.trackedFiles.
                self.parseDirectDependency(dependency, compareTime, dryRun, force)
        taskfile.pending=False
        self.trackFileEnd()
        return (isDirty, list(tasklist), maxTime)
def updateCompletion(self, value):
print("Rendering: %s" % (value*100))
    def __not_used__syncProfileData(self, renderpath):
        """
        (Unused) Recursively sync *renderpath* and all of its dependencies
        from the per-profile render tree into the common render location.

        NOTE(review): the recursion below calls ``self.syncProfileData``,
        which does not exist under that name any more -- this method would
        fail if re-enabled without renaming the call. Kept for reference.
        """
        if renderpath in self.loadedFiles.keys():
            taskfile = self.loadedFiles[renderpath]
            if taskfile.pending:
                # Avoid circular dependencies
                print("Warning: Circular dependency detected for %s. Skipping." % (renderpath))
                return
        else:
            taskfile = RenderChanFile(renderpath, self.modules, self.projects)
            if not os.path.exists(taskfile.getPath()):
                print("  No source file for %s. Skipping." % renderpath)
                return
            self.loadedFiles[taskfile.getPath()]=taskfile
        taskfile.pending=True # we need this to avoid circular dependencies
        if taskfile.project!=None and taskfile.module!=None:
            self.loadedFiles[taskfile.getRenderPath()]=taskfile
        deps = taskfile.getDependencies()
        for path in deps:
            self.syncProfileData(path)
        if renderpath != taskfile.getPath():
            # TODO: Change parseRenderDependency() in the same way?
            checkFile=os.path.join(taskfile.getProjectRoot(),"render","project.conf","profile.conf")
            checkTime=float_trunc(os.path.getmtime(checkFile),1)
            source=taskfile.getProfileRenderPath()
            dest=taskfile.getRenderPath()
            sync(source,dest,checkTime)
            source=os.path.splitext(taskfile.getProfileRenderPath())[0]+"-alpha."+taskfile.getFormat()
            dest=os.path.splitext(taskfile.getRenderPath())[0]+"-alpha."+taskfile.getFormat()
            sync(source,dest,checkTime)
        taskfile.pending=False
    def job_render(self, taskfile, format, updateCompletion, start=None, end=None, compare_time=None):
        """
        Render one chunk (or the whole frame range) of *taskfile* into the
        per-profile render tree.

        :type taskfile: RenderChanFile
        :param format: output format extension.
        :param updateCompletion: callback receiving rendering progress (0..1).
        :param start: first frame of the chunk; None renders the full range.
        :param end: last frame of the chunk; None renders the full range.
        :param compare_time: timestamp the chunk's ".done" marker must reach
            for the chunk to be considered up to date.
        """
        if start==None or end==None:
            output = taskfile.getProfileRenderPath(0,0)
            start=taskfile.getStartFrame()
            end=taskfile.getEndFrame()
        else:
            output = taskfile.getProfileRenderPath(start,end)
        if not os.path.exists(os.path.dirname(output)):
            os.makedirs(os.path.dirname(output))
        # Check if we really need to re-render
        uptodate=False
        if compare_time and not self.force:
            if os.path.exists(output+".done") and os.path.exists(output):
                if float_trunc(os.path.getmtime(output+".done"),1) >= compare_time:
                    # Hurray! No need to re-render that piece.
                    uptodate=True
        if not uptodate:
            # PROJECT LOCK
            # Make sure our rendertree is in sync with current profile
            locks=[]
            for project in self.projects.list.values():
                t=project.switchProfile(taskfile.project.getProfileDirName())
                locks.append(t)
            try:
                # Remove stale output before rendering.
                if os.path.isdir(output):
                    shutil.rmtree(output)
                if os.path.exists(output+".done"):
                    os.remove(output+".done")
                # TODO: Create file lock here
                params = taskfile.getParams(self.force_proxy)
                taskfile.module.render(taskfile.getPath(),
                                       output,
                                       int(start),
                                       int(end),
                                       format,
                                       updateCompletion,
                                       params)
                # Stamp the chunk as complete; the ".done" file carries
                # compare_time for later up-to-date checks.
                touch(output + ".done", compare_time)
                if "extract_alpha" in params and is_true_string(params["extract_alpha"]):
                    alpha_output = os.path.splitext(output)[0] + "-alpha" + os.path.splitext(output)[1]
                    touch(alpha_output + ".done", compare_time)
                # TODO: Release file lock here
            except:
                # Never leave project locks behind on failure.
                for lock in locks:
                    lock.unlock()
                print("Unexpected error:", sys.exc_info()[0])
                raise
            # Releasing PROJECT LOCK
            for lock in locks:
                lock.unlock()
        else:
            print("  This chunk is already up to date. Skipping.")
            updateCompletion(1.0)
    def job_merge(self, taskfile, format, stereo, compare_time=None):
        """
        Merge rendered chunks of *taskfile* into the final output and sync
        it from the per-profile render tree into the common render location.

        :type taskfile: RenderChanFile
        :param format: output format extension; "avi" chunks are concatenated
            with ffmpeg, image sequences are merged into one directory.
        :param stereo: stereo mode of the project (part of the job signature).
        :param compare_time: timestamp used to decide whether an existing
            merge result is still valid.
        """
        # PROJECT LOCK
        # Make sure our rendertree is in sync with current profile
        locks=[]
        for project in self.projects.list.values():
            t=project.switchProfile(taskfile.project.getProfileDirName())
            locks.append(t)
        try:
            params = taskfile.getParams(self.force_proxy)
            # Merge the main output and, when requested, the extracted
            # alpha-channel variant as well.
            suffix_list = [""]
            if "extract_alpha" in params and is_true_string(params["extract_alpha"]):
                suffix_list.append("-alpha")
            for suffix in suffix_list:
                output = os.path.splitext(taskfile.getRenderPath())[0] + suffix + "." + format
                profile_output = os.path.splitext( taskfile.getProfileRenderPath() )[0] + suffix + "." + format
                profile_output_list = os.path.splitext(profile_output)[0] + ".txt"
                # We need to merge the rendered files into single one
                print("Merging: %s" % profile_output)
                # But first let's check if we really need to do that
                uptodate = False
                if os.path.exists(profile_output):
                    if os.path.exists(profile_output + ".done") and \
                       float_trunc(os.path.getmtime(profile_output + ".done"), 1) >= compare_time:
                        # Hurray! No need to merge that piece.
                        uptodate = True
                    else:
                        # Stale result -- remove it before re-merging.
                        if os.path.isdir(profile_output):
                            shutil.rmtree(profile_output)
                        else:
                            os.remove(profile_output)
                        if os.path.exists(profile_output + ".done"):
                            os.remove(profile_output + ".done")
                if not uptodate:
                    if taskfile.getPacketSize() > 0:
                        if os.path.exists(profile_output_list):
                            # Check if we really have all segments rendered correctly
                            with open(profile_output_list, 'r') as f:
                                segments = []
                                for line in f:
                                    # Each list line has the form: file '<path>'
                                    line = line.strip()[6:-1]
                                    segments.append(line)
                                    if not os.path.exists(line+".done") or not os.path.exists(line):
                                        print("ERROR: Not all segments were rendered. Aborting.", file=sys.stderr)
                                        exit(1)
                            if os.path.isfile(profile_output+".done"):
                                os.remove(profile_output+".done")
                            if format == "avi":
                                # Lossless concatenation through ffmpeg's
                                # concat demuxer (stream copy, no re-encode).
                                subprocess.check_call(
                                    [self.ffmpeg_binary, "-y", "-safe", "0", "-f", "concat", "-i", profile_output_list, "-c", "copy", profile_output])
                            else:
                                # Merge all sequences into single directory
                                for line in segments:
                                    print(line)
                                    copytree(line, profile_output, hardlinks=True)
                            os.remove(profile_output_list)
                            # Drop the per-chunk files once merged.
                            for line in segments:
                                if os.path.isfile(line):
                                    os.remove(line)
                                else:
                                    shutil.rmtree(line, ignore_errors=True)
                                if os.path.isfile(line+".done"):
                                    os.remove(line+".done")
                            touch(profile_output + ".done", float(compare_time))
                        else:
                            print("  This chunk is already merged. Skipping.")
                            #updateCompletion(0.5)
                    else:
                        # Single-chunk rendering: just move it into place.
                        segment = os.path.splitext( taskfile.getProfileRenderPath(0,0) )[0] + suffix + "." + format
                        if os.path.exists(segment+".done") and os.path.exists(segment):
                            os.rename(segment, profile_output)
                            touch(profile_output + ".done", float(compare_time))
                        else:
                            print("ERROR: Not all segments were rendered. Aborting.", file=sys.stderr)
                            exit(1)
                # Add LST file
                if format in RenderChanModule.imageExtensions and os.path.isdir(profile_output):
                    lst_profile_path = os.path.splitext(profile_output)[0] + ".lst"
                    lst_path = os.path.splitext(output)[0] + ".lst"
                    with open(lst_profile_path, 'w') as f:
                        f.write("FPS %s\n" % params["fps"])
                        for filename in sorted(os.listdir(profile_output)):
                            if filename.endswith(format):
                                f.write("%s/%s\n" % ( os.path.basename(profile_output), filename ))
                    sync(lst_profile_path, lst_path)
                    # Compatibility
                    if taskfile.project.version < 1:
                        lst_profile_path = os.path.join(profile_output, "file.lst")
                        lst_path = os.path.join(output, "file.lst")
                        with open(lst_profile_path, 'w') as f:
                            f.write("FPS %s\n" % params["fps"])
                            for filename in sorted(os.listdir(profile_output)):
                                if filename.endswith(format):
                                    f.write("%s\n" % filename)
                        sync(lst_profile_path, lst_path)
                sync(profile_output, output)
                #touch(output+".done",arguments["maxTime"])
                touch(output, float(compare_time))
        except:
            print("ERROR: Merge operation failed.", file=sys.stderr)
            for lock in locks:
                lock.unlock()
            exit(1)
        # Releasing PROJECT LOCK
        for lock in locks:
            lock.unlock()
        #updateCompletion(1)
    def job_merge_stereo(self, taskfile, mode, format="mp4"):
        """
        Combine the left- and right-eye renderings of *taskfile* into one
        stereo video using ffmpeg.

        :type taskfile: RenderChanFile
        :param mode: stereo layout -- "vertical"/"horizontal" variants; a
            trailing "c"/"-cross" suffix produces the cross-eyed layout
            (eyes swapped).
        :param format: container extension of the merged output.
        """
        output = os.path.splitext(taskfile.getRenderPath())[0]+"-stereo-%s."+format
        # Resolve per-eye source paths by temporarily switching stereo mode.
        prev_mode = self.projects.stereo
        self.setStereoMode("left")
        input_left = taskfile.getProfileRenderPath()
        self.setStereoMode("right")
        input_right = taskfile.getProfileRenderPath()
        self.setStereoMode(prev_mode)
        if mode.endswith("c") or mode.endswith("-cross"):
            # Cross-eyed layout: swap the two eyes.
            output %= mode[0:1] + "c"
            temp = input_left
            input_left = input_right
            input_right = temp
        else:
            output %= mode[0:1]
        print("Merging: %s" % output)
        # But first let's check if we really need to do that
        uptodate = False
        if os.path.exists(output):
            if os.path.exists(output + ".done") and \
               os.path.exists(input_left) and \
               os.path.exists(input_right) and \
               float_trunc(os.path.getmtime(output + ".done"), 1) >= float_trunc(os.path.getmtime(input_left), 1) and \
               float_trunc(os.path.getmtime(output + ".done"), 1) >= float_trunc(os.path.getmtime(input_right), 1):
                # Hurray! No need to merge that piece.
                uptodate = True
            else:
                # Stale result -- remove it before re-merging.
                if os.path.isdir(output):
                    shutil.rmtree(output)
                else:
                    os.remove(output)
                if os.path.exists(output + ".done"):
                    os.remove(output + ".done")
        if not uptodate:
            if mode[0:1]=='v':
                # Vertical layout: pad to double height, overlay the second
                # eye below the first.
                subprocess.check_call(
                    ["ffmpeg", "-y", "-i", input_left, "-i", input_right,
                     "-filter_complex", "[0:v]setpts=PTS-STARTPTS, pad=iw:ih*2[bg]; [1:v]setpts=PTS-STARTPTS[fg]; [bg][fg]overlay=0:h",
                     "-c:v", "libx264", "-pix_fmt", "yuv420p", "-crf", "1",
                     "-c:a", "aac", "-qscale:a", "0",
                     "-f", "mp4",
                     output])
            else:
                # Horizontal layout: pad to double width, overlay the second
                # eye to the right of the first.
                subprocess.check_call(
                    ["ffmpeg", "-y", "-i", input_left, "-i", input_right,
                     "-filter_complex", "[0:v]setpts=PTS-STARTPTS, pad=iw*2:ih[bg]; [1:v]setpts=PTS-STARTPTS[fg]; [bg][fg]overlay=w",
                     "-c:v", "libx264", "-pix_fmt", "yuv420p", "-crf", "1",
                     "-c:a", "aac", "-qscale:a", "0",
                     "-f", "mp4",
                     output])
            touch(output + ".done", os.path.getmtime(output))
        else:
            print("  This chunk is already merged. Skipping.")
def job_snapshot(self, renderpath, snapshot_dir):
if not os.path.exists(snapshot_dir):
mkdirs(snapshot_dir)
time_string = "%s" % ( time.strftime("%Y%m%d-%H%M%S") )
filename = os.path.splitext(os.path.basename(renderpath))[0] + "-" + time_string + os.path.splitext(renderpath)[1]
snapshot_path = os.path.join(snapshot_dir, filename)
print()
print("Creating snapshot to %s ..." % (filename))
print()
if os.path.isdir(snapshot_path):
try:
copytree(renderpath, snapshot_dir, hardlinks=True)
except:
copytree(renderpath, snapshot_dir, hardlinks=False)
else:
try:
os.link(renderpath, snapshot_path)
except:
shutil.copy2(renderpath, snapshot_path)
def decompose(self, start, end, packetSize, framesList=""):
    """Split a frame range into (start, end) packets of at most packetSize.

    If framesList is given (comma-separated frames and "a-b" ranges), the
    start/end arguments are ignored and each listed item is decomposed
    instead; single frames become one-frame packets. Returns a list of
    inclusive (start, end) tuples.
    """
    packetSize = int(packetSize)

    def split_range(range_start, range_end):
        # Split the inclusive range into consecutive packets of at most
        # packetSize frames (this logic was duplicated in both branches
        # of the original implementation).
        length = range_end - range_start + 1
        if length < packetSize:
            return [(range_start, range_end)]
        full_count, remainder = divmod(length, packetSize)
        packets = []
        for i in range(full_count):
            packet_start = range_start + i * packetSize
            packets.append((packet_start, packet_start + packetSize - 1))
        if remainder:
            packets.append((range_start + full_count * packetSize, range_end))
        return packets

    result = []
    if framesList:
        for frame in framesList.split(","):
            if "-" in frame:
                first, last = frame.split("-")[0:2]
                result.extend(split_range(int(first), int(last)))
            else:
                result.append((int(frame), int(frame)))
    else:
        result.extend(split_range(int(start), int(end)))
    return result
def loadFile(self, filename):
    """Wrap filename in a RenderChanFile bound to this instance's module and project managers."""
    return RenderChanFile(filename, self.modules, self.projects)
class Attribution():
    """Collect license and attribution information for a task file.

    Recursively walks the dependencies of the given file, recording which
    freesound sounds (author -> titles) were used and which licenses each
    dependency file carries.
    """

    def __init__(self, filename, moduleManager=None, projectManager=None):
        self.modules = moduleManager
        if self.modules is None:  # idiom fix: identity comparison for None
            self.modules = RenderChanModuleManager()
        self.projects = projectManager
        if self.projects is None:
            self.projects = RenderChanProjectManager()
        # license name -> list of file paths under that license
        self.licenses = {}
        # freesound author -> list of sound titles used
        self.freesound_items = {}
        taskfile = RenderChanFile(filename, self.modules, self.projects)
        self.parse(taskfile)

    def parse(self, taskfile):
        """Recursively record freesound credits and licenses of dependencies.

        NOTE(review): there is no visited-set, so circular dependencies
        would recurse forever - presumably the dependency graph is acyclic.
        """
        for dep in taskfile.getDependencies():
            t = RenderChanFile(dep, self.modules, self.projects)
            metadata = t.getMetadata()
            if "freesound" in metadata.sources:
                for author in metadata.authors:
                    titles = self.freesound_items.setdefault(author, [])
                    if metadata.title not in titles:
                        titles.append(metadata.title)
            if metadata.license is not None:  # was: not ... == None
                self.licenses.setdefault(metadata.license, []).append(t.getPath())
            self.parse(t)

    def output(self):
        """Print the collected attribution and license report to stdout."""
        print()
        print("== Sound FX ==")
        print("This video uses these sounds from freesound:")
        print()
        for author in self.freesound_items.keys():
            print('"' + '", "'.join(self.freesound_items[author]) + '" by ' + author)
        print()
        print("== Licenses ==")
        print(", ".join(self.licenses.keys()))
        print()
        print("== Files sorted by license ==")
        for license in self.licenses.keys():
            print(license + ":")
            for file in self.licenses[license]:
                print(" " + file)
            print()
|
bsd-3-clause
| 7,134,858,892,546,014,000
| 41.9246
| 292
| 0.527715
| false
|
JoseBlanca/seq_crumbs
|
crumbs/seq/alignment_result.py
|
1
|
48822
|
'''This module holds the code that allows to analyze the alignment search
result analysis.
It can deal with blasts, iprscan or ssaha2 results.
This results can be parsed, filtered and analyzed.
This module revolves around a memory structure that represents a blast or
an iprscan result. The schema of this structure is:
result = {'query':the_query_sequence,
'matches': [a_list_of_matches(hits in the blast terminology)]
}
The sequence can have: name, description, annotations={'database':some db} and
len(sequence).
Every match is a dict.
match = {'subject':the subject sequence
'start' :match start position in bp in query
'end' :match end position in bp in query
'subject_start' : match start position in bp in subject
'subject_end' :match end position in bp in subject
'scores' :a dict with the scores
'match_parts': [a list of match_parts(hsps in the blast lingo)]
'evidences' : [a list of tuples for the iprscan]
}
All the scores are held in a dict
scores = {'key1': value1, 'key2':value2}
For instance the keys could be expect, similarity and identity for the blast
match_part is a dict:
match_part = {'query_start' : the query start in the alignment in bp
'query_end' : the query end in the alignment in bp
'query_strand' : 1 or -1
'subject_start' : the subject start in the alignment in bp
'subject_end' : the subject end in the alignment in bp
'subject_strand' : 1 or -1
'scores' :a dict with the scores
}
Iprscan has several evidences generated by different programs and databases
for every match. Every evidence is similar to a match.
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import itertools
import copy
import os
from math import log10
from crumbs.utils.optional_modules import NCBIXML
from crumbs.utils.tags import SUBJECT, QUERY, ELONGATED
from crumbs.utils.segments_utils import merge_overlaping_segments
def _text_blasts_in_file(fhand):
'It returns from Query= to Query'
cache = ''
first_time = True
for line in fhand:
if line.startswith('Query='):
if first_time:
cache = ''
first_time = False
else:
yield cache
cache = ''
cache += line
else:
if not first_time:
yield cache
def _split_description(string):
'It splits the description'
items = string.split(' ', 1)
name = items[0]
desc = items[1] if len(items) == 2 else None
return name, desc
def _text_blast_parser(fhand):
    '''It parses the blast results.

    It yields one result dict ({'query': ..., 'matches': [...]}) per query
    found in a plain-text blast output file. The parser is a line-oriented
    state machine: 'Query='/'Subject='/'>' lines open definitions, the
    following 'Length=' line closes them, and 'Score'/'Identities'/'Strand'
    plus the 'Query'/'Sbjct' alignment rows fill in the hsps.
    '''
    result = None
    previous_query = None
    for blast in _text_blasts_in_file(fhand):
        in_query_def = False
        in_subject_def = False
        for line in blast.splitlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith('Query='):
                query_name = line.split('=')[-1].strip()
                query_name, query_desc = _split_description(query_name)
                in_query_def = True
                subject_name = None
            if line.startswith('Subject=') or line.startswith('>'):
                # A new subject starts; reset all per-hsp state.
                if line.startswith('>'):
                    subject_name = line[1:].strip()
                else:
                    subject_name = line.split('=')[-1].strip()
                subject_name, subject_desc = _split_description(subject_name)
                in_subject_def = True
                query_start, query_end = None, None
                subject_start, subject_end = None, None
                query_strand, subject_strand = None, None
                score, expect, identity = None, None, None
            if line.startswith('Length='):
                # The 'Length=' line closes the pending query or subject
                # definition, whichever is open.
                length = int(line.split('=')[-1].strip())
                if in_query_def and query_name != previous_query:
                    # New query: flush (and yield) the previous result.
                    if result is not None and result['matches']:
                        result = _fix_matches(result, score_keys=['expect',
                                                                  'score'])
                        if result:
                            yield result
                    query_length = length
                    in_query_def = False
                    if query_desc:
                        query = {'name': query_name, 'description': query_desc,
                                 'length': query_length}
                    else:
                        query = {'name': query_name, 'length': query_length}
                    matches = []
                    result = {'query': query,
                              'matches': matches}
                    previous_query = query_name
                elif in_subject_def:
                    subject_length = length
                    if subject_desc:
                        subject = {'name': subject_name,
                                   'description': subject_desc,
                                   'length': subject_length}
                    else:
                        subject = {'name': subject_name,
                                   'length': subject_length}
                    in_subject_def = False
                    matches.append({'subject': subject, 'match_parts': []})
            if subject_name is None:
                # Nothing hsp-related can appear before the first subject.
                continue
            if line.startswith('Score') or line.startswith('Effective'):
                # A new 'Score' line (or the trailing 'Effective ...'
                # statistics section) flushes the pending hsp. Flushing on
                # 'Effective' also clears the strand state before the next
                # query's report begins.
                if score is not None:
                    match_part = {'subject_start': subject_start,
                                  'subject_end': subject_end,
                                  'subject_strand': subject_strand,
                                  'query_start': query_start,
                                  'query_end': query_end,
                                  'query_strand': query_strand,
                                  'scores': {'expect': expect,
                                             'identity': identity,
                                             'score': score}}
                    matches[-1]['match_parts'].append(match_part)
                    score, expect, identity = None, None, None
                    query_strand, subject_strand = None, None
                    query_start, query_end = None, None
                    subject_start, subject_end = None, None
                if line.startswith('Score'):
                    items = line.split()
                    score = float(items[2])
                    expect = float(items[-1])
            elif line.startswith('Identities'):
                items = line.split()
                # e.g. 'Identities = 22/22 (100%)' -> 100.0
                identity = float(items[3].strip('(')[:-3])
            elif line.startswith('Strand'):
                strands = line.split('=')[-1]
                strands = strands.split('/')
                query_strand = 1 if strands[0] == 'Plus' else -1
                subject_strand = 1 if strands[1] == 'Plus' else -1
            # Alignment rows ('Query ...' / 'Sbjct ...') are only parsed
            # once the 'Strand' line has set query_strand; this also keeps
            # 'Query=' definition lines out of this branch.
            if query_strand and line.startswith('Query'):
                items = line.split()
                if query_start is None:
                    query_start = int(items[1]) - 1
                query_end = int(items[-1]) - 1
            if query_strand and line.startswith('Sbjct'):
                items = line.split()
                if subject_start is None:
                    subject_start = int(items[1]) - 1
                subject_end = int(items[-1]) - 1
    else:
        # End of file: flush the last accumulated result, if any.
        if result is not None and result['matches']:
            result = _fix_matches(result, score_keys=['expect', 'score'])
            if result:
                yield result
class TextBlastParser(object):
    'Iterator over the results found in a plain-text blast output file'

    def __init__(self, fhand):
        'fhand should be an open plain-text blast output file'
        self._results = _text_blast_parser(fhand)

    def __iter__(self):
        'This object is its own iterator'
        return self

    def next(self):
        'It returns the next parsed blast result'
        return self._results.next()
# Default column layout of the NCBI blast tabular output
# (legacy -m 8 / blast+ -outfmt 6).
DEFAULT_TABBLAST_FORMAT = ('query', 'subject', 'identity', 'alignment_length',
                           'mismatches', 'gap_open', 'query_start',
                           'query_end', 'subject_start', 'subject_end',
                           'expect', 'score')
def _lines_for_every_tab_blast(fhand, line_format):
'It returns the lines for every query in the tabular blast'
ongoing_query = None
match_parts = []
for line in fhand:
items = line.strip().split()
if len(line_format) != len(items):
msg = 'Malformed line. The line has an unexpected number of items.'
msg += '\nExpected format was: ' + ' '.join(line_format) + '\n'
msg += 'Line was: ' + line + '\n'
raise RuntimeError(msg)
items = dict(zip(line_format, items))
query = items['query']
subject = items['subject']
if 'query_length' in items:
query_len = int(items['query_length'])
else:
query_len = None
if 'subject_length' in items:
subject_len = int(items['subject_length'])
else:
subject_len = None
locations = ('query_start', 'query_end', 'subject_start',
'subject_end')
match_part = {}
for field in locations:
if field in items:
match_part[field] = int(items[field]) - 1
score_fields = ('expect', 'score', 'identity')
scores = {}
for field in score_fields:
if field in items:
scores[field] = float(items[field])
if scores:
match_part['scores'] = scores
if ongoing_query is None:
ongoing_query = query
match_parts.append({'subject': subject, 'match_part': match_part,
'subject_length': subject_len})
elif query == ongoing_query:
match_parts.append({'subject': subject, 'match_part': match_part,
'subject_length': subject_len})
else:
yield ongoing_query, query_len, match_parts
match_parts = [{'subject':subject, 'match_part':match_part,
'subject_length': subject_len}]
ongoing_query = query
if ongoing_query:
yield ongoing_query, query_len, match_parts
def _group_match_parts_by_subject(match_parts):
'It yields lists of match parts that share the subject'
parts = []
ongoing_subject = None
for match_part in match_parts:
subject = match_part['subject']
subject_length = match_part['subject_length']
if ongoing_subject is None:
parts.append(match_part['match_part'])
ongoing_subject = subject
ongoing_subject_length = subject_length
elif ongoing_subject == subject:
parts.append(match_part['match_part'])
else:
yield ongoing_subject, ongoing_subject_length, parts
parts = [match_part['match_part']]
ongoing_subject = subject
ongoing_subject_length = subject_length
else:
yield ongoing_subject, ongoing_subject_length, parts
def _tabular_blast_parser(fhand, line_format):
    'Parses the tabular output of a blast result and yields Alignment result'
    # Rewind when possible so the whole file is parsed.
    if hasattr(fhand, 'seek'):
        fhand.seek(0)
    for qname, qlen, match_parts in _lines_for_every_tab_blast(fhand,
                                                               line_format):
        matches = []
        # pylint: disable=C0301
        # NOTE: the loop variable deliberately re-binds match_parts; inside
        # the loop it holds only the parts of one subject group.
        for sname, slen, match_parts in _group_match_parts_by_subject(match_parts):
            # match start and end
            # The match limits are the envelope of all its match_parts.
            match_start, match_end = None, None
            match_subject_start, match_subject_end = None, None
            for match_part in match_parts:
                if (match_start is None or
                        match_part['query_start'] < match_start):
                    match_start = match_part['query_start']
                if match_end is None or match_part['query_end'] > match_end:
                    match_end = match_part['query_end']
                if (match_subject_start is None or
                        match_part['subject_start'] < match_subject_start):
                    match_subject_start = match_part['subject_start']
                if (match_subject_end is None or
                        match_part['subject_end'] > match_subject_end):
                    match_subject_end = match_part['subject_end']
            subject = {'name': sname}
            if slen:
                subject['length'] = slen
            # The match-level expect is taken from the first (best) hsp.
            match = {'subject': subject,
                     'start': match_start,
                     'end': match_end,
                     'subject_start': match_subject_start,
                     'subject_end': match_subject_end,
                     'scores': {'expect': match_parts[0]['scores']['expect']},
                     'match_parts': match_parts}
            matches.append(match)
        if matches:
            query = {'name': qname}
            if qlen:
                query['length'] = qlen
            yield {'query': query, 'matches': matches}
class TabularBlastParser(object):
    'Iterator over the results of a tabular (-m 8 style) blast output file'

    def __init__(self, fhand, line_format=DEFAULT_TABBLAST_FORMAT):
        'fhand is the tabular file; line_format names its columns in order'
        self._results = _tabular_blast_parser(fhand, line_format)

    def __iter__(self):
        'This object is its own iterator'
        return self

    def next(self):
        'It returns the next parsed blast result'
        return self._results.next()
class BlastParser(object):
    '''An iterator blast parser that yields the blast results in a
    multiblast XML file.

    It wraps Biopython's NCBIXML parser and converts every record into the
    module's result structure (see the module docstring).
    '''

    def __init__(self, fhand, subj_def_as_accesion=None):
        '''The init requires an XML blast file to be parsed.

        subj_def_as_accesion, when given, forces whether the subject name
        is taken from the definition line (True) or the accession (False);
        otherwise it is guessed from the blast version.
        '''
        fhand.seek(0, 0)
        # Cheap sanity check: a blast XML report mentions 'xml' in its
        # very first bytes.
        sample = fhand.read(10)
        if sample and 'xml' not in sample:
            raise ValueError('Not a xml file')
        fhand.seek(0, 0)
        self._blast_file = fhand
        metadata = self._get_blast_metadata()
        blast_version = metadata['version']
        plus = metadata['plus']
        self.db_name = metadata['db_name']
        self._blast_file.seek(0, 0)
        # blast+ (and legacy blast newer than 2.2.21) put the real sequence
        # name in the definition line rather than in the accession.
        # NOTE(review): this is a lexicographic string comparison, not a
        # numeric version compare (e.g. '2.2.3' > '2.2.21') - confirm the
        # versions in use never hit that edge.
        if ((blast_version and plus) or
                (blast_version and blast_version > '2.2.21')):
            self.use_query_def_as_accession = True
            self.use_subject_def_as_accession = True
        else:
            self.use_query_def_as_accession = True
            self.use_subject_def_as_accession = False
        if subj_def_as_accesion is not None:
            self.use_subject_def_as_accession = subj_def_as_accesion
        # we use the biopython parser
        # if there are no results we put None in our blast_parse results
        self._blast_parse = None
        if fhand.read(1) == '<':
            fhand.seek(0)
            self._blast_parse = NCBIXML.parse(fhand)

    def __iter__(self):
        'Part of the iterator protocol'
        return self

    def _create_result_structure(self, bio_result):
        'Given a BioPython blast result it returns our result structure'
        # the query name and definition
        definition = bio_result.query
        if self.use_query_def_as_accession:
            # The first word of the definition is the name; the rest (if
            # any) is the description.
            items = definition.split(' ', 1)
            name = items[0]
            if len(items) > 1:
                definition = items[1]
            else:
                definition = None
        else:
            name = bio_result.query_id
            definition = definition
        if definition is None:
            definition = "<unknown description>"
        # length of query sequence
        length = bio_result.query_letters
        # now we can create the query sequence
        query = {'name': name, 'description': definition, 'length': length}
        # now we go for the hits (matches)
        matches = []
        for alignment in bio_result.alignments:
            # the subject sequence
            if self.use_subject_def_as_accession:
                items = alignment.hit_def.split(' ', 1)
                name = items[0]
                if len(items) > 1:
                    definition = items[1]
                else:
                    definition = None
            else:
                name = alignment.accession
                definition = alignment.hit_def
            if definition is None:
                definition = "<unknown description>"
            length = alignment.length
            id_ = alignment.hit_id
            subject = {'name': name, 'description': definition,
                       'length': length, 'id': id_}
            # the hsps (match parts)
            match_parts = []
            match_start, match_end = None, None
            match_subject_start, match_subject_end = None, None
            for hsp in alignment.hsps:
                expect = hsp.expect
                subject_start = hsp.sbjct_start
                subject_end = hsp.sbjct_end
                query_start = hsp.query_start
                query_end = hsp.query_end
                hsp_length = len(hsp.query)
                # We have to check the subject strand: coordinates are
                # normalized to start <= end and the strand recorded as +-1.
                if subject_start < subject_end:
                    subject_strand = 1
                else:
                    subject_strand = -1
                    subject_start, subject_end = (subject_end,
                                                  subject_start)
                # Also the query strand
                if query_start < query_end:
                    query_strand = 1
                else:
                    query_strand = -1
                    query_start, query_end = query_end, query_start
                # positives/identities may be None in some records, hence
                # the TypeError fallbacks.
                try:
                    similarity = hsp.positives * 100.0 / float(hsp_length)
                except TypeError:
                    similarity = None
                try:
                    identity = hsp.identities * 100.0 / float(hsp_length)
                except TypeError:
                    identity = None
                match_parts.append({'subject_start': subject_start,
                                    'subject_end': subject_end,
                                    'subject_strand': subject_strand,
                                    'query_start': query_start,
                                    'query_end': query_end,
                                    'query_strand': query_strand,
                                    'scores': {'similarity': similarity,
                                               'expect': expect,
                                               'identity': identity}
                                    })
                # It takes the first loc and the last loc of the hsp to
                # determine hit start and end
                if match_start is None or query_start < match_start:
                    match_start = query_start
                if match_end is None or query_end > match_end:
                    match_end = query_end
                if (match_subject_start is None or
                        subject_start < match_subject_start):
                    match_subject_start = subject_start
                if (match_subject_end is None or
                        subject_end > match_subject_end):
                    match_subject_end = subject_end
            # The match-level expect comes from the first (best) hsp.
            matches.append({
                'subject': subject,
                'start': match_start,
                'end': match_end,
                'subject_start': match_subject_start,
                'subject_end': match_subject_end,
                'scores': {'expect': match_parts[0]['scores']['expect']},
                'match_parts': match_parts})
        result = {'query': query, 'matches': matches}
        return result

    def _get_blast_metadata(self):
        '''It gets the blast version, whether it is blast+ and the database
        name by peeking at the XML header; the file position is restored.'''
        tell_ = self._blast_file.tell()
        version = None
        db_name = None
        plus = False
        for line in self._blast_file:
            line = line.strip()
            if line.startswith('<BlastOutput_version>'):
                version = line.split('>')[1].split('<')[0].split()[1]
            if line.startswith('<BlastOutput_db>'):
                db_name = line.split('>')[1].split('<')[0]
                db_name = os.path.basename(db_name)
            if version is not None and db_name is not None:
                break
        # blast+ reports its version with a trailing '+'.
        if version and '+' in version:
            plus = True
            version = version[:-1]
        self._blast_file.seek(tell_)
        return {'version': version, 'plus': plus, 'db_name': db_name}

    def next(self):
        'It returns the next blast result'
        if self._blast_parse is None:
            raise StopIteration
        else:
            bio_result = self._blast_parse.next()
            # now we have to change this biopython blast_result in our
            # structure
            our_result = self._create_result_structure(bio_result)
            return our_result
class ExonerateParser(object):
    '''Exonerate parser, it is an iterator that yields the result for each
    query separately.

    It expects exonerate output with custom 'cigar_like:' lines holding
    12 whitespace-separated fields per match part.
    '''

    def __init__(self, fhand):
        'The init requires a file to be parsed'
        self._fhand = fhand
        self._exonerate_results = self._results_query_from_exonerate()

    def __iter__(self):
        'Part of the iterator protocol'
        return self

    def _results_query_from_exonerate(self):
        '''It takes the exonerate cigar output file and yields the result for
        each query. The result is a list of match_parts '''
        self._fhand.seek(0, 0)
        # query_id -> list of raw field lists, one per cigar_like line
        cigar_dict = {}
        for line in self._fhand:
            if not line.startswith('cigar_like:'):
                continue
            items = line.split(':', 1)[1].strip().split()
            query_id = items[0]
            if query_id not in cigar_dict:
                cigar_dict[query_id] = []
            cigar_dict[query_id].append(items)
        for query_id, values in cigar_dict.items():
            yield values

    @staticmethod
    def _create_structure_result(query_result):
        '''It creates the result dictionary structure giving a list of
        match_parts of a query_id '''
        # TODO add to the match the match subject start and end
        struct_dict = {}
        query_name = query_result[0][0]
        query_length = int(query_result[0][9])
        query = {'name': query_name, 'length': query_length}
        struct_dict['query'] = query
        struct_dict['matches'] = []
        for match_part_ in query_result:
            # Unpack the 12 fields of one cigar_like line.
            (query_name, query_start, query_end, query_strand, subject_name,
             subject_start, subject_end, subject_strand, score, query_length,
             subject_length, similarity) = match_part_
            query_start = int(query_start)
            # they number the positions between symbols
            # A C G T
            # 0 1 2 3 4
            # Hence the subsequence "CG" would have start=1, end=3, and length=2
            # but we would say start=1 and end=2
            query_end = int(query_end) - 1
            subject_start = int(subject_start)
            subject_end = int(subject_end) - 1
            query_strand = _strand_transform(query_strand)
            subject_strand = _strand_transform(subject_strand)
            score = int(score)
            similarity = float(similarity)
            # For each line , It creates a match part dict
            match_part = {}
            match_part['query_start'] = query_start
            match_part['query_end'] = query_end
            match_part['query_strand'] = query_strand
            match_part['subject_start'] = subject_start
            match_part['subject_end'] = subject_end
            match_part['subject_strand'] = subject_strand
            match_part['scores'] = {'score': score, 'similarity': similarity}
            # Check if the match is already added to the struct. A match is
            # defined by a list of part matches between a query and a subject
            match_num = _match_num_if_exists_in_struc(subject_name,
                                                      struct_dict)
            if match_num is not None:
                # Extend the existing match envelope and keep the best score.
                match = struct_dict['matches'][match_num]
                if match['start'] > query_start:
                    match['start'] = query_start
                if match['end'] < query_end:
                    match['end'] = query_end
                if match['scores']['score'] < score:
                    match['scores']['score'] = score
                match['match_parts'].append(match_part)
            else:
                match = {}
                match['subject'] = {'name': subject_name,
                                    'length': int(subject_length)}
                match['start'] = query_start
                match['end'] = query_end
                match['scores'] = {'score': score}
                match['match_parts'] = []
                match['match_parts'].append(match_part)
                struct_dict['matches'].append(match)
        return struct_dict

    def next(self):
        '''It return the next exonerate hit'''
        query_result = self._exonerate_results.next()
        return self._create_structure_result(query_result)
def _strand_transform(strand):
'''It transfrom the +/- strand simbols in our user case 1/-1 caracteres '''
if strand == '-':
return -1
elif strand == '+':
return 1
def _match_num_if_exists_in_struc(subject_name, struct_dict):
'It returns the match number of the list of matches that is about subject'
for i, match in enumerate(struct_dict['matches']):
if subject_name == match['subject']['name']:
return i
return None
def get_alignment_parser(kind):
    '''Return the parser class matching the given aligner kind.

    Any other kind containing 'blast' maps to the XML BlastParser; unknown
    kinds raise KeyError.
    '''
    if kind == 'blast_tab':
        return TabularBlastParser
    if kind == 'blast_text':
        return TextBlastParser
    if 'blast' in kind:
        return BlastParser
    return {'exonerate': ExonerateParser}[kind]
def get_match_score(match, score_key, query=None, subject=None):
    '''Return the requested score for a match.

    The match-level scores dict is consulted first; when the key is absent
    there the score of the first (best) match_part is used instead. The
    query/subject arguments are accepted for interface compatibility.
    '''
    scores = match['scores']
    if score_key in scores:
        return scores[score_key]
    # fall back to the best hsp, which by convention is the first one
    return match['match_parts'][0]['scores'][score_key]
def get_match_scores(match, score_keys, query, subject):
    '''Return the list of score values for the given score_keys.

    Each value is looked up with get_match_score, in the same order as
    score_keys.
    '''
    return [get_match_score(match, key, query, subject) for key in score_keys]
def alignment_results_scores(results, scores, filter_same_query_subject=True):
    '''Collect score values across all alignment results.

    For every match of every result the requested scores are gathered; with
    a single score key a flat list is returned, otherwise one list per key.
    By default matches whose query and subject share the same name (self
    hits) are skipped.
    '''
    # one accumulator list per requested score
    collected = [[] for _ in scores]
    for result in results:
        query = result['query']
        for match in result['matches']:
            subject = match['subject']
            if (filter_same_query_subject and query is not None and
                    subject is not None and query['name'] == subject['name']):
                continue
            values = get_match_scores(match, scores, query, subject)
            for accumulator, value in zip(collected, values):
                accumulator.append(value)
    return collected[0] if len(collected) == 1 else collected
def build_relations_from_aligment(fhand, query_name, subject_name):
    '''It returns a relations dict given an alignment in markx10 format.

    The alignment must be only between two sequences, query against
    subject. The result maps each sequence name to the list of (start, end)
    segments that are aligned without gaps.
    '''
    # we parse the aligment: each sequence section starts with a single '>'
    # line; '; sq_len:' and '; al_display_start:' comment lines carry the
    # length and alignment offset, other non-comment lines are sequence.
    in_seq_section = 0
    seq, seq_len, al_start = None, None, None
    for line in fhand:
        line = line.strip()
        if not line:
            continue
        if line[0] == '>' and line[1] != '>':
            if in_seq_section:
                # Close the previous section (the 'name' here is always
                # query_name; only the first section is kept as seq0).
                seq = {'seq': seq,
                       'length': seq_len,
                       'al_start': al_start - 1,
                       'name': query_name}
                if in_seq_section == 1:
                    seq0 = seq
            in_seq_section += 1
            seq = ''
            continue
        if not in_seq_section:
            continue
        if '; sq_len:' in line:
            seq_len = int(line.split(':')[-1])
        if '; al_display_start:' in line:
            al_start = int(line.split(':')[-1])
        if line[0] not in (';', '#'):
            seq += line
    # The last section (the subject) is closed after the loop.
    seq1 = {'seq': seq,
            'length': seq_len,
            'al_start': al_start - 1,
            'name': subject_name}
    # now we get the segments: walk both aligned strings in parallel and
    # emit a segment every time a gap (or the end) closes an ungapped run.
    gap = '-'
    pos_seq0 = seq0['al_start']
    pos_seq1 = seq1['al_start']
    segment_start = None
    segments = []
    for ali_pos in range(len(seq1['seq'])):
        try:
            # NOTE(review): the columns are read with a one-position
            # look-ahead (ali_pos + 1); the final IndexError is used on
            # purpose to close the last open segment. Column 0 is never
            # inspected directly - confirm this offset is intended.
            nucl0, nucl1 = seq0['seq'][ali_pos + 1], seq1['seq'][ali_pos + 1]
            if (nucl0 == gap or nucl1 == gap) and segment_start:
                do_segment = True
                segment_end = pos_seq0 - 1, pos_seq1 - 1
            else:
                do_segment = False
        except IndexError:
            # End of the alignment: close the segment at the current
            # positions (nucl0/nucl1 keep their previous-iteration values).
            do_segment = True
            segment_end = pos_seq0, pos_seq1
        if do_segment:
            # assumes a segment is open here; an alignment ending right
            # after a gap would leave segment_start as None.
            segment = {seq0['name']: (segment_start[0], segment_end[0]),
                       seq1['name']: (segment_start[1], segment_end[1]), }
            segments.append(segment)
            segment_start = None
        if nucl0 != gap and nucl1 != gap and segment_start is None:
            segment_start = pos_seq0, pos_seq1
        if nucl0 != gap:
            pos_seq0 += 1
        if nucl1 != gap:
            pos_seq1 += 1
    # Reshape the per-segment dicts into name -> list of (start, end).
    relations = {}
    for seg in segments:
        for seq_name, limits in seg.items():
            if seq_name not in relations:
                relations[seq_name] = []
            relations[seq_name].append(limits)
    return relations
def _get_match_score(match, score_key, query=None, subject=None):
'''Given a match it returns its score.
It tries to get the score from the match, if it's not there it goes for
the first match_part.
'''
# the score can be in the match itself or in the first
# match_part
if score_key in match['scores']:
score = match['scores'][score_key]
else:
# the score is taken from the best hsp (the first one)
score = match['match_parts'][0]['scores'][score_key]
return score
def _score_above_threshold(score, min_score, max_score, log_tolerance,
log_best_score):
'It checks if the given score is a good one'
if log_tolerance is None:
if min_score is not None and score >= min_score:
match_ok = True
elif max_score is not None and score <= max_score:
match_ok = True
else:
match_ok = False
else:
if max_score is not None and score == 0.0:
match_ok = True
elif min_score is not None and score <= min_score:
match_ok = False
elif max_score is not None and score >= max_score:
match_ok = False
elif abs(log10(score) - log_best_score) < log_tolerance:
match_ok = True
else:
match_ok = False
return match_ok
def _create_scores_mapper_(score_key, score_tolerance=None,
                           max_score=None, min_score=None):
    '''It creates a mapper that keeps only the best matches.

    The returned function filters the match_parts and matches of an
    alignment by score_key, against the absolute min/max thresholds and
    (optionally) against a log10 tolerance around the best match's score.
    The alignment is modified IN PLACE and returned.
    '''
    if score_tolerance is not None:
        log_tolerance = log10(score_tolerance)
    else:
        log_tolerance = None

    def map_(alignment):
        '''It returns an alignment with the best matches'''
        if alignment is None:
            return None
        if log_tolerance is None:
            log_best_score = None
        else:
            # score of the best match (matches are assumed sorted best-first)
            try:
                best_match = alignment['matches'][0]
                best_score = _get_match_score(best_match, score_key)
                if best_score == 0.0:
                    # log10(0) is undefined; 0.0 is treated as the best score
                    log_best_score = 0.0
                else:
                    log_best_score = log10(best_score)
            except IndexError:
                # no matches at all
                log_best_score = None
        filtered_matches = []
        for match in alignment['matches']:
            filtered_match_parts = []
            for match_part in match['match_parts']:
                score = match_part['scores'][score_key]
                if _score_above_threshold(score, min_score, max_score,
                                          log_tolerance, log_best_score):
                    filtered_match_parts.append(match_part)
            match['match_parts'] = filtered_match_parts
            if not len(match['match_parts']):
                continue
            # is this match ok?
            match_score = get_match_score(match, score_key)
            if _score_above_threshold(match_score, min_score, max_score,
                                      log_tolerance, log_best_score):
                filtered_matches.append(match)
        alignment['matches'] = filtered_matches
        return alignment
    return map_
def _create_best_scores_mapper(score_key, score_tolerance=None,
                               max_score=None, min_score=None):
    '''Build a mapper that keeps only matches close to the best score.

    Thin wrapper around _create_scores_mapper_ that exposes the
    score_tolerance knob.
    '''
    return _create_scores_mapper_(score_key,
                                  score_tolerance=score_tolerance,
                                  max_score=max_score,
                                  min_score=min_score)
def _create_scores_mapper(score_key, max_score=None, min_score=None):
'It creates a mapper that keeps only the best matches'
if max_score is None and min_score is None:
raise ValueError('Either max_score or min_score should be given')
return _create_scores_mapper_(score_key, max_score=max_score,
min_score=min_score)
def _create_deepcopy_mapper():
'It creates a mapper that does a deepcopy of the alignment'
def map_(alignment):
'It does the deepcopy'
return copy.deepcopy(alignment)
return map_
def _create_empty_filter():
'It creates a filter that removes the false items'
def filter_(alignment):
'It filters the empty alignments'
if alignment:
return True
else:
return False
return filter_
def _fix_match_scores(match, score_keys):
'Given a match it copies the given scores from the first match_part'
scores = {}
if not match['match_parts']:
return
match_part = match['match_parts'][0]
for key in score_keys:
scores[key] = match_part['scores'][key]
match['scores'] = scores
def _fix_match_start_end(match):
'Given a match it fixes the start and end based on the match_parts'
match_start, match_end = None, None
match_subject_start, match_subject_end = None, None
for match_part in match['match_parts']:
if ('query_start' in match_part and
(match_start is None or
match_part['query_start'] < match_start)):
match_start = match_part['query_start']
if ('query_end' in match_part and
(match_end is None or match_part['query_end'] > match_end)):
match_end = match_part['query_end']
if ('subject_start' in match_part and
(match_subject_start is None or
match_part['subject_start'] < match_subject_start)):
match_subject_start = match_part['subject_start']
if ('subject_end' in match_part and
(match_subject_end is None or
match_part['subject_end'] > match_subject_end)):
match_subject_end = match_part['subject_end']
if match_start is not None:
match['start'] = match_start
if match_end is not None:
match['end'] = match_end
if match_subject_start is not None:
match['subject_start'] = match_subject_start
if match_subject_end is not None:
match['subject_end'] = match_subject_end
def _fix_matches(alignment, score_keys=None):
    '''Drop matches without match_parts and normalize the survivors.

    Surviving matches get their boundaries recomputed (and, when score_keys
    is given, their match-level scores copied from the best part). Returns
    the alignment, or None when nothing survives or the input was None.
    '''
    if alignment is None:
        return None
    kept = []
    for match in alignment['matches']:
        if not match['match_parts']:
            continue
        if score_keys:
            _fix_match_scores(match, score_keys)
        _fix_match_start_end(match)
        kept.append(match)
    if not kept:
        return None
    alignment['matches'] = kept
    return alignment
def _create_fix_matches_mapper():
    '''Return a mapper that drops empty matches and alignments.

    The mapper is _fix_matches itself: it removes matches with no
    match_parts and yields None for alignments left without matches.
    '''
    return _fix_matches
def covered_segments_from_match_parts(match_parts, in_query=True,
                                      merge_segments_closer=1):
    '''Return the segments covered by the given match_parts.

    The segments are taken on the query (default) or on the subject, and
    overlapping segments - or segments closer than merge_segments_closer
    residues - are merged together before being returned.
    '''
    if in_query:
        start_key, end_key = 'query_start', 'query_end'
    else:
        start_key, end_key = 'subject_start', 'subject_end'
    segments = []
    for part in match_parts:
        limit1 = part[start_key]
        limit2 = part[end_key]
        if limit1 > limit2:
            # reversed hit: normalize to ascending order
            limit1, limit2 = limit2, limit1
        segments.append((limit1, limit2))
    return merge_overlaping_segments(segments,
                                     merge_segments_closer=merge_segments_closer)
def elongate_match_part_till_global(match_part, query_length, subject_length,
                                    align_completely):
    '''It stretches the match_part (in place) towards a global alignment.

    We assume that either the subject or the query (align_completely)
    should be completely aligned, and we stretch the match_part on both
    sides to do it, never running past the ends of the other molecule.

    Elongated match_parts are tagged with the number of added residues
    under the ELONGATED key.
    It returns the modified match_part.
    '''
    assert align_completely in (SUBJECT, QUERY)
    # normalize starts and ends (reversed hits have start > end)
    if match_part['subject_start'] <= match_part['subject_end']:
        subject_start = match_part['subject_start']
        subject_end = match_part['subject_end']
        subject_rev = False
    else:
        subject_start = match_part['subject_end']
        subject_end = match_part['subject_start']
        subject_rev = True
    if match_part['query_start'] <= match_part['query_end']:
        query_start = match_part['query_start']
        query_end = match_part['query_end']
        query_rev = False
    else:
        query_start = match_part['query_end']
        query_end = match_part['query_start']
        query_rev = True
    # how much do we elongate on each side?
    if align_completely == SUBJECT:
        stretch_left = subject_start
        max_left_stretch = query_start
        stretch_right = subject_length - subject_end - 1
        max_right_stretch = query_length - query_end - 1
    else:
        stretch_left = query_start
        max_left_stretch = subject_start
        stretch_right = query_length - query_end - 1
        max_right_stretch = subject_length - subject_end - 1
    # never stretch past the ends of the other molecule
    if stretch_left > max_left_stretch:
        stretch_left = max_left_stretch
    if stretch_right > max_right_stretch:
        stretch_right = max_right_stretch
    # The elongation (query and subject move in lockstep on both sides)
    if subject_rev:
        match_part['subject_end'] -= stretch_left
    else:
        match_part['subject_start'] -= stretch_left
    if query_rev:
        match_part['query_end'] -= stretch_left
    else:
        match_part['query_start'] -= stretch_left
    if subject_rev:
        match_part['subject_start'] += stretch_right
    else:
        match_part['subject_end'] += stretch_right
    if query_rev:
        match_part['query_start'] += stretch_right
    else:
        match_part['query_end'] += stretch_right
    # The tagging
    stretched_length = stretch_left + stretch_right
    if stretched_length:
        match_part[ELONGATED] = stretched_length
    # BUGFIX: return the match_part so that callers that collect the
    # return value (e.g. in a comprehension) do not end up with None
    return match_part
def elongate_match_parts_till_global(match_parts, query_length,
                                     subject_length, align_completely):
    '''It stretches every match_part to convert it in a global alignment.

    We assume that the subject (or the query, per align_completely) should
    be completely aligned, and each match_part is stretched in place.
    Elongated match_parts are tagged under the ELONGATED key.
    It returns the (modified) match_parts.
    '''
    # BUGFIX: elongate_match_part_till_global mutates each match_part in
    # place; iterate and return the original list so the result never
    # depends on that function's return value (the previous comprehension
    # could yield a list of Nones).
    for match_part in match_parts:
        elongate_match_part_till_global(match_part, query_length,
                                        subject_length,
                                        align_completely=align_completely)
    return match_parts
def _match_length(match, length_from_query):
    '''It returns the number of residues covered by the match's match_parts.

    Overlapping match_parts are only counted once (the covered segments
    are merged first).
    '''
    segments = covered_segments_from_match_parts(match['match_parts'],
                                                 length_from_query)
    return sum(end - start + 1 for start, end in segments)
def _match_part_length(match_part, length_in_query):
'It calculates the length of the match part'
if length_in_query:
return abs(match_part['query_end'] - match_part['query_start'])
else:
return abs(match_part['subject_end'] - match_part['subject_start'])
def _match_long_enough(match_length, total_length, min_num_residues,
min_percentage, length_in_query):
'It returns a boolean if the criteria is met'
if min_num_residues is not None:
if match_length >= min_num_residues:
match_ok = True
else:
match_ok = False
else:
percentage = (match_length / total_length) * 100.0
if percentage >= min_percentage:
match_ok = True
else:
match_ok = False
return match_ok
def _create_min_length_mapper(length_in_query, min_num_residues=None,
                              min_percentage=None, filter_match_parts=False):
    '''It creates a mapper that removes short matches.

    The length requirement is given either as a percentage
    (min_percentage) or as a number of residues (min_num_residues), but
    never both at the same time.
    The length is measured on the query or on the subject (length_in_query).
    filter_match_parts determines if every individual match_part is to be
    filtered against the length requirement (otherwise the total length
    covered by the whole match is used).
    '''
    if not isinstance(length_in_query, bool):
        raise ValueError('length_in_query should be a boolean')
    if min_num_residues is None and min_percentage is None:
        raise ValueError('min_num_residues or min_percentage should be given')
    elif min_num_residues is not None and min_percentage is not None:
        msg = 'Both min_num_residues or min_percentage can not be given at the'
        msg += ' same time'
        raise ValueError(msg)
    def map_(alignment):
        '''It returns an alignment with the matches that span long enough'''
        if alignment is None:
            return None
        filtered_matches = []
        query = alignment.get('query', None)
        for match in alignment['matches']:
            if match is None:
                continue
            # the molecule length is only needed for the percentage criterion
            if min_num_residues is None:
                if length_in_query:
                    mol_length = query['length']
                else:
                    mol_length = match['subject']['length']
            else:
                mol_length = None  # it doesn't matter because we're after an
                                   # absolute value
            if filter_match_parts:
                # keep only the match_parts that are individually long enough
                filtered_match_parts = []
                for match_part in match['match_parts']:
                    match_part_length = _match_part_length(match_part,
                                                           length_in_query)
                    match_part_ok = _match_long_enough(match_part_length,
                                                       mol_length,
                                                       min_num_residues,
                                                       min_percentage,
                                                       length_in_query)
                    if match_part_ok:
                        filtered_match_parts.append(match_part)
                match['match_parts'] = filtered_match_parts
                if not len(match['match_parts']):
                    continue
                filtered_matches.append(match)
            else:
                # keep the match only if the covered length of the whole
                # match satisfies the criterion
                match_length = _match_length(match, length_in_query)
                match_ok = _match_long_enough(match_length, mol_length,
                                              min_num_residues,
                                              min_percentage,
                                              length_in_query)
                if match_ok:
                    filtered_matches.append(match)
        alignment['matches'] = filtered_matches
        return alignment
    return map_
# Pipeline step kinds: a MAPPER transforms each alignment, a FILTER keeps
# or drops whole alignments.
MAPPER = 1
FILTER = 2
# Registry mapping every configuration 'kind' to the factory that builds
# the corresponding mapper/filter function (used by filter_alignments).
FILTER_COLLECTION = {'best_scores':
                        {'funct_factory': _create_best_scores_mapper,
                         'kind': MAPPER},
                     'score_threshold':
                        {'funct_factory': _create_scores_mapper,
                         'kind': MAPPER},
                     'min_length': {'funct_factory': _create_min_length_mapper,
                                    'kind': MAPPER},
                     'deepcopy': {'funct_factory': _create_deepcopy_mapper,
                                  'kind': MAPPER},
                     'fix_matches':
                        {'funct_factory': _create_fix_matches_mapper,
                         'kind': MAPPER},
                     'filter_empty':
                        {'funct_factory': _create_empty_filter,
                         'kind': FILTER},
                     }
def filter_alignments(alignments, config):
    '''It filters and maps the given alignments.

    The mappers and filters to apply are looked up in FILTER_COLLECTION
    using the 'kind' key of every configuration item.  A deepcopy step is
    always prepended, and fix_matches/filter_empty steps are always
    appended, so callers get cleaned-up, independent alignments.
    The result is a lazy iterator.
    '''
    steps = [{'kind': 'deepcopy'}]
    steps.extend(copy.deepcopy(config))
    steps.append({'kind': 'fix_matches'})
    steps.append({'kind': 'filter_empty'})
    # build the lazy pipeline step by step
    for step in steps:
        entry = FILTER_COLLECTION[step.pop('kind')]
        function = entry['funct_factory'](**step)
        if entry['kind'] == MAPPER:
            alignments = itertools.imap(function, alignments)
        else:
            alignments = itertools.ifilter(function, alignments)
    return alignments
|
gpl-3.0
| 7,515,614,803,057,729,000
| 37.778396
| 83
| 0.540637
| false
|
bjmain/host_choice_GWAS_arabiensis
|
PCA-based_Fst/fst_by_chr_plot.2.py
|
1
|
9451
|
#!/usr/bin/python
import matplotlib as MPL
MPL.use('agg') # no X (so show won't work)
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
#from matplotlib import rc #for adding italics. Via latex style
#rc('text', usetex=True)
import pylab as P
import math
import numpy
import commands
import sys
from scipy import stats
# Input location and plot configuration
DATA_DIR='/mnt/lanzarobas/home/bradmain/arabiensis/VCFs/'
FST_LIM = [-0.05, 0.25]  # y-axis range for the FST panels
DSTAT_LIM = [-40, 60]
#FST_COLOR = 'b'
FST_SIG_COLOR = 'b'
DSTAT_COLOR = 'r'
INV_HEIGHT=0.05  # y position at which the inversion bars are drawn
#TITLE="Sequence Differentiation Between Homozygous 2Rb Inversion States (PCA3 Split)"
TITLE="2) Genome-wide FST (sliding windows)\nbetween PCA Clusters"
LEG_LINES = []
LEG_LABELS = []
#input windowed FST from vcftools
# 3x2 grid: one panel per chromosome arm; top-left panel (N) is reused
# below for the PCA inset
fig, axes = P.subplots(ncols=2,nrows=3)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
                    wspace=.01, hspace=None)
((N,chrX), (chr2R, chr2L), (chr3R, chr3L)) = axes
#((chrX,N), (chr2R, chr2L), (chr3R, chr3L)) = axes
#N.axis('off')
"""
#Second Y axis
chrXd = chrX.twinx()
chr2Rd = chr2R.twinx()
chr2Ld = chr2L.twinx()
chr3Rd = chr3R.twinx()
chr3Ld = chr3L.twinx()
"""
def smoothListGaussian(list, strippedXs=False, degree=5):
    """Smooth a sequence with a Gaussian-weighted sliding window.

    The window is degree * 2 - 1 samples wide, so the returned sequence is
    len(list) - window samples long.  strippedXs is unused; it is kept for
    backward compatibility.  (The first parameter shadows the builtin
    'list', but renaming it would break keyword callers.)
    """
    window = degree * 2 - 1
    # Gaussian weights, one per window position, centred on the window
    offsets = numpy.arange(window) - degree + 1
    weight = 1 / numpy.exp((4 * (offsets / float(window))) ** 2)
    total = sum(weight)
    smoothed = []
    for start in range(len(list) - window):
        chunk = numpy.array(list[start:start + window])
        smoothed.append(sum(chunk * weight) / total)
    return smoothed
# Chromosomal inversions to annotate on the FST panels
inversions=["2Rc","2Rb","2La"]
## plot inversions
# Parse inversion breakpoint coordinates from the karyotype GTF into
# inv[chrom][inversion_name][breakpoint_number] = [left_Mb, right_Mb]
inv={}
for line in open("/mnt/lanzarobas/home/bradmain/gambiae/gene_flow/pest_M/An_gambiae_karyotype.gtf"):
    i=line.strip().split()
    chr=i[0]
    l=int(i[3])
    r=int(i[4])
    name=i[9].strip(";").strip('"')
    if name not in inversions:
        continue
    num=int(i[-1].strip(";").strip('"'))
    if chr not in inv:
        inv[chr]={}
    if name not in inv[chr]:
        inv[chr][name]={}
    inv[chr][name][num]=[l/1.0e6,r/1.0e6]
# Outer (whole breakpoint region) and inner (between breakpoints) spans
outer=[inv["2R"]["2Rb"][1][0],inv["2R"]["2Rb"][2][1]]
inner=[inv["2R"]["2Rb"][1][1],inv["2R"]["2Rb"][2][0]]
Couter=[inv["2R"]["2Rc"][1][0],inv["2R"]["2Rc"][2][1]]
Cinner=[inv["2R"]["2Rc"][1][1],inv["2R"]["2Rc"][2][0]]
outer2La=[inv["2L"]["2La"][1][0],inv["2L"]["2La"][2][1]]
inner2La=[inv["2L"]["2La"][1][1],inv["2L"]["2La"][2][0]]
#for N in inv["2R"]["2Rb"]:
#    outer.append(inv["2R"]["2Rb"][N][1])
#    inner.append(inv["2R"]["2Rb"][N][0])
print 'outer',outer
print 'inner',inner
#chr2R.plot(outer,[INV_HEIGHT,INV_HEIGHT],'k-',linewidth=5,alpha=0.5)
#chr2R.plot(inner,[INV_HEIGHT,INV_HEIGHT],'y-',linewidth=5)
#chr2R.plot(Couter,[INV_HEIGHT,INV_HEIGHT],'k-',linewidth=5,alpha=0.5)
#chr2R.plot(Cinner,[INV_HEIGHT,INV_HEIGHT],'g-',linewidth=5)
#chr2L.plot(outer2La,[INV_HEIGHT,INV_HEIGHT],'k-',linewidth=5,alpha=0.5)
#chr2L.plot(inner2La,[INV_HEIGHT,INV_HEIGHT],'y-',linewidth=5)
#chr3R.plot([12.5,38],[INV_HEIGHT,INV_HEIGHT],'y-',linewidth=5,alpha=0.5)
# Draw the inversion bars; the out-of-range chrX plots (x shifted by 1000)
# exist only so the shared legend on chrX picks up these labels
chr2R.plot(outer,[INV_HEIGHT,INV_HEIGHT],'k-',linewidth=15,alpha=0.5)
chr2R.plot(inner,[INV_HEIGHT,INV_HEIGHT],'y-',linewidth=15,label='2Rb inversion')
chrX.plot(inner,[INV_HEIGHT+1000,INV_HEIGHT+1000],'y-',linewidth=15,label='2Rb inversion') #just plotting out of range on X for legend purposes
chr2R.text(numpy.mean(inner)-.5,INV_HEIGHT-0.01,'b',fontweight='bold',fontsize=14)
chr2R.plot(Couter,[INV_HEIGHT,INV_HEIGHT],'k-',linewidth=15,alpha=0.5)
chr2R.plot(Cinner,[INV_HEIGHT,INV_HEIGHT],'g-',linewidth=15,label='2Rc inversion')
chrX.plot(Cinner,[INV_HEIGHT+1000,INV_HEIGHT+1000],'g-',linewidth=15,label='2Rc inversion') #just plotting out of range on X for legend purposes
chr2R.text(numpy.mean(Cinner)-.5,INV_HEIGHT-0.01,'c',fontweight='bold',fontsize=14)
#chr2L.plot(outer2La,[INV_HEIGHT,INV_HEIGHT],'k-',linewidth=15,alpha=0.5)
#chr2L.plot(inner2La,[INV_HEIGHT,INV_HEIGHT],'y-',linewidth=15)
chr3R.plot([12.5,38],[INV_HEIGHT,INV_HEIGHT],'r-',linewidth=15,alpha=0.5,label='3Ra inversion')
chrX.plot([12.5+1000,38+1000],[INV_HEIGHT,INV_HEIGHT],'r-',linewidth=15,alpha=0.5,label='3Ra inversion') #just plotting out of range on X for legend purposes
chr3R.text(numpy.mean([12.5,38]),INV_HEIGHT-0.01,'a',fontsize=14,fontweight='bold')
#chr3R.legend()
# Receptor gene coordinates in Mb (or7/or40/gr53/...); currently unused
# in the plotting below
or7=[22.849252,22.858650]
or40=[22.823983,22.825656]
gr53=[24.694665,24.698605]
gr13=[24.811173,24.812613]
or39=[24.850239,24.851846]
or38=[24.857474,24.859095]
def fst_plotter(fst_files,FST_COLOR,style,newLEGEND):
    """Plot windowed FST tracks from vcftools output on the per-arm axes.

    fst_files: list of vcftools --fst-window output files (whitespace
    separated columns: CHROM, BIN_START, ..., WEIGHTED_FST, MEAN_FST).
    The track is drawn on the module-level axes named chr<CHROM>
    (looked up via globals()), and the legend is attached to chrX.
    """
    fstD={}      # fstD[chrom][pos] = weighted FST
    fstmean={}   # fstmean[chrom][pos] = mean FST (collected but not plotted)
    leg_done = False
    for file in fst_files:
        for line in open(file):
            i=line.strip().split()
            chr=i[0]
            #skip the header line, unknown and Y chromosomes
            if chr=="CHROM" or chr=="UNKN" or chr=="Y_unplaced":
                continue
            if chr not in fstD:
                fstD[chr]={}
                fstmean[chr]={}
            pos=int(i[1])+24999 #moves x position to middle of 50kb bin
            if i[2]=="-nan":
                continue
            fst=float(i[4]) #i[4] is the weighted fst
            fstM=float(i[5]) #i[5] is the mean fst
            # only the first value seen for a position is kept
            if pos not in fstD[chr]:
                fstD[chr][pos]=fst
                fstmean[chr][pos]=fstM
    F=[]
    Fs=[]
    for CHROM in fstD:
        x=numpy.array(sorted(fstD[CHROM]))
        xmean=sorted(fstmean[CHROM])
        y=[]
        ymean=[]
        for i in x:
            F.append(fstD[CHROM][i])
            y.append(fstD[CHROM][i])
            ymean.append(fstmean[CHROM][i])
        # pick the matplotlib axis for this chromosome arm by name
        ax = globals()['chr'+CHROM]
        #tmp, = ax.plot(x/1.0e6, y, '-', color=FST_COLOR, linewidth=1.5)
        #tmp, = ax.plot(x/1.0e6, y, style, color=FST_COLOR, linewidth=1.5,label=newLEGEND)
        tmp, = ax.plot(x/1.0e6, y, style, color=FST_COLOR, linewidth=2,label=newLEGEND)
        #if( not leg_done ):
        #    LEG_LINES.append(tmp)
        #    LEG_LABELS.append(r"$F_{\mathrm{ST}}$ pre- vs post-2006 $A. coluzzii$")
        #    leg_done = True
    chrX.legend(fontsize=12)
#LEG_LINES.append(leg_fst_sig)
#LEG_LABELS.append(r"$F_{\mathrm{ST}}$ 99.9 percentile level")
# actually plot fst (on top)
#fst_plotter([DATA_DIR+"pca1_pca2.windowed.weir.fst"],'b','--', "PCA1 vs PCA2")
#fst_plotter([DATA_DIR+"pca3_pca2.windowed.weir.fst"],'k','-', "PCA3 vs PCA2")
#fst_plotter(["pca1_pca2.windowed.weir.fst"],'b','--', "PCA1 vs PCA2")
#fst_plotter(["pca3_pca2.windowed.weir.fst"],'k','-', "PCA3 vs PCA2")
# One FST track per pairwise comparison of the three PCA clusters
fst_plotter(["pca3_pca1.windowed.weir.fst"],'orange','--', "Right PCA cluster vs left")
fst_plotter(["pca1_pca2.windowed.weir.fst"],'green','--', "Left PCA cluster vs middle")
fst_plotter(["pca3_pca2.windowed.weir.fst"],'k','--', "Right PCA cluster vs middle")
# chromosome names
for C in ['X', '2R', '2L', '3R', '3L']:
    ax = globals()['chr'+C]
    # left arms ('..L') get the label on the right edge, others on the left
    if( C[-1] == 'L' ):
        x = 0.975
        ha = 'right'
    else:
        x = 0.025
        ha = 'left'
    #ax.text(x, 0.95, r'\textbf{'+C+'}', size='xx-large', ha=ha, va='top', transform=ax.transAxes)
    ax.text(x, 0.95, C, size='xx-large', ha=ha, va='top', transform=ax.transAxes)
# Axis labels and shared FST y range across all panels
chrX.set_ylabel("$F_{\mathrm{ST}}$",color='k',fontsize=24)
chr2R.set_ylabel("$F_{\mathrm{ST}}$",color='k',fontsize=24)
chr3R.set_ylabel("$F_{\mathrm{ST}}$",color='k',fontsize=24)
chr3R.set_xlabel(r"position [Mb]",fontsize=24)
chr3L.set_xlabel(r"position [Mb]",fontsize=24)
chr2L.get_yaxis().set_visible(False)
chr3L.get_yaxis().set_visible(False)
chrX.set_ylim(FST_LIM)
chrX.set_xlim(0,22)
chr2L.set_ylim(FST_LIM)
chr2R.set_ylim(FST_LIM)
chr3L.set_ylim(FST_LIM)
chr3R.set_ylim(FST_LIM)
#P.show()
chrX.set_title(TITLE, y=1.04, fontsize=24)
##################### PCA PLOT
human=[line.strip() for line in open("../pca/allhumanfed.txt")]
cattle=[line.strip() for line in open("../pca/allcattlefed.txt")]
cattlex=[]
cattley=[]
humanx=[]
humany=[]
for line in open("../pca/LUPI_maf_pca.eigenvec"):
i=line.strip().split()
pc1=i[2]
pc2=i[4]
genome_id=i[0]
if i[1] in human:
humanx.append(pc1)
humany.append(pc2)
#ax.text(pc1,pc2,genome_id)
elif i[1] in cattle:
cattlex.append(pc1)
cattley.append(pc2)
#ax.text(pc1,pc2,genome_id)
else:
print "not human or cattle-fed:", line.strip()
gamx.append(pc1)
gamy.append(pc2)
###P.text(pc1,pc2,i[1],color='g',fontsize=14)
ax = N
ax.set_xlim(-.4,.3)
ax.set_ylim(-.35,.45)
pos = ax.get_position()
pts = pos.get_points()
w = pts[1,0]-pts[0,0]
h = pts[1,1]-pts[0,1]
nw = w*0.6
nh = h*0.8
#x0 = pts[0,0]+(w-nw)/2.0
x0 = pts[0,0]+(w-nw)/3.4
y0 = pts[0,1]+0.01 #+(h-nh)
print pts, w, h
ax.set_position([x0, y0, nw, nh])
ax.plot(cattlex,cattley,'bo',label="cattlefed")
ax.plot(humanx,humany,'ro',label="humanfed")
#P.text(-.38,-.3,"P<0.01; humanfed vs cattlefed 2x3 Fisher Exact")
ax.set_xlabel("PCA1",fontsize=14)
ax.set_ylabel("PCA2",fontsize=14)
ax.set_xlim(-.4,.3)
ax.set_ylim(-.35,.45)
leg = ax.legend(numpoints=1, ncol=2, loc=8, bbox_to_anchor=(0.5, 1.01))
leg.get_frame().set_alpha(0.5)
#P.title(r"PCA on all \textit{An. arabiensis} SNPs",fontsize=20)
ax.set_title("1) PCA on Genome-wide SNPs",fontsize=24, y=1.34)
################ Final adjustments and save
fig.set_size_inches(14.4, 9.6)
#P.show()
#P.savefig('pca_based_fst.1.svg', dpi=300)
P.savefig('pca_based_fst.2.png', dpi=300)
#P.savefig('pca_based_fst.1.pdf')
|
mit
| 2,981,110,976,975,777,000
| 32.753571
| 157
| 0.623214
| false
|
Zluurk/pypeman
|
pypeman/tests/test_remoteadmin.py
|
1
|
4679
|
import asyncio
import pytest
import pytest_asyncio.plugin # noqa F401
from pypeman import nodes, msgstore, channels
from pypeman.channels import BaseChannel
from pypeman.remoteadmin import RemoteAdminClient, RemoteAdminServer
from pypeman.test import TearDownProjectTestCase as TestCase
from pypeman.tests.common import generate_msg
class TestNode(nodes.BaseNode):
    """A pass-through node that prints its name when it processes a message.

    The printed output lets the tests see whether the node actually ran.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    # Used to test if node is processed during test
    def process(self, msg):
        print("Process %s" % self.name)
        return msg
class RemoteAdminTests(TestCase):
    """End-to-end test of the websocket remote-admin server/client pair."""
    @pytest.fixture(autouse=True)
    def initfixture(self, unused_tcp_port):
        # pytest-asyncio injects a free TCP port for the admin server
        self.tcp_port = unused_tcp_port
    def clean_loop(self):
        # Useful to execute future callbacks
        # NOTE(review): asyncio.Task.all_tasks(loop=...) was removed in
        # Python 3.9 — confirm against the supported Python versions
        pending = asyncio.Task.all_tasks(loop=self.loop)
        if pending:
            self.loop.run_until_complete(asyncio.gather(*pending))
    def start_channels(self):
        # Start channels
        for chan in channels.all:
            self.loop.run_until_complete(chan.start())
    def setUp(self):
        # Create class event loop used for tests to avoid failing
        # previous tests to impact next test ? (Not sure)
        self.loop = asyncio.new_event_loop()
        self.loop.set_debug(True)
        # Remove thread event loop to be sure we are not using
        # another event loop somewhere
        asyncio.set_event_loop(None)
        # Avoid calling already tested channels
        channels.all.clear()
    def tearDown(self):
        super().tearDown()
        self.clean_loop()
    def test_remote_admin_list(self):
        """ Channel remote listing working """
        port = self.tcp_port # port used for rmt admin
        store_factory = msgstore.MemoryMessageStoreFactory()
        chan = BaseChannel(name="test_remote050", loop=self.loop, message_store_factory=store_factory)
        n = TestNode()
        n2 = TestNode(name="sub")
        n3 = TestNode(name="sub1")
        n4 = TestNode(name="sub2")
        # four messages with distinct timestamps so ordering can be checked
        msg = generate_msg(with_context=True)
        msg2 = generate_msg(timestamp=(1982, 11, 27, 12, 35))
        msg3 = generate_msg(timestamp=(1982, 11, 28, 12, 35))
        msg4 = generate_msg(timestamp=(1982, 11, 28, 14, 35))
        idref_msg3 = msg3.uuid
        chan.add(n)
        sub = chan.fork(name="subchannel")
        sub.append(n2, n3, n4)
        # Launch channel processing
        self.start_channels()
        self.loop.run_until_complete(chan.handle(msg))
        self.loop.run_until_complete(chan.handle(msg2))
        self.loop.run_until_complete(chan.handle(msg3))
        self.loop.run_until_complete(chan.handle(msg4))
        # Start the remote admin server and connect a client to it
        server = RemoteAdminServer(loop=self.loop, port=port)
        self.loop.run_until_complete(server.start())
        client = RemoteAdminClient(loop=self.loop, url="ws://localhost:%d" % port)
        client.init()
        # List channels
        chans = client.channels()
        print(chans)
        self.assertEqual(chans[0]['name'], 'test_remote050', "Channel listing not working")
        self.assertEqual(
            chans[0]['subchannels'][0]['name'],
            'test_remote050.subchannel',
            "Subchannel listing not working")
        # Stop channel
        result = client.stop('test_remote050')
        self.assertEqual(chan.status, BaseChannel.STOPPED, "Stopping channel doesn't work")
        # Start channel
        result = client.start('test_remote050')
        self.assertEqual(chan.status, BaseChannel.WAITING, "Starting channel doesn't work")
        # Search message
        msg_list = client.list_msg(channel='test_remote050', start=2, count=5, order_by='-timestamp')
        print(msg_list)
        self.assertEqual(msg_list['total'], 4, 'List channel messages broken')
        self.assertEqual(msg_list['messages'][0]['id'], idref_msg3, 'List channel messages broken')
        # Replay message
        result = client.replay_msg('test_remote050', [idref_msg3])
        msg_list = client.list_msg(channel='test_remote050', start=0, count=5, order_by='-timestamp')
        self.assertEqual(msg_list['total'], 5, 'List channel messages broken')
        self.assertEqual(msg_list['messages'][0]['id'], result[0].uuid, 'Replay messages broken')
        # Push message
        result = client.push_msg(channel='test_remote050', text="Yaaay")
        msg_list = client.list_msg(channel='test_remote050', start=0, count=5, order_by='-timestamp')
        self.assertEqual(msg_list['total'], 6, 'Push message broken')
        self.assertEqual(msg_list['messages'][0]['id'], result.uuid, 'Push message broken')
|
apache-2.0
| 305,318,444,714,465,000
| 32.661871
| 102
| 0.638384
| false
|
Phoenyx/TruemaxScriptPackage
|
Truemax/moduleScene.py
|
1
|
8187
|
__author__ = 'sofiaelm'
import os
from Truemax.checkNaming import get_top_node
from Truemax.hfFixShading import hfFixBadShading
import Truemax.makeReference as makeReference
import Truemax.exportFBX as exportFBX
import Truemax.deleteDPLayers as deleteDPLayers
import Truemax.fixAllBelow as fixAllBelow
from Truemax import checkList
import manager
import maya.cmds as cmds
from pymel.all import mel
import pymel.core as pm
from pymel.all import *
# Reloads script when update is ran
reload(fixAllBelow)
reload(exportFBX)
reload(checkList)
reload(deleteDPLayers)
reload(makeReference)
SCENE_FOLDER = "scenes"
TURNTABLE_FOLDER = "turnTable"
EXPORT_FOLDER = "export"
SOURCEIMAGES_FOLDER = "sourceimages"
# Gets first and last letter of username
def get_author_initials():
    """Return the user's initials: first plus last letter of the 'user'
    environment variable, lowercased.

    Falls back to "na" when the variable is unset or empty (an empty
    value previously raised an IndexError).
    """
    user = os.getenv('user', "na") or "na"
    return str(user[0] + user[-1]).lower()
class ModuleScene(manager.Module):
    """Scene-management tab for the Truemax Maya pipeline: new-asset
    project scaffolding, export helpers and the pre-delivery check list."""
    # Folder (next to this file) holding the template scenes
    cleanScene = "cleanScene"
    def __init__(self, mngr):
        manager.Module.__init__(self, mngr)
        self.statusDir = None
        if "assetslocation" in mngr.config:
            self.statusDir = mngr.config["assetslocation"]
        # Reset check status on selection
        cmds.scriptJob(event=["DagObjectCreated", lambda *args: self.reset_check_list()], protected=True)
    def new_scene(self):
        # Start from the clean template scene, then ask for a location and
        # an asset name, scaffold the asset's folder structure and save the
        # first versioned work file.
        cmds.file(newFile=True, force=True)
        location = "{0}{1}{2}".format(os.path.dirname(os.path.realpath(__file__)), os.path.sep, self.cleanScene)
        self.set_project(location)
        cmds.file("cleanScene.ma", open=True)
        select_dir = pm.fileDialog2(fileMode=2, dialogStyle=3, startingDirectory=self.statusDir)
        if select_dir != None:
            print select_dir[0]
            sDir = str(select_dir[0])
            result = cmds.promptDialog(
                title='Asset Name',
                message='Enter Name:',
                button=['OK', 'Cancel'],
                defaultButton='OK',
                cancelButton='Cancel',
                dismissString='Cancel')
            if result == 'OK':
                assetName = cmds.promptDialog(query=True, text=True)
                print assetName
                # makes project folder
                projectFolder = os.path.join(sDir, assetName)
                if not os.path.exists(projectFolder):
                    print "Creating {0}".format(projectFolder)
                    os.makedirs(projectFolder)
                # makes scenes folder
                scenesFolder = os.path.join(projectFolder, SCENE_FOLDER)
                if not os.path.exists(scenesFolder):
                    print "Creating {0}".format(scenesFolder)
                    os.makedirs(scenesFolder)
                # makes turntable folder
                turntableFolder = os.path.join(projectFolder, TURNTABLE_FOLDER)
                if not os.path.exists(turntableFolder):
                    print "Creating {0}".format(turntableFolder)
                    os.makedirs(turntableFolder)
                # makes export folder
                exportFolder = os.path.join(projectFolder, EXPORT_FOLDER)
                if not os.path.exists(exportFolder):
                    print "Creating {0}".format(exportFolder)
                    os.makedirs(exportFolder)
                # makes sourceimages folder
                sourceimagesFolder = os.path.join(projectFolder, SOURCEIMAGES_FOLDER)
                if not os.path.exists(sourceimagesFolder):
                    print "Creating {0}".format(sourceimagesFolder)
                    os.makedirs(sourceimagesFolder)
                # save the first versioned work file: <asset>_v001_<initials>.ma
                fileName = assetName + "_v001_" + get_author_initials() + ".ma"
                fileSavePath = os.path.join(scenesFolder, fileName)
                print fileSavePath
                cmds.file(rename=fileSavePath)
                cmds.file(save=True)
    def set_project(self, location):
        # Point Maya's current project at the given directory
        mel.setProject(location)
    def setProjectAsCurrDirectory(self):
        # Set the project to the parent folder of the current scene's directory
        filePath = cmds.file(query=True, expandName=True)
        directory = os.path.dirname(filePath)
        project = os.path.dirname(directory)
        self.set_project(project)
    def importRefCube(self):
        # Import the reference cube from the template project, then restore
        # the project to the current scene's location
        location = "{0}{1}{2}".format(os.path.dirname(os.path.realpath(__file__)), os.path.sep, self.cleanScene)
        self.set_project(location)
        cmds.file("refCube.ma", i=True)
        self.setProjectAsCurrDirectory()
    def update_check_list(self):
        # Run the check list and colour the status text green (ok) or red
        check_output = checkList.check_list()
        output_errors = "\n".join(check_output[1])
        if check_output[0]:
            cmds.text(self.statusText, label=output_errors, edit=True, backgroundColor=[0, 1, 0])
        else:
            cmds.text(self.statusText, label=output_errors, edit=True, backgroundColor=[1, 0, 0])
    def reset_check_list(self):
        # Yellow = status unknown (scene changed since the last check)
        cmds.text(self.statusText, edit=True, backgroundColor=[1, 1, 0])
    def select_hierachy(self):
        cmds.select(hi=1)
    def select_top_node(self):
        cmds.select(get_top_node())
    def pivot_at_origin(self):
        self.select_top_node()
        xform(zeroTransformPivots=1)
    def create_ui(self):
        """Build the module's UI tab; returns (layout_path, tab_label)."""
        # MJ gets a special button colour
        if get_author_initials() == 'mj':
            bg_colour = [0.9, 0.4, 1]
        else:
            bg_colour = [0.4, 0.4, 0.4]
        tab = str(cmds.columnLayout())
        cmds.separator(style="none")
        cmds.frameLayout(collapsable=True, label="Common")
        cmds.columnLayout()
        cmds.button(command=lambda *args: self.new_scene(), label="New Work Scene", backgroundColor=bg_colour)
        cmds.button(command=lambda *args: self.setProjectAsCurrDirectory(), label="Set Project",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: self.importRefCube(), label="Import Reference Cube",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: mel.Reset(), label="Create Playblast Turntable",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: exportFBX.export_asset(), label="Export as FBX", backgroundColor=bg_colour)
        cmds.button(command=lambda *args: makeReference.make_reference(), label="Make Reference File",
                    backgroundColor=bg_colour)
        cmds.setParent('..')
        cmds.setParent('..')
        cmds.frameLayout(collapsable=True, label="Status")
        cmds.columnLayout(rowSpacing=2)
        cmds.button(command=lambda *args: self.update_check_list(), label="Update Status", backgroundColor=bg_colour)
        cmds.text(label="Status errors:", align="left", backgroundColor=[0.2, 0.2, 0.2], height=15)
        self.statusText = cmds.text("Status", backgroundColor=[1, 1, 0])
        self.statusText = cmds.text(self.statusText, query=True, fullPathName=True)
        cmds.setParent('..')
        cmds.setParent('..')
        cmds.frameLayout(collapsable=True, label="Check List")
        cmds.columnLayout(rowSpacing=2)
        cmds.button(command=lambda *args: fixAllBelow.fixAllBelow(), label="Run All Fix Scripts Below",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: hfFixBadShading(), label="Fix Face Assignments on Scene Objects",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: mel.deleteUnusedNodes(), label="Delete Unused Nodes",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: self.select_top_node(), label="Select Top Node", backgroundColor=bg_colour)
        cmds.button(command=lambda *args: self.select_hierachy(), label="Select Hierarchy", backgroundColor=bg_colour)
        cmds.button(command=lambda *args: mel.FreezeTransformations(), label="Freeze Transformations",
                    backgroundColor=bg_colour)
        cmds.button(command=lambda *args: mel.DeleteHistory(), label="Delete History", backgroundColor=bg_colour)
        cmds.button(command=lambda *args: self.pivot_at_origin(), label="Pivot at Origin", backgroundColor=bg_colour)
        cmds.button(command=lambda *args: deleteDPLayers.deleteDPLayers(), label="Delete Display Layers",
                    backgroundColor=bg_colour)
        cmds.setParent('..')
        cmds.setParent('..')
        cmds.setParent('..')
        return tab, "Scene"
def initModule(manager):
    """Entry point called by the manager to instantiate this module.

    NOTE(review): the parameter shadows the imported 'manager' module;
    kept as-is for backward compatibility.
    """
    return ModuleScene(manager)
|
gpl-2.0
| 5,604,648,493,260,993,000
| 39.334975
| 118
| 0.636619
| false
|
webu/pybbm
|
pybb/models.py
|
1
|
20374
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models, transaction, DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as tznow
from pybb.compat import get_user_model_path, get_username_field, get_atomic_func, slugify
from pybb import defaults
from pybb.profiles import PybbProfile
from pybb.util import unescape, FilePathGenerator, _get_markup_formatter
from annoying.fields import AutoOneToOneField
@python_2_unicode_compatible
class Category(models.Model):
    """A forum category: the top level of the forum hierarchy."""
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False,
                                 help_text=_('If checked, this category will be visible only for staff'))
    slug = models.SlugField(_("Slug"), max_length=255, unique=True)
    class Meta(object):
        ordering = ['position']
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
    def __str__(self):
        return self.name
    def forum_count(self):
        # number of forums directly under this category
        return self.forums.all().count()
    def get_absolute_url(self):
        # nice (slug) vs plain (pk) URLs, per PYBB_NICE_URL setting
        if defaults.PYBB_NICE_URL:
            return reverse('pybb:category', kwargs={'slug': self.slug, })
        return reverse('pybb:category', kwargs={'pk': self.id})
    @property
    def topics(self):
        # all topics in any forum of this category
        return Topic.objects.filter(forum__category=self).select_related()
    @property
    def posts(self):
        # all posts in any topic of this category
        return Post.objects.filter(topic__forum__category=self).select_related()
@python_2_unicode_compatible
class Forum(models.Model):
    """A forum inside a category; forums may nest via the parent field."""
    category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
    parent = models.ForeignKey('self', related_name='child_forums', verbose_name=_('Parent forum'),
                               blank=True, null=True)
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    description = models.TextField(_('Description'), blank=True)
    moderators = models.ManyToManyField(get_user_model_path(), blank=True, verbose_name=_('Moderators'))
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    # post_count/topic_count are denormalized caches, see update_counters()
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    topic_count = models.IntegerField(_('Topic count'), blank=True, default=0)
    hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False)
    readed_by = models.ManyToManyField(get_user_model_path(), through='ForumReadTracker', related_name='readed_forums')
    headline = models.TextField(_('Headline'), blank=True, null=True)
    slug = models.SlugField(verbose_name=_("Slug"), max_length=255)
    class Meta(object):
        ordering = ['position']
        verbose_name = _('Forum')
        verbose_name_plural = _('Forums')
        unique_together = ('category', 'slug')
    def __str__(self):
        return self.name
    def update_counters(self):
        # Recompute the denormalized topic/post counters and the
        # last-update timestamp from the actual rows, then save.
        self.topic_count = Topic.objects.filter(forum=self).count()
        if self.topic_count:
            posts = Post.objects.filter(topic__forum_id=self.id)
            self.post_count = posts.count()
            if self.post_count:
                try:
                    last_post = posts.order_by('-created', '-id')[0]
                    self.updated = last_post.updated or last_post.created
                except IndexError:
                    pass
        else:
            self.post_count = 0
        self.save()
    def get_absolute_url(self):
        # nice (slug) vs plain (pk) URLs, per PYBB_NICE_URL setting
        if defaults.PYBB_NICE_URL:
            return reverse('pybb:forum', kwargs={'slug': self.slug, 'category_slug': self.category.slug})
        return reverse('pybb:forum', kwargs={'pk': self.id})
    @property
    def posts(self):
        return Post.objects.filter(topic__forum=self).select_related()
    @cached_property
    def last_post(self):
        # NOTE(review): cached per instance, so the value can go stale if
        # posts change during the instance's lifetime
        try:
            return self.posts.order_by('-created', '-id')[0]
        except IndexError:
            return None
    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        parents = [self.category]
        parent = self.parent
        while parent is not None:
            parents.insert(1, parent)
            parent = parent.parent
        return parents
@python_2_unicode_compatible
class ForumSubscription(models.Model):
    """A user's subscription to a forum: either notify on new topics only,
    or auto-subscribe the user to every topic of the forum."""
    TYPE_NOTIFY = 1
    TYPE_SUBSCRIBE = 2
    TYPE_CHOICES = (
        (TYPE_NOTIFY, _('be notified only when a new topic is added')),
        (TYPE_SUBSCRIBE, _('be auto-subscribed to topics')),
    )
    user = models.ForeignKey(get_user_model_path(), on_delete=models.CASCADE,
        related_name='forum_subscriptions+', verbose_name=_('Subscriber'))
    forum = models.ForeignKey(Forum,
        related_name='subscriptions+', verbose_name=_('Forum'))
    type = models.PositiveSmallIntegerField(
        _('Subscription type'), choices=TYPE_CHOICES,
        help_text=_((
            'The auto-subscription works like you manually subscribed to watch each topic :\n'
            'you will be notified when a topic will receive an answer. \n'
            'If you choose to be notified only when a new topic is added. It means'
            'you will be notified only once when the topic is created : '
            'you won\'t be notified for the answers.'
        )), )
    class Meta(object):
        verbose_name = _('Subscription to forum')
        verbose_name_plural = _('Subscriptions to forums')
        unique_together = ('user', 'forum',)
    def __str__(self):
        return '%(user)s\'s subscription to "%(forum)s"' % {'user': self.user,
                                                            'forum': self.forum}
    def save(self, all_topics=False, **kwargs):
        # When all_topics is set and the subscription (newly) becomes
        # auto-subscribe, also subscribe the user to every existing topic
        # of the forum that they are not subscribed to yet.
        if all_topics and self.type == self.TYPE_SUBSCRIBE:
            old = None if not self.pk else ForumSubscription.objects.get(pk=self.pk)
            if not old or old.type != self.type :
                topics = Topic.objects.filter(forum=self.forum).exclude(subscribers=self.user)
                self.user.subscriptions.add(*topics)
        super(ForumSubscription, self).save(**kwargs)
    def delete(self, all_topics=False, **kwargs):
        # When all_topics is set, also unsubscribe the user from all the
        # forum's topics.
        if all_topics:
            topics = Topic.objects.filter(forum=self.forum, subscribers=self.user)
            self.user.subscriptions.remove(*topics)
        super(ForumSubscription, self).delete(**kwargs)
@python_2_unicode_compatible
class Topic(models.Model):
    """A discussion thread inside a forum, optionally carrying a poll."""

    # Poll modes: no poll, single-choice poll, or multiple-choice poll.
    POLL_TYPE_NONE = 0
    POLL_TYPE_SINGLE = 1
    POLL_TYPE_MULTIPLE = 2
    POLL_TYPE_CHOICES = (
        (POLL_TYPE_NONE, _('None')),
        (POLL_TYPE_SINGLE, _('Single answer')),
        (POLL_TYPE_MULTIPLE, _('Multiple answers')),
    )

    forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
    name = models.CharField(_('Subject'), max_length=255)
    # created/updated are stamped in save(), not via auto_now*.
    created = models.DateTimeField(_('Created'), null=True)
    updated = models.DateTimeField(_('Updated'), null=True)
    user = models.ForeignKey(get_user_model_path(), verbose_name=_('User'))
    views = models.IntegerField(_('Views count'), blank=True, default=0)
    sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
    closed = models.BooleanField(_('Closed'), blank=True, default=False)
    subscribers = models.ManyToManyField(get_user_model_path(), related_name='subscriptions',
                                         verbose_name=_('Subscribers'), blank=True)
    # Denormalized; kept in sync by update_counters().
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    readed_by = models.ManyToManyField(get_user_model_path(), through='TopicReadTracker', related_name='readed_topics')
    on_moderation = models.BooleanField(_('On moderation'), default=False)
    poll_type = models.IntegerField(_('Poll type'), choices=POLL_TYPE_CHOICES, default=POLL_TYPE_NONE)
    poll_question = models.TextField(_('Poll question'), blank=True, null=True)
    slug = models.SlugField(verbose_name=_("Slug"), max_length=255)

    class Meta(object):
        ordering = ['-created']
        verbose_name = _('Topic')
        verbose_name_plural = _('Topics')
        unique_together = ('forum', 'slug')

    def __str__(self):
        return self.name

    @cached_property
    def head(self):
        """The first (oldest) post of the topic, or None when it has no posts."""
        try:
            return self.posts.all().order_by('created', 'id')[0]
        except IndexError:
            return None

    @cached_property
    def last_post(self):
        """The most recent post (author preloaded), or None when empty.
        Cached per instance; update_counters() deletes it to force a refresh."""
        try:
            return self.posts.order_by('-created', '-id').select_related('user')[0]
        except IndexError:
            return None

    def get_absolute_url(self):
        # Two URL schemes: slug-based "nice" URLs, or plain pk-based URLs.
        if defaults.PYBB_NICE_URL:
            return reverse('pybb:topic', kwargs={'slug': self.slug, 'forum_slug': self.forum.slug, 'category_slug': self.forum.category.slug})
        return reverse('pybb:topic', kwargs={'pk': self.id})

    def save(self, *args, **kwargs):
        """Stamp created/updated on first save and, when the topic moved to a
        different forum, refresh counters on both the old and new forum."""
        if self.id is None:
            self.created = self.updated = tznow()

        forum_changed = False
        old_topic = None
        if self.id is not None:
            old_topic = Topic.objects.get(id=self.id)
            if self.forum != old_topic.forum:
                forum_changed = True

        super(Topic, self).save(*args, **kwargs)

        if forum_changed:
            old_topic.forum.update_counters()
            self.forum.update_counters()

    def delete(self, using=None):
        super(Topic, self).delete(using)
        # The forum's denormalized counters no longer include this topic.
        self.forum.update_counters()

    def update_counters(self):
        """Recompute post_count and the updated timestamp from actual posts."""
        self.post_count = self.posts.count()
        # force cache overwrite to get the real latest updated post
        if hasattr(self, 'last_post'):
            del self.last_post
        if self.last_post:
            self.updated = self.last_post.updated or self.last_post.created
        self.save()

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        parents = self.forum.get_parents()
        parents.append(self.forum)
        return parents

    def poll_votes(self):
        """Total votes cast in this topic's poll; None when there is no poll."""
        if self.poll_type != self.POLL_TYPE_NONE:
            return PollAnswerUser.objects.filter(poll_answer__topic=self).count()
        else:
            return None
class RenderableItem(models.Model):
    """Abstract base for models carrying a markup source (``body``) together
    with its rendered HTML (``body_html``) and plain-text (``body_text``)
    versions."""

    class Meta(object):
        abstract = True

    body = models.TextField(_('Message'))
    body_html = models.TextField(_('HTML version'))
    body_text = models.TextField(_('Text version'))

    def render(self):
        """Regenerate ``body_html`` and ``body_text`` from ``body``."""
        formatter = _get_markup_formatter()
        self.body_html = formatter(self.body, instance=self)
        # Plain-text version: strip the tags the markup processor generated,
        # then unescape the entities it produced.
        stripped = strip_tags(self.body_html)
        self.body_text = unescape(stripped)
@python_2_unicode_compatible
class Post(RenderableItem):
    """A single message inside a topic."""

    topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
    user = models.ForeignKey(get_user_model_path(), related_name='posts', verbose_name=_('User'))
    created = models.DateTimeField(_('Created'), blank=True, db_index=True)
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    user_ip = models.GenericIPAddressField(_('User IP'), blank=True, null=True, default='0.0.0.0')
    on_moderation = models.BooleanField(_('On moderation'), default=False)

    class Meta(object):
        ordering = ['created']
        verbose_name = _('Post')
        verbose_name_plural = _('Posts')

    def summary(self):
        """First 50 characters of the body, with an ellipsis when truncated."""
        limit = 50
        # Conditional expression instead of the dated `and/or` idiom.
        tail = '...' if len(self.body) > limit else ''
        return self.body[:limit] + tail

    def __str__(self):
        return self.summary()

    def save(self, *args, **kwargs):
        """Render the body, persist the post, then refresh the denormalized
        counters on the affected topic(s) and forum(s)."""
        # Only stamp `created` on first save; no need to call tznow() otherwise.
        if self.created is None:
            self.created = tznow()
        self.render()

        new = self.pk is None
        topic_changed = False
        old_post = None
        if not new:
            old_post = Post.objects.get(pk=self.pk)
            if old_post.topic != self.topic:
                topic_changed = True

        super(Post, self).save(*args, **kwargs)

        # If post is topic head and passed moderation, un-moderate the topic too.
        if self.topic.head == self and not self.on_moderation and self.topic.on_moderation:
            self.topic.on_moderation = False
        self.topic.update_counters()
        self.topic.forum.update_counters()
        if topic_changed:
            # The post moved between topics: the source side needs fresh
            # counters as well.
            old_post.topic.update_counters()
            old_post.topic.forum.update_counters()

    def get_absolute_url(self):
        return reverse('pybb:post', kwargs={'pk': self.id})

    def delete(self, *args, **kwargs):
        """Delete the post.  Deleting the head post deletes the whole topic;
        otherwise counters are refreshed after removal."""
        self_id = self.id
        head_post_id = self.topic.posts.order_by('created', 'id')[0].id
        if self_id == head_post_id:
            self.topic.delete()
        else:
            super(Post, self).delete(*args, **kwargs)
            self.topic.update_counters()
            self.topic.forum.update_counters()

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        return self.topic.forum.category, self.topic.forum, self.topic,
class Profile(PybbProfile):
    """
    Fallback profile model, used when the project does not provide its own
    site profile.
    """
    user = AutoOneToOneField(get_user_model_path(), related_name='pybb_profile', verbose_name=_('User'))

    class Meta(object):
        verbose_name = _('Profile')
        verbose_name_plural = _('Profiles')

    def get_absolute_url(self):
        """URL of this user's public profile page."""
        return reverse('pybb:user', kwargs={'username': getattr(self.user, get_username_field())})

    def get_display_name(self):
        """Name shown in the UI for this user."""
        return self.user.get_username()
class Attachment(models.Model):
    """A file attached to a post; ``size`` is denormalized on save."""

    class Meta(object):
        verbose_name = _('Attachment')
        verbose_name_plural = _('Attachments')

    post = models.ForeignKey(Post, verbose_name=_('Post'), related_name='attachments')
    size = models.IntegerField(_('Size'))
    file = models.FileField(_('File'),
                            upload_to=FilePathGenerator(to=defaults.PYBB_ATTACHMENT_UPLOAD_TO))

    def save(self, *args, **kwargs):
        """Record the uploaded file's byte size before persisting."""
        self.size = self.file.size
        super(Attachment, self).save(*args, **kwargs)

    def size_display(self):
        """Human-readable size: bytes, kilobytes or megabytes."""
        size = self.size
        if size < 1024:
            return '%db' % size
        if size < 1024 * 1024:
            return '%dKb' % int(size / 1024)
        return '%.2fMb' % (size / float(1024 * 1024))
class TopicReadTrackerManager(models.Manager):

    def get_or_create_tracker(self, user, topic):
        """
        Race-safe replacement for ``get_or_create`` under MySQL's default
        REPEATABLE READ isolation: attempt the INSERT inside a savepoint and,
        on a duplicate-key DatabaseError, roll back and fetch the existing row.
        See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
        """
        sid = transaction.savepoint(using=self.db)
        try:
            with get_atomic_func()():
                tracker = TopicReadTracker.objects.create(user=user, topic=topic)
            transaction.savepoint_commit(sid)
            return tracker, True
        except DatabaseError:
            # Another transaction created the row first: reuse it.
            transaction.savepoint_rollback(sid)
            return TopicReadTracker.objects.get(user=user, topic=topic), False
class TopicReadTracker(models.Model):
    """
    Save per user topic read tracking
    """
    user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
    topic = models.ForeignKey(Topic, blank=True, null=True)
    # auto_now: bumped on every save, i.e. each time the topic is (re)read.
    time_stamp = models.DateTimeField(auto_now=True)

    # Custom manager providing the race-safe get_or_create_tracker().
    objects = TopicReadTrackerManager()

    class Meta(object):
        verbose_name = _('Topic read tracker')
        verbose_name_plural = _('Topic read trackers')
        unique_together = ('user', 'topic')
class ForumReadTrackerManager(models.Manager):

    def get_or_create_tracker(self, user, forum):
        """
        Race-safe replacement for ``get_or_create`` under MySQL's default
        REPEATABLE READ isolation: attempt the INSERT inside a savepoint and,
        on a duplicate-key DatabaseError, roll back and fetch the existing row.
        See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
        """
        sid = transaction.savepoint(using=self.db)
        try:
            with get_atomic_func()():
                tracker = ForumReadTracker.objects.create(user=user, forum=forum)
            transaction.savepoint_commit(sid)
            return tracker, True
        except DatabaseError:
            # Another transaction created the row first: reuse it.
            transaction.savepoint_rollback(sid)
            return ForumReadTracker.objects.get(user=user, forum=forum), False
class ForumReadTracker(models.Model):
    """
    Save per user forum read tracking
    """
    user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
    forum = models.ForeignKey(Forum, blank=True, null=True)
    # auto_now: bumped on every save, i.e. each time the forum is (re)read.
    time_stamp = models.DateTimeField(auto_now=True)

    # Custom manager providing the race-safe get_or_create_tracker().
    objects = ForumReadTrackerManager()

    class Meta(object):
        verbose_name = _('Forum read tracker')
        verbose_name_plural = _('Forum read trackers')
        unique_together = ('user', 'forum')
@python_2_unicode_compatible
class PollAnswer(models.Model):
    """One selectable answer of a topic's poll."""

    topic = models.ForeignKey(Topic, related_name='poll_answers', verbose_name=_('Topic'))
    text = models.CharField(max_length=255, verbose_name=_('Text'))

    class Meta:
        verbose_name = _('Poll answer')
        verbose_name_plural = _('Polls answers')

    def __str__(self):
        return self.text

    def votes(self):
        """Number of users who picked this answer."""
        return self.users.count()

    def votes_percent(self):
        """Share of this answer among all votes in the topic's poll, percent.

        Returns 0 when the poll has no votes yet *or* the topic has no poll:
        ``Topic.poll_votes()`` returns None in the latter case, and the old
        ``topic_votes > 0`` comparison would raise TypeError on Python 3.
        """
        topic_votes = self.topic.poll_votes()
        if topic_votes:
            return 1.0 * self.votes() / topic_votes * 100
        return 0
@python_2_unicode_compatible
class PollAnswerUser(models.Model):
    """Vote record: which user picked which poll answer (one row per vote)."""

    poll_answer = models.ForeignKey(PollAnswer, related_name='users', verbose_name=_('Poll answer'))
    user = models.ForeignKey(get_user_model_path(), related_name='poll_answers', verbose_name=_('User'))
    timestamp = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = _('Poll answer user')
        verbose_name_plural = _('Polls answers users')
        # A user may vote for a given answer at most once.
        unique_together = (('poll_answer', 'user', ), )

    def __str__(self):
        return '%s - %s' % (self.poll_answer.topic, self.user)
def create_or_check_slug(instance, model, **extra_filters):
    """
    returns a unique slug
    :param instance : target instance
    :param model: needed as instance._meta.model is available since django 1.6
    :param extra_filters: filters needed for Forum and Topic for their unique_together field
    """
    initial_slug = instance.slug or slugify(instance.name)
    count = -1
    last_count_len = 0
    slug_is_not_unique = True
    while slug_is_not_unique:
        count += 1

        if count >= defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT:
            msg = _('After %(limit)s attemps, there is not any unique slug value for "%(slug)s"')
            raise ValidationError(msg % {'limit': defaults.PYBB_NICE_URL_SLUG_DUPLICATE_LIMIT,
                                         'slug': initial_slug})

        count_len = len(str(count))

        # Only (re)query candidate slugs when the suffix gained a digit:
        # the truncation point (254 - count_len) changes, so the set of
        # potentially-colliding slugs changes too.  254 leaves room for the
        # '-N' suffix within the SlugField's 255-char limit.
        if last_count_len != count_len:
            last_count_len = count_len
            filters = {'slug__startswith': initial_slug[:(254-count_len)], }
            if extra_filters:
                filters.update(extra_filters)
            objs = model.objects.filter(**filters).exclude(pk=instance.pk)
            slug_list = [obj.slug for obj in objs]

        # First try the bare slug; afterwards append '-1', '-2', ...
        if count == 0:
            slug = initial_slug
        else:
            slug = '%s-%d' % (initial_slug[:(254-count_len)], count)
        slug_is_not_unique = slug in slug_list

    return slug
|
bsd-2-clause
| 5,318,720,958,938,460,000
| 35.70991
| 142
| 0.620742
| false
|
HewlettPackard/python-proliant-sdk
|
examples/Redfish/ex23_dump_ilo_event_log.py
|
1
|
2839
|
# Copyright 2016 Hewlett Packard Enterprise Development, LP.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError
def ex23_dump_ilo_event_log(redfishobj):
    """Print every entry of the iLO Event Log (IEL) to stdout."""
    sys.stdout.write("\nEXAMPLE 23: Dump iLO Event Log\n")
    instances = redfishobj.search_for_type("LogService.")

    for instance in instances:
        # Only the Integrated Event Log service (its @odata.id ends in "IEL/").
        if instance["@odata.id"].endswith("IEL/"):
            tmp = redfishobj.redfish_get(instance["@odata.id"])
            rsp = redfishobj.redfish_get(tmp.dict["Entries"]["@odata.id"])
            for entry in rsp.dict["Members"]:
                response = redfishobj.redfish_get(entry["@odata.id"])
                sys.stdout.write(response.dict["Message"] + "\n")
            # NOTE(review): this paging loop looks suspect -- it tests for
            # 'NextPage' in the Members *list* (not a dict), never updates
            # `rsp`, and reuses the last `entry` from the loop above; also
            # `response` below is unbound when Members is empty.  Verify
            # against the Redfish collection-paging model before relying on it.
            while 'NextPage' in rsp.dict["Members"]:
                response = redfishobj.redfish_get(entry["@odata.id"] +
                                                  '?page=' +
                                                  str(response.dict["Entries"]['NextPage']['page']))
                sys.stdout.write(response.dict["Message"] + "\n")
            redfishobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
except Exception, excp:
raise excp
ex23_dump_ilo_event_log(REDFISH_OBJ)
|
apache-2.0
| 5,327,622,165,731,874,000
| 40.402985
| 78
| 0.586122
| false
|
lonelycorn/AHRS
|
source/test/test_least_square_estimator.py
|
1
|
2665
|
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import numpy as np
from base.least_square_estimator import LeastSquareEstimator
class TestLeastSquareEstimator(unittest.TestCase):
    '''
    Line-fitting tests for LeastSquareEstimator with the model
    y = theta[0] * x + theta[1].

    NOTE: not testing the forgetting factor now.
    '''
    # Decimal places used when comparing estimated parameters.
    VALUE_EQUAL_PLACE = 2

    def setUp(self):
        pass

    def _fit(self, x_sample, y_sample, initial_value):
        # Feed the (x, y) samples through a fresh estimator and return the
        # estimated parameter mean.  Shared by all three tests below.
        initial_covar = np.eye(2) * 1e3
        lse = LeastSquareEstimator(initial_value, initial_covar)
        for (x, y) in zip(x_sample, y_sample):
            phi = np.array([x, 1])
            lse.update(phi, y)
        return lse.get_estimate_mean()

    def _assert_estimate(self, mean, slope, intercept):
        # Compare the fitted (slope, intercept) against expectations.
        self.assertAlmostEqual(mean[0], slope, TestLeastSquareEstimator.VALUE_EQUAL_PLACE)
        self.assertAlmostEqual(mean[1], intercept, TestLeastSquareEstimator.VALUE_EQUAL_PLACE)

    def test_straight_line_no_noise(self):
        '''
        2D points are generated according to y = k * x + b, without noise.
        '''
        k = 1.0
        b = 10.0
        x_sample = np.arange(0, 100, 1)
        y_sample = k * x_sample + b
        # Initial estimate deliberately made off.
        mean = self._fit(x_sample, y_sample, np.array([0.0, 0.0]))
        self._assert_estimate(mean, k, b)

    def test_straight_line_symmetric(self):
        '''
        2D points are symmetric about y = 0.
        '''
        x_sample = np.arange(-50, 50, 1)
        # Alternates between -1 and +1, averaging to zero.
        y_sample = 2 * (np.mod(x_sample, 2) - 0.5)
        mean = self._fit(x_sample, y_sample, np.array([1.0, -1.0]))
        self._assert_estimate(mean, 0.0, 0.0)

    def test_strait_line(self):
        # NOTE(review): method name has a typo ("strait"); kept unchanged so
        # the discovered test set stays identical.
        '''
        2D points are generated according to y = k * x + b, with moderate noise
        '''
        k = 1.0
        b = 10.0
        x_sample = np.arange(0, 100, 1)
        y_sample = k * x_sample + b + np.random.normal(0.0, 0.01, x_sample.shape)
        mean = self._fit(x_sample, y_sample, np.array([0.0, 0.0]))
        self._assert_estimate(mean, k, b)
if (__name__ == "__main__"):
unittest.main()
|
mit
| -8,764,785,770,075,181,000
| 31.108434
| 88
| 0.591745
| false
|
jpcofr/svgpathtools
|
test/test_parsing.py
|
1
|
6496
|
# Note: This file was taken mostly as is from the svg.path module (v 2.0)
#------------------------------------------------------------------------------
from __future__ import division, absolute_import, print_function
import unittest
from svgpathtools import *
class TestParser(unittest.TestCase):
    """Path-string parsing tests: each parsed path must equal the explicitly
    constructed Path of segments."""

    def test_svg_examples(self):
        """Examples from the SVG spec"""
        path1 = parse_path('M 100 100 L 300 100 L 200 300 z')
        self.assertEqual(path1, Path(Line(100 + 100j, 300 + 100j),
                                     Line(300 + 100j, 200 + 300j),
                                     Line(200 + 300j, 100 + 100j)))
        self.assertTrue(path1.isclosed())

        # for Z command behavior when there is multiple subpaths
        path1 = parse_path('M 0 0 L 50 20 M 100 100 L 300 100 L 200 300 z')
        self.assertEqual(path1, Path(
            Line(0 + 0j, 50 + 20j),
            Line(100 + 100j, 300 + 100j),
            Line(300 + 100j, 200 + 300j),
            Line(200 + 300j, 100 + 100j)))

        # Whitespace between command letter and coordinates is optional.
        path1 = parse_path('M 100 100 L 200 200')
        path2 = parse_path('M100 100L200 200')
        self.assertEqual(path1, path2)

        # A repeated command letter may be omitted.
        path1 = parse_path('M 100 200 L 200 100 L -100 -200')
        path2 = parse_path('M 100 200 L 200 100 -100 -200')
        self.assertEqual(path1, path2)

        # Smooth ("S") continuation reflects the previous control point.
        path1 = parse_path("""M100,200 C100,100 250,100 250,200
                              S400,300 400,200""")
        self.assertEqual(path1,
                         Path(CubicBezier(100 + 200j, 100 + 100j, 250 + 100j, 250 + 200j),
                              CubicBezier(250 + 200j, 250 + 300j, 400 + 300j, 400 + 200j)))

        path1 = parse_path('M100,200 C100,100 400,100 400,200')
        self.assertEqual(path1,
                         Path(CubicBezier(100 + 200j, 100 + 100j, 400 + 100j, 400 + 200j)))

        path1 = parse_path('M100,500 C25,400 475,400 400,500')
        self.assertEqual(path1,
                         Path(CubicBezier(100 + 500j, 25 + 400j, 475 + 400j, 400 + 500j)))

        path1 = parse_path('M100,800 C175,700 325,700 400,800')
        self.assertEqual(path1,
                         Path(CubicBezier(100 + 800j, 175 + 700j, 325 + 700j, 400 + 800j)))

        path1 = parse_path('M600,200 C675,100 975,100 900,200')
        self.assertEqual(path1,
                         Path(CubicBezier(600 + 200j, 675 + 100j, 975 + 100j, 900 + 200j)))

        path1 = parse_path('M600,500 C600,350 900,650 900,500')
        self.assertEqual(path1,
                         Path(CubicBezier(600 + 500j, 600 + 350j, 900 + 650j, 900 + 500j)))

        path1 = parse_path("""M600,800 C625,700 725,700 750,800
                              S875,900 900,800""")
        self.assertEqual(path1,
                         Path(CubicBezier(600 + 800j, 625 + 700j, 725 + 700j, 750 + 800j),
                              CubicBezier(750 + 800j, 775 + 900j, 875 + 900j, 900 + 800j)))

        # Smooth quadratic continuation ("T").
        path1 = parse_path('M200,300 Q400,50 600,300 T1000,300')
        self.assertEqual(path1,
                         Path(QuadraticBezier(200 + 300j, 400 + 50j, 600 + 300j),
                              QuadraticBezier(600 + 300j, 800 + 550j, 1000 + 300j)))

        # Arcs ("a" = relative elliptical arc).
        path1 = parse_path('M300,200 h-150 a150,150 0 1,0 150,-150 z')
        self.assertEqual(path1,
                         Path(Line(300 + 200j, 150 + 200j),
                              Arc(150 + 200j, 150 + 150j, 0, 1, 0, 300 + 50j),
                              Line(300 + 50j, 300 + 200j)))

        path1 = parse_path('M275,175 v-150 a150,150 0 0,0 -150,150 z')
        self.assertEqual(path1,
                         Path(Line(275 + 175j, 275 + 25j),
                              Arc(275 + 25j, 150 + 150j, 0, 0, 0, 125 + 175j),
                              Line(125 + 175j, 275 + 175j)))

        path1 = parse_path("""M600,350 l 50,-25
                              a25,25 -30 0,1 50,-25 l 50,-25
                              a25,50 -30 0,1 50,-25 l 50,-25
                              a25,75 -30 0,1 50,-25 l 50,-25
                              a25,100 -30 0,1 50,-25 l 50,-25""")
        self.assertEqual(path1,
                         Path(Line(600 + 350j, 650 + 325j),
                              Arc(650 + 325j, 25 + 25j, -30, 0, 1, 700 + 300j),
                              Line(700 + 300j, 750 + 275j),
                              Arc(750 + 275j, 25 + 50j, -30, 0, 1, 800 + 250j),
                              Line(800 + 250j, 850 + 225j),
                              Arc(850 + 225j, 25 + 75j, -30, 0, 1, 900 + 200j),
                              Line(900 + 200j, 950 + 175j),
                              Arc(950 + 175j, 25 + 100j, -30, 0, 1, 1000 + 150j),
                              Line(1000 + 150j, 1050 + 125j)))

    def test_others(self):
        # Other paths that need testing:

        # Relative moveto:
        path1 = parse_path('M 0 0 L 50 20 m 50 80 L 300 100 L 200 300 z')
        self.assertEqual(path1, Path(
            Line(0 + 0j, 50 + 20j),
            Line(100 + 100j, 300 + 100j),
            Line(300 + 100j, 200 + 300j),
            Line(200 + 300j, 100 + 100j)))

        # Initial smooth and relative CubicBezier
        path1 = parse_path("""M100,200 s 150,-100 150,0""")
        self.assertEqual(path1,
                         Path(CubicBezier(100 + 200j, 100 + 200j, 250 + 100j, 250 + 200j)))

        # Initial smooth and relative QuadraticBezier
        path1 = parse_path("""M100,200 t 150,0""")
        self.assertEqual(path1,
                         Path(QuadraticBezier(100 + 200j, 100 + 200j, 250 + 200j)))

        # Relative QuadraticBezier
        path1 = parse_path("""M100,200 q 0,0 150,0""")
        self.assertEqual(path1,
                         Path(QuadraticBezier(100 + 200j, 100 + 200j, 250 + 200j)))

    def test_negative(self):
        """You don't need spaces before a minus-sign"""
        path1 = parse_path('M100,200c10-5,20-10,30-20')
        path2 = parse_path('M 100 200 c 10 -5 20 -10 30 -20')
        self.assertEqual(path1, path2)

    def test_numbers(self):
        """Exponents and other number format cases"""
        # It can be e or E, the plus is optional, and a minimum of +/-3.4e38 must be supported.
        path1 = parse_path('M-3.4e38 3.4E+38L-3.4E-38,3.4e-38')
        path2 = Path(Line(-3.4e+38 + 3.4e+38j, -3.4e-38 + 3.4e-38j))
        self.assertEqual(path1, path2)

    def test_errors(self):
        # Trailing tokens after a closepath are invalid.
        self.assertRaises(ValueError, parse_path, 'M 100 100 L 200 200 Z 100 200')
|
mit
| -5,238,550,928,768,653,000
| 45.733813
| 95
| 0.496459
| false
|
eugene-eeo/mailthon
|
mailthon/envelope.py
|
1
|
1654
|
"""
mailthon.envelope
~~~~~~~~~~~~~~~~~
Implements the Envelope object.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
class Envelope(object):
    """Wraps an *enclosure* and overlays the SMTP-level sender/recipients.

    :param enclosure: the enclosure object being wrapped.
    :param mail_from: the "real" (SMTP ``MAIL FROM``) sender; optional.
    :param rcpt_to: list of "real" (SMTP ``RCPT TO``) addresses; optional.
    """

    def __init__(self, enclosure, mail_from=None, rcpt_to=None):
        self.enclosure = enclosure
        self.mail_from = mail_from
        self.rcpt_to = rcpt_to

    @property
    def sender(self):
        """The effective sender: *mail_from* when set, otherwise the
        enclosure's own sender attribute."""
        if self.mail_from:
            return self.mail_from
        return self.enclosure.sender

    @property
    def receivers(self):
        """The effective receivers (passed to ``RCPT TO``): *rcpt_to* when
        set, otherwise the enclosure's own receivers attribute."""
        if self.rcpt_to:
            return self.rcpt_to
        return self.enclosure.receivers

    def mime(self):
        """Delegate to the enclosure's mime object."""
        return self.enclosure.mime()

    def string(self):
        """Delegate to the enclosure's stringified mime object."""
        return self.enclosure.string()
|
mit
| 1,613,581,448,409,228,000
| 27.033898
| 64
| 0.612455
| false
|
datagrok/python-misc
|
datagrok/math/vector.py
|
1
|
1548
|
"""Snippets from linear algebra class"""
from numpy import dot, array, sqrt, matrix
# TODO: many of these may be part of numpy now. Check and cull
def proj(M, x):
    """Orthogonal projection of column vector ``x`` onto the column space
    of ``M``: proj_W(x) = M (M^T M)^-1 M^T x.

    >>> A = array([[1, 2], [2, 1]])
    >>> x = array([[1], [2]])
    >>> proj(A, x)
    matrix([[ 1.],
            [ 2.]])
    """
    Mm = matrix(M)
    # Same left-to-right grouping as M * (M^T M)^-1 * M^T * x.
    hat = Mm * (Mm.T * Mm).I * Mm.T
    return hat * x
def mat_array(s):
    """Parse a whitespace-and-newline blob of integers into a 2-D array.

    >>> mat_array('''
    ... 1 2 3
    ... 4 5 6
    ... 7 8 9
    ... ''')
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    """
    rows = []
    for line in s.splitlines():
        if line:
            rows.append([int(tok) for tok in line.split()])
    return array(rows)
def col_array(s):
    """Parse like :func:`mat_array`, then transpose (rows become columns).

    >>> col_array('''
    ... 1 2 3
    ... 4 5 6
    ... 7 8 9
    ... ''')
    array([[1, 4, 7],
           [2, 5, 8],
           [3, 6, 9]])
    """
    return mat_array(s).T
def norm(x):
    """Euclidean length (2-norm) of vector ``x``.

    >>> norm(array([3, 4]))
    5.0
    """
    squared = x * x
    return sqrt(sum(squared))
def unit(x):
    """Unit vector pointing in the direction of ``x``.

    >>> unit(array([9, 0]))
    array([ 1.,  0.])
    >>> unit(array([9, 9]))
    array([ 0.70710678,  0.70710678])
    """
    return x / norm(x)
def lss(A, b):
    """Least-squares solution x of A x = b via the normal equations:
    x = (A^T A)^-1 A^T b."""
    Am = matrix(A)
    normal_inv = (Am.T * Am).I
    return normal_inv * Am.T * b
|
agpl-3.0
| -8,830,901,386,614,149,000
| 18.35
| 101
| 0.443152
| false
|
miracode/django_tdd
|
functional_tests/tests.py
|
1
|
4711
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import sys
class NewVisitorTest(StaticLiveServerTestCase):
    """End-to-end (Selenium/Chrome) tests for the to-do list site."""

    @classmethod
    def setUpClass(cls):
        # Honour a --liveserver=HOST argument: point the tests at a staging
        # server instead of starting a local live-server instance.
        for arg in sys.argv:
            if 'liveserver' in arg:
                cls.server_url = 'http://' + arg.split('=')[1]
                return
        super(NewVisitorTest, cls).setUpClass()
        cls.server_url = cls.live_server_url

    @classmethod
    def tearDownClass(cls):
        # Only tear down the live server if we actually started one.
        if cls.server_url == cls.live_server_url:
            super(NewVisitorTest, cls).tearDownClass()

    # special methods which run before and after each test
    def setUp(self):
        self.browser = webdriver.Chrome()
        self.browser.implicitly_wait(3)

    def tearDown(self):
        self.browser.quit()

    def check_for_row_in_list_table(self, row_text):
        """Assert that the list table contains a row with exactly row_text."""
        # element returns element, raising exception if not found
        table = self.browser.find_element_by_id('id_list_table')
        # element*s* returns a list (which may be empty)
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn(row_text, [row.text for row in rows])

    def test_can_start_a_list_and_retrieve_it_later(self):
        """Full user story: create a list, get a unique URL, and verify a
        second user's list is isolated from the first."""
        # New visitor would like to visit the homepage
        self.browser.get(self.server_url)

        # User notices the page title and header mention to-do lists
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)

        # User is invited to ender a to-do item straight away
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(inputbox.get_attribute('placeholder'),
                         'Enter a to-do item')

        # User types "Buy peacock feathers" into a text box
        inputbox.send_keys('Buy peacock feathers')

        # When user hits enter, the page updates and pages lists
        # "1: Buy peacock feathers" as an item in a to-do list
        inputbox.send_keys(Keys.ENTER)
        user_list_url = self.browser.current_url
        self.assertRegexpMatches(user_list_url, '/lists/.+')
        self.check_for_row_in_list_table('1: Buy peacock feathers')

        # There is still a text box inviting user to add another item
        # user enters "Use peacock feathers to make a fly"
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Use peacock feathers to make a fly')
        inputbox.send_keys(Keys.ENTER)

        # The page updates again, and now shows both items on her list
        self.check_for_row_in_list_table('1: Buy peacock feathers')
        self.check_for_row_in_list_table('2: Use peacock feathers to make a fly')

        # A new user comes to the site
        ## Make new browser session
        self.browser.quit()
        self.browser = webdriver.Chrome()

        # The new user visits the home page. There is no sighn of old user's
        # list
        self.browser.get(self.server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertNotIn('make a fly', page_text)

        # New user starts a new list
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)

        # New user gets his own unique URL
        new_user_list_url = self.browser.current_url
        self.assertRegexpMatches(new_user_list_url, '/lists/.+')
        self.assertNotEqual(new_user_list_url, user_list_url)

        # Again, there is no sign of old user's list
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertIn('Buy milk', page_text)

        # Satisfied, they go to sleep

    def test_layout_and_styling(self):
        """Smoke-test the CSS: the input box is horizontally centered."""
        # user goes to home page
        self.browser.get(self.server_url)
        self.browser.set_window_size(1024, 768)

        # user notices the input box is nicely centered
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=5)

        # user starts a new list and sees the input is nicely centered too
        inputbox.send_keys('testing\n')
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=5)
|
mit
| -2,506,169,859,326,756,400
| 38.258333
| 81
| 0.63426
| false
|
onia/pygobject
|
tests/test_docstring.py
|
1
|
2588
|
import unittest
import gi.docstring
from gi.repository import GIMarshallingTests
from gi.repository import Gio
class Test(unittest.TestCase):
    """Tests for gi.docstring's doc-string generation helpers."""

    def test_api(self):
        # Swap in a trivial generator, check getter and generated output,
        # then restore the original generator.
        new_func = lambda info: 'docstring test'
        old_func = gi.docstring.get_doc_string_generator()

        gi.docstring.set_doc_string_generator(new_func)
        self.assertEqual(gi.docstring.get_doc_string_generator(),
                         new_func)
        self.assertEqual(gi.docstring.generate_doc_string(None),
                         'docstring test')

        # Set back to original generator
        gi.docstring.set_doc_string_generator(old_func)
        self.assertEqual(gi.docstring.get_doc_string_generator(),
                         old_func)

    def test_split_args_multi_out(self):
        # A function with two out-arguments and no in-arguments.
        in_args, out_args = gi.docstring.split_function_info_args(GIMarshallingTests.int_out_out)
        self.assertEqual(len(in_args), 0)
        self.assertEqual(len(out_args), 2)
        self.assertEqual(out_args[0].get_pytype_hint(), 'int')
        self.assertEqual(out_args[1].get_pytype_hint(), 'int')

    def test_split_args_inout(self):
        # An inout argument must appear on both sides with matching metadata.
        in_args, out_args = gi.docstring.split_function_info_args(GIMarshallingTests.long_inout_max_min)
        self.assertEqual(len(in_args), 1)
        self.assertEqual(len(out_args), 1)
        self.assertEqual(in_args[0].get_name(), out_args[0].get_name())
        self.assertEqual(in_args[0].get_pytype_hint(), out_args[0].get_pytype_hint())

    def test_split_args_none(self):
        in_args, out_args = gi.docstring.split_function_info_args(GIMarshallingTests.Object(int=33).none_inout) if False else (None, None)  # placeholder
|
lgpl-2.1
| -728,240,659,837,348,100
| 42.864407
| 108
| 0.630216
| false
|
wasim21k/pihome
|
cron/login.py
|
1
|
2609
|
#!/usr/bin/python
# add following line to show up when some one ssh to pi /etc/profile
# sudo python /var/www/cron/login.py
# clear everything from /etc/motd to remove generic message.
import socket, os, re, time, sys, subprocess, fcntl, struct
from threading import Thread
class bc:
    """ANSI terminal escape sequences used to colour the login banner."""
    HEADER = '\033[0;36;40m'  # cyan on black
    ENDC = '\033[0m'  # reset all attributes
    SUB = '\033[3;30;45m'  # italic black on magenta
    WARN = '\033[0;31;40m'  # red on black
    GREEN = '\033[0;32;40m'  # green on black
    org = '\033[91m'  # bright red
# ASCII-art "PiHome" banner and warning box (Python 2 print statements).
print bc.HEADER + " "
print " _____ _ _ _ "
print " | __ \ (_) | | | | "
print " | |__) | _ | |__| | ___ _ __ ___ ___ "
print " | ___/ | | | __ | / _ \ | |_ \_ \ / _ \ "
print " | | | | | | | | | (_) | | | | | | | | __/"
print " |_| |_| |_| |_| \___/ |_| |_| |_| \___|"
print " "
print " "+bc.SUB + "S M A R T H E A T I N G C O N T R O L "+ bc.ENDC
print bc.WARN +" "
print "*************************************************************************"
print "* PiHome is Raspberry Pi based Central Heating Control systems. It runs *"
print "* from web interface and it comes with ABSOLUTELY NO WARRANTY, to the *"
print "* extent permitted by applicable law. I take no responsibility for any *"
print "* loss or damage to you or your property. *"
print "* DO NOT MAKE ANY CHANGES TO YOUR HEATING SYSTEM UNTILL UNLESS YOU KNOW *"
print "* WHAT YOU ARE DOING *"
print "*************************************************************************"
print bc.GREEN +" Have Fun - PiHome" + bc.ENDC
# Disk usage summary: parse the first data row of `df -h` output.
df = subprocess.Popen(["df", "-h"], stdout=subprocess.PIPE)
output = df.communicate()[0]
device, size, used, available, percent, mountpoint = \
    output.split("\n")[1].split()
print bc.org +"Disk/SD Card Usage" + bc.ENDC
print "Filesystem Size Used Avail Used%"
print device+" "+size+" "+used+" "+available+" "+percent
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname[:15]))[20:24])
def get_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127."):
interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError:
pass
return ip
print "WebServer: "+bc.GREEN +"http://"+str(get_ip())+"/"+ bc.ENDC
print "PhpMyAdmin: "+bc.GREEN +"http://"+str(get_ip())+"/phpmyadmin"+ bc.ENDC
|
gpl-3.0
| -8,385,152,448,797,073,000
| 44
| 100
| 0.51744
| false
|
melodous/designate
|
designate/cmd/central.py
|
1
|
1243
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from designate.openstack.common import log as logging
from designate import service
from designate import utils
from designate.central import service as central
CONF = cfg.CONF
CONF.import_opt('workers', 'designate.central', group='service:central')
def main():
utils.read_config('designate', sys.argv)
logging.setup('designate')
server = central.Service.create(binary='designate-central',
service_name='central')
service.serve(server, workers=CONF['service:central'].workers)
service.wait()
|
apache-2.0
| -8,144,857,311,752,288,000
| 32.594595
| 75
| 0.735318
| false
|
howthebodyworks/parking_sun_lib
|
Scripts/castanet/test_http_client.py
|
1
|
3717
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_http_client.py
Created by dan mackinlay on 2011-05-06.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
from gevent import monkey; monkey.patch_all()
import unittest
import sys
import os.path
import subprocess
import urllib2
from time import sleep, time
from gevent.queue import Queue, Full, Empty
import gevent.hub
from proxy_osc import SimplerOSCRequestHandler, SimplerOSCServer
from OSC import OSCServer, OSCRequestHandler, getUrlStr
import json
def get_castanet_proxy_path():
import main
return main.__file__
class test_http_client(unittest.TestCase):
def setUp(self):
self.local_http_address = ('', 8088)
self.remote_osc_address = ('127.0.0.1', 5055)
castanet_proxy_path = get_castanet_proxy_path()
print 'castanet_proxy_path', castanet_proxy_path
self.castanet_proxy = subprocess.Popen(['python', castanet_proxy_path])
self.osc_endpoint = self._get_osc_test_server()
self.testQueue = Queue(maxsize=1000)
#these all take time to initialise.
sleep(0.5)
def tearDown(self):
self.castanet_proxy.kill()
self.osc_endpoint.close()
gevent.hub.shutdown()
def _get_osc_test_server(self, port=None):
def intercepting_handler(addr, tags, data, source):
msg_string = "%s [%s] %s" % (addr, tags, str(data))
sys.stdout.write(
"OSCServer Got: '%s' from %s\n" % (
msg_string, getUrlStr(source)
))
self.testQueue.put(data)
port = port or self.remote_osc_address[1]
s = OSCServer(('localhost', port))
s.addMsgHandler('default', intercepting_handler)
return s
def _getOscResponse(self):
self.osc_endpoint.handle_request()
full_response = self.testQueue.get()
return full_response
def _sendHttpData(self, path, data=None):
"""If data is supplied, this will be a POST, otherwise GET. Because
that's how the ancient old urllib2 rolls."""
http_address = "http://%s:%d%s" % (
'127.0.0.1',
self.local_http_address[1],
path
)
print 'http_address', http_address
return urllib2.urlopen(http_address, data)
def testHttpProxy(self):
test_vars = (
('POST', '/sequence/3/note/4', '{"a": "b", "c": 2}'),
('POST', '/sequence/3/note/4', '["ee", "ff", "GG", 555.3, 7.1]')
)
for query in test_vars:
verb, path, data = query # we don't really support multiple verb atm
self._sendHttpData('/forward' + path, data)
resp_verb, resp_path, resp_data = self._getOscResponse()
# import pdb; pdb.set_trace()
self.assertEquals(query, (resp_verb, resp_path, resp_data))
def testHttpTime(self):
#Note python uses seconds, JS, ms
request_now = time()*1000.
resp = json.load(
self._sendHttpData('/timestamp/' + str(request_now))
)
response_now = time()*1000.
#these tests are only valid because we know bother server and
# client times. In general, these values could be anything.
self.assertTrue(resp["proxy_time"]>request_now,
"%f is not greater than %f" %(resp["proxy_time"], request_now))
self.assertTrue((resp["proxy_time"]-request_now)<1,
"%f is much greater than %f" %(resp["proxy_time"], request_now)
)
self.assertTrue((resp["request_lag"]>0),
"%f is negative" % resp["request_lag"])
if __name__=='__main__':
unittest.main()
|
gpl-3.0
| 6,088,890,338,808,105,000
| 34.75
| 80
| 0.592413
| false
|
jds2001/sos
|
sos/plugins/filesys.py
|
1
|
2190
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Filesys(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Local file systems
"""
plugin_name = 'filesys'
profiles = ('storage',)
option_list = [("lsof", 'gathers information on all open files', 'slow',
False),
("dumpe2fs", 'dump filesystem information', 'slow', False)]
def setup(self):
self.add_copy_spec([
"/proc/filesystems",
"/etc/fstab",
"/proc/self/mounts",
"/proc/self/mountinfo",
"/proc/self/mountstats",
"/proc/mounts"
])
self.add_cmd_output("mount -l", root_symlink="mount")
self.add_cmd_output("df -al", root_symlink="df")
self.add_cmd_output([
"df -ali",
"findmnt"
])
if self.get_option('lsof'):
self.add_cmd_output("lsof -b +M -n -l -P", root_symlink="lsof")
dumpe2fs_opts = '-h'
if self.get_option('dumpe2fs'):
dumpe2fs_opts = ''
mounts = '/proc/mounts'
ext_fs_regex = r"^(/dev/.+).+ext[234]\s+"
for dev in self.do_regex_find_all(ext_fs_regex, mounts):
self.add_cmd_output("dumpe2fs %s %s" % (dumpe2fs_opts, dev))
def postproc(self):
self.do_file_sub(
"/etc/fstab",
r"(password=)[^\s]*",
r"\1********"
)
# vim: set et ts=4 sw=4 :
|
gpl-2.0
| -7,307,108,655,078,007,000
| 33.761905
| 78
| 0.594064
| false
|
yephper/django
|
django/contrib/gis/db/backends/mysql/operations.py
|
1
|
3451
|
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
Adapter = WKTAdapter
@cached_property
def select(self):
if self.connection.mysql_version < (5, 6, 0):
return 'AsText(%s)'
return 'ST_AsText(%s)'
@cached_property
def from_wkb(self):
if self.connection.mysql_version < (5, 6, 0):
return 'GeomFromWKB'
return 'ST_GeomFromWKB'
@cached_property
def from_text(self):
if self.connection.mysql_version < (5, 6, 0):
return 'GeomFromText'
return 'ST_GeomFromText'
gis_operators = {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # .. ..
'contained': SpatialOperator(func='MBRWithin'), # .. ..
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func='MBREqual'),
'exact': SpatialOperator(func='MBREqual'),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func='MBREqual'),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
@cached_property
def function_names(self):
return {
'Difference': 'ST_Difference',
'Distance': 'ST_Distance',
'Intersection': 'ST_Intersection',
'Length': 'GLength' if self.connection.mysql_version < (5, 6, 0) else 'ST_Length',
'SymDifference': 'ST_SymDifference',
'Union': 'ST_Union',
}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'ForceRHR', 'GeoHash', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'Transform', 'Translate',
}
if self.connection.mysql_version < (5, 6, 1):
unsupported.update({'Difference', 'Distance', 'Intersection', 'SymDifference', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
|
bsd-3-clause
| 4,677,460,586,627,847,000
| 35.923077
| 100
| 0.611417
| false
|
opencorato/sayit
|
speeches/search_indexes.py
|
2
|
1643
|
from haystack import indexes
from speeches.models import Speech, Speaker, Section
class SpeechIndex(indexes.SearchIndex, indexes.Indexable):
# Use a template here to include speaker name as well... TODO
text = indexes.CharField(document=True, model_attr='text') # , use_template=True)
title = indexes.CharField(model_attr='heading') # use_template=True)
start_date = indexes.DateTimeField(model_attr='start_date', null=True)
instance = indexes.CharField(model_attr='instance__label')
speaker = indexes.IntegerField(model_attr='speaker_id', null=True)
def get_model(self):
return Speech
def index_queryset(self, using=None):
return self.get_model()._default_manager.select_related('instance')
def get_updated_field(self):
return 'modified'
class SpeakerIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='name')
instance = indexes.CharField(model_attr='instance__label')
def get_model(self):
return Speaker
def index_queryset(self, using=None):
return self.get_model()._default_manager.select_related('instance')
def get_updated_field(self):
return 'updated_at'
class SectionIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='heading')
instance = indexes.CharField(model_attr='instance__label')
def get_model(self):
return Section
def index_queryset(self, using=None):
return self.get_model()._default_manager.select_related('instance')
def get_updated_field(self):
return 'modified'
|
agpl-3.0
| -7,003,036,244,537,974,000
| 33.229167
| 86
| 0.702982
| false
|
robbiet480/home-assistant
|
tests/common.py
|
1
|
33228
|
"""Test the helper method for writing tests."""
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import sys
import threading
import uuid
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.yaml.loader as yaml_loader
from tests.async_mock import AsyncMock, Mock, patch
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return asyncio.run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop
).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
fut = asyncio.Future()
fut.set_result(None)
return fut
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone("US/Pacific")
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = Message(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(time)})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
@ha.callback
def async_fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.async_fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path, encoding="utf-8") as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError(f"Integration {component} is already setup")
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
registry.deleted_devices = mock_deleted_entries or OrderedDict()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
):
"""Initialize the mock module."""
self.__name__ = f"homeassistant.components.{domain}"
self.__file__ = f"homeassistant/components/{domain}"
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = AsyncMock(return_value=True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options={},
system_options={},
connection_class=config_entries.CONN_CLASS_UNKNOWN,
unique_id=None,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid.uuid4().hex,
"domain": domain,
"data": data or {},
"system_options": system_options,
"options": options,
"version": version,
"title": title,
"connection_class": connection_class,
"unique_id": unique_id,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError(f"File not found: {fname}")
return patch.object(yaml_loader, "open", mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
fut = asyncio.Future()
if exception is not None:
fut.set_exception(exception)
else:
fut.set_result(return_value)
return fut
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The amount of valid platforms that should be setup
- domain: The domain to count is optional. It can be automatically
determined most of the time
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert (
res_len == count
), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Prime the restore-state cache so *states* appear to be restored from disk."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    last_states = {}
    for state in states:
        as_dict = state.as_dict()
        # Round-trip attributes through JSON so they look exactly like
        # attributes deserialized from the on-disk store.
        as_dict["attributes"] = json.loads(
            json.dumps(as_dict["attributes"], cls=JSONEncoder)
        )
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(as_dict), now
        )

    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"

    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data

    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockEntity(entity.Entity):
    """Mock Entity class.

    Any entity attribute can be overridden by passing it as a keyword
    argument to the constructor; attributes that are not overridden fall
    back to the real Entity default via _handle().
    """

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values

        if "entity_id" in values:
            self.entity_id = values["entity_id"]

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle("name")

    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle("should_poll")

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle("unique_id")

    @property
    def state(self):
        """Return the state of the entity."""
        return self._handle("state")

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle("available")

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle("device_info")

    @property
    def device_class(self):
        """Info how device should be classified."""
        return self._handle("device_class")

    @property
    def unit_of_measurement(self):
        """Info on the units the entity state is in."""
        return self._handle("unit_of_measurement")

    @property
    def capability_attributes(self):
        """Info about capabilities."""
        return self._handle("capability_attributes")

    @property
    def supported_features(self):
        """Info about supported features."""
        return self._handle("supported_features")

    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._handle("entity_registry_enabled_default")

    def _handle(self, attr):
        """Return the overridden value for *attr*, or the real Entity default."""
        if attr in self._values:
            return self._values[attr]
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}

    Written data will be converted to JSON to ensure JSON parsing works.
    The same ``data`` dict is yielded, so tests can inspect what was written.
    """
    if data is None:
        data = {}

    orig_load = storage.Store._async_load

    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None

            mock_data = data.get(store.key)

            if "data" not in mock_data or "version" not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')

            store._data = mock_data

        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info("Loading data for %s: %s", store.key, loaded)
        return loaded

    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))

    async def mock_remove(store):
        """Remove data."""
        data.pop(store.key, None)

    # autospec=True keeps the patched methods bound (so ``store`` arrives
    # as the first positional argument of each side_effect).
    with patch(
        "homeassistant.helpers.storage.Store._async_load",
        side_effect=mock_async_load,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store._write_data",
        side_effect=mock_write_data,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store.async_remove",
        side_effect=mock_remove,
        autospec=True,
    ):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    if store._data is None:
        # Nothing was ever queued for this store; no write pending.
        return

    # Cancel the scheduled delay/final-write listeners so they do not fire
    # later, then perform the pending write immediately.
    store._async_cleanup_final_write_listener()
    store._async_cleanup_delay_listener()
    await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Return the system-health info dict registered by *domain*."""
    info_callback = hass.data["system_health"]["info"][domain]
    return await info_callback(hass)
def mock_integration(hass, module):
    """Register *module* with the loader caches and return its Integration."""
    domain = module.DOMAIN
    integration = loader.Integration(
        hass, f"homeassistant.components.{domain}", None, module.mock_manifest()
    )

    _LOGGER.info("Adding mock integration: %s", domain)
    integrations = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    components = hass.data.setdefault(loader.DATA_COMPONENTS, {})
    integrations[domain] = integration
    components[domain] = module

    return integration
def mock_entity_platform(hass, platform_path, module):
    """Mock an entity platform.

    platform_path is in form light.hue. Will create platform
    hue.light.
    """
    domain, platform_name = platform_path.split(".")
    # Entity platforms live under the integration, so swap the two parts.
    mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
    """Mock a platform (platform_path is in the form ``hue.config_flow``)."""
    domain, _ = platform_path.split(".")
    integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})

    # The owning integration must exist before a platform can be attached.
    if domain not in integration_cache:
        mock_integration(hass, MockModule(domain))

    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
    """Return a list that collects every event fired with *event_name*."""
    captured = []

    @ha.callback
    def _capture(event):
        """Append the fired event to the capture list."""
        captured.append(event)

    hass.bus.async_listen(event_name, _capture)

    return captured
@ha.callback
def async_mock_signal(hass, signal):
    """Return a list collecting the args of every dispatch to *signal*."""
    captured = []

    @ha.callback
    def _record_dispatch(*args):
        """Store the dispatched arguments."""
        captured.append(args)

    hass.helpers.dispatcher.async_dispatcher_connect(signal, _record_dispatch)

    return captured
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into other dicts.

    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts
    """

    def __key(self):
        # Canonical, order-independent identity: sorted (key, value) pairs.
        return tuple(sorted(self.items()))

    def __repr__(self):  # noqa: D105 no docstring
        # Fix: include the class name so repr() matches the doctests above
        # (previously only "k=v, ..." was returned, without "hashdict(...)").
        return "{}({})".format(
            type(self).__name__,
            ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key()),
        )

    def __hash__(self):  # noqa: D105 no docstring
        return hash(self.__key())

    def _immutable(self, *args, **kwargs):
        """Raise TypeError: mutation would silently change the hash."""
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    # Disable every mutating dict method; a mutated hashdict used as a key
    # elsewhere would become unreachable in that dict.
    __setitem__ = _immutable
    __delitem__ = _immutable
    clear = _immutable
    pop = _immutable
    popitem = _immutable
    setdefault = _immutable
    update = _immutable

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):  # noqa: D105 no docstring
        result = hashdict(self)
        dict.update(result, right)
        return result
def assert_lists_same(a, b):
    """Compare two lists, ignoring order."""
    counted_a = collections.Counter(hashdict(item) for item in a)
    counted_b = collections.Counter(hashdict(item) for item in b)
    assert counted_a == counted_b
|
apache-2.0
| -5,347,246,741,492,934,000
| 29.596685
| 93
| 0.627182
| false
|
jocelynj/weboob
|
weboob/applications/weboorrents/weboorrents.py
|
1
|
5788
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import sys
from weboob.capabilities.torrent import ICapTorrent
from weboob.tools.application.repl import ReplApplication
from weboob.tools.application.formatters.iformatter import IFormatter
__all__ = ['Weboorrents']
def sizeof_fmt(num):
    """Format a byte count as a human-readable string (e.g. '2.0 KB').

    Fix: values of one petabyte or more previously fell off the end of the
    loop and implicitly returned None; they now get a 'PB' result.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%-4.1f%s" % (num, unit)
        num /= 1024.0
    return "%-4.1f%s" % (num, 'PB')
class TorrentInfoFormatter(IFormatter):
    """Formatter printing full details of a single torrent."""
    MANDATORY_FIELDS = ('id', 'name', 'size', 'seeders', 'leechers', 'url', 'files', 'description')

    def flush(self):
        pass

    def format_dict(self, item):
        bold, nc = ReplApplication.BOLD, ReplApplication.NC
        lines = [u'%s%s%s' % (bold, item['name'], nc),
                 'ID: %s' % item['id'],
                 'Size: %s' % sizeof_fmt(item['size']),
                 'Seeders: %s' % item['seeders'],
                 'Leechers: %s' % item['leechers'],
                 'URL: %s' % item['url'],
                 '',
                 '%sFiles%s' % (bold, nc)]
        lines.extend(' * %s' % f for f in item['files'])
        lines.append('')
        lines.append('%sDescription%s' % (bold, nc))
        lines.append(item['description'])
        return '\n'.join(lines)
class TorrentListFormatter(IFormatter):
    """Formatter printing one compact entry per torrent in a result list."""
    MANDATORY_FIELDS = ('id', 'name', 'size', 'seeders', 'leechers')
    count = 0

    def flush(self):
        # Reset the per-listing counter between two searches.
        self.count = 0

    def format_dict(self, item):
        self.count += 1
        bold, nc = ReplApplication.BOLD, ReplApplication.NC
        if self.interactive:
            # Interactive mode shows a position number usable as a short id.
            backend = item['id'].split('@', 1)[1]
            result = u'%s* (%d) %s (%s)%s\n' % (bold, self.count, item['name'], backend, nc)
        else:
            result = u'%s* (%s) %s%s\n' % (bold, item['id'], item['name'], nc)
        size = sizeof_fmt(item['size'])
        result += '  %10s (Seed: %2d / Leech: %2d)' % (size, item['seeders'], item['leechers'])
        return result
class Weboorrents(ReplApplication):
    # NOTE: this module is Python 2 code (print statements, `except E, e`).
    APPNAME = 'weboorrents'
    VERSION = '0.4'
    COPYRIGHT = 'Copyright(C) 2010 Romain Bignon'
    # Only backends implementing the torrent capability are loaded.
    CAPS = ICapTorrent
    EXTRA_FORMATTERS = {'torrent_list': TorrentListFormatter,
                        'torrent_info': TorrentInfoFormatter,
                        }
    COMMANDS_FORMATTERS = {'search': 'torrent_list',
                           'info': 'torrent_info',
                           }

    # Results of the last `search` command; numeric ids index into this list.
    torrents = []

    def _complete_id(self):
        # Completion candidates: "<torrent_id>@<backend>" for each last result.
        return ['%s@%s' % (torrent.id, torrent.backend) for torrent in self.torrents]

    def complete_info(self, text, line, *ignored):
        args = line.split(' ')
        if len(args) == 2:
            return self._complete_id()

    def parse_id(self, id):
        # In interactive mode a bare number refers to the n-th result of the
        # last search; translate it to "<id>@<backend>" before parsing.
        if self.interactive:
            try:
                torrent = self.torrents[int(id) - 1]
            except (IndexError,ValueError):
                pass
            else:
                id = '%s@%s' % (torrent.id, torrent.backend)
        return ReplApplication.parse_id(self, id)

    def do_info(self, id):
        """
        info ID

        Get information about a torrent.
        """
        _id, backend_name = self.parse_id(id)

        found = 0
        for backend, torrent in self.do('get_torrent', _id, backends=backend_name):
            if torrent:
                self.format(torrent)
                found = 1

        if not found:
            print >>sys.stderr, 'Torrent "%s" not found' % id
        else:
            self.flush()

    def complete_getfile(self, text, line, *ignored):
        args = line.split(' ', 2)
        if len(args) == 2:
            return self._complete_id()
        elif len(args) >= 3:
            # Third argument is a destination path: complete file names.
            return self.path_completer(args[2])

    def do_getfile(self, line):
        """
        getfile ID FILENAME

        Get the .torrent file.
        FILENAME is where to write the file. If FILENAME is '-',
        the file is written to stdout.
        """
        id, dest = self.parseargs(line, 2, 2)

        _id, backend_name = self.parse_id(id)
        for backend, buf in self.do('get_torrent_file', _id, backends=backend_name):
            if buf:
                if dest == '-':
                    print buf
                else:
                    try:
                        with open(dest, 'w') as f:
                            f.write(buf)
                    except IOError, e:
                        print >>sys.stderr, 'Unable to write .torrent in "%s": %s' % (dest, e)
                        return 1
                # Stop at the first backend that returned a file.
                return

        print >>sys.stderr, 'Torrent "%s" not found' % id

    def do_search(self, pattern):
        """
        search [PATTERN]

        Search torrents.
        """
        # Reset the cached results so numeric ids refer to this search only.
        self.torrents = []
        if not pattern:
            pattern = None
        self.set_formatter_header(u'Search pattern: %s' % pattern if pattern else u'Latest torrents')
        for backend, torrent in self.do('iter_torrents', pattern=pattern):
            self.torrents.append(torrent)
            self.format(torrent)
        self.flush()
|
gpl-3.0
| -737,716,371,771,267,100
| 31.700565
| 124
| 0.554423
| false
|
jbaragry/ardoq-archimate
|
setup.py
|
1
|
1672
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py; used to resolve README.md reliably
# regardless of the current working directory.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# NOTE(review): since the long description is Markdown, consider adding
# long_description_content_type='text/markdown' for proper PyPI rendering.
setup(
    name='ardoqarchimate',
    version='0.0.6',
    description='ArchiMate Open Exchange Format (R) importer for Ardoq (R)',
    long_description=long_description,
    url='https://github.com/jbaragry/ardoq-archimate',
    author='Jason Baragry',
    license='MIT',
    packages=find_packages(exclude=['resources']),
    include_package_data=True,
    classifiers=[
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Documentation',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='architecture ardoq archimate import development tool',
    install_requires=['ardoqpy', 'xmltodict', 'configparser'],
)
|
mit
| 7,526,193,376,280,982,000
| 33.833333
| 77
| 0.678828
| false
|
abingham/ackward
|
site_scons/ackward/class_property.py
|
1
|
1765
|
from .element import SigTemplateElement
from .include import ImplInclude
from .trace import trace
header_getter = 'static $type $property_name();'
header_setter = 'static void $property_name($header_signature);'
impl_getter = '''
$type $class_name::$property_name() {
using namespace boost::python;
try {
object prop =
$class_name::cls().attr("$property_name");
return extract<$type>(prop);
} TRANSLATE_PYTHON_EXCEPTION()
}'''
impl_setter = '''
void $class_name::$property_name($impl_signature) {
using namespace boost::python;
try {
object prop =
$class_name::cls().attr("$property_name");
prop = val;
} TRANSLATE_PYTHON_EXCEPTION()
}'''
class ClassProperty(SigTemplateElement):
    '''A static property on a class.

    Args:
      * name: The name of the property.
      * type: The type of the property.
      * read_only: Whether the property is read-only or read-write.
    '''

    @trace
    def __init__(self,
                 name,
                 type,
                 read_only=False,
                 parent=None):
        header_tmpl = header_getter
        impl_tmpl = impl_getter
        if not read_only:
            # Read-write properties also get the setter declaration/definition.
            header_tmpl = '\n'.join((header_tmpl, header_setter))
            impl_tmpl = '\n'.join((impl_tmpl, impl_setter))

        SigTemplateElement.__init__(
            self,
            open_templates={
                'header': header_tmpl,
                'impl': impl_tmpl,
            },
            symbols={
                'property_name': name,
                'type': type,
                'signature': [(type, 'val')]
            },
            parent=parent)
        # Generated code uses TRANSLATE_PYTHON_EXCEPTION(), declared here.
        self.add_child(
            ImplInclude(
                ('ackward', 'core', 'ExceptionTranslation.hpp')))
mit
| 3,386,836,769,049,771,000
| 25.343284
| 67
| 0.529178
| false
|
alexandercrosson/ml
|
tensorflow/cnn_text_classifier/train.py
|
1
|
7609
|
#! /usr/bin/env python
"""
Taken from Denny Britz's tutorial on CNNs
http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
"""
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ------------------------
# NOTE: this script targets Python 2 and a pre-1.0 TensorFlow API
# (tf.flags, FLAGS._parse_flags, FLAGS.__flags are private/legacy APIs).

# Hyperparameters
tf.flags.DEFINE_integer('embedding_dim', 128, 'Dimensionality of character embedding (default: 128')
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print '\nParameters:'
for attr, value in sorted(FLAGS.__flags.items()):
    # NOTE(review): '{}{}' prints the flag name and value with no separator;
    # probably intended '{}={}'.
    print('{}{}'.format(attr.upper(), value))
print ''
# Data Preprocessing
# ------------------------
# Load data
print 'Loading data...'
x_test, y = data_helpers.load_data_and_labels()
# Build Vocabulary
max_document_length = max([len(x.split(' ')) for x in x_test])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_test)))
# Randomly Shuffle the data
np.random.seed(10)
shuffle_indicies = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indicies]
y_shuffled = y[shuffle_indicies]
# Train / Test Split
x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
print 'Vocabulary size: {:d}'.format(len(vocab_processor.vocabulary_))
print 'Train/Dev split {:d}/{:d}'.format(len(y_train), len(y_dev))
# Training
# ------------------------
# NOTE: uses pre-TF-1.0 APIs (tf.histogram_summary, tf.merge_summary,
# tf.train.SummaryWriter, tf.all_variables, tf.initialize_all_variables).

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        cnn = TextCNN(
            sequence_length=x_train.shape[1],
            num_classes=2,
            vocab_size=len(vocab_processor.vocabulary_),
            embedding_size=FLAGS.embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters)

        # Training procedure: Adam with a manually tracked global step.
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                grad_hist_summary = tf.histogram_summary('{}/grad/hist'.\
                        format(v.name), g)
                sparsity_summary = tf.scalar_summary('{}/grad/sparsity'.\
                        format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.merge_summary(grad_summaries)

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
        print("Writing to {}\n".format(out_dir))

        # Summaries for loss and accuracy
        loss_summary = tf.scalar_summary('loss', cnn.loss)
        acc_summary = tf.scalar_summary('accuracy', cnn.accuracy)

        # Train summaries
        train_summary_op = tf.merge_summary([loss_summary, acc_summary,
                                             grad_summaries_merged])
        train_summary_dir = os.path.join(out_dir, 'summaries', 'train')
        train_summary_writer = tf.train.SummaryWriter(train_summary_dir,
                                                      sess.graph)

        # Dev summaries (no gradient summaries during evaluation)
        dev_summary_op = tf.merge_summary([loss_summary, acc_summary])
        dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev')
        dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph)

        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.all_variables())

        # Write vocabulary
        vocab_processor.save(os.path.join(out_dir, "vocab"))

        # Initialize all variables
        sess.run(tf.initialize_all_variables())

        def train_step(x_batch, y_batch):
            """
            A single training step
            """
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
            }
            _, step, summaries, loss, accuracy = sess.run(
                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            train_summary_writer.add_summary(summaries, step)

        def dev_step(x_batch, y_batch, writer=None):
            """
            Evaluates model on a dev set
            """
            # Dropout is disabled (keep_prob=1.0) during evaluation.
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: 1.0
            }
            step, summaries, loss, accuracy = sess.run(
                [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            if writer:
                writer.add_summary(summaries, step)

        # Generate batches
        batches = data_helpers.batch_iter(
            list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
        # Training loop. For each batch...
        for batch in batches:
            x_batch, y_batch = zip(*batch)
            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                print("\nEvaluation:")
                dev_step(x_dev, y_dev, writer=dev_summary_writer)
                print("")
            if current_step % FLAGS.checkpoint_every == 0:
                path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))
|
mit
| -7,158,994,837,752,444,000
| 38.630208
| 112
| 0.613353
| false
|
bibanon/tubeup
|
tests/test_tubeup.py
|
1
|
25904
|
import unittest
import os
import shutil
import json
import time
import requests_mock
import glob
import logging
from tubeup.TubeUp import TubeUp, DOWNLOAD_DIR_NAME
from tubeup import __version__
from youtube_dl import YoutubeDL
from .constants import info_dict_playlist, info_dict_video
# Directory containing this test module; fixture paths are resolved from it.
current_path = os.path.dirname(os.path.realpath(__file__))

# Scanner string TubeUp stamps into archive.org item metadata.
SCANNER = 'TubeUp Video Stream Mirroring Application {}'.format(__version__)
def get_testfile_path(name):
    """Return the absolute path of fixture *name* under test_tubeup_files."""
    fixtures_dir = os.path.join(current_path, 'test_tubeup_files')
    return os.path.join(fixtures_dir, name)
def mocked_ydl_progress_hook(d):
    """No-op progress hook handed to youtube-dl in tests."""
    return None
def mock_upload_response_by_videobasename(m, ia_id, videobasename):
    """Register a mocked S3 PUT response for every file of a video."""
    for file_path in glob.glob(videobasename + '*'):
        filename = os.path.basename(file_path)
        url = 'https://s3.us.archive.org/%s/%s' % (ia_id, filename)
        m.put(url,
              content=b'',
              headers={'content-type': 'text/plain'})
def copy_testfiles_to_tubeup_rootdir_test():
    """Copy the upload/download fixtures into the test root's downloads dir.

    Needed because the internetarchive library deletes files after it has
    uploaded them, so each test run must start from a fresh copy.
    """
    src_dir = os.path.join(current_path, 'test_tubeup_files',
                           'files_for_upload_and_download_tests')
    dst_dir = os.path.join(current_path, 'test_tubeup_rootdir', 'downloads')

    for name in os.listdir(src_dir):
        shutil.copy(os.path.join(src_dir, name),
                    os.path.join(dst_dir, name))
class TubeUpTests(unittest.TestCase):
    def setUp(self):
        """Create a fresh quiet TubeUp and disable assertEqual diff truncation."""
        self.tu = TubeUp()
        self.maxDiff = 999999999
def test_set_dir_path(self):
root_path = os.path.join(
current_path, '.directory_for_tubeup_set_dir_path_test')
dir_paths_dict = dict(root=root_path,
downloads=os.path.join(root_path,
DOWNLOAD_DIR_NAME))
self.tu.dir_path = root_path
self.assertEqual(self.tu.dir_path, dir_paths_dict)
# Make sure that other directories are created as well
self.assertTrue(os.path.exists(dir_paths_dict['downloads']))
# Clean the test directory
shutil.rmtree(root_path, ignore_errors=True)
    def test_tubeup_attribute_logger_when_quiet_mode(self):
        """A quiet TubeUp exposes a Logger restricted to ERROR level."""
        # self.tu is already `TubeUp` instance with quiet mode, so we don't
        # create a new instance here.
        self.assertIsInstance(self.tu.logger, logging.Logger)
        self.assertEqual(self.tu.logger.level, logging.ERROR)
    def test_tubeup_attribute_logger_when_verbose_mode(self):
        """A verbose TubeUp still exposes a standard Logger instance."""
        tu = TubeUp(verbose=True)
        self.assertIsInstance(tu.logger, logging.Logger)
def test_determine_collection_type(self):
soundcloud_colltype = self.tu.determine_collection_type(
'https://soundcloud.com/testurl')
another_colltype = self.tu.determine_collection_type(
'https://www.youtube.com/watch?v=testVideo'
)
self.assertEqual(soundcloud_colltype, 'opensource_audio')
self.assertEqual(another_colltype, 'opensource_movies')
def test_create_basenames_from_ydl_info_dict_video(self):
ydl = YoutubeDL()
result = self.tu.create_basenames_from_ydl_info_dict(
ydl, info_dict_video)
expected_result = set(
['Video and Blog Competition 2017 - Bank Indonesia & '
'NET TV #BIGoesToCampus-hlG3LeFaQwU'])
self.assertEqual(result, expected_result)
def test_create_basenames_from_ydl_info_dict_playlist(self):
ydl = YoutubeDL()
result = self.tu.create_basenames_from_ydl_info_dict(
ydl, info_dict_playlist)
expected_result = set([
'Live Streaming Rafid Aslam-7gjgkH5iPaE',
'Live Streaming Rafid Aslam-q92kxPm-pqM',
'Cara Membuat Laptop Menjadi Hotspot WiFi Dengan CMD-YjFwMSDNphM',
'[CSO] Defeat Boss in Dead End With Thanatos 7-EEm6MwXLse0',
'Cara Bermain Minecraft Multiplayer Dengan LAN-g2vTZ2ka-tM',
'Live Streaming Rafid Aslam-AXhuSS5_9YU',
'Cara Membuat Disk Baru di Komputer-KDOygJnK7Sw',
'Cara Mendownload Lewat Torrent-cC-9RghkvXs']
)
self.assertEqual(result, expected_result)
def test_generate_ydl_options_with_download_archive(self):
result = self.tu.generate_ydl_options(mocked_ydl_progress_hook,
use_download_archive=True)
expected_result = {
'outtmpl': os.path.join(
self.tu.dir_path['downloads'], '%(id)s.%(ext)s'),
'restrictfilenames': True,
'verbose': False,
'quiet': True,
'download_archive': os.path.join(self.tu.dir_path['root'],
'.ytdlarchive'),
'progress_with_newline': True,
'forcetitle': True,
'continuedl': True,
'retries': 9001,
'fragment_retries': 9001,
'forcejson': True,
'writeinfojson': True,
'writedescription': True,
'writethumbnail': True,
'writeannotations': True,
'writesubtitles': True,
'allsubtitles': True,
'ignoreerrors': True,
'fixup': 'warn',
'nooverwrites': True,
'consoletitle': True,
'prefer_ffmpeg': True,
'call_home': False,
'logger': self.tu.logger,
'progress_hooks': [mocked_ydl_progress_hook]}
self.assertEqual(result, expected_result)
def test_generate_ydl_options(self):
result = self.tu.generate_ydl_options(mocked_ydl_progress_hook)
expected_result = {
'outtmpl': os.path.join(
self.tu.dir_path['downloads'], '%(id)s.%(ext)s'),
'restrictfilenames': True,
'verbose': False,
'quiet': True,
'progress_with_newline': True,
'forcetitle': True,
'continuedl': True,
'retries': 9001,
'fragment_retries': 9001,
'forcejson': True,
'writeinfojson': True,
'writedescription': True,
'writethumbnail': True,
'writeannotations': True,
'writesubtitles': True,
'allsubtitles': True,
'ignoreerrors': True,
'fixup': 'warn',
'nooverwrites': True,
'consoletitle': True,
'prefer_ffmpeg': True,
'call_home': False,
'logger': self.tu.logger,
'progress_hooks': [mocked_ydl_progress_hook]}
self.assertEqual(result, expected_result)
def test_generate_ydl_options_with_proxy(self):
result = self.tu.generate_ydl_options(
mocked_ydl_progress_hook, proxy_url='http://proxytest.com:8080')
expected_result = {
'outtmpl': os.path.join(
self.tu.dir_path['downloads'], '%(id)s.%(ext)s'),
'restrictfilenames': True,
'verbose': False,
'quiet': True,
'progress_with_newline': True,
'forcetitle': True,
'continuedl': True,
'retries': 9001,
'fragment_retries': 9001,
'forcejson': True,
'writeinfojson': True,
'writedescription': True,
'writethumbnail': True,
'writeannotations': True,
'writesubtitles': True,
'allsubtitles': True,
'ignoreerrors': True,
'fixup': 'warn',
'nooverwrites': True,
'consoletitle': True,
'prefer_ffmpeg': True,
'call_home': False,
'logger': self.tu.logger,
'progress_hooks': [mocked_ydl_progress_hook],
'proxy': 'http://proxytest.com:8080'}
self.assertEqual(result, expected_result)
def test_generate_ydl_options_with_ydl_account(self):
result = self.tu.generate_ydl_options(
mocked_ydl_progress_hook, ydl_username='testUsername',
ydl_password='testPassword')
expected_result = {
'outtmpl': os.path.join(
self.tu.dir_path['downloads'], '%(id)s.%(ext)s'),
'restrictfilenames': True,
'verbose': False,
'quiet': True,
'progress_with_newline': True,
'forcetitle': True,
'continuedl': True,
'retries': 9001,
'fragment_retries': 9001,
'forcejson': True,
'writeinfojson': True,
'writedescription': True,
'writethumbnail': True,
'writeannotations': True,
'writesubtitles': True,
'allsubtitles': True,
'ignoreerrors': True,
'fixup': 'warn',
'nooverwrites': True,
'consoletitle': True,
'prefer_ffmpeg': True,
'call_home': False,
'logger': self.tu.logger,
'progress_hooks': [mocked_ydl_progress_hook],
'username': 'testUsername',
'password': 'testPassword'}
self.assertEqual(result, expected_result)
def test_generate_ydl_options_with_verbose_mode(self):
tu = TubeUp(verbose=True)
result = tu.generate_ydl_options(
mocked_ydl_progress_hook, ydl_username='testUsername',
ydl_password='testPassword')
expected_result = {
'outtmpl': os.path.join(
self.tu.dir_path['downloads'], '%(id)s.%(ext)s'),
'restrictfilenames': True,
'verbose': True,
'quiet': False,
'progress_with_newline': True,
'forcetitle': True,
'continuedl': True,
'retries': 9001,
'fragment_retries': 9001,
'forcejson': True,
'writeinfojson': True,
'writedescription': True,
'writethumbnail': True,
'writeannotations': True,
'writesubtitles': True,
'allsubtitles': True,
'ignoreerrors': True,
'fixup': 'warn',
'nooverwrites': True,
'consoletitle': True,
'prefer_ffmpeg': True,
'call_home': False,
'logger': tu.logger,
'progress_hooks': [mocked_ydl_progress_hook],
'username': 'testUsername',
'password': 'testPassword'}
self.assertEqual(result, expected_result)
    def test_create_archive_org_metadata_from_youtubedl_meta(self):
        """Full youtube-dl metadata maps onto the archive.org item fields."""
        with open(get_testfile_path(
                  'Mountain_3_-_Video_Background_HD_1080p-6iRV8liah8A.info.json')
                  ) as f:
            vid_meta = json.load(f)

        result = TubeUp.create_archive_org_metadata_from_youtubedl_meta(
            vid_meta
        )

        expected_result = {
            'mediatype': 'movies',
            'creator': 'Video Background',
            'collection': 'opensource_movies',
            'title': 'Mountain 3 - Video Background HD 1080p',
            'description': ('Mountain 3 - Video Background HD 1080p<br>'
                            'If you use this video please put credits to my '
                            'channel in description:<br>https://www.youtube.com'
                            '/channel/UCWpsozCMdAnfI16rZHQ9XDg<br>© Don\'t '
                            'forget to SUBSCRIBE, LIKE, COMMENT and RATE. '
                            'Hope you all enjoy! <br/><br/>Source: '
                            '<a href="https://www.youtube.com/watch?v='
                            '6iRV8liah8A">https://www.youtube.com/watch?v='
                            '6iRV8liah8A</a><br/>Uploader: <a href="http://ww'
                            'w.youtube.com/channel/UCWpsozCMdAnfI16rZHQ9XDg">'
                            'Video Background</a>'),
            'date': '2015-01-05',
            'year': '2015',
            'subject': ('Youtube;video;Entertainment;Video Background;Footage;'
                        'Animation;Cinema;stock video footage;Royalty '
                        'free videos;Creative Commons videos;free movies '
                        'online;youtube;HD;1080p;Amazing Nature;Mountain;'),
            'originalurl': 'https://www.youtube.com/watch?v=6iRV8liah8A',
            'licenseurl': 'https://creativecommons.org/licenses/by/3.0/',
            'scanner': SCANNER}

        self.assertEqual(expected_result, result)
def test_create_archive_org_metadata_from_youtubedl_meta_description_text_null(self):
with open(get_testfile_path(
'description_text_null.json')
) as f:
vid_meta = json.load(f)
result = TubeUp.create_archive_org_metadata_from_youtubedl_meta(
vid_meta
)
expected_description = (' <br/><br/>Source: <a href="url">url</a><br/>'
'Uploader: <a href="url">tubeup.py</a>')
self.assertEqual(expected_description, result.get('description'))
    def test_create_archive_org_metadata_from_youtubedl_meta_no_uploader(self):
        """When the info dict has no uploader name, the channel URL is used
        both as the `creator` and as the uploader link text in the
        description."""
        with open(get_testfile_path(
                'Mountain_3_-_Video_Background_HD_1080p-6iRV8liah8A.info_no_'
                'uploader.json')
                ) as f:
            vid_meta = json.load(f)
        result = TubeUp.create_archive_org_metadata_from_youtubedl_meta(
            vid_meta
        )
        expected_result = {
            'mediatype': 'movies',
            'creator': 'http://www.youtube.com/channel/UCWpsozCMdAnfI16rZHQ9XDg',
            'collection': 'opensource_movies',
            'title': 'Mountain 3 - Video Background HD 1080p',
            'description': ('Mountain 3 - Video Background HD 1080p<br>'
                            'If you use this video please put credits to my '
                            'channel in description:<br>https://www.youtube.com'
                            '/channel/UCWpsozCMdAnfI16rZHQ9XDg<br>© Don\'t '
                            'forget to SUBSCRIBE, LIKE, COMMENT and RATE. '
                            'Hope you all enjoy! <br/><br/>Source: '
                            '<a href="https://www.youtube.com/watch?v='
                            '6iRV8liah8A">https://www.youtube.com/watch?v='
                            '6iRV8liah8A</a><br/>Uploader: <a href="http://ww'
                            'w.youtube.com/channel/UCWpsozCMdAnfI16rZHQ9XDg">'
                            'http://www.youtube.com/channel/UCWpsozCMdAnfI16rZ'
                            'HQ9XDg</a>'),
            'date': '2015-01-05',
            'year': '2015',
            'subject': ('Youtube;video;Entertainment;Video Background;Footage;'
                        'Animation;Cinema;stock video footage;Royalty '
                        'free videos;Creative Commons videos;free movies '
                        'online;youtube;HD;1080p;Amazing Nature;Mountain;'),
            'originalurl': 'https://www.youtube.com/watch?v=6iRV8liah8A',
            'licenseurl': 'https://creativecommons.org/licenses/by/3.0/',
            'scanner': SCANNER}
        self.assertEqual(expected_result, result)
    def test_create_archive_org_metadata_from_youtubedl_meta_no_date(self):
        """When the info dict lacks an upload date, the current date (at test
        run time) is used for both `date` and `year`."""
        with open(get_testfile_path(
                'Mountain_3_-_Video_Background_HD_1080p-6iRV8liah8A.'
                'info_no_date.json')
                ) as f:
            vid_meta = json.load(f)
        result = TubeUp.create_archive_org_metadata_from_youtubedl_meta(
            vid_meta
        )
        # Compute "today" the same way the implementation is expected to;
        # NOTE(review): could be flaky if the test runs across midnight.
        upload_date = time.strftime("%Y-%m-%d")
        upload_year = time.strftime("%Y")
        expected_result = {
            'mediatype': 'movies',
            'creator': 'Video Background',
            'collection': 'opensource_movies',
            'title': 'Mountain 3 - Video Background HD 1080p',
            'description': ('Mountain 3 - Video Background HD 1080p<br>'
                            'If you use this video please put credits to my '
                            'channel in description:<br>https://www.youtube.com'
                            '/channel/UCWpsozCMdAnfI16rZHQ9XDg<br>© Don\'t '
                            'forget to SUBSCRIBE, LIKE, COMMENT and RATE. '
                            'Hope you all enjoy! <br/><br/>Source: '
                            '<a href="https://www.youtube.com/watch?v='
                            '6iRV8liah8A">https://www.youtube.com/watch?v='
                            '6iRV8liah8A</a><br/>Uploader: <a href="http://ww'
                            'w.youtube.com/channel/UCWpsozCMdAnfI16rZHQ9XDg">'
                            'Video Background</a>'),
            'date': upload_date,
            'year': upload_year,
            'subject': ('Youtube;video;Entertainment;Video Background;Footage;'
                        'Animation;Cinema;stock video footage;Royalty '
                        'free videos;Creative Commons videos;free movies '
                        'online;youtube;HD;1080p;Amazing Nature;Mountain;'),
            'originalurl': 'https://www.youtube.com/watch?v=6iRV8liah8A',
            'licenseurl': 'https://creativecommons.org/licenses/by/3.0/',
            'scanner': SCANNER}
        self.assertEqual(expected_result, result)
    def test_create_archive_org_metadata_from_youtubedl_meta_twitch_clips(self):
        """A Twitch clip info dict maps to `TwitchClips` subject tags and
        produces an empty license URL."""
        with open(get_testfile_path(
                'EA_Play_2016_Live_from_the_Novo_Theatre-42850523.info.json')
                ) as f:
            vid_meta = json.load(f)
        result = TubeUp.create_archive_org_metadata_from_youtubedl_meta(
            vid_meta
        )
        expected_result = {
            'mediatype': 'movies',
            'creator': 'EA',
            'collection': 'opensource_movies',
            'title': 'EA Play 2016 Live from the Novo Theatre',
            'description': (' <br/><br/>Source: <a href="https://clips.twitch.tv/FaintLightGullWholeWheat">'
                            'https://clips.twitch.tv/FaintLightGullWholeWheat</a><br/>Uploader: '
                            '<a href="https://clips.twitch.tv/FaintLightGullWholeWheat">EA</a>'),
            'date': '2016-06-12',
            'year': '2016',
            'subject': 'TwitchClips;video;',
            'originalurl': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
            'licenseurl': '',
            'scanner': SCANNER}
        self.assertEqual(expected_result, result)
    def test_get_resource_basenames(self):
        """Downloading a URL should yield the set of file basenames (full path
        minus extension) created under the downloads directory; fixtures are
        pre-copied so no network download happens."""
        tu = TubeUp(dir_path=os.path.join(current_path,
                                          'test_tubeup_rootdir'))
        copy_testfiles_to_tubeup_rootdir_test()
        result = tu.get_resource_basenames(
            ['https://www.youtube.com/watch?v=KdsN9YhkDrY'])
        expected_result = {os.path.join(
            current_path, 'test_tubeup_rootdir', 'downloads',
            'KdsN9YhkDrY')}
        self.assertEqual(expected_result, result)
    def test_upload_ia(self):
        """With all archive.org endpoints mocked, upload_ia should return the
        item identifier and the generated metadata dict for the fixture
        video."""
        tu = TubeUp(dir_path=os.path.join(current_path,
                                          'test_tubeup_rootdir'),
                    # Use custom ia configuration file so we don't need
                    # to login with username and password.
                    ia_config_path=get_testfile_path('ia_config_for_test.ini'))
        videobasename = os.path.join(
            current_path, 'test_tubeup_rootdir', 'downloads',
            'Mountain_3_-_Video_Background_HD_1080p-6iRV8liah8A')
        copy_testfiles_to_tubeup_rootdir_test()
        with requests_mock.Mocker() as m:
            # Mock the request to s3.us.archive.org, so it will responds
            # a custom json. `internetarchive` library sends GET request to
            # that url to check that we don't violate the upload limit.
            m.get('https://s3.us.archive.org',
                  content=b'{"over_limit": 0}',
                  headers={'content-type': 'application/json'})
            m.get('https://archive.org/metadata/youtube-6iRV8liah8A',
                  content=b'{}',
                  headers={'content-type': 'application/json'})
            # Mock the PUT requests for internetarchive urls that defined
            # in mock_upload_response_by_videobasename(), so this test
            # doesn't perform upload to the real archive.org server.
            mock_upload_response_by_videobasename(
                m, 'youtube-6iRV8liah8A', videobasename)
            result = tu.upload_ia(videobasename)
            expected_result = (
                'youtube-6iRV8liah8A',
                {'mediatype': 'movies',
                 'creator': 'Video Background',
                 'collection': 'opensource_movies',
                 'title': 'Mountain 3 - Video Background HD 1080p',
                 'description': ('Mountain 3 - Video Background HD 1080p<br>If '
                                 'you use this video please put credits to my'
                                 ' channel in description:<br>https://www.youtub'
                                 'e.com/channel/UCWpsozCMdAnfI16rZHQ9XDg<br>© D'
                                 'on\'t forget to SUBSCRIBE, LIKE, COMMENT an'
                                 'd RATE. Hope you all enjoy! <br/><br/>Sourc'
                                 'e: <a href="https://www.youtube.com/watch?v'
                                 '=6iRV8liah8A">https://www.youtube.com/watch'
                                 '?v=6iRV8liah8A</a><br/>Uploader: <a href="h'
                                 'ttp://www.youtube.com/channel/UCWpsozCMdAnf'
                                 'I16rZHQ9XDg">Video Background</a>'),
                 'date': '2015-01-05',
                 'year': '2015',
                 'subject': ('Youtube;video;Entertainment;Video Background;'
                             'Footage;Animation;Cinema;stock video footage;'
                             'Royalty free videos;Creative Commons videos;'
                             'free movies online;youtube;HD;1080p;Amazing '
                             'Nature;Mountain;'),
                 'originalurl': 'https://www.youtube.com/watch?v=6iRV8liah8A',
                 'licenseurl': 'https://creativecommons.org/licenses/by/3.0/',
                 'scanner': SCANNER})
            self.assertEqual(expected_result, result)
    def test_archive_urls(self):
        """End-to-end archive_urls flow: download (from pre-copied fixtures)
        plus mocked upload should yield one (identifier, metadata) pair per
        input URL."""
        tu = TubeUp(dir_path=os.path.join(current_path,
                                          'test_tubeup_rootdir'),
                    ia_config_path=get_testfile_path('ia_config_for_test.ini'))
        videobasename = os.path.join(
            current_path, 'test_tubeup_rootdir', 'downloads',
            'KdsN9YhkDrY')
        copy_testfiles_to_tubeup_rootdir_test()
        with requests_mock.Mocker() as m:
            # Mock the request to s3.us.archive.org, so it will responds
            # a custom json. `internetarchive` library sends GET request to
            # that url to check that we don't violate the upload limit.
            m.get('https://s3.us.archive.org',
                  content=b'{"over_limit": 0}',
                  headers={'content-type': 'application/json'})
            m.get('https://archive.org/metadata/youtube-KdsN9YhkDrY',
                  content=b'{}',
                  headers={'content-type': 'application/json'})
            # Mock the PUT requests for internetarchive urls that defined
            # in mock_upload_response_by_videobasename(), so this test
            # doesn't perform upload to the real archive.org server.
            mock_upload_response_by_videobasename(
                m, 'youtube-KdsN9YhkDrY', videobasename)
            result = list(tu.archive_urls(
                ['https://www.youtube.com/watch?v=KdsN9YhkDrY']))
            expected_result = [(
                'youtube-KdsN9YhkDrY',
                {'mediatype': 'movies',
                 'creator': 'RelaxingWorld',
                 'collection': 'opensource_movies',
                 'title': 'Epic Ramadan - Video Background HD1080p',
                 'description': ('If you enjoy my work, please consider Subscribe to my NEW '
                                 'channel for more videos: <br>'
                                 'https://www.youtube.com/MusicForRelaxation?sub_confirmation=1 <br>'
                                 '▷ If you use this video, please put credits to my channel '
                                 'in description: <br>'
                                 'Source from RelaxingWorld: https://goo.gl/HsW75m<br>'
                                 '<br>'
                                 '▷ Also, do not forget to Subscribe to my channel. Thanks! '
                                 '<br/><br/>Source: <a '
                                 'href="https://www.youtube.com/watch?v=KdsN9YhkDrY">'
                                 'https://www.youtube.com/watch?v=KdsN9YhkDrY</a><br/>Uploader: '
                                 '<a '
                                 'href="http://www.youtube.com/channel/UCWpsozCMdAnfI16rZHQ9XDg">'
                                 'RelaxingWorld</a>'
                                 ),
                 'date': '2016-06-25',
                 'year': '2016',
                 'subject': ('Youtube;video;Film & Animation;Video Background;'
                             'Footage;Animation;Cinema;Royalty Free Videos;'
                             'Stock Video Footage;Video Backdrops;'
                             'Amazing Nature;youtube;HD;1080p;Creative Commons Videos;'
                             'relaxing music;Ramadan;'),
                 'originalurl': 'https://www.youtube.com/watch?v=KdsN9YhkDrY',
                 'licenseurl': '',
                 'scanner': SCANNER})]
            self.assertEqual(expected_result, result)
|
gpl-3.0
| 3,364,535,364,861,585,000
| 42.016611
| 108
| 0.534639
| false
|
idaholab/raven
|
scripts/TestHarness/testers/RavenFramework.py
|
1
|
11704
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RavenFramework is a tool to test raven inputs.
"""
from __future__ import absolute_import
import os
import subprocess
import sys
import distutils.version
import platform
from Tester import Tester
import OrderedCSVDiffer
import UnorderedCSVDiffer
import XMLDiff
import TextDiff
import ExistsDiff
import RAVENImageDiff
# Set this outside the class because the framework directory is constant for
# each instance of this Tester, and in addition, there is a problem with the
# path by the time you call it in __init__ that causes it to think its absolute
# path is somewhere under tests/framework.
# Be aware that if this file changes its location, this variable should also be
# changed.
myDir = os.path.dirname(os.path.realpath(__file__))
RAVENDIR = os.path.abspath(os.path.join(myDir, '..', '..', '..', 'framework'))
# Need to add the directory for AMSC for doing module checks.
os.environ["PYTHONPATH"] = os.path.join(RAVENDIR, 'contrib') +\
  os.pathsep + os.environ.get("PYTHONPATH", "")
# Temporarily extend sys.path just long enough to import library_handler from
# the shared scripts directory, then pop it so module search paths stay clean.
scriptDir = os.path.abspath(os.path.join(RAVENDIR, '..', 'scripts'))
sys.path.append(scriptDir)
import library_handler
sys.path.pop()
# Evaluate library availability/version status once at module load time; these
# results are shared by every RavenFramework instance in this test session.
_missingModules, _notQAModules = library_handler.checkLibraries()
_checkVersions = library_handler.checkVersions()
class RavenFramework(Tester):
  """
  RavenFramework is the class to use for testing standard raven inputs.
  A test runs the RAVEN Driver on an input file, then delegates output
  checking to the Differ objects registered in __init__ (csv, xml, text,
  image, existence).
  """

  @staticmethod
  def get_valid_params():
    """
      Returns the parameters that can be used for this class.
      @ In, None
      @ Out, params, _ValidParameters, return the parameters.
    """
    params = Tester.get_valid_params()
    params.add_required_param('input', "The input file to use for this test.")
    params.add_param('output', '', "List of output files that the input should create.")
    params.add_param('csv', '', "List of csv files to check")
    params.add_param('UnorderedCsv', '', "List of unordered csv files to check")
    params.add_param('xml', '', "List of xml files to check")
    params.add_param('UnorderedXml', '', "List of unordered xml files to check")
    params.add_param('xmlopts', '', "Options for xml checking")
    params.add_param('text', '', "List of generic text files to check")
    params.add_param('comment', '-20021986', "Character or string denoting "+
                     "comments, all text to the right of the symbol will be "+
                     "ignored in the diff of text files")
    params.add_param('image', '', "List of image files to check")
    params.add_param('rel_err', '', 'Relative Error for csv files or floats in xml ones')
    params.add_param('required_executable', '', 'Skip test if this executable is not found')
    params.add_param('required_libraries', '', 'Skip test if any of these libraries are not found')
    params.add_param('minimum_library_versions', '',
                     'Skip test if the library listed is below the supplied'+
                     ' version (e.g. minimum_library_versions = \"name1 version1 name2 version2\")')
    params.add_param('skip_if_env', '', 'Skip test if this environmental variable is defined')
    params.add_param('skip_if_OS', '', 'Skip test if the operating system defined')
    params.add_param('test_interface_only', False,
                     'Test the interface only (without running the driven code')
    params.add_param('check_absolute_value', False,
                     'if true the values are compared to the tolerance '+
                     'directectly, instead of relatively.')
    params.add_param('zero_threshold', sys.float_info.min*4.0,
                     'it represents the value below which a float is'+
                     'considered zero (XML comparison only)')
    params.add_param('remove_whitespace', False,
                     'Removes whitespace before comparing xml node text if True')
    params.add_param('remove_unicode_identifier', False,
                     'if true, then remove u infront of a single quote')
    params.add_param('interactive', False,
                     'if true, then RAVEN will be run with interactivity enabled.')
    params.add_param('python3_only', False, 'if true, then only use with Python3')
    params.add_param('ignore_sign', False, 'if true, then only compare the absolute values')
    return params

  def get_command(self):
    """
      Gets the raven command to run this test.
      @ In, None
      @ Out, get_command, string, command to run.
    """
    # Optional Driver flags are inserted between the driver path and the input.
    ravenflag = ''
    if self.specs['test_interface_only']:
      ravenflag += ' interfaceCheck '
    if self.specs['interactive']:
      ravenflag += ' interactiveCheck '
    return self._get_python_command() + " " + self.driver + " " + ravenflag + self.specs["input"]

  def __make_differ(self, specName, differClass, extra=None):
    """
      This adds a differ if the specName has files.
      @ In, specName, string of the list of files to use with the differ.
      @ In, differClass, subclass of Differ, for use with the files.
      @ In, extra, dictionary, extra parameters
      @ Out, None
    """
    if len(self.specs[specName]) == 0:
      #No files, so quit
      return
    # The differ receives a copy of this test's parameters, re-targeted at the
    # file list of the requested spec.
    differParams = dict(self.specs)
    differParams["output"] = self.specs[specName]
    differParams["type"] = differClass.__name__
    if extra is not None:
      differParams.update(extra)
    self.add_differ(differClass(specName, differParams, self.get_test_dir()))

  def __init__(self, name, params):
    """
      Constructor.
      @ In, name, string, name of the test
      @ In, params, dict, parameters for this test (see get_valid_params)
      @ Out, None
    """
    Tester.__init__(self, name, params)
    self.all_files = []
    # Register one differ per output-checking spec that actually lists files.
    self.__make_differ('output', ExistsDiff.Exists)
    self.__make_differ('csv', OrderedCSVDiffer.OrderedCSV)
    self.__make_differ('UnorderedCsv', UnorderedCSVDiffer.UnorderedCSV)
    self.__make_differ('xml', XMLDiff.XML, {"unordered":False})
    self.__make_differ('UnorderedXml', XMLDiff.XML, {"unordered":True})
    self.__make_differ('text', TextDiff.Text)
    self.__make_differ('image', RAVENImageDiff.ImageDiffer)
    self.required_executable = self.specs['required_executable']
    # Space-separated spec strings become lists; empty spec means no
    # requirement.
    self.required_libraries = self.specs['required_libraries'].split(' ')\
      if len(self.specs['required_libraries']) > 0 else []
    self.minimum_libraries = self.specs['minimum_library_versions'].split(' ')\
      if len(self.specs['minimum_library_versions']) > 0 else []
    # %METHOD% in the executable path expands to the build method environment
    # variable (defaults to "opt").
    self.required_executable = self.required_executable.replace("%METHOD%",
                                                                os.environ.get("METHOD", "opt"))
    self.specs['scale_refine'] = False
    self.driver = os.path.join(RAVENDIR, 'Driver.py')

  def check_runnable(self):
    """
      Checks if this test can run.
      @ In, None
      @ Out, check_runnable, boolean, if True can run this test.
    """
    # remove tests based on skipping criteria
    ## required module is missing (this is a failure, not a skip, so missing
    ## environments are loudly visible)
    if _missingModules:
      self.set_fail('skipped (Missing python modules: '+" ".join([m[0] for m in _missingModules])+
                    " PYTHONPATH="+os.environ.get("PYTHONPATH", "")+')')
      return False
    ## required module is present, but too old
    if _notQAModules and _checkVersions:
      self.set_fail('skipped (Incorrectly versioned python modules: ' +
                    " ".join(['{}-{}!={}'.format(*m) for m in _notQAModules]) +
                    " PYTHONPATH="+os.environ.get("PYTHONPATH", "")+')')
      return False
    ## an environment varible value causes a skip
    if len(self.specs['skip_if_env']) > 0:
      envVar = self.specs['skip_if_env']
      if envVar in os.environ:
        self.set_skip('skipped (found environmental variable "'+envVar+'")')
        return False
    ## OS
    if len(self.specs['skip_if_OS']) > 0:
      skipOs = [x.strip().lower() for x in self.specs['skip_if_OS'].split(',')]
      # get simple-name platform (options are Linux, Windows, Darwin, or SunOS that I've seen)
      currentOs = platform.system().lower()
      # replace Darwin with more expected "mac"
      if currentOs == 'darwin':
        currentOs = 'mac'
      if currentOs in skipOs:
        self.set_skip('skipped (OS is "{}")'.format(currentOs))
        return False
    ## per-test required importable libraries
    for lib in self.required_libraries:
      found, _, _ = library_handler.checkSingleLibrary(lib)
      if not found:
        self.set_skip('skipped (Unable to import library: "{}")'.format(lib))
        return False
    if self.specs['python3_only'] and not library_handler.inPython3():
      self.set_skip('Python 3 only')
      return False
    ## minimum library versions come as flat "name version" pairs, so an odd
    ## count indicates a malformed spec
    i = 0
    if len(self.minimum_libraries) % 2:
      self.set_skip('skipped (libraries are not matched to versions numbers: '
                    +str(self.minimum_libraries)+')')
      return False
    while i < len(self.minimum_libraries):
      libraryName = self.minimum_libraries[i]
      libraryVersion = self.minimum_libraries[i+1]
      found, _, actualVersion = library_handler.checkSingleLibrary(libraryName, version='check')
      if not found:
        self.set_skip('skipped (Unable to import library: "'+libraryName+'")')
        return False
      # NOTE(review): distutils.version is deprecated and removed in
      # Python 3.12; consider packaging.version when modernizing.
      if distutils.version.LooseVersion(actualVersion) < \
         distutils.version.LooseVersion(libraryVersion):
        self.set_skip('skipped (Outdated library: "'+libraryName+'")')
        return False
      i += 2
    ## required external executable must exist and exit cleanly when invoked
    if len(self.required_executable) > 0 and \
       not os.path.exists(self.required_executable):
      self.set_skip('skipped (Missing executable: "'+self.required_executable+'")')
      return False
    try:
      if len(self.required_executable) > 0 and \
         subprocess.call([self.required_executable], stdout=subprocess.PIPE) != 0:
        self.set_skip('skipped (Failing executable: "'+self.required_executable+'")')
        return False
    except Exception as exp:
      self.set_skip('skipped (Error when trying executable: "'
                    +self.required_executable+'")'+str(exp))
      return False
    ## sanity check: a test must not list the same expected output file twice
    filenameSet = set()
    duplicateFiles = []
    for filename in self.__get_created_files():
      if filename not in filenameSet:
        filenameSet.add(filename)
      else:
        duplicateFiles.append(filename)
    if len(duplicateFiles) > 0:
      self.set_skip('[incorrect test] duplicated files specified: '+
                    " ".join(duplicateFiles))
      return False
    return True

  def __get_created_files(self):
    """
      Returns all the files used by this test that need to be created
      by the test. Note that they will be deleted at the start of running
      the test.
      @ In, None
      @ Out, createdFiles, [str], list of files created by the test.
    """
    runpath = self.get_test_dir()
    removeFiles = self.get_differ_remove_files()
    return removeFiles+list(os.path.join(runpath, file) for file in self.all_files)

  def prepare(self):
    """
      Get the test ready to run by removing files that should be created.
      @ In, None
      @ Out, None
    """
    # Stale outputs from a previous run would make existence checks pass
    # spuriously, so remove them before running.
    for filename in self.__get_created_files():
      if os.path.exists(filename):
        os.remove(filename)

  def process_results(self, _):
    """
      Check to see if the test has passed.
      @ In, ignored, string, output of test.
      @ Out, None
    """
    # Output checking is delegated to the registered differs; reaching this
    # point means the run itself completed, so mark the run successful.
    self.set_success()
|
apache-2.0
| -1,925,632,087,378,102,500
| 42.029412
| 100
| 0.648838
| false
|
bkuczenski/lca-tools
|
antelope_reports/tables/base.py
|
1
|
11488
|
"""
Functions for creating tables for useful / important comparisons. These are analogous to charts in that they
are forms of output and it's not clear where they belong.
Lists of tabular outputs:
* process or fragment Inventory
* compare process inventories
* compare allocations of a multioutput process
* compare LCIA factors for different methods
* compare an LCIA method with the components of one or more Lcia Results using it
Here's another thing: right now I'm using dynamic grid to show these in the window... but wouldn't it perhaps be
preferable to use pandas? doesn't pandas afford all sorts of useful features, like ...
um...
what is pandas good for again? for working with data frames. Not necessarily for creating data frames.
Most likely, I could modify dynamic_grid to *return* a dataframe instead of drawing a table.
"""
from collections import defaultdict
from pandas import DataFrame
def printable(tup, width=8):
    """Render each entry of *tup* as display text and return a tuple.

    Strings pass through unchanged, ``None`` becomes the empty string, and
    anything else is formatted as a right-aligned ``%g`` number *width*
    characters wide; values that cannot be formatted numerically render as
    a right-aligned ``'----'`` placeholder.
    """
    def _render(value):
        if isinstance(value, str):
            return value
        if value is None:
            return ''
        try:
            return '%*.3g' % (width, value)
        except TypeError:
            # non-numeric, non-string entry: show a dashed placeholder
            return '%*.*s' % (width, width, '----')

    return tuple(_render(value) for value in tup)
class BaseTableOutput(object):
    """
    A prototype class for storing and returning tabular information. This should ultimately be adopted in places
    where dynamic_grids are used, or where TeX or excel tables are produced (like in lca_matrix foreground output
    generators) but for now it is just being used to provide some separation of concerns for the flowables super-grid.
    At the heart is a dict whose key is a 2-tuple of (row signifier, column index). The row signifier can be any
    hashable object, but the column indices are always sequential. re-ordering columns is something we do not feel
    particularly like enabling at the present time.
    The user creates the table with initialization parameters as desired, and then builds out the table by adding
    columns in sequence.
    The table has an inclusion criterion for the iterables (which could be None)-- if the criterion is met, the object
    is added; if not, it is skipped. The criterion can change, but (since the table contents are static) this will not
    result in columns being re-iterated.
    Subclasses MAY redefine:
     _returns_sets: determines whether each grid item is singly or multiply valued
    Subclasses MUST implement:
     _near_headings -- column names for left-side headings
     _generate_items(col) -- argument is a column iterable - generates items
     _pull_row_from_item(item) -- argument is one of the objects returned by the column iteration, returns row key
     _extract_data_from_item -- argument is an dict from the grid dict, returns either a dict or an immutable object
    """
    _near_headings = '',  # should be overridden
    _far_headings = '',  # should be overridden
    _returns_sets = False

    def _pull_row_from_item(self, item):
        """
        Returns the row tuple from an item, for insertion into the rows set. meant to be overridden
        :param item:
        :return: always a tuple. default item,
        """
        row = item
        # if not self._returns_sets:
        return row,

    def _pull_note_from_item(self, item):
        """
        Returns the "long" / descriptive text appended to the right-hand side of the table. should return a str.
        Only used if _returns_sets is false (otherwise, the sets indicate the row + subrow labels)
        This is may turn out to be totally silly / pointless.
        :param item:
        :return:
        """
        return ''

    def _generate_items(self, iterable):
        """
        yields the items from a column entry, filtered by the criterion. Meant to be overridden.
        :param iterable:
        :return:
        """
        for item in iterable:
            if self._criterion(item):
                yield item

    def _extract_data_from_item(self, item):
        """
        note: dict item is a list of components
        Determines how to get the data point from the item/list. Meant to be overridden.
        If self._returns_sets is true, should return a dict. Else should return an immutable.
        :param item:
        :return: a string
        """
        return item

    def _header_row(self):
        """
        Returns a tuple of columns for the header row: near headings, then one
        'C<i>' label per data column, then the far headings.
        :return:
        """
        header = self._near_headings
        for i, _ in enumerate(self._columns):
            header += ('C%d' % i),
        header += self._far_headings  # placeholder for row notes / subitem keys
        return header

    def _build_near_header(self, row, prev):
        # repeat values from the previous printed row are collapsed to '""'
        the_row = []
        for i, _ in enumerate(self._near_headings):
            if prev is not None:
                if prev[i] == row[i]:
                    the_row.append('""')
                    continue
            the_row.append('%s' % row[i])
        return the_row

    def _build_row(self, row, prev=None):
        """
        Returns a single row as a list (or, when _returns_sets, a list of sub-rows).
        :param row:
        :param prev: [None] previous row printed (input, not output). Used to suppress header output for repeat entries.
        :return:
        """
        # first build the near header
        the_row = self._build_near_header(row, prev)
        data_keys = set()
        data_vals = []
        # first pass: get all the data / keys
        for i, _ in enumerate(self._columns):
            data = self._extract_data_from_item(self._d[row, i])
            if isinstance(data, dict):
                if not self._returns_sets:
                    raise TypeError('multiple values returned but subclass does not allow them!')
                for k in data.keys():
                    data_keys.add(k)
            data_vals.append(data)
        # second pass: build the sub-table by rows
        if self._returns_sets:
            the_rows = []
            _ftt = True  # first time through
            keys = tuple(sorted(data_keys, key=lambda x: x[-2]))
            for k in keys:
                if not _ftt:
                    # subsequent sub-rows leave the near-header columns blank
                    the_row = ['' for _ in range(len(self._near_headings))]
                for i, _ in enumerate(self._columns):
                    if k in data_vals[i]:
                        the_row.append(data_vals[i][k])
                    else:
                        the_row.append(None)
                the_row.append(k)
                if _ftt:
                    the_row.append(self._notes[row])
                else:
                    the_row.append('')
                the_rows.append(the_row)
                _ftt = False
            return the_rows
        else:
            the_row.extend(data_vals)
            # add notes
            the_row.append(self._notes[row])
            return the_row

    def __init__(self, *args, criterion=None):
        """
        Provide 0 or more positional arguments as data columns; add data columns later with add_column(arg)
        :param args: sequential data columns
        :param criterion: A callable expression that returns true if a given item should be included
        """
        self._d = defaultdict(list)
        if callable(criterion):
            self._criterion = criterion
        else:
            if criterion is not None:
                print('Ignoring non-callable criterion')
            self._criterion = lambda x: True
        self._rows = set()  # set of valid keys to dict
        self._notes = dict()
        self._columns = []  # list of columns in the order added
        # a valid reference consists of (x, y) where x in self._rows and y < len(self._columns)
        for arg in args:
            self.add_column(arg)

    def _add_rowitem(self, col_idx, item, row=None):
        if row is None:
            row = self._pull_row_from_item(item)
            self._rows.add(row)
        if row not in self._notes:
            self._notes[row] = self._pull_note_from_item(item)
        self._d[row, col_idx].append(item)

    def add_column(self, arg):
        """
        Append one data column; items passing the criterion are binned by row key.
        """
        col_idx = len(self._columns)
        for k in self._generate_items(arg):
            self._add_rowitem(col_idx, k)
        self._columns.append(arg)

    def _sorted_rows(self):
        # sort on the stringified row tuple so heterogeneous keys still order
        for row in sorted(self._rows, key=lambda x: tuple([str(k) for k in x])):
            yield row

    def text(self, width=10, hdr_width=24, max_width=112, expanded=True):
        """
        Outputs the table in text format to stdout.
        :return: nothing.
        """
        header = self._header_row()
        prev = None
        body = []
        width = max(6, width)
        wds = [len(header[i]) for i in range(len(self._near_headings))]
        # determine column widths
        for row in self._sorted_rows():
            prt_row = self._build_row(row, prev=prev)
            if self._returns_sets:
                wds = [min(max(wds[i], len('%s' % prt_row[0][i])), hdr_width) for i in range(len(self._near_headings))]
            else:
                wds = [min(max(wds[i], len('%s' % prt_row[i])), hdr_width) for i in range(len(self._near_headings))]
            body.append(prt_row)
            prev = row
        # build display string
        rem_width = max_width
        fmt = ''
        for i in wds:
            rem_width -= i
            fmt += '%%-%d.%ds ' % (i, i)
            rem_width -= 1
        for i in range(len(self._columns)):
            rem_width -= width
            fmt += '%%-%d.%ds ' % (width, width)
            rem_width -= 1
        if rem_width < 0:
            # uh oh negative rem width: widen freely; set remainder to 10 chars
            max_width -= (rem_width - 10)
            rem_width = 10
        fmt += '%%-%d.%ds' % (rem_width, rem_width)
        if self._returns_sets:
            fmt += ' %s'
        print(fmt % header)
        print('-' * max_width)
        for row in body:
            if self._returns_sets:
                for subrow in row:  # sorted(row, key=lambda x: x[-2])
                    print(fmt % printable(subrow, width=width))
            else:
                print(fmt % printable(row, width=width))
        print(fmt % header)
        print('\nColumns:')
        for i, c in enumerate(self._columns):
            print('C%d: %s' % (i, c))

    def dataframe(self):
        """
        Build a pandas DataFrame of the (stringified) table.

        FIX: the previous implementation grew the frame with DataFrame.append
        (one copy per row), which was deprecated in pandas 1.4 and removed in
        pandas 2.0. We now accumulate plain records and construct the frame
        once, which is also O(n) instead of O(n^2). Rows longer than the
        header (the sub-row note column in _returns_sets mode) are truncated,
        matching the old dict(zip(...)) behavior; values are assigned
        positionally, so duplicate header labels each keep their own value.
        """
        header = self._header_row()
        records = []
        prev = None
        for row in self._sorted_rows():
            if self._returns_sets:
                for r in self._build_row(row):
                    records.append(printable(r)[:len(header)])
            else:
                records.append(printable(self._build_row(row, prev=prev))[:len(header)])
            prev = row
        return DataFrame(records, columns=header)

    def to_excel(self, xl_writer, sheetname, width_scaling=0.75):
        """
        Must supply a pandas XlsxWriter. This routine does not save the document.
        :param xl_writer:
        :param sheetname:
        :param width_scaling: fraction of the longest cell text used as column width
        :return:
        """
        df = self.dataframe()
        df.to_excel(xl_writer, sheet_name=sheetname)
        sht = xl_writer.sheets[sheetname]
        # size the near/far heading columns to (roughly) fit their content;
        # with duplicate labels only the first matching column is resized
        for k in self._near_headings + self._far_headings:
            ix = df.columns.tolist().index(k) + 1
            mx = max([7, width_scaling * df[k].astype(str).str.len().max()])
            sht.set_column(ix, ix, width=mx)
|
gpl-2.0
| -5,190,139,048,839,691,000
| 34.9
| 120
| 0.566591
| false
|
saaros/pghoard
|
pghoard/config.py
|
1
|
5845
|
"""
pghoard - configuration validation
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
from pghoard.common import convert_pg_command_version_to_number
from pghoard.postgres_command import PGHOARD_HOST, PGHOARD_PORT
from pghoard.rohmu import get_class_for_transfer
from pghoard.rohmu.errors import InvalidConfigurationError
from pghoard.rohmu.snappyfile import snappy
import json
import os
import subprocess
def set_config_defaults(config, *, check_commands=True):
    """Fill in defaults for all optional pghoard configuration keys, in place.

    :param config: parsed configuration dict; mutated and also returned
    :param check_commands: when True, run each pg_* binary with --version and
        record its numeric version; otherwise the version is set to None
    :raises InvalidConfigurationError: for malformed object_storage sections
    :return: the same ``config`` dict, with defaults applied
    """
    # TODO: consider implementing a real configuration schema at some point
    # misc global defaults
    config.setdefault("backup_location", None)
    config.setdefault("http_address", PGHOARD_HOST)
    config.setdefault("http_port", PGHOARD_PORT)
    config.setdefault("alert_file_dir", config.get("backup_location") or os.getcwd())
    config.setdefault("json_state_file_path", "/tmp/pghoard_state.json")  # XXX: get a better default
    config.setdefault("log_level", "INFO")
    config.setdefault("path_prefix", "")
    config.setdefault("upload_retries_warning_limit", 3)
    # set command paths and check their versions
    for command in ["pg_basebackup", "pg_receivexlog"]:
        command_path = config.setdefault(command + "_path", "/usr/bin/" + command)
        if check_commands:
            version_output = subprocess.check_output([command_path, "--version"])
            version_string = version_output.decode("ascii").strip()
            config[command + "_version"] = convert_pg_command_version_to_number(version_string)
        else:
            config[command + "_version"] = None
    # default to 5 compression and transfer threads
    config.setdefault("compression", {}).setdefault("thread_count", 5)
    config.setdefault("transfer", {}).setdefault("thread_count", 5)
    # default to prefetching min(#compressors, #transferagents) - 1 objects so all
    # operations where prefetching is used run fully in parallel without waiting to start
    config.setdefault("restore_prefetch", min(
        config["compression"]["thread_count"],
        config["transfer"]["thread_count"]) - 1)
    # if compression algorithm is not explicitly set prefer snappy if it's available
    if snappy is not None:
        config["compression"].setdefault("algorithm", "snappy")
    else:
        config["compression"].setdefault("algorithm", "lzma")
    config["compression"].setdefault("level", 0)
    # defaults for sites
    config.setdefault("backup_sites", {})
    for site_name, site_config in config["backup_sites"].items():
        site_config.setdefault("active", True)
        site_config.setdefault("active_backup_mode", "pg_receivexlog")
        site_config.setdefault("basebackup_count", 2)
        site_config.setdefault("basebackup_interval_hours", 24)
        site_config.setdefault("basebackup_mode",
                               "pipe" if site_config.get("stream_compression") else "basic")
        site_config.setdefault("encryption_key_id", None)
        site_config.setdefault("object_storage", None)
        site_config.setdefault("pg_xlog_directory", "/var/lib/pgsql/data/pg_xlog")
        # validate the object_storage section: it must name a known storage
        # type, and a "local" store must not overwrite the backup_location
        obj_store = site_config["object_storage"] or {}
        if not obj_store:
            pass
        elif "storage_type" not in obj_store:
            raise InvalidConfigurationError("Site {!r}: storage_type not defined for object_storage".format(site_name))
        elif obj_store["storage_type"] == "local" and obj_store.get("directory") == config.get("backup_location"):
            raise InvalidConfigurationError(
                "Site {!r}: invalid 'local' target directory {!r}, must be different from 'backup_location'".format(
                    site_name, config.get("backup_location")))
        else:
            try:
                get_class_for_transfer(obj_store["storage_type"])
            except ImportError as ex:
                raise InvalidConfigurationError(
                    "Site {0!r} object_storage: {1.__class__.__name__!s}: {1!s}".format(site_name, ex))
    return config
def read_json_config_file(filename, *, check_commands=True, add_defaults=True):
    """Load a JSON configuration file and optionally apply defaults.

    Raises InvalidConfigurationError for a missing file, invalid JSON or
    any other OS-level open failure so callers see a single error type.
    """
    try:
        with open(filename, "r") as config_file:
            parsed = json.load(config_file)
    except FileNotFoundError:
        raise InvalidConfigurationError("Configuration file {!r} does not exist".format(filename))
    except ValueError as ex:
        # json.JSONDecodeError is a ValueError subclass
        message = "Configuration file {!r} does not contain valid JSON: {}".format(filename, str(ex))
        raise InvalidConfigurationError(message)
    except OSError as ex:
        message = "Configuration file {!r} can't be opened: {}".format(filename, ex.__class__.__name__)
        raise InvalidConfigurationError(message)
    if add_defaults:
        return set_config_defaults(parsed, check_commands=check_commands)
    return parsed
def get_site_from_config(config, site):
    """Resolve and validate the backup site name to operate on.

    If *site* is None it is only accepted when the configuration defines
    exactly one site, which is then returned.  An unknown explicit site
    name raises InvalidConfigurationError.
    """
    sites = config.get("backup_sites")
    if not sites:
        raise InvalidConfigurationError("No backup sites defined in configuration")
    if site is None:
        if len(sites) > 1:
            raise InvalidConfigurationError(
                "Backup site not set and configuration file defines {} sites: {}".format(
                    len(sites), sorted(sites)))
        # exactly one site configured: use it implicitly
        return next(iter(sites))
    if site in sites:
        return site
    n_sites = "{} other site{}".format(len(sites), "s" if len(sites) > 1 else "")
    raise InvalidConfigurationError(
        "Site {!r} not defined in configuration file. {} are defined: {}".format(
            site, n_sites, sorted(sites)))
def key_lookup_for_site(config, site):
    """Return a callable mapping an encryption key id to the site's private key."""
    def lookup(key_id):
        # resolved lazily on each call so later config mutations are seen
        site_keys = config["backup_sites"][site]["encryption_keys"]
        return site_keys[key_id]["private"]
    return lookup
|
apache-2.0
| 8,807,986,599,779,522,000
| 45.76
| 119
| 0.648417
| false
|
anntzer/seaborn
|
seaborn/tests/test_rcmod.py
|
1
|
8200
|
import numpy as np
import matplotlib as mpl
import nose
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from .. import rcmod, palettes, utils
class RCParamTester(object):
    """Mixin with helpers for checking expected rc values against
    matplotlib's live ``rcParams``."""

    def flatten_list(self, orig_list):
        """Flatten one nesting level, promoting scalars to 1-element sequences."""
        flat = []
        for entry in orig_list:
            flat.extend(np.atleast_1d(entry))
        return flat

    def assert_rc_params(self, params):
        """Assert every entry in *params* matches matplotlib's rcParams."""
        for key, expected in params.items():
            # Various subtle issues in matplotlib lead to unexpected
            # values for the backend rcParam, which isn't relevant here
            if key == "backend":
                continue
            actual = mpl.rcParams[key]
            if isinstance(expected, np.ndarray):
                npt.assert_array_equal(actual, expected)
            else:
                nt.assert_equal((key, actual), (key, expected))
class TestAxesStyle(RCParamTester):
    """Tests for seaborn's axes_style()/set_style() rc handling."""

    # every named style seaborn ships
    styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]

    def test_default_return(self):
        """axes_style() with no args reflects the currently active params."""
        current = rcmod.axes_style()
        self.assert_rc_params(current)

    def test_key_usage(self):
        """Each named style defines exactly the expected set of style keys."""
        _style_keys = set(rcmod._style_keys)
        for style in self.styles:
            # symmetric difference must be empty: no missing, no extra keys
            nt.assert_true(not set(rcmod.axes_style(style)) ^ _style_keys)

    def test_bad_style(self):
        """An unknown style name raises ValueError."""
        with nt.assert_raises(ValueError):
            rcmod.axes_style("i_am_not_a_style")

    def test_rc_override(self):
        """rc overrides apply to known keys and silently drop unknown ones."""
        rc = {"axes.facecolor": "blue", "foo.notaparam": "bar"}
        out = rcmod.axes_style("darkgrid", rc)
        nt.assert_equal(out["axes.facecolor"], "blue")
        nt.assert_not_in("foo.notaparam", out)

    def test_set_style(self):
        """set_style(style) pushes the style dict into mpl.rcParams."""
        for style in self.styles:
            style_dict = rcmod.axes_style(style)
            rcmod.set_style(style)
            self.assert_rc_params(style_dict)

    def test_style_context_manager(self):
        """axes_style works both as a context manager and as a decorator,
        restoring the previous params on exit."""
        rcmod.set_style("darkgrid")
        orig_params = rcmod.axes_style()
        context_params = rcmod.axes_style("whitegrid")

        with rcmod.axes_style("whitegrid"):
            self.assert_rc_params(context_params)
        self.assert_rc_params(orig_params)

        @rcmod.axes_style("whitegrid")
        def func():
            self.assert_rc_params(context_params)
        func()
        self.assert_rc_params(orig_params)

    def test_style_context_independence(self):
        """Style keys and context keys must not be the same set."""
        nt.assert_true(set(rcmod._style_keys) ^ set(rcmod._context_keys))

    def test_set_rc(self):
        """set(rc=...) applies arbitrary rc overrides."""
        rcmod.set(rc={"lines.linewidth": 4})
        nt.assert_equal(mpl.rcParams["lines.linewidth"], 4)
        rcmod.set()

    def test_set_with_palette(self):
        """set(palette=...) accepts a palette name or a list of colors,
        with or without color-code registration."""
        rcmod.reset_orig()
        rcmod.set(palette="deep")
        assert utils.get_color_cycle() == palettes.color_palette("deep", 10)
        rcmod.reset_orig()
        rcmod.set(palette="deep", color_codes=False)
        assert utils.get_color_cycle() == palettes.color_palette("deep", 10)
        rcmod.reset_orig()

        pal = palettes.color_palette("deep")
        rcmod.set(palette=pal)
        assert utils.get_color_cycle() == palettes.color_palette("deep", 10)
        rcmod.reset_orig()
        rcmod.set(palette=pal, color_codes=False)
        assert utils.get_color_cycle() == palettes.color_palette("deep", 10)
        rcmod.reset_orig()

        rcmod.set()

    def test_reset_defaults(self):
        """reset_defaults() restores matplotlib's built-in defaults."""
        rcmod.reset_defaults()
        self.assert_rc_params(mpl.rcParamsDefault)
        rcmod.set()

    def test_reset_orig(self):
        """reset_orig() restores the params captured at import time."""
        rcmod.reset_orig()
        self.assert_rc_params(mpl.rcParamsOrig)
        rcmod.set()
class TestPlottingContext(RCParamTester):
    """Tests for seaborn's plotting_context()/set_context() rc handling."""

    # every named context seaborn ships
    contexts = ["paper", "notebook", "talk", "poster"]

    def test_default_return(self):
        """plotting_context() with no args reflects the active params."""
        current = rcmod.plotting_context()
        self.assert_rc_params(current)

    def test_key_usage(self):
        """Each named context defines exactly the expected context keys."""
        _context_keys = set(rcmod._context_keys)
        for context in self.contexts:
            missing = set(rcmod.plotting_context(context)) ^ _context_keys
            nt.assert_true(not missing)

    def test_bad_context(self):
        """An unknown context name raises ValueError."""
        with nt.assert_raises(ValueError):
            rcmod.plotting_context("i_am_not_a_context")

    def test_font_scale(self):
        """font_scale multiplies every font-size key uniformly."""
        notebook_ref = rcmod.plotting_context("notebook")
        notebook_big = rcmod.plotting_context("notebook", 2)

        font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
                     "xtick.labelsize", "ytick.labelsize", "font.size"]

        for k in font_keys:
            nt.assert_equal(notebook_ref[k] * 2, notebook_big[k])

    def test_rc_override(self):
        """rc overrides apply to known keys and drop unknown ones."""
        key, val = "grid.linewidth", 5
        rc = {key: val, "foo": "bar"}
        out = rcmod.plotting_context("talk", rc=rc)
        nt.assert_equal(out[key], val)
        nt.assert_not_in("foo", out)

    def test_set_context(self):
        """set_context(context) pushes the context dict into rcParams."""
        for context in self.contexts:
            context_dict = rcmod.plotting_context(context)
            rcmod.set_context(context)
            self.assert_rc_params(context_dict)

    def test_context_context_manager(self):
        """plotting_context works as a context manager and as a decorator,
        restoring the previous params afterwards."""
        rcmod.set_context("notebook")
        orig_params = rcmod.plotting_context()
        context_params = rcmod.plotting_context("paper")

        with rcmod.plotting_context("paper"):
            self.assert_rc_params(context_params)
        self.assert_rc_params(orig_params)

        @rcmod.plotting_context("paper")
        def func():
            self.assert_rc_params(context_params)
        func()
        self.assert_rc_params(orig_params)
class TestPalette(object):
    """Tests for set_palette(), covering name, size and color_codes args."""

    def test_set_palette(self):
        """set_palette accepts a name, an n_colors count, and color_codes."""
        rcmod.set_palette("deep")
        assert utils.get_color_cycle() == palettes.color_palette("deep", 10)

        rcmod.set_palette("pastel6")
        assert utils.get_color_cycle() == palettes.color_palette("pastel6", 6)

        rcmod.set_palette("dark", 4)
        assert utils.get_color_cycle() == palettes.color_palette("dark", 4)

        rcmod.set_palette("Set2", color_codes=True)
        assert utils.get_color_cycle() == palettes.color_palette("Set2", 8)
class TestFonts(object):
    """Tests for font selection via set(font=...) and set_style rc.

    Tests that need Verdana skip instead of failing when the font is
    not installed (see has_verdana below).
    """

    def test_set_font(self):
        """set(font=...) selects the requested font for axis labels."""
        rcmod.set(font="Verdana")

        _, ax = plt.subplots()
        ax.set_xlabel("foo")

        try:
            nt.assert_equal(ax.xaxis.label.get_fontname(),
                            "Verdana")
        except AssertionError:
            if has_verdana():
                # font is present, so the mismatch is a real failure
                raise
            else:
                raise nose.SkipTest("Verdana font is not present")
        finally:
            rcmod.set()

    def test_set_serif_font(self):
        """font='serif' resolves to one of matplotlib's serif fonts."""
        rcmod.set(font="serif")

        _, ax = plt.subplots()
        ax.set_xlabel("foo")

        nt.assert_in(ax.xaxis.label.get_fontname(),
                     mpl.rcParams["font.serif"])

        rcmod.set()

    def test_different_sans_serif(self):
        """Overriding font.sans-serif via set_style rc takes effect."""
        rcmod.set()
        rcmod.set_style(rc={"font.sans-serif":
                            ["Verdana"]})

        _, ax = plt.subplots()
        ax.set_xlabel("foo")

        try:
            nt.assert_equal(ax.xaxis.label.get_fontname(),
                            "Verdana")
        except AssertionError:
            if has_verdana():
                raise
            else:
                raise nose.SkipTest("Verdana font is not present")
        finally:
            rcmod.set()
def has_verdana():
    """Helper to verify if Verdana font is present"""
    # font_manager import is relatively lengthy, so defer it here to avoid
    # paying the cost when other tests in this module run
    import matplotlib.font_manager as mplfm
    try:
        verdana = mplfm.findfont('Verdana', fallback_to_default=False)
    except:  # noqa
        # raised if https://github.com/matplotlib/matplotlib/pull/3435
        # gets accepted: lookup failure means the font is absent
        return False
    try:
        bogus = mplfm.findfont("very_unlikely_to_exist1234",
                               fallback_to_default=False)
    except:  # noqa
        # the bogus name raised but 'Verdana' did not, so Verdana exists
        return True
    # neither lookup raised: Verdana exists only if it resolved to
    # something different from the default the bogus name fell back to
    return verdana != bogus
|
bsd-3-clause
| -3,412,312,456,598,771,000
| 27.975265
| 78
| 0.593293
| false
|
glaudsonml/kurgan-ai
|
tools/sqlmap/plugins/dbms/oracle/fingerprint.py
|
1
|
3732
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.session import setDbms
from lib.core.settings import ORACLE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
    """Oracle-specific DBMS fingerprinting for sqlmap."""

    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.ORACLE)

    def getFingerprint(self):
        """Build the human-readable fingerprint string from the web-server
        OS, banner OS, and (in extensive mode) active/banner/error-message
        fingerprints collected in the knowledge base."""
        value = ""
        wsOsFp = Format.getOs("web server", kb.headersFp)

        if wsOsFp:
            value += "%s\n" % wsOsFp

        if kb.data.banner:
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)

            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp

        value += "back-end DBMS: "

        if not conf.extensiveFp:
            # short form: just the DBMS name
            value += DBMS.ORACLE
            return value

        actVer = Format.getDbms()
        blank = " " * 15  # aligns continuation lines under "back-end DBMS: "
        value += "active fingerprint: %s" % actVer

        if kb.bannerFp:
            banVer = kb.bannerFp["dbmsVersion"] if 'dbmsVersion' in kb.bannerFp else None
            banVer = Format.getDbms([banVer])
            value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)

        htmlErrorFp = Format.getErrorParsedDBMSes()

        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)

        return value

    def checkDbms(self):
        """Confirm the back end is Oracle via boolean-based probes and, in
        extensive mode, actively fingerprint the major version.

        Returns True when Oracle is confirmed, False otherwise."""
        if not conf.extensiveFp and (Backend.isDbmsWithin(ORACLE_ALIASES) or (conf.dbms or "").lower() in ORACLE_ALIASES):
            # user already forced/identified Oracle: trust it, grab banner
            setDbms(DBMS.ORACLE)

            self.getBanner()

            return True

        infoMsg = "testing %s" % DBMS.ORACLE
        logger.info(infoMsg)

        # NOTE: SELECT ROWNUM=ROWNUM FROM DUAL does not work connecting
        # directly to the Oracle database
        if conf.direct:
            result = True
        else:
            result = inject.checkBooleanExpression("ROWNUM=ROWNUM")

        if result:
            infoMsg = "confirming %s" % DBMS.ORACLE
            logger.info(infoMsg)

            # NOTE: SELECT LENGTH(SYSDATE)=LENGTH(SYSDATE) FROM DUAL does
            # not work connecting directly to the Oracle database
            if conf.direct:
                result = True
            else:
                result = inject.checkBooleanExpression("LENGTH(SYSDATE)=LENGTH(SYSDATE)")

            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
                logger.warn(warnMsg)

                return False

            setDbms(DBMS.ORACLE)

            self.getBanner()

            if not conf.extensiveFp:
                return True

            infoMsg = "actively fingerprinting %s" % DBMS.ORACLE
            logger.info(infoMsg)

            # probe versions newest-first; the leading digits of the version
            # string ("11", "10", "9", "8") are compared against
            # PRODUCT_COMPONENT_VERSION (2 chars for >= 10, 1 char below)
            for version in ("11i", "10g", "9i", "8i"):
                number = int(re.search("([\d]+)", version).group(1))
                output = inject.checkBooleanExpression("%d=(SELECT SUBSTR((VERSION),1,%d) FROM SYS.PRODUCT_COMPONENT_VERSION WHERE ROWNUM=1)" % (number, 1 if number < 10 else 2))

                if output:
                    Backend.setVersion(version)
                    break

            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.ORACLE
            logger.warn(warnMsg)

            return False

    def forceDbmsEnum(self):
        """Oracle identifiers default to upper case, so normalize the
        user-supplied database/table names before enumeration."""
        if conf.db:
            conf.db = conf.db.upper()

        if conf.tbl:
            conf.tbl = conf.tbl.upper()
|
apache-2.0
| -7,529,427,755,926,559,000
| 28.856
| 178
| 0.580118
| false
|
noca/pythonlibs
|
cache.py
|
1
|
1939
|
# -*- coding: utf-8 -*-
'''
Common cache method for python. Check usage on
Example.
'''
class DontCache(Exception):
    """Raised by a compute_key function to skip caching for that call."""
    pass
def cache(compute_key, container_factory):
    """Decorator factory that memoizes a function's results.

    compute_key -- called with the same arguments as the wrapped function;
        must return a value usable in the cache key, or raise DontCache
        to bypass caching for that call.
    container_factory -- returns a dict-like container to store results in,
        or None to disable caching entirely for that call.

    The cache key is namespaced with the wrapped function's module and
    name, so several decorated functions can share one container.
    """
    from functools import wraps

    # sentinel so that a legitimately cached None is distinguishable
    # from "not cached yet"
    marker = object()

    def decorator(func):
        @wraps(func)  # preserves __name__/__module__/__doc__ for introspection
        def replacement(*args, **kwargs):
            cache = container_factory()
            if cache is None:
                # caching disabled for this call
                return func(*args, **kwargs)
            try:
                key = compute_key(*args, **kwargs)
            except DontCache:
                return func(*args, **kwargs)

            key = '{0}.{1}:{2}'.format(func.__module__,
                                       func.__name__,
                                       key)
            cached_value = cache.get(key, marker)
            if cached_value is marker:
                cached_value = cache[key] = func(*args, **kwargs)
            return cached_value
        return replacement
    return decorator
# Usage example (runs only when this module is executed directly)
if __name__ == '__main__':
    # A container factory provides the dict-like object used for storage;
    # the cache's scope/lifetime is whatever this factory returns.  Here a
    # single module-global dict is lazily created and reused.
    def local_container():
        if 'example_cache' not in globals():
            globals()['example_cache'] = dict()
        return globals()['example_cache']

    # Convenience wrapper binding the generic cache() to this container,
    # so call sites only supply their key function.
    def local_cache(compute_key):
        return cache(compute_key, local_container)

    # The compute_key function takes exactly the same parameters as the
    # function being cached; it is specific to that function.
    def _cachekey_exmample_func(selects, filters):
        key = ''
        for s in selects:
            key += s + ':'
        for f in filters:
            key += f + '-'
        return key

    # Decorating the target function is all that is needed.
    @local_cache(_cachekey_exmample_func)
    def sql_query(selects, filters):
        return
|
bsd-2-clause
| -4,467,158,069,026,433,000
| 25.561644
| 67
| 0.542548
| false
|
clchiou/garage
|
py/g1/devtools/buildtools/tests/test_buildtools.py
|
1
|
2374
|
import unittest
import unittest.mock
import distutils.errors
from g1.devtools import buildtools
class BuildtoolsTest(unittest.TestCase):
    """Unit tests for g1.devtools.buildtools command helpers."""

    @unittest.mock.patch(buildtools.__name__ + '.distutils.file_util')
    def test_make_copy_files(self, mock_file_util):
        """make_copy_files: option defaults, validation errors in order
        (src-dir, dst-dir, missing source file), then the actual copies."""
        mock_cmd = unittest.mock.Mock()
        mock_cmd.FILENAMES = []
        mock_cmd.SRC_DIR = None
        mock_cmd.DST_DIR = None

        cls = buildtools.make_copy_files(filenames=[])

        cls.initialize_options(mock_cmd)
        self.assertIsNone(mock_cmd.src_dir)
        self.assertIsNone(mock_cmd.dst_dir)

        with self.assertRaisesRegex(
            distutils.errors.DistutilsOptionError,
            r'--src-dir is required',
        ):
            cls.finalize_options(mock_cmd)

        mock_cmd.src_dir = 'a/b'
        with self.assertRaisesRegex(
            distutils.errors.DistutilsOptionError,
            r'--dst-dir is required',
        ):
            cls.finalize_options(mock_cmd)

        mock_cmd.dst_dir = 'c/d'
        mock_cmd.FILENAMES = ['e', 'f']
        with self.assertRaisesRegex(
            distutils.errors.DistutilsOptionError,
            r'source file does not exist: a/b/e',
        ):
            cls.finalize_options(mock_cmd)

        # run() should be the only thing that actually copies
        mock_file_util.copy_file.assert_not_called()
        cls.run(mock_cmd)
        self.assertEqual(
            mock_file_util.copy_file.mock_calls,
            [
                unittest.mock.call('a/b/e', 'c/d/e', preserve_mode=False),
                unittest.mock.call('a/b/f', 'c/d/f', preserve_mode=False),
            ],
        )

    @unittest.mock.patch(buildtools.__name__ + '.subprocess')
    def test_read_pkg_config(self, subprocess_mock):
        """read_package_config parses pkg-config output, de-duplicating
        repeated flags and stripping quotes around paths with spaces."""
        subprocess_mock.run.return_value.stdout = (
            b'-I"/s o m e/where/include" -I"/s o m e/where/include" '
            b'-L"/s o m e/where/lib" -L"/s o m e/where/lib" '
            b'-lfoo -lfoo '
            b'-DMSG="hello world" -DMSG="hello world" '
        )
        self.assertEqual(
            buildtools.read_package_config(''),
            buildtools.PackageConfig(
                include_dirs=['/s o m e/where/include'],
                library_dirs=['/s o m e/where/lib'],
                libraries=['foo'],
                extra_compile_args=['-DMSG=hello world'],
            ),
        )


if __name__ == '__main__':
    unittest.main()
|
mit
| -6,761,329,704,171,718,000
| 31.081081
| 74
| 0.556024
| false
|
mblachford/conductor
|
src/util/vcenter.py
|
1
|
9674
|
import os
import sys
import time
import logging
from string import Template
from random import choice
import psphere.client as vcsa_client
from psphere.managedobjects import ClusterComputeResource
from psphere.managedobjects import VirtualMachine
from psphere.managedobjects import HostSystem
from psphere.managedobjects import ResourcePool
from psphere.managedobjects import Task
from psphere.soap import VimFault
log = logging.getLogger(__name__)
# NOTE(review): this class uses Python 2-only syntax ("except X, e",
# "print e"); it will not import under Python 3 without conversion.
class Transport():
    """Thin wrapper around a psphere vCenter connection that can clone
    VMs from templates according to a list of build payload dicts."""

    def __init__(self, target, username, password):
        self.client = self.connect(target, username, password)

    def connect(self,target,username,password,wsdl_location='remote'):
        ''' Instantiate a connection to the target vCenter instance.
        Exits the process on failure.  wsdl_location is currently unused. '''
        try:
            log.info("Attempting to connect to vCenter {}".format(target))
            client = vcsa_client.Client(target,username,password)
            log.info("Connected to vCenter {}".format(target))
            return client
        except Exception, e:
            log.error("Connection to vCenter {} failed. Reason: {}".format(target,e))
            sys.exit(1)

    def disconnect(self):
        ''' Close the connection to the target vCenter instance.
        Exits the process if logout fails. '''
        try:
            log.info("Closing connection to vCenter")
            self.client.logout()
        except Exception, e:
            log.info("Failed to gracefully close the connection to vCenter. Reason: {}".format(e))
            sys.exit(1)

    def clone(self,cursor,payload):
        ''' Clone the template once per entry in payload (a list of dicts
        with cluster/template/vm_name/network keys).  The payload list is
        consumed destructively via pop(0). '''
        while payload:
            data = payload.pop(0)
            try:
                cluster = ClusterComputeResource.get(cursor, name=data['cluster'])
            except Exception, e:
                # skip this build if the cluster can't be found
                log.error("Unable to locate a cluster resource witht the name {}. Omitting build".format(data['cluster']))
            else:
                pool = cluster.resourcePool
                # host and datastore are picked at random from the cluster
                esxhost = choice(cluster.host)
                datastore = choice(cluster.datastore)
                log.info("Cloning virtual machine named {} into cluster {} from template {}".format(data['vm_name'],data['cluster'],data['template']))
                template = VirtualMachine.get(cursor, name=data['template'])
                folder = cluster.parent.parent.vmFolder
                # assemble the nested vSphere spec objects bottom-up
                _ip_spec = self._vm_ip_spec(cursor, domain = data['domain'],
                                            dns = data['dns'],
                                            gateway = data['gateway'],
                                            ip = data['ip'],
                                            netmask = data['netmask'])
                _adapter_spec = self._vm_adapter_spec(cursor,_ip_spec)
                _net_spec = self._vm_net_spec(cursor,cluster.network, vlan = data['vlan'])
                _custom_spec = self._vm_custom_spec(cursor, _adapter_spec, template = data['template'],
                                                    domain = data['domain'], name = data['vm_name'],
                                                    ip = data['ip'], gateway = data['gateway'],
                                                    netmask = data['netmask'], dns = data['dns'])
                _config_spec = self._vm_config_spec(cursor, _net_spec, memory = data['memory'],
                                                    cpus = data['cpus'], cores = data['cores'],
                                                    name = data['vm_name'])
                _relo_spec = self._vm_relo_spec(cursor,esxhost,datastore,pool)
                _clone_spec = self._vm_clone_spec(cursor, _relo_spec, _config_spec, _custom_spec)
                try:
                    # fire-and-forget: the task is not awaited
                    #self.wait_for_task(template.CloneVM_Task(folder = folder, name = data['vm_name'], spec=_clone_spec))
                    template.CloneVM_Task(folder = folder, name = data['vm_name'], spec=_clone_spec)
                except VimFault, e:
                    print e

    def _vm_config_spec(self,cursor,net_spec,**kwargs):
        """Build the VirtualMachineConfigSpec (memory/cpu/name, plus the
        NIC device change when a net_spec was produced)."""
        config_spec = cursor.create("VirtualMachineConfigSpec")
        config_spec.memoryMB = kwargs['memory']
        config_spec.numCPUs = kwargs['cpus']
        config_spec.name = kwargs['name']
        if not net_spec == None:
            config_spec.deviceChange = net_spec
        else:
            pass
        # cores-per-socket intentionally disabled
        #config_spec.numCoresPerSocket = kwargs['cores']
        return config_spec

    def _vm_ip_spec(self,cursor,**kwargs):
        """Build the CustomizationIPSettings with a fixed IP address."""
        ip_spec = cursor.create("CustomizationIPSettings")
        fixed_ip = cursor.create("CustomizationFixedIp")
        fixed_ip.ipAddress = kwargs['ip']
        ip_spec.dnsDomain = kwargs['domain']
        ip_spec.dnsServerList = kwargs['dns']
        ip_spec.gateway = kwargs['gateway']
        ip_spec.ip = fixed_ip
        ip_spec.subnetMask = kwargs['netmask']
        ip_spec.netBIOS = None
        return ip_spec

    def _vm_net_spec(self,cursor,netinfo,**kwargs):
        """Find the distributed portgroup matching kwargs['vlan'] in
        netinfo and build a vmxnet3 device-change spec for it; returns
        None (with an error logged) if the vlan name is not found."""
        for network in netinfo:
            if network.name == kwargs["vlan"]:
                log.info("Customizing VM network configuration for vlan {}.".format(kwargs['vlan']))
                net = network
                ds_conn = cursor.create("DistributedVirtualSwitchPortConnection")
                ds_conn.portgroupKey = net.key
                ds_conn.switchUuid = "{}".format(net.config.distributedVirtualSwitch.uuid)
                backing = cursor.create("VirtualEthernetCardDistributedVirtualPortBackingInfo")
                backing.port = ds_conn
                connect_info = cursor.create("VirtualDeviceConnectInfo")
                connect_info.allowGuestControl = True
                connect_info.connected = True
                connect_info.startConnected = True
                nic = cursor.create("VirtualVmxnet3")
                nic.backing = backing
                nic.key = 4000
                nic.unitNumber = 0
                nic.addressType = "generated"
                nic.connectable = connect_info
                net_spec = cursor.create("VirtualDeviceConfigSpec")
                net_spec.device = nic
                net_spec.fileOperation = None
                operation = cursor.create("VirtualDeviceConfigSpecOperation")
                net_spec.operation = (operation.add)
                return net_spec
            else:
                pass
        log.error("Unable to find the network named {}. Continuing with out formal network specifciation".format(kwargs['vlan']))
        net_spec = None
        return net_spec

    def _vm_adapter_spec(self,cursor,ip_spec):
        """Wrap an IP spec in a CustomizationAdapterMapping."""
        nic_config = cursor.create("CustomizationAdapterMapping")
        nic_config.adapter = ip_spec
        return nic_config

    def _vm_custom_spec(self,cursor,adapter_spec,**kwargs):
        """Build the guest CustomizationSpec: sysprep text for templates
        whose name contains 'windows', LinuxPrep otherwise."""
        custom_spec = cursor.create("CustomizationSpec")
        host_name = cursor.create("CustomizationFixedName")
        host_name.name = kwargs['name']
        ip_spec = cursor.create("CustomizationGlobalIPSettings")
        ip_spec.dnsServerList = kwargs['dns']
        ip_spec.dnsSuffixList = kwargs['domain']
        if 'windows' in kwargs['template'].lower():
            log.info("Calling windows customization specification")
            sysprep = self._gen_sysprep(**kwargs)
            identity_spec = cursor.create("CustomizationSysprepText")
            identity_spec.value = sysprep
        else:
            log.info("Calling Linux customization specification")
            identity_spec = cursor.create("CustomizationLinuxPrep")
            identity_spec.domain = kwargs['domain']
            identity_spec.hostName = host_name
            identity_spec.hwClockUTC = True
        custom_spec.globalIPSettings = ip_spec
        custom_spec.identity = identity_spec
        custom_spec.nicSettingMap = adapter_spec
        return custom_spec

    def _vm_relo_spec(self,cursor,host,disk,pool):
        """Build the relocate spec pinning host, datastore and pool."""
        relo_spec = cursor.create("VirtualMachineRelocateSpec")
        relo_spec.host = host
        relo_spec.datastore = disk
        relo_spec.transform = "sparse"
        relo_spec.pool = pool
        return relo_spec

    def _vm_clone_spec(self,cursor,relo_spec, config_spec, custom_spec):
        """Combine the relocate/config/customization specs; the clone is
        powered on and is not created as a template."""
        clone_spec = cursor.create("VirtualMachineCloneSpec")
        clone_spec.config = config_spec
        clone_spec.customization = custom_spec
        clone_spec.location = relo_spec
        clone_spec.powerOn = True
        clone_spec.snapshot = None
        clone_spec.template = False
        return clone_spec

    def _gen_sysprep(self,**kwargs):
        ''' Render the Windows unattend template (.unattend.xml next to
        this module) with host/network values.  Hostname is truncated to
        15 characters (NetBIOS limit).
        NOTE(review): CIDR is hard-coded to '26' instead of being derived
        from kwargs['netmask'] -- verify this is intentional. '''
        dir = os.path.abspath(os.path.dirname(__file__))
        raw_file = open('{}/.unattend.xml'.format(dir)).read()
        mod = Template(raw_file)
        if len(kwargs['name']) > 15:
            hname = kwargs['name'][0:15]
        else:
            hname = kwargs['name']
        sysprep = mod.substitute(name = hname,
                                 gateway = kwargs['gateway'],
                                 ip = kwargs['ip'],
                                 cidr = '26',
                                 dns1 = kwargs['dns'].split(',')[0],
                                 dns2 = kwargs['dns'].split(',')[1])
        return sysprep

    def wait_for_task(self,task):
        """Poll a vSphere Task once a second until it leaves the
        queued/running states; True on success, False otherwise."""
        if isinstance(task, Task):
            while task.info.state in ["queued", "running"]:
                time.sleep(1)
                task.update()
            if task.info.state == "success":
                return True
            else:
                log.warn("Task failed: {0}".format(task.info))
                return False
        else:
            log.warning("Passed non task object into wait_for_task")
            return False
|
gpl-2.0
| -5,157,268,249,106,957,000
| 42.576577
| 150
| 0.561919
| false
|
dmayer/time_trial
|
time_trial_gui/lib/rq_result_processor.py
|
1
|
1207
|
from datetime import datetime
from time import sleep
from rq.job import Job
from models.trial import Trial
from redis import Redis
__author__ = 'daniel'
import threading
class RqResultsProcessor(threading.Thread):
    """Background thread polling RQ for results of started Trials.

    A Trial with start_date set but end_date unset is considered
    incomplete; once its RQ job has a result, that result and the finish
    time are written back to the database.  The caller must assign
    ``session`` (a SQLAlchemy session) before starting the thread.
    """

    # SQLAlchemy session; must be set by the owner before start()
    session = None
    # cooperative shutdown flag, set via stop()
    stopped = False

    def stop(self):
        """Request the polling loop to exit after its current pass."""
        self.stopped = True

    def run(self):
        redis_conn = Redis()
        # poll forever (until stop() is called)
        while True:
            # all trials that have started but not yet finished
            incomplete = self.session.query(Trial).filter(Trial.end_date == None).filter(Trial.start_date!=None).all()
            for t in incomplete:
                try:
                    job = Job.fetch(t.job, connection=redis_conn)
                except:
                    # NOTE(review): bare except deliberately swallows any
                    # fetch failure (e.g. expired job) and keeps polling
                    print("Exception occurred. Moving on.")
                    sleep(1)
                    continue

                if job.result is not None:
                    print("Result for " + t.name + " found.")
                    t.result = job.result
                    t.end_date = datetime.now()
                    self.session.add(t)
                    self.session.commit()
                    self.session.expire(t)

            # checked once per polling pass so stop() takes effect promptly
            if self.stopped:
                self.session.close()
                return

            sleep(1)
|
mit
| -4,295,996,394,186,815,000
| 26.431818
| 118
| 0.509528
| false
|
futurely/openai-universe-agents
|
ga3c/NetworkVP.py
|
1
|
12245
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import numpy as np
import tensorflow as tf
from Config import Config
class NetworkVP:
def __init__(self, device, model_name, num_actions):
self.device = device
self.model_name = model_name
self.num_actions = num_actions
self.img_width = Config.IMAGE_WIDTH
self.img_height = Config.IMAGE_HEIGHT
self.img_channels = Config.STACKED_FRAMES
self.learning_rate = Config.LEARNING_RATE_START
self.beta = Config.BETA_START
self.log_epsilon = Config.LOG_EPSILON
self.graph = tf.Graph()
with self.graph.as_default() as g:
with tf.device(self.device):
self._create_graph()
self.sess = tf.Session(
graph=self.graph,
config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
gpu_options=tf.GPUOptions(allow_growth=True)))
self.sess.run(tf.global_variables_initializer())
if Config.TENSORBOARD: self._create_tensor_board()
if Config.LOAD_CHECKPOINT or Config.SAVE_MODELS:
vars = tf.global_variables()
self.saver = tf.train.Saver(
{var.name: var
for var in vars}, max_to_keep=0)
def _create_graph(self):
self.x = tf.placeholder(
tf.float32, [None, self.img_height, self.img_width, self.img_channels],
name='X')
self.y_r = tf.placeholder(tf.float32, [None], name='Yr')
self.var_beta = tf.placeholder(tf.float32, name='beta', shape=[])
self.var_learning_rate = tf.placeholder(tf.float32, name='lr', shape=[])
self.global_step = tf.Variable(0, trainable=False, name='step')
# As implemented in A3C paper
self.n1 = self.conv2d_layer(self.x, 8, 16, 'conv11', strides=[1, 4, 4, 1])
self.n2 = self.conv2d_layer(self.n1, 4, 32, 'conv12', strides=[1, 2, 2, 1])
self.action_index = tf.placeholder(tf.float32, [None, self.num_actions])
_input = self.n2
flatten_input_shape = _input.get_shape()
nb_elements = flatten_input_shape[1] * flatten_input_shape[
2] * flatten_input_shape[3]
self.flat = tf.reshape(_input, shape=[-1, nb_elements._value])
self.d1 = self.dense_layer(self.flat, 256, 'dense1')
self.logits_v = tf.squeeze(
self.dense_layer(self.d1, 1, 'logits_v', func=None), squeeze_dims=[1])
self.cost_v = 0.5 * tf.reduce_sum(
tf.square(self.y_r - self.logits_v), reduction_indices=0)
self.logits_p = self.dense_layer(self.d1, self.num_actions, 'logits_p')
if Config.USE_LOG_SOFTMAX:
self.softmax_p = tf.nn.softmax(self.logits_p)
self.log_softmax_p = tf.nn.log_softmax(self.logits_p)
self.log_selected_action_prob = tf.reduce_sum(
self.log_softmax_p * self.action_index, reduction_indices=1)
self.cost_p_1 = self.log_selected_action_prob * (
self.y_r - tf.stop_gradient(self.logits_v))
self.cost_p_2 = -1 * self.var_beta * \
tf.reduce_sum(self.log_softmax_p * self.softmax_p, reduction_indices=1)
else:
self.softmax_p = (tf.nn.softmax(self.logits_p) + Config.MIN_POLICY) / (
1.0 + Config.MIN_POLICY * self.num_actions)
self.selected_action_prob = tf.reduce_sum(
self.softmax_p * self.action_index, reduction_indices=1)
self.cost_p_1 = tf.log(tf.maximum(self.selected_action_prob, self.log_epsilon)) \
* (self.y_r - tf.stop_gradient(self.logits_v))
self.cost_p_2 = -1 * self.var_beta * \
tf.reduce_sum(tf.log(tf.maximum(self.softmax_p, self.log_epsilon)) *
self.softmax_p, reduction_indices=1)
self.cost_p_1_agg = tf.reduce_sum(self.cost_p_1, reduction_indices=0)
self.cost_p_2_agg = tf.reduce_sum(self.cost_p_2, reduction_indices=0)
self.cost_p = -(self.cost_p_1_agg + self.cost_p_2_agg)
if Config.DUAL_RMSPROP:
self.opt_p = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
self.opt_v = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
else:
self.cost_all = self.cost_p + self.cost_v
self.opt = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
if Config.USE_GRAD_CLIP:
if Config.DUAL_RMSPROP:
self.opt_grad_v = self.opt_v.compute_gradients(self.cost_v)
self.opt_grad_v_clipped = [
(tf.clip_by_norm(g, Config.GRAD_CLIP_NORM), v)
for g, v in self.opt_grad_v if not g is None
]
self.train_op_v = self.opt_v.apply_gradients(self.opt_grad_v_clipped)
self.opt_grad_p = self.opt_p.compute_gradients(self.cost_p)
self.opt_grad_p_clipped = [
(tf.clip_by_norm(g, Config.GRAD_CLIP_NORM), v)
for g, v in self.opt_grad_p if not g is None
]
self.train_op_p = self.opt_p.apply_gradients(self.opt_grad_p_clipped)
self.train_op = [self.train_op_p, self.train_op_v]
else:
self.opt_grad = self.opt.compute_gradients(self.cost_all)
self.opt_grad_clipped = [
(tf.clip_by_average_norm(g, Config.GRAD_CLIP_NORM), v)
for g, v in self.opt_grad
]
self.train_op = self.opt.apply_gradients(self.opt_grad_clipped)
else:
if Config.DUAL_RMSPROP:
self.train_op_v = self.opt_p.minimize(
self.cost_v, global_step=self.global_step)
self.train_op_p = self.opt_v.minimize(
self.cost_p, global_step=self.global_step)
self.train_op = [self.train_op_p, self.train_op_v]
else:
self.train_op = self.opt.minimize(
self.cost_all, global_step=self.global_step)
def _create_tensor_board(self):
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries.append(tf.summary.scalar("Pcost_advantage", self.cost_p_1_agg))
summaries.append(tf.summary.scalar("Pcost_entropy", self.cost_p_2_agg))
summaries.append(tf.summary.scalar("Pcost", self.cost_p))
summaries.append(tf.summary.scalar("Vcost", self.cost_v))
summaries.append(tf.summary.scalar("LearningRate", self.var_learning_rate))
summaries.append(tf.summary.scalar("Beta", self.var_beta))
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram("weights_%s" % var.name, var))
summaries.append(tf.summary.histogram("activation_n1", self.n1))
summaries.append(tf.summary.histogram("activation_n2", self.n2))
summaries.append(tf.summary.histogram("activation_d2", self.d1))
summaries.append(tf.summary.histogram("activation_v", self.logits_v))
summaries.append(tf.summary.histogram("activation_p", self.softmax_p))
self.summary_op = tf.summary.merge(summaries)
self.log_writer = tf.summary.FileWriter("logs/%s" % self.model_name,
self.sess.graph)
def dense_layer(self, input, out_dim, name, func=tf.nn.relu):
in_dim = input.get_shape().as_list()[-1]
d = 1.0 / np.sqrt(in_dim)
with tf.variable_scope(name):
w_init = tf.random_uniform_initializer(-d, d)
b_init = tf.random_uniform_initializer(-d, d)
w = tf.get_variable(
'w', dtype=tf.float32, shape=[in_dim, out_dim], initializer=w_init)
b = tf.get_variable('b', shape=[out_dim], initializer=b_init)
output = tf.matmul(input, w) + b
if func is not None:
output = func(output)
return output
def conv2d_layer(self,
input,
filter_size,
out_dim,
name,
strides,
func=tf.nn.relu):
in_dim = input.get_shape().as_list()[-1]
d = 1.0 / np.sqrt(filter_size * filter_size * in_dim)
with tf.variable_scope(name):
w_init = tf.random_uniform_initializer(-d, d)
b_init = tf.random_uniform_initializer(-d, d)
w = tf.get_variable(
'w',
shape=[filter_size, filter_size, in_dim, out_dim],
dtype=tf.float32,
initializer=w_init)
b = tf.get_variable('b', shape=[out_dim], initializer=b_init)
output = tf.nn.conv2d(input, w, strides=strides, padding='SAME') + b
if func is not None:
output = func(output)
return output
def __get_base_feed_dict(self):
    # Hyper-parameters (annealed beta and learning rate) that every
    # training/logging sess.run feed dict must carry.
    return {
        self.var_beta: self.beta,
        self.var_learning_rate: self.learning_rate
    }
def get_global_step(self):
    """Return the current value of the global training-step counter."""
    return self.sess.run(self.global_step)
def predict_single(self, x):
    # Add a leading batch axis, run the policy head, and strip the batch
    # axis from the result.
    return self.predict_p(x[None, :])[0]
def predict_v(self, x):
    """Run the value head on a batch of states."""
    return self.sess.run(self.logits_v, feed_dict={self.x: x})
def predict_p(self, x):
    """Run the policy head (softmax over actions) on a batch of states."""
    return self.sess.run(self.softmax_p, feed_dict={self.x: x})
def predict_p_and_v(self, x):
    # Fetch both heads in a single sess.run to avoid executing the shared
    # part of the graph twice.
    return self.sess.run(
        [self.softmax_p, self.logits_v], feed_dict={self.x: x})
def train(self, x, y_r, a, trainer_id):
    # Run one optimizer step on a batch of (state, discounted-return,
    # action-index) triples.  trainer_id is accepted for API symmetry with
    # multi-trainer setups but is not used here.
    feed_dict = self.__get_base_feed_dict()
    feed_dict.update({self.x: x, self.y_r: y_r, self.action_index: a})
    self.sess.run(self.train_op, feed_dict=feed_dict)
def log(self, x, y_r, a):
    # Evaluate the merged summaries on this batch and write them to
    # TensorBoard, tagged with the current global step.
    feed_dict = self.__get_base_feed_dict()
    feed_dict.update({self.x: x, self.y_r: y_r, self.action_index: a})
    step, summary = self.sess.run(
        [self.global_step, self.summary_op], feed_dict=feed_dict)
    self.log_writer.add_summary(summary, step)
def _checkpoint_filename(self, episode):
    # Checkpoint path convention: checkpoints/<model_name>_<8-digit episode>.
    # _get_episode_from_filename parses this exact layout back.
    return 'checkpoints/%s_%08d' % (self.model_name, episode)
def _get_episode_from_filename(self, filename):
    """Extract the episode number from a checkpoint filename.

    Inverse of _checkpoint_filename: for 'checkpoints/<model>_<episode>'
    this returns <episode> as an int.

    TODO: hacky way of getting the episode. Ideally the episode should be
    stored as a TF variable; this breaks if the model name itself
    contains '/', '_' or '.' characters that shift the token positions.
    """
    # Raw string so '\.' is a regex escape, not a (deprecated) string
    # escape that would warn on modern Python.
    return int(re.split(r'/|_|\.', filename)[2])
def save(self, episode):
    # Persist all graph variables under checkpoints/<model>_<episode>.
    self.saver.save(self.sess, self._checkpoint_filename(episode))
def load(self):
    # Default to the most recent checkpoint in the checkpoint directory;
    # the episode=0 filename is only used to locate that directory.
    filename = tf.train.latest_checkpoint(
        os.path.dirname(self._checkpoint_filename(episode=0)))
    if Config.LOAD_EPISODE > 0:
        # An explicit episode requested via config wins over "latest".
        filename = self._checkpoint_filename(Config.LOAD_EPISODE)
    self.saver.restore(self.sess, filename)
    # Report which episode was restored so training can resume from it.
    return self._get_episode_from_filename(filename)
def get_variables_names(self):
    """Names of every trainable variable in this network's graph."""
    names = []
    for variable in self.graph.get_collection('trainable_variables'):
        names.append(variable.name)
    return names
def get_variable_value(self, name):
    # Fetch the current value of a tensor/variable by its graph name.
    return self.sess.run(self.graph.get_tensor_by_name(name))
|
mit
| 7,946,365,921,996,443,000
| 39.546358
| 95
| 0.644753
| false
|
efforia/eos-dashboard
|
invent/store/store/views.py
|
1
|
3721
|
# -*- coding: UTF-8 -*-
import paypalrestsdk,urlparse,urllib2
from xml.etree import ElementTree as ETree
from hooks import paypal_api,pagseguro_api
from django.core.mail import send_mail
from django.conf import settings
from django.http import Http404,HttpResponse
from django.http import HttpResponse as response
from django.shortcuts import get_object_or_404,redirect,render
from cartridge.shop.models import Product, ProductVariation, Order, OrderItem
from paypalrestsdk import Payment
def payment_cancel(request):
    """Payment-cancellation callback; not implemented yet, send home."""
    return redirect('/')
def paypal_redirect(request, order):
    """Send the customer to PayPal's approval page for the given order.

    Looks up the payment by the order's stored transaction id, finds the
    REDIRECT link PayPal returned, stores its approval token on the order
    and redirects the customer there.
    """
    paypal_api()
    payment = paypalrestsdk.Payment.find(order.transaction_id)
    for link in payment.links:
        if link.method == "REDIRECT":
            redirect_url = link.href
            url = urlparse.urlparse(link.href)
            params = urlparse.parse_qs(url.query)
            # The redirect URL carries the approval token as ?token=...
            order.paypal_redirect_token = params['token'][0]
            order.save()
            return redirect(redirect_url)
    # BUGFIX: previously fell through to an unbound 'redirect_url'
    # (NameError) when PayPal returned no REDIRECT link; fail explicitly.
    raise Http404("PayPal payment %s has no redirect link" % order.transaction_id)
def payment_redirect(request, order_id):
    # Re-dispatch the customer to whichever gateway this order was started
    # with.  Access control: anonymous users may only see orders tied to
    # their session key, regular users only their own orders, staff any.
    lookup = {"id": order_id}
    if not request.user.is_authenticated(): lookup["key"] = request.session.session_key
    elif not request.user.is_staff: lookup["user_id"] = request.user.id
    order = get_object_or_404(Order, **lookup)
    is_pagseguro = order.pagseguro_redirect
    is_paypal = order.paypal_redirect_token
    # NOTE(review): the gateway fields apparently hold the literal
    # substring 'none' when unset -- confirm against the Order model
    # defaults before relying on this.
    if 'none' not in is_pagseguro: return redirect(str(is_pagseguro))
    elif 'none' not in is_paypal: return paypal_redirect(request,order)
    else: return redirect("/store/execute?orderid=%s" % lookup["id"])
def payment_slip(request):
    # Bank-slip ("boleto") request: e-mail both the customer and the shop,
    # then show the confirmation page.  (E-mail body is pt-BR by design.)
    orderid = request.GET['id']
    order = Order.objects.filter(id=orderid)[0]
    send_mail('Pedido de boleto', 'O pedido de boleto foi solicitado ao Efforia para o pedido %s. Em instantes você estará recebendo pelo e-mail. Aguarde instruções.' % order.id, 'oi@efforia.com.br',
        [order.billing_detail_email,'contato@efforia.com.br'], fail_silently=False)
    context = { "order": order }
    resp = render(request,"shop/slip_confirmation.html",context)
    return resp
def payment_bank(request):
    """Render bank-transfer instructions for the requested order."""
    order = Order.objects.filter(id=request.GET['order_id'])[0]
    context = {
        "order": order,
        "agency": settings.BANK_AGENCY,
        "account": settings.BANK_ACCOUNT,
        "socname": settings.BANK_SOCIALNAME,
    }
    return render(request, "shop/bank_confirmation.html", context)
def payment_execute(request, template="shop/payment_confirmation.html"):
    # Gateway return endpoint: finalizes an order after the customer comes
    # back from PayPal ('token'), PagSeguro ('transaction_id'), or routes
    # bank-transfer orders ('orderid') to the bank-details page.
    order = None
    lookup = {}
    if request.GET.has_key('token'):
        # PayPal: execute the payment the customer just approved.
        paypal_api()
        token = request.GET['token']
        payer_id = request.GET['PayerID']
        order = get_object_or_404(Order, paypal_redirect_token=token)
        payment = Payment.find(order.transaction_id)
        payment.execute({ "payer_id": payer_id })
    elif request.GET.has_key('transaction_id'):
        # PagSeguro: look up the transaction via its REST API; the order id
        # was stored in the transaction's <reference> XML element.
        api = pagseguro_api()
        email = api.data['email']
        token = api.data['token']
        transaction = request.GET['transaction_id']
        url = api.config.TRANSACTION_URL % transaction
        resp = urllib2.urlopen("%s?email=%s&token=%s" % (url,email,token)).read()
        lookup["id"] = ETree.fromstring(resp).findall("reference")[0].text
        print ETree.fromstring(resp).findall("reference")[0].text
        # NOTE(review): payment_redirect uses if/elif for these two checks;
        # here both are plain 'if' -- confirm that is intentional.
        if not request.user.is_authenticated(): lookup["key"] = request.session.session_key
        if not request.user.is_staff: lookup["user_id"] = request.user.id
        order = get_object_or_404(Order, **lookup)
        order.transaction_id = transaction
    elif request.GET.has_key('orderid'):
        # Bank transfer: no gateway round-trip, show bank details instead.
        return redirect("/store/bank?order_id=%s" % request.GET['orderid'])
    # NOTE(review): if none of the expected GET keys is present, 'order'
    # is still None here and the next line raises AttributeError.
    order.status = 2
    order.save()
    context = { "order" : order }
    response = render(request, template, context)
    return response
|
lgpl-3.0
| -6,523,896,887,667,410,000
| 39.445652
| 196
| 0.71083
| false
|
saga-project/bliss
|
bliss/plugins/local/localjob.py
|
1
|
11546
|
# -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
__author__ = "Ole Christian Weidner"
__copyright__ = "Copyright 2011-2012, Ole Christian Weidner"
__license__ = "MIT"
from bliss.interface import JobPluginInterface
from bliss.plugins.local.process import LocalJobProcess
import bliss.saga
class LocalJobPlugin(JobPluginInterface):
'''Implements a job plugin that can submit jobs to the local machine'''
########################################
##
class BookKeeper:
'''Keeps track of job and service objects'''
def __init__(self, parent):
self.objects = {}
self.processes = {}
self.parent = parent
def add_service_object(self, service_obj):
self.objects[hex(id(service_obj))] = {'instance' : service_obj, 'jobs' : []}
def del_service_obj(self, service_obj):
try:
self.objects.remove((hex(id(service_obj))))
except Exception:
pass
def add_job_object(self, job_obj, service_obj):
service_id = hex(id(service_obj))
job_id = hex(id(job_obj))
try:
self.objects[service_id]['jobs'].append(job_obj)
self.processes[job_id] = LocalJobProcess(jobdescription=job_obj.get_description(),
plugin=self.parent)
except Exception, ex:
self.parent.log_error_and_raise(bliss.saga.Error.NoSuccess,
"Can't register job: %s" % (ex))
def del_job_object(self, job_obj):
pass
def get_service_for_job(self, job_obj):
'''Return the service object the job is registered with'''
for key in self.objects.keys():
if job_obj in self.objects[key]['jobs']:
return self.objects[key]['instance']
self.parrent.log_error_and_raise(bliss.saga.Error.NoSuccess,
"INTERNAL ERROR: Job object %s is not known by this plugin" % (job))
def get_job_for_jobid(self, service_obj, job_id):
'''Return the job object associated with the given job id'''
for job in self.list_jobs_for_service(service_obj):
proc = self.get_process_for_job(job)
if proc.getpid(str(service_obj._url)) == job_id:
return job
self.parrent.log_error_and_raise(bliss.saga.Error.NoSuccess, "Job ID not known by this plugin.")
def list_jobs_for_service(self, service_obj):
'''List all jobs that are registered with the given service'''
service_id = hex(id(service_obj))
return self.objects[service_id]['jobs']
def get_process_for_job(self, job_obj):
'''Return the local process object for a given job'''
try:
return self.processes[hex(id(job_obj))]
except Exception, ex:
self.parrent.log_error_and_raise(bliss.saga.Error.NoSuccess,
"INTERNAL ERROR: Job object %s is not associated with a process" % (job_obj))
##
########################################
## Step 1: Define adaptor name. Convention is:
## saga.plugin.<package>.<name>
_name = 'saga.plugin.job.local'
## Step 2: Define supported url schemas
##
_schemas = ['fork']
## Step 3: Define apis supported by this adaptor
##
_apis = ['saga.job']
def __init__(self, url):
'''Class constructor'''
JobPluginInterface.__init__(self, name=self._name, schemas=self._schemas)
self.bookkeeper = self.BookKeeper(self)
@classmethod
def sanity_check(self):
'''Implements interface from _PluginBase'''
## Step 3: Implement sanity_check. This method is called *once* on
## Bliss startup. Here you should check if everything this
## adaptor needs is available, e.g., certain command line tools,
## python modules and so on.
##
try:
import subprocess
except Exception, ex:
print "module missing -- plugin disabled. (NEEDS LOGGING SUPPORT)"
return False
return True
def get_runtime_info(self):
'''Implements interface from _PluginBase'''
#str = "Plugin: %s. Registered job.service objects: %s.\n%s".format(
# self.name, len(self.objects), repr(self.objects))
#return str
def register_service_object(self, service_obj):
'''Implements interface from _JobPluginBase'''
## Step 4: Implement register_service_object. This method is called if
## a service object is instantiated with a url schema that matches
## this adaptor. You can still reject it by throwing an exception.
if service_obj._url.host != "localhost":
self.log_error_and_raise(bliss.saga.Error.BadParameter, "Only 'localhost' can be used as hostname")
self.bookkeeper.add_service_object(service_obj)
self.log_info("Registered new service object %s" % (repr(service_obj)))
def unregister_service_object(self, service_obj):
'''Implements interface from _JobPluginBase'''
## Step 5: Implement unregister_service_object. This method is called if
## a service object associated with this plugin is deleted. You
## shouldn't throw an exception here, since this method is called
## by the destructor!
self.bookkeeper.del_service_object(service_obj)
self.log_info("Unegistered new service object %s" % (repr(service_obj)))
#def register_job_object(self, job_obj, service_obj):
# '''Implements interface from _JobPluginBase'''
# ## Step 6: Implement register_job_object. This method is called if
# ## a job object is instantiated via the service.create_job() call.
# ## You can still reject it by throwing an exception.
# self.bookkeeper.add_job_object(job_obj, service_obj)
# self.log_info("Registered new job object %s" % (repr(job_obj)))
def unregister_job_object(self, job_obj):
'''Implements interface from _JobPluginBase'''
self.bookkeeper.del_job_object(job_obj)
self.log_info("Unegisteredjob object %s" % (repr(job_obj)))
def service_create_job(self, service_obj, job_description):
'''Implements interface from _JobPluginBase.
This method is called for saga.Service.create_job().
'''
if job_description.executable is None:
self.log_error_and_raise(bliss.saga.Error.BadParameter,
"No executable defined in job description")
try:
job = bliss.saga.job.Job()
job._Job__init_from_service(service_obj=service_obj,
job_desc=job_description)
#self.bookkeeper.add_job_object_to_service(job, service_obj,
# bliss.saga.job.JobID(service_obj._url, None))
self.bookkeeper.add_job_object(job, service_obj)
return job
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess,
"Couldn't create a new job because: %s " % (str(ex)))
def service_list(self, service_obj):
'''Implements interface from _JobPluginBase'''
## Step 76: Implement service_list_jobs()
try:
return self.bookkeeper.list_jobs_for_service(service_obj)
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't retreive job list because: %s " % (str(ex)))
def service_get_job(self, service_obj, job_id):
'''Implements interface from _JobPluginBase'''
## Step 76: Implement service_get_job()
try:
return self.bookkeeper.get_job_for_jobid(service_obj, job_id)
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job list because: %s " % (str(ex)))
def job_get_state(self, job):
'''Implements interface from _JobPluginBase'''
try:
service = self.bookkeeper.get_service_for_job(job)
return self.bookkeeper.get_process_for_job(job).getstate()
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job state because: %s " % (str(ex)))
def job_get_job_id(self, job):
'''Implements interface from _JobPluginBase'''
try:
service = self.bookkeeper.get_service_for_job(job)
return self.bookkeeper.get_process_for_job(job).getpid(str(service._url))
#self.log_info("Started local process: %s %s" % (job.get_description().executable, job.get_description().arguments))
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get job id because: %s " % (str(ex)))
def job_run(self, job):
'''Implements interface from _JobPluginBase'''
## Step X: implement job.run()
if job.get_description().executable is None:
self.log_error_and_raise(bliss.saga.Error.BadParameter, "No executable defined in job description")
try:
service = self.bookkeeper.get_service_for_job(job)
self.bookkeeper.get_process_for_job(job).run(job.get_description())
#self.log_info("Started local process: %s %s" % (job.get_description().executable, job.get_description().arguments))
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't run job because: %s " % (str(ex)))
def job_cancel(self, job):
'''Implements interface from _JobPluginBase'''
## Step X: implement job.cancel()
try:
self.bookkeeper.get_process_for_job(job).terminate()
self.log_info("Terminated local process: %s %s" % (job.get_description().executable, job.get_description().arguments))
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't cancel job because: %s (already finished?)" % (str(ex)))
def job_wait(self, job, timeout):
'''Implements interface from _JobPluginBase'''
## Step X: implement job.wait()
try:
service = self.bookkeeper.get_service_for_job(job)
self.bookkeeper.get_process_for_job(job).wait(timeout)
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't wait for the job because: %s " % (str(ex)))
def job_get_exitcode(self, job_obj):
'''Implements interface from _JobPluginBase'''
try:
service = self.bookkeeper.get_service_for_job(job_obj)
#process = self.bookkeeper.get_process_for_job(job_obj)
#jobstate = process.getstate()
#if jobstate != bliss.saga.Job.Done or jobstate != bliss.saga.job.Failed:
# self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get the job's exitcode. Job must be in 'Done' or 'Failed' state.")
#else:
return self.bookkeeper.get_process_for_job(job_obj).get_exitcode()
except Exception, ex:
self.log_error_and_raise(bliss.saga.Error.NoSuccess, "Couldn't get exitcode for job because: %s " % (str(ex)))
|
mit
| -7,737,438,792,153,225,000
| 43.237548
| 146
| 0.594838
| false
|
great-expectations/great_expectations
|
great_expectations/validator/validator.py
|
1
|
64280
|
import copy
import datetime
import inspect
import json
import logging
import traceback
import warnings
from collections import defaultdict, namedtuple
from collections.abc import Hashable
from typing import Any, Dict, Iterable, List, Optional, Set
import pandas as pd
from dateutil.parser import parse
from tqdm.auto import tqdm
from great_expectations import __version__ as ge_version
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import (
ExpectationSuite,
expectationSuiteSchema,
)
from great_expectations.core.expectation_validation_result import (
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.core.id_dict import BatchSpec
from great_expectations.core.run_identifier import RunIdentifier
from great_expectations.data_asset.util import recursively_convert_to_json_serializable
from great_expectations.dataset import PandasDataset, SparkDFDataset, SqlAlchemyDataset
from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference
from great_expectations.exceptions import (
GreatExpectationsError,
InvalidExpectationConfigurationError,
)
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.execution_engine.pandas_batch_data import PandasBatchData
from great_expectations.expectations.registry import (
get_expectation_impl,
get_metric_provider,
list_registered_expectation_implementations,
)
from great_expectations.marshmallow__shade import ValidationError
from great_expectations.types import ClassConfig
from great_expectations.util import load_class, verify_dynamic_loading_support
from great_expectations.validator.validation_graph import (
MetricConfiguration,
MetricEdge,
ValidationGraph,
)
# Module-level logger; also route warnings.warn() output through logging.
logger = logging.getLogger(__name__)
logging.captureWarnings(True)
class Validator:
def __init__(
    self,
    execution_engine,
    interactive_evaluation=True,
    expectation_suite=None,
    expectation_suite_name=None,
    data_context=None,
    batches=None,
    **kwargs,
):
    """Validate batches of data against an expectation suite.

    Args:
        execution_engine: engine used to load batch data and resolve metrics.
        interactive_evaluation: when False, expectations are recorded on the
            suite but not evaluated immediately.
        expectation_suite: an existing ExpectationSuite to validate against.
        expectation_suite_name: name for a fresh suite when none is given.
        data_context: optional DataContext used for storing/rendering results.
        batches: iterable of Batch objects to register with the engine.
        **kwargs: accepted for backward compatibility with older callers;
            unused here.
    """
    self._data_context = data_context
    self._execution_engine = execution_engine
    self._expose_dataframe_methods = False
    self._validator_config = {}

    if batches is None:
        batches = tuple()

    self._batches = dict()
    for batch in batches:
        assert isinstance(
            batch, Batch
        ), "batches provided to Validator must be Great Expectations Batch objects"
        self._execution_engine.load_batch_data(batch.id, batch.data)
        self._batches[batch.id] = batch

    if len(batches) > 1:
        logger.warning(
            f"{len(batches)} batches will be added to this Validator. The batch_identifiers for the active "
            f"batch are {self.active_batch.batch_definition['batch_identifiers'].items()}"
        )

    self.interactive_evaluation = interactive_evaluation
    self._initialize_expectations(
        expectation_suite=expectation_suite,
        expectation_suite_name=expectation_suite_name,
    )
    self._default_expectation_args = {
        "include_config": True,
        "catch_exceptions": False,
        "result_format": "BASIC",
    }
    # BUGFIX: a second, redundant `self._validator_config = {}` assignment
    # that silently clobbered the attribute has been removed.

    # This special state variable tracks whether a validation run is going
    # on, which will disable saving expectation config objects.
    self._active_validation = False

    if self._data_context and hasattr(
        self._data_context, "_expectation_explorer_manager"
    ):
        # TODO: verify flow of default expectation arguments
        self.set_default_expectation_argument("include_config", True)
def __dir__(self):
    """
    This custom magic method is used to enable expectation tab completion on
    Validator objects.  It also allows users to call Pandas.DataFrame
    methods on Validator objects when expose_dataframe_methods is enabled.
    """
    validator_attrs = set(super().__dir__())
    class_expectation_impls = set(list_registered_expectation_implementations())
    combined_dir = validator_attrs | class_expectation_impls
    if self._expose_dataframe_methods:
        # BUGFIX: the union result was previously discarded (bare
        # `combined_dir | set(...)`), so DataFrame methods never actually
        # appeared in dir(); assign the union back.
        combined_dir = combined_dir | set(dir(pd.DataFrame))
    return list(combined_dir)
@property
def expose_dataframe_methods(self):
    # When True, unknown attribute access falls through to the underlying
    # pandas DataFrame (see __dir__ and __getattr__).
    return self._expose_dataframe_methods

@expose_dataframe_methods.setter
def expose_dataframe_methods(self, value: bool):
    self._expose_dataframe_methods = value
def __getattr__(self, name):
    # Dynamic dispatch: "expect_*" names resolve to registered Expectation
    # implementations; optionally fall through to pandas DataFrame methods
    # when the active batch is pandas-backed and exposure is enabled.
    if name.startswith("expect_") and get_expectation_impl(name):
        return self.validate_expectation(name)
    elif (
        self._expose_dataframe_methods
        and isinstance(self.active_batch.data, PandasBatchData)
        and hasattr(pd.DataFrame, name)
    ):
        return getattr(self.active_batch.data.dataframe, name)
    else:
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{name}'"
        )
def validate_expectation(self, name):
    """
    Given the name of an Expectation, obtains the Class-first Expectation
    implementation and utilizes the expectation's validate method to obtain
    a validation result. Also adds in the runtime configuration.

    Args:
        name (str): The name of the Expectation being validated

    Returns:
        A callable that runs the expectation and returns its validation result
    """

    def inst_expectation(*args, **kwargs):
        # BUGFIX: extracted ahead of the try block -- the except handler
        # below reads this dict and previously raised NameError when the
        # failure happened before it was assigned.  These keys control how
        # exceptions inside expectation code are reported.
        basic_runtime_configuration = {
            k: v
            for k, v in kwargs.items()
            if k in ("result_format", "include_config", "catch_exceptions")
        }
        try:
            expectation_impl = get_expectation_impl(name)
            allowed_config_keys = expectation_impl.get_allowed_config_keys()
            expectation_kwargs = recursively_convert_to_json_serializable(kwargs)
            meta = None

            # This section uses the Expectation class' legacy_method_parameters
            # attribute to maintain support for passing positional arguments
            # to expectation methods.
            legacy_arg_names = expectation_impl.legacy_method_parameters.get(
                name, tuple()
            )
            for idx, arg in enumerate(args):
                try:
                    arg_name = legacy_arg_names[idx]
                    if arg_name in allowed_config_keys:
                        expectation_kwargs[arg_name] = arg
                    if arg_name == "meta":
                        meta = arg
                except IndexError:
                    raise InvalidExpectationConfigurationError(
                        f"Invalid positional argument: {arg}"
                    )

            configuration = ExpectationConfiguration(
                expectation_type=name, kwargs=expectation_kwargs, meta=meta
            )
            expectation = expectation_impl(configuration)

            # Outside interactive evaluation / active validation, record the
            # configuration without computing a result.
            if not self.interactive_evaluation and not self._active_validation:
                validation_result = ExpectationValidationResult(
                    expectation_config=copy.deepcopy(expectation.configuration)
                )
            else:
                validation_result = expectation.validate(
                    validator=self,
                    evaluation_parameters=self._expectation_suite.evaluation_parameters,
                    data_context=self._data_context,
                    runtime_configuration=basic_runtime_configuration,
                )

            # If validate has set active_validation to true, then we do not
            # save the config to avoid updating expectation configs on the
            # same suite during validation runs.
            if self._active_validation is True:
                stored_config = configuration.get_raw_configuration()
            else:
                # Append the expectation to the suite.
                stored_config = self._expectation_suite.add_expectation(
                    configuration.get_raw_configuration()
                )

            # If there was no interactive evaluation, success will not have
            # been computed.
            if validation_result.success is not None:
                # Record the outcome on the stored config.
                stored_config.success_on_last_run = validation_result.success

            if self._data_context is not None:
                validation_result = self._data_context.update_return_obj(
                    self, validation_result
                )
        except Exception as err:
            if basic_runtime_configuration.get("catch_exceptions"):
                raised_exception = True
                exception_traceback = traceback.format_exc()
                exception_message = "{}: {}".format(type(err).__name__, str(err))
                validation_result = ExpectationValidationResult(success=False)
                validation_result.exception_info = {
                    "raised_exception": raised_exception,
                    "exception_message": exception_message,
                    "exception_traceback": exception_traceback,
                }
            else:
                raise err
        return validation_result

    inst_expectation.__name__ = name
    return inst_expectation
@property
def execution_engine(self):
    """The ExecutionEngine currently backing this validator."""
    return self._execution_engine
def list_available_expectation_types(self):
    """Return the names of all expectations available to this validator."""
    return [
        attribute for attribute in dir(self) if attribute.startswith("expect_")
    ]
def get_metrics(self, metrics: Dict[str, MetricConfiguration]) -> Dict[str, Any]:
    """Resolve and return the requested metrics, keyed by the caller's names."""
    graph = ValidationGraph()
    resolved_metrics = {}
    for metric_name, metric_configuration in metrics.items():
        provider_cls, _ = get_metric_provider(
            metric_configuration.metric_name, self.execution_engine
        )
        # Backfill defaults for any domain kwargs the caller omitted.
        for key in provider_cls.domain_keys:
            if (
                key not in metric_configuration.metric_domain_kwargs
                and key in provider_cls.default_kwarg_values
            ):
                metric_configuration.metric_domain_kwargs[
                    key
                ] = provider_cls.default_kwarg_values[key]
        # ...and likewise for value kwargs.
        for key in provider_cls.value_keys:
            if (
                key not in metric_configuration.metric_value_kwargs
                and key in provider_cls.default_kwarg_values
            ):
                metric_configuration.metric_value_kwargs[
                    key
                ] = provider_cls.default_kwarg_values[key]
        self.build_metric_dependency_graph(
            graph,
            child_node=metric_configuration,
            configuration=None,
            execution_engine=self._execution_engine,
            runtime_configuration=None,
        )
    # Fills resolved_metrics (keyed by metric id) in place.
    self.resolve_validation_graph(graph, resolved_metrics)
    return {
        metric_name: resolved_metrics[metric_configuration.id]
        for (metric_name, metric_configuration) in metrics.items()
    }
def get_metric(self, metric: MetricConfiguration) -> Any:
    """Return the value of a single metric (convenience wrapper around get_metrics)."""
    return self.get_metrics({"_": metric})["_"]
def build_metric_dependency_graph(
    self,
    graph: ValidationGraph,
    child_node: MetricConfiguration,
    configuration: Optional[ExpectationConfiguration],
    execution_engine: "ExecutionEngine",
    parent_node: Optional[MetricConfiguration] = None,
    runtime_configuration: Optional[dict] = None,
) -> None:
    """Recursively add `child_node` and all of its metric dependencies to
    the validation graph until every required metric has been added."""

    # metric_kwargs = get_metric_kwargs(metric_name)
    metric_impl = get_metric_provider(
        child_node.metric_name, execution_engine=execution_engine
    )[0]
    metric_dependencies = metric_impl.get_evaluation_dependencies(
        metric=child_node,
        configuration=configuration,
        execution_engine=execution_engine,
        runtime_configuration=runtime_configuration,
    )
    child_node.metric_dependencies = metric_dependencies

    if parent_node:
        graph.add(
            MetricEdge(
                parent_node,
                child_node,
            )
        )

    if len(metric_dependencies) == 0:
        # Leaf metric: a None right-hand side tells the resolver this node
        # is immediately computable.
        graph.add(
            MetricEdge(
                child_node,
                None,
            )
        )
    else:
        for metric_dependency in metric_dependencies.values():
            # Guard against a metric that lists itself as a dependency.
            if metric_dependency.id == child_node.id:
                logger.warning(
                    f"Metric {str(child_node.id)} has created a circular dependency"
                )
                continue
            self.build_metric_dependency_graph(
                graph,
                metric_dependency,
                configuration,
                execution_engine,
                child_node,
                runtime_configuration=runtime_configuration,
            )
def graph_validate(
    self,
    configurations: List[ExpectationConfiguration],
    metrics: dict = None,
    runtime_configuration: dict = None,
) -> List[ExpectationValidationResult]:
    """Validate a list of expectation configurations by building and
    resolving a metric dependency graph.

    Args:
        configurations: Expectation configurations supplying the domains
            and values for the required metrics.
        metrics: Already-resolved metrics keyed by metric id; updated with
            newly resolved values.
        runtime_configuration: Runtime keyword arguments controlling
            semantics such as result_format and catch_exceptions.

    Returns:
        One ExpectationValidationResult per configuration; when
        catch_exceptions is on, failures yield error-carrying results
        instead of raising.
    """
    graph = ValidationGraph()
    if runtime_configuration is None:
        runtime_configuration = dict()

    # Note: catch_exceptions defaults to True here.
    if runtime_configuration.get("catch_exceptions", True):
        catch_exceptions = True
    else:
        catch_exceptions = False

    processed_configurations = []
    evrs = []
    for configuration in configurations:
        # Validating
        try:
            assert (
                configuration.expectation_type is not None
            ), "Given configuration should include expectation type"
        except AssertionError as e:
            raise InvalidExpectationConfigurationError(str(e))

        expectation_impl = get_expectation_impl(configuration.expectation_type)
        validation_dependencies = expectation_impl().get_validation_dependencies(
            configuration, self._execution_engine, runtime_configuration
        )["metrics"]

        try:
            # Add every required metric (plus its own dependencies) to the graph.
            for metric in validation_dependencies.values():
                self.build_metric_dependency_graph(
                    graph,
                    metric,
                    configuration,
                    self._execution_engine,
                    runtime_configuration=runtime_configuration,
                )
            processed_configurations.append(configuration)
        except Exception as err:
            if catch_exceptions:
                raised_exception = True
                exception_traceback = traceback.format_exc()
                result = ExpectationValidationResult(
                    success=False,
                    exception_info={
                        "raised_exception": raised_exception,
                        "exception_traceback": exception_traceback,
                        "exception_message": str(err),
                    },
                    expectation_config=configuration,
                )
                evrs.append(result)
            else:
                raise err

    if metrics is None:
        metrics = dict()

    metrics = self.resolve_validation_graph(graph, metrics, runtime_configuration)
    # Second pass: score each surviving configuration against the metrics.
    for configuration in processed_configurations:
        try:
            result = configuration.metrics_validate(
                metrics,
                execution_engine=self._execution_engine,
                runtime_configuration=runtime_configuration,
            )
            evrs.append(result)
        except Exception as err:
            if catch_exceptions:
                raised_exception = True
                exception_traceback = traceback.format_exc()
                result = ExpectationValidationResult(
                    success=False,
                    exception_info={
                        "raised_exception": raised_exception,
                        "exception_traceback": exception_traceback,
                        "exception_message": str(err),
                    },
                    expectation_config=configuration,
                )
                evrs.append(result)
            else:
                raise err
    return evrs
def resolve_validation_graph(self, graph, metrics, runtime_configuration=None):
    # Iteratively resolve metrics whose dependencies are already satisfied
    # until nothing is ready and nothing is pending.  `metrics` is mutated
    # in place and also returned.
    done: bool = False
    pbar = None
    while not done:
        ready_metrics, needed_metrics = self._parse_validation_graph(graph, metrics)
        if pbar is None:
            # NOTE(review): the progress-bar total is taken from the first
            # pass only; later passes may uncover more work than counted.
            pbar = tqdm(
                total=len(ready_metrics) + len(needed_metrics),
                desc="Calculating Metrics",
                disable=len(graph._edges) < 3,
            )
            pbar.update(0)
        metrics.update(
            self._resolve_metrics(
                execution_engine=self._execution_engine,
                metrics_to_resolve=ready_metrics,
                metrics=metrics,
                runtime_configuration=runtime_configuration,
            )
        )
        pbar.update(len(ready_metrics))
        if len(ready_metrics) + len(needed_metrics) == 0:
            done = True
    pbar.close()
    return metrics
def _parse_validation_graph(self, validation_graph, metrics):
    """Given validation graph, returns the ready and needed metrics necessary for validation using a traversal of
    validation graph (a graph structure of metric ids) edges"""
    unmet_dependency_ids = set()
    unmet_dependency = set()
    maybe_ready_ids = set()
    maybe_ready = set()

    for edge in validation_graph.edges:
        if edge.left.id not in metrics:
            if edge.right is None or edge.right.id in metrics:
                # This edge's dependency (if any) is resolved: candidate-ready.
                # The parallel *_ids sets dedupe by metric id, since distinct
                # configuration objects can share an id.
                if edge.left.id not in maybe_ready_ids:
                    maybe_ready_ids.add(edge.left.id)
                    maybe_ready.add(edge.left)
            else:
                if edge.left.id not in unmet_dependency_ids:
                    unmet_dependency_ids.add(edge.left.id)
                    unmet_dependency.add(edge.left)

    # A metric that is "ready" via one edge but unmet via another is not
    # ready yet -- subtract the unmet set.
    return maybe_ready - unmet_dependency, unmet_dependency
def _resolve_metrics(
    self,
    execution_engine: "ExecutionEngine",
    metrics_to_resolve: Iterable[MetricConfiguration],
    metrics: Dict,
    runtime_configuration: dict = None,
):
    """A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are
    resolved"""
    # Thin delegation kept as a seam for subclasses/tests to override.
    return execution_engine.resolve_metrics(
        metrics_to_resolve, metrics, runtime_configuration
    )
    def _initialize_expectations(
        self,
        expectation_suite: ExpectationSuite = None,
        expectation_suite_name: str = None,
    ):
        """Install ``self._expectation_suite``, creating an empty suite when none is given.

        Args:
            expectation_suite: An ``ExpectationSuite`` (or a dict loadable via
                ``expectationSuiteSchema``). When provided, a deep copy is
                stored so later mutations do not leak back to the caller.
                When None, a fresh empty suite is created.
            expectation_suite_name: Name to assign to the stored suite. When a
                suite is also provided and the names differ, the provided
                suite's name is overridden (with a warning).

        Returns:
            None
        """
        # Checking type of expectation_suite.
        # Check for expectation_suite_name is already done by ExpectationSuiteIdentifier
        if expectation_suite and not isinstance(expectation_suite, ExpectationSuite):
            raise TypeError(
                "expectation_suite must be of type ExpectationSuite, not {}".format(
                    type(expectation_suite)
                )
            )
        if expectation_suite is not None:
            # NOTE(review): the isinstance(dict) branch is unreachable given the
            # TypeError guard above rejects any truthy non-ExpectationSuite.
            if isinstance(expectation_suite, dict):
                expectation_suite = expectationSuiteSchema.load(expectation_suite)
            else:
                # Deep copy so we never mutate the caller's suite in place.
                expectation_suite = copy.deepcopy(expectation_suite)
            self._expectation_suite = expectation_suite
            if expectation_suite_name is not None:
                if (
                    self._expectation_suite.expectation_suite_name
                    != expectation_suite_name
                ):
                    logger.warning(
                        "Overriding existing expectation_suite_name {n1} with new name {n2}".format(
                            n1=self._expectation_suite.expectation_suite_name,
                            n2=expectation_suite_name,
                        )
                    )
                self._expectation_suite.expectation_suite_name = expectation_suite_name
        else:
            # No suite supplied: build an empty one, named "default" unless told otherwise.
            if expectation_suite_name is None:
                expectation_suite_name = "default"
            self._expectation_suite = ExpectationSuite(
                expectation_suite_name=expectation_suite_name
            )
        # Record which engine class produced/validated this suite.
        self._expectation_suite.execution_engine_type = type(
            self.execution_engine
        ).__name__
def append_expectation(self, expectation_config):
"""This method is a thin wrapper for ExpectationSuite.append_expectation"""
warnings.warn(
"append_expectation is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.add_expectation instead.",
DeprecationWarning,
)
self._expectation_suite.append_expectation(expectation_config)
def find_expectation_indexes(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[int]:
"""This method is a thin wrapper for ExpectationSuite.find_expectation_indexes"""
warnings.warn(
"find_expectation_indexes is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectation_indexes(
expectation_configuration=expectation_configuration, match_type=match_type
)
def find_expectations(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.find_expectations()"""
warnings.warn(
"find_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectations(
expectation_configuration=expectation_configuration, match_type=match_type
)
def remove_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
remove_multiple_matches: bool = False,
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.remove()"""
warnings.warn(
"DataAsset.remove_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.remove_expectation instead.",
DeprecationWarning,
)
return self._expectation_suite.remove_expectation(
expectation_configuration=expectation_configuration,
match_type=match_type,
remove_multiple_matches=remove_multiple_matches,
)
    def set_config_value(self, key, value):
        """Store a single entry in this validator's configuration dict."""
        self._validator_config[key] = value
    def get_config_value(self, key):
        """Return the configured value for ``key`` (None when unset)."""
        return self._validator_config.get(key)
def load_batch(self, batch_list: List[Batch]):
for batch in batch_list:
self._execution_engine.load_batch_data(batch.id, batch.data)
self._batches[batch.id] = batch
# We set the active_batch_id in each iteration of the loop to keep in sync with the active_batch_id for the
# execution_engine. The final active_batch_id will be that of the final batch loaded.
self.active_batch_id = batch.id
return batch_list
    @property
    def batches(self) -> Dict[str, Batch]:
        """All batches tracked by this validator, keyed by batch id (live dict, not a copy)."""
        return self._batches
    @property
    def loaded_batch_ids(self) -> List[str]:
        """Ids of all batch data currently loaded in the execution engine."""
        return self.execution_engine.loaded_batch_data_ids
@property
def active_batch(self) -> Batch:
"""Getter for active batch"""
active_batch_id: str = self.execution_engine.active_batch_data_id
batch: Batch = self.batches.get(active_batch_id) if active_batch_id else None
return batch
@property
def active_batch_spec(self) -> Optional[BatchSpec]:
"""Getter for active batch's batch_spec"""
if not self.active_batch:
return None
else:
return self.active_batch.batch_spec
    @property
    def active_batch_id(self) -> str:
        """Id of the batch data the execution engine currently treats as active."""
        return self.execution_engine.active_batch_data_id
    @active_batch_id.setter
    def active_batch_id(self, batch_id: str):
        # Invariant: every batch tracked here should already be loaded in the
        # engine. NOTE(review): asserts are stripped under ``python -O``, so
        # this is a debug-only check.
        assert set(self.batches.keys()).issubset(set(self.loaded_batch_ids))
        # Given the subset invariant above, this union equals
        # loaded_batch_ids; kept as a belt-and-braces computation.
        available_batch_ids: Set[str] = set(self.batches.keys()).union(
            set(self.loaded_batch_ids)
        )
        if batch_id not in available_batch_ids:
            raise ValueError(
                f"""batch_id {batch_id} not found in loaded batches. Batches must first be loaded before they can be \
set as active.
                """
            )
        else:
            # No public setter on the engine; reach into its private attribute.
            self.execution_engine._active_batch_data_id = batch_id
@property
def active_batch_markers(self):
"""Getter for active batch's batch markers"""
if not self.active_batch:
return None
else:
return self.active_batch.batch_markers
@property
def active_batch_definition(self):
"""Getter for the active batch's batch definition"""
if not self.active_batch:
return None
else:
return self.active_batch.batch_definition
    def discard_failing_expectations(self):
        """Run validation and remove every failed expectation from the suite."""
        res = self.validate(only_return_failures=True).results
        # NOTE(review): ``any(res)`` depends on the truthiness of each result
        # object, not merely on list emptiness — presumably a result is truthy
        # when it succeeded; confirm against ExpectationValidationResult's
        # __bool__ before restructuring this.
        if any(res):
            for item in res:
                self.remove_expectation(
                    expectation_configuration=item.expectation_config,
                    match_type="runtime",
                )
            warnings.warn("Removed %s expectations that were 'False'" % len(res))
    def get_default_expectation_arguments(self):
        """Fetch default expectation arguments for this data_asset
        Returns:
            A dictionary containing all the current default expectation arguments for a data_asset
            Ex::
                {
                    "include_config" : True,
                    "catch_exceptions" : False,
                    "result_format" : 'BASIC'
                }
        See also:
            set_default_expectation_arguments
        """
        # NOTE(review): returns the live dict, not a copy — caller mutations
        # will change this validator's defaults.
        return self._default_expectation_args
    @property
    def default_expectation_args(self):
        """Default kwargs applied to expectations (live dict, not a copy)."""
        return self._default_expectation_args
    def set_default_expectation_argument(self, argument, value):
        """
        Set a default expectation argument for this data_asset
        Args:
            argument (string): The name of the argument to set
            value : The new value to use as the default
        Returns:
            None
        See also:
            get_default_expectation_arguments
        """
        self._default_expectation_args[argument] = value
def get_expectations_config(
self,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
):
"""
Returns an expectation configuration, providing an option to discard failed expectation and discard/ include'
different result aspects, such as exceptions and result format.
"""
warnings.warn(
"get_expectations_config is deprecated, and will be removed in a future release. "
+ "Please use get_expectation_suite instead.",
DeprecationWarning,
)
return self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
    def get_expectation_suite(
        self,
        discard_failed_expectations=True,
        discard_result_format_kwargs=True,
        discard_include_config_kwargs=True,
        discard_catch_exceptions_kwargs=True,
        suppress_warnings=False,
        suppress_logging=False,
    ):
        """Return a cleaned deep copy of ``_expectation_suite``.

        Args:
            discard_failed_expectations (boolean): \
                Only include expectations whose last run did not fail. Defaults to `True`.
            discard_result_format_kwargs (boolean): \
                Suppress the `result_format` kwarg in returned expectations. Defaults to `True`.
            discard_include_config_kwargs (boolean): \
                Suppress the `include_config` kwarg in returned expectations. Defaults to `True`.
            discard_catch_exceptions_kwargs (boolean): \
                Suppress the `catch_exceptions` kwarg in returned expectations. Defaults to `True`.
            suppress_warnings (boolean): \
                If true, do not include warnings in logging information about the operation.
            suppress_logging (boolean): \
                If true, do not create a log entry (useful when using get_expectation_suite programmatically)
        Returns:
            An expectation suite.
        Note:
            get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \
            copy of _expectation_suite, not the original object.
        """
        expectation_suite = copy.deepcopy(self._expectation_suite)
        expectations = expectation_suite.expectations
        # Tallies of how many expectations / kwargs were dropped, by reason.
        discards = defaultdict(int)
        if discard_failed_expectations:
            new_expectations = []
            for expectation in expectations:
                # Note: This is conservative logic.
                # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.
                # In cases where expectation.success is missing or None, expectations are *retained*.
                # Such a case could occur if expectations were loaded from a config file and never run.
                if expectation.success_on_last_run is False:
                    discards["failed_expectations"] += 1
                else:
                    new_expectations.append(expectation)
            expectations = new_expectations
        message = "\t%d expectation(s) included in expectation_suite." % len(
            expectations
        )
        if discards["failed_expectations"] > 0 and not suppress_warnings:
            message += (
                " Omitting %d expectation(s) that failed when last run; set "
                "discard_failed_expectations=False to include them."
                % discards["failed_expectations"]
            )
        for expectation in expectations:
            # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,
            # which calls _copy_and_clean_up_expectation
            # Reset run status: the exported copy should carry no run history.
            expectation.success_on_last_run = None
            if discard_result_format_kwargs:
                if "result_format" in expectation.kwargs:
                    del expectation.kwargs["result_format"]
                    discards["result_format"] += 1
            if discard_include_config_kwargs:
                if "include_config" in expectation.kwargs:
                    del expectation.kwargs["include_config"]
                    discards["include_config"] += 1
            if discard_catch_exceptions_kwargs:
                if "catch_exceptions" in expectation.kwargs:
                    del expectation.kwargs["catch_exceptions"]
                    discards["catch_exceptions"] += 1
        settings_message = ""
        if discards["result_format"] > 0 and not suppress_warnings:
            settings_message += " result_format"
        if discards["include_config"] > 0 and not suppress_warnings:
            settings_message += " include_config"
        if discards["catch_exceptions"] > 0 and not suppress_warnings:
            settings_message += " catch_exceptions"
        if (
            len(settings_message) > 1
        ):  # Only add this if we added one of the settings above.
            settings_message += " settings filtered."
        expectation_suite.expectations = expectations
        if not suppress_logging:
            logger.info(message + settings_message)
        return expectation_suite
def save_expectation_suite(
self,
filepath=None,
discard_failed_expectations=True,
discard_result_format_kwargs=True,
discard_include_config_kwargs=True,
discard_catch_exceptions_kwargs=True,
suppress_warnings=False,
):
"""Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \
can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \
pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \
the JSON expectations config.
Args:
filepath (string): \
The location and name to write the JSON config file to.
discard_failed_expectations (boolean): \
If True, excludes expectations that do not return ``success = True``. \
If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): \
If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \
file.
discard_include_config_kwargs (boolean): \
If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \
file.
discard_catch_exceptions_kwargs (boolean): \
If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \
config file.
suppress_warnings (boolean): \
It True, all warnings raised by Great Expectations, as a result of dropped expectations, are \
suppressed.
"""
expectation_suite = self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
if filepath is None and self._data_context is not None:
self._data_context.save_expectation_suite(expectation_suite)
elif filepath is not None:
with open(filepath, "w") as outfile:
json.dump(
expectationSuiteSchema.dump(expectation_suite),
outfile,
indent=2,
sort_keys=True,
)
else:
raise ValueError(
"Unable to save config: filepath or data_context must be available."
)
    def validate(
        self,
        expectation_suite=None,
        run_id=None,
        data_context=None,
        evaluation_parameters=None,
        catch_exceptions=True,
        result_format=None,
        only_return_failures=False,
        run_name=None,
        run_time=None,
    ):
        """Generates a JSON-formatted report describing the outcome of all expectations.
        Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.
        Args:
            expectation_suite (json or None): \
                If None, uses the expectations config generated with the DataAsset during the current session. \
                If a JSON file, validates those expectations.
            run_name (str): \
                Used to identify this validation result as part of a collection of validations. \
                See DataContext for more information.
            data_context (DataContext): \
                A datacontext object to use as part of validation for binding evaluation parameters and \
                registering validation results.
            evaluation_parameters (dict or None): \
                If None, uses the evaluation_paramters from the expectation_suite provided or as part of the \
                data_asset. If a dict, uses the evaluation parameters in the dictionary.
            catch_exceptions (boolean): \
                If True, exceptions raised by tests will not end validation and will be described in the returned \
                report.
            result_format (string or None): \
                If None, uses the default value ('BASIC' or as specified). \
                If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \
                etc.).
            only_return_failures (boolean): \
                If True, expectation results are only returned when ``success = False`` \
        Returns:
            A JSON-formatted dictionary containing a list of the validation results. \
            An example of the returned format::
            {
              "results": [
                {
                  "unexpected_list": [unexpected_value_1, unexpected_value_2],
                  "expectation_type": "expect_*",
                  "kwargs": {
                    "column": "Column_Name",
                    "output_format": "SUMMARY"
                  },
                  "success": true,
                  "raised_exception: false.
                  "exception_traceback": null
                },
                {
                  ... (Second expectation results)
                },
                ... (More expectations results)
              ],
              "success": true,
              "statistics": {
                "evaluated_expectations": n,
                "successful_expectations": m,
                "unsuccessful_expectations": n - m,
                "success_percent": m / n
              }
            }
        Notes:
            If the configuration object was built with a different version of great expectations then the \
            current environment. If no version was found in the configuration file.
        Raises:
            AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
        """
        try:
            # Timestamp recorded in the result meta, UTC, compact ISO-like form.
            validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
            assert not (run_id and run_name) and not (
                run_id and run_time
            ), "Please provide either a run_id or run_name and/or run_time."
            # Normalize the various legacy run_id forms into a RunIdentifier.
            if isinstance(run_id, str) and not run_name:
                # NOTE(review): this message is missing a space between
                # "provide" and "run_name" (adjacent literal concatenation).
                warnings.warn(
                    "String run_ids will be deprecated in the future. Please provide a run_id of type "
                    "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
                    "and run_time (both optional). Instead of providing a run_id, you may also provide"
                    "run_name and run_time separately.",
                    DeprecationWarning,
                )
                try:
                    run_time = parse(run_id)
                except (ValueError, TypeError):
                    pass
                run_id = RunIdentifier(run_name=run_id, run_time=run_time)
            elif isinstance(run_id, dict):
                run_id = RunIdentifier(**run_id)
            elif not isinstance(run_id, RunIdentifier):
                run_id = RunIdentifier(run_name=run_name, run_time=run_time)
            self._active_validation = True
            if result_format is None:
                result_format = {"result_format": "BASIC"}
            # If a different validation data context was provided, override
            validate__data_context = self._data_context
            if data_context is None and self._data_context is not None:
                data_context = self._data_context
            elif data_context is not None:
                # temporarily set self._data_context so it is used inside the expectation decorator
                self._data_context = data_context
            # Resolve the suite: default to ours, load from file path, or
            # reject anything that is not an ExpectationSuite.
            if expectation_suite is None:
                expectation_suite = self.get_expectation_suite(
                    discard_failed_expectations=False,
                    discard_result_format_kwargs=False,
                    discard_include_config_kwargs=False,
                    discard_catch_exceptions_kwargs=False,
                )
            elif isinstance(expectation_suite, str):
                try:
                    with open(expectation_suite) as infile:
                        expectation_suite = expectationSuiteSchema.loads(infile.read())
                except ValidationError:
                    raise
                except OSError:
                    raise GreatExpectationsError(
                        "Unable to load expectation suite: IO error while reading %s"
                        % expectation_suite
                    )
            elif not isinstance(expectation_suite, ExpectationSuite):
                logger.error(
                    "Unable to validate using the provided value for expectation suite; does it need to be "
                    "loaded from a dictionary?"
                )
                if getattr(data_context, "_usage_statistics_handler", None):
                    handler = data_context._usage_statistics_handler
                    handler.send_usage_message(
                        event="data_asset.validate",
                        event_payload=handler._batch_anonymizer.anonymize_batch_info(
                            self
                        ),
                        success=False,
                    )
                return ExpectationValidationResult(success=False)
            # Evaluation parameter priority is
            # 1. from provided parameters
            # 2. from expectation configuration
            # 3. from data context
            # So, we load them in reverse order
            if data_context is not None:
                runtime_evaluation_parameters = (
                    data_context.evaluation_parameter_store.get_bind_params(run_id)
                )
            else:
                runtime_evaluation_parameters = {}
            if expectation_suite.evaluation_parameters:
                runtime_evaluation_parameters.update(
                    expectation_suite.evaluation_parameters
                )
            if evaluation_parameters is not None:
                runtime_evaluation_parameters.update(evaluation_parameters)
            # Convert evaluation parameters to be json-serializable
            runtime_evaluation_parameters = recursively_convert_to_json_serializable(
                runtime_evaluation_parameters
            )
            # Warn if our version is different from the version in the configuration
            # TODO: Deprecate "great_expectations.__version__"
            # NOTE(review): suite_ge_version is computed but never used below.
            suite_ge_version = expectation_suite.meta.get(
                "great_expectations_version"
            ) or expectation_suite.meta.get("great_expectations.__version__")
            # Group expectations by column
            columns = {}
            for expectation in expectation_suite.expectations:
                expectation.process_evaluation_parameters(
                    evaluation_parameters=runtime_evaluation_parameters,
                    interactive_evaluation=self.interactive_evaluation,
                    data_context=self._data_context,
                )
                if "column" in expectation.kwargs and isinstance(
                    expectation.kwargs["column"], Hashable
                ):
                    column = expectation.kwargs["column"]
                else:
                    column = "_nocolumn"
                if column not in columns:
                    columns[column] = []
                columns[column].append(expectation)
            # Flatten back to a single list, grouped column-by-column.
            expectations_to_evaluate = []
            for col in columns:
                expectations_to_evaluate.extend(columns[col])
            results = self.graph_validate(
                expectations_to_evaluate,
                runtime_configuration={
                    "catch_exceptions": catch_exceptions,
                    "result_format": result_format,
                },
            )
            statistics = _calc_validation_statistics(results)
            if only_return_failures:
                abbrev_results = []
                for exp in results:
                    if not exp.success:
                        abbrev_results.append(exp)
                results = abbrev_results
            expectation_suite_name = expectation_suite.expectation_suite_name
            result = ExpectationSuiteValidationResult(
                results=results,
                success=statistics.success,
                statistics={
                    "evaluated_expectations": statistics.evaluated_expectations,
                    "successful_expectations": statistics.successful_expectations,
                    "unsuccessful_expectations": statistics.unsuccessful_expectations,
                    "success_percent": statistics.success_percent,
                },
                evaluation_parameters=runtime_evaluation_parameters,
                meta={
                    "great_expectations_version": ge_version,
                    "expectation_suite_name": expectation_suite_name,
                    "run_id": run_id,
                    "batch_spec": self.active_batch_spec,
                    "batch_markers": self.active_batch_markers,
                    "active_batch_definition": self.active_batch_definition,
                    "validation_time": validation_time,
                },
            )
            # Restore whatever data context we had before this call.
            self._data_context = validate__data_context
        except Exception as e:
            if getattr(data_context, "_usage_statistics_handler", None):
                handler = data_context._usage_statistics_handler
                handler.send_usage_message(
                    event="data_asset.validate",
                    event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
                    success=False,
                )
            raise
        finally:
            self._active_validation = False
            # NOTE(review): this success=True usage message is sent even on the
            # exception path (after the success=False one above) — confirm
            # whether double-reporting on failure is intended.
            if getattr(data_context, "_usage_statistics_handler", None):
                handler = data_context._usage_statistics_handler
                handler.send_usage_message(
                    event="data_asset.validate",
                    event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
                    success=True,
                )
        return result
def get_evaluation_parameter(self, parameter_name, default_value=None):
"""
Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
"""
if parameter_name in self._expectation_suite.evaluation_parameters:
return self._expectation_suite.evaluation_parameters[parameter_name]
else:
return default_value
def set_evaluation_parameter(self, parameter_name, parameter_value):
"""
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
"""
self._expectation_suite.evaluation_parameters.update(
{parameter_name: parameter_value}
)
def add_citation(
self,
comment,
batch_spec=None,
batch_markers=None,
batch_definition=None,
citation_date=None,
):
"""Adds a citation to an existing Expectation Suite within the validator"""
if batch_spec is None:
batch_spec = self.batch_spec
if batch_markers is None:
batch_markers = self.active_batch_markers
if batch_definition is None:
batch_definition = self.active_batch_definition
self._expectation_suite.add_citation(
comment,
batch_spec=batch_spec,
batch_markers=batch_markers,
batch_definition=batch_definition,
citation_date=citation_date,
)
    @property
    def expectation_suite_name(self):
        """Name of the expectation suite currently attached to this data_asset."""
        return self._expectation_suite.expectation_suite_name
    @expectation_suite_name.setter
    def expectation_suite_name(self, expectation_suite_name):
        """Rename the expectation suite attached to this data_asset."""
        self._expectation_suite.expectation_suite_name = expectation_suite_name
def test_expectation_function(self, function, *args, **kwargs):
"""Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you will still need \
to define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
argspec = inspect.getfullargspec(function)[0][1:]
new_function = self.expectation(argspec)(function)
return new_function(self, *args, **kwargs)
def columns(self, domain_kwargs: Optional[Dict[str, Any]] = None) -> List[str]:
if domain_kwargs is None:
domain_kwargs = {
"batch_id": self.execution_engine.active_batch_data_id,
}
columns: List[str] = self.get_metric(
metric=MetricConfiguration(
metric_name="table.columns",
metric_domain_kwargs=domain_kwargs,
)
)
return columns
def head(
self,
n_rows: Optional[int] = 5,
domain_kwargs: Optional[Dict[str, Any]] = None,
fetch_all: Optional[bool] = False,
) -> pd.DataFrame:
if domain_kwargs is None:
domain_kwargs = {
"batch_id": self.execution_engine.active_batch_data_id,
}
data: Any = self.get_metric(
metric=MetricConfiguration(
metric_name="table.head",
metric_domain_kwargs=domain_kwargs,
metric_value_kwargs={
"n_rows": n_rows,
"fetch_all": fetch_all,
},
)
)
df: pd.DataFrame
if isinstance(
self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine)
):
df = pd.DataFrame(data=data)
elif isinstance(self.execution_engine, SparkDFExecutionEngine):
rows: List[Dict[str, Any]] = [datum.asDict() for datum in data]
df = pd.DataFrame(data=rows)
else:
raise GreatExpectationsError(
"Unsupported or unknown ExecutionEngine type encountered in Validator class."
)
return df.reset_index(drop=True, inplace=False)
ValidationStatistics = namedtuple(
"ValidationStatistics",
[
"evaluated_expectations",
"successful_expectations",
"unsuccessful_expectations",
"success_percent",
"success",
],
)
def _calc_validation_statistics(validation_results):
"""
Calculate summary statistics for the validation results and
return ``ExpectationStatistics``.
"""
# calc stats
successful_expectations = sum(exp.success for exp in validation_results)
evaluated_expectations = len(validation_results)
unsuccessful_expectations = evaluated_expectations - successful_expectations
success = successful_expectations == evaluated_expectations
try:
success_percent = successful_expectations / evaluated_expectations * 100
except ZeroDivisionError:
# success_percent = float("nan")
success_percent = None
return ValidationStatistics(
successful_expectations=successful_expectations,
evaluated_expectations=evaluated_expectations,
unsuccessful_expectations=unsuccessful_expectations,
success=success,
success_percent=success_percent,
)
class BridgeValidator:
    """This is currently helping bridge APIs"""

    def __init__(self, batch, expectation_suite, expectation_engine=None, **kwargs):
        """Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine
        being determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.)

        Args:
            batch (Batch): A Batch in Pandas, Spark, or SQL format
            expectation_suite (ExpectationSuite): The Expectation Suite available to the validator within the
                current Data Context
            expectation_engine (ExecutionEngine): The current Execution Engine being utilized. If this is not set,
                it is determined by the type of data within the given batch
        """
        self.batch = batch
        self.expectation_suite = expectation_suite
        # The engine may be given as a plain dict / ClassConfig; load the
        # class dynamically in that case.
        if isinstance(expectation_engine, dict):
            expectation_engine = ClassConfig(**expectation_engine)
        if isinstance(expectation_engine, ClassConfig):
            module_name = expectation_engine.module_name or "great_expectations.dataset"
            verify_dynamic_loading_support(module_name=module_name)
            expectation_engine = load_class(
                class_name=expectation_engine.class_name, module_name=module_name
            )
        self.expectation_engine = expectation_engine
        if self.expectation_engine is None:
            # Guess the engine from the batch data type. Each probe is guarded
            # so a missing optional dependency (pandas / pyspark) is skipped.
            try:
                import pandas as pd

                if isinstance(batch.data, pd.DataFrame):
                    self.expectation_engine = PandasDataset
            except ImportError:
                pass
        if self.expectation_engine is None:
            if isinstance(batch.data, SqlAlchemyBatchReference):
                self.expectation_engine = SqlAlchemyDataset
        if self.expectation_engine is None:
            try:
                import pyspark

                if isinstance(batch.data, pyspark.sql.DataFrame):
                    self.expectation_engine = SparkDFDataset
            except ImportError:
                pass
        if self.expectation_engine is None:
            raise ValueError(
                "Unable to identify expectation_engine. It must be a subclass of DataAsset."
            )
        self.init_kwargs = kwargs

    def get_dataset(self):
        """
        Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes
        contain proper type of data (i.e. a Pandas Dataset does not contain SqlAlchemy data)
        """
        if issubclass(self.expectation_engine, PandasDataset):
            import pandas as pd

            # Consistency fix: use attribute access (``self.batch.data``) like
            # every other branch — the old ``self.batch["data"]`` was the only
            # dict-style access in the class.
            if not isinstance(self.batch.data, pd.DataFrame):
                raise ValueError(
                    "PandasDataset expectation_engine requires a Pandas Dataframe for its batch"
                )
            return self.expectation_engine(
                self.batch.data,
                expectation_suite=self.expectation_suite,
                batch_kwargs=self.batch.batch_kwargs,
                batch_parameters=self.batch.batch_parameters,
                batch_markers=self.batch.batch_markers,
                data_context=self.batch.data_context,
                **self.init_kwargs,
                **self.batch.batch_kwargs.get("dataset_options", {}),
            )
        elif issubclass(self.expectation_engine, SqlAlchemyDataset):
            if not isinstance(self.batch.data, SqlAlchemyBatchReference):
                raise ValueError(
                    "SqlAlchemyDataset expectation_engine requires a SqlAlchemyBatchReference for its batch"
                )
            # The batch reference carries its own constructor kwargs; explicit
            # init kwargs take precedence.
            init_kwargs = self.batch.data.get_init_kwargs()
            init_kwargs.update(self.init_kwargs)
            return self.expectation_engine(
                batch_kwargs=self.batch.batch_kwargs,
                batch_parameters=self.batch.batch_parameters,
                batch_markers=self.batch.batch_markers,
                data_context=self.batch.data_context,
                expectation_suite=self.expectation_suite,
                **init_kwargs,
                **self.batch.batch_kwargs.get("dataset_options", {}),
            )
        elif issubclass(self.expectation_engine, SparkDFDataset):
            import pyspark

            if not isinstance(self.batch.data, pyspark.sql.DataFrame):
                raise ValueError(
                    "SparkDFDataset expectation_engine requires a spark DataFrame for its batch"
                )
            return self.expectation_engine(
                spark_df=self.batch.data,
                expectation_suite=self.expectation_suite,
                batch_kwargs=self.batch.batch_kwargs,
                batch_parameters=self.batch.batch_parameters,
                batch_markers=self.batch.batch_markers,
                data_context=self.batch.data_context,
                **self.init_kwargs,
                **self.batch.batch_kwargs.get("dataset_options", {}),
            )
        # NOTE(review): any other DataAsset subclass falls through and returns
        # None implicitly — preserved as-is; confirm whether raising here would
        # be more appropriate.
|
apache-2.0
| 4,999,121,242,791,270,000
| 40.337621
| 134
| 0.584879
| false
|
srozb/osqonnector
|
apps/osquery_api.py
|
1
|
8094
|
import os
import binascii
import json
import re
import redis
import config
from ipaddress import ip_address, ip_network
from datetime import datetime
from bottle import Bottle, request, response, HTTPResponse
from dbconn.dbconn import get_connection
from logger.logger import Logger
# TODO: node_key check decorator
# TODO: check parameter escape
# Bottle application serving the osquery remote API endpoints.
app = Bottle()
# NOTE(review): this sets the content type on the import-time default response
# object; whether it carries over to per-request responses depends on Bottle's
# thread-local handling — confirm handlers emit JSON content-type per request.
response.content_type = 'application/json'
# Shared Redis connection (host/port/db come from config).
r = redis.StrictRedis(host=config.REDIS_HOST,
                      port=config.REDIS_PORT, db=config.REDIS_DB)
# Dataset-style database handle; tables are accessed as db['table_name'].
db = get_connection()
l = Logger(__name__)  # module-wide logger; the short name 'l' is used throughout
def _get_client():
    "look up the osquery_client row for the request's node_key, or force a re-enroll"
    node_key = request.json['node_key']
    client = db['osquery_client'].find_one(node_key=node_key)
    if client:
        return client
    # Unknown node_key: per the osquery remote API, answer HTTP 200 with
    # node_invalid=true so the agent re-enrolls.
    l.info("Node key: {} not in db. Asking to reenroll.".format(node_key))
    raise HTTPResponse(status=200, content_type='application/json',
                       body='{"node_invalid": true}\n')
def _get_client_tags(client):
    "return the list of tag ids attached to the given client row"
    return [row['tag_id']
            for row in db['osquery_client_tag'].find(osqueryclient_id=client['id'])]
def _update_client_communication(client):
    """Stamp the client's last_communication column with the current UTC time."""
    db['osquery_client'].update(
        dict(id=client['id'], last_communication=datetime.utcnow()),
        ['id'])
def _enrich_message(client, message):
client_data = {'client_id': client['id'], 'hostname': client['hostname'],
'uuid': client['uuid'], 'version': client['version'], 'ip': client['ip'],
'bu_id': client['bussiness_unit_id']}
return json.dumps({'client': client_data, 'message': message})
@app.route('/osquery/enroll', method='POST')
def enroll():  # TODO: autotag based on tag_rules
    """Enroll a new osquery client.

    Validates the enroll secret against a bussiness unit, inserts the client
    row with a freshly generated node_key, auto-assigns tags from the
    TagAssignmentRules table, and returns the node_key to the client.
    """
    def _get_bussiness_unit(enroll_secret):
        # Zentral-style secrets carry extra ':'-separated fields; only the
        # first component is the actual shared secret.
        if config.ZENTRAL_COMPATIBILITY:
            enroll_secret = enroll_secret.split(":")[0]
        bu_table = db['bussiness_unit']
        return bu_table.find_one(secret=enroll_secret)

    def _generate_node_key():
        # 16 random bytes, hex-encoded -> 32-character node key.
        return binascii.b2a_hex(os.urandom(16))

    # TODO: check if already enrolled
    def _insert_new_client(node_key, hostname, bussiness_unit, ip, useragent):
        # Returns the new row id (captured below as client_id).
        osq_clients = db['osquery_client']
        return osq_clients.insert(dict(hostname=hostname,
                                       node_key=node_key,
                                       bussiness_unit_id=bussiness_unit['id'],
                                       registered_date=datetime.utcnow(),
                                       last_communication=datetime.utcnow(),
                                       ip=ip, version=useragent,
                                       last_distributed_id=0))

    def _auto_assign_tags():
        "assign tags based on TagAssignmentRules"
        def _rule_matches(rule):
            # IP: exact address equality; SUBNET: membership test;
            # REGEX: matched against the reported host identifier.
            if rule['type'] == 'IP':
                return ip_address(unicode(client_ip)) == ip_address(rule['value'])
            elif rule['type'] == 'SUBNET':
                return ip_address(unicode(client_ip)) in ip_network(rule['value'])
            elif rule['type'] == 'REGEX':
                return re.match(rule['value'], req['host_identifier'])
            l.error("unsupported rule type")  # falls through -> None (no match)
        for rule in db['osquery_tagassignmentrules'].find(enabled=True):
            if _rule_matches(rule):
                db['osquery_client_tag'].insert(dict(osqueryclient_id=client_id,
                                                     tag_id=rule['tag_id']))

    req = request.json
    l.info("enrollment request from: {}".format(req['host_identifier']))
    b_unit = _get_bussiness_unit(req['enroll_secret'])
    if not b_unit:
        # Unknown secret: reject the enrollment.
        return {"node_invalid": True}
    node_key = _generate_node_key()
    client_ip = request.remote_addr
    useragent = request.get_header("user-agent")
    client_id = _insert_new_client(
        node_key, req['host_identifier'], b_unit, client_ip, useragent)
    _auto_assign_tags()
    l.debug("client {} enrolled sucessfully.".format(
        req['host_identifier']))
    return {
        "node_key": node_key,
        "node_invalid": False
    }
@app.route('/osquery/config', method='POST')
def get_config():
    """Deploy a config to the client, built from its bussiness unit and tags."""
    def _get_options(client):
        "get bussiness unit specific options"
        client_config_table = db['client_config']
        options = client_config_table.find_one(
            bussiness_unit_id=client['bussiness_unit_id'])
        if not options:
            # No unit-specific config row: fall back to the "default" template.
            options = client_config_table.find_one(name="default")
        return json.loads(options['template_config'])

    # TODO: make sure queries not duplicated if multiple tags assigned
    def _get_event_quieries(tags):
        "get client specific quieries"
        ids = []
        for row in db['event_query_tag'].find(tag_id=tags):
            ids.append(row['id'])
        event_queries = db['event_query'].find(
            enabled=True, id=ids)  # TODO: test what if tag=None
        enabled_queries = {}  # TODO: append untagged queries
        for query in event_queries:
            # Schedule entry shape: name -> {'query': ..., 'interval': ...}.
            sql = {'query': str(query['value']),
                   'interval': str(query['interval'])}
            enabled_queries[str(query['name'])] = sql
        return enabled_queries

    client = _get_client()
    _update_client_communication(client)
    l.debug("config request from: {}".format(client['hostname']))
    client_tags = _get_client_tags(client)
    options = _get_options(client)
    schedule = _get_event_quieries(client_tags)
    response_body = {'options': options}
    if schedule:  # append to config only if not empty, TODO: remove if not needed
        response_body['schedule'] = schedule
    return response_body
@app.route('/osquery/log', method='POST')
def log_query_result():
    """Accept status/result logs from a client and queue them for processing."""
    client = _get_client()
    envelope = _enrich_message(client, request.json)
    r.lpush('osq_preprocessed', envelope)
    return {"node_invalid": False}
@app.route('/osquery/distributed/read', method='POST')
def distributed_read():
    """Deploy pending distributed queries to the requesting client.

    Sends every enabled distributed query matching the client's tags whose
    id is greater than the client's last_distributed_id watermark, then
    advances the watermark.
    """
    def _get_query_ids_by_tag(tags):
        # Ids of distributed_query_tag rows pointing at the client's tags.
        ids = []
        for row in db['distributed_query_tag'].find(tag_id=tags):
            ids.append(row['id'])
        return ids

    def _update_last_distributed_id(query_id):
        # Persist the high-water mark so the same query is not re-sent.
        client_table = db['osquery_client']
        client_table.update(
            dict(id=client['id'], last_distributed_id=query_id), ['id'])

    def _get_distributed_queries(tags):
        "get client specific quieries"
        ids = _get_query_ids_by_tag(tags)
        distributed_queries = db['distributed_query'].find(
            enabled=True, id=ids, order_by='id')  # BUG: not getting anything if tag=2
        query_id = 0
        enabled_queries = {}  # TODO: append untagged queries
        for query in distributed_queries:
            if query['id'] > client['last_distributed_id']:
                enabled_queries[query['name']] = query['value']
                query_id = query['id']  # rows are ordered by id, so this ends at the max
        # BUG: if disabled distributed queries
        if query_id > client['last_distributed_id']:
            _update_last_distributed_id(query_id)
        return enabled_queries

    client = _get_client()
    _update_client_communication(client)
    client_tags = _get_client_tags(client)
    queries = _get_distributed_queries(tags=client_tags)
    response_body = {'queries': queries}
    # Fix: this key previously contained a space ('node invalid'); every other
    # endpoint in this API returns 'node_invalid', the field osquery reads.
    response_body['node_invalid'] = False
    return response_body
@app.route('/osquery/distributed/write', method='POST')
def distributed_write():
    """Accept results of previously deployed distributed queries."""
    client = _get_client()
    payload = _enrich_message(client, request.json)
    r.lpush('osq_preprocessed', payload)
    return {"node_invalid": False}
|
gpl-3.0
| -888,840,970,014,117,800
| 37.542857
| 92
| 0.611317
| false
|
Jyrsa/hoppy.fi
|
hoppy/settings.py
|
1
|
3844
|
"""
Django settings for hoppy project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded secret checked into source; production deployments
# should override this via local_settings (see the import at the bottom).
SECRET_KEY = 'my name is my passport, verify me'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Application definition

# Stock Django contrib apps.
DEFAULT_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

# Reusable apps from PyPI.
THIRD_PARTY_APPS = (
    'south',
    'autoslug',
    'huey.djhuey',
    'tastypie',
)

# Apps that live in this repository.
LOCAL_APPS = (
    'beerstatus',
)

INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'hoppy.urls'

WSGI_APPLICATION = 'hoppy.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_URL = '/static/'

# huey task-queue configuration: Redis backend on localhost, 4 worker threads.
HUEY = {
    'backend': 'huey.backends.redis_backend',
    'name': 'hoppy-connection',
    'connection': {'host': 'localhost', 'port': 6379},
    'always_eager': False,
    'consumer_options': {'workers': 4},
}

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        # Passes records only when DEBUG is False (i.e. in production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        # Mails site admins about errors, but only outside DEBUG.
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'syslog': {
            'level': 'INFO',
            'class': 'logging.handlers.SysLogHandler',
            'address': '/dev/log',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'huey.consumer': {
            'handlers': ['syslog', 'console'],
            'level': 'DEBUG',
            'propagate': True,
        }
    }
}

# having a local_settings isn't mandatory but
# if one exists, it overrides stuff
try:
    from local_settings import *
except ImportError:
    pass
|
mit
| -4,624,239,657,169,039,000
| 22.728395
| 95
| 0.601197
| false
|
hypernicon/pyec
|
pyec/distribution/truncation.py
|
1
|
2812
|
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
from pyec.distribution.basic import PopulationDistribution
from pyec.config import Config
from pyec.history import DelayedHistory
class TrajectoryTruncation(PopulationDistribution):
    """Build a optimizer with a truncated trajectory.

    Wraps a subordinate optimizer so that the history it is driven by is
    delayed by a fixed number of steps (via :class:`DelayedHistory`).

    :param sub: The subordinate optimizer
    :type sub: :class:`PopulationDistribution`
    :param delay: The number of steps to truncate
    :type delay: ``int``
    """
    config = Config()

    def __init__(self, sub, delay, **kwargs):
        # Install a history factory that delays the subordinate's history.
        kwargs['history'] = self.makeHistory(sub)
        super(TrajectoryTruncation, self).__init__(**kwargs)
        self.opt = sub
        self.delay = delay

    def makeHistory(self, sub):
        """Build a :class:`DelayedHistory` suitable for the subordinate
        optimizer

        :param sub: The subordinate optimizer
        :type sub: :class:`PopulationDistribution`
        :returns: A suitable :class:`DelayedHistory` object
        """
        def generator(config):
            # self.delay is read lazily, after __init__ has assigned it.
            inner = sub.config.history(sub.config)
            return DelayedHistory(config, inner, self.delay)
        return generator

    def update(self, history, fitness):
        super(TrajectoryTruncation, self).update(history, fitness)
        # Forward the wrapped inner history to the subordinate optimizer.
        self.opt.update(history.history, fitness)
        return self

    def batch(self, popSize):
        # Sampling is delegated entirely to the wrapped optimizer.
        return self.opt()

    def needsScores(self):
        return self.opt.needsScores()

    def compatible(self, history):
        if not isinstance(history, DelayedHistory):
            return False
        if history.delay != self.delay:
            return False
        return self.opt.compatible(history.history)
|
mit
| -8,117,894,013,886,897,000
| 42.9375
| 460
| 0.676743
| false
|
ConnectBox/wifi-test-framework
|
ansible/plugins/mitogen-0.2.3/tests/minimize_source_test.py
|
1
|
1647
|
import unittest2
import mitogen.minify
import testlib
def read_sample(fname):
    """Return the contents of a sample file under minimize_samples/.

    Uses a context manager so the file handle is closed even if read()
    raises (the original leaked the handle on error).
    """
    sample_path = testlib.data_path('minimize_samples/' + fname)
    with open(sample_path) as sample_file:
        return sample_file.read()
class MinimizeSource(unittest2.TestCase):
    """Verify minimize_source() against paired sample files.

    Each sample <name>.py has a hand-checked expected output <name>_min.py.
    """
    func = staticmethod(mitogen.minify.minimize_source)

    def _assert_minimized(self, name):
        # Minimizing <name>.py must produce exactly <name>_min.py.
        expected = read_sample(name + '_min.py')
        actual = self.func(read_sample(name + '.py'))
        self.assertEqual(expected, actual)

    def test_class(self):
        self._assert_minimized('class')

    def test_comment(self):
        self._assert_minimized('comment')

    def test_def(self):
        self._assert_minimized('def')

    def test_hashbang(self):
        self._assert_minimized('hashbang')

    def test_mod(self):
        self._assert_minimized('mod')

    def test_pass(self):
        self._assert_minimized('pass')

    def test_obstacle_course(self):
        self._assert_minimized('obstacle_course')


if __name__ == '__main__':
    unittest2.main()
|
mit
| 2,491,417,689,942,566,400
| 28.945455
| 64
| 0.647237
| false
|
dreadrel/UWF_2014_spring_COP3990C-2507
|
notebooks/scripts/book_code/code/timeseqs.py
|
1
|
1033
|
# File timeseqs.py
"Test the relative speed of iteration tool alternatives."

import sys, timer                       # Import timer functions

reps = 10000
repslist = list(range(reps))            # Hoist out, list in both 2.X/3.X

# Each function below performs the same work in a deliberately different
# iteration style; the script times the alternatives against each other,
# so the stylistic differences are the point and must not be "normalized".

def forLoop():
    # Baseline: explicit for loop with append.
    res = []
    for x in repslist:
        res.append(abs(x))
    return res

def listComp():
    # Same work written as a list comprehension.
    return [abs(x) for x in repslist]

def mapCall():
    # Same work via map() with a built-in callable.
    return list(map(abs, repslist))     # Use list() here in 3.X only!
    # return map(abs, repslist)

def genExpr():
    # Generator expression, materialized for comparability.
    return list(abs(x) for x in repslist)     # list() required to force results

def genFunc():
    # Generator function, materialized for comparability.
    def gen():
        for x in repslist:
            yield abs(x)
    return list(gen())                  # list() required to force results

print(sys.version)
for test in (forLoop, listComp, mapCall, genExpr, genFunc):
    # timer.bestoftotal returns (best-of time, (total time, result list)).
    (bestof, (total, result)) = timer.bestoftotal(5, 1000, test)
    print ('%-9s: %.5f => [%s...%s]' %
           (test.__name__, bestof, result[0], result[-1]))
|
apache-2.0
| -2,903,239,872,491,046,000
| 29.382353
| 83
| 0.568248
| false
|
elegion/djangodash2012
|
fortuitus/feditor/migrations/0006_operator_max_length.py
|
1
|
3869
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen TestCaseAssert.operator to 256 chars.

    NOTE: generated migration -- the frozen ``models`` dict below is a
    snapshot of the app state and should not be edited by hand.
    """

    def forwards(self, orm):
        # Changing field 'TestCaseAssert.operator'
        db.alter_column('feditor_testcaseassert', 'operator', self.gf('django.db.models.fields.CharField')(max_length=256))

    def backwards(self, orm):
        # Changing field 'TestCaseAssert.operator'
        # Restores the previous max_length (South freezes it as the string '16').
        db.alter_column('feditor_testcaseassert', 'operator', self.gf('django.db.models.fields.CharField')(max_length='16'))

    # Frozen model definitions (South ORM snapshot).
    models = {
        'fcore.company': {
            'Meta': {'object_name': 'Company'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
        },
        'feditor.params': {
            'Meta': {'object_name': 'Params'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'feditor.testcase': {
            'Meta': {'object_name': 'TestCase'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feditor.TestProject']"}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
        },
        'feditor.testcaseassert': {
            'Meta': {'object_name': 'TestCaseAssert'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lhs': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256'}),
            'operator': ('django.db.models.fields.CharField', [], {'default': "'Eq'", 'max_length': '256'}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'rhs': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256'}),
            'step': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assertions'", 'to': "orm['feditor.TestCaseStep']"})
        },
        'feditor.testcasestep': {
            'Meta': {'object_name': 'TestCaseStep'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'method': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'params': ('fortuitus.feditor.dbfields.ParamsField', [], {'null': 'True', 'blank': 'True'}),
            'testcase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': "orm['feditor.TestCase']"}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'feditor.testproject': {
            'Meta': {'object_name': 'TestProject'},
            'base_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'common_params': ('fortuitus.feditor.dbfields.ParamsField', [], {'null': 'True', 'blank': 'True'}),
            'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fcore.Company']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
        }
    }

    complete_apps = ['feditor']
|
mit
| -3,951,128,518,662,032,400
| 56.761194
| 143
| 0.552339
| false
|
mdavid/cherokee-webserver-svnclone
|
admin/util.py
|
1
|
8071
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import os
import sys
import glob
import socket
import CTK
#
# Strings
#
def bool_to_active (b):
    """Map a truth value to the localized 'Active'/'Inactive' label."""
    labels = (_('Inactive'), _('Active'))
    return labels[1] if b else labels[0]
def bool_to_onoff (b):
    """Map a truth value to the localized 'On'/'Off' label."""
    labels = (_('Off'), _('On'))
    return labels[1] if b else labels[0]
def bool_to_yesno (b):
    """Map a truth value to the localized 'Yes'/'No' label."""
    labels = (_('No'), _('Yes'))
    return labels[1] if b else labels[0]
#
# Virtual Server
#
def cfg_vsrv_get_next():
    """ Get the prefix of the next vserver (highest existing number + 10). """
    slots = sorted(int(k) for k in CTK.cfg.keys("vserver"))
    return "vserver!%d" % (slots[-1] + 10)
def cfg_vsrv_rule_get_next (pre):
    """ Return (number, prefix) of the next rule slot of a vserver.

    Rule numbers advance in steps of 100, starting at 100.
    """
    used = sorted(int(x) for x in CTK.cfg.keys("%s!rule" % (pre)))
    nxt = used[-1] + 100 if used else 100
    return (nxt, "%s!rule!%d" % (pre, nxt))
def cfg_vsrv_rule_find_extension (pre, extension):
    """Find an extension rule in a virtual server.

    Returns the config prefix of the first 'extensions' rule whose list
    contains the given extension (substring match), or None.
    """
    for rule_num in CTK.cfg.keys("%s!rule" % (pre)):
        rule_pre = "%s!rule!%s" % (pre, rule_num)
        if CTK.cfg.get_val ("%s!match" % (rule_pre)) != "extensions":
            continue
        if extension in CTK.cfg.get_val ("%s!match!extensions" % (rule_pre)):
            return rule_pre
def cfg_vsrv_rule_find_regexp (pre, regexp):
    """Find a regular expresion rule in a virtual server.

    Returns the config prefix of the first 'request' rule whose pattern
    equals the given regexp, or None.
    """
    for rule_num in CTK.cfg.keys("%s!rule" % (pre)):
        rule_pre = "%s!rule!%s" % (pre, rule_num)
        if CTK.cfg.get_val ("%s!match" % (rule_pre)) != "request":
            continue
        if regexp == CTK.cfg.get_val ("%s!match!request" % (rule_pre)):
            return rule_pre
#
# Information Sources
#
def cfg_source_get_next ():
    """Return (number, prefix) of the next information-source slot.

    Starts at 1 when no sources exist; otherwise highest + 10.
    """
    nums = sorted(int(x) for x in CTK.cfg.keys("source"))
    if not nums:
        return (1, "source!1")
    nxt = nums[-1] + 10
    return (nxt, "source!%d" % (nxt))
def cfg_source_find_interpreter (in_interpreter = None,
                                 in_nick        = None):
    """Return the config prefix of the first 'interpreter' source whose
    interpreter or nick contains the given substring; None when nothing
    matches."""
    for num in CTK.cfg.keys("source"):
        pre = "source!%s" % (num)
        if CTK.cfg.get_val("%s!type" % (pre)) != 'interpreter':
            continue
        if (in_interpreter and
            in_interpreter in CTK.cfg.get_val("%s!interpreter" % (pre))):
            return pre
        if (in_nick and
            in_nick in CTK.cfg.get_val("%s!nick" % (pre))):
            return pre
def cfg_source_find_empty_port (n_ports=1):
    """Find the first unprivileged port with room for n_ports consecutive
    ports that no configured source currently uses.

    Fixes in this revision: the original asserted False when no source had
    a port configured, compared candidates against an *unsorted* port list,
    and never advanced the candidate past occupied ports.
    """
    used = []
    for i in CTK.cfg.keys("source"):
        host = CTK.cfg.get_val ("source!%s!host"%(i))
        if not host:
            continue
        colon = host.rfind(':')
        if colon < 0:
            continue
        port = int (host[colon+1:])
        if port < 1024:
            continue
        used.append (port)

    candidate = 1025
    for port in sorted(used):
        if candidate + n_ports < port:
            break               # found a wide-enough gap before this port
        candidate = max(candidate, port + 1)
    return candidate
def cfg_source_find_free_port (host_name='localhost'):
    """Return a port not currently running anything.

    Binds a throwaway socket to port 0 so the OS picks a free port.
    The socket is closed in a finally block so it is not leaked when
    bind() raises (the original only closed it on success).
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind((host_name, 0))
        port = probe.getsockname()[1]
    finally:
        probe.close()
    return port
def cfg_source_get_localhost_addr ():
    """Resolve 'localhost' and return its first address, or None."""
    addrs = socket.gethostbyname_ex('localhost')[2]
    return addrs[0] if addrs else None
def cfg_get_surrounding_repls (macro, value, n_minus=9, n_plus=9):
    """Build macro replacements addressing the config entries numbered
    around `value` (e.g. 'a!b!5' -> a!b!4, a!b!6, ...).

    Keys are '<macro>_minusN' / '<macro>_plusN' for N in 1..n_minus/n_plus.
    """
    parts = value.split('!')
    prefix = '!'.join(parts[:-1])
    base = int(parts[-1])

    repls = {}
    for offset in range(1, n_minus + 1):
        repls['%s_minus%d' % (macro, offset)] = '%s!%d' % (prefix, base - offset)
    for offset in range(1, n_plus + 1):
        repls['%s_plus%d' % (macro, offset)] = '%s!%d' % (prefix, base + offset)
    return repls
#
# Version strings management
#
def version_to_int (v):
    """Pack up to three dotted version components into a single integer.

    '1.2.3' -> 1*10**9 + 2*10**6 + 3*10**3; missing components count as 0,
    extra components beyond the third are ignored.
    """
    total = 0
    for component, weight in zip(v.split('.'), (10**9, 10**6, 10**3)):
        total += int(component) * weight
    return total

def version_cmp (x, y):
    """Three-way compare two version strings ('1.2.3', optionally '1.2b3').

    Returns -1/0/1. A beta ('b' suffix) sorts before the plain release of
    the same version; two betas compare by their numeric beta component.

    Fix: the final comparison used the cmp() builtin, which was removed in
    Python 3; it is replaced by the portable (a > b) - (a < b) spelling.
    """
    xp = x.split('b')
    yp = y.split('b')
    x_ver = version_to_int(xp[0])
    x_beta = xp[1] if len(xp) > 1 else None
    y_ver = version_to_int(yp[0])
    y_beta = yp[1] if len(yp) > 1 else None

    if x_ver > y_ver:
        return 1
    if x_ver < y_ver:
        return -1

    # Same numeric version: decide by beta markers.
    if not x_beta and not y_beta:
        return 0
    if not y_beta:
        return -1          # x is a beta of the release y
    if not x_beta:
        return 1           # y is a beta of the release x
    xb, yb = int(x_beta), int(y_beta)
    return (xb > yb) - (xb < yb)
#
# Paths
#
def path_find_binary (executable, extra_dirs=[], custom_test=None):
    """Find an executable.

    It checks 'extra_dirs' and the PATH.
    The 'executable' parameter can be either a string or a list of
    candidate names. Returns the first existing path that also satisfies
    custom_test (when given), or None.

    Fix: the original did ``dirs = extra_dirs`` and then ``dirs += ...``,
    mutating both the caller's list and the shared mutable default, so
    PATH entries accumulated across calls. The list is now copied.
    """
    assert (type(executable) in [str, list])
    names = [executable] if type(executable) == str else executable

    dirs = list(extra_dirs)             # copy: never mutate the argument
    env_path = os.getenv("PATH")
    if env_path:
        dirs += [d for d in env_path.split(":") if d]

    for dir in dirs:
        for name in names:
            tmp = os.path.join (dir, name)
            if not os.path.exists (tmp):
                continue
            if custom_test and not custom_test(tmp):
                continue
            return tmp
def path_find_w_default (path_list, default=''):
    """Find a path.

    It checks a list of paths (that can contain wildcards),
    if none exists default is returned.
    """
    for pattern in path_list:
        if '*' in pattern or '?' in pattern:
            candidates = glob.glob (pattern)
        else:
            candidates = [pattern]
        for candidate in candidates:
            if os.path.exists (candidate):
                return candidate
    return default
#
# OS
#
def os_get_document_root():
    """Guess the default web document root for the current platform.

    On Linux the distro is detected via its release marker file; unknown
    platforms yield the empty string.
    """
    if sys.platform == 'darwin':
        return "/Library/WebServer/Documents"
    if sys.platform == 'linux2':
        # (marker file, document root) pairs checked in order.
        distro_roots = (
            ("/etc/redhat-release",    '/var/www'),
            ("/etc/fedora-release",    '/var/www'),
            ("/etc/SuSE-release",      '/srv/www/htdocs'),
            ("/etc/debian_version",    '/var/www'),
            ("/etc/gentoo-release",    '/var/www'),
            ("/etc/slackware-version", '/var/www'),
        )
        for marker, root in distro_roots:
            if os.path.exists (marker):
                return root
        return '/var/www'
    return ''
#
# Misc
#
def split_list (value):
    """Split a comma- and/or space-separated string into non-empty tokens."""
    return [token.strip()
            for chunk in value.split(',')
            for token in chunk.split(' ')
            if token.strip()]
def lists_differ (a, b):
    """Compare lists disregarding order"""
    # NOTE(review): like the original, membership is set-based, so lists
    # that differ only in element multiplicity may compare equal.
    return len(a) != len(b) or set(a) != set(b)
def get_real_path (name, nochroot=False):
    """Get real path accounting for chrooted environments"""
    chroot = CTK.cfg.get_val('server!chroot')
    if not chroot or nochroot:
        return name
    # Prepend the chroot prefix and normalize the separators.
    return os.path.normpath (chroot + os.path.sep + name)
|
gpl-2.0
| 7,725,153,230,824,267,000
| 24.86859
| 76
| 0.548755
| false
|
mjs7231/python-plexapi
|
plexapi/library.py
|
1
|
60945
|
# -*- coding: utf-8 -*-
from urllib.parse import quote, quote_plus, unquote, urlencode
from plexapi import X_PLEX_CONTAINER_SIZE, log, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound
from plexapi.media import MediaTag
from plexapi.settings import Setting
class Library(PlexObject):
""" Represents a PlexServer library. This contains all sections of media defined
in your Plex server including video, shows and audio.
Attributes:
key (str): '/library'
identifier (str): Unknown ('com.plexapp.plugins.library').
mediaTagVersion (str): Unknown (/system/bundle/media/flags/)
server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to.
title1 (str): 'Plex Library' (not sure how useful this is).
title2 (str): Second title (this is blank on my setup).
"""
key = '/library'
def _loadData(self, data):
self._data = data
self._sectionsByID = {} # cached Section UUIDs
self.identifier = data.attrib.get('identifier')
self.mediaTagVersion = data.attrib.get('mediaTagVersion')
self.title1 = data.attrib.get('title1')
self.title2 = data.attrib.get('title2')
def sections(self):
""" Returns a list of all media sections in this library. Library sections may be any of
:class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
:class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`.
"""
key = '/library/sections'
sections = []
for elem in self._server.query(key):
for cls in (MovieSection, ShowSection, MusicSection, PhotoSection):
if elem.attrib.get('type') == cls.TYPE:
section = cls(self._server, elem, key)
self._sectionsByID[section.key] = section
sections.append(section)
return sections
def section(self, title=None):
""" Returns the :class:`~plexapi.library.LibrarySection` that matches the specified title.
Parameters:
title (str): Title of the section to return.
"""
for section in self.sections():
if section.title.lower() == title.lower():
return section
raise NotFound('Invalid library section: %s' % title)
def sectionByID(self, sectionID):
""" Returns the :class:`~plexapi.library.LibrarySection` that matches the specified sectionID.
Parameters:
sectionID (str): ID of the section to return.
"""
if not self._sectionsByID or sectionID not in self._sectionsByID:
self.sections()
return self._sectionsByID[sectionID]
def all(self, **kwargs):
""" Returns a list of all media from all library sections.
This may be a very large dataset to retrieve.
"""
items = []
for section in self.sections():
for item in section.all(**kwargs):
items.append(item)
return items
def onDeck(self):
""" Returns a list of all media items on deck. """
return self.fetchItems('/library/onDeck')
def recentlyAdded(self):
""" Returns a list of all media items recently added. """
return self.fetchItems('/library/recentlyAdded')
def search(self, title=None, libtype=None, **kwargs):
""" Searching within a library section is much more powerful. It seems certain
attributes on the media objects can be targeted to filter this search down
a bit, but I havent found the documentation for it.
Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items
such as actor=<id> seem to work, but require you already know the id of the actor.
TLDR: This is untested but seems to work. Use library section search when you can.
"""
args = {}
if title:
args['title'] = title
if libtype:
args['type'] = utils.searchType(libtype)
for attr, value in kwargs.items():
args[attr] = value
key = '/library/all%s' % utils.joinArgs(args)
return self.fetchItems(key)
def cleanBundles(self):
""" Poster images and other metadata for items in your library are kept in "bundle"
packages. When you remove items from your library, these bundles aren't immediately
removed. Removing these old bundles can reduce the size of your install. By default, your
server will automatically clean up old bundles once a week as part of Scheduled Tasks.
"""
# TODO: Should this check the response for success or the correct mediaprefix?
self._server.query('/library/clean/bundles')
def emptyTrash(self):
""" If a library has items in the Library Trash, use this option to empty the Trash. """
for section in self.sections():
section.emptyTrash()
def optimize(self):
""" The Optimize option cleans up the server database from unused or fragmented data.
For example, if you have deleted or added an entire library or many items in a
library, you may like to optimize the database.
"""
self._server.query('/library/optimize')
def update(self):
""" Scan this library for new items."""
self._server.query('/library/sections/all/refresh')
def cancelUpdate(self):
""" Cancel a library update. """
key = '/library/sections/all/refresh'
self._server.query(key, method=self._server._session.delete)
def refresh(self):
""" Forces a download of fresh media information from the internet.
This can take a long time. Any locked fields are not modified.
"""
self._server.query('/library/sections/all/refresh?force=1')
def deleteMediaPreviews(self):
""" Delete the preview thumbnails for the all sections. This cannot be
undone. Recreating media preview files can take hours or even days.
"""
for section in self.sections():
section.deleteMediaPreviews()
def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs):
    """ Simplified add for the most common options.

        Parameters:
            name (str): Name of the library
            agent (str): Example com.plexapp.agents.imdb
            type (str): movie, show, # check me
            location (str): /path/to/files
            language (str): Two letter language fx en
            kwargs (dict): Advanced options should be passed as a dict. where the id is the key.

        **Photo Preferences**

            * **agent** (str): com.plexapp.agents.none
            * **enableAutoPhotoTags** (bool): Tag photos. Default value false.
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Photo Scanner

        **Movie Preferences**

            * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner

        **IMDB Movie Options** (com.plexapp.agents.imdb)

            * **title** (bool): Localized titles. Default value false.
            * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
            * **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
            * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
            * **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
            * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **ratings** (int): Ratings Source, Default value 0 Possible options:
              0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database.
            * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria,
              3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
              11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador,
              16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland,
              22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands,
              29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal,
              35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa,
              40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom,
              46:United States, 47:Uruguay, 48:Venezuela.
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **usage** (bool): Send anonymous usage data to Plex. Default value true.

        **TheMovieDB Movie Options** (com.plexapp.agents.themoviedb)

            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **country** (int): Country (used for release date and content rating). Default value 47 Possible
              options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada,
              9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
              16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
              23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
              30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
              36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain,
              42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay,
              49:Venezuela.

        **Show Preferences**

            * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first.
            * **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Series Scanner

        **TheTVDB Show Options** (com.plexapp.agents.thetvdb)

            * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
            * **native_subs** (bool): Include extras with subtitles in Library language. Default value false.

        **TheMovieDB Show Options** (com.plexapp.agents.themoviedb)

            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **country** (int): Country (used for release date and content rating). Default value 47 options
              0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile,
              10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador,
              16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland,
              23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands,
              30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
              36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa,
              41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States,
              48:Uruguay, 49:Venezuela.

        **Other Video Preferences**

            * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb
            * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true.
            * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true.
            * **includeInGlobal** (bool): Include in dashboard. Default value true.
            * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner

        **IMDB Other Video Options** (com.plexapp.agents.imdb)

            * **title** (bool): Localized titles. Default value false.
            * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true.
            * **only_trailers** (bool): Skip extras which aren't trailers. Default value false.
            * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false.
            * **native_subs** (bool): Include extras with subtitles in Library language. Default value false.
            * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **ratings** (int): Ratings Source Default value 0 Possible options:
              0:Rotten Tomatoes,1:IMDb,2:The Movie Database.
            * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database.
            * **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria,
              3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica,
              11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France,
              17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica,
              24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua,
              31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico,
              37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad,
              45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela.
            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **usage** (bool): Send anonymous usage data to Plex. Default value true.

        **TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb)

            * **collections** (bool): Use collection info from The Movie Database. Default value false.
            * **localart** (bool): Prefer artwork based on library language. Default value true.
            * **adult** (bool): Include adult content. Default value false.
            * **country** (int): Country (used for release date and content rating). Default
              value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize,
              6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic,
              13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany,
              19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica,
              25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand,
              31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal,
              36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore,
              40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad,
              46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela.
    """
    part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % (
        quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location))  # noqa E126
    if kwargs:
        # BUG FIX: the advanced options must be joined to the query string with
        # '&'; previously the urlencoded kwargs were appended directly after
        # the location value, producing a corrupt URL like
        # "...location=%2Fdatafoo=bar".
        part += '&' + urlencode(kwargs)
    return self._server.query(part, method=self._server._session.post)
def history(self, maxresults=9999999, mindate=None):
    """ Return the combined Play History of every library section for the owner.

        Parameters:
            maxresults (int): Only return the specified number of results per section (optional).
            mindate (datetime): Min datetime to return results from.
    """
    results = []
    for section in self.sections():
        results += section.history(maxresults=maxresults, mindate=mindate)
    return results
class LibrarySection(PlexObject):
    """ Base class for a single library section.

        Attributes:
            ALLOWED_FILTERS (tuple): ()
            ALLOWED_SORT (tuple): ()
            BOOLEAN_FILTERS (tuple<str>): ('unwatched', 'duplicate')
            server (:class:`~plexapi.server.PlexServer`): Server this client is connected to.
            initpath (str): Path requested when building this object.
            agent (str): Unknown (com.plexapp.agents.imdb, etc)
            allowSync (bool): True if you allow syncing content from this section.
            art (str): Wallpaper artwork used to respresent this section.
            composite (str): Composit image used to represent this section.
            createdAt (datetime): Datetime this library section was created.
            filters (str): Unknown
            key (str): Key (or ID) of this library section.
            language (str): Language represented in this section (en, xn, etc).
            locations (str): Paths on disk where section content is stored.
            refreshing (str): True if this section is currently being refreshed.
            scanner (str): Internal scanner used to find media (Plex Movie Scanner, Plex Premium Music Scanner, etc.)
            thumb (str): Thumbnail image used to represent this section.
            title (str): Title of this section.
            type (str): Type of content section represents (movie, artist, photo, show).
            updatedAt (datetime): Datetime this library section was last updated.
            uuid (str): Unique id for this section (32258d7c-3e6c-4ac5-98ad-bad7a3b78c63)
            totalSize (int): Total number of item in the library
    """
    ALLOWED_FILTERS = ()
    ALLOWED_SORT = ()
    BOOLEAN_FILTERS = ('unwatched', 'duplicate')

    def _loadData(self, data):
        """ Load attribute values from the Plex XML response element. """
        self._data = data
        self.agent = data.attrib.get('agent')
        self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))
        self.art = data.attrib.get('art')
        self.composite = data.attrib.get('composite')
        self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
        self.filters = data.attrib.get('filters')
        self.key = data.attrib.get('key')  # invalid key from plex
        self.language = data.attrib.get('language')
        self.locations = self.listAttrs(data, 'path', etag='Location')
        self.refreshing = utils.cast(bool, data.attrib.get('refreshing'))
        self.scanner = data.attrib.get('scanner')
        self.thumb = data.attrib.get('thumb')
        self.title = data.attrib.get('title')
        self.type = data.attrib.get('type')
        self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))
        self.uuid = data.attrib.get('uuid')
        # Private attrs as we dont want a reload.
        self._total_size = None

    def fetchItems(self, ekey, cls=None, container_start=None, container_size=None, **kwargs):
        """ Load the specified key to find and build all items with the specified tag
            and attrs. See :func:`~plexapi.base.PlexObject.fetchItem` for more details
            on how this is used.

            Parameters:
                container_start (None, int): offset to get a subset of the data
                container_size (None, int): How many items in data
        """
        url_kw = {}
        if container_start is not None:
            url_kw["X-Plex-Container-Start"] = container_start
        if container_size is not None:
            url_kw["X-Plex-Container-Size"] = container_size

        if ekey is None:
            raise BadRequest('ekey was not provided')
        data = self._server.query(ekey, params=url_kw)

        if '/all' in ekey:
            # totalSize is only included in the xml response
            # if container size is used.
            total_size = data.attrib.get("totalSize") or data.attrib.get("size")
            self._total_size = utils.cast(int, total_size)

        items = self.findItems(data, cls, ekey, **kwargs)

        librarySectionID = data.attrib.get('librarySectionID')
        if librarySectionID:
            for item in items:
                item.librarySectionID = librarySectionID
        return items

    @property
    def totalSize(self):
        """ Total number of items in this section; fetched lazily with a
            one-item request and cached until the next fetchItems call.
        """
        if self._total_size is None:
            part = '/library/sections/%s/all?X-Plex-Container-Start=0&X-Plex-Container-Size=1' % self.key
            data = self._server.query(part)
            self._total_size = int(data.attrib.get("totalSize"))
        return self._total_size

    def delete(self):
        """ Delete a library section. """
        try:
            return self._server.query('/library/sections/%s' % self.key, method=self._server._session.delete)
        except BadRequest:  # pragma: no cover
            msg = 'Failed to delete library %s' % self.key
            msg += 'You may need to allow this permission in your Plex settings.'
            log.error(msg)
            raise

    def reload(self, key=None):
        """ Reload this section by looking it up again by title on the server. """
        return self._server.library.section(self.title)

    def edit(self, agent=None, **kwargs):
        """ Edit a library (Note: agent is required). See :class:`~plexapi.library.Library` for example usage.

            Parameters:
                kwargs (dict): Dict of settings to edit.
        """
        if not agent:
            agent = self.agent
        part = '/library/sections/%s?agent=%s&%s' % (self.key, agent, urlencode(kwargs))
        self._server.query(part, method=self._server._session.put)

        # Reload this way since the self.key dont have a full path, but is simply a id.
        for s in self._server.library.sections():
            if s.key == self.key:
                return s

    def get(self, title):
        """ Returns the media item with the specified title.

            Parameters:
                title (str): Title of the item to return.
        """
        key = '/library/sections/%s/all?title=%s' % (self.key, quote(title, safe=''))
        return self.fetchItem(key, title__iexact=title)

    def all(self, sort=None, **kwargs):
        """ Returns a list of media from this library section.

            Parameters:
                sort (string): The sort string
        """
        sortStr = ''
        if sort is not None:
            sortStr = '?sort=' + sort

        key = '/library/sections/%s/all%s' % (self.key, sortStr)
        return self.fetchItems(key, **kwargs)

    def agents(self):
        """ Returns a list of available `:class:`~plexapi.media.Agent` for this library section.
        """
        return self._server.agents(utils.searchType(self.type))

    def settings(self):
        """ Returns a list of all library settings. """
        key = '/library/sections/%s/prefs' % self.key
        data = self._server.query(key)
        return self.findItems(data, cls=Setting)

    def onDeck(self):
        """ Returns a list of media items on deck from this library section. """
        key = '/library/sections/%s/onDeck' % self.key
        return self.fetchItems(key)

    def recentlyAdded(self, maxresults=50):
        """ Returns a list of media items recently added from this library section.

            Parameters:
                maxresults (int): Max number of items to return (default 50).
        """
        return self.search(sort='addedAt:desc', maxresults=maxresults)

    def analyze(self):
        """ Run an analysis on all of the items in this library section. See
            See :func:`~plexapi.base.PlexPartialObject.analyze` for more details.
        """
        key = '/library/sections/%s/analyze' % self.key
        self._server.query(key, method=self._server._session.put)

    def emptyTrash(self):
        """ If a section has items in the Trash, use this option to empty the Trash. """
        key = '/library/sections/%s/emptyTrash' % self.key
        self._server.query(key, method=self._server._session.put)

    def update(self):
        """ Scan this section for new media. """
        key = '/library/sections/%s/refresh' % self.key
        self._server.query(key)

    def cancelUpdate(self):
        """ Cancel update of this Library Section. """
        key = '/library/sections/%s/refresh' % self.key
        self._server.query(key, method=self._server._session.delete)

    def refresh(self):
        """ Forces a download of fresh media information from the internet.
            This can take a long time. Any locked fields are not modified.
        """
        key = '/library/sections/%s/refresh?force=1' % self.key
        self._server.query(key)

    def deleteMediaPreviews(self):
        """ Delete the preview thumbnails for items in this library. This cannot
            be undone. Recreating media preview files can take hours or even days.
        """
        key = '/library/sections/%s/indexes' % self.key
        self._server.query(key, method=self._server._session.delete)

    def listChoices(self, category, libtype=None, **kwargs):
        """ Returns a list of :class:`~plexapi.library.FilterChoice` objects for the
            specified category and libtype. kwargs can be any of the same kwargs in
            :func:`plexapi.library.LibraySection.search()` to help narrow down the choices
            to only those that matter in your current context.

            Parameters:
                category (str): Category to list choices for (genre, contentRating, etc).
                libtype (int): Library type of item filter.
                **kwargs (dict): Additional kwargs to narrow down the choices.

            Raises:
                :class:`plexapi.exceptions.BadRequest`: Cannot include kwarg equal to specified category.
        """
        # TODO: Should this be moved to base?
        if category in kwargs:
            raise BadRequest('Cannot include kwarg equal to specified category: %s' % category)
        args = {}
        for subcategory, value in kwargs.items():
            # BUG FIX: key the cleaned filter by its own subcategory; the
            # original wrote args[category], overwriting one wrong key on
            # every loop iteration.
            args[subcategory] = self._cleanSearchFilter(subcategory, value)
        if libtype is not None:
            args['type'] = utils.searchType(libtype)
        key = '/library/sections/%s/%s%s' % (self.key, category, utils.joinArgs(args))
        return self.fetchItems(key, cls=FilterChoice)

    def search(self, title=None, sort=None, maxresults=None,
               libtype=None, container_start=0, container_size=X_PLEX_CONTAINER_SIZE, **kwargs):
        """ Search the library. The http requests will be batched in container_size. If you're only looking for the
            first <num> results, it would be wise to set the maxresults option to that amount so this functions
            doesn't iterate over all results on the server.

            Parameters:
                title (str): General string query to search for (optional).
                sort (str): column:dir; column can be any of {addedAt, originallyAvailableAt, lastViewedAt,
                    titleSort, rating, mediaHeight, duration}. dir can be asc or desc (optional).
                maxresults (int): Only return the specified number of results (optional).
                libtype (str): Filter results to a spcifiec libtype (movie, show, episode, artist,
                    album, track; optional).
                container_start (int): default 0
                container_size (int): default X_PLEX_CONTAINER_SIZE in your config file.
                **kwargs (dict): Any of the available filters for the current library section. Partial string
                    matches allowed. Multiple matches OR together. Negative filtering also possible, just add an
                    exclamation mark to the end of filter name, e.g. `resolution!=1x1`.

                    * unwatched: Display or hide unwatched content (True, False). [all]
                    * duplicate: Display or hide duplicate items (True, False). [movie]
                    * actor: List of actors to search ([actor_or_id, ...]). [movie]
                    * collection: List of collections to search within ([collection_or_id, ...]). [all]
                    * contentRating: List of content ratings to search within ([rating_or_key, ...]). [movie,tv]
                    * country: List of countries to search within ([country_or_key, ...]). [movie,music]
                    * decade: List of decades to search within ([yyy0, ...]). [movie]
                    * director: List of directors to search ([director_or_id, ...]). [movie]
                    * genre: List Genres to search within ([genere_or_id, ...]). [all]
                    * network: List of TV networks to search within ([resolution_or_key, ...]). [tv]
                    * resolution: List of video resolutions to search within ([resolution_or_key, ...]). [movie]
                    * studio: List of studios to search within ([studio_or_key, ...]). [music]
                    * year: List of years to search within ([yyyy, ...]). [all]

            Raises:
                :class:`plexapi.exceptions.BadRequest`: when applying unknown filter
        """
        # cleanup the core arguments
        args = {}
        for category, value in kwargs.items():
            args[category] = self._cleanSearchFilter(category, value, libtype)
        if title is not None:
            args['title'] = title
        if sort is not None:
            args['sort'] = self._cleanSearchSort(sort)
        if libtype is not None:
            args['type'] = utils.searchType(libtype)

        results = []
        subresults = []
        offset = container_start

        if maxresults is not None:
            container_size = min(container_size, maxresults)

        while True:
            key = '/library/sections/%s/all%s' % (self.key, utils.joinArgs(args))
            subresults = self.fetchItems(key, container_start=container_start,
                                         container_size=container_size)
            if not subresults:
                if offset > self.totalSize:
                    log.info("container_start is higher then the number of items in the library")
                break

            results.extend(subresults)

            # self.totalSize is not used as a condition in the while loop as
            # this require a additional http request.
            # self.totalSize is updated from .fetchItems
            wanted_number_of_items = self.totalSize - offset
            if maxresults is not None:
                wanted_number_of_items = min(maxresults, wanted_number_of_items)
                container_size = min(container_size, maxresults - len(results))

            if wanted_number_of_items <= len(results):
                break

            container_start += container_size

        return results

    def _cleanSearchFilter(self, category, value, libtype=None):
        """ Validate a filter category and translate its value(s) into the
            comma-joined key string expected by the Plex search endpoint.
        """
        # check a few things before we begin
        if category.endswith('!'):
            if category[:-1] not in self.ALLOWED_FILTERS:
                raise BadRequest('Unknown filter category: %s' % category[:-1])
        elif category not in self.ALLOWED_FILTERS:
            raise BadRequest('Unknown filter category: %s' % category)
        if category in self.BOOLEAN_FILTERS:
            return '1' if value else '0'
        if not isinstance(value, (list, tuple)):
            value = [value]

        # convert list of values to list of keys or ids
        result = set()
        choices = self.listChoices(category, libtype)
        lookup = {c.title.lower(): unquote(unquote(c.key)) for c in choices}
        allowed = set(c.key for c in choices)
        for item in value:
            item = str((item.id or item.tag) if isinstance(item, MediaTag) else item).lower()
            # find most logical choice(s) to use in url
            if item in allowed:
                result.add(item)
                continue
            if item in lookup:
                result.add(lookup[item])
                continue
            matches = [k for t, k in lookup.items() if item in t]
            if matches:
                # BUG FIX: the original used map(result.add, matches); map() is
                # lazy in Python 3 and the iterator was never consumed, so
                # partial-title matches were silently dropped.
                result.update(matches)
                continue
            # nothing matched; use raw item value
            log.debug('Filter value not listed, using raw item value: %s' % item)
            result.add(item)
        return ','.join(result)

    def _cleanSearchSort(self, sort):
        """ Validate a `column:dir` sort string against ALLOWED_SORT and return
            it in canonical casing.
        """
        sort = '%s:asc' % sort if ':' not in sort else sort
        scol, sdir = sort.lower().split(':')
        lookup = {s.lower(): s for s in self.ALLOWED_SORT}
        if scol not in lookup:
            raise BadRequest('Unknown sort column: %s' % scol)
        if sdir not in ('asc', 'desc'):
            raise BadRequest('Unknown sort dir: %s' % sdir)
        return '%s:%s' % (lookup[scol], sdir)

    def sync(self, policy, mediaSettings, client=None, clientId=None, title=None, sort=None, libtype=None,
             **kwargs):
        """ Add current library section as sync item for specified device.
            See description of :func:`~plexapi.library.LibrarySection.search()` for details about filtering / sorting
            and :func:`plexapi.myplex.MyPlexAccount.sync()` for possible exceptions.

            Parameters:
                policy (:class:`plexapi.sync.Policy`): policy of syncing the media (how many items to sync and process
                                                       watched media or not), generated automatically when method
                                                       called on specific LibrarySection object.
                mediaSettings (:class:`plexapi.sync.MediaSettings`): Transcoding settings used for the media, generated
                                                                     automatically when method called on specific
                                                                     LibrarySection object.
                client (:class:`plexapi.myplex.MyPlexDevice`): sync destination, see
                                                               :func:`plexapi.myplex.MyPlexAccount.sync`.
                clientId (str): sync destination, see :func:`plexapi.myplex.MyPlexAccount.sync`.
                title (str): descriptive title for the new :class:`plexapi.sync.SyncItem`, if empty the value would be
                             generated from metadata of current media.
                sort (str): formatted as `column:dir`; column can be any of {`addedAt`, `originallyAvailableAt`,
                            `lastViewedAt`, `titleSort`, `rating`, `mediaHeight`, `duration`}. dir can be `asc` or
                            `desc`.
                libtype (str): Filter results to a specific libtype (`movie`, `show`, `episode`, `artist`, `album`,
                               `track`).

            Returns:
                :class:`plexapi.sync.SyncItem`: an instance of created syncItem.

            Raises:
                :class:`plexapi.exceptions.BadRequest`: when the library is not allowed to sync

            Example:

                .. code-block:: python

                    from plexapi import myplex
                    from plexapi.sync import Policy, MediaSettings, VIDEO_QUALITY_3_MBPS_720p

                    c = myplex.MyPlexAccount()
                    target = c.device('Plex Client')
                    sync_items_wd = c.syncItems(target.clientIdentifier)
                    srv = c.resource('Server Name').connect()
                    section = srv.library.section('Movies')
                    policy = Policy('count', unwatched=True, value=1)
                    media_settings = MediaSettings.create(VIDEO_QUALITY_3_MBPS_720p)
                    section.sync(target, policy, media_settings, title='Next best movie', sort='rating:desc')
        """
        from plexapi.sync import SyncItem

        if not self.allowSync:
            raise BadRequest('The requested library is not allowed to sync')

        args = {}
        for category, value in kwargs.items():
            args[category] = self._cleanSearchFilter(category, value, libtype)
        if sort is not None:
            args['sort'] = self._cleanSearchSort(sort)
        if libtype is not None:
            args['type'] = utils.searchType(libtype)

        myplex = self._server.myPlexAccount()
        sync_item = SyncItem(self._server, None)
        sync_item.title = title if title else self.title
        sync_item.rootTitle = self.title
        sync_item.contentType = self.CONTENT_TYPE
        sync_item.metadataType = self.METADATA_TYPE
        sync_item.machineIdentifier = self._server.machineIdentifier

        key = '/library/sections/%s/all' % self.key

        sync_item.location = 'library://%s/directory/%s' % (self.uuid, quote_plus(key + utils.joinArgs(args)))
        sync_item.policy = policy
        sync_item.mediaSettings = mediaSettings

        return myplex.sync(client=client, clientId=clientId, sync_item=sync_item)

    def history(self, maxresults=9999999, mindate=None):
        """ Get Play History for this library Section for the owner.

            Parameters:
                maxresults (int): Only return the specified number of results (optional).
                mindate (datetime): Min datetime to return results from.
        """
        return self._server.history(maxresults=maxresults, mindate=mindate, librarySectionID=self.key, accountID=1)
class MovieSection(LibrarySection):
    """ A :class:`~plexapi.library.LibrarySection` holding movies.

        Attributes:
            ALLOWED_FILTERS (list<str>): List of allowed search filters. ('unwatched',
                'duplicate', 'year', 'decade', 'genre', 'contentRating', 'collection',
                'director', 'actor', 'country', 'studio', 'resolution', 'guid', 'label')
            ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt',
                'originallyAvailableAt', 'lastViewedAt', 'titleSort', 'rating',
                'mediaHeight', 'duration')
            TAG (str): 'Directory'
            TYPE (str): 'movie'
    """
    ALLOWED_FILTERS = (
        'unwatched', 'duplicate', 'year', 'decade', 'genre', 'contentRating',
        'collection', 'director', 'actor', 'country', 'studio', 'resolution',
        'guid', 'label', 'writer', 'producer', 'subtitleLanguage', 'audioLanguage',
        'lastViewedAt', 'viewCount', 'addedAt',
    )
    ALLOWED_SORT = (
        'addedAt', 'originallyAvailableAt', 'lastViewedAt', 'titleSort', 'rating',
        'mediaHeight', 'duration',
    )
    TAG = 'Directory'
    TYPE = 'movie'
    METADATA_TYPE = 'movie'
    CONTENT_TYPE = 'video'

    def collection(self, **kwargs):
        """ Returns a list of collections from this library section. """
        return self.search(libtype='collection', **kwargs)

    def sync(self, videoQuality, limit=None, unwatched=False, **kwargs):
        """ Add this Movie library section as a sync item for the specified device.
            See :func:`plexapi.library.LibrarySection.search()` for filtering / sorting options and
            :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.

            Parameters:
                videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in
                                    :mod:`plexapi.sync` module.
                limit (int): maximum count of movies to sync, unlimited if `None`.
                unwatched (bool): if `True` watched videos wouldn't be synced.

            Returns:
                :class:`plexapi.sync.SyncItem`: an instance of created syncItem.

            Example:

                .. code-block:: python

                    from plexapi import myplex
                    from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p

                    c = myplex.MyPlexAccount()
                    target = c.device('Plex Client')
                    sync_items_wd = c.syncItems(target.clientIdentifier)
                    srv = c.resource('Server Name').connect()
                    section = srv.library.section('Movies')
                    section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True,
                                 title='Next best movie', sort='rating:desc')
        """
        from plexapi.sync import MediaSettings, Policy
        kwargs.update({
            'mediaSettings': MediaSettings.createVideo(videoQuality),
            'policy': Policy.create(limit, unwatched),
        })
        return super(MovieSection, self).sync(**kwargs)
class ShowSection(LibrarySection):
    """ A :class:`~plexapi.library.LibrarySection` holding tv shows.

        Attributes:
            ALLOWED_FILTERS (list<str>): List of allowed search filters. ('unwatched',
                'year', 'genre', 'contentRating', 'network', 'collection', 'guid', 'label')
            ALLOWED_SORT (list<str>): List of allowed sorting keys. ('addedAt', 'lastViewedAt',
                'originallyAvailableAt', 'titleSort', 'rating', 'unwatched')
            TAG (str): 'Directory'
            TYPE (str): 'show'
    """
    ALLOWED_FILTERS = (
        'unwatched', 'year', 'genre', 'contentRating', 'network', 'collection',
        'guid', 'duplicate', 'label', 'show.title', 'show.year', 'show.userRating',
        'show.viewCount', 'show.lastViewedAt', 'show.actor', 'show.addedAt', 'episode.title',
        'episode.originallyAvailableAt', 'episode.resolution', 'episode.subtitleLanguage',
        'episode.unwatched', 'episode.addedAt', 'episode.userRating', 'episode.viewCount',
        'episode.lastViewedAt',
    )
    ALLOWED_SORT = (
        'addedAt', 'lastViewedAt', 'originallyAvailableAt', 'titleSort',
        'rating', 'unwatched',
    )
    TAG = 'Directory'
    TYPE = 'show'
    METADATA_TYPE = 'episode'
    CONTENT_TYPE = 'video'

    def searchShows(self, **kwargs):
        """ Search for a show. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
        return self.search(libtype='show', **kwargs)

    def searchEpisodes(self, **kwargs):
        """ Search for an episode. See :func:`~plexapi.library.LibrarySection.search()` for usage. """
        return self.search(libtype='episode', **kwargs)

    def recentlyAdded(self, libtype='episode', maxresults=50):
        """ Returns a list of recently added episodes from this library section.

            Parameters:
                libtype (str): Limit results to this type (default 'episode').
                maxresults (int): Max number of items to return (default 50).
        """
        args = {'sort': 'addedAt:desc', 'libtype': libtype, 'maxresults': maxresults}
        return self.search(**args)

    def collection(self, **kwargs):
        """ Returns a list of collections from this library section. """
        return self.search(libtype='collection', **kwargs)

    def sync(self, videoQuality, limit=None, unwatched=False, **kwargs):
        """ Add this Show library section as a sync item for the specified device.
            See :func:`plexapi.library.LibrarySection.search()` for filtering / sorting options and
            :func:`plexapi.library.LibrarySection.sync()` for details on syncing libraries and possible exceptions.

            Parameters:
                videoQuality (int): idx of quality of the video, one of VIDEO_QUALITY_* values defined in
                                    :mod:`plexapi.sync` module.
                limit (int): maximum count of episodes to sync, unlimited if `None`.
                unwatched (bool): if `True` watched videos wouldn't be synced.

            Returns:
                :class:`plexapi.sync.SyncItem`: an instance of created syncItem.

            Example:

                .. code-block:: python

                    from plexapi import myplex
                    from plexapi.sync import VIDEO_QUALITY_3_MBPS_720p

                    c = myplex.MyPlexAccount()
                    target = c.device('Plex Client')
                    sync_items_wd = c.syncItems(target.clientIdentifier)
                    srv = c.resource('Server Name').connect()
                    section = srv.library.section('TV-Shows')
                    section.sync(VIDEO_QUALITY_3_MBPS_720p, client=target, limit=1, unwatched=True,
                                 title='Next unwatched episode')
        """
        from plexapi.sync import MediaSettings, Policy
        kwargs.update({
            'mediaSettings': MediaSettings.createVideo(videoQuality),
            'policy': Policy.create(limit, unwatched),
        })
        return super(ShowSection, self).sync(**kwargs)
class MusicSection(LibrarySection):
    """ A :class:`~plexapi.library.LibrarySection` holding music artists, albums and tracks.

        Attributes:
            ALLOWED_FILTERS (list<str>): Search filters accepted by this section.
            ALLOWED_SORT (list<str>): Sort keys accepted by this section.
            TAG (str): 'Directory'
            TYPE (str): 'artist'
    """
    ALLOWED_FILTERS = ('genre', 'country', 'collection', 'mood', 'year', 'track.userRating', 'artist.title',
                       'artist.userRating', 'artist.genre', 'artist.country', 'artist.collection', 'artist.addedAt',
                       'album.title', 'album.userRating', 'album.genre', 'album.decade', 'album.collection',
                       'album.viewCount', 'album.lastViewedAt', 'album.studio', 'album.addedAt', 'track.title',
                       'track.userRating', 'track.viewCount', 'track.lastViewedAt', 'track.skipCount',
                       'track.lastSkippedAt')
    ALLOWED_SORT = ('addedAt', 'lastViewedAt', 'viewCount', 'titleSort', 'userRating')
    TAG = 'Directory'
    TYPE = 'artist'
    CONTENT_TYPE = 'audio'
    METADATA_TYPE = 'track'

    def albums(self):
        """ List every :class:`~plexapi.audio.Album` in this section. """
        endpoint = '/library/sections/%s/albums' % self.key
        return self.fetchItems(endpoint)

    def searchArtists(self, **kwargs):
        """ Search artists; same keywords as :func:`~plexapi.library.LibrarySection.search()`. """
        found = self.search(libtype='artist', **kwargs)
        return found

    def searchAlbums(self, **kwargs):
        """ Search albums; same keywords as :func:`~plexapi.library.LibrarySection.search()`. """
        found = self.search(libtype='album', **kwargs)
        return found

    def searchTracks(self, **kwargs):
        """ Search tracks; same keywords as :func:`~plexapi.library.LibrarySection.search()`. """
        found = self.search(libtype='track', **kwargs)
        return found

    def collection(self, **kwargs):
        """ List the collections contained in this library section. """
        found = self.search(libtype='collection', **kwargs)
        return found

    def sync(self, bitrate, limit=None, **kwargs):
        """ Add the current Music library section as a sync item for a device.

            See :func:`plexapi.library.LibrarySection.search()` for filtering /
            sorting keywords and :func:`plexapi.library.LibrarySection.sync()`
            for details on syncing libraries and possible exceptions.

            Parameters:
                bitrate (int): maximum bitrate for synchronized music, better use one
                    of MUSIC_BITRATE_* values from the module :mod:`plexapi.sync`.
                limit (int): maximum count of tracks to sync, unlimited if `None`.

            Returns:
                :class:`plexapi.sync.SyncItem`: an instance of created syncItem.

            Example:

                .. code-block:: python

                    from plexapi import myplex
                    from plexapi.sync import AUDIO_BITRATE_320_KBPS

                    c = myplex.MyPlexAccount()
                    target = c.device('Plex Client')
                    sync_items_wd = c.syncItems(target.clientIdentifier)
                    srv = c.resource('Server Name').connect()
                    section = srv.library.section('Music')
                    section.sync(AUDIO_BITRATE_320_KBPS, client=target, limit=100, sort='addedAt:desc',
                                 title='New music')
        """
        from plexapi.sync import Policy, MediaSettings
        kwargs.update(mediaSettings=MediaSettings.createMusic(bitrate),
                      policy=Policy.create(limit))
        return super(MusicSection, self).sync(**kwargs)
class PhotoSection(LibrarySection):
    """ A :class:`~plexapi.library.LibrarySection` holding photos.

        Attributes:
            ALLOWED_FILTERS (list<str>): Search filters accepted by this section.
            ALLOWED_SORT (list<str>): Sort keys accepted by this section.
            TAG (str): 'Directory'
            TYPE (str): 'photo'
    """
    ALLOWED_FILTERS = ('all', 'iso', 'make', 'lens', 'aperture', 'exposure', 'device', 'resolution', 'place',
                       'originallyAvailableAt', 'addedAt', 'title', 'userRating', 'tag', 'year')
    ALLOWED_SORT = ('addedAt',)
    TAG = 'Directory'
    TYPE = 'photo'
    CONTENT_TYPE = 'photo'
    METADATA_TYPE = 'photo'

    def searchAlbums(self, title, **kwargs):
        """ Search photo albums; same keywords as :func:`~plexapi.library.LibrarySection.search()`. """
        found = self.search(libtype='photoalbum', title=title, **kwargs)
        return found

    def searchPhotos(self, title, **kwargs):
        """ Search photos; same keywords as :func:`~plexapi.library.LibrarySection.search()`. """
        found = self.search(libtype='photo', title=title, **kwargs)
        return found

    def sync(self, resolution, limit=None, **kwargs):
        """ Add the current Photo library section as a sync item for a device.

            See :func:`plexapi.library.LibrarySection.search()` for filtering /
            sorting keywords and :func:`plexapi.library.LibrarySection.sync()`
            for details on syncing libraries and possible exceptions.

            Parameters:
                resolution (str): maximum allowed resolution for synchronized photos,
                    see PHOTO_QUALITY_* values in the module :mod:`plexapi.sync`.
                limit (int): maximum count of photos to sync, unlimited if `None`.

            Returns:
                :class:`plexapi.sync.SyncItem`: an instance of created syncItem.

            Example:

                .. code-block:: python

                    from plexapi import myplex
                    from plexapi.sync import PHOTO_QUALITY_HIGH

                    c = myplex.MyPlexAccount()
                    target = c.device('Plex Client')
                    sync_items_wd = c.syncItems(target.clientIdentifier)
                    srv = c.resource('Server Name').connect()
                    section = srv.library.section('Photos')
                    section.sync(PHOTO_QUALITY_HIGH, client=target, limit=100, sort='addedAt:desc',
                                 title='Fresh photos')
        """
        from plexapi.sync import Policy, MediaSettings
        kwargs.update(mediaSettings=MediaSettings.createPhoto(resolution),
                      policy=Policy.create(limit))
        return super(PhotoSection, self).sync(**kwargs)
class FilterChoice(PlexObject):
    """ A single selectable filter choice, as returned in the result set of
        :func:`~plexapi.library.LibrarySection.listChoices()` when searching
        library items with filters.

        Attributes:
            TAG (str): 'Directory'
            server (:class:`~plexapi.server.PlexServer`): PlexServer this client is connected to.
            initpath (str): Relative path requested when retrieving specified `data` (optional).
            fastKey (str): API path to quickly list all items in this filter
                (/library/sections/<section>/all?genre=<key>)
            key (str): Short key (id) of this filter option (used ad <key> in fastKey above).
            thumb (str): Thumbnail used to represent this filter option.
            title (str): Human readable name for this filter option.
            type (str): Filter type (genre, contentRating, etc).
    """
    TAG = 'Directory'

    def _loadData(self, data):
        """ Populate attribute values from a Plex XML element. """
        self._data = data
        get = data.attrib.get
        self.fastKey = get('fastKey')
        self.key = get('key')
        self.thumb = get('thumb')
        self.title = get('title')
        self.type = get('type')
@utils.registerPlexObject
class Hub(PlexObject):
    """ A single Hub (or category) in the PlexServer search results.

        Attributes:
            TAG (str): 'Hub'
            hubIdentifier (str): Unknown.
            size (int): Number of items found.
            title (str): Title of this Hub.
            type (str): Type of items in the Hub.
            items (str): List of items in the Hub.
    """
    TAG = 'Hub'

    def _loadData(self, data):
        """ Populate attribute values from a Plex XML element. """
        self._data = data
        attrs = data.attrib
        self.hubIdentifier = attrs.get('hubIdentifier')
        self.size = utils.cast(int, attrs.get('size'))
        self.title = attrs.get('title')
        self.type = attrs.get('type')
        self.key = attrs.get('key')
        self.items = self.findItems(data)

    def __len__(self):
        # A Hub's length is the number of items the server reported.
        return self.size
@utils.registerPlexObject
class Collections(PlexObject):
    """ Represents a single Collection of media items.

        Attributes:
            TAG (str): 'Directory'
            TYPE (str): 'collection'
            ratingKey (int): Unique key identifying this collection.
            title (str): Title of the collection.
            childCount (int): Number of items in the collection.
    """
    TAG = 'Directory'
    TYPE = 'collection'
    _include = "?includeExternalMedia=1&includePreferences=1"

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self.ratingKey = utils.cast(int, data.attrib.get('ratingKey'))
        self._details_key = "/library/metadata/%s%s" % (self.ratingKey, self._include)
        self.key = data.attrib.get('key')
        self.type = data.attrib.get('type')
        self.title = data.attrib.get('title')
        self.subtype = data.attrib.get('subtype')
        self.summary = data.attrib.get('summary')
        self.index = utils.cast(int, data.attrib.get('index'))
        self.thumb = data.attrib.get('thumb')
        self.addedAt = utils.toDatetime(data.attrib.get('addedAt'))
        self.updatedAt = utils.toDatetime(data.attrib.get('updatedAt'))
        self.childCount = utils.cast(int, data.attrib.get('childCount'))
        self.minYear = utils.cast(int, data.attrib.get('minYear'))
        self.maxYear = utils.cast(int, data.attrib.get('maxYear'))
        self.collectionMode = data.attrib.get('collectionMode')
        self.collectionSort = data.attrib.get('collectionSort')

    @property
    def children(self):
        """ Return the items contained in this collection. """
        return self.fetchItems(self.key)

    def __len__(self):
        return self.childCount

    def delete(self):
        """ Delete this collection from the server. """
        part = '/library/metadata/%s' % self.ratingKey
        return self._server.query(part, method=self._server._session.delete)

    def modeUpdate(self, mode=None):
        """ Update the collection mode.

            Parameters:
                mode (str): One of:
                    default (Library default)
                    hide (Hide Collection)
                    hideItems (Hide Items in this Collection)
                    showItems (Show this Collection and its Items)

            Raises:
                BadRequest: If the given mode is not one of the options above.

            Example:

                collection.modeUpdate(mode="hide")
        """
        mode_dict = {'default': '-2',
                     'hide': '0',
                     'hideItems': '1',
                     'showItems': '2'}
        key = mode_dict.get(mode)
        if key is None:
            raise BadRequest('Unknown collection mode : %s. Options %s' % (mode, list(mode_dict)))
        part = '/library/metadata/%s/prefs?collectionMode=%s' % (self.ratingKey, key)
        return self._server.query(part, method=self._server._session.put)

    def sortUpdate(self, sort=None):
        """ Update the collection sort order.

            Parameters:
                sort (str): One of:
                    release (Order Collection by release dates)
                    alpha (Order Collection alphabetically)

            Raises:
                BadRequest: If the given sort is not one of the options above.

            Example:

                collection.sortUpdate(sort="alpha")
        """
        sort_dict = {'release': '0',
                     'alpha': '1'}
        key = sort_dict.get(sort)
        if key is None:
            raise BadRequest('Unknown sort dir: %s. Options: %s' % (sort, list(sort_dict)))
        part = '/library/metadata/%s/prefs?collectionSort=%s' % (self.ratingKey, key)
        return self._server.query(part, method=self._server._session.put)

    def posters(self):
        """ Returns list of available poster objects. :class:`~plexapi.media.Poster`. """
        return self.fetchItems('/library/metadata/%s/posters' % self.ratingKey)

    def uploadPoster(self, url=None, filepath=None):
        """ Upload a poster from a url or local file path. """
        if url:
            key = '/library/metadata/%s/posters?url=%s' % (self.ratingKey, quote_plus(url))
            self._server.query(key, method=self._server._session.post)
        elif filepath:
            key = '/library/metadata/%s/posters?' % self.ratingKey
            # Context manager ensures the file handle is closed (it was leaked before).
            with open(filepath, 'rb') as handle:
                data = handle.read()
            self._server.query(key, method=self._server._session.post, data=data)

    def setPoster(self, poster):
        """ Set the given :class:`~plexapi.media.Poster` as this collection's poster. """
        poster.select()

    def arts(self):
        """ Returns list of available art objects. :class:`~plexapi.media.Poster`. """
        return self.fetchItems('/library/metadata/%s/arts' % self.ratingKey)

    def uploadArt(self, url=None, filepath=None):
        """ Upload background art from a url or local file path. """
        if url:
            key = '/library/metadata/%s/arts?url=%s' % (self.ratingKey, quote_plus(url))
            self._server.query(key, method=self._server._session.post)
        elif filepath:
            key = '/library/metadata/%s/arts?' % self.ratingKey
            # Context manager ensures the file handle is closed (it was leaked before).
            with open(filepath, 'rb') as handle:
                data = handle.read()
            self._server.query(key, method=self._server._session.post, data=data)

    def setArt(self, art):
        """ Set the given :class:`~plexapi.media.Poster` as this collection's art. """
        art.select()

    # def edit(self, **kwargs):
    #     TODO
|
bsd-3-clause
| 3,471,805,347,811,345,400
| 49.618771
| 127
| 0.599278
| false
|
Martin09/E-BeamPatterns
|
100 Wafers - 1cm Squares/Multi-Use Pattern/v1.4/MembraneDesign_100Wafer_v1.4.py
|
1
|
20307
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 14:11:31 2015
@author: Martin Friedl
"""
import itertools
from datetime import date
from random import choice as random_choice
import numpy as np
from Patterns.GrowthTheoryCell import make_theory_cell
from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br
from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br
from Patterns.QuantumPlayground_100_v1 import make_qp
from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path
from gdsCAD_py3.shapes import Box, Rectangle, Label
from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line
# --- Global pattern parameters -------------------------------------------
WAFER_ID = '000050254318SL'  # CHANGE THIS FOR EACH DIFFERENT WAFER
PATTERN = 'SQ1.4'  # Pattern revision string written onto each chip label
putOnWafer = True  # Output full wafer or just a single pattern?
HighDensity = False  # High density of triangles?
glbAlignmentMarks = False  # Whether to add wafer-level alignment marks (currently unused below)
tDicingMarks = 10.  # Dicing mark line thickness (um)
rotAngle = 0.  # Rotation angle of the membranes
wafer_r = 25e3  # Wafer radius in um (2-inch wafer)
# NOTE(review): the .format() argument is ignored -- the string contains no
# placeholder; presumably a leftover from an earlier parameterized label.
waferVer = '100 Membranes Multi-Use v1.4'.format(int(wafer_r / 1000))
waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y")
# Layers
l_smBeam = 0  # fine-feature (small beam) exposure layer
l_lgBeam = 1  # coarse-feature (large beam) exposure layer
l_drawing = 100  # drawing/annotation layer (used for outline and label; presumably not exposed -- verify)
# %% Wafer template for MBE growth
class MBE100Wafer(Wafer_GridStyle):
    """
    A 2" wafer divided into square cells.

    Builds the full wafer layout in __init__: block grid, outline, dicing
    marks, labels, pre-alignment markers, TEM membranes, theory cells and
    per-chip labels.
    """

    def __init__(self, name, cells=None):
        # Initialize the grid-style wafer; block_gap is the spacing between blocks (um).
        Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.)
        # The placement of the wafer alignment markers
        am_x = 1.5e4
        am_y = 1.5e4
        self.align_pts = np.array([am_x, am_y])
        self.align_pts = np.vstack((self.align_pts, self.align_pts *
                                    (-1, 1)))  # Reflect about y-axis
        self.align_pts = np.vstack((self.align_pts, self.align_pts *
                                    (1, -1)))  # Reflect about x-axis
        self.wafer_r = 25e3
        self.block_size = np.array([10e3, 10e3])
        # Place the square blocks inside a circle slightly larger than the wafer.
        self._place_blocks(radius=self.wafer_r + 5e3)
        # if glbAlignmentMarks:
        #     self.add_aligment_marks(l_lgBeam)
        #     self.add_orientation_text(l_lgBeam)
        # self.add_dicing_marks()  # l_lgBeam, mkWidth=mkWidth Width of dicing marks
        self.add_blocks()
        self.add_wafer_outline(layers=l_drawing)
        self.add_dashed_dicing_marks(layers=[l_lgBeam])
        self.add_subdicing_marks(200, 5, layers=[l_lgBeam])
        self.add_block_labels(l_lgBeam, quasi_unique_labels=True)
        self.add_prealignment_markers(layers=[l_lgBeam])
        self.add_tem_membranes([0.02, 0.04, 0.06, 0.08], 500, 1, l_smBeam)
        self.add_theory_cells()
        self.add_chip_labels()
        # self.add_blockLabels(l_lgBeam)
        # self.add_cellLabels(l_lgBeam)
        bottom = np.array([0, -self.wafer_r * 0.9])
        # top = np.array([0, -1]) * bottom
        self.add_waferLabel(waferLabel, l_drawing, pos=bottom)

    def add_block_labels(self, layers, quasi_unique_labels=False):
        """ Label each block, either with random two-character ids
            (quasi_unique_labels=True) or with row/column coordinates. """
        if type(layers) is not list:
            layers = [layers]
        txtSize = 800
        if quasi_unique_labels:
            unique_label_string = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
            possible_labels = ["".join(x) for x in itertools.product(unique_label_string, repeat=2)]
            # Draw random two-character ids until there is one per block
            # (set membership guarantees uniqueness).
            blockids_set = set()
            while len(blockids_set) < len(self.blocks):
                blockids_set.add(random_choice(possible_labels))
            blockids = list(blockids_set)
            for i, block in enumerate(self.blocks):
                blocklabel = Cell('LBL_B_' + blockids[i])
                for l in layers:
                    txt = Label(blockids[i], txtSize, layer=l)
                    bbox = txt.bounding_box
                    offset = (0, 0)
                    txt.translate(-np.mean(bbox, 0))  # Center text around origin
                    txt.translate(offset)  # Translate it to bottom of wafer
                    blocklabel.add(txt)
                # Place the label at the center of the block.
                block.add(blocklabel, origin=(self.block_size[0] / 2., self.block_size[1] / 2.))
        else:
            for (i, pt) in enumerate(self.block_pts):
                origin = (pt + np.array([0.5, 0.5])) * self.block_size
                blk_lbl = self.blockcols[pt[0]] + self.blockrows[pt[1]]
                for l in layers:
                    # NOTE(review): uses the global l_lgBeam rather than the loop
                    # variable `l` -- possibly unintended; verify before relying on it.
                    txt = Label(blk_lbl, txtSize, layer=l_lgBeam)
                    bbox = txt.bounding_box
                    offset = np.array(pt)  # NOTE(review): computed but apparently unused
                    txt.translate(-np.mean(bbox, 0))  # Center text around origin
                    lbl_cell = Cell("lbl_" + blk_lbl)
                    lbl_cell.add(txt)
                    origin += np.array([0, 2000])  # Translate it up by 2mm
                    self.add(lbl_cell, origin=origin)

    def add_dashed_dicing_marks(self, layers):
        """ Draw dashed dicing lines across the wafer on the block grid,
            clipped to the wafer circle (x^2 + y^2 = r^2). """
        if type(layers) is not list:
            layers = [layers]
        width = 10. / 2
        dashlength = 2000
        r = self.wafer_r
        rng = np.floor(self.wafer_r / self.block_size).astype(int)
        dmarks = Cell('DIC_MRKS')
        for l in layers:
            for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]:
                y = np.sqrt(r ** 2 - x ** 2)  # chord endpoint on wafer circle
                vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l)
                dmarks.add(vm)
            for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]:
                x = np.sqrt(r ** 2 - y ** 2)
                hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l)
                dmarks.add(hm)
        self.add(dmarks)

    def add_subdicing_marks(self, length, width, layers):
        """ Add short tick marks at the midpoint of each block edge, to help
            cleave a block into quarters. """
        if type(layers) is not list:
            layers = [layers]
        for l in layers:
            mark_cell = Cell("SubdicingMark")
            line = Path([[0, 0], [0, length]], width=width, layer=l)
            mark_cell.add(line)
            for block in self.blocks:
                # One mark per edge, rotated to point into the block.
                block.add(mark_cell, origin=(self.block_size[0] / 2., 0), rotation=0)
                block.add(mark_cell, origin=(0, self.block_size[1] / 2.), rotation=-90)
                block.add(mark_cell, origin=(self.block_size[0], self.block_size[1] / 2.), rotation=90)
                block.add(mark_cell, origin=(self.block_size[0] / 2., self.block_size[1]), rotation=180)

    def add_prealignment_markers(self, layers, mrkr_size=7):
        """ Build a PAMM (pre-alignment marker matrix) cross of small squares
            and place two copies near the center of every block.

            Parameters:
                layers: layer(s) to draw on.
                mrkr_size (int): markers per arm axis; forced odd so the
                    array has a unique center.
        """
        if mrkr_size % 2 == 0:  # Number is even, but we need odd numbers
            mrkr_size += 1
        if type(layers) is not list:
            layers = [layers]
        for l in layers:
            rect_size = 10.  # 10 um large PAMM rectangles
            marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l)
            marker = Cell('10umMarker')
            marker.add(marker_rect)
            # Make one arm of the PAMM array
            marker_arm = Cell('PAMM_Arm')
            # Define the positions of the markers, they increase in spacing by 1 um each time:
            mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)]
            for pos in mrkr_positions:
                marker_arm.add(marker, origin=[pos, 0])
            # Build the final PAMM Marker
            pamm_cell = Cell('PAMM_Marker')
            pamm_cell.add(marker)  # Center marker
            pamm_cell.add(marker_arm)  # Right arm
            pamm_cell.add(marker_arm, rotation=180)  # Left arm
            pamm_cell.add(marker_arm, rotation=90)  # Top arm
            pamm_cell.add(marker_arm, rotation=-90)  # Bottom arm
            for pos in mrkr_positions:
                pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90)  # Top arms
                pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90)
                pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90)  # Bottom arms
                pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90)
            # Make the 4 tick marks that mark the center of the array
            h = 30.
            w = 100.
            tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l)
            tick_mrk_cell = Cell("TickMark")
            tick_mrk_cell.add(tick_mrk)
            pos = mrkr_positions[-1] + 75 + w / 2.
            pamm_cell.add(tick_mrk_cell, origin=[pos, 0])
            pamm_cell.add(tick_mrk_cell, origin=[-pos, 0])
            pamm_cell.add(tick_mrk_cell, origin=[0, pos], rotation=90)
            pamm_cell.add(tick_mrk_cell, origin=[0, -pos], rotation=90)
        center_x, center_y = (5000, 5000)
        for block in self.blocks:
            # Two copies, left and right of block center.
            block.add(pamm_cell, origin=(center_x + 2000, center_y))
            block.add(pamm_cell, origin=(center_x - 2000, center_y))

    def add_tem_membranes(self, widths, length, pitch, layer):
        """ Add arrays of thin horizontal lines (TEM membranes), one array per
            requested width, repeated 3x and also rotated by 45 degrees.

            Parameters:
                widths (list<float>): line widths in um.
                length (float): line length in um.
                pitch (float): vertical spacing between lines in um.
                layer: exposure layer for the lines.
        """
        tem_membranes = Cell('TEM_Membranes')
        n = 4  # lines per width
        curr_y = 0
        for width in widths:
            membrane = Path([(-length / 2., 0), (length / 2., 0)], width=width, layer=layer)
            membrane_cell = Cell('Membrane_w{:.0f}'.format(width * 1000))
            membrane_cell.add(membrane)
            membrane_array = CellArray(membrane_cell, 1, n, (0, pitch))
            membrane_array_cell = Cell('MembraneArray_w{:.0f}'.format(width * 1000))
            membrane_array_cell.add(membrane_array)
            tem_membranes.add(membrane_array_cell, origin=(0, curr_y))
            curr_y += n * pitch
        n2 = 3  # repeat the whole stack three times vertically
        tem_membranes2 = Cell('Many_TEM_Membranes')
        tem_membranes2.add(CellArray(tem_membranes, 1, n2, (0, n * len(widths) * pitch)))
        center_x, center_y = (5000, 5000)
        for block in self.blocks:
            block.add(tem_membranes2, origin=(center_x, center_y + 2000))
            block.add(tem_membranes2, origin=(center_x, center_y + 1500), rotation=45)

    def add_theory_cells(self):
        """ Place the imported growth-theory test cells (plain, 3-branch and
            4-branch variants), both unrotated and rotated 45 degrees. """
        theory_cells = Cell('TheoryCells')
        theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-400, 0))
        theory_cells.add(make_theory_cell_3br(), origin=(0, 0))
        theory_cells.add(make_theory_cell_4br(), origin=(400, 0))
        theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-500, -400), rotation=45)
        theory_cells.add(make_theory_cell_3br(), origin=(-50, -400), rotation=45)
        theory_cells.add(make_theory_cell_4br(), origin=(370, -400), rotation=45)
        center_x, center_y = (5000, 5000)
        for block in self.blocks:
            block.add(theory_cells, origin=(center_x, center_y - 1700))

    def add_chip_labels(self):
        """ Write the pattern id and wafer id on every block. """
        wafer_lbl = PATTERN + '\n' + WAFER_ID
        text = Label(wafer_lbl, 20., layer=l_lgBeam)
        text.translate(tuple(np.array(-text.bounding_box.mean(0))))  # Center justify label
        chip_lbl_cell = Cell('chip_label')
        chip_lbl_cell.add(text)
        center_x, center_y = (5000, 5000)
        for block in self.blocks:
            block.add(chip_lbl_cell, origin=(center_x, center_y - 2850))
class Frame(Cell):
    """
    Make a frame for writing to with ebeam lithography

    Params:
    -name of the frame, just like when naming a cell
    -size: the size of the frame as an array [xsize,ysize]
    """

    def __init__(self, name, size, border_layers):
        # Normalize a scalar layer argument to a list.
        if not (type(border_layers) == list):
            border_layers = [border_layers]
        Cell.__init__(self, name)
        self.size_x, self.size_y = size
        # Create the border of the cell
        for l in border_layers:
            self.border = Box(
                (-self.size_x / 2., -self.size_y / 2.),
                (self.size_x / 2., self.size_y / 2.),
                1,
                layer=l)
            self.add(self.border)  # Add border to the frame
        self.align_markers = None

    def make_align_markers(self, t, w, position, layers, joy_markers=False, camps_markers=False):
        """ Add alignment markers mirrored into all four quadrants at +/-position.

            Parameters:
                t (float): cross arm thickness (used for JOY-style markers).
                w (float): overall marker width.
                position: (x, y) offset of the marker from the frame center.
                layers: layer(s) to draw on.
                joy_markers (bool): draw cross-shaped (JOY) markers instead of squares.
                camps_markers (bool): additionally add small 20 um squares at the
                    four corners (+/-100, +/-100) around each marker.
        """
        if not (type(layers) == list):
            layers = [layers]
        top_mk_cell = Cell('AlignmentMark')
        for l in layers:
            if not joy_markers:
                am0 = Rectangle((-w / 2., -w / 2.), (w / 2., w / 2.), layer=l)
                rect_mk_cell = Cell("RectMarker")
                rect_mk_cell.add(am0)
                top_mk_cell.add(rect_mk_cell)
            elif joy_markers:
                # Quarter outline of a cross; extended by its point reflection
                # through the origin to close the shape.
                crosspts = [(0, 0), (w / 2., 0), (w / 2., t), (t, t), (t, w / 2), (0, w / 2), (0, 0)]
                crosspts.extend(tuple(map(tuple, (-np.array(crosspts)).tolist())))
                am0 = Boundary(crosspts, layer=l)  # Create gdsCAD shape
                joy_mk_cell = Cell("JOYMarker")
                joy_mk_cell.add(am0)
                top_mk_cell.add(joy_mk_cell)
            if camps_markers:
                emw = 20.  # 20 um e-beam marker width
                camps_mk = Rectangle((-emw / 2., -emw / 2.), (emw / 2., emw / 2.), layer=l)
                camps_mk_cell = Cell("CAMPSMarker")
                camps_mk_cell.add(camps_mk)
                top_mk_cell.add(camps_mk_cell, origin=[100., 100.])
                top_mk_cell.add(camps_mk_cell, origin=[100., -100.])
                top_mk_cell.add(camps_mk_cell, origin=[-100., 100.])
                top_mk_cell.add(camps_mk_cell, origin=[-100., -100.])
        self.align_markers = Cell("AlignMarkers")
        # Mirror the marker into all four quadrants.
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, -1]))
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, -1]))
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, 1]))
        self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, 1]))
        self.add(self.align_markers)

    def make_slit_array(self, _pitches, spacing, _widths, _lengths, rot_angle,
                        array_height, array_width, array_spacing, layers):
        """ Tile labelled arrays of rectangular slits into the frame, one
            sub-array per (width, length) combination, three per row,
            optionally rotated as a whole by rot_angle degrees. """
        if not (type(layers) == list):
            layers = [layers]
        if not (type(_pitches) == list):
            _pitches = [_pitches]
        if not (type(_lengths) == list):
            _lengths = [_lengths]
        if not (type(_widths) == list):
            _widths = [_widths]
        manyslits = i = j = None
        for l in layers:
            i = -1  # column index within a row of sub-arrays
            j = -1  # row index
            manyslits = Cell("SlitArray")
            pitch = _pitches[0]  # only the first pitch is used
            for length in _lengths:
                j += 1
                i = -1
                for width in _widths:
                    # for pitch in pitches:
                    i += 1
                    if i % 3 == 0:
                        j += 1  # Move to array to next line
                        i = 0  # Restart at left
                    nx = int(array_width / (length + spacing))
                    ny = int(array_height / pitch)
                    # Define the slits
                    slit = Cell("Slits")
                    rect = Rectangle((-length / 2., -width / 2.), (length / 2., width / 2.), layer=l)
                    slit.add(rect)
                    slits = CellArray(slit, nx, ny, (length + spacing, pitch))
                    # Center the nx-by-ny array around the origin.
                    slits.translate((-(nx - 1) * (length + spacing) / 2., -(ny - 1) * pitch / 2.))
                    slit_array = Cell("SlitArray")
                    slit_array.add(slits)
                    text = Label('w/p/l\n%i/%i/%i' % (width * 1000, pitch, length), 5, layer=l)
                    lbl_vertical_offset = 1.35
                    # Alternate the label above/below on alternate rows.
                    if j % 2 == 0:
                        text.translate(
                            tuple(np.array(-text.bounding_box.mean(0)) + np.array((
                                0, -array_height / lbl_vertical_offset))))  # Center justify label
                    else:
                        text.translate(
                            tuple(np.array(-text.bounding_box.mean(0)) + np.array((
                                0, array_height / lbl_vertical_offset))))  # Center justify label
                    slit_array.add(text)
                    manyslits.add(slit_array,
                                  origin=((array_width + array_spacing) * i, (
                                      array_height + 2. * array_spacing) * j - array_spacing / 2.))
        # This is an ugly hack to center rotated slits, should fix this properly...
        if rot_angle == 45:  # TODO: fix this ugly thing
            hacky_offset_x = 200
            hacky_offset_y = -25
        elif rot_angle == 90:
            hacky_offset_x = 356
            hacky_offset_y = 96.5
        else:
            hacky_offset_x = 0
            hacky_offset_y = 0
        self.add(manyslits, origin=(-i * (array_width + array_spacing) / 2 + hacky_offset_x,
                                    -(j + 1.5) * (array_height + array_spacing) / 2 + hacky_offset_y),
                 rotation=rot_angle)
# %%Create the pattern that we want to write
lgField = Frame("LargeField", (2000., 2000.), [])  # Create the large write field
lgField.make_align_markers(20., 200., (850., 850.), l_lgBeam, joy_markers=True, camps_markers=True)
# Define parameters that we will use for the slits
widths = [0.01, 0.015, 0.020, 0.030, 0.040, 0.050]  # slit widths (um)
pitches = [2.0, 4.0]  # slit pitches (um)
length = 20.  # slit length (um)
smFrameSize = 400
slitColumnSpacing = 3.
# Create the smaller write field and corresponding markers
# Four small fields: two pitches and three rotation angles (0/45/90 degrees).
smField1 = Frame("SmallField1", (smFrameSize, smFrameSize), [])
smField1.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField1.make_slit_array(pitches[0], slitColumnSpacing, widths, length, 0, 100, 100, 30, l_smBeam)
smField2 = Frame("SmallField2", (smFrameSize, smFrameSize), [])
smField2.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField2.make_slit_array(pitches[0], slitColumnSpacing, widths, length, 45, 100, 100, 30, l_smBeam)
smField3 = Frame("SmallField3", (smFrameSize, smFrameSize), [])
smField3.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField3.make_slit_array(pitches[1], slitColumnSpacing, widths, length, 0, 100, 100, 30, l_smBeam)
smField4 = Frame("SmallField4", (smFrameSize, smFrameSize), [])
smField4.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField4.make_slit_array(pitches[0], slitColumnSpacing, widths, length, 90, 100, 100, 30, l_smBeam)
# Center fields carry the "quantum playground" pattern (plain and rotated 45 degrees).
quantum_playground = make_qp()
centerAlignField = Frame("CenterAlignField", (smFrameSize, smFrameSize), [])
centerAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
centerLeftAlignField = Frame("CenterLeftAlignField", (smFrameSize, smFrameSize), [])
centerLeftAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
centerLeftAlignField.add(quantum_playground)
centerRightAlignField = Frame("CenterRightAlignField", (smFrameSize, smFrameSize), [])
centerRightAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
centerRightAlignField.add(quantum_playground, rotation=45)
# Add everything together to a top cell
topCell = Cell("TopCell")
topCell.add(lgField)
smFrameSpacing = 400  # Spacing between the three small frames
dx = smFrameSpacing + smFrameSize
dy = smFrameSpacing + smFrameSize
# Small fields in the four corners, center fields on the horizontal axis.
topCell.add(smField1, origin=(-dx / 2., dy / 2.))
topCell.add(smField2, origin=(dx / 2., dy / 2.))
topCell.add(smField3, origin=(-dx / 2., -dy / 2.))
topCell.add(smField4, origin=(dx / 2., -dy / 2.))
topCell.add(centerLeftAlignField, origin=(-dx / 2, 0.))
topCell.add(centerRightAlignField, origin=(dx / 2, 0.))
topCell.add(centerAlignField, origin=(0., 0.))
topCell.spacing = np.array([4000., 4000.])
# %%Create the layout and output GDS file
layout = Layout('LIBRARY')
if putOnWafer:  # Fit as many patterns on a 2inch wafer as possible
    wafer = MBE100Wafer('MembranesWafer', cells=[topCell])
    layout.add(wafer)
    # layout.show()
else:  # Only output a single copy of the pattern (not on a wafer)
    layout.add(topCell)
    layout.show()
filestring = str(waferVer) + '_' + WAFER_ID + '_' + date.today().strftime("%d%m%Y") + ' dMark' + str(tDicingMarks)
filename = filestring.replace(' ', '_') + '.gds'
layout.save(filename)
# Also save a single block on its own for block-level jobs.
# NOTE(review): `wafer` is only defined when putOnWafer is True -- this line
# raises NameError otherwise; verify intended usage.
cell_layout = Layout('LIBRARY')
cell_layout.add(wafer.blocks[0])
cell_layout.save(filestring.replace(' ', '_') + '_block' + '.gds')
# Output up chip for doing aligned jobs
layout_field = Layout('LIBRARY')
layout_field.add(topCell)
layout_field.save(filestring.replace(' ', '_') + '_2mmField.gds')
|
gpl-3.0
| -19,201,809,848,424,624
| 42.859611
| 114
| 0.566406
| false
|
Zero-Projects/Mozart
|
mozart/core/validators.py
|
1
|
2098
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django import forms
from django.utils.text import slugify
from django.contrib.auth import authenticate
from mozart.core.messages import custom_error_messages, media_messages
def eval_blank(data):
    """ Reject values whose string form consists only of whitespace. """
    text = str(data)
    if text.isspace():
        raise forms.ValidationError(custom_error_messages['blank'], code='blank')
    return data
def eval_iexact(data, model, field, label):
    """ Ensure ``data`` does not already exist (case-insensitively) on
        ``model.field``; slug fields are slugified and matched exactly. """
    original = data
    model_name = (model._meta.verbose_name).lower()
    field_label = (model._meta.get_field(label).verbose_name).lower()
    if field == 'slug':
        data = slugify(data)
        lookup = field
    else:
        lookup = '%s__iexact' % field
    try:
        model.objects.get(**{lookup: data})
    except model.DoesNotExist:
        # No clash -- return the caller's original (un-slugified) value.
        return original
    raise forms.ValidationError(custom_error_messages['unique'], code='unique',
                                params={'model_name': model_name, 'field_label': field_label})
def eval_matching(data_1, data_2):
    """ Ensure the two values are equal (e.g. password confirmation). """
    if data_1 == data_2:
        return data_1 and data_2
    raise forms.ValidationError(custom_error_messages['mismatch'],)
def eval_password(username, password):
    """ Validate the credentials via Django's authenticate(). """
    if authenticate(username=username, password=password) is None:
        raise forms.ValidationError(custom_error_messages['incorrect_password'])
    return username and password
# Media Validators
def eval_audio(data):
    """ Accept only mp3 uploads; raise a validation error otherwise. """
    if str(data.content_type) == 'audio/mp3':
        return data
    raise forms.ValidationError(media_messages['invalid_audio'],)
def eval_image(data):
    """ Accept only jpeg/bmp/png image uploads.

        Parameters:
            data: an uploaded file object exposing ``content_type``.

        Raises:
            forms.ValidationError: if the content type is not an accepted image type.
    """
    # Tuple membership replaces the chained == / or comparisons.
    if str(data.content_type) in ('image/jpeg', 'image/bmp', 'image/png'):
        return data
    raise forms.ValidationError(media_messages['invalid_image'],)
def eval_general(data):
    """ Accept jpeg/bmp/png images or mp3 audio uploads.

        Parameters:
            data: an uploaded file object exposing ``content_type``.

        Raises:
            forms.ValidationError: if the content type is not an accepted type.
    """
    # Tuple membership replaces the chained == / or comparisons.
    if str(data.content_type) in ('image/jpeg', 'image/bmp', 'image/png', 'audio/mp3'):
        return data
    raise forms.ValidationError(media_messages['invalid_archive'],)
|
bsd-3-clause
| -6,490,742,904,031,472,000
| 29.405797
| 94
| 0.662536
| false
|
exaile/exaile
|
plugins/daapclient/test.py
|
1
|
1484
|
# This file contains some code to test the DAAPClient as stand-alone application.
import sys
import logging
from .client import DAAPClient
log = logging.getLogger(__name__)


def main():
    """ Stand-alone smoke test: connect to a DAAP server given by argv
        (host [port]), list the library and dump the first track's atom tree.
    """
    connection = DAAPClient()
    if len(sys.argv) > 1:
        host = sys.argv[1]
    else:
        host = "localhost"
    if len(sys.argv) > 2:
        # Cast to int so the CLI argument matches the type of the default port.
        port = int(sys.argv[2])
    else:
        port = 3689
    logging.basicConfig(
        level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s'
    )
    session = None  # defined up-front so the finally block is safe if connect/login fails
    try:
        # do everything in a big try, so we can disconnect at the end
        connection.connect(host, port)
        # auth isn't supported yet. Just log in
        session = connection.login()
        library = session.library()
        log.debug("Library name is `%r`", library.name)
        tracks = library.tracks()
        # demo - save the first track to disk
        # print("Saving %s by %s to disk as 'track.mp3'"%(tracks[0].name, tracks[0].artist))
        # tracks[0].save("track.mp3")
        if len(tracks) > 0:
            tracks[0].atom.printTree()
        else:
            print('No Tracks')
        session.update()
        print(session.revision)
    finally:
        # this here, so we logout even if there's an error somewhere,
        # or itunes will eventually refuse more connections.
        print("--------------")
        if session is not None:
            try:
                session.logout()
            except Exception:
                pass


if __name__ == '__main__':
    main()
|
gpl-2.0
| 1,291,806,273,974,802,000
| 23.733333
| 92
| 0.566712
| false
|
hannahborje/myTodoList
|
todoView.py
|
1
|
1345
|
from flask import request, jsonify, render_template
from todoModel import TodoModel
import flask.views
import json
RETRIEVE_DEFAULT_NR = 5
# Render template for main.html
class TodoView(flask.views.MethodView):
    """ Serve the single-page application shell. """

    def get(self):
        """ Render the main page template. """
        return render_template('main.html')
# Add todo (item) and if it is checked or not (value=false)
class TodoAdd(flask.views.MethodView):
    """ Create a new todo item from a JSON payload. """

    def post(self):
        """ Persist the posted item text and its checked state. """
        payload = json.loads(request.data)
        TodoModel.add_todo(payload['item'], payload['value'])
        return jsonify({'success': True})
# When a todo is checked - change its value (true or false)
class TodoAddValue(flask.views.MethodView):
    """POST handler: store the checked/done state of an existing todo."""
    def post(self):
        args = json.loads(request.data)
        print("Changed done value to:", args)
        TodoModel.add_value(args['id'], args['value'])
        return jsonify({'success' : True})
# Retrieves all the todos from the database, including id and value
class TodoRetrieve(flask.views.MethodView):
    """GET handler: return up to *n* todos as JSON.

    A non-numeric or non-positive *n* falls back to RETRIEVE_DEFAULT_NR.
    """
    def get(self, n):
        try:
            count = int(n)
        except ValueError:
            count = RETRIEVE_DEFAULT_NR
        if count <= 0:
            count = RETRIEVE_DEFAULT_NR
        rows = TodoModel.retrieve_todos(count)
        # Each row is (id, text, value); expose them under named keys.
        payload = [{'id': row[0], 'text': row[1], 'value': row[2]} for row in rows]
        return jsonify({
            'success': True,
            'todoList': payload
        })
|
mit
| 6,281,056,490,317,521,000
| 31.02381
| 97
| 0.62974
| false
|
tranquilit/WAPT
|
waptsetupgui/deb/createdeb.py
|
1
|
10151
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# This file is part of WAPT
# Copyright (C) 2013-2014 Tranquil IT Systems http://www.tranquil.it
# WAPT aims to help Windows systems administrators to deploy
# setup and update applications on users PC.
#
# WAPT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WAPT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WAPT. If not, see <http://www.gnu.org/licenses/>.
#
# -----------------------------------------------------------------------
from __future__ import print_function
import sys
import os
import platform
import logging
import re
import types
import shutil
import subprocess
import argparse
import stat
import glob
import jinja2
from git import Repo
makepath = os.path.join
from shutil import copyfile
def run(*args, **kwargs):
    """Run a shell command string and return its captured stdout (bytes).

    Thin wrapper over subprocess.check_output with shell=True forced, so
    the first positional argument is interpreted by /bin/sh.  Only pass
    trusted, script-internal command strings (shell-injection hazard).
    """
    output = subprocess.check_output(*args, shell=True, **kwargs)
    return output
def eprint(*args, **kwargs):
    """print() variant that writes to stderr instead of stdout."""
    error_stream = sys.stderr
    print(*args, file=error_stream, **kwargs)
def mkdir(path):
    """Create *path* (including parents) if it does not exist yet.

    Idempotent: an existing directory is left untouched.
    """
    if os.path.isdir(path):
        return
    os.makedirs(path)
def debian_major():
    """Return the Debian major version, e.g. '9' for '9.4'."""
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # this relies on the Python 2 interpreter this script targets (see the
    # octal 0755 literals further down the file).
    return platform.linux_distribution()[1].split('.')[0]
def get_distrib():
    """Return the lowercased distribution name (e.g. 'debian', 'ubuntu')."""
    # NOTE(review): same platform.linux_distribution() caveat as debian_major().
    return platform.linux_distribution()[0].lower()
def git_hash():
    """Return the first 8 chars of the current branch head's name_rev.

    Searches upward from the CWD for the enclosing git repository.
    """
    r = Repo('.',search_parent_directories=True)
    return '%s' % (r.active_branch.object.name_rev[:8],)
def dev_revision():
    """Revision string appended to the package version (the git hash)."""
    return '%s' % (git_hash())
def setloglevel(alogger, loglevel):
    """Set *alogger*'s level from a lowercase name ('debug', 'info', ...).

    Unknown names are silently ignored, matching the original behavior.
    """
    valid_levels = ('debug', 'warning', 'info', 'error', 'critical')
    if loglevel not in valid_levels:
        return
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    alogger.setLevel(numeric_level)
def rsync(src, dst, excludes=[]):
    """Copy *src* to *dst* via /usr/bin/rsync -a --stats.

    A fixed set of VCS/build artifacts is always excluded; *excludes*
    adds extra --exclude patterns.  Returns rsync's captured stdout.
    (The mutable default argument is harmless here: *excludes* is only
    read, never mutated.)
    """
    excludes_list = ['*.pyc','*~','.svn','deb','.git','.gitignore']
    excludes_list.extend(excludes)
    rsync_source = src
    rsync_destination = dst
    rsync_options = ['-a','--stats']
    for x in excludes_list:
        rsync_options.extend(['--exclude',x])
    rsync_command = ['/usr/bin/rsync'] + rsync_options + [rsync_source,rsync_destination]
    eprint(rsync_command)
    return subprocess.check_output(rsync_command)
def add_symlink(link_target, link_name):
    """Create a symlink named *link_name* pointing into the builddir tree.

    A leading '/' on *link_target* is stripped so the target resolves under
    the relative 'builddir' staging directory, whose parent dirs are created.
    NOTE(review): the link is created only when the target path does NOT yet
    exist (i.e. a dangling link into the future package tree) -- confirm
    that inversion is intentional.
    """
    if link_target.startswith('/'):
        link_target = link_target[1:]
    relative_link_target_path = os.path.join('builddir', link_target)
    eprint("adding symlink %s -> %s" % (link_name, relative_link_target_path))
    mkdir(os.path.dirname(relative_link_target_path))
    if not os.path.exists(relative_link_target_path):
        # BUG FIX: check_output() was previously given a single
        # whitespace-joined string without shell=True, which makes the OS
        # look for an executable literally named "ln -s ... " and always
        # fails.  Pass an argument list instead.
        cmd = ['ln', '-s', relative_link_target_path, link_name]
        eprint(cmd)
        eprint(subprocess.check_output(cmd))
class Version(object):
    """Version object of form 0.0.0
    can compare with respect to natural numbering and not alphabetical
    Args:
        version (str) : version string
        members_count (int) : number of version members to take into account.
            If actual members in version is less, add missing members with 0 value
            If actual members count is higher, removes last ones.
    >>> Version('0.10.2') > Version('0.2.5')
    True
    >>> Version('0.1.2') < Version('0.2.5')
    True
    >>> Version('0.1.2') == Version('0.1.2')
    True
    >>> Version('7') < Version('7.1')
    True
    .. versionchanged:: 1.6.2.5
        truncate version members list to members_count if provided.
    """
    def __init__(self,version,members_count=None):
        if version is None:
            version = ''
        # NOTE(review): isinstance(version, bytes) appears twice -- one of the
        # two was presumably meant to be str; confirm against callers.
        assert isinstance(version,types.ModuleType) or isinstance(version,bytes) or isinstance(version,bytes) or isinstance(version,Version)
        if isinstance(version,types.ModuleType):
            # A module: take its __version__ attribute.
            self.versionstring = getattr(version,'__version__',None)
        elif isinstance(version,Version):
            # Copy constructor from another Version.
            self.versionstring = getattr(version,'versionstring',None)
        else:
            self.versionstring = version
        # Members are kept as *strings* so natural comparison can mix
        # numeric and alphabetic parts.
        self.members = [ v.strip() for v in self.versionstring.split('.')]
        self.members_count = members_count
        if members_count is not None:
            if len(self.members)<members_count:
                # Pad short versions with '0' members.
                self.members.extend(['0'] * (members_count-len(self.members)))
            else:
                # Truncate long versions to members_count.
                self.members = self.members[0:members_count]
    def __cmp__(self,aversion):
        # NOTE(review): __cmp__ and the builtin cmp() below are Python-2-only;
        # this class will not compare correctly on Python 3.
        def nat_cmp(a, b):
            # Natural comparison of one member: digit runs compare as ints,
            # everything else case-insensitively as text.
            a = a or ''
            b = b or ''
            def convert(text):
                if text.isdigit():
                    return int(text)
                else:
                    return text.lower()
            def alphanum_key(key):
                return [convert(c) for c in re.split('([0-9]+)', key)]
            return cmp(alphanum_key(a), alphanum_key(b))
        if not isinstance(aversion,Version):
            aversion = Version(aversion,self.members_count)
        # Compare member by member; missing members compare as ''.
        for i in range(0,max([len(self.members),len(aversion.members)])):
            if i<len(self.members):
                i1 = self.members[i]
            else:
                i1 = ''
            if i<len(aversion.members):
                i2 = aversion.members[i]
            else:
                i2=''
            v = nat_cmp(i1,i2)
            if v:
                return v
        return 0
    def __str__(self):
        return '.'.join(self.members)
    def __repr__(self):
        return "Version('{}')".format('.'.join(self.members))
current_path = os.path.realpath(__file__)
wapt_source_dir = os.path.abspath(os.path.join(os.path.dirname(current_path),'../..'))
parser = argparse.ArgumentParser(u'Build a Debian package with already compiled executables in root directory.')
parser.add_argument('-l', '--loglevel', help='Change log level (error, warning, info, debug...)')
parser.add_argument('-r', '--revision',default=dev_revision(), help='revision to append to package version')
options = parser.parse_args()
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
if options.loglevel is not None:
setloglevel(logger,options.loglevel)
if platform.system() != 'Linux':
logger.error("This script should be used on Debian Linux")
sys.exit(1)
#########################################
BDIR = './builddir/'
dict_exe = {
'WAPTSELF':'waptself.bin',
'WAPTEXIT':'waptexit.bin',
}
WAPTEDITION=os.environ.get('WAPTEDITION','community')
#########################################
logger.debug('Getting version from waptutils')
for line in open(os.path.join(wapt_source_dir,"waptutils.py")):
if line.strip().startswith('__version__'):
wapt_version = str(Version(line.split('=')[1].strip().replace('"', '').replace("'", ''),3))
if not wapt_version:
eprint(u'version not found in %s/config.py' % os.path.abspath('..'))
sys.exit(1)
r = Repo('.',search_parent_directories=True)
rev_count = '%04d' % (r.active_branch.commit.count(),)
wapt_version = wapt_version +'.'+rev_count
if options.revision:
full_version = wapt_version + '-' + options.revision
else:
full_version = wapt_version
logger.info('Create templates for control and postinst')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('./debian/'))
template_control = jinja_env.get_template('control.tmpl')
template_vars = {
'version': wapt_version,
'description': 'WAPT Agent executables for Debian/Ubuntu\n',
}
render_control = template_control.render(template_vars)
if os.path.exists(BDIR):
shutil.rmtree(BDIR)
os.makedirs(os.path.join(BDIR,'DEBIAN'))
with open(os.path.join(BDIR,'DEBIAN','control'),'w') as f_control:
f_control.write(render_control)
shutil.copy('./debian/postinst',os.path.join(BDIR,'DEBIAN','postinst'))
shutil.copy('./debian/postrm',os.path.join(BDIR,'DEBIAN','postrm'))
dir_desktop = os.path.join(BDIR,'opt/wapt')
os.makedirs(dir_desktop)
shutil.copy('../common/waptexit.desktop',os.path.join(dir_desktop,'tis-waptexit.desktop'))
shutil.copy('../common/waptself.desktop',os.path.join(dir_desktop,'tis-waptself.desktop'))
# Ship the waptself/waptexit translation catalogs inside the package tree.
translation_path = '../../languages'
translation_path_deb = makepath(BDIR, 'opt/wapt/languages')
files_translation = glob.glob(makepath(translation_path, 'waptself*')) + glob.glob(makepath(translation_path, 'waptexit*'))
os.makedirs(translation_path_deb)
for file in files_translation:
    # BUG FIX: the destination was previously the undefined name
    # `translation_path_payload`, which raises NameError at runtime; the
    # intended destination is the languages dir created just above.
    shutil.copy2(file, translation_path_deb)
if WAPTEDITION.lower()=='community':
waptself_png = '../common/waptself-community.png'
waptexit_png = '../common/waptexit-community.png'
else:
waptself_png = '../common/waptself-enterprise.png'
waptexit_png = '../common/waptexit-enterprise.png'
os.makedirs(os.path.join(BDIR,'opt/wapt/icons'))
icons_to_convert=[(waptself_png,makepath(BDIR,'opt/wapt/icons/waptself-%s.png')),(waptexit_png,makepath(BDIR,'opt/wapt/icons/waptexit-%s.png'))]
for icon in icons_to_convert:
for size in ["16","32","64","128"]:
run("convert %s -resize %sx%s %s" % (icon[0],size,size,icon[1] % size))
os.chmod(os.path.join(BDIR,'DEBIAN/'), 0755)
os.chmod(os.path.join(BDIR,'DEBIAN','postinst'), 0755)
os.chmod(os.path.join(BDIR,'DEBIAN','postrm'), 0755)
# creates package file structure
opt_wapt = os.path.join(BDIR,'opt/wapt')
mkdir(opt_wapt)
for afile in dict_exe.keys():
os.chmod(dict_exe[afile],0755)
shutil.copy(dict_exe[afile],opt_wapt)
# build
if WAPTEDITION=='enterprise':
package_filename = 'tis-waptagent-gui-enterprise-%s.deb' % (full_version)
else:
package_filename = 'tis-waptagent-gui-%s.deb' % (full_version)
eprint(subprocess.check_output(['dpkg-deb', '--build', BDIR, package_filename]))
print(package_filename)
|
gpl-3.0
| -6,633,762,645,145,644,000
| 34.003448
| 144
| 0.63442
| false
|
jiajunshen/partsNet
|
scripts/test2.py
|
1
|
9708
|
from __future__ import division, print_function,absolute_import
import pylab as plt
import amitgroup.plot as gr
import numpy as np
import amitgroup as ag
import os
import pnet
import matplotlib.pylab as plot
def extract(ims, allLayers):
    """Feed *ims* through each layer's .extract() in order and return the result.

    Also echoes the layer list to stdout for debugging, as before.
    """
    print(allLayers)
    features = ims
    for layer in allLayers:
        features = layer.extract(features)
    return features
def test2():
X = np.load('test4.npy')
model = X.item()
net = pnet.PartsNet.load_from_dict(model)
allLayer = net.layers
#print(allLayer)
ims, labels = ag.io.load_mnist('testing')
extractedParts = extract(ims[0:200],allLayer[0:2])
#return extractedParts
allParts = extractedParts[0]
parts_layer = allLayer[1]
parts = parts_layer._parts.reshape(50,4,4)
#for i in range(200):
ims = ims[0:200]
labels = labels[0:200]
#print(ims.shape)
classifiedLabel = net.classify(ims)
#print out all the misclassified images
misclassify = np.nonzero(classifiedLabel!=labels)
misclassify = np.append([],np.asarray(misclassify, dtype=np.int))
numMisclassify = len(misclassify)
image = np.ones((numMisclassify,25 * 5,25*5)) * 0.5
print(misclassify)
for j in range(numMisclassify):
i = int(misclassify[j])
print(allParts[i].shape)
thisParts = allParts[i].reshape(allParts[i].shape[0:2])
for m in range(25):
for n in range(25):
if(thisParts[m,n]!=-1):
image[j,m*5:m*5+4,n*5:n*5+4] = parts[thisParts[m,n]]
else:
image[j,m*5:m*5+4,n*5:n*5+4] = 0
gr.images(image)
def displayParts():
# load the trained Image
X = np.load('test4.npy')
model = X.item()
# get the parts Layer
numParts1 = model['layers'][1]['num_parts']
numParts2 = model['layers'][3]['num_parts']
net = pnet.PartsNet.load_from_dict(model)
allLayer = net.layers
print(allLayer)
ims,labels = ag.io.load_mnist('training')
extractedFeature = []
for i in range(5):
extractedFeature.append(extract(ims[0:1000],allLayer[0:i])[0])
extractedParts1 = extractedFeature[2]
#extractedParts11 = extract(ims[0:1000],allLayer[0:6])[1]
#print(extractedParts11)
extractedParts2 = extractedFeature[4]
#print(extractedParts2.shape)
partsPlot1 = np.zeros((numParts1,4,4))
partsCodedNumber1 = np.zeros(numParts1)
partsPlot2 = np.zeros((numParts2,6,6))
partsCodedNumber2 = np.zeros(numParts2)
for i in range(1000):
codeParts1 = extractedParts1[i].reshape(extractedParts1[i].shape[0:2])
codeParts2 = extractedParts2[i].reshape(extractedParts2[i].shape[0:2])
for m in range(25):
for n in range(25):
if(codeParts1[m,n]!=-1):
partsPlot1[codeParts1[m,n]]+=ims[i,m:m+4,n:n+4]
partsCodedNumber1[codeParts1[m,n]]+=1
for p in range(8):
for q in range(8):
if(codeParts2[p,q]!=-1):
partsPlot2[codeParts2[p,q]]+=ims[i,3 * p:3 * p+6,3 * q:3 * q+6]
partsCodedNumber2[codeParts2[p,q]]+=1
#if(codeParts2[p,q,1]!=-1):
# partsPlot2[codeParts2[p,q,1]]+=ims[i,p:p+10,q:q+10]
# partsCodedNumber2[codeParts2[p,q,1]]+=1
for j in range(numParts1):
partsPlot1[j] = partsPlot1[j]/partsCodedNumber1[j]
for k in range(numParts2):
partsPlot2[k] = partsPlot2[k]/partsCodedNumber2[k]
#print(partsPlot1.shape)
#gr.images(partsPlot1[0:200],vmin = 0,vmax = 1)
#gr.images(partsPlot2,vmin = 0,vmax = 1)
#print(partsCodedNumber1)
#print("-----------------")
#print(partsCodedNumber2)
return partsPlot1,partsPlot2,extractedFeature
def investigate():
partsPlot1,partsPlot2,extractedFeature = displayParts()
for partIndex in range(20):
test = []
smallerPart = []
for k in range(1000):
x = extractedFeature[4][k]
for m in range(8):
for n in range(8):
if(x[m,n,0] == partIndex):
test.append((k,m,n))
smallerPart.append(extractedFeature[2][k,3 * m + 1,3 * n + 1])
number = np.zeros(200)
for x in smallerPart:
if(x!=-1):
number[x]+=1
#plot1 = plot.figure(partIndex)
#plot.plot(number)
#plot.savefig('frequency %i.png' %partIndex)
#plot.close()
index = np.where(number > 100)[0]
partNew2 = np.ones((index.shape[0] + 1,6,6))
partNew2[0] = partsPlot2[partIndex]
for i in range(index.shape[0]):
partNew2[i + 1,0:4,0:4] = partsPlot1[index[i],:,:]
fileString = 'part%i.png' %partIndex
gr.images(partNew2,zero_to_one=False, show=False,vmin = 0, vmax = 1, fileName = fileString)
def partsPool(originalPartsRegion, numParts):
    """One-hot pooling over a coded parts region.

    Returns a (1, 1, numParts) float array with 1.0 at every part index
    that occurs in *originalPartsRegion*; entries equal to -1 mean
    "no part coded here" and are skipped.
    """
    partsGrid = np.zeros((1, 1, numParts))
    region = np.asarray(originalPartsRegion)
    coded_ids = region[region != -1]
    partsGrid[0, 0, coded_ids] = 1
    return partsGrid
def trainPOP():
X = np.load("test4.npy")
model = X.item()
# get num of Parts
numParts = model['layers'][1]['num_parts']
net = pnet.PartsNet.load_from_dict(model)
allLayer = net.layers
ims,labels = ag.io.load_mnist('training')
trainingDataNum = 1000
extractedFeature = extract(ims[0:trainingDataNum],allLayer[0:2])[0]
#print(extractedFeature.shape)
extractedFeature = extractedFeature.reshape(extractedFeature.shape[0:3])
partsPlot = np.zeros((numParts,6,6))
partsCodedNumber = np.zeros(numParts)
#every list corresponding to the larger region surrounding 10x10 region of the 5*5 region coded by this part
imgRegion = [[] for x in range(numParts)]
partsRegion = [[] for x in range(numParts)]
#Part Visualize#
for i in range(trainingDataNum):
codeParts = extractedFeature[i]
for m in range(23):
for n in range(23):
if(codeParts[m,n]!=-1):
partsPlot[codeParts[m,n]]+=ims[i,m:m+6,n:n+6]
partsCodedNumber[codeParts[m,n]]+=1
for j in range(numParts):
partsPlot[j] = partsPlot[j]/partsCodedNumber[j]
secondLayerCodedNumber = 0
if 1:
for i in range(trainingDataNum):
codeParts = extractedFeature[i]
for m in range(23)[3:20]:
for n in range(23)[3:20]:
if(codeParts[m,n]!=-1):
imgRegion[codeParts[m,n]].append(ims[i,m-3:m+9,n-3:n+9])
secondLayerCodedNumber+=1
partsGrid = partsPool(codeParts[m-3:m+4,n-3:n+4],numParts)
partsRegion[codeParts[m,n]].append(partsGrid)
for i in range(numParts):
print(len(partsRegion[i]))
##Second-Layer Parts
numSecondLayerParts = 20
allPartsLayer = [[pnet.PartsLayer(numSecondLayerParts,(1,1),settings=dict(outer_frame=0,threshold=5,
sample_per_image=1,
max_samples=10000,
min_prob=0.005))] for i in range(numParts)]
allPartsLayerImg = np.zeros((numParts,numSecondLayerParts,12,12))
allPartsLayerImgNumber = np.zeros((numParts,numSecondLayerParts))
#print("====================================================")
zeroParts = 0
for i in range(numParts):
#print("test")
allPartsLayer[i][0].train_from_samples(np.array(partsRegion[i]),None)
extractedFeaturePart = extract(np.array(partsRegion[i],dtype = np.uint8),allPartsLayer[i])[0]
#print(extractedFeaturePart.shape)
for j in range(len(partsRegion[i])):
if(extractedFeaturePart[j,0,0,0]!=-1):
partIndex = extractedFeaturePart[j,0,0,0]
allPartsLayerImg[i,partIndex]+=imgRegion[i][j]
allPartsLayerImgNumber[i,partIndex]+=1
else:
zeroParts+=1
for i in range(numParts):
for j in range(numSecondLayerParts):
allPartsLayerImg[i,j] = allPartsLayerImg[i,j]/allPartsLayerImgNumber[i,j]
print(allPartsLayer[i][0]._weights)
#print(zeroParts)
#print(np.sum(allPartsLayerImgNumber),secondLayerCodedNumber)
settings = {'interpolation':'nearest','cmap':plot.cm.gray,}
settings['vmin'] = 0
settings['vmax'] = 1
plotData = np.ones((14*100+2,14*(numSecondLayerParts + 1)+2))*0.8
visualShiftParts = 0
if 0:
allPartsPlot = np.zeros((20,11,12,12))
gr.images(partsPlot.reshape(numParts,6,6),zero_to_one=False,vmin = 0, vmax = 1)
allPartsPlot[:,0] = 0.5
allPartsPlot[:,0,3:9,3:9] = partsPlot[20:40]
allPartsPlot[:,1:,:,:] = allPartsLayerImg[20:40]
gr.images(allPartsPlot.reshape(220,12,12),zero_to_one=False, vmin = 0, vmax =1)
elif 0:
for i in range(numSecondLayerParts + 1):
for j in range(100):
if i == 0:
plotData[5 + j * 14:11 + j * 14, 5 + i * 14: 11 + i * 14] = partsPlot[j+visualShiftParts]
else:
plotData[2 + j * 14:14 + j * 14,2 + i * 14: 14 + i * 14] = allPartsLayerImg[j+visualShiftParts,i-1]
plot.figure(figsize=(10,40))
plot.axis('off')
plot.imshow(plotData, **settings)
plot.savefig('test.pdf',format='pdf',dpi=900)
else:
pass
|
bsd-3-clause
| 7,992,028,049,966,963,000
| 37.220472
| 119
| 0.576844
| false
|
GoogleCloudDataproc/cloud-dataproc
|
codelabs/spark-bigquery/counts_by_subreddit.py
|
1
|
3261
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script accompanies this codelab:
# https://codelabs.developers.google.com/codelabs/pyspark-bigquery/
# This script outputs subreddits counts for a given set of years and month
# This data comes from BigQuery via the dataset "fh-bigquery.reddit_comments"
# These allow us to create a schema for our data
from pyspark.sql.types import StructField, StructType, StringType, LongType
# A Spark Session is how we interact with Spark SQL to create Dataframes
from pyspark.sql import SparkSession
# This will help catch some PySpark errors
from py4j.protocol import Py4JJavaError
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit").getOrCreate()
# Create a two column schema consisting of a string and a long integer
fields = [StructField("subreddit", StringType(), True),
StructField("count", LongType(), True)]
schema = StructType(fields)
# Create an empty DataFrame. We will continuously union our output with this
subreddit_counts = spark.createDataFrame([], schema)
# Establish a set of years and months to iterate over
years = ['2017', '2018', '2019']
months = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12']
# Keep track of all tables accessed via the job
tables_read = []
for year in years:
for month in months:
# In the form of <project-id>.<dataset>.<table>
table = f"fh-bigquery.reddit_posts.{year}_{month}"
# If the table doesn't exist we will simply continue and not
# log it into our "tables_read" list
try:
table_df = (spark.read.format('bigquery').option('table', table)
.load())
tables_read.append(table)
except Py4JJavaError as e:
if f"Table {table} not found" in str(e):
continue
else:
raise
# We perform a group-by on subreddit, aggregating by the count and then
# unioning the output to our base dataframe
subreddit_counts = (
table_df
.groupBy("subreddit")
.count()
.union(subreddit_counts)
)
print("The following list of tables will be accounted for in our analysis:")
for table in tables_read:
print(table)
# From our base table, we perform a group-by, summing over the counts.
# We then rename the column and sort in descending order both for readability.
# show() will collect the table into memory output the table to std out.
(
subreddit_counts
.groupBy("subreddit")
.sum("count")
.withColumnRenamed("sum(count)", "count")
.sort("count", ascending=False)
.show()
)
|
apache-2.0
| 19,847,593,516,167,104
| 35.640449
| 79
| 0.680773
| false
|
kingtaurus/cs224d
|
assignment1/tensorflow_word2vec.py
|
1
|
3188
|
import os
import math
import random
import collections
import numpy as np
import tensorflow as tf
import cs224d.data_utils as data_utils
from tensorflow.models.embedding import gen_word2vec as word2vec
class Options(object):
    """Bag of hyper-parameters for the skip-gram word2vec model."""
    def __init__(self):
        #Model Options
        self.emb_dim = 20          # embedding dimensionality
        self.train_data = None     # corpus path; only echoed in build_graph's log (it hard-codes "text8")
        self.num_samples = 20      # presumably negative samples for NCE -- confirm (unused in visible code)
        self.learning_rate = 1.0
        self.epochs_to_train = 5
        self.batch_size = 64       # divisor in nce_loss's batch average
        self.window_size = 5       # skip-gram context window
        self.min_count = 3         # minimum word frequency kept in the vocabulary
class Word2Vec(object):
    """Word2Vec model (skipgram).

    Builds the training graph from a corpus file via the TensorFlow
    ``word2vec.skipgram`` op and computes an NCE loss over the logits
    produced by :meth:`forward`.

    NOTE(review): ``forward`` is a stub returning ``(None, None)`` and
    ``optimize`` (called at the end of ``build_graph``) is not defined on
    this class, so graph construction cannot currently run to completion.
    """
    def __init__(self, options, session):
        self._options = options
        self._session = session
        self._word2id = {}   # word -> vocabulary index
        self._id2word = []   # vocabulary index -> word
        self.build_graph()
        self.build_eval_graph()
        self.save_vocab()
        self._read_dataset()

    def _read_dataset(self):
        # TODO: load the Stanford Sentiment dataset (an exploratory sketch
        # lived here as commented-out code; see repository history).
        pass

    def forward(self, examples, labels):
        # Stub: intended to return (true_logits, sampled_logits) for nce_loss.
        return None, None

    def nce_loss(self, true_logits, sampled_logits):
        """Noise-contrastive estimation loss, averaged over the batch.

        Positive (true) pairs are pushed towards label 1, sampled noise
        pairs towards label 0.
        """
        opts = self._options
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            true_logits, tf.ones_like(true_logits))
        sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            sampled_logits, tf.zeros_like(sampled_logits))
        nce_loss_tensor = (tf.reduce_sum(true_xent) +
                           tf.reduce_sum(sampled_xent)) / opts.batch_size
        return nce_loss_tensor

    def build_graph(self):
        """Build the training graph and initialize the vocabulary maps."""
        opts = self._options
        # BUG FIX: the skipgram() keyword arguments previously read the
        # module-global `opt` (defined only under the __main__ guard), which
        # raises NameError whenever this module is imported rather than run
        # as a script.  Use the instance's options object instead.
        # NOTE(review): filename is hard-coded to "text8" rather than
        # opts.train_data -- confirm intended.
        (words, counts, words_per_epoch, self._epoch, self._words, examples,
         labels) = word2vec.skipgram(filename="text8",
                                     batch_size=opts.batch_size,
                                     window_size=opts.window_size,
                                     min_count=opts.min_count,
                                     subsample=0)
        (opts.vocab_words, opts.vocab_counts,
         opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
        opts.vocab_size = len(opts.vocab_words)
        print("Data file: ", opts.train_data)
        print("Vocab size: ", opts.vocab_size - 1, " + UNK")
        print("Words per epoch: ", opts.words_per_epoch)
        self._examples = examples
        self._labels = labels
        # Build the two-way word <-> id maps from the op's vocabulary.
        self._id2word = opts.vocab_words
        for i, w in enumerate(self._id2word):
            self._word2id[w] = i
        true_logits, sampled_logits = self.forward(examples, labels)
        loss = self.nce_loss(true_logits, sampled_logits)
        tf.scalar_summary("NCE loss", loss)
        self._loss = loss
        self.optimize(loss)

    def build_eval_graph(self):
        # Evaluation graph not implemented yet.
        pass

    def save_vocab(self):
        # Vocabulary persistence not implemented yet.
        pass
if __name__ == "__main__":
    # Smoke-test entry point: build the model against a live TF session.
    opt = Options()
    session = tf.Session()
    model = Word2Vec(opt, session)
|
mit
| -6,612,433,178,655,619,000
| 33.27957
| 109
| 0.584065
| false
|
chubbymaggie/barf-project
|
barf/utils/reil.py
|
1
|
7524
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from barf.analysis.basicblock import CFGRecoverer
from barf.analysis.basicblock import ControlFlowGraph
from barf.analysis.basicblock import RecursiveDescent
from barf.arch.x86.x86base import X86ArchitectureInformation
from barf.arch.x86.x86disassembler import X86Disassembler
from barf.arch.x86.x86translator import X86Translator
from barf.core.reil import ReilContainer
from barf.core.reil import ReilSequence
from barf.core.reil import split_address
class ReilContainerBuilder(object):
    """Disassemble an x86 binary function-by-function and translate it
    into a single ReilContainer."""

    def __init__(self, binary):
        self.__binary = binary
        self.__arch_mode = self.__binary.architecture_mode
        self.__arch = X86ArchitectureInformation(self.__arch_mode)
        self.__disassembler = X86Disassembler(architecture_mode=self.__arch_mode)
        self.__translator = X86Translator(architecture_mode=self.__arch_mode)
        self.__bb_builder = CFGRecoverer(RecursiveDescent(self.__disassembler, self.__binary.text_section,
                                                          self.__translator, self.__arch))

    def build(self, functions):
        """Translate every (name, start, end) function into REIL.

        Returns one ReilContainer accumulating all translated sequences.
        """
        reil_container = ReilContainer()
        for _, start, end in functions:
            bbs, _ = self.__bb_builder.build(start, end)
            cfg = ControlFlowGraph(bbs)
            reil_container = self.__translate_cfg(cfg, reil_container=reil_container)
        return reil_container

    # Auxiliary methods
    # ======================================================================== #
    def __translate_cfg(self, cfg, reil_container=None):
        """Flatten *cfg*'s basic blocks and translate them into the container."""
        if not reil_container:
            reil_container = ReilContainer()
        asm_instrs = []
        for bb in cfg.basic_blocks:
            for dual_instr in bb:
                asm_instrs += [dual_instr.asm_instr]
        reil_container = self.__translate(asm_instrs, reil_container)
        return reil_container

    def __translate(self, asm_instrs, reil_container):
        """Translate asm instructions into chained ReilSequences.

        Each sequence's next_sequence_address points at the following
        sequence; the final one points just past the last instruction
        (REIL addresses are the native address shifted left by 8).
        """
        asm_instr_last = None
        instr_seq_prev = None
        for asm_instr in asm_instrs:
            instr_seq = ReilSequence()
            for reil_instr in self.__translator.translate(asm_instr):
                instr_seq.append(reil_instr)
            if instr_seq_prev:
                instr_seq_prev.next_sequence_address = instr_seq.address
            reil_container.add(instr_seq)
            instr_seq_prev = instr_seq
            # BUG FIX: asm_instr_last was never assigned inside the loop, so
            # the final sequence's next_sequence_address below was never set.
            asm_instr_last = asm_instr
        if instr_seq_prev:
            if asm_instr_last:
                instr_seq_prev.next_sequence_address = (asm_instr_last.address + asm_instr_last.size) << 8
        return reil_container
class ReilContainerEx(object):
    """ReilContainer variant that disassembles and translates functions
    lazily, on first fetch of an address inside a known symbol."""

    def __init__(self, binary, symbols):
        self.__binary = binary
        self.__arch_mode = self.__binary.architecture_mode
        self.__arch = X86ArchitectureInformation(self.__arch_mode)
        self.__disassembler = X86Disassembler(architecture_mode=self.__arch_mode)
        self.__translator = X86Translator(architecture_mode=self.__arch_mode)
        self.__bb_builder = CFGRecoverer(RecursiveDescent(self.__disassembler, self.__binary.text_section,
                                                          self.__translator, self.__arch))
        # base native address -> ReilSequence
        self.__container = {}
        self.__symbols = symbols
        # start address -> (name, end address), for lazy resolution.
        self.__symbols_by_addr = {}
        for name, start, end in symbols:
            self.__symbols_by_addr[start] = (name, end)

    # Auxiliary methods
    # ======================================================================== #
    def __translate_cfg(self, cfg, reil_container=None):
        # Flatten the CFG's basic blocks and translate them into the container.
        if not reil_container:
            reil_container = ReilContainer()
        asm_instrs = []
        for bb in cfg.basic_blocks:
            for dual_instr in bb:
                asm_instrs += [dual_instr.asm_instr]
        reil_container = self.__translate(asm_instrs, reil_container)
        return reil_container

    def __translate(self, asm_instrs, reil_container):
        # NOTE(review): asm_instr_last is never assigned in the loop below,
        # so the trailing "if asm_instr_last" branch is dead and the final
        # sequence's next_sequence_address is never set -- confirm intent
        # (same pattern as ReilContainerBuilder.__translate above).
        asm_instr_last = None
        instr_seq_prev = None
        for asm_instr in asm_instrs:
            instr_seq = ReilSequence()
            for reil_instr in self.__translator.translate(asm_instr):
                instr_seq.append(reil_instr)
            if instr_seq_prev:
                instr_seq_prev.next_sequence_address = instr_seq.address
            reil_container.add(instr_seq)
            instr_seq_prev = instr_seq
        if instr_seq_prev:
            if asm_instr_last:
                instr_seq_prev.next_sequence_address = (asm_instr_last.address + asm_instr_last.size) << 8
        return reil_container

    def add(self, sequence):
        """Register a translated sequence under its base native address."""
        base_addr, _ = split_address(sequence.address)
        if base_addr in self.__container.keys():
            raise Exception("Invalid sequence")
        else:
            self.__container[base_addr] = sequence

    def fetch(self, address):
        """Return the REIL instruction at *address*, translating on demand."""
        base_addr, index = split_address(address)
        if base_addr not in self.__container.keys():
            self.__resolve_address(base_addr)
        return self.__container[base_addr].get(index)

    def get_next_address(self, address):
        """Address of the instruction following *address* (REIL addressing)."""
        base_addr, index = split_address(address)
        if base_addr not in self.__container.keys():
            raise Exception("Invalid address.")
        addr = address
        if index < len(self.__container[base_addr]) - 1:
            addr += 1
        else:
            # Past the end of this sequence: jump to the linked successor.
            addr = self.__container[base_addr].next_sequence_address
        return addr

    def dump(self):
        # Debug helper: print every sequence in address order.
        for base_addr in sorted(self.__container.keys()):
            self.__container[base_addr].dump()
            print("-" * 80)

    def __iter__(self):
        for addr in sorted(self.__container.keys()):
            for instr in self.__container[addr]:
                yield instr

    def __resolve_address(self, address):
        # Lazily disassemble + translate the symbol starting at *address*.
        if address not in self.__symbols_by_addr:
            # print("Not symbol : {:#010x}".format(address))
            raise Exception("Symbol not found!")
        name, end = self.__symbols_by_addr[address]
        # print("Resolving {:s} @ {:#010x}".format(name, address))
        # NOTE(review): ReilContainerBuilder.build unpacks __bb_builder.build()
        # as a (bbs, _) tuple, while here the raw result is passed straight to
        # ControlFlowGraph -- one of the two is likely wrong; confirm the
        # CFGRecoverer.build return type.
        cfg = ControlFlowGraph(self.__bb_builder.build(address, end))
        _ = self.__translate_cfg(cfg, reil_container=self)
|
bsd-2-clause
| -6,574,008,995,850,594,000
| 34.828571
| 106
| 0.624535
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.