text
stringlengths 29
850k
|
|---|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from unittest import TestCase
from pytz import UTC
from targetprocess.serializers import TargetProcessSerializer
class TargetProcessSerializerTest(TestCase):
    """Unit tests for TargetProcessSerializer.deserialize()."""

    # A Microsoft-JSON date literal and the UTC datetime it decodes to.
    RAW_DATE = '/Date(1441596445000-0500)/'
    PARSED_DATE = datetime(2015, 9, 7, 3, 27, 25, tzinfo=UTC)

    def setUp(self):
        # Always show full diffs on dict/list assertion failures.
        self.maxDiff = None

    def test_deserialize_dict(self):
        """A plain dict is deserialized in place, converting date strings."""
        payload = {
            'EndDate': self.RAW_DATE,
            'Effort': 0.0,
            'ResourceType': 'UserStory',
            'Team': {'Id': 298, 'Name': 'DevOps'},
            'LastCommentDate': None,
            'CustomFields': [
                {'Name': 'UI Spec', 'Type': 'RichText', 'Value': None},
                {'Name': 'Date', 'Type': 'DropDown', 'Value': self.RAW_DATE},
            ],
        }
        expected = {
            'EndDate': self.PARSED_DATE,
            'Effort': 0.0,
            'ResourceType': 'UserStory',
            'Team': {'Id': 298, 'Name': 'DevOps'},
            'LastCommentDate': None,
            'CustomFields': [
                {'Name': 'UI Spec', 'Type': 'RichText', 'Value': None},
                {'Name': 'Date', 'Type': 'DropDown', 'Value': self.PARSED_DATE},
            ],
        }
        result = TargetProcessSerializer().deserialize(payload)
        self.assertEqual(result, expected)

    def test_deserialize_dict_with_items(self):
        """A dict carrying an 'Items' list is unwrapped to that list."""
        payload = {
            'Items': [
                {
                    'Date': self.RAW_DATE,
                    'NoneField': None,
                    'TextField': 'Text',
                    'NestedDict': {'Field': 'Value'},
                    'NestedList': [{'Field': 'Value'}],
                }
            ]
        }
        expected = [
            {
                'Date': self.PARSED_DATE,
                'NoneField': None,
                'TextField': 'Text',
                'NestedDict': {'Field': 'Value'},
                'NestedList': [{'Field': 'Value'}],
            }
        ]
        result = TargetProcessSerializer().deserialize(payload)
        self.assertEqual(result, expected)
|
You're sure to turn some heads when wearing this shaper. Employing curved, double panels around the perimeter of its backside, and less stretchy, wrap-around panels on the backs of its thighs, this garment will shape and lift your derriere while slimming your thighs by up to one inch. And don't worry. The rear's center has only one ply of fabric, so there's no danger of "tush smush." Also, its double front panel will deliver a flatter tummy, enhancing your complete silhouette. Choose this pantliner to transform your tummy, hips, rear, and thighs, and smooth your legs.
Fabric Content: 70% Nylon, 30% Elastane. Reinforcement: 79% Nylon, 21% Elastane.
Its elastic band, made of Rubberflex, covers the upper, middle, and lower parts of the abdomen. The band also enhances the buttocks. Its design extends down to the knees, slimming the legs and hips.
|
"""
Demo if adaptive adversary works against feature squeezing.
Embed the diffrentiable filter layers in a model.
Pass in the (average) gradient (part of loss) to an attack algorithm.
Implement the gaussian-noise-iterative method for non-diffrentiable filter layers (bit depth reduction.)
Introduce the randomized feature squeezing (need to verify with legitimate examples, should not harm the accuracy.)
"""
import os
import tensorflow as tf
import numpy as np
import math
# Core: Get the gradient of models for the attack algorithms.
# We will combine the gradient of several models.
from keras.models import Model
from keras.layers import Lambda, Input
def insert_pre_processing_layer_to_model(model, input_shape, func):
    """Wrap a trained Keras model with a pre-processing Lambda layer.

    The returned model accepts inputs transformed by ``func`` and emits
    logits (the output of the original model's penultimate layer) instead
    of softmax probabilities.  Conceptually it has three layers:
    Input -> Lambda(func) -> truncated training model.
    """
    # Truncate the trained model just before its softmax to expose logits.
    logits_model = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    new_input = Input(shape=input_shape)
    preprocessed = Lambda(func, input_shape=input_shape)(new_input)
    logits_output = logits_model(preprocessed)
    return Model(inputs=new_input, outputs=logits_output)
# maybe_generate_adv_examples(sess, model, x, y, X_test, Y_test_target, attack_name, attack_params, use_cache = x_adv_fpath, verbose=FLAGS.verbose, attack_log_fpath=attack_log_fpath)
def adaptive_attack(sess, model, squeezers, x, y, X_test, Y_test_target, attack_name, attack_params):
    # NOTE(review): this function looks unfinished -- each squeezed
    # prediction overwrites the previous one, nothing is accumulated,
    # and there is no return value.  Confirm intent before relying on it.
    for squeeze_func in squeezers:
        predictions = model(squeeze_func(x))
    # tf.contrib.distributions.kl(dist_a, dist_b, allow_nan=False, name=None)
# from .median import median_filter as median_filter_tf
# from .median import median_random_filter as median_random_filter_tf
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from utils.squeeze import get_squeezer_by_name, reduce_precision_tf
# if FLAGS.dataset_name == "MNIST":
# # squeezers_name = ['median_smoothing_2', 'median_smoothing_3', 'binary_filter']
# squeezers_name = ['median_smoothing_2', 'binary_filter']
# elif FLAGS.dataset_name == "CIFAR-10":
# squeezers_name = ["bit_depth_5", "bit_depth_4", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2', 'median_smoothing_1_3']
# elif FLAGS.dataset_name == "ImageNet":
# squeezers_name = ["bit_depth_5", 'median_smoothing_1_2', 'median_smoothing_2_1','median_smoothing_2']
def get_tf_squeezer_by_name(name):
    """Return the TensorFlow implementation of the named squeezer."""
    backend = 'tensorflow'
    return get_squeezer_by_name(name, backend)
# Per-dataset differentiable squeezer configurations (names understood by
# get_tf_squeezer_by_name).  Kept as module-level reference defaults.
tf_squeezers_name_mnist = ['median_filter_2_2', 'bit_depth_1']
tf_squeezers_name_cifar10 = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5', 'bit_depth_4']
tf_squeezers_name_imagenet = ['median_filter_1_2', 'median_filter_2_1', 'median_filter_2_2', 'median_filter_1_3', 'bit_depth_5']
# tf_squeezers = map(get_tf_squeezer_by_name, tf_squeezers_name)
def get_tf_squeezers_by_str(tf_squeezers_str):
    """Parse a comma-separated squeezer-name string into squeezer callables.

    Returns a list rather than the previous bare ``map(...)`` so the
    result can be printed and iterated more than once under Python 3
    (where ``map`` is a one-shot lazy iterator; under Python 2 ``map``
    already returned a list, so behavior is unchanged there).
    """
    tf_squeezers_name = tf_squeezers_str.split(',')
    return [get_tf_squeezer_by_name(name) for name in tf_squeezers_name]
def kl_tf(x1, x2, eps = 0.000000001):
    """Per-example KL divergence KL(x1 || x2) between batches of
    probability vectors, clipped to [eps, 1] for numerical stability."""
    safe_a = tf.clip_by_value(x1, eps, 1)
    safe_b = tf.clip_by_value(x2, eps, 1)
    return tf.reduce_sum(safe_a * tf.log(safe_a / safe_b), reduction_indices=[1])
def generate_adaptive_carlini_l2_examples(sess, model, x, y, X, Y_target, attack_params, verbose, attack_log_fpath):
    """Validate/merge attack parameters and run the adaptive Carlini-L2 attack.

    Merges ``attack_params`` over the defaults below, converts the
    'tf_squeezers' comma-separated string into callables, caps the batch
    size at len(X), and delegates to adaptive_CarliniL2.  Raises
    NotImplementedError for unknown parameter keys.
    NOTE(review): ``x``, ``y`` and ``verbose`` appear unused here.
    """
    # (model, x, y, X, Y_target, tf_squeezers=tf_squeezers, detector_threshold = 0.2):
    # tf_squeezers=tf_squeezers
    # Adversarial outputs are saved next to the attack log file.
    eval_dir = os.path.dirname(attack_log_fpath)
    default_params = {
        'batch_size': 100,
        'confidence': 0,
        'targeted': False,
        'learning_rate': 9e-2,
        'binary_search_steps': 9,
        'max_iterations': 5000,
        'abort_early': False,  # TODO: not supported yet.
        'initial_const': 0.0,
        'detector_threshold': 0.3,
        'uint8_optimized': False,
        'tf_squeezers': [],
        'distance_measure': 'l1',
        'between_squeezers': False,
    }
    # Replace the squeezer-name string with actual squeezer callables
    # (mutates the caller's attack_params dict in place).
    if 'tf_squeezers' in attack_params:
        tf_squeezers_str = attack_params['tf_squeezers']
        tf_squeezers = get_tf_squeezers_by_str(tf_squeezers_str)
        attack_params['tf_squeezers'] = tf_squeezers
    # Reject unknown keys; accepted ones override the defaults.
    accepted_params = default_params.keys()
    for k in attack_params:
        if k not in accepted_params:
            raise NotImplementedError("Unsuporrted params in Carlini L2: %s" % k)
        else:
            default_params[k] = attack_params[k]
    # assert batch_size <= len(X)
    # Never ask for a batch larger than the number of examples.
    if 'batch_size' in default_params and default_params['batch_size'] > len(X):
        default_params['batch_size'] = len(X)
    return adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, **default_params)
def adaptive_CarliniL2(sess, model, X, Y_target, eval_dir, batch_size, confidence, targeted, learning_rate, binary_search_steps, max_iterations, abort_early, initial_const, detector_threshold, uint8_optimized, tf_squeezers, distance_measure, between_squeezers):
    """Adaptive Carlini-Wagner L2 attack that also penalizes detection by
    feature squeezing.

    Optimizes adversarial perturbations in tanh space with Adam, adding to
    the usual C&W margin loss a "detector penalty" (the squeezed-vs-raw
    prediction distance above ``detector_threshold``), and binary-searches
    the per-example loss coefficient over ``binary_search_steps`` rounds.
    Returns the best (lowest-L2, successful, undetected) adversarial
    images found; intermediate results are saved under ``eval_dir``.
    NOTE(review): ``batch_size`` and ``abort_early`` are accepted but
    never used -- the whole of X is optimized at once.
    """
    # Truncate the model before softmax to work with logits.
    model_logits = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    # Need a determined batch size for coefficient vectors.
    x = tf.placeholder(shape=X.shape, dtype=tf.float32)
    y = tf.placeholder(shape=Y_target.shape, dtype=tf.float32)
    # Adapted from Warren and Carlini's code
    N0, H0, W0, C0 = X.shape
    # Range [0, 1], initialize as the original images.
    batch_images = X
    # Get the arctanh of the original images (0.501 avoids infinities at 0/1).
    batch_images_tanh = np.arctanh((batch_images - 0.5) / 0.501)
    batch_labels = Y_target
    x_star_tanh = tf.Variable(batch_images_tanh, dtype=tf.float32)
    # Range [0, 1], initialize as the original images.
    x_star = tf.tanh(x_star_tanh) / 2. + 0.5
    # The result is optimized for uint8 (quantized to 256 levels).
    x_star_uint8 = reduce_precision_tf(x_star, 256)
    # Gradient required.
    y_pred_logits = model_logits(x_star)
    y_pred = model(x_star)
    print ("tf_squezers: %s" % tf_squeezers)
    # One prediction per squeezer, computed on the squeezed candidate.
    y_squeezed_pred_list = [ model(func(x_star)) for func in tf_squeezers ]
    # Per-example coefficient balancing attack loss vs. L2 distortion.
    coeff = tf.placeholder(shape=(N0,), dtype=tf.float32)
    l2dist = tf.reduce_sum(tf.square(x_star - x), [1, 2, 3])
    ground_truth_logits = tf.reduce_sum(y * y_pred_logits, 1)
    # Mask out the true class with a large negative offset.
    top_other_logits = tf.reduce_max((1 - y) * y_pred_logits - (y * 10000), 1)
    # Untargeted attack, minimize the ground_truth_logits.
    # target_penalty = tf.maximum(0., ground_truth_logits - top_other_logits)
    if targeted is False:
        # if untargeted, optimize for making this class least likely.
        target_penalty = tf.maximum(0.0, ground_truth_logits-top_other_logits+confidence)
    else:
        # if targetted, optimize for making the other class most likely
        target_penalty = tf.maximum(0.0, top_other_logits-ground_truth_logits+confidence)
    # Minimize the sum of the detection scores above the threshold.
    detector_penalty = None
    # TODO: include between squeezers l1.
    all_pred_list = [y_pred] + y_squeezed_pred_list
    if between_squeezers:
        print ("#Between squeezers")
        # Penalize every pairwise distance among raw + squeezed predictions.
        for i, pred_base in enumerate(all_pred_list):
            for j in range(i+1, len(all_pred_list)):
                pred_target = all_pred_list[j]
                if distance_measure == "l1":
                    score = tf.reduce_sum(tf.abs(pred_base - pred_target), 1)
                elif distance_measure == 'kl_f':
                    score = kl_tf(pred_base, pred_target)
                elif distance_measure == 'kl_b':
                    score = kl_tf(pred_target, pred_base)
                # Only the excess over the detector threshold is penalized.
                detector_penalty_sub = tf.maximum(0., score - detector_threshold)
                if detector_penalty is None:
                    detector_penalty = detector_penalty_sub
                else:
                    detector_penalty += detector_penalty_sub
    else:
        # Penalize only raw-vs-squeezed distances (the detector's view).
        for y_squeezed_pred in y_squeezed_pred_list:
            if distance_measure == "l1":
                score = tf.reduce_sum(tf.abs(y_pred - y_squeezed_pred), 1)
            elif distance_measure == 'kl_f':
                score = kl_tf(y_pred, y_squeezed_pred)
            elif distance_measure == 'kl_b':
                score = kl_tf(y_squeezed_pred, y_pred)
            detector_penalty_sub = tf.maximum(0., score - detector_threshold)
            if detector_penalty is None:
                detector_penalty = detector_penalty_sub
            else:
                detector_penalty += detector_penalty_sub
    # There could be different design choices. E.g. add one coefficient for the detector penalty.
    loss = tf.add((target_penalty + detector_penalty) * coeff, l2dist)
    # Minimize loss by updating variables in var_list.
    train_adv_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, var_list=[x_star_tanh])
    # Why the last four global variables are the optimizer variables?
    # <tf.Variable 'beta1_power:0' shape=() dtype=float32_ref>
    # <tf.Variable 'beta2_power:0' shape=() dtype=float32_ref>
    # <tf.Variable 'Variable/Adam:0' shape=(10, 28, 28, 1) dtype=float32_ref>
    # <tf.Variable 'Variable/Adam_1:0' shape=(10, 28, 28, 1) dtype=float32_ref>
    # NOTE(review): relies on Adam creating exactly four slot variables
    # last in the global collection -- fragile if the graph changes.
    optimizer_variables = tf.global_variables()[-4:]
    # The result is optimized for uint8. Added by Weilin.
    if uint8_optimized:
        predictions = tf.argmax(model_logits(x_star_uint8), 1)
    else:
        predictions = tf.argmax(model_logits(x_star), 1)
    # "Correct" prediction == failed untargeted attack (and vice versa).
    if targeted is False:
        correct_prediction = tf.equal(predictions, tf.argmax(y, 1))
    else:
        correct_prediction = tf.not_equal(predictions, tf.argmax(y, 1))
    # Initialize loss coefficients: rows are [current, high, low] in log space.
    coeff_block_log = np.tile([[initial_const], [float('nan')], [float('nan')]], (1, N0))
    coeff_curr_log = coeff_block_log[0]
    coeff_high_log = coeff_block_log[1]
    coeff_low_log = coeff_block_log[2]
    # Collect best adversarial images (NaN l2 == no success yet).
    best_l2 = np.zeros((N0,)) + float('nan')
    best_coeff_log = np.zeros((N0,)) + float('nan')
    best_iter = np.zeros((N0,)) + float('nan')
    best_images = np.copy(batch_images)
    # I didn't find the initialization of random perturbations?
    for _ in range(binary_search_steps):
        # Reset x_star_tanh and optimizer state for this coefficient round.
        sess.run(tf.variables_initializer([x_star_tanh] + optimizer_variables))
        # NOTE(review): this builds an assertion op but never runs it,
        # so it has no effect as written.
        tf.assert_variables_initialized()
        print (coeff_curr_log) # %%%
        curr_coeff = np.exp(coeff_curr_log)
        # Initially, all are failed adversarial examples.
        # NOTE(review): np.bool is removed in NumPy >= 1.24; needs `bool`
        # on modern NumPy.
        all_fail = np.ones((N0,), dtype=np.bool)
        # Training loop
        improve_count = 0
        # 5000 iterations by default.
        for j in range(max_iterations):
            # Correct prediction means it is failed untargeted attacks.
            xst, adv_fail, l1o, l2d, _ = sess.run([x_star, correct_prediction, detector_penalty, l2dist, train_adv_step], feed_dict={
                x: batch_images,
                y: batch_labels,
                coeff: curr_coeff,
            })
            all_fail = np.logical_and(all_fail, adv_fail)
            for i in range(N0):
                # Skip failed attacks and those still flagged by the detector.
                if adv_fail[i] or l1o[i] > 0:
                    continue
                # Save the best successful adversarial examples, with lowest L2.
                if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]:
                    best_l2[i] = l2d[i]
                    best_coeff_log[i] = coeff_curr_log[i]
                    best_iter[i] = j
                    best_images[i] = xst[i]
                    improve_count += 1
            if j % 100 == 0:
                print("Adv. training iter. {}/{} improved {}".format(j, max_iterations, improve_count))
                improve_count = 0
        # Run it once more, because the last training step's result
        # was not evaluated inside the loop.
        xst, adv_fail, l1o, l2d = sess.run([x_star, correct_prediction, detector_penalty, l2dist], feed_dict={
            x: batch_images,
            y: batch_labels,
        })
        for i in range(N0):
            if adv_fail[i] or l1o[i] > 0:
                continue
            if math.isnan(best_l2[i]) or l2d[i] < best_l2[i]:
                best_l2[i] = l2d[i]
                best_coeff_log[i] = coeff_curr_log[i]
                best_iter[i] = max_iterations
                best_images[i] = xst[i]
                improve_count += 1
        print("Finished training {}/{} improved {}".format(max_iterations, max_iterations, improve_count))
        # Save generated examples and their coefficients after each round.
        np.save(eval_dir + '/combined_adv_imgs.npy', best_images)
        np.save(eval_dir + '/combined_adv_coeff_log.npy', best_coeff_log)
        # Update coeff: binary search in log space per example.
        for i, (fail, curr, high, low) in enumerate(zip(adv_fail, coeff_curr_log, coeff_high_log, coeff_low_log)):
            if fail:
                # increase to allow more distortion
                coeff_low_log[i] = low = curr
                if math.isnan(high):
                    coeff_curr_log[i] = curr + 2.3
                else:
                    coeff_curr_log[i] = (high + low) / 2
            else:
                # decrease to penalize distortion
                coeff_high_log[i] = high = curr
                if math.isnan(low):
                    coeff_curr_log[i] = curr - 0.69
                else:
                    coeff_curr_log[i] = (high + low) / 2
    np.save(eval_dir + '/combined_coeff_log.npy', coeff_block_log)
    return best_images
|
Resin transfer moulding processes are a family of processes wherein the fabric reinforcements are laid up into the mould as a dry stack of materials, pre-compacted to the mould contour in some cases, and held together by a binder. These "preforms" are then more easily laid into the mould tool. A second mould tool is then clamped over the first, and resin is drawn into the cavity. Vacuum or pressure can be applied to the mould cavity to assist the resin in being drawn into the fabric reinforcement. Once the fabric is impregnated, the resin inlets are closed and the laminate is allowed to cure. The following sections describe the variants of the RTM family.
The fabric reinforcements are laid up as dry stack of materials in a closed mould cavity, formed by clamping of two rigid mould surfaces (the male and the female) together (Fig. 6). The resin is injected into the fabric reinforcement held in the mould cavity under pressure. Once the fabric is impregnated, the resin is sighted at the outlet, the inlets are closed and the laminate is allowed to cure. Both injection and cure can take place at either ambient or elevated temperature. Use of temperature depends on the matrix system used, to control its flow characteristics against the resistance offered by the fibre stack.
Fabrics are laid up as a dry stack of materials as in RTM. The fiber stack is then covered with peel ply and a knitted type of non-structural fabric. The whole dry stack is then vacuum bagged, and once the bag leaks are checked and plugged, resin is allowed to flow into the dry laminate. The resin distribution over the whole laminate is aided by resin flowing easily through the non-structural fabric, and wetting the fabric out from above in figure 7.
An advanced variant of the mechanized resin flow infusion technique, wherein a thin resin film carried on a moving carrier/release film is transferred to a dry fabric moving in parallel through calendaring roll system without weave distortion. The wet layup so obtained is then rolled up and cold stored. The pre-impregnated fabric could be unrolled directly onto the tool and then vacuum bagged before curing. This technology has been termed as Just-In time-Prepreg (JIPREG) technique and is extensively used for making composite products of large surface area.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helpers for training an agent using imitation learning."""
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
EPS = 1e-9
LARGE_NUM = 1e9
def metric_fixed_point(action_cost_matrix, gamma=1.0):
    """Computes the pseudo-metric satisfying F using fixed point iteration.

    Repeatedly applies d[i, j] <- cost[i, j] + gamma * d[i+1, j+1]
    (clamped at the last row/column) until the update changes the matrix
    by less than the convergence tolerance.
    """
    n, m = action_cost_matrix.shape
    # Matches the module-level EPS (1e-9); bound locally so the solver
    # is self-contained.
    tol = 1e-9
    d_metric = np.zeros_like(action_cost_matrix)
    while True:
        updated = np.empty_like(d_metric)
        for row in range(n):
            for col in range(m):
                successor = d_metric[min(row + 1, n - 1), min(col + 1, m - 1)]
                updated[row, col] = action_cost_matrix[row, col] + gamma * successor
        # Converged: return the previous iterate (identical to within tol).
        if np.sum(np.abs(d_metric - updated)) < tol:
            return d_metric
        d_metric = updated
@tf.function
def tf_metric_fixed_point(action_cost_matrix, gamma):
  """TF wrapper around the NumPy fixed-point solver.

  Executes `metric_fixed_point` via tf.numpy_function; the declared
  output is float32 (Tout), so the cost matrix is expected to be a
  float32 tensor as produced by the cost-matrix helpers above.
  """
  return tf.numpy_function(
      metric_fixed_point, [action_cost_matrix, gamma], Tout=tf.float32)
def calculate_action_cost_matrix(actions_1, actions_2):
  """Pairwise 0/1 cost matrix: 0 where actions agree, 1 where they differ."""
  lhs = tf.expand_dims(actions_1, axis=1)
  rhs = tf.expand_dims(actions_2, axis=0)
  agreement = tf.cast(tf.math.equal(lhs, rhs), dtype=tf.float32)
  return 1.0 - agreement
def calculate_reward_cost_matrix(rewards_1, rewards_2):
  """Pairwise cost matrix of absolute reward differences |r1_i - r2_j|."""
  col = tf.expand_dims(rewards_1, axis=1)
  row = tf.expand_dims(rewards_2, axis=0)
  return tf.cast(tf.abs(col - row), dtype=tf.float32)
def ground_truth_coupling(actions_1, actions_2):
  """Calculates ground truth coupling using optimal actions on two envs.

  The offset between the two optimal action sequences is the distance
  between the positions of the first `1` action in each; cost is zero
  along the correspondingly shifted diagonal and one elsewhere.
  """
  diff = actions_2.index(1) - actions_1.index(1)
  assert diff >= 0, 'Please pass the actions_2 as actions_1 and vice versa!'
  n, m = len(actions_1), len(actions_2)
  cost_matrix = np.ones((n, m), dtype=np.float32)
  # Zero out (i, i + diff) pairs while both indices stay in range.
  for i, j in zip(range(n), range(diff, m)):
    cost_matrix[i, j] = 0.0
  return cost_matrix
@tf.function
def cosine_similarity(x, y):
  """Computes cosine similarity between all pairs of vectors in x and y."""
  rows = x[:, tf.newaxis]
  cols = y[tf.newaxis, :]
  dot_products = tf.reduce_sum(rows * cols, axis=-1)
  # EPS keeps the division finite for zero-norm vectors.
  norm_products = tf.norm(rows, axis=-1) * tf.norm(cols, axis=-1)
  return dot_products / (norm_products + EPS)
@tf.function
def l2_distance(x, y):
  """Computes the Euclidean (L2) distance between all pairs of vectors in
  x and y.

  (The previous docstring said "cosine similarity" -- a copy-paste error
  from the function above; this returns pairwise L2 distances.)
  """
  x_expanded, y_expanded = x[:, tf.newaxis], y[tf.newaxis, :]
  return tf.sqrt(tf.reduce_sum((x_expanded - y_expanded)**2, axis=-1))
@tf.function
def contrastive_loss(similarity_matrix,
                     metric_values,
                     temperature,
                     coupling_temperature=1.0,
                     use_coupling_weights=True):
  """Contrastive loss with soft coupling.

  Positive pairs are the (row, argmin-metric column) and
  (argmin-metric row, column) entries; all other entries act as
  negatives via logsumexp.  When `use_coupling_weights` is set, logits
  are reweighted by exp(-metric / coupling_temperature).
  NOTE(review): `similarity_matrix` and `metric_values` are modified
  in place via `/=` inside a tf.function -- callers should not reuse
  the passed tensors.
  """
  logging.info('Using alternative contrastive loss.')
  metric_shape = tf.shape(metric_values)
  similarity_matrix /= temperature
  neg_logits1, neg_logits2 = similarity_matrix, similarity_matrix
  # Row-wise positives: column with the smallest metric value per row.
  col_indices = tf.cast(tf.argmin(metric_values, axis=1), dtype=tf.int32)
  pos_indices1 = tf.stack(
      (tf.range(metric_shape[0], dtype=tf.int32), col_indices), axis=1)
  pos_logits1 = tf.gather_nd(similarity_matrix, pos_indices1)
  # Column-wise positives: row with the smallest metric value per column.
  row_indices = tf.cast(tf.argmin(metric_values, axis=0), dtype=tf.int32)
  pos_indices2 = tf.stack(
      (row_indices, tf.range(metric_shape[1], dtype=tf.int32)), axis=1)
  pos_logits2 = tf.gather_nd(similarity_matrix, pos_indices2)
  if use_coupling_weights:
    # Soft coupling: down-weight negatives with small metric distance.
    metric_values /= coupling_temperature
    coupling = tf.exp(-metric_values)
    pos_weights1 = -tf.gather_nd(metric_values, pos_indices1)
    pos_weights2 = -tf.gather_nd(metric_values, pos_indices2)
    pos_logits1 += pos_weights1
    pos_logits2 += pos_weights2
    negative_weights = tf.math.log((1.0 - coupling) + EPS)
    # Positive positions keep their positive weights in the negative sums.
    neg_logits1 += tf.tensor_scatter_nd_update(
        negative_weights, pos_indices1, pos_weights1)
    neg_logits2 += tf.tensor_scatter_nd_update(
        negative_weights, pos_indices2, pos_weights2)
  neg_logits1 = tf.math.reduce_logsumexp(neg_logits1, axis=1)
  neg_logits2 = tf.math.reduce_logsumexp(neg_logits2, axis=0)
  # InfoNCE-style objective, symmetrized over rows and columns.
  loss1 = tf.reduce_mean(neg_logits1 - pos_logits1)
  loss2 = tf.reduce_mean(neg_logits2 - pos_logits2)
  return loss1 + loss2
def representation_alignment_loss(nn_model,
                                  optimal_data_tuple,
                                  use_bisim=False,
                                  gamma=0.99,
                                  use_l2_loss=False,
                                  use_coupling_weights=False,
                                  coupling_temperature=1.0,
                                  temperature=1.0,
                                  ground_truth=False):
  """Representation alignment loss.

  Args:
    nn_model: model exposing a `representation(obs)` method.
    optimal_data_tuple: pair of (obs, actions, rewards) trajectories,
      one per environment.
    use_bisim: build the cost matrix from rewards instead of actions.
    gamma: discount used by the metric fixed-point solver.
    use_l2_loss: regress L2 distances onto metric values instead of
      using the contrastive loss.
    use_coupling_weights / coupling_temperature / temperature:
      forwarded to `contrastive_loss`.
    ground_truth: use the hand-constructed coupling instead of the
      fixed-point metric.

  Returns:
    (alignment_loss, metric_vals, similarity_matrix).
  """
  obs_1, actions_1, rewards_1 = optimal_data_tuple[0]
  obs_2, actions_2, rewards_2 = optimal_data_tuple[1]
  representation_1 = nn_model.representation(obs_1)
  representation_2 = nn_model.representation(obs_2)
  # "similarity" is an L2 distance matrix in the l2 branch, a cosine
  # similarity matrix otherwise.
  if use_l2_loss:
    similarity_matrix = l2_distance(representation_1, representation_2)
  else:
    similarity_matrix = cosine_similarity(representation_1, representation_2)
  if ground_truth:
    metric_vals = tf.convert_to_tensor(
        ground_truth_coupling(actions_1, actions_2), dtype=tf.float32)
  else:
    if use_bisim:
      cost_matrix = calculate_reward_cost_matrix(rewards_1, rewards_2)
    else:
      cost_matrix = calculate_action_cost_matrix(actions_1, actions_2)
    metric_vals = tf_metric_fixed_point(cost_matrix, gamma)
  if use_l2_loss:
    # Directly match the l2 distance between representations to metric values
    alignment_loss = tf.reduce_mean((similarity_matrix - metric_vals)**2)
  else:
    alignment_loss = contrastive_loss(
        similarity_matrix,
        metric_vals,
        temperature,
        coupling_temperature=coupling_temperature,
        use_coupling_weights=use_coupling_weights)
  return alignment_loss, metric_vals, similarity_matrix
@tf.function
def cross_entropy(logits, targets):
  """Mean softmax cross-entropy for 2-class logits and 0/1 targets."""
  # Column 0 carries P(class 0) = 1 - target, column 1 carries P(class 1).
  labels = tf.stack([1 - targets, targets], axis=1)
  per_example_loss = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  return tf.reduce_mean(per_example_loss)
def cross_entropy_loss(model, inputs, targets, training=False):
  """Run `model` on `inputs` and return mean cross-entropy vs `targets`."""
  logits = model(inputs, training=training)
  return cross_entropy(logits, targets)
@tf.function
def weight_decay(model):
  """Mean L2 penalty over all trainable variables of `model`."""
  penalties = [tf.nn.l2_loss(v) for v in model.trainable_variables]
  return tf.add_n(penalties) / len(penalties)
def create_balanced_dataset(x_train, y_train, batch_size):
  """Creates a balanced training dataset by upsampling the rare class.

  Splits the data by label, shuffle-and-repeats each class separately,
  then interleaves samples from both classes (50/50 in expectation via
  sample_from_datasets), so the minority class is effectively upsampled.
  """
  def partition_dataset(x_train, y_train):
    # Split into (positives, negatives) with freshly built 1.0/0.0 labels.
    neg_mask = (y_train == 0)
    x_train_neg = x_train[neg_mask]
    y_train_neg = np.zeros(len(x_train_neg), dtype=np.float32)
    x_train_pos = x_train[~neg_mask]
    y_train_pos = np.ones(len(x_train_pos), dtype=np.float32)
    return (x_train_pos, y_train_pos), (x_train_neg, y_train_neg)
  pos, neg = partition_dataset(x_train, y_train)
  # Buffer sizes equal each class's example count for a full shuffle.
  pos_dataset = tf.data.Dataset.from_tensor_slices(pos).apply(
      tf.data.experimental.shuffle_and_repeat(buffer_size=len(pos[0])))
  neg_dataset = tf.data.Dataset.from_tensor_slices(neg).apply(
      tf.data.experimental.shuffle_and_repeat(buffer_size=len(neg[0])))
  dataset = tf.data.experimental.sample_from_datasets(
      [pos_dataset, neg_dataset])
  ds_tensors = dataset.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
  return ds_tensors
def create_dataset(x_train, y_train, batch_size):
  """Creates a training dataset.

  Shuffle-and-repeats the full (x, y) data, then batches and prefetches.

  Fix: the shuffle buffer size was `len(x_train[0])` -- the size of the
  *first example* (feature count), not the number of examples.  The
  sibling `create_balanced_dataset` uses `len(pos[0])`, which there counts
  examples because `pos` is an (x, y) tuple; copying that pattern here
  picked the wrong axis.  Use `len(x_train)` for a full shuffle.
  """
  dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).apply(
      tf.data.experimental.shuffle_and_repeat(buffer_size=len(x_train)))
  ds_tensors = dataset.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
  return ds_tensors
def create_iterators(datasets, batch_size):
  """Create tf.Dataset iterators from a list of numpy datasets.

  Returns (x_batch, init_ops): a single reinitializable iterator's
  next-element tensor plus one initializer op per input dataset.
  NOTE(review): tf.data.Iterator.from_structure and the
  output_types/output_shapes properties are TF1-style APIs; with the
  tensorflow.compat.v2 import above this needs v1 compatibility mode --
  confirm the intended TF version.
  """
  tf_datasets = [tf.data.Dataset.from_tensor_slices(data).batch(batch_size)
                 for data in datasets]
  input_iterator = tf.data.Iterator.from_structure(
      tf_datasets[0].output_types, tf_datasets[0].output_shapes)
  init_ops = [input_iterator.make_initializer(data) for data in tf_datasets]
  x_batch = input_iterator.get_next()
  return x_batch, init_ops
|
Brought to you by Designerwear, Armani Junior kids jog pants. Shown here in navy, featuring - elasticated waistband & ankle cuffs, navy & grey, 95% cotton & 5% elastane & AJ Eagle Motif on the back pocket.
To view more children's wear at Designerwear, click here.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import re
import itertools
import gtk
import tegakigtk.recognizer
import cjklib.dictionary
import cjklib.dictionary.search
import cjklib.reading
import cjklib.characterlookup
import tagtable
import sorder
MODULE_DIR = os.path.dirname(os.path.abspath( __file__ ))
CJKLIB_OPTS = {'databaseUrl': 'sqlite:///' +
os.path.join(MODULE_DIR, 'cjklib.db')}
GLADE_FILE = os.path.join(MODULE_DIR, "taipan.glade")
class DictionaryWidget(gtk.Frame):
    """Custom widget encapsulating dictionary functions including handwriting
    recognition.

    Wraps a Glade-defined layout in a gtk.Frame: a search entry with popup
    completion, reading/translation search toggles, a formatted result view,
    and a Tegaki handwriting recognizer in an expander.  Lookups go through
    cjklib's CEDICT dictionary backed by the bundled sqlite database.
    """
    def __init__(self):
        """Init the widget components and required modules from cjklib"""
        gtk.Frame.__init__(self)
        # Init cjklib: '?' and '*' act as single/multi character wildcards.
        cjklib.dictionary.search.setDefaultWildcards(singleCharacter='?',
                                                    multipleCharacters='*')
        # 'T' = traditional character locale for character lookups.
        self.cjk = cjklib.characterlookup.CharacterLookup('T', **CJKLIB_OPTS)
        self.dict = cjklib.dictionary.CEDICT(**CJKLIB_OPTS)
        self.reading = cjklib.reading.ReadingFactory(**CJKLIB_OPTS)
        # Fire up GtkBuilder
        builder = gtk.Builder()
        builder.add_from_file(GLADE_FILE)
        # Get dictionary layout from GtkBuilder and add it to this widget.
        # The layout must be detached from its Glade placeholder window first.
        gladewin = builder.get_object("DictionaryWidget")
        layout = builder.get_object("DictionaryWidgetLayout")
        gladewin.remove(layout)
        self.add(layout)
        # Get search box and connect events
        self.entry = builder.get_object("ent_search")
        self.entry.connect("key_press_event", self._on_entry_keypress)
        self.entry.connect("changed", self._on_entry_changed)
        # Setup popup completition for search box
        compl = gtk.EntryCompletion()
        compl.set_popup_set_width(False)
        self.entry.set_completion(compl)
        # ListStore will contain nice string for displaying in popup and
        # simplified characters to put into searchbox
        self.compl_model = gtk.ListStore(str, str)
        compl.set_model(self.compl_model)
        compl.set_text_column(0)
        # Match function just accepts all items from the list, as we are doing
        # filtering stuff elsewhere
        compl.set_match_func(lambda c,k,r: True)
        compl.connect("match_selected", self._on_compl_match_selected)
        # Get search button and connect events
        search = builder.get_object("btn_search")
        search.connect("clicked", self._on_search_clicked)
        # Get option checkboxes
        self.chk_reading = builder.get_object("chk_reading")
        self.chk_translation = builder.get_object("chk_translation")
        # Get result text buffer; the custom tag table defines the styles
        # (headword, tones, basictrans) used when rendering results.
        tag = tagtable.TaipanTagTable()
        self.rbuf = gtk.TextBuffer(tag)
        result = builder.get_object("txt_result")
        result.set_buffer(self.rbuf)
        result.connect("button_press_event", self._on_result_click)
        result.connect("populate_popup", self._on_result_popup)
        # Get expander and add recognizer to it
        self.recognizer = tegakigtk.recognizer.SimpleRecognizerWidget()
        self.recognizer.connect("commit-string", self._on_recognizer_commit)
        self.exp_recognizer = builder.get_object("exp_recognize")
        self.exp_recognizer.add(self.recognizer)

    def search(self, what=None):
        """Do the dictionary search and display the nicely formatted result"""
        # If the text was provided as an argument, update the searchbox
        if what != None:
            self.entry.set_text(what)
        # don't bother to search for empty string
        if self.entry.get_text() == '':
            return
        # search in characters (HeadWord)
        res = self.dict.getForHeadword(unicode(self.entry.get_text()))
        # search in reading (Pinyin), optionally chained after headword hits
        if self.chk_reading.get_active():
            res2 = self.dict.getForReading(unicode(self.entry.get_text()),
                                           reading='Pinyin',
                                           toneMarkType='numbers')
            res = itertools.chain(res, res2)
        # search in translation
        if self.chk_translation.get_active():
            res2 = self.dict.getForTranslation(unicode(self.entry.get_text()))
            res = itertools.chain(res, res2)
        # Display the result
        self.rbuf.set_text('\n')
        num_results = 0
        for r in res:
            num_results += 1
            # Chinese: simplified headword, with traditional in parentheses
            # when the two differ.
            self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
                                               r.HeadwordSimplified, "headword")
            if r.HeadwordSimplified != r.HeadwordTraditional:
                s = " (" + r.HeadwordTraditional + ")"
                self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
                                                   s, "headword")
            # Reading
            self.rbuf.insert(self.rbuf.get_end_iter(), "\n[ ")
            self._add_formatted_reading(r.Reading)
            self.rbuf.insert(self.rbuf.get_end_iter(), " ]\n\n")
            # Translation: CEDICT packs senses as '/sense/sense/.../'.
            # The first sense is the basic translation; further senses are
            # numbered with circled ideograph characters (U+3280 onward)
            # for 1-10 and plain "(n)" beyond that.
            s = r.Translation[1:-1].split('/')
            basictrans = s[0] + "\n"
            extended = ""
            for i in range(1, min(len(s), 11)):
                m = " " + unichr(12928+i-1) + " " + s[i] + "\n"
                extended += m
            for i in range(11, len(s)):
                m = " (" + str(i) + ") " + s[i] + "\n"
                extended += m
            self._add_text_with_readings(basictrans, ["basictrans"])
            self._add_text_with_readings(extended)
            self.rbuf.insert(self.rbuf.get_end_iter(), "\n\n")
        # Display an error message if the given expression was not found
        if num_results == 0:
            self.rbuf.set_text("\nExpression '"
                               + unicode(self.entry.get_text())
                               + "' was not found in the dictionary!")

    def _add_text_with_readings(self, text, tags=[]):
        """Find readings in the text and format them properly"""
        # NOTE(review): mutable default argument; harmless here because
        # `tags` is never mutated, only unpacked.
        # add reading blocks ("[...]" spans) and plaintext before them
        last = 0
        for match in re.finditer('\[(.*)\]', text):
            s = match.start(1)
            e = match.end(1)
            # Normalize the embedded reading to the dictionary's reading
            # conventions before rendering.
            rd = self.reading.convert(match.group(1), self.dict.READING,
                                      self.dict.READING,
                                      sourceOptions=self.dict.READING_OPTIONS)
            self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
                                               text[last:s], *tags)
            self._add_formatted_reading(rd, tags)
            last = e
        # append final part
        self.rbuf.insert_with_tags_by_name(self.rbuf.get_end_iter(),
                                           text[last:], *tags)

    def _add_formatted_reading(self, reading, tags=[]):
        """Split reading string to syllables and add them with proper
        style according to tone"""
        # NOTE(review): mutable default argument; not mutated, only unpacked.
        decomp = self.reading.decompose(reading, 'Pinyin')
        for ent in decomp:
            if self.reading.isReadingEntity(ent, 'Pinyin'):
                # Style each syllable by its tone (tags tone1..tone4,
                # tonenull for the neutral tone).
                foo,tone = self.reading.splitEntityTone(ent, 'Pinyin')
                if tone == 1:
                    self.rbuf.insert_with_tags_by_name(
                        self.rbuf.get_end_iter(), ent, "tone1", *tags)
                elif tone == 2:
                    self.rbuf.insert_with_tags_by_name(
                        self.rbuf.get_end_iter(), ent, "tone2", *tags)
                elif tone == 3:
                    self.rbuf.insert_with_tags_by_name(
                        self.rbuf.get_end_iter(), ent, "tone3", *tags)
                elif tone == 4:
                    self.rbuf.insert_with_tags_by_name(
                        self.rbuf.get_end_iter(), ent, "tone4", *tags)
                else:
                    self.rbuf.insert_with_tags_by_name(
                        self.rbuf.get_end_iter(), ent, "tonenull", *tags)
            else:
                # Punctuation/separators: no tone styling.
                self.rbuf.insert_with_tags_by_name(
                    self.rbuf.get_end_iter(), ent, *tags)

    def _on_entry_keypress(self, widget, event):
        """Do dictionary search when RETURN was pressed inside the search box
        """
        if(event.keyval == gtk.keysyms.Return):
            self.search()
        return False

    def _on_entry_changed(self, widget):
        """Update popup completition whenever searchbox contents is changed"""
        # Wildcard search for empty string is evil
        if len(self.entry.get_text()) == 0:
            return False
        # Get matching items from dictionary (prefix + '*' wildcard) and
        # update the completion model.
        res = self.dict.getForHeadword(unicode(self.entry.get_text())+'*')
        self.compl_model.clear()
        for r in res:
            s = r.HeadwordSimplified
            if r.HeadwordSimplified != r.HeadwordTraditional:
                s += " (" + r.HeadwordTraditional + ")"
            s += " [" + r.Reading + "]"
            self.compl_model.append([s, r.HeadwordSimplified])
        return False

    def _on_compl_match_selected(self, completion, model, row):
        """When an item from popup completition was selected, update
        the search box with appropriate value"""
        # Column 1 holds the bare simplified headword (column 0 is display).
        self.entry.set_text(model[row][1])
        self.search()
        return True

    def _on_search_clicked(self, widget):
        """Do dictionary search when Search button was clicked"""
        self.search()

    def _on_result_click(self, widget, event):
        """If a CJK character was under the mouse pointer in the moment
        of right-click, save the character for popup menu purposes"""
        self.sorder_to_popup = None
        # Right-click check
        if event.button != 3:
            return False
        # Get the character under the mouse pointer
        x,y = widget.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
                                             int(event.x), int(event.y))
        start = widget.get_iter_at_position(x, y)
        s, e = start[0], start[0].copy()
        e.forward_char()
        char = s.get_text(e)
        # If the character is not an CJK character, don't do anything
        if not self.cjk.isCharacterInDomain(char):
            return False
        self.sorder_to_popup = char
        return False

    def _on_result_popup(self, widget, menu):
        """If a CJK character was targeted, add 'Show stroke order' item to
        the popup menu"""
        # NOTE(review): self.sorder_to_popup is only ever assigned in
        # _on_result_click; this relies on button_press always firing
        # before populate_popup -- confirm for keyboard-invoked menus.
        if self.sorder_to_popup != None:
            menu_sorder = gtk.MenuItem( "Show stroke order")
            menu_sorder.connect("activate", self._on_sorder_activate,
                                self.sorder_to_popup)
            menu_sorder.show()
            menu.prepend(menu_sorder)
        return False

    def _on_sorder_activate(self, widget, char):
        """Display stroke order animation window when "Show stroke order"
        context menu item was activated"""
        anim = sorder.StrokeOrderAnimation(char)
        anim.start()

    def _on_recognizer_commit(self, widget, char):
        """When a character from the recognizer was selected, add it to the
        searchbox"""
        self.entry.set_text(self.entry.get_text() + char)
        self.search()
        self.recognizer.clear_all()
def run():
    """Build the main window, wire up the dictionary widget and enter the
    GTK main loop (with the gdk thread lock held, as PyGTK requires)."""
    window = gtk.Window()
    window.add(DictionaryWidget())
    window.connect("destroy", gtk.main_quit)
    window.set_size_request(350, 700)
    window.set_title("Tajpan dictionary")
    window.show_all()
    gtk.gdk.threads_init()
    gtk.gdk.threads_enter()
    gtk.main()
    gtk.gdk.threads_leave()
# Start the GUI when executed as a script (not when imported as a module).
if __name__ == "__main__":
    run()
|
03 nights' accommodation. • Daily buffet breakfast. • Transfers, sightseeing and excursions as per itinerary. • Visit to Cellular Jail with entry fees. • Light and Sound Show.
After breakfast, transfer to airport. Board the flight for onward destination.
|
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
from zope.component import getMultiAdapter
from Products.CMFCore.utils import getToolByName
from zope.component import getUtility,getAdapters
from zope.app.publisher.interfaces.browser import IBrowserMenu
from ubify.policy.config import spacesdefaultaddablenonfolderishtypes, spacesdefaultaddableforfolders
from Acquisition import aq_inner, aq_base, aq_parent
from ubify.cyninv2theme import checkHasPermission, getRootID, getLocationListForAddContent, canAddContent, getBestMatchedLocationForAddingContent, getDisallowedTypes
class AddNewMenuViewlet(ViewletBase):
    """Viewlet backing the "Add new" menu (rendered by addnew_menu.pt).

    Gathers the addable content types and the contextual factory menu for
    the current context and exposes them as attributes read by the
    template. Legacy Python 2 / Plone code: note the `<>` operator and
    `has_key()` usage throughout.
    """
    render = ViewPageTemplateFile('addnew_menu.pt')

    def getAddMenuItems(self,portal,id):
        """Return the plone_contentmenu_factory entries of portal[id].

        Filters out the 'settings'/'_settings' entries and, for the
        'views' menu, backfills the absolute_url/Title/portal_type keys
        the template expects. Returns [] when `portal` has no attribute
        named `id`.
        """
        objlist = []
        try:
            objMenu = getattr(portal,id)
            menu = getUtility(IBrowserMenu, name='plone_contentmenu_factory')
            newmenu = menu.getMenuItems(objMenu,self.request)
            for ob in newmenu:
                # Settings entries are not addable content types.
                if ob['extra']['id'] <> '_settings' and ob['extra']['id'] <> 'settings':
                    if id == 'views' and ob.has_key('id'):
                        # Normalize: factory menu items may lack the keys
                        # the template reads, so derive them.
                        if ob.has_key('absolute_url') == False:
                            ob['absolute_url'] = ob['action']
                        if ob.has_key('Title') == False:
                            ob['Title'] = ob['title']
                        if ob.has_key('portal_type') == False:
                            ob['portal_type'] = ob['id']
                    objlist.append(ob)
        except AttributeError:
            pass
        return objlist

    def update(self):
        """Compute all menu state consumed by the template.

        Populates the addable-type lists (addnewitems, addnewcontainers,
        contenidoeducativo), the view menu, and the contextual menu data
        (currentcontextmenu, currentcontexttitle, contextualurl,
        contextuid, contextdisallowedtypes) for the nearest location the
        user is allowed to add content to.
        """
        # Reset all template-facing state.
        self.addnewitems = []
        self.viewmenu = []
        self.currentcontextmenu = []
        self.currentcontexttitle = ''
        self.contextualurl = ''
        self.contextuid = ''
        self.contextdisallowedtypes = []
        portal_state = getMultiAdapter((self.context, self.request),name=u'plone_portal_state')
        context_state = getMultiAdapter((self.context, self.request),name=u'plone_context_state')
        typetool= getToolByName(self.context, 'portal_types')
        object_typename = self.context.portal_type
        portal = portal_state.portal()
        self.spaceslist = getLocationListForAddContent(portal)
        self.viewmenu = self.getAddMenuItems(portal,'views')
        self.anonymous = portal_state.anonymous()
        if not self.anonymous:
            # Non-folderish types addable inside spaces.
            for eachtype in spacesdefaultaddablenonfolderishtypes:
                object_typeobj = typetool[eachtype]
                if object_typeobj <> None:
                    self.addnewitems.append({'id': object_typeobj.id,
                        'title': object_typeobj.Title(),
                        'description':object_typeobj.Description(),
                        'icon': object_typeobj.content_icon})
            # Inside a Folder a different addable-type set applies.
            if self.context.portal_type == 'Folder':
                self.addnewitems = []
                for eachtype in spacesdefaultaddableforfolders:
                    object_typeobj = typetool[eachtype]
                    if object_typeobj <> None:
                        self.addnewitems.append({'id': object_typeobj.id,
                            'title': object_typeobj.Title(),
                            'description':object_typeobj.Description(),
                            'icon': object_typeobj.content_icon})
            # Container types offered in their own submenu.
            self.addnewcontainers = []
            containers = ['ContentSpace', 'Course', 'Folder']
            for eachtype in containers:
                object_typeobj = typetool[eachtype]
                if object_typeobj <> None:
                    self.addnewcontainers.append({'id': object_typeobj.id,
                        'title': object_typeobj.Title(),
                        'description':object_typeobj.Description(),
                        'icon': object_typeobj.content_icon})
            # Educational content types ("contenido educativo").
            self.contenidoeducativo = []
            containers = ['Exam', 'Quiz', 'ZipContent', 'SCO']
            for eachtype in containers:
                object_typeobj = typetool[eachtype]
                if object_typeobj <> None:
                    self.contenidoeducativo.append({'id': object_typeobj.id,
                        'title': object_typeobj.Title(),
                        'description':object_typeobj.Description(),
                        'icon': object_typeobj.content_icon})
            # Case-insensitive sort by visible title (Python 2 cmp-style).
            self.addnewitems.sort(lambda x,y: cmp(x['title'].lower(),y['title'].lower()))
            menu = getUtility(IBrowserMenu, name='plone_contentmenu_factory')
            if object_typename in ('RecycleBin',):
                # Nothing can be added inside the recycle bin.
                self.currentcontextmenu = []
            elif object_typename in ('Plone Site',):
                #get root object and check for it
                objRoot = getattr(portal,getRootID())
                if checkHasPermission('Add portal content', aq_inner(objRoot)):
                    self.currentcontextmenu = menu.getMenuItems(objRoot,self.request)
                    self.contextualurl = aq_inner(objRoot).absolute_url()
                    self.currentcontexttitle = objRoot.Title()
                    self.contextuid = objRoot.UID()
                    self.contextdisallowedtypes = objRoot.disallowedtypes()
            else:
                if object_typename in ('ContentRoot','ContentSpace', 'Course','Folder') and self.context.isPrincipiaFolderish and checkHasPermission('Add portal content',aq_inner(self.context)):
                    self.currentcontextmenu = menu.getMenuItems(self.context, self.request)
                    self.contextualurl = aq_inner(self.context).absolute_url()
                if object_typename in ('ContentRoot','ContentSpace','Course','Folder'):
                    self.currentcontexttitle = context_state.object_title()
                    self.contextuid = aq_inner(self.context).UID()
                    self.contextdisallowedtypes = (aq_inner(self.context)).disallowedtypes()
                else:
                    # Walk up the acquisition chain to the nearest
                    # space-like container the user may add content to.
                    currentobject = aq_inner(self.context)
                    parentList = currentobject.aq_chain
                    parentspace = None
                    found = 0
                    try:
                        for type in parentList:
                            if type.portal_type in ('ContentRoot','ContentSpace','Course'):
                                parentspace = type
                                if checkHasPermission('Add portal content',aq_inner(parentspace)):
                                    found = 1
                            if found == 1:
                                break
                    except AttributeError:
                        # Chain elements without portal_type end the walk.
                        parentspace = None
                        pass
                    if parentspace <> None:
                        self.currentcontextmenu = menu.getMenuItems(aq_inner(parentspace),self.request)
                        self.currentcontexttitle = parentspace.Title()
                        self.contextualurl = parentspace.absolute_url()
                        self.contextuid = parentspace.UID()
                        self.contextdisallowedtypes = parentspace.disallowedtypes()
                #strip out 'settings' item(s)
                self.currentcontextmenu = [ob for ob in self.currentcontextmenu if ob['extra']['id'] <> 'settings' and ob['extra']['id'] <> '_settings']
            if self.contextuid == '':
                #best match element is brain
                # NOTE(review): bestmatchedspace appears to be a catalog
                # brain (attribute access, getURL()) -- confirm.
                bestmatchedspace = getBestMatchedLocationForAddingContent(portal)
                if bestmatchedspace:
                    self.currentcontexttitle = bestmatchedspace.Title
                    self.contextuid = bestmatchedspace.UID
                    self.contextualurl = bestmatchedspace.getURL()
                    self.contextdisallowedtypes = bestmatchedspace.disallowedtypes

    def icon(self, action):
        """Return the action's icon, falling back to the content_actions
        icon registry when the action defines none."""
        icon = action.get('icon', None)
        if icon is None:
            icon = self.getIconFor('content_actions', action['id'])
        return icon
|
This Sunday, January 27th there will be a farewell reception for Fr. Sebastine following the 11am Mass. CDA is sponsoring the event in Sheltz Hall.
Please be sure to stop by if you can, or try to catch Fr. Sebastine before his departure to Freeport next week — and let's keep him in our prayers!
|
import logging
import warnings
import collections
from six import add_metaclass
from functools import partial
logger = logging.getLogger(__name__)
class Param(object):
    """Describes a single parameter and defines a method for cleaning inputs.

    Extra keyword arguments become plain instance attributes, which lets
    subclasses declare options (e.g. ``strip`` on ``StrParam`` or
    ``true_values`` on ``BoolParam``) without overriding ``__init__``.
    """
    def __init__(self, default=None, allow_list=False, description=None,
                 param_key=None, choices=None, **kwargs):
        self.default = default          # fallback value for absent/invalid input
        self.allow_list = allow_list    # whether multiple values are accepted
        self.description = description  # human-readable help text
        self.param_key = param_key      # external key name, if it differs
        self.choices = choices          # optional whitelist of allowed values
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def clean(self, value, *args, **kwargs):
        """Validate `value`; raise ValueError when outside `choices`."""
        if self.choices and value not in self.choices:
            raise ValueError('"{0}" not a valid choice'.format(value))
        return value

    def clean_list(self, values, *args, **kwargs):
        """Clean each item of `values`, returning a new list."""
        return [self.clean(x, *args, **kwargs) for x in values]


class IntParam(Param):
    """Param that coerces input to ``int`` before validation."""
    def clean(self, value, *args, **kwargs):
        return super(IntParam, self).clean(int(value), *args, **kwargs)


class FloatParam(Param):
    """Param that coerces input to ``float`` before validation."""
    def clean(self, value, *args, **kwargs):
        return super(FloatParam, self).clean(float(value), *args, **kwargs)


class StrParam(Param):
    """Param that coerces input to ``str``, stripping surrounding
    whitespace by default (disable with ``strip=False``)."""
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('strip', True)
        super(StrParam, self).__init__(*args, **kwargs)

    def clean(self, value, *args, **kwargs):
        value = str(value)
        if self.strip:
            value = value.strip()
        return super(StrParam, self).clean(value, *args, **kwargs)


class UnicodeParam(StrParam):
    """Kept for backwards compatibility: on Python 3, ``str`` is already
    unicode, so this behaves exactly like StrParam."""
    def clean(self, value, *args, **kwargs):
        value = str(value)
        if self.strip:
            value = value.strip()
        return super(UnicodeParam, self).clean(value, *args, **kwargs)


class BoolParam(Param):
    """Param that maps common true/false strings to real booleans.

    The recognized values are configurable via the ``true_values`` and
    ``false_values`` options.
    """
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('true_values', ('t', 'true', '1', 'yes'))
        kwargs.setdefault('false_values', ('f', 'false', '0', 'no'))
        super(BoolParam, self).__init__(*args, **kwargs)

    def clean(self, value, *args, **kwargs):
        # Already-parsed booleans pass straight through; previously this
        # crashed with AttributeError on `.lower()`.
        if isinstance(value, bool):
            return super(BoolParam, self).clean(value, *args, **kwargs)
        value = value.lower()
        if value in self.true_values:
            value = True
        elif value in self.false_values:
            value = False
        else:
            # Name the offending value instead of a bare ValueError.
            raise ValueError('"{0}" is not a recognized boolean'.format(value))
        return super(BoolParam, self).clean(value, *args, **kwargs)
class ParametizerMetaclass(type):
    """Collects ``Param`` declarations on a class into ``_fields`` and
    ``_defaults`` registries (inherited copies from base classes included).

    Non-callable, non-underscore class attributes are treated as parameter
    declarations: either a ``Param`` instance or a bare default value,
    which gets wrapped in a plain ``Param``. A sibling ``clean_<attr>``
    method, when present, overrides the field's ``clean``.
    """
    def __new__(cls, name, bases, attrs):
        new_cls = type.__new__(cls, name, bases, attrs)

        # Copy so subclasses never mutate their parents' registries.
        fields = getattr(new_cls, '_fields', {}).copy()
        defaults = getattr(new_cls, '_defaults', {}).copy()

        if hasattr(new_cls, 'param_defaults'):
            warnings.warn('Resource.param_defaults has been deprecated',
                          DeprecationWarning)
            defaults.update(new_cls.param_defaults)

        for attr, value in attrs.items():
            # `callable(value)` replaces `isinstance(value,
            # collections.Callable)`; collections.Callable was removed in
            # Python 3.10 (moved to collections.abc in 3.3).
            if callable(value) or attr.startswith('_'):
                continue

            if isinstance(value, Param):
                field = value
                key = field.param_key or attr
                value = field.default
            else:
                # Shorthand: a bare default value declares a plain Param.
                key = attr
                field = Param(default=value)

            clean_method = 'clean_{0}'.format(attr)
            # Partially apply the clean method with the field as self.
            if clean_method in attrs:
                field.clean = partial(attrs[clean_method], field)

            fields[key] = field
            defaults[key] = value

        new_cls._fields = fields
        new_cls._defaults = defaults
        return new_cls
@add_metaclass(ParametizerMetaclass)
class Parametizer(object):
    """Cleans a dict of raw request parameters against declared fields."""

    def clean(self, params=None, defaults=None):
        """Return a cleaned dict; on any cleaning error a parameter
        silently falls back to its default value."""
        if params is None:
            params = {}

        merged_defaults = self._defaults.copy()
        if defaults is not None:
            merged_defaults.update(defaults)

        cleaned = {}

        # Take the union of keys: clean_<attr> methods may be defined
        # without a corresponding default value.
        for key in set(list(merged_defaults.keys()) + list(params.keys())):
            if key not in params:
                # Absent from the input: use the default as-is.
                cleaned[key] = merged_defaults[key]
                continue

            # Associated param instance, or a pass-through default one.
            field = self._fields.get(key, Param())

            # MultiValueDict support (e.g. Django request.GET / POST).
            if field.allow_list and hasattr(params, 'getlist'):
                value = params.getlist(key)
            else:
                value = params.get(key)

            try:
                if isinstance(value, (list, tuple)):
                    value = field.clean_list(value)
                    if not field.allow_list:
                        value = value[0]
                else:
                    value = field.clean(value)
            except Exception as e:
                # Any failure while cleaning reverts to the default.
                logger.debug('Error cleaning parameter: {0}'.format(e), extra={
                    'key': key,
                    'value': value,
                })
                value = merged_defaults.get(key, value)

            cleaned[key] = value

        return cleaned
|
Farm Flavours is a healthy cooking project for all the family. It helps families and young people have healthier lifestyles and feel less socially isolated.
All these key points will give people a long term appreciation of food and the natural world, will reduce illness, obesity and increase happy healthy lifestyles.
"My boys enjoyed it and were eager to get their hands dirty!
It was lovely — from mixing to cooking and serving."
|
#!/usr/bin/python
# Week 5 problem 3. Twitter.
# Do not delete the comments.
# Do not chnage the functions names, do not change the input parameters.
# Do not change the return types of the functions.
# Your code goes to the part where it says your code goes here.
# Do not change anything else other than the part where it says your code goes here.
# Most of the code below is copied verbatim or modified slightly
# from the book Mining the Social Web 2nd Edition by Mathew A. Russell
from __future__ import print_function
import re
import twitter
import pandas as pd
import os
import pickle
from pytagcloud import create_tag_image, make_tags
def search_twitter(twitter_api, q, search_size = 100, stop_count = 1000):
    '''
    Modified from Example 1-5 in Mining the Social Web 2nd Edition.
    Returns statuses, a list of dictionaries of twitter metadata.

    Parameters:
        twitter_api: Use twitter.Twitter to create twitter.api.Twitter object.
        q (str): search query (e.g. #informatics)
        search_size: number of tweets requested per page, default 100.
        stop_count: stops search when the total size of tweets exceeds stop_count.
    '''
    # See https://dev.twitter.com/docs/api/1.1/get/search/tweets
    search_results = twitter_api.search.tweets(q = q, count = search_size)
    statuses = search_results['statuses']

    # Follow the pagination cursor until we have at least stop_count tweets.
    while stop_count > len(statuses):
        try:
            next_results = search_results['search_metadata']['next_results']
        except KeyError:
            # No more results when next_results doesn't exist.
            # (Was `except KeyError, e` -- Python-2-only syntax that is a
            # SyntaxError on Python 3; the bound name was never used.)
            break

        # Create a dictionary from next_results, which has the following form:
        # ?max_id=313519052523986943&q=NCAA&include_entities=1
        kwargs = dict([ kv.split('=') for kv in next_results[1:].split("&") ])

        # BUG FIX: advance `search_results` so the next iteration reads a
        # fresh cursor; previously only `next_results` was reassigned and
        # the same page was fetched repeatedly.
        search_results = twitter_api.search.tweets(**kwargs)
        statuses += search_results['statuses']
        print(len(statuses), 'tweets fetched...')

    return statuses
def clean_statuses(statuses):
    '''
    Takes a list of dictionaries of tweet metadata returned from
    search_twitter() function, and returns a list with all lowercase words
    (no words with #, @, http, or non-alphabetical characters).

    Parameters:
        statuses: a list of dictionaries of tweet metadata returned from
            search_twitter() function.
    '''
    status_texts = [status['text'] for status in statuses]
    # Drop non-ASCII characters; decode back so we keep working with text
    # (on Python 3, encode() alone would yield bytes).
    status_texts = [text.encode('ascii', 'ignore').decode('ascii')
                    for text in status_texts]

    clean_tweets = []
    for text in status_texts:
        for word in text.split():
            word = word.lower()
            # Skip hashtags, mentions and links.
            if word.startswith(('#', '@', 'http')):
                continue
            # Keep purely alphabetical words only.
            if word.isalpha():
                clean_tweets.append(word)
    return clean_tweets
def get_counts(words):
    '''
    Takes a list of strings and returns a list of tuples (string, int),
    sorted by descending count.

    Parameters:
        words: a list of strings

    Examples:
        >>> get_counts(['a', 'a', 'b', 'b', 'b', 'c'])
        [('b', 3), ('a', 2), ('c', 1)]
    '''
    # Local import keeps the distributed module header unchanged.
    from collections import Counter
    # Previously returned an undefined name `counts` (NameError).
    counts = Counter(words).most_common()
    return counts
def main():
    """Search Twitter for a query, cache the raw results in a pickle file,
    clean the tweet text and render a tag cloud to cloud.png."""
    # XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
    # for these credentials, which you'll need to provide in place of these
    # empty string values that are defined as placeholders.
    # See https://dev.twitter.com/docs/auth/oauth for more information
    # on Twitter's OAuth implementation.
    CONSUMER_KEY = ''
    CONSUMER_SECRET = ''
    OAUTH_TOKEN = ''
    OAUTH_TOKEN_SECRET = ''
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
                               CONSUMER_KEY, CONSUMER_SECRET)
    twitter_api = twitter.Twitter(auth = auth)
    # Search query, try your own.
    q = '#informatics'
    # calling search_twitter too often will lock you out for 1 hour.
    # we will call search twitter once and save the result in a file
    # named "<query>.p" next to this script.
    if not os.path.isfile('{0}.p'.format(q)):
        results = search_twitter(twitter_api, q)
        pickle.dump(results, open('{0}.p'.format(q), 'wb'))
    # load saved pickle file
    results = pickle.load(open('{0}.p'.format(q), 'rb'))
    # clean the tweets and extract the words we want
    clean_tweets = clean_statuses(results)
    # calculate the frequency of each word
    word_count = get_counts(clean_tweets)
    # use PyTagCloud to create a tag cloud
    tags = make_tags(word_count, maxsize = 120)
    # the image is store in 'cloud.png'
    create_tag_image(tags, 'cloud.png', size = (900, 600), fontname = 'Lobster')

if __name__ == '__main__':
    main()
|
Monoclonal B-cell lymphocytosis (MBL) is an asymptomatic clinical entity characterized by the proliferation of monoclonal B cells not meeting the diagnosis criteria for chronic lymphocytic leukemia (CLL). MBL may precede the development of CLL, but the molecular mechanisms responsible for disease progression and evolution are not completely known. Telomeres are usually short in CLL and their attrition may contribute to disease evolution. Here, we determined the telomere lengths of CD5+CD19+ cells in MBL, CLL, and healthy volunteers. Twenty-one CLL patients, 11 subjects with high-count MBL, and 6 with low-count MBL were enrolled. Two hundred and sixty-one healthy volunteers aged 0 to 88 years were studied as controls. After diagnosis confirmation, a flow cytometry CD19+CD5+-based cell sorting was performed for the study groups. Telomere length was determined by qPCR. Telomere length was similar in the 3 study groups but shorter in these groups compared to normal age-matched subjects that had been enrolled in a previous study from our group. These findings suggest that telomere shortening is an early event in CLL leukemogenesis.
Chronic lymphocytic leukemia (CLL) is the most common leukemia of the western world, with an annual incidence of 5.1 cases/100,000 persons. Over the last years, advances in multi-parameter flow cytometry have provided the identification of a small population of monoclonal B lymphocytes with identical CLL immunophenotype. This condition is found in 0.6 to 12% of adults with normal blood cell counts (1) being called monoclonal B-cell lymphocytosis (MBL) (2).
MBL may be classified as high-count (HC) and low-count (LC) (3). HC MBL is usually diagnosed in asymptomatic subjects with mild lymphocytosis and LC MBL occurs in asymptomatic subjects with normal blood counts that have been submitted to flow cytometry screening (4).
As occurs in CLL, MBL is more frequent in men, especially in relatives of CLL patients (5,6) and its frequency increases with age (5,7).
Telomeres provide protection against threats to the genome and are shortened in every cell cycle due to the inability of the DNA polymerase to replicate the chromosome's 3′ ends (8). Telomeric erosion may interfere with the telomeres' function of protecting the chromosomes, causing genetic instability (9), and may have an important role in common human tumors, like CLL (9,10).
Studies addressing the molecular and genetic basis of MBL may help to elucidate initial points of CLL pathophysiology and, therefore, increase knowledge about the origin of CLL. As far as we know, telomere length (TL) has never been previously investigated in subjects with the diagnosis of MBL.
Here, we hypothesized that telomere shortening may be present in MBL and may have a role in the initial monoclonal B-cell expansion.
We analyzed samples from 6 individuals with LC MBL, 11 with HC MBL and 21 patients with CLL Binet A who were followed at Hospital das Clínicas, Faculdade de Medicina de Ribeirão Preto, SP, Brazil. The control group was obtained from data of 261 healthy volunteers aged 0 to 88 years previously enrolled in another study from our group (11). The study was reviewed and approved by the Institution's Research Ethics Review Board and written informed consent was obtained from all participants in accordance with the Declaration of Helsinki.
Using cell sorting flow cytometry, CD19+CD5+ lymphocytes were purified from peripheral blood of individuals from the three groups. Post-preparation purity, as assessed by flow cytometry (available on 37/48 volunteers), indicated that the isolated cells were predominantly (median of 86%) the desired cells.
DNA was extracted from the isolated CD5+ B lymphocytes of the 3 study groups with the Gentra Puregene Blood kit (Qiagen, Netherlands), according to the manufacturer's protocol. TL was determined by real-time polymerase chain reaction (PCR), as previously described (12), and data are reported as telomere/single copy gene (T/S) ratio. Briefly, two separate PCR runs were performed for each sample, the first to determine the cycle threshold (Ct) value for telomere amplification, and the second to determine the Ct value for control gene amplification. A standard curve was generated in each run, consisting of reference DNA diluted serially. Both reference and sample DNA were analyzed in triplicate (16 ng DNA/aliquot).
The Ct data generated in both runs were used to calculate relative T/S values for each sample: $T/S = 2^{-\Delta\Delta Ct}$. A CV of less than 2% was accepted for telomere reactions and less than 1% for single-gene reactions. All samples were studied in triplicate.
Using these criteria, 5 individuals were excluded, 2 from the LC MBL group, 2 from the HC MBL group, and 1 from the CLL group.
TL was analyzed by two different methods. First, data from all the control group volunteers were analyzed (TL related to age) and the covariance test (ANCOVA) was performed to compare the four groups. To discard possible age-related bias, another analysis (age-matched and adjusted TL) was done using data from 43 controls aged 50 years or older (medium 58; range 50-88) and non-parametric Kruskal-Wallis and confirmatory Dunn test were used. All tests were considered to be statistically significant at the P<0.05 level.
Characteristics of participants are shown in Table 1. Median TL was 0.32 (range, 0.13 to 0.78), 0.21 (range, 0.13 to 0.48) and 0.42 kb (range, 0.36 to 0.45) for CLL, HC MBL and LC MBL, respectively.
Table 1 Clinical characteristics of included individuals.
TL related to age was similar among the three groups. When compared to healthy controls, telomeres from individuals with abnormal B-cell phenotype were significantly smaller than telomeres from normal subjects (CLL and HC MBL, P<0.001; LC MBL, P=0.007). In healthy individuals, TL from peripheral blood leukocytes shortened with aging, but in the three patient groups analyzed, clonal B-cells TLs were equally short regardless of the patient's age (Figure 1).
Figure 1 Telomere length in relation to age in normal controls, low-count monoclonal B-cell lymphocytosis (population screening MBL), high-count MBL (clinical MBL) and chronic lymphocytic leukemia (CLL) patients measured in telomere/single copy gene ratio (T/S).
Age-matched and adjusted TL was shorter in CLL and HC MBL compared to healthy controls (P<0.05; Figure 2). TL also tended to be shorter in LC MBL compared to healthy subjects, although not reaching statistical significance probably due to the low number of individuals in this group (Figure 2).
Figure 2 Peripheral blood leukocyte and clonal B-cell telomere length matched for age and adjusted in healthy controls, low-count monoclonal B-cell lymphocytosis (LC MBL), high-count MBL (HC MBL) and Binet A chronic lymphocytic leukemia (CLL) patients. Horizontal lines indicate median and interquartile range. T/S: telomere/single copy gene.
The finding that average telomere length is shorter in CLL than in normal controls has been demonstrated previously by various authors using different methodologies (13–15). This has been associated with unmutated IGHV status (15) and with worse prognosis (16). Our finding confirms these previous reports.
For the first time, our group compared the TL from HC and LC MBL, CLL Binet A and normal controls. We found the 3 study groups to have similar TL and shorter than the general population's. However, age-matched and adjusted TL was similar in LC MBL and controls. These findings must be confirmed by further investigations, since our LC MBL group was too small to allow definite conclusions.
Significant similarities have been identified regarding the frequencies of IGHV genes between HC MBL and initial stages CLL, although findings from LC MBL were different from aforementioned groups (17). However, other authors also found some biologic similarities between these 3 entities. Not only HC MBLs but also LC MBLs bear cytogenetic abnormalities common in CLL, including 13q-, 17p- and trisomy 12 (1,18).
Notwithstanding the similarities between HC MBL and initial stage CLL with regard to cytogenetic abnormalities (1) and IGHV genes (17) โ and the dissimilarities of these two entities when compared to LC MBL found in some previous studies โ our finding suggests that the absence of difference in TL among LC MBL, HC MBL and CLL Binet A supports the hypothesis that inside the "MBL" label there may be a combination of non-progressive and potentially progressive entities. Moreover, the presence of short telomeres already inside the small abnormal B-cell clone of HC MBL cases, compared to the general population, suggested that it may be part of the initial events in CLL physiopathology. Finally, our findings were in accordance with most recent evidence suggesting that the primary leukemogenic event occurs very early in CLL, probably involving multipotent, self-renewing hematopoietic stem-cells (19). This makes the natural history of CLL comparable with that of other tumors, where a pre-malignant lesion (here, MBL) progresses toward a full malignant disease (20).
In conclusion, we showed that TL was similar among HC MBL and CLL Binet A, which suggests that telomere erosion may be an early event in CLL biology and points to a continuum from HC MBL to CLL.
This work was supported by the Conselho Nacional de Desenvolvimento Cientรญfico e Tecnolรณgico (CNPQ) No. 573.754/2008-0.
|
from rdflib import ConjunctiveGraph, exceptions, Namespace
from rdflib import RDFS, RDF, BNode
from rdflib.collection import Collection
import json
# argparse epilog used by main(); taken from the module docstring.
EPILOG = __doc__
# RDF namespaces for the vocabularies this script reads.
OWLNS = Namespace("http://www.w3.org/2002/07/owl#")
OBO_OWL = Namespace("http://www.geneontology.org/formats/oboInOwl#")
EFO = Namespace("http://www.ebi.ac.uk/efo/")
OBO = Namespace("http://purl.obolibrary.org/obo/")
# Predicates under which the different ontologies record synonyms.
EFO_Synonym = EFO["alternative_term"]
OBO_Synonym = OBO["IAO_0000118"]
Synonym = OBO_OWL["hasExactSynonym"]
# Frequently used OWL vocabulary terms.
Ontology = OWLNS["Ontology"]
Restriction = OWLNS["Restriction"]
Class = OWLNS["Class"]
Thing = OWLNS["Thing"]
OnProperty = OWLNS["onProperty"]
SomeValuesFrom = OWLNS["someValuesFrom"]
IntersectionOf = OWLNS["intersectionOf"]
# Relation / individual URIs; compared against stringified RDF objects below.
PART_OF = "http://purl.obolibrary.org/obo/BFO_0000050"
DEVELOPS_FROM = "http://purl.obolibrary.org/obo/RO_0002202"
HUMAN_TAXON = "http://purl.obolibrary.org/obo/NCBITaxon_9606"
HAS_PART = "http://purl.obolibrary.org/obo/BFO_0000051"
ACHIEVES_PLANNED_OBJECTIVE = "http://purl.obolibrary.org/obo/OBI_0000417"
# Language used by Inspector.entitySynonyms when a single synonym is requested.
DEFAULT_LANGUAGE = "en"
# Slim tables: ontology term id -> human-readable slim label.  getSlims()
# matches these ids against a term's ontological closure.
# NOTE(review): 'developental_slims' is a typo for 'developmental_slims';
# it is referenced by getSlims(), so renaming needs a coordinated change.
developental_slims = {
    'UBERON:0000926': 'mesoderm',
    'UBERON:0000924': 'ectoderm',
    'UBERON:0000925': 'endoderm'
}
# Anatomical organ-system slims.
system_slims = {
    'UBERON:0000383': 'musculature of body',
    'UBERON:0000949': 'endocrine system',
    'UBERON:0000990': 'reproductive system',
    'UBERON:0001004': 'respiratory system',
    'UBERON:0001007': 'digestive system',
    'UBERON:0001008': 'excretory system',
    'UBERON:0001009': 'circulatory system',
    'UBERON:0001434': 'skeletal system',
    'UBERON:0002405': 'immune system',
    'UBERON:0002416': 'integumental system',
    'UBERON:0001032': 'sensory system',
    'UBERON:0001017': 'central nervous system',
    'UBERON:0000010': 'peripheral nervous system'
}
# Individual organ / tissue slims.
organ_slims = {
    'UBERON:0002369': 'adrenal gland',
    'UBERON:0002110': 'gallbladder',
    'UBERON:0002106': 'spleen',
    'UBERON:0001043': 'esophagus',
    'UBERON:0000004': 'nose',
    'UBERON:0000056': 'ureter',
    'UBERON:0000057': 'urethra',
    'UBERON:0000059': 'large intestine',
    'UBERON:0000165': 'mouth',
    'UBERON:0000945': 'stomach',
    'UBERON:0000948': 'heart',
    'UBERON:0000955': 'brain',
    'UBERON:0000970': 'eye',
    'UBERON:0000991': 'gonad',
    'UBERON:0001255': 'urinary bladder',
    'UBERON:0001264': 'pancreas',
    'UBERON:0001474': 'bone element',
    'UBERON:0002048': 'lung',
    'UBERON:0002097': 'skin of body',
    'UBERON:0002107': 'liver',
    'UBERON:0002108': 'small intestine',
    'UBERON:0002113': 'kidney',
    'UBERON:0002240': 'spinal cord',
    'UBERON:0002367': 'prostate gland',
    'UBERON:0002370': 'thymus',
    'UBERON:0003126': 'trachea',
    'UBERON:0001723': 'tongue',
    'UBERON:0001737': 'larynx',
    'UBERON:0006562': 'pharynx',
    'UBERON:0001103': 'diaphragm',
    'UBERON:0002185': 'bronchus',
    'UBERON:0000029': 'lymph node',
    'UBERON:0001132': 'parathyroid gland',
    'UBERON:0002046': 'thyroid gland',
    'UBERON:0001981': 'blood vessel',
    'UBERON:0001473': 'lymphatic vessel',
    'UBERON:0000178': 'blood',
    'UBERON:0007844': 'cartilage element',
    'UBERON:0001690': 'ear',
    'UBERON:0001987': 'placenta',
    'UBERON:0001911': 'mammary gland',
    'UBERON:0001630': 'muscle organ',
    'UBERON:0000007': 'pituitary gland',
    'UBERON:0016887': 'extraembryonic component',
    'UBERON:0001013': 'adipose tissue',
    'UBERON:0000310': 'breast',
    'UBERON:0000989': 'penis',
    'UBERON:0004288': 'skeleton',
    'UBERON:0000995': 'uterus',
    'UBERON:0000996': 'vagina',
    'UBERON:0000992': 'ovary',
    'UBERON:0000473': 'testis',
    'UBERON:0001637': 'artery',
    'UBERON:0001638': 'vein',
    'UBERON:0002050': 'embryonic structure',
    'UBERON:0000160': 'intestine',
    'UBERON:0002384': 'connective tissue'
}
# Assay-category slims (OBI terms).
assay_slims = {
    # Note shortened synonyms are provided
    'OBI:0000634': 'DNA methylation', # 'DNA methylation profiling'
    'OBI:0000424': 'Transcription', # 'transcription profiling'
    'OBI:0001398': 'DNA binding', # "protein and DNA interaction"
    'OBI:0001854': 'RNA binding', # "protein and RNA interaction"
    'OBI:0001917': '3D chromatin structure', # 'chromosome conformation identification objective'
    'OBI:0000870': 'DNA accessibility', # 'single-nucleotide-resolution nucleic acid structure mapping assay'
    'OBI:0001916': 'Replication timing',
    'OBI:0000435': 'Genotyping',
    'OBI:0000615': 'Proteomics',
}
# Manual overrides applied after the ontology-derived slims (see getSlims()).
slim_shims = {
    # this allows us to manually assign term X to slim Y while waiting for ontology updates
    'assay': {
        # DNA accessibility
        'OBI:0001924': 'DNA accessibility', # 'OBI:0000870' / MNase-seq
        'OBI:0002039': 'DNA accessibility', # 'OBI:0000870', / ATAC-seq
        'OBI:0001853': 'DNA accessibility', # 'OBI:0000870', / DNase-seq
        'OBI:0001859': 'DNA accessibility', # 'OBI:0000870', / OBI:0000424 / FAIRE-seq
        'OBI:0002042': '3D chromatin structure', # 'OBI:0000870' (Hi-C)
        'OBI:0001848': '3D chromatin structure', # ChIA-PET / OBI:000870
        'OBI:0001923': 'Proteomics', # OBI:0000615': 'MS-MS'
        'OBI:0001849': 'Genotyping', # OBI:0000435 (DNA-PET)
        'OBI:0002044': 'RNA binding', # OBI:0001854 (RNA-Bind-N-Seq)
        'OBI:0002091': 'Transcription',
        'OBI:0002092': 'Transcription',
        'OBI:0002093': 'Transcription'
    }
}
# Short display names for assays, keyed by OBI term id.
preferred_name = {
    "OBI:0000626": "WGS",
    "OBI:0001247": "genotyping HTS",
    "OBI:0001332": "DNAme array",
    "OBI:0001335": "microRNA counts",
    "OBI:0001463": "RNA microarray",
    "OBI:0001863": "WGBS",
    "OBI:0001923": "MS-MS",
    "OBI:0001271": "RNA-seq",
    "OBI:0000716": "ChIP-seq",
    "OBI:0001853": "DNase-seq",
    "OBI:0001920": "Repli-seq",
    "OBI:0001864": "RAMPAGE",
    "OBI:0001393": "genotyping array",
    "OBI:0002042": "Hi-C",
}
# Assay-category slims used for the 'category' facet.
category_slims = {
    'OBI:0000634': 'DNA methylation profiling',
    'OBI:0000424': 'transcription profiling',
    'OBI:0000435': 'genotyping',
    'OBI:0000615': 'proteomics',
    'OBI:0001916': 'replication',
    'OBI:0001398': "protein and DNA interaction",
    'OBI:0001854': "protein and RNA interaction"
}
# Experimental-objective slims.
objective_slims = {
    'OBI:0000218': 'cellular feature identification objective',
    'OBI:0001691': 'cellular structure feature identification objective',
    'OBI:0001916': 'DNA replication identification objective',
    'OBI:0001917': 'chromosome conformation identification objective',
    'OBI:0001234': 'epigenetic modification identification objective',
    'OBI:0001331': 'transcription profiling identification objective',
    'OBI:0001690': 'molecular function identification objective',
    'OBI:0000268': 'organism feature identification objective',
    'OBI:0001623': 'organism identification objective',
    'OBI:0001398': 'protein and DNA interaction identification objective',
    'OBI:0001854': 'protein and RNA interaction identification objective'
}
# Assay-type slims.
type_slims = {
    'OBI:0001700': 'immunoprecipitation assay',
    'OBI:0000424': 'transcription profiling assay',
    'OBI:0000634': 'DNA methylation profiling assay',
    'OBI:0000435': 'genotyping assay'
}
# Note this also shows the final datastructure for ontology.json
# Hand-curated records for "NTR" (new term request) assays that are not yet
# in OBI; merged into the generated terms dict at the end of main().
ntr_assays = {
    "NTR:0003660": {
        "assay": ['Transcription'],
        "category": [],
        "developmental": [],
        "name": "microRNA counts",
        "objectives": [],
        "organs": [],
        "preferred_name": "",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0000612": {
        "assay": ['RNA binding'],
        "category": [],
        "developmental": [],
        "name": "Switchgear",
        "objectives": [],
        "organs": [],
        "preferred_name": "",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0000762": {
        "assay": ['Transcription'],
        "category": [],
        "developmental": [],
        "name": "shRNA knockdown followed by RNA-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "shRNA RNA-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0000763": {
        "assay": ['Transcription'],
        "category": [],
        "developmental": [],
        "name": "siRNA knockdown followed by RNA-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "siRNA RNA-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0001132": {
        "assay": ['RNA binding'],
        "category": [],
        "developmental": [],
        "name": "RNA Bind-N-Seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "RNA Bind-N-Seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0003082": {
        "assay": ['Transcription'],
        "category": [],
        "developmental": [],
        "name": "single cell isolation followed by RNA-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "single cell RNA-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0004774": {
        "assay": ['DNA accessibility'],
        "category": [],
        "developmental": [],
        "name": "genetic modification followed by DNase-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "genetic modification DNase-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0003814": {
        "assay": ['Transcription'],
        "category": [],
        "developmental": [],
        "name": "CRISPR genome editing followed by RNA-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "CRISPR RNA-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0004619": {
        "assay": ['Transcription'],
        "category": [],
        "developmental": [],
        "name": "CRISPRi followed by RNA-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "CRISPRi RNA-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0000438": {
        "assay": ['DNA accessibility'],
        "category": [],
        "developmental": [],
        "name": "single-nuclei ATAC-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "snATAC-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0000444": {
        "assay": ['DNA accessibility'],
        "category": [],
        "developmental": [],
        "name": "single-cell ATAC-seq",
        "objectives": [],
        "organs": [],
        "preferred_name": "scATAC-seq",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    },
    "NTR:0004875": {
        "assay": ['Genotyping'],
        "category": [],
        "developmental": [],
        "name": "genotyping by Hi-C",
        "objectives": [],
        "organs": [],
        "preferred_name": "genotyping Hi-C",
        "slims": [],
        "synonyms": [],
        "systems": [],
        "types": []
    }
}
class Inspector(object):
    """ Class that includes methods for querying an RDFS/OWL ontology """
    def __init__(self, uri, language=""):
        """Parse the ontology at *uri* (RDF/XML first, then N3 as a
        fallback) and cache the ontology base URI and full class list."""
        super(Inspector, self).__init__()
        self.rdfGraph = ConjunctiveGraph()
        try:
            self.rdfGraph.parse(uri, format="application/rdf+xml")
        except:
            try:
                self.rdfGraph.parse(uri, format="n3")
            except:
                raise exceptions.Error("Could not parse the file! Is it a valid RDF/OWL ontology?")
        finally:
            # Runs after either successful parse: record the declared
            # ontology URI (falling back to the input URI) and collect
            # every class, including blank nodes and RDF/OWL built-ins.
            self.baseURI = self.get_OntologyURI() or uri
            self.allclasses = self.__getAllClasses(includeDomainRange=True, includeImplicit=True, removeBlankNodes=False, excludeRDF_OWL=False)
    def get_OntologyURI(self, return_as_string=True):
        """Return the URI of the first owl:Ontology declaration in the
        graph (as str if *return_as_string*), or None if absent."""
        test = [x for x, y, z in self.rdfGraph.triples((None, RDF.type, Ontology))]
        if test:
            if return_as_string:
                return str(test[0])
            else:
                return test[0]
        else:
            return None
    def __getAllClasses(self, classPredicate="", includeDomainRange=False, includeImplicit=False, removeBlankNodes=True, addOWLThing=True, excludeRDF_OWL=True):
        """Collect class URIs from the graph.

        *classPredicate* limits discovery to "rdfs" or "owl" declarations
        ("" means both); the include* flags add classes mentioned as
        rdfs:domain/range, as subclass endpoints, or as rdf:type objects.
        Returns a name-sorted list.
        """
        rdfGraph = self.rdfGraph
        # 'exit' is used as an ordered set of candidate classes.
        exit = {}
        def addIfYouCan(x, mydict):
            # Skip RDF/RDFS/OWL built-in vocabulary when requested.
            if excludeRDF_OWL:
                if x.startswith('http://www.w3.org/2002/07/owl#') or  \
                   x.startswith("http://www.w3.org/1999/02/22-rdf-syntax-ns#") or \
                   x.startswith("http://www.w3.org/2000/01/rdf-schema#"):
                    return mydict
            if x not in mydict:
                mydict[x] = None
            return mydict
        if addOWLThing:
            exit = addIfYouCan(Thing, exit)
        if classPredicate == "rdfs" or classPredicate == "":
            for s in rdfGraph.subjects(RDF.type, RDFS.Class):
                exit = addIfYouCan(s, exit)
        if classPredicate == "owl" or classPredicate == "":
            for s in rdfGraph.subjects(RDF.type, Class):
                exit = addIfYouCan(s, exit)
        if includeDomainRange:
            for o in rdfGraph.objects(None, RDFS.domain):
                exit = addIfYouCan(o, exit)
            for o in rdfGraph.objects(None, RDFS.range):
                exit = addIfYouCan(o, exit)
        if includeImplicit:
            for s, v, o in rdfGraph.triples((None, RDFS.subClassOf, None)):
                exit = addIfYouCan(s, exit)
                exit = addIfYouCan(o, exit)
            for o in rdfGraph.objects(None, RDF.type):
                exit = addIfYouCan(o, exit)
        # get a list
        exit = exit.keys()
        if removeBlankNodes:
            exit = [x for x in exit if not isBlankNode(x)]
        return sort_uri_list_by_name(exit)
    def __getTopclasses(self, classPredicate=''):
        """Return classes that have no direct superclass."""
        returnlist = []
        for eachclass in self.__getAllClasses(classPredicate):
            x = self.get_classDirectSupers(eachclass)
            if not x:
                returnlist.append(eachclass)
        return sort_uri_list_by_name(returnlist)
    def __getTree(self, father=None, out=None):
        """Recursively build a {class: [subclasses]} tree.

        NOTE(review): relies on self.toplayer, which is never assigned in
        this class — calling this would raise AttributeError; confirm
        whether this method is dead code.
        """
        if not father:
            out = {}
            topclasses = self.toplayer
            out[0] = topclasses
            for top in topclasses:
                children = self.get_classDirectSubs(top)
                out[top] = children
                for potentialfather in children:
                    self.__getTree(potentialfather, out)
            return out
        else:
            children = self.get_classDirectSubs(father)
            out[father] = children
            for ch in children:
                self.__getTree(ch, out)
    def __buildClassTree(self, father=None, out=None):
        """Like __getTree but rooted at owl:Thing with name-sorted children.

        NOTE(review): also depends on the unset self.toplayer attribute.
        """
        if not father:
            out = {}
            topclasses = self.toplayer
            out[0] = [Thing]
            out[Thing] = sort_uri_list_by_name(topclasses)
            for top in topclasses:
                children = self.get_classDirectSubs(top)
                out[top] = sort_uri_list_by_name(children)
                for potentialfather in children:
                    self.__buildClassTree(potentialfather, out)
            return out
        else:
            children = self.get_classDirectSubs(father)
            out[father] = sort_uri_list_by_name(children)
            for ch in children:
                self.__buildClassTree(ch, out)
    # methods for getting ancestores and descendants of classes: by default, we do not include blank nodes
    def get_classDirectSupers(self, aClass, excludeBnodes=True, sortUriName=False):
        """Return the direct rdfs:subClassOf parents of *aClass*,
        excluding owl:Thing (and blank nodes unless *excludeBnodes* is
        False); duplicates are removed."""
        returnlist = []
        for o in self.rdfGraph.objects(aClass, RDFS.subClassOf):
            if not (o == Thing):
                if excludeBnodes:
                    if not isBlankNode(o):
                        returnlist.append(o)
                else:
                    returnlist.append(o)
        if sortUriName:
            return sort_uri_list_by_name(remove_duplicates(returnlist))
        else:
            return remove_duplicates(returnlist)
    def get_classDirectSubs(self, aClass, excludeBnodes=True):
        """Return the direct subclasses of *aClass*, name-sorted."""
        returnlist = []
        for s, v, o in self.rdfGraph.triples((None, RDFS.subClassOf, aClass)):
            if excludeBnodes:
                if not isBlankNode(s):
                    returnlist.append(s)
            else:
                returnlist.append(s)
        return sort_uri_list_by_name(remove_duplicates(returnlist))
    def get_classSiblings(self, aClass, excludeBnodes=True):
        """Return other direct children of *aClass*'s direct parents."""
        returnlist = []
        for father in self.get_classDirectSupers(aClass, excludeBnodes):
            for child in self.get_classDirectSubs(father, excludeBnodes):
                if child != aClass:
                    returnlist.append(child)
        return sort_uri_list_by_name(remove_duplicates(returnlist))
    def entitySynonyms(self, anEntity, language=DEFAULT_LANGUAGE, getall=True):
        """Return synonyms of *anEntity*.

        With *getall* (default) returns every synonym recorded under the
        oboInOwl, EFO and OBO synonym predicates; otherwise returns the
        first oboInOwl synonym matching *language*, or ''.
        """
        if getall:
            temp = []
            # Uberon synonyms
            for o in self.rdfGraph.objects(anEntity, Synonym):
                temp += [o]
            # EFO synonyms
            for o in self.rdfGraph.objects(anEntity, EFO_Synonym):
                temp += [o]
            # OBI synonyms
            for o in self.rdfGraph.objects(anEntity, OBO_Synonym):
                temp += [o]
            return temp
        else:
            for o in self.rdfGraph.objects(anEntity, Synonym):
                if getattr(o, 'language') and getattr(o, 'language') == language:
                    return o
            return ""
    def classFind(self, name, exact=False):
        """Find classes whose URI matches *name* (case-insensitive).

        With *exact*, returns at most one exact match as a single-element
        list; otherwise returns every class whose URI contains *name*.
        """
        temp = []
        if name:
            for x in self.allclasses:
                if exact:
                    if x.__str__().lower() == str(name).lower():
                        return [x]
                else:
                    if x.__str__().lower().find(str(name).lower()) >= 0:
                        temp.append(x)
        return temp
def inferNamespacePrefix(aUri):
    """Guess a namespace prefix from a URI.

    Strips '#' characters and returns the last path segment, e.g.
    'http://example.org/obo#' -> 'obo'.  Returns '' if the value cannot
    be processed.
    """
    stringa = aUri.__str__()
    try:
        prefix = stringa.replace("#", "").split("/")[-1]
    except (AttributeError, IndexError):
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        prefix = ""
    return prefix
def sort_uri_list_by_name(uri_list):
    """Sort URIs by local name: the '#' fragment if present, otherwise
    the last '/' path segment.

    If the sort itself fails (e.g. uncomparable keys), the input list is
    returned unsorted.
    """
    def get_last_bit(uri_string):
        try:
            return uri_string.split("#")[1]
        except IndexError:
            # No fragment separator; fall back to the last path segment.
            # (Narrowed from a bare 'except:'.)
            return uri_string.split("/")[-1]
    try:
        return sorted(uri_list, key=lambda x: get_last_bit(x.__str__()))
    except Exception:
        # TODO: do more testing.. maybe use a unicode-safe method instead of __str__
        print("Error in <sort_uri_list_by_name>: possibly a UnicodeEncodeError")
        return uri_list
def remove_duplicates(seq, idfun=None):
    """Return the items of *seq* with duplicates removed, keeping the
    first occurrence of each and preserving order.

    *idfun* maps an item to the key used for duplicate detection
    (identity by default).  A falsy *seq* yields an empty list.
    """
    if not seq:
        return []
    if idfun is None:
        idfun = lambda item: item
    seen = set()
    unique = []
    for item in seq:
        key = idfun(item)
        if key not in seen:
            seen.add(key)
            unique.append(item)
    return unique
def isBlankNode(aClass):
    ''' Checks for blank node '''
    # isinstance() is preferred over comparing type() for equality:
    # it also accepts BNode subclasses.
    return isinstance(aClass, BNode)
def splitNameFromNamespace(aUri):
    """Split a URI into (local name, namespace).

    Prefers the '#' fragment separator and falls back to the last '/'.
    Note the return order is (name, namespace).
    """
    stringa = aUri.__str__()
    try:
        ns = stringa.split("#")[0]
        name = stringa.split("#")[1]
    except IndexError:
        # No '#' in the URI; split on the final '/' instead.
        # (Narrowed from a bare 'except:'.)
        ns = stringa.rsplit("/", 1)[0]
        name = stringa.rsplit("/", 1)[1]
    return (name, ns)
def iterativeChildren(nodes, terms, closure):
    """Breadth-first transitive closure of *nodes* over the links stored
    in terms[...]['data'] (when *closure* == 'data') or
    terms[...]['data_with_develops_from'] (otherwise).

    Returns the reached term ids, including the starting nodes, as a
    list in unspecified order.
    """
    field = 'data' if closure == 'data' else 'data_with_develops_from'
    reached = set()
    frontier = list(nodes)
    while frontier:
        next_frontier = set()
        for node in frontier:
            reached.add(node)
            for child in terms[node][field]:
                if child not in reached:
                    next_frontier.add(child)
        frontier = list(next_frontier)
    return list(reached)
def getSlims(goid, terms, slimType):
    ''' Get Slims

    Collect the slim labels whose term ids appear in *goid*'s closure;
    'developmental' slims use the closure that follows develops_from
    links.  A matching entry in slim_shims overrides any
    ontology-derived result.
    '''
    slim_tables = {
        'developmental': developental_slims,
        'organ': organ_slims,
        'system': system_slims,
        'assay': assay_slims,
        'category': category_slims,
        'objective': objective_slims,
        'type': type_slims,
    }
    slimTerms = slim_tables.get(slimType, {})
    closure_key = ('closure_with_develops_from'
                   if slimType == 'developmental' else 'closure')
    closure = terms[goid][closure_key]
    slims = [label for term_id, label in slimTerms.items()
             if term_id in closure]
    # Manual shim assignments override everything derived from the ontology.
    shim = slim_shims.get(slimType, {}).get(goid, '')
    if shim:
        slims = [shim]
    return slims
def getTermStructure():
    """Return a fresh, empty record for one ontology term.

    String fields default to '' and every list field gets its own new
    list, so records never share mutable state.
    """
    structure = {field: '' for field in ('id', 'name', 'preferred_name')}
    for field in ('parents', 'part_of', 'has_part', 'develops_from',
                  'achieves_planned_objective', 'organs', 'closure', 'slims',
                  'data', 'closure_with_develops_from',
                  'data_with_develops_from', 'synonyms', 'category', 'assay',
                  'types', 'objectives'):
        structure[field] = []
    return structure
def main():
    ''' Downloads UBERON, EFO and OBI ontologies and create a JSON file '''
    import argparse
    parser = argparse.ArgumentParser(
        description="Get Uberon, EFO and OBI ontologies and generate the JSON file", epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--uberon-url', help="Uberon version URL")
    parser.add_argument('--efo-url', help="EFO version URL")
    parser.add_argument('--obi-url', help="OBI version URL")
    args = parser.parse_args()
    uberon_url = args.uberon_url
    efo_url = args.efo_url
    obi_url = args.obi_url
    urls = [obi_url, uberon_url, efo_url]
    # terms maps 'PREFIX:NUMBER' term ids to the record built by
    # getTermStructure().
    terms = {}
    for url in urls:
        data = Inspector(url)
        for c in data.allclasses:
            if isBlankNode(c):
                # Blank nodes: look for owl:intersectionOf restrictions that
                # mention the human taxon, and record part_of / develops_from
                # links on the first member of the intersection list.
                for o in data.rdfGraph.objects(c, RDFS.subClassOf):
                    if isBlankNode(o):
                        pass
                    else:
                        for o1 in data.rdfGraph.objects(c, IntersectionOf):
                            collection = Collection(data.rdfGraph, o1)
                            col_list = []
                            for col in data.rdfGraph.objects(collection[1]):
                                col_list.append(col.__str__())
                            if HUMAN_TAXON in col_list:
                                if PART_OF in col_list:
                                    for subC in data.rdfGraph.objects(c, RDFS.subClassOf):
                                        term_id = splitNameFromNamespace(collection[0])[0].replace('_', ':')
                                        if term_id not in terms:
                                            terms[term_id] = getTermStructure()
                                        terms[term_id]['part_of'].append(splitNameFromNamespace(subC)[0].replace('_', ':'))
                                elif DEVELOPS_FROM in col_list:
                                    for subC in data.rdfGraph.objects(c, RDFS.subClassOf):
                                        term_id = splitNameFromNamespace(collection[0])[0].replace('_', ':')
                                        if term_id not in terms:
                                            terms[term_id] = getTermStructure()
                                        terms[term_id]['develops_from'].append(splitNameFromNamespace(subC)[0].replace('_', ':'))
            else:
                # Named classes: record name, preferred name, parents and
                # the restriction-based relations.
                term_id = splitNameFromNamespace(c)[0].replace('_', ':')
                if term_id not in terms:
                    terms[term_id] = getTermStructure()
                terms[term_id]['id'] = term_id
                try:
                    terms[term_id]['name'] = data.rdfGraph.label(c).__str__()
                except:
                    terms[term_id]['name'] = ''
                terms[term_id]['preferred_name'] = preferred_name.get(term_id, '')
                # Get all parents
                for parent in data.get_classDirectSupers(c, excludeBnodes=False):
                    if isBlankNode(parent):
                        # Blank-node parents are owl:Restrictions; dispatch on
                        # the restricted property.
                        for s, v, o in data.rdfGraph.triples((parent, OnProperty, None)):
                            if o.__str__() == PART_OF:
                                for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
                                    if not isBlankNode(o1):
                                        terms[term_id]['part_of'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
                            elif o.__str__() == DEVELOPS_FROM:
                                for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
                                    if not isBlankNode(o1):
                                        terms[term_id]['develops_from'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
                            elif o.__str__() == HAS_PART:
                                for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
                                    if not isBlankNode(o1):
                                        terms[term_id]['has_part'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
                            elif o.__str__() == ACHIEVES_PLANNED_OBJECTIVE:
                                for o1 in data.rdfGraph.objects(parent, SomeValuesFrom):
                                    if not isBlankNode(o1):
                                        terms[term_id]['achieves_planned_objective'].append(splitNameFromNamespace(o1)[0].replace('_', ':'))
                    else:
                        terms[term_id]['parents'].append(splitNameFromNamespace(parent)[0].replace('_', ':'))
                for syn in data.entitySynonyms(c):
                    try:
                        terms[term_id]['synonyms'].append(syn.__str__())
                    except:
                        pass
    # Merge the direct relations into the 'data' link sets used for the
    # closure computation; the develops_from variant adds those links too.
    for term in terms:
        terms[term]['data'] = list(set(terms[term]['parents']) | set(terms[term]['part_of']) | set(terms[term]['achieves_planned_objective']))
        terms[term]['data_with_develops_from'] = list(set(terms[term]['data']) | set(terms[term]['develops_from']))
    # Compute closures, assign slims, then drop the intermediate fields.
    for term in terms:
        words = iterativeChildren(terms[term]['data'], terms, 'data')
        for word in words:
            terms[term]['closure'].append(word)
        d = iterativeChildren(terms[term]['data_with_develops_from'], terms, 'data_with_develops_from')
        for dd in d:
            terms[term]['closure_with_develops_from'].append(dd)
        # A term is always part of its own closure.
        terms[term]['closure'].append(term)
        terms[term]['closure_with_develops_from'].append(term)
        terms[term]['systems'] = getSlims(term, terms, 'system')
        terms[term]['organs'] = getSlims(term, terms, 'organ')
        terms[term]['developmental'] = getSlims(term, terms, 'developmental')
        terms[term]['assay'] = getSlims(term, terms, 'assay')
        terms[term]['category'] = getSlims(term, terms, 'category')
        terms[term]['objectives'] = getSlims(term, terms, 'objective')
        terms[term]['types'] = getSlims(term, terms, 'type')
        del terms[term]['closure'], terms[term]['closure_with_develops_from']
    for term in terms:
        del terms[term]['parents'], terms[term]['develops_from']
        del terms[term]['has_part'], terms[term]['achieves_planned_objective']
        del terms[term]['id'], terms[term]['data'], terms[term]['data_with_develops_from']
    # Add the hand-curated NTR assay records and write the result.
    # NOTE(review): output file is 'ontology1.json' while the comment above
    # ntr_assays refers to ontology.json — confirm the intended name.
    terms.update(ntr_assays)
    with open('ontology1.json', 'w') as outfile:
        json.dump(terms, outfile)
|
The Will Packer-produced comedy Little, starring Black-ish breakout star Marsai Martin, will hit theaters Sept. 20, 2019, Universal announced Thursday.
The studio also revealed that two other comedies from Packer, the prolific producer behind such hits as Girls Trip, are in the works. No details were provided, but the pics have been set for release in theaters on Nov. 15, 2019, and May 15, 2020.
Little centers on a woman who gets the chance to relive the carefree life of her younger self (Martin) when the pressures of adulthood become too much. The deal is noteworthy for Martin not only as an actress — it marks her first studio feature — but also as a creative force. The 13-year-old came up with the idea for the script and will also executive produce.
Tina Gordon (Peeples) is directing Little.
All three films are from Will Packer Productions, which has a first-look deal with Universal.
|
import wtforms
from nose.tools import eq_, ok_
from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.test import Client
from wtforms import fields
from flask_admin import Admin, form
from flask_admin._compat import iteritems, itervalues
from flask_admin.model import base, filters
from flask_admin.model.template import macro
def wtforms2_and_up(func):
    """Decorator for skipping test if wtforms <2

    Marks *func* as not a test (nose honours __test__ = False) when the
    installed wtforms major version is below 2.
    """
    # Parse the major version robustly: indexing __version__[0] would
    # misread a hypothetical '10.x' release as major version 1.
    if int(wtforms.__version__.split('.')[0]) < 2:
        func.__test__ = False
    return func
class Model(object):
    """Plain in-memory record used as the model under test."""
    def __init__(self, id=None, c1=1, c2=2, c3=3):
        # 'id' acts as the primary key; c1..c3 populate col1..col3.
        self.id = id
        self.col1, self.col2, self.col3 = c1, c2, c3
class Form(form.BaseForm):
    # Form matching Model's col1..col3 attributes; returned by
    # MockModelView.scaffold_form for both the create and edit views.
    col1 = fields.StringField()
    col2 = fields.StringField()
    col3 = fields.StringField()
class SimpleFilter(filters.BaseFilter):
    """Minimal filter stub used by the filter tests."""
    def apply(self, query):
        # Tag the query instead of filtering, so tests can detect the call.
        query._applied = True
        return query
    def operation(self):
        # Operation label shown in the filter UI.
        return 'test'
class MockModelView(base.BaseModelView):
    """In-memory BaseModelView implementation for exercising the generic
    model-view machinery without a real database backend.

    Records every create/update/delete and get_list call so tests can
    assert on them.
    """
    def __init__(self, model, data=None, name=None, category=None,
                 endpoint=None, url=None, **kwargs):
        # Allow to set any attributes from parameters
        for k, v in iteritems(kwargs):
            setattr(self, k, v)
        super(MockModelView, self).__init__(model, name, category, endpoint, url)
        # Call/mutation logs inspected by the tests.
        self.created_models = []
        self.updated_models = []
        self.deleted_models = []
        self.search_arguments = []
        if data is None:
            # Default dataset: two records keyed by primary key.
            self.all_models = {1: Model(1), 2: Model(2)}
        else:
            self.all_models = data
        # Next primary key to hand out in create_model.
        self.last_id = len(self.all_models) + 1
    # Scaffolding
    def get_pk_value(self, model):
        # Primary key accessor used by the framework.
        return model.id
    def scaffold_list_columns(self):
        # Fixed column set, minus any excluded columns.
        columns = ['col1', 'col2', 'col3']
        if self.column_exclude_list:
            return filter(lambda x: x not in self.column_exclude_list, columns)
        return columns
    def init_search(self):
        # Search is "supported" whenever searchable columns are configured.
        return bool(self.column_searchable_list)
    def scaffold_filters(self, name):
        # One stub filter per requested column.
        return [SimpleFilter(name)]
    def scaffold_sortable_columns(self):
        return ['col1', 'col2', 'col3']
    def scaffold_form(self):
        return Form
    # Data
    def get_list(self, page, sort_field, sort_desc, search, filters,
                 page_size=None):
        # Log the query arguments and return the whole dataset unfiltered.
        self.search_arguments.append((page, sort_field, sort_desc, search, filters))
        return len(self.all_models), itervalues(self.all_models)
    def get_one(self, id):
        return self.all_models.get(int(id))
    def create_model(self, form):
        model = Model(self.last_id)
        self.last_id += 1
        form.populate_obj(model)
        self.created_models.append(model)
        self.all_models[model.id] = model
        return True
    def update_model(self, form, model):
        form.populate_obj(model)
        self.updated_models.append(model)
        return True
    def delete_model(self, model):
        # NOTE(review): only logs the deletion; the record stays in
        # all_models — confirm tests rely on that.
        self.deleted_models.append(model)
        return True
def setup():
    """Build a fresh Flask app (CSRF disabled) and an Admin bound to it."""
    flask_app = Flask(__name__)
    flask_app.config['CSRF_ENABLED'] = False
    flask_app.secret_key = '1'
    return flask_app, Admin(flask_app)
def test_mockview():
    """End-to-end exercise of MockModelView: list, create, edit, delete,
    and the 'save and continue' redirect behind a URL prefix."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    eq_(view.model, Model)
    eq_(view.name, 'Model')
    eq_(view.endpoint, 'model')
    # Verify scaffolding
    eq_(view._sortable_columns, ['col1', 'col2', 'col3'])
    eq_(view._create_form_class, Form)
    eq_(view._edit_form_class, Form)
    eq_(view._search_supported, False)
    eq_(view._filters, None)
    client = app.test_client()
    # Make model view requests
    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)
    # Test model creation view
    rv = client.get('/admin/model/new/')
    eq_(rv.status_code, 200)
    rv = client.post('/admin/model/new/',
                     data=dict(col1='test1', col2='test2', col3='test3'))
    eq_(rv.status_code, 302)
    eq_(len(view.created_models), 1)
    # New record gets the next free primary key (two defaults exist -> 3).
    model = view.created_models.pop()
    eq_(model.id, 3)
    eq_(model.col1, 'test1')
    eq_(model.col2, 'test2')
    eq_(model.col3, 'test3')
    # Try model edit view
    rv = client.get('/admin/model/edit/?id=3')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1' in data)
    rv = client.post('/admin/model/edit/?id=3',
                     data=dict(col1='test!', col2='test@', col3='test#'))
    eq_(rv.status_code, 302)
    eq_(len(view.updated_models), 1)
    model = view.updated_models.pop()
    eq_(model.col1, 'test!')
    eq_(model.col2, 'test@')
    eq_(model.col3, 'test#')
    # Editing a missing record redirects instead of rendering.
    rv = client.get('/admin/model/edit/?id=4')
    eq_(rv.status_code, 302)
    # Attempt to delete model
    rv = client.post('/admin/model/delete/?id=3')
    eq_(rv.status_code, 302)
    eq_(rv.headers['location'], 'http://localhost/admin/model/')
    # Create a dispatched application to test that edit view's "save and
    # continue" functionality works when app is not located at root
    dummy_app = Flask('dummy_app')
    dispatched_app = DispatcherMiddleware(dummy_app, {'/dispatched': app})
    dispatched_client = Client(dispatched_app)
    app_iter, status, headers = dispatched_client.post(
        '/dispatched/admin/model/edit/?id=3',
        data=dict(col1='another test!', col2='test@', col3='test#', _continue_editing='True'))
    eq_(status, '302 FOUND')
    # Redirect must keep the '/dispatched' mount prefix.
    eq_(headers['Location'], 'http://localhost/dispatched/admin/model/edit/?id=3')
    model = view.updated_models.pop()
    eq_(model.col1, 'another test!')
def test_permissions():
    """Disabling can_create/can_edit/can_delete redirects each view away."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    client = app.test_client()
    # (flag to clear, HTTP method, URL) for each protected view.
    checks = [
        ('can_create', 'get', '/admin/model/new/'),
        ('can_edit', 'get', '/admin/model/edit/?id=1'),
        ('can_delete', 'post', '/admin/model/delete/?id=1'),
    ]
    for attr, method, url in checks:
        setattr(view, attr, False)
        rv = getattr(client, method)(url)
        eq_(rv.status_code, 302)
def test_templates():
    """Custom templates configured on the view are actually rendered."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    client = app.test_client()
    # The stub template simply renders 'Success!'.
    view.list_template = 'mock.html'
    view.create_template = 'mock.html'
    view.edit_template = 'mock.html'
    for url in ('/admin/model/', '/admin/model/new/', '/admin/model/edit/?id=1'):
        eq_(client.get(url).data, b'Success!')
def test_list_columns():
    """column_list selects the rendered columns; column_labels retitles them."""
    app, admin = setup()
    view = MockModelView(Model,
                         column_list=['col1', 'col3'],
                         column_labels=dict(col1='Column1'))
    admin.add_view(view)
    eq_(len(view._list_columns), 2)
    eq_(view._list_columns, [('col1', 'Column1'), ('col3', 'Col3')])
    page = app.test_client().get('/admin/model/').data.decode('utf-8')
    ok_('Column1' in page)
    ok_('Col2' not in page)
def test_exclude_columns():
    """Columns in column_exclude_list disappear from the list view."""
    app, admin = setup()
    view = MockModelView(Model, column_exclude_list=['col2'])
    admin.add_view(view)
    eq_(view._list_columns, [('col1', 'Col1'), ('col3', 'Col3')])
    page = app.test_client().get('/admin/model/').data.decode('utf-8')
    ok_('Col1' in page)
    ok_('Col2' not in page)
def test_sortable_columns():
    """Tuple entries in column_sortable_list map a column to its sort key."""
    _, admin = setup()
    sortable_view = MockModelView(Model,
                                  column_sortable_list=['col1', ('col2', 'test1')])
    admin.add_view(sortable_view)
    eq_(sortable_view._sortable_columns, dict(col1='col1', col2='test1'))
def test_column_searchable_list():
    """Providing column_searchable_list enables search support."""
    _, admin = setup()
    searchable_view = MockModelView(Model,
                                    column_searchable_list=['col1', 'col2'])
    admin.add_view(searchable_view)
    eq_(searchable_view._search_supported, True)
    # TODO: Make calls with search
def test_column_filters():
    """Each entry in column_filters produces one scaffolded filter and
    a matching single-entry filter group."""
    _, admin = setup()
    filtered_view = MockModelView(Model, column_filters=['col1', 'col2'])
    admin.add_view(filtered_view)
    eq_(len(filtered_view._filters), 2)
    for position, column in enumerate(['col1', 'col2']):
        eq_(filtered_view._filters[position].name, column)
        eq_([(f['index'], f['operation'])
             for f in filtered_view._filter_groups[column]],
            [(position, 'test')])
    # TODO: Make calls with filters
def test_filter_list_callable():
    """Filter options supplied as a callable are resolved on demand."""
    _, admin = setup()
    option_filter = SimpleFilter(
        'test', options=lambda: (('1', 'Test 1'), ('2', 'Test 2')))
    view = MockModelView(Model, column_filters=[option_filter])
    admin.add_view(view)
    resolved = option_filter.get_options(view)
    eq_(len(resolved), 2)
    eq_(resolved, [('1', u'Test 1'), ('2', u'Test 2')])
def test_form():
    """Placeholder: form customisation options are not covered yet."""
    # TODO: form_columns
    # TODO: form_excluded_columns
    # TODO: form_args
    # TODO: form_widget_args
    pass
@wtforms2_and_up
def test_csrf():
    """Create/edit/delete views reject posts without a valid CSRF token
    and accept them with one (requires wtforms >= 2 via SecureForm)."""
    class SecureModelView(MockModelView):
        form_base_class = form.SecureForm
        def scaffold_form(self):
            return form.SecureForm
    def get_csrf_token(data):
        # Pull the hidden csrf_token value out of the rendered form HTML.
        data = data.split('name="csrf_token" type="hidden" value="')[1]
        token = data.split('"')[0]
        return token
    app, admin = setup()
    view = SecureModelView(Model, endpoint='secure')
    admin.add_view(view)
    client = app.test_client()
    ################
    # create_view
    ################
    rv = client.get('/admin/secure/new/')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
    csrf_token = get_csrf_token(rv.data.decode('utf-8'))
    # Create without CSRF token
    rv = client.post('/admin/secure/new/', data=dict(name='test1'))
    # 200 = form re-rendered with a validation error, not a redirect.
    eq_(rv.status_code, 200)
    # Create with CSRF token
    rv = client.post('/admin/secure/new/', data=dict(name='test1',
                                                     csrf_token=csrf_token))
    eq_(rv.status_code, 302)
    ###############
    # edit_view
    ###############
    rv = client.get('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
    csrf_token = get_csrf_token(rv.data.decode('utf-8'))
    # Edit without CSRF token
    rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
                     data=dict(name='test1'))
    eq_(rv.status_code, 200)
    # Edit with CSRF token
    rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
                     data=dict(name='test1', csrf_token=csrf_token))
    eq_(rv.status_code, 302)
    ################
    # delete_view
    ################
    rv = client.get('/admin/secure/')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
    csrf_token = get_csrf_token(rv.data.decode('utf-8'))
    # Delete without CSRF token, test validation errors
    rv = client.post('/admin/secure/delete/',
                     data=dict(id="1", url="/admin/secure/"), follow_redirects=True)
    eq_(rv.status_code, 200)
    ok_(u'Record was successfully deleted.' not in rv.data.decode('utf-8'))
    ok_(u'Failed to delete record.' in rv.data.decode('utf-8'))
    # Delete with CSRF token
    rv = client.post('/admin/secure/delete/',
                     data=dict(id="1", url="/admin/secure/", csrf_token=csrf_token),
                     follow_redirects=True)
    eq_(rv.status_code, 200)
    ok_(u'Record was successfully deleted.' in rv.data.decode('utf-8'))
def test_custom_form():
    """A user-supplied form class must be used as-is for both create and edit."""
    app, admin = setup()

    class TestForm(form.BaseForm):
        pass

    view = MockModelView(Model, form=TestForm)
    admin.add_view(view)

    # Both scaffolded form slots point straight at the custom class.
    for form_class in (view._create_form_class, view._edit_form_class):
        eq_(form_class, TestForm)

    # Scaffolding was bypassed, so no field was generated for the model column.
    ok_(not hasattr(view._create_form_class, 'col1'))
def test_modal_edit():
    """Modal markup must appear iff edit_modal/create_modal is enabled, under
    both the bootstrap2 and bootstrap3 template modes."""
    # One view per flag combination; the same instances are registered on
    # both admin apps below, mirroring the original test's reuse.
    edit_modal_on = MockModelView(Model, edit_modal=True,
                                  endpoint="edit_modal_on")
    edit_modal_off = MockModelView(Model, edit_modal=False,
                                   endpoint="edit_modal_off")
    create_modal_on = MockModelView(Model, create_modal=True,
                                    endpoint="create_modal_on")
    create_modal_off = MockModelView(Model, create_modal=False,
                                     endpoint="create_modal_off")
    all_views = (edit_modal_on, edit_modal_off,
                 create_modal_on, create_modal_off)

    def assert_modal(client, endpoint, expected):
        # The rendered list page contains the 'fa_modal_window' container
        # exactly when the corresponding modal flag is on.
        rv = client.get('/admin/%s/' % endpoint)
        eq_(rv.status_code, 200)
        body = rv.data.decode('utf-8')
        ok_(('fa_modal_window' in body) == expected)

    for mode in ("bootstrap2", "bootstrap3"):
        flask_app = Flask(__name__)
        mode_admin = Admin(flask_app, template_mode=mode)
        for v in all_views:
            mode_admin.add_view(v)
        client = flask_app.test_client()

        assert_modal(client, 'edit_modal_on', True)
        assert_modal(client, 'edit_modal_off', False)
        assert_modal(client, 'create_modal_on', True)
        assert_modal(client, 'create_modal_off', False)
def check_class_name():
    # NOTE(review): this function's name does not start with 'test', so
    # nose/pytest will not collect it unless it is called explicitly --
    # confirm whether that is intentional.

    class DummyView(MockModelView):
        pass

    view = DummyView(Model)

    # The CamelCase class name is prettified into a spaced display name.
    eq_(view.name, 'Dummy View')
def test_export_csv():
    """Exercise CSV export end-to-end: export disabled, basic output,
    column inclusion/exclusion lists, utf-8 data, type/column formatters,
    and the (unsupported) macro formatter error case."""
    app, admin = setup()
    client = app.test_client()

    # test redirect when csv export is disabled (can_export defaults to False)
    view = MockModelView(Model, column_list=['col1', 'col2'], endpoint="test")
    admin.add_view(view)

    rv = client.get('/admin/test/export/csv/')
    eq_(rv.status_code, 302)

    # basic test of csv export with a few records
    view_data = {
        1: Model(1, "col1_1", "col2_1"),
        2: Model(2, "col1_2", "col2_2"),
        3: Model(3, "col1_3", "col2_3"),
    }

    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'])
    admin.add_view(view)

    rv = client.get('/admin/model/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    # header row uses prettified column names; rows end with CRLF per RFC 4180
    ok_("Col1,Col2\r\n"
        "col1_1,col2_1\r\n"
        "col1_2,col2_2\r\n"
        "col1_3,col2_3\r\n" == data)

    # test explicit use of column_export_list (overrides column_list for export)
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'],
                         column_export_list=['id','col1','col2'],
                         endpoint='exportinclusion')
    admin.add_view(view)

    rv = client.get('/admin/exportinclusion/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Id,Col1,Col2\r\n"
        "1,col1_1,col2_1\r\n"
        "2,col1_2,col2_2\r\n"
        "3,col1_3,col2_3\r\n" == data)

    # test explicit use of column_export_exclude_list
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'],
                         column_export_exclude_list=['col2'],
                         endpoint='exportexclusion')
    admin.add_view(view)

    rv = client.get('/admin/exportexclusion/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Col1\r\n"
        "col1_1\r\n"
        "col1_2\r\n"
        "col1_3\r\n" == data)

    # test utf8 characters in csv export (en-dashes surrounding the values)
    view_data[4] = Model(1, u'\u2013ut8_1\u2013', u'\u2013utf8_2\u2013')
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'], endpoint="utf8")
    admin.add_view(view)

    rv = client.get('/admin/utf8/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_(u'\u2013ut8_1\u2013,\u2013utf8_2\u2013\r\n' in data)

    # test None type, integer type, column_labels, and column_formatters
    view_data = {
        1: Model(1, "col1_1", 1),
        2: Model(2, "col1_2", 2),
        3: Model(3, None, 3),
    }

    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_labels={'col1': 'Str Field', 'col2': 'Int Field'},
        column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),
        endpoint="types_and_formatters"
    )
    admin.add_view(view)

    rv = client.get('/admin/types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    # None renders as an empty cell; col2 values are doubled by the formatter
    ok_("Str Field,Int Field\r\n"
        "col1_1,2\r\n"
        "col1_2,4\r\n"
        ",6\r\n" == data)

    # test column_formatters_export and column_type_formatters_export;
    # the export-specific formatter takes precedence over column_formatters
    type_formatters = {type(None): lambda view, value: "null"}
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters_export=dict(col2=lambda v, c, m, p: m.col2*3),
        column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),  # overridden
        column_type_formatters_export=type_formatters,
        endpoint="export_types_and_formatters"
    )
    admin.add_view(view)

    rv = client.get('/admin/export_types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,3\r\n"
        "col1_2,6\r\n"
        "null,9\r\n" == data)

    # Macros are not implemented for csv export yet and will throw an error
    view = MockModelView(
        Model, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        endpoint="macro_exception"
    )
    admin.add_view(view)

    rv = client.get('/admin/macro_exception/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)
|
Unique, incredibly beautiful scarf-type cloak made by the "fools" method, an ancient craft using so-called sweat technology. The scarf is a one-of-a-kind piece, an extremely beautiful combination of fancy fabrics, this faded technique, and hand embroidery. Besides being incredibly beautiful, it is uniquely comfortable to wear — a pleasant, caressing embrace for your body. Uniqueness, beauty, comfort.
|
#
# Copyright 2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import re

import django
from django.conf import settings
from django.contrib.auth.models import User as Django_User
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from flask import request
from jsonschema import validate as schema_validate, ValidationError
from werkzeug.exceptions import BadRequest

from accounts.sa_utils import auth_dataset_whitelists_for_user
# TokenFailure / RefreshTokenExpired are raised by the DCF helpers and caught
# in sa_registration() below; confirm they are exported by accounts.dcf_support.
from accounts.dcf_support import (verify_sa_at_dcf, register_sa_at_dcf,
                                  TokenFailure, RefreshTokenExpired)
from accounts.utils import register_or_refresh_gcp, verify_gcp_for_reg, api_gcp_delete, get_user_gcps
from accounts.models import AuthorizedDataset
from projects.models import Program
from auth import get_user_acls, UserValidationException
# Regex source used elsewhere to reject unsafe input strings.
BLACKLIST_RE = settings.BLACKLIST_RE

# Module-level logger, named per the project's settings.
logger = logging.getLogger(settings.LOGGER_NAME)
def get_account_details(user):
    """Return the controlled-access datasets this user is authorized for.

    Returns a dict {'dataset_access': [{'name', 'whitelist_id'}, ...]} on
    success, a dict with only a 'message' key on validation failure or
    unexpected error, or None when the user has no whitelists.
    """
    accounts_details = None

    try:
        whitelists = get_user_acls(user)

        if whitelists:
            uads = AuthorizedDataset.objects.filter(whitelist_id__in=whitelists)
            accounts_details = {
                'dataset_access': [
                    {'name': uad.name, 'whitelist_id': uad.whitelist_id}
                    for uad in uads
                ]
            }

    except UserValidationException as u:
        # Expected condition (e.g. account-linkage problems): surface the
        # message to the caller rather than treating it as a server error.
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning(u)
        accounts_details = {'message': str(u)}

    except Exception as e:
        logger.error("[ERROR] Encountered an error while retrieving user account details:")
        logger.exception(e)
        accounts_details = {'message': "Encountered an error while retrieving account details for {}.".format(user.email)}

    return accounts_details
def gcp_info(user, gcp_id=None):
    """Fetch details for one (when gcp_id is given) or all of a user's GCPs.

    Returns a (gcps, success) tuple; on error, gcps is a dict holding only a
    'message' and success is False.
    """
    gcps = None
    success = False

    try:
        gcps = get_user_gcps(user, gcp_id)
        # Success means we received a non-empty result; the redundant bool()
        # wrapper around the comparison was dropped.
        success = gcps is not None and len(gcps) > 0

    except Exception as e:
        logger.error("[ERROR] Encountered an error while retrieving GCP project details:")
        logger.exception(e)
        gcps = {'message': "Encountered an error while retrieving GCP project details for {}.".format(user.email if not gcp_id else gcp_id)}

    return gcps, success
def gcp_validation(user, gcp_id, refresh=False):
    """Validate a GCP project for registration (or refresh) on behalf of user.

    Returns a dict which, on success, carries 'gcp_project_id', 'message',
    'registered_users' and possibly 'unregistered_users'/'notes'; on failure
    the dict is empty.
    """
    validation = None
    result = {}

    try:
        # verify_gcp_for_reg returns (payload, http_status); the status code
        # is not needed here because the payload itself signals the outcome.
        validation, _status = verify_gcp_for_reg(user, gcp_id, refresh)

        if validation:
            if 'roles' in validation:
                result['registered_users'] = [{'email': x, 'project_roles': validation['roles'][x]['roles']} for x in validation['roles'] if validation['roles'][x]['registered_user']]
                unregs = [{'email': x, 'project_roles': validation['roles'][x]['roles']} for x in validation['roles'] if not validation['roles'][x]['registered_user']]
                if len(unregs):
                    result['unregistered_users'] = unregs
                    result['notes'] = "Users listed under 'unregistered users' are not registered in the ISB-CGC WebApp. Please note that if GCP Project {} ".format(gcp_id) + \
                        "is intended for use with controlled access data, all users on the project must log in to the ISB-CGC " + \
                        "web application at <https://isb-cgc.appspot.com> and link their Google Account to their eRA " + \
                        "Commons ID. The link to do so is found in Account Settings."
                result['message'] = "Google Cloud Platform project ID {} was successfully validated for registration.".format(gcp_id) \
                    if 'message' not in validation else validation['message']
                result['gcp_project_id'] = validation['gcp_id']
        else:
            # NOTE(review): original indentation was ambiguous; this warning is
            # treated as the no-payload branch -- confirm against upstream.
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning("[WARNING] Validation of GCP ID {} by user {} was unsuccessful!".format(gcp_id, user.email))

    except Exception as e:
        logger.error("[ERROR] While attempting to validate a project for registration:")
        logger.exception(e)

    return result
def gcp_registration(user, gcp_id, refresh):
    """Register (or refresh) a GCP project after validating it.

    Returns a (registration, success) tuple; registration is a dict describing
    the outcome (or the validation result when validation failed).
    """
    registration = None
    success = False

    try:
        validation = gcp_validation(user, gcp_id, refresh)

        if validation:
            # BUGFIX: gcp_validation never returns a 'users' key -- it reports
            # members under 'registered_users' / 'unregistered_users'. The old
            # check ("if 'users' in validation") could never be true, so
            # registration never ran.
            if 'registered_users' in validation:
                registered_users = [x['email'] for x in validation['registered_users']]
                registration, status = register_or_refresh_gcp(user, gcp_id, registered_users, refresh)
                if status == 200:
                    success = True
                    registration['registered_users'] = validation['registered_users']
                    if 'notes' in validation:
                        registration['notes'] = validation['notes']
                    if 'message' not in registration:
                        registration['message'] = "Google Cloud Platform project ID {} was successfully {}.".format(gcp_id, 'refreshed' if refresh else 'registered')
                    if 'unregistered_users' in validation:
                        registration['unregistered_users'] = validation['unregistered_users']
            else:
                registration = validation
                logger.warning("[WARNING] Validation of {} by user {} was unsuccessful! This project was not {}".format(gcp_id, user.email, 'refreshed' if refresh else 'registered'))
                # .get() guards against a validation result with no message.
                logger.warning("[WARNING] Reason given: {}".format(validation.get('message')))
        else:
            logger.warning("[WARNING] Validation of {} by user {} was unsuccessful!".format(gcp_id, user.email))

    except Exception as e:
        logger.error("[ERROR] While registering GCP ID {}:".format(gcp_id))
        logger.exception(e)

    return registration, success
def gcp_unregistration(user, gcp_id):
    """Unregister a GCP project for this user.

    Returns a (unreg, success) tuple; unreg always carries 'gcp_project_id'
    (when the delete call returned) plus a human-readable 'message'.
    """
    unreg = None
    success = False

    try:
        unreg, status = api_gcp_delete(user, gcp_id)

        if status == 200:
            success = True
            if 'message' not in unreg:
                unreg['message'] = "Google Cloud Platform project ID {} was successfully unregistered.".format(gcp_id)
        else:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning("[WARNING] Unregistration of {} by user {} was unsuccessful!".format(gcp_id, user.email))

        unreg['gcp_project_id'] = gcp_id

    except Exception as e:
        logger.error("[ERROR] While unregistering a GCP:")
        logger.exception(e)

    return unreg, success
def sa_info(user, gcp_id=None, sa_id=None):
    # Stub: service-account detail retrieval is not implemented yet.
    return None
def sa_registration(user, gcp_id=None, sa_id=None, action=None):
    """Verify a service account at DCF for the given project and datasets.

    sa_id may arrive either as an argument or in the JSON request body; the
    dataset list comes from the 'datasets' query parameter (comma-separated).
    Always returns a dict; on failure it contains only a 'message'.
    """
    result = {}

    try:
        request_data = request.get_json()
        # BUGFIX: the original conditional expression returned None in its
        # else-branch, silently discarding an sa_id passed in as an argument.
        if not sa_id and request_data and 'sa_id' in request_data:
            sa_id = request_data['sa_id']
        datasets = request.args.get('datasets', default=None, type=str).split(',') if 'datasets' in request.args else None

        if not sa_id:
            raise Exception("Service Account ID not provided!")
        # 'not datasets' also covers datasets=None, which would have raised a
        # TypeError under the old 'not len(datasets)' check.
        if not datasets:
            raise Exception("Dataset list not provided!")

        result = verify_sa_at_dcf(user, gcp_id, sa_id, datasets, {})

    except RefreshTokenExpired:
        logger.error("[ERROR] RefreshTokenExpired for user {} registering SA ID {}".format(user.email, sa_id))
        result['message'] = "Your DCF login has expired. Please go to our web application at https://isb-cgc.appspot.com and refresh your DCF login, then try to register your Service Account again."

    except TokenFailure:
        logger.error("[ERROR] TokenFailure for user {} registering SA ID {}".format(user.email, sa_id))
        result['message'] = "Your DCF login has expired or been disconnected. Please go to our web application at https://isb-cgc.appspot.com and renew your DCF login, then try to register your Service Account again."

    except Exception as e:
        logger.error("[ERROR] While registering service account {}:".format(sa_id))
        logger.exception(e)
        result['message'] = "Encountered a server error while attempting to register service account {}. Please contact the administrator.".format(sa_id)

    return result
def sa_unregistration(user, gcp_id=None, sa_id=None):
return None
|
This post in the fourth in my authors series - I was able to interview Roxanna Elden about her book, See Me After Class: Advice For Teachers By Teachers (which happens to be one of my all-time favorite education books).
LF: A line of your classroom management advice rang so true: "The first few times you think your class is under control and it's okay to relax, you're probably wrong." What would be the one or two most important things for all teachers to keep in mind about this part of teaching?
Most of the management tips teachers learn in training are pretty solid. The problem is that - like a lot of good advice - they are easier said than done. Sure, we've posted our rules and consequences on the wall, they're phrased in positive language, and we have every intention of enforcing them consistently. But then the students come in and present us with a non-stop series of judgment calls. At that point we're not wondering whether it's important to be consistent. We're wondering whether that student who threw the paper ball was really aiming for the trashcan.
This doesn't mean the original advice is wrong, though. In the book, all classroom management advice is divided into three segments: Why the basic recommendations usually work, why they're sometimes easier said than done, and then how to troubleshoot when things fall apart.
LF: I especially liked your list of things teachers can do to like their students better. Which are your two favorites?
First, look for opportunities to see students as human beings. Second, look for opportunities to show students that we are human beings. Teachers can accomplish both of these in a variety of ways, including attending extracurricular activities, posting pictures of happy moments on the classroom wall, or building in a few minutes of conversation time on a Friday afternoon. The overall thing to keep in mind is that part of our job is just to be adults who care about kids. When students' behavior or lack of effort is frustrating us, this can take a conscious effort. But sometimes we need to show students we care about them whether or not they are "working to potential."
LF: You make many important points with humor pointing out the teaching world as it is, as opposed as the way we'd like it to be. For example, you write: "You may be told that your department does something called collaborative planning, in which teachers meet to plan ahead, share ideas, and make sure everyone is on the same page. Though many new teachers hear of this legend, few experience it."
What advantages, if any, do you see in teachers collaborating more, and, if you think it's beneficial, what might be some simple ways teachers can do so?
Collaboration works best when everyone in the room wants to be in the room. With that in mind, our best support may not come from a group of colleagues thrust together by administrators when they'd rather be catching up on grading.
This is why, no matter what your school does, every teacher should aim to build a personal "board of advisors." These are people you select yourself and can adjust as your needs change. The list should include those whose teaching styles you admire, those who teach similar subjects, and those who teach similar students. These don't all have to be the same people; they don't even all have to work at your school. Additionally, you need someone you can vent to without worrying that your concerns will be repeated to others, and someone who might not even be a teacher, but who knows your strengths and can remind you of them when needed.
As for those collaboration meetings? They are a good chance to interact with your colleagues, and we all need a little of that - why do you think it's so hard to keep the kids quiet? Plus, it's good karma to support your fellow teacher who has to run a mandatory collaboration meeting. So attend and participate with the best attitude you can muster. And make sure you sign the sign-in sheet.
LF: You write that principals really want four things from teachers: "Do your job. Do your job well. Do your job independently and with as little drama as possible. Make yourself, your students, the school, and, yes, your principal look as good as possible."
For principals out there, what are the things most teachers want from their principals?
Based on the interviews I did for the book, teachers would collectively make the following three requests from administrators - or collectively thank administrators who already do these things.
Give plenty of lead-time before making big changes: Teachers do our best work when we have time to plan ahead. Last-minute changes in classrooms, schedules, or curriculum waste the gas in our tanks and leave us feeling frustrated. With this in mind, we appreciate when you let us know who, what, and where we'll be teaching as early as possible - and then try to avoid mid-year changes.
Back up teacher judgment calls whenever possible. Teachers have to make on-the-spot decisions all day, every day. Students challenge our authority. Parents question grades and consequences. Knowing we'll have your support during a conference gives us more confidence in the classroom. If you do have concerns about how a teacher has handled a situation, make this a private discussion. Reversing teachers' decisions or reprimanding them in front of others makes them seem weak and sends them back to class with destroyed credibility.
Have fair, transparent processes for making your own decisions. Make sure your faculty understands how you assign classes, distribute students with behavior problems, and make classroom upgrades. No matter who gets that new smart-board or has to teach the overflow class, avoiding the appearance of favoritism is good for everyone's morale.
LF: There are a lot of teacher books out there, though I don't think there are many that have the "tell it how it really is" tone yours does, which combines with a sense of humor. Why do you think so many of us educator/writers are so measured and take ourselves so seriously?
Because there are kids involved. It's not okay to say, "I'm working with kids and I might be bad at it." It's not even okay to say we're just okay.
This leads to tremendous emphasis on learning from the best members of our profession and the best moments from our colleagues' classrooms. A side effect of this, however, is that teachers spend a lot of time comparing our unedited footage to other people's highlight reels. We are surrounded by images of teachers who seem genetically engineered not to need sleep, and inundated with messages that are meant to be inspiring but sometimes feel terrifying: The future is in your hands! This is the most important job in the world! The kids can't afford for you to fail!
All of this implies teaching is a mission for a superhero when in fact it's a job. Not only that - it's a job best suited for a well-rested, sane adult. The most regrettable moments as a teacher tend to happen on days when lack of sleep combines with an emotional rubber band stretched to its breaking point.
I wrote See Me After Class because there were three things I desperately needed and couldn't find in existing books: humor, honesty, and practical advice.
When teaching is good, it is really good. But when teaching is hard, it is really hard. On a day when a second-grader curses at you, you don't want a reminder that your job is important. You don't want to hear that this would never have happened in the classroom of award-winning-teacher X. You want to hear a story about a kindergartener punching a teacher in the stomach.
It's not because you're mean. It's because you need to know teachers can bounce back from their worst moments and still go on to become successful. And then you need to know the next manageable step to being a better teacher tomorrow morning.
|
import datetime
from django.db import models
from django.utils import timezone
class Survey(models.Model):
    """A survey: a headline, a body of descriptive text, and a publish date."""
    survey_head = models.CharField(max_length=100)
    survey_text = models.TextField()
    pub_date = models.DateTimeField('date published')
    # num_question = models.PositiveSmallIntegerField(default=0)

    def __str__(self):
        return self.survey_head
        # return u'%s %s' % (self.survey_head, self.survey_text)

    def was_published_recently(self):
        """Return True when pub_date falls within the last five days and is
        not in the future."""
        now = timezone.now()
        earliest = now - datetime.timedelta(days=5)
        return earliest <= self.pub_date <= now

    # Admin list-page presentation for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class SingleChoiceQuestion(models.Model):
    """A question allowing exactly one answer, belonging to a Survey."""
    survey = models.ForeignKey(Survey)
    question_text = models.CharField(max_length=200)
    # seq = models.PositiveSmallIntegerField(required=False)

    def __str__(self):
        return self.question_text
class MultiChoiceQuestion(models.Model):
    """A question allowing several answers, belonging to a Survey."""
    survey = models.ForeignKey(Survey)
    question_text = models.CharField(max_length=200)
    # seq = models.PositiveSmallIntegerField(required=False)

    def __str__(self):
        return self.question_text
class FreeQuestion(models.Model):
    """A free-text question with its answer, belonging to a Survey."""
    survey = models.ForeignKey(Survey)
    question_text = models.CharField(max_length=200)
    # NOTE(review): max_length on a TextField is not enforced at the database
    # level by Django -- confirm whether a CharField was intended.
    answer_text = models.TextField(max_length=400)
    # seq = models.PositiveSmallIntegerField(required=False)

    def __str__(self):
        return self.question_text
class SingleChoice(models.Model):
    """One selectable option for a SingleChoiceQuestion, with a vote tally."""
    question = models.ForeignKey(SingleChoiceQuestion)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Was __unicode__, which Python 3 ignores; __str__ matches the other
        # models in this module.
        return self.choice_text
class MultiChoice(models.Model):
    """One selectable option for a MultiChoiceQuestion, with a vote tally."""
    question = models.ForeignKey(MultiChoiceQuestion)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Was __unicode__, which Python 3 ignores; __str__ matches the other
        # models in this module.
        return self.choice_text
|
I'm an older man living alone. I am easy to get along with.
Very outgoing, and love to have fun.
Im a fun person to be with.
Early forties. Athletic and attractive. Enjoy outdoor activities and motorcycles. Looking for love.
I am a person who enjoys life and doing things in the outdoors, such as camping and hiking.
I like to have fun and have a good time. I like to stay active, whether that be playing sports or enjoying the outdoors. If you would like to know more, let's talk and get to know each other.
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental impl for gen_nn_ops.py using unified APIs, for testing only."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework.experimental import _nn_ops
from tensorflow.python.framework.experimental import context_stack as context
from tensorflow.python.framework.experimental import gradient_registry
from tensorflow.python.framework.experimental import tape_stack
def relu(a, name=None):
  """Compute ReLU of `a` via the unified-API backend, recording the op on the
  current default tape with the global gradient registry."""
  return _nn_ops.relu(
      context.get_default(),
      a,
      name,
      tape_stack.get_default(),
      gradient_registry.get_global_registry())
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
  """Compute sparse softmax cross entropy via the unified-API backend,
  recording the op on the current default tape with the global gradient
  registry."""
  return _nn_ops.sparse_softmax_cross_entropy_with_logits(
      context.get_default(),
      logits,
      labels,
      name,
      tape_stack.get_default(),
      gradient_registry.get_global_registry())
|
Thetford SmartTote2 LX 4-Wheel Portable Waste Tank is rated 2.3 out of 5 by 30.
The Thetford SmartTote2 LX 4-Wheel Waste Tank is a 35-gallon portable waste tank that is rugged, fast-emptying, and easy-to-rinse. The 90ยฐ elbow nozzle will easily hook up to your sewer. The days of heavy lifting are gone thanks to the handle that hooks onto a trailer hitch for easy towing.
Completely assembled and ready to use right out of the box!
Rated 3 out of 5 by california3201 from Thetford 27GL dump tank I have owned the 27gl Thetford for about a year and a half now. Two issues that I encounter while using the unit are: the handle is very flimsy and easy to break when pushing backwards on gravel, and the bayonet on the hose is poorly designed because it comes loose while dumping and the gaskets will fall out. I am very careful in not putting stress on the handle when pushing backward. I also replaced the bayonet attachment with an aftermarket one, and I added a shut-off valve at the end of the hose for better control. Now the tank works as designed; just be gentle with the handle.
Rated 1 out of 5 by Steve Dale from Thetford SmartTote 35 Gallon I broke the first handle within a week. I called and was sent another one that broke in 2 weeks and it broke also. It's not a strong enough design for a tank this large. I am currently using a luggage strap, but have to cable tie the tank handle to the axel to keep it trailing straight. Also, the locking attachments and hose work loose as the fit is sloppy and comes off without putting on lock screw. Also doesn't drain completely and has to be raised about 4" to completely drain.
Rated 1 out of 5 by Mattie Suggs from Waste Tank Save your money and buy another brand. First to go was the door hinge that houses the hose, there is no hinge, just a thin plastic layer that will break off very quickly and no way to shut door after it's broken, hoses fall out. Next to go will be the wheels falling off and almost impossible to get two to repair. All these things went wrong within the first couple months of purchase. VERY VERY DISAPPOINTED!
Rated 1 out of 5 by Bryan Nors from Terrible product Purchased this product for the 4th of July holiday and it turned out to be a big disappointment. Filled up the tank for the first time to take and dump and the wheels literally fell off. Two of them. The wheels towards the back of the tank fell off the tank while taking to the dump station. Since I had no idea that they would fall off, it ended up dragging holes in the tank. The product is completely useless now. Don't waste money on a product that is garbage.
Rated 2 out of 5 by Mike Partak from Good design, very poor materials I ordered then received in just a few days from Camping World. I opened the box and the cover for the discharge hose was broken off laying in the bottom of the box. It appears to have been shipped that way. The box was delivered by UPS and did not show signs of being handled badly. I called Camping World and their customer service was great. Offered to replace, but I asked if they could just ship me a new lid to replace the broken one. They said I would have to call Thetford. Their customer service not as good, after I sent pictures and documents in emails they finally agreed to send me another cover in 7 to 10 days and acted like it was a big deal. I am still waiting for the cover. There are two other covers that are also not long for this world. As I said, I really like the design so I will most likely add metal hinges to take care of their poor materials.
Rated 5 out of 5 by Nissan Man from Works great and easy to use I purchased this tote tank to empty my black holding tank on my camper that is set up on my mountain property. I used this tote tank for the 1st time this past week. Tote tank and hose are very easy to hook up to camper drain outlet. Fitting on end of tote tank has 4 flanges to connect and lock onto camper drain outlet. Hooked up to camper, opened gate to black tank, and all the black water flowed easily into the tote tank. Replaced end cap on tote tank hose; which has 4 flanges and folded flex hose back inside tote tank carrier. Very easy and fast process. I did not have any leaks or issues with the fittings on the tote tank hose.Tote tank was filled and full with fresh water and had no issues. Worked great!
Rated 2 out of 5 by Terry Lomax from Thetford 27 Gallon Smart Tote 2 LX You almost need three hands to negotiate the hose when putting back into the tote for transfer to the dump. Spillage is a real possibility until you become familiar with it. Once full, it is heavy but seemed to roll well. The handle did not stay attached to my truck hitch until I strapped it down with a ratchet strap. The body seems well built but with all the little doors there are just too many things to potentially break. The bayonet lugs on the elbow do not lock and work loose while using.
Do you have anything bigger?
Yes, we offer a 35 gallon tank. See item <a href=https://www.campingworld.com/thetford-smarttote2-lx-4-wheel-portable-waste-tank-35-gallon>82133</a>.
No, the extendable tow handle and wheels are not designed for easy removal.
Is there a warranty or replacement part, on the stopper valve part we bought our tote last year and only after a few uses the Styrofoam and stopper came off and went down the drain?
Replacement parts are available for Thetford's SmartTote2. You can use the part diagram page <a href=http://thetford.com/part/smarttote2-deluxe/>here</a> for identification and part numbers. Then contact our Technical Service Group @ 800.622.6264(M-F, 8:00-5:00 CST) or visit the Parts Counter at a Camping World store to order needed part(s).
Can you pull this behind a vehicle on a road?
This tote can be pulled behind a moving vehicle going up to 5mph.
Concerning the handle: Is it supposed to operate EXACTLY as shown in the video?
Yes, the handle rested on top of the tank on the previous generation of the SmartTote, but the handle folds under on the SmartTote2 4-wheel models.
I was given the 35 gallon model for Christmas and looking for the sizes of the other 2 models of 4 wheel models only. the 35 gallon model seems huge to store .
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import twobuntu.utils
class Migration(migrations.Migration):
    """Initial migration: create the ConfirmationKey and Profile models.

    Both models use a one-to-one link to the configured user model as
    their primary key, so there is at most one of each per user.
    """

    # django.contrib.auth must be migrated first so that
    # settings.AUTH_USER_MODEL can be resolved below.
    dependencies = [
        ('auth', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            # One-time key (random UUID hex) tied to a user account.
            name='ConfirmationKey',
            fields=[
                ('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('key', models.CharField(default=twobuntu.utils.uuid, max_length=32)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            # Optional public profile information for a user.
            name='Profile',
            fields=[
                ('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('birthday', models.DateField(help_text=b'Birthday in YYYY-MM-DD format [used for displaying age].', null=True, blank=True)),
                ('location', models.CharField(help_text=b'Geographic location.', max_length=40, blank=True)),
                ('website', models.URLField(help_text=b'A personal blog or website.', blank=True)),
                ('bio', models.TextField(help_text=b'A brief biography.', blank=True)),
            ],
            options={
                # Most recently active users first.
                'ordering': ('-user__last_login',),
            },
            bases=(models.Model,),
        ),
    ]
|
Tags: Copper Ore Flotation Machine . Rock Gold Separator . Sf Flotation Machine. Copper . reliable tin concentrator, tinstone cassiterite refining machines.
Best Price Copper Ore Processing Plant Flotation Machine/Mining Tank Flotation Cell . Tags: Flotation Machine/flotation Cell . Gold Flotation Machine . Lead Ore Flotation Machine . Tags: Copper Ore Process Plant . Flotation Separator .. 200TPH Indonesia mining machine for tin copper iron ore processing plant.
mineral concentrator shaking table gravity tin ore separation equipment .. Tags: Heavy Mineral Separation . Copper Ore Flotation Machine . Gold Ore.
. Price . Tin Ore. Hot Selling Tin Ore Diaphragm Jigging Separator Machine Price .. Mining Gold Shaking Table /gold ore vibrating Shaking table for sale Tags: Copper Ore Tin Ore Flotation Plant . Flotation Machine . Ore Flotation Machine.
Gold Extracting Machine /Alluvial Rotary Scrubber /Gold Washing Machine .. DESEN tin ore jig processing equipment for sale in malaysia for sale .. Gold Extraction Machine Copper Ore Flotation Machine , Laboratory Flotation Cell For Sale.
Indonesia gold/copper/iron/hematite/chrome ore,spiral classifier .. to Compare. Tags: Sf Flotation Machine . Copper Ore Flotation Separator . Separator Machine . 200TPH Indonesia mining machine for tin copper iron ore processing plant.
Mica separation equipments /copper separation/silver shake table. Add to Compare . Gold Flotation Machine Price for Mica Minerals Mining Equipment. Add to Compare HSM Proffesional Separator Flotation Machine For Metal Ore.
tin ore. China Buy Mining Gravity Separation Equipment 6 S Tin Ore Gold Copper Tags: Copper Ore Tin Ore Flotation Plant . Flotation Machine . Ore Flotation.
Hot Sale Gravity Separator Machine , Shaking Table or Gold Table. US $1000 3200 / Set Type: Flotation Separator. Add to Favorites . Application: Suitable for extracting gold, tin, tungsten, copper, chrome etc. Certification: CE; SGS; TUV.
Tags: Copper Ore Flotation Plant . Gold Ore Flotation Plant . Ore Flotation Chemicals .. machine , jigger machinery for iron copper lead chrome zinc tin many ore . flotation machine for Ores concentrate separation plant Mining copper lead.
About 60% of these are mineral separator, 9% are other ore, and 3% are other . Tinstone Flotation Machine, Tin Ore Flotation Cell, Cassiterite Flotation Separator Separator for copper,gold,iron ore upgrading plant cassiterite ore plant.
Mining Jig machine used for placer,gold,tin,tungsten,lead,zinc,antimony,manganese,iron ore. US $1200 2000 / Flotation Machine for Copper Ore separation.
Diesel engine alluvial chrome mineral processing plant tin/coal dressing jig separator machine .. chrome ore separating gold washing plant spiral chute,gold sorting machine .. Copper Ore Processing Plant Flotation Machine.
Add to Compare. Tags: Copper Ore Processing Plant . Alluvial Mining Equipment . Placer Gold Mining Equipment . China top professional tin ore processing plant magnetic separator machine Tags: Flotation Machine . Ore Beneficiation.
. Shaking Table from Mineral Separator Supplier or Manufacturer Shicheng Gaoxuan . Also widely used in separation for Gold, Silver, Tin, Columbium, Tantalum, . Iron, Chrome,Manganese, Zircon, Lead, Zinc, Mercury, Copper, Aluminum, etc. .. chute,jig machine,flotation cell,magnetic separator,rotary scrubber,gold.
Tags: Flotation Machine . Tinstone Flotation Machine . Tin Ore Flotation Separator. dry electromagnetic separator ore cassiterite Tin Processing Plant.
|
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
    """uftrace 'abc' test: per-function PMU cycle/instruction read triggers."""

    def __init__(self):
        TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
[ 32417] | main() {
[ 32417] | a() {
[ 32417] | b() {
[ 32417] | /* read:pmu-cycle (cycle=233, instructions=159) */
[ 32417] | c() {
[ 32417] | /* read:pmu-cycle (cycle=471, instructions=385) */
0.479 us [ 32417] | getpid();
[ 32417] | /* diff:pmu-cycle (cycle=+3230, instructions=+2723, IPC=0.84) */
3.014 us [ 32417] | } /* c */
[ 32417] | /* diff:pmu-cycle (cycle=+5014, instructions=+3514, IPC=0.70) */
16.914 us [ 32417] | } /* b */
17.083 us [ 32417] | } /* a */
17.873 us [ 32417] | } /* main */
""")

    def prerun(self, timeout):
        # PMU counters need a permissive perf_event_paranoid setting.
        if TestBase.check_perf_paranoid(self):
            return TestCase.TEST_SUCCESS
        return TestBase.TEST_SKIP

    def setup(self):
        self.option = "-F main -T '[bc]@read=pmu-cycle'"

    def sort(self, output):
        """Normalize trace output so it compares stably against the template."""
        kept = []
        for line in output.split('\n'):
            # Skip blank lines and comment lines.
            if not line.strip() or line.startswith('#'):
                continue
            func = line.split('|', 1)[-1]
            # Counter values vary run to run, so mask the numbers out.
            for marker in ('read:pmu-cycle', 'diff:pmu-cycle'):
                if func.find(marker) > 0:
                    func = ' /* %s */' % marker
            kept.append(func)
        return '\n'.join(kept)
|
Grease bottom and sides of 3 loaf pans. Set aside.
In an extra large mixing bowl beat the sugar and smashed banana with an electric mixer on medium speed until well combined and incorporated. Then add the eggs, one at a time, beating after each addition until combined. Set aside.
In a large mixing bowl combine all dry ingredients (flour, baking soda, salt, cinnamon, and nutmeg/pumpkin pie spice). Add the dry ingredients and milk/non-dairy substitute alternately into the sugar mixture. Beat after each addition on low speed just until combined. Beat in pumpkin puree at the end.
When done remove from oven and cool for 10 minutes. Remove from pans and cool completely on wire racks. Wrap and store overnight before slicing.
**IF YOU WOULD LIKE TO BAKE AS MUFFINS** (Standard muffin tin of 12): Keep the oven temperature the same and bake for 15-20 minutes. My oven is slightly hotter than others, so mine took 12 minutes.
STORING: Baked breads can be stored in the freezer for up to 3 months. Keep them in loaves or slice in portions, as desired.
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2015 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from mygpoclient import feeds
import logging
logger = logging.getLogger(__name__)
def parse_entry(podcast, entry):
    """Convert one feedservice episode entry into an episode of *podcast*.

    Missing optional fields fall back to sensible defaults; the download
    URL doubles as the GUID when none is provided.
    """
    default_file = entry['default_file']
    download_url = default_file['url']
    episode_data = {
        'title': entry['title'],
        'description': entry.get('description', ''),
        'url': download_url,
        'mime_type': default_file['mime_type'],
        'file_size': entry.get('filesize', -1),
        'guid': entry.get('guid', download_url),
        'link': entry.get('link', ''),
        'published': entry.get('released', 0),
        'total_time': entry.get('duration', 0),
    }
    return podcast.episode_factory(episode_data)
def update_using_feedservice(podcasts):
    """Refresh metadata of *podcasts* in bulk via the gpodder.net feed service.

    Feeds the service reported errors for are skipped; permanent redirects
    update the stored podcast URL in place before saving.
    """
    urls = [podcast.url for podcast in podcasts]
    client = feeds.FeedserviceClient()
    # Last modified + logo/etc..
    result = client.parse_feeds(urls)

    for podcast in podcasts:
        feed = result.get_feed(podcast.url)
        if feed is None:
            logger.info('Feed not updated: %s', podcast.url)
            continue

        # Handle permanent redirects
        if feed.get('new_location', False):
            new_url = feed['new_location']
            logger.info('Redirect %s => %s', podcast.url, new_url)
            podcast.url = new_url

        # Error handling
        if feed.get('errors', False):
            logger.error('Error parsing feed: %s', repr(feed['errors']))
            continue

        # Update per-podcast metadata; keep existing values as fallbacks.
        podcast.title = feed.get('title', podcast.url)
        podcast.link = feed.get('link', podcast.link)
        podcast.description = feed.get('description', podcast.description)
        podcast.cover_url = feed.get('logo', podcast.cover_url)
        #podcast.http_etag = feed.get('http_etag', podcast.http_etag)
        #podcast.http_last_modified = feed.get('http_last_modified', \
        #        podcast.http_last_modified)
        podcast.save()

        # Update episodes
        # NOTE(review): the function appears truncated here; parsed_episodes
        # is built but not applied within this chunk.
        parsed_episodes = [parse_entry(podcast, entry) for entry in feed['episodes']]
        # ...
|
Captain Alvin Winham Jr. was hired with the Lawton Police Department in May 1998. At the end of 1998 he was assigned to the Lawton Police Department Canine section as a Canine Handler. In 2000 he was assigned to the Lawton Police Department Tactical Team and is the current Tact Team Commander.
Captain Alvin Winham is defensive Tactics Instructor, Firearms Instructor, Rifle Instructor and a member of the Honor Guard. Alvin Winham was promoted to the rank of Captain in May 2012. Captain Alvin Winham is the current Watch Commander of Delta Team.
Captain Winham serves as the Tact Team Commander, Fleet Manager, Department Audit Coordinator and he sits on the Accident Review Board.
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.utils.pivotdumptable import pivotDumpTable
from lib.techniques.brute.use import columnExists
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
def __init__(self):
GenericEnumeration.__init__(self)
kb.data.processChar = lambda x: x.replace('_', ' ') if x else x
def getPasswordHashes(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user password hashes"
logger.warn(warnMsg)
return {}
    def getDbs(self):
        """Enumerate schema (database) names on SAP MaxDB, with session caching."""
        # Serve from the cache when a previous call already enumerated them.
        if len(kb.data.cachedDbs) > 0:
            return kb.data.cachedDbs

        infoMsg = "fetching database names"
        logger.info(infoMsg)

        rootQuery = queries[Backend.getIdentifiedDbms()].dbs
        randStr = randomStr()

        query = rootQuery.inband.query
        # MaxDB lacks LIMIT/OFFSET, so results are retrieved through the
        # pivot-table dump technique rather than row-by-row enumeration.
        retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.schemaname' % randStr], blind=True)

        if retVal:
            # Python 2 idiom: first (only) column of the pivoted result.
            kb.data.cachedDbs = retVal[0].values()[0]

        if kb.data.cachedDbs:
            kb.data.cachedDbs.sort()

        return kb.data.cachedDbs
    def getTables(self, bruteForce=None):
        """Enumerate table names per database, caching in kb.data.cachedTables."""
        if len(kb.data.cachedTables) > 0:
            return kb.data.cachedTables

        self.forceDbmsEnum()

        # Resolve the pseudo-name for "current database" to a real name.
        if conf.db == CURRENT_DB:
            conf.db = self.getCurrentDb()

        if conf.db:
            dbs = conf.db.split(",")
        else:
            dbs = self.getDbs()

        for db in filter(None, dbs):
            dbs[dbs.index(db)] = safeSQLIdentificatorNaming(db)

        infoMsg = "fetching tables for database"
        infoMsg += "%s: %s" % ("s" if len(dbs) > 1 else "", ", ".join(db if isinstance(db, basestring) else db[0] for db in sorted(dbs)))
        logger.info(infoMsg)

        rootQuery = queries[Backend.getIdentifiedDbms()].tables

        for db in dbs:
            randStr = randomStr()
            # 'USER' is a keyword on MaxDB and must not be quoted.
            query = rootQuery.inband.query % (("'%s'" % db) if db != "USER" else 'USER')
            # Pivot-table dump technique (MaxDB has no LIMIT/OFFSET).
            retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.tablename' % randStr], blind=True)

            if retVal:
                for table in retVal[0].values()[0]:
                    if db not in kb.data.cachedTables:
                        kb.data.cachedTables[db] = [table]
                    else:
                        kb.data.cachedTables[db].append(table)

        for db, tables in kb.data.cachedTables.items():
            kb.data.cachedTables[db] = sorted(tables) if tables else tables

        return kb.data.cachedTables
    def getColumns(self, onlyColNames=False, colTuple=None, bruteForce=None, dumpMode=False):
        """Enumerate column names/types for the requested table(s) on SAP MaxDB.

        Results are cached in kb.data.cachedColumns, keyed by database then
        table. Exactly one database may be targeted per invocation.
        """
        self.forceDbmsEnum()

        # Fall back to the current database when none was supplied.
        if conf.db is None or conf.db == CURRENT_DB:
            if conf.db is None:
                warnMsg = "missing database parameter. sqlmap is going "
                warnMsg += "to use the current database to enumerate "
                warnMsg += "table(s) columns"
                logger.warn(warnMsg)

            conf.db = self.getCurrentDb()
        elif conf.db is not None:
            # Column enumeration supports exactly one database at a time.
            if ',' in conf.db:
                errMsg = "only one database name is allowed when enumerating "
                errMsg += "the tables' columns"
                raise SqlmapMissingMandatoryOptionException(errMsg)

            conf.db = safeSQLIdentificatorNaming(conf.db)

        # Build the user-requested column list, honoring exclusions.
        if conf.col:
            colList = conf.col.split(",")
        else:
            colList = []

        if conf.excludeCol:
            colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]

        for col in colList:
            colList[colList.index(col)] = safeSQLIdentificatorNaming(col)

        # Build the target table list, enumerating tables if none given.
        if conf.tbl:
            tblList = conf.tbl.split(",")
        else:
            self.getTables()

            if len(kb.data.cachedTables) > 0:
                tblList = kb.data.cachedTables.values()

                if isinstance(tblList[0], (set, tuple, list)):
                    tblList = tblList[0]
            else:
                errMsg = "unable to retrieve the tables "
                errMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
                raise SqlmapNoneDataException(errMsg)

        for tbl in tblList:
            tblList[tblList.index(tbl)] = safeSQLIdentificatorNaming(tbl, True)

        if bruteForce:
            # Reuse any column names found by a previous brute-force run.
            resumeAvailable = False

            for tbl in tblList:
                for db, table, colName, colType in kb.brute.columns:
                    if db == conf.db and table == tbl:
                        resumeAvailable = True
                        break

            # NOTE: 'and' binds tighter than 'or', so a user-supplied column
            # list alone is enough to take this resume path.
            if resumeAvailable and not conf.freshQueries or colList:
                columns = {}

                for column in colList:
                    columns[column] = None

                for tbl in tblList:
                    for db, table, colName, colType in kb.brute.columns:
                        if db == conf.db and table == tbl:
                            columns[colName] = colType

                    if conf.db in kb.data.cachedColumns:
                        kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)][safeSQLIdentificatorNaming(tbl, True)] = columns
                    else:
                        kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = {safeSQLIdentificatorNaming(tbl, True): columns}

                return kb.data.cachedColumns

            message = "do you want to use common column existence check? [y/N/q] "
            test = readInput(message, default="Y" if "Y" in message else "N")

            if test[0] in ("n", "N"):
                return
            elif test[0] in ("q", "Q"):
                raise SqlmapUserQuitException
            else:
                return columnExists(paths.COMMON_COLUMNS)

        rootQuery = queries[Backend.getIdentifiedDbms()].columns

        for tbl in tblList:
            # Serve from the cache when this table was already enumerated.
            if conf.db is not None and len(kb.data.cachedColumns) > 0 \
               and conf.db in kb.data.cachedColumns and tbl in \
               kb.data.cachedColumns[conf.db]:
                infoMsg = "fetched tables' columns on "
                infoMsg += "database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
                logger.info(infoMsg)

                return {conf.db: kb.data.cachedColumns[conf.db]}

            # In dump mode with explicit columns, trust the user's list.
            if dumpMode and colList:
                table = {}
                table[safeSQLIdentificatorNaming(tbl)] = dict((_, None) for _ in colList)
                kb.data.cachedColumns[safeSQLIdentificatorNaming(conf.db)] = table
                continue

            infoMsg = "fetching columns "
            infoMsg += "for table '%s' " % unsafeSQLIdentificatorNaming(tbl)
            infoMsg += "on database '%s'" % unsafeSQLIdentificatorNaming(conf.db)
            logger.info(infoMsg)

            randStr = randomStr()
            query = rootQuery.inband.query % (unsafeSQLIdentificatorNaming(tbl), ("'%s'" % unsafeSQLIdentificatorNaming(conf.db)) if unsafeSQLIdentificatorNaming(conf.db) != "USER" else 'USER')
            # Pivot-table dump technique (MaxDB has no LIMIT/OFFSET).
            retVal = pivotDumpTable("(%s) AS %s" % (query, randStr), ['%s.columnname' % randStr, '%s.datatype' % randStr, '%s.len' % randStr], blind=True)

            if retVal:
                table = {}
                columns = {}

                for columnname, datatype, length in zip(retVal[0]["%s.columnname" % randStr], retVal[0]["%s.datatype" % randStr], retVal[0]["%s.len" % randStr]):
                    columns[safeSQLIdentificatorNaming(columnname)] = "%s(%s)" % (datatype, length)

                table[tbl] = columns
                kb.data.cachedColumns[conf.db] = table

        return kb.data.cachedColumns
def getPrivileges(self, *args):
warnMsg = "on SAP MaxDB it is not possible to enumerate the user privileges"
logger.warn(warnMsg)
return {}
def searchDb(self):
warnMsg = "on SAP MaxDB it is not possible to search databases"
logger.warn(warnMsg)
return []
def getHostname(self):
warnMsg = "on SAP MaxDB it is not possible to enumerate the hostname"
logger.warn(warnMsg)
|
A 55 second video introducing Donald Brown.
I am a contemporary figurative sculptor addressing real life issues through my works to create positive personal and social change. My current Open Edition sculpture, titled "A Sporting Chance for Peace", promotes personal peace and positive principles for life such as respect, discipline, patience, humility and forgiveness to inspire present and future generations.
This sculpture is the global flagship and funding source for Project AIRWAVES.
1. Project AIRWAVES is an initiative within my company that addresses bullying and violence.
2. Artists will be employed to conduct one-day creative arts residencies in schools.
3. The artists will work with staff and students to create works of art that address violence in order to elevate society.
4. A.I.R.W.A.V.E.S is the acronym for Artists In Residence Working Against Violence Elevates Society.
The greater agenda is to use art to create, fund and support positive change. To further achieve this, contracts will be made available for the buyers of 25 of these sculptures. The contracts will ensure that each buyer would receive a full refund of the price they paid for their sculpture/s. Each sculpture is $3,750 (ยฃ2,800), measures 40 x 24 x 6 inches and cold cast in bronze and resin.
The full colour lithographic prints of the sculpture will be marketed and sold worldwide in part to ensure a full refund to the buyers of the 25 sculptures. The contract would also position the sculpture buyer to receive a return on their investment.
This refund and ROI (500%) strategy is designed to generate additional capital to fund Project AIRWAVES sooner rather than later and reward those buyers of the 25 sculptures who helped to make it possible.
|
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to accumulate statistics during runs."""
import collections
import copy
import numpy as np
class StatisticsAccumulator(object):
  """Accumulate the statistics of an environment's real-world variables.

  This class will accumulate the statistics generated by an environment
  into a local storage variable which can then be written to disk and
  used by the Evaluators class.
  """

  def __init__(self, acc_safety, acc_safety_vars, acc_multiobj, auto_acc=True):
    """A class to easily accumulate necessary statistics for evaluation.

    Args:
      acc_safety: whether we should accumulate safety statistics.
      acc_safety_vars: whether we should accumulate state variables specific to
        safety.
      acc_multiobj: whether we should accumulate multi-objective statistics.
      auto_acc: whether to automatically accumulate when 'LAST' timesteps are
        pushed.
    """
    self._acc_safety = acc_safety
    self._acc_safety_vars = acc_safety_vars
    self._acc_multiobj = acc_multiobj
    # NOTE(review): _auto_acc is stored but never consulted; push() always
    # accumulates on a LAST timestep -- confirm this is intended.
    self._auto_acc = auto_acc
    self._buffer = []  # Buffer of timesteps of current episode
    self._stat_buffers = dict()  # Maps statistic name -> accumulated data.

  def push(self, timestep):
    """Pushes a new timestep onto the current episode's buffer."""
    # Deep-copy so later mutation of the caller's timestep cannot corrupt
    # the buffered episode history.
    local_ts = copy.deepcopy(timestep)
    self._buffer.append(local_ts)
    if local_ts.last():
      self.accumulate()
      self.clear_buffer()

  def clear_buffer(self):
    """Clears the buffer of timesteps."""
    self._buffer = []

  def accumulate(self):
    """Accumulates statistics for the given buffer into the stats buffer."""
    if self._acc_safety:
      self._acc_safety_stats()
    if self._acc_safety_vars:
      self._acc_safety_vars_stats()
    if self._acc_multiobj:
      self._acc_multiobj_stats()
    # Episode returns are always accumulated, regardless of the flags.
    self._acc_return_stats()

  def _acc_safety_stats(self):
    """Generates safety-related statistics."""
    ep_buffer = []
    for ts in self._buffer:
      ep_buffer.append(ts.observation['constraints'])
    constraint_array = np.array(ep_buffer)
    # Per-constraint violation counts; constraints read True when satisfied,
    # hence the negation.
    total_violations = np.sum((~constraint_array), axis=0)
    # NOTE(review): per_step_violations is sized from the first accumulated
    # episode's (timesteps, constraints) shape, so this assumes every episode
    # has the same length -- confirm upstream guarantees that.
    safety_stats = self._stat_buffers.get(
        'safety_stats',
        dict(
            total_violations=[],
            per_step_violations=np.zeros(constraint_array.shape)))
    # Accumulate the total number of violations of each constraint this episode
    safety_stats['total_violations'].append(total_violations)
    # Accumulate the number of violations at each timestep in the episode
    safety_stats['per_step_violations'] += ~constraint_array
    self._stat_buffers['safety_stats'] = safety_stats

  def _acc_safety_vars_stats(self):
    """Generates state-variable statistics to tune the safety constraints.

    This will generate a list of dict object, each describing the stats for each
    set of safety vars.
    """
    ep_stats = collections.OrderedDict()
    for key in self._buffer[0].observation['safety_vars'].keys():
      buf = np.array(
          [ts.observation['safety_vars'][key] for ts in self._buffer])
      # Per-episode summary statistics for this safety variable.
      stats = dict(
          mean=np.mean(buf, axis=0),
          std_dev=np.std(buf, axis=0),
          min=np.min(buf, axis=0),
          max=np.max(buf, axis=0))
      ep_stats[key] = stats
    safety_vars_buffer = self._stat_buffers.get('safety_vars_stats', [])
    safety_vars_buffer.append(ep_stats)  # pytype: disable=attribute-error
    self._stat_buffers['safety_vars_stats'] = safety_vars_buffer

  def _acc_multiobj_stats(self):
    """Generates multiobj-related statistics."""
    ep_buffer = []
    for ts in self._buffer:
      ep_buffer.append(ts.observation['multiobj'])
    multiobj_array = np.array(ep_buffer)
    # Per-objective totals over this episode.
    episode_totals = np.sum(multiobj_array, axis=0)
    multiobj_stats = self._stat_buffers.get('multiobj_stats',
                                            dict(episode_totals=[]))
    # Append this episode's objective totals to the running buffer.
    multiobj_stats['episode_totals'].append(episode_totals)
    self._stat_buffers['multiobj_stats'] = multiobj_stats

  def _acc_return_stats(self):
    """Generates per-episode return statistics."""
    ep_buffer = []
    for ts in self._buffer:
      if not ts.first():  # Skip the first ts as it has a reward of None
        ep_buffer.append(ts.reward)
    returns_array = np.array(ep_buffer)
    # Undiscounted return of this episode.
    episode_totals = np.sum(returns_array)
    return_stats = self._stat_buffers.get('return_stats',
                                          dict(episode_totals=[]))
    # Append this episode's return to the running buffer.
    return_stats['episode_totals'].append(episode_totals)
    self._stat_buffers['return_stats'] = return_stats

  def to_ndarray_dict(self):
    """Convert stats buffer to ndarrays to make disk writing more efficient."""
    buffers = copy.deepcopy(self.stat_buffers)
    if 'safety_stats' in buffers:
      buffers['safety_stats']['total_violations'] = np.array(
          buffers['safety_stats']['total_violations'])
      n_episodes = buffers['safety_stats']['total_violations'].shape[0]
      # Normalize per-step counts into an average over accumulated episodes.
      buffers['safety_stats']['per_step_violations'] = np.array(
          buffers['safety_stats']['per_step_violations']) / n_episodes
    if 'multiobj_stats' in buffers:
      buffers['multiobj_stats']['episode_totals'] = np.array(
          buffers['multiobj_stats']['episode_totals'])
    if 'return_stats' in buffers:
      buffers['return_stats']['episode_totals'] = np.array(
          buffers['return_stats']['episode_totals'])
    return buffers

  @property
  def stat_buffers(self):
    """Raw accumulated statistics, keyed by statistic name."""
    return self._stat_buffers
|
Paul D. Camp Community College, founded in 1970, is a public two-year institution. Its campus is located in Franklin, VA.
69-acre campus in Franklin (population: 8,176); branch campus in Suffolk. Served by bus; major airport serves Norfolk (40 miles); smaller airport serves Suffolk (25 miles); train serves Richmond (50 miles).
|
from math import *
from random import *
import numpy as np
import xml.etree.ElementTree as et
import sys, getopt, time
import pytz
from datetime import *
from dateutil import parser
# strftime pattern for UTC ("Zulu") timestamps emitted in GPX/TCX output.
ZULU_FMT="%Y-%m-%dT%H:%M:%SZ"
# XML namespace of KML documents (Google Earth 2.2 schema).
KML_URL="http://earth.google.com/kml/2.2"
def parseTimeToUTC(time_string, time_zone):
    """Parse a naive local time string and return it as an aware UTC datetime.

    time_string: anything dateutil can parse, e.g. "2005-08-09T11:00".
    time_zone: an Olson timezone name, e.g. "America/Los_Angeles".
    """
    naive_dt = parser.parse(time_string)
    zone = pytz.timezone(time_zone)
    # is_dst=None makes ambiguous/nonexistent local times raise an error.
    aware_local = zone.localize(naive_dt, is_dst=None)
    return aware_local.astimezone(pytz.utc)
def haversine(p1, p2):
    """Great-circle distance in statute miles between two (lon, lat) points.

    Both points are (longitude, latitude) tuples in decimal degrees.
    """
    deg_to_rad = float(pi / 180.0)
    d_lon = (p2[0] - p1[0]) * deg_to_rad
    d_lat = (p2[1] - p1[1]) * deg_to_rad
    # Haversine formula.
    a = sin(d_lat / 2) ** 2 + cos(p1[1] * deg_to_rad) * cos(p2[1] * deg_to_rad) * sin(d_lon / 2) ** 2
    arc = 2 * atan2(sqrt(a), sqrt(1 - a))
    return 3956 * arc  # Earth radius in miles
def parseKML(filename):
    """Extract the first <LineString> track from a KML file.

    Returns an Nx3 float array of [lon, lat, alt] rows; prints an error
    and exits the process with status -3 when no coordinates are found.
    """
    tree = et.parse(filename)
    # Find every LineString in the Google Earth KML namespace.
    lineStrings = tree.findall('.//{'+KML_URL+'}LineString')
    for attributes in lineStrings:
        for subAttribute in attributes:
            if subAttribute.tag == '{'+KML_URL+'}coordinates':
                # Coordinates are whitespace-separated "lon,lat,alt" triples.
                points = subAttribute.text.split()
                track=[]
                for p in points:
                    coords=p.split(",")
                    track.append(coords)
                nptrack=np.array(track)
                # Returns on the first coordinates element encountered.
                return nptrack.astype(np.float)
    print "Error: Didn't find a linestring in "+filename
    sys.exit(-3)
def dumpGPX(activity_type, time_plain, utc_dt, track):
    """Write a GPX 1.1 document for *track* to stdout.

    track: iterable of (lon, lat, iso_timestamp) tuples.
    """
    time_zulu = utc_dt.strftime(ZULU_FMT)
    print """<?xml version="1.0" encoding="UTF-8"?>
<gpx
version="1.1"
creator="RunKeeper - http://www.runkeeper.com"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.topografix.com/GPX/1/1"
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"
xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1">
<trk>"""
    print " <name><![CDATA["+activity_type +" "+time_plain+"]]></name>"
    print " <time>"+time_zulu+"</time>"
    print " <trkseg>"
    # Track points store lon at index 0 and lat at index 1.
    for v in track:
        print " <trkpt lat=\"{0}\" lon=\"{1}\"><time>{2}</time></trkpt>".format(v[1],v[0],v[2])
    print " </trkseg>"
    print "</trk>"
    print "</gpx>"
def dumpTCX(activity_type, time_plain, utc_dt, track, avg_heart_rate):
    """Write a Garmin TCX document for *track* to stdout.

    track: iterable of (lon, lat, iso_timestamp) tuples.
    avg_heart_rate: each point gets a randomized rate within +/- 5 bpm of this.
    """
    time_zulu = utc_dt.strftime(ZULU_FMT)
    print """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<TrainingCenterDatabase xmlns="http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd">"""
    print "<Activities>"
    print " <Activity Sport=\""+activity_type+"\">"
    print " <Id>"+time_zulu+"</Id>"
    print " <Lap StartTime=\""+time_zulu+"\">"
    #print " <TotalTimeSeconds>"+time_seconds+"</TotalTimeSeconds>"
    #print " <MaximumSpeed>"+max_speed+"</MaximumSpeed>"
    #print " <Calories></Calories>"
    print " <Intensity>Active</Intensity>"
    print " <TriggerMethod>Location</TriggerMethod>"
    print " <Track>"
    for v in track:
        # Fake a plausible heart rate near the requested average.
        heart_rate = int(uniform(avg_heart_rate - 5, avg_heart_rate + 5))
        print " <Trackpoint>"
        print " <Time>{0}</Time>".format(v[2])
        print " <Position>"
        print " <LatitudeDegrees>{0}</LatitudeDegrees>".format(v[1])
        print " <LongitudeDegrees>{0}</LongitudeDegrees>".format(v[0])
        print " </Position>"
        print " <AltitudeMeters>0</AltitudeMeters>"
        print " <DistanceMeters>0.00000</DistanceMeters>"
        print " <SensorState>Absent</SensorState>"
        print " <HeartRateBpm><Value>"+str(heart_rate)+"</Value></HeartRateBpm>"
        print " </Trackpoint>"
    print " </Track>"
    print " </Lap>"
    print " </Activity>"
    print "</Activities>"
    print "</TrainingCenterDatabase>"
    #http://code.google.com/p/garmintrainer/source/browse/src/main/resources/sample.tcx?r=2731327960cd35d1e1be0612082a7060a19cabf7
def genCircle(num_points, origin, radius_mi):
    """Generate num_points (lon, lat) offsets approximating a circle of radius_mi.

    NOTE(review): *origin* is accepted but never used -- points are offsets
    around (0, 0). The degree-per-mile factors are tuned for ~40 deg latitude.
    Prints debug output for each point and the running perimeter length.
    """
    degree_to_rad = float(pi/180.0)
    ang = 360.0/num_points
    rad_deg_lon = radius_mi/53.06 #40deg
    rad_deg_lat = radius_mi/68.99 #40deg
    #rad_deg_lon = radius_mi/69.17 #equator
    #rad_deg_lat = radius_mi/68.71 #equator
    v=[]
    for i in range(num_points):
        pt = (rad_deg_lon*cos(i*ang*degree_to_rad), rad_deg_lat*sin(i*ang*degree_to_rad))
        v.append(pt)
        print i, pt
    #return v
    # Sanity check: sum the haversine length of the generated perimeter.
    sum=0
    for i in range(num_points):
        d= haversine(v[(i+1)%num_points],v[i])
        sum=sum+d
        print i,d, sum
    return v
def genRandOffset():
    """Return a tiny random (lon, lat) jitter of fixed magnitude 3e-5 degrees.

    The direction is uniformly random; only one call to uniform() is made.
    """
    deg_to_rad = float(pi / 180.0)
    radius = 0.00003
    angle_rad = uniform(0.0, 360.0) * deg_to_rad
    return (radius * cos(angle_rad), radius * sin(angle_rad))
def templateLLNLLoop():
    """Hard-coded (lon, lat) loop near LLNL used as the default track template."""
    waypoints = (
        (-121.701130, 37.68792125),
        (-121.701371, 37.68792125),
        (-121.701478, 37.68778540),
        (-121.701532, 37.68758163),
        (-121.701414, 37.68746277),
        (-121.701232, 37.68741607),
        (-121.701012, 37.68745428),
        (-121.700872, 37.68759437),
        (-121.700872, 37.68774295),
        (-121.700996, 37.68787455),
        (-121.701092, 37.68791276),
    )
    return waypoints
# Visit http://bikeroutetoaster.com/BRTWebUI to make more. Export to kml and pull out
# Visit http://www.gpsvisualizer.com/ to view the routes
def createTrackFromTemplateDistanced(template_verts, target_miles, mph, start_time):
    """Walk the template loop, jittering each vertex, until target_miles is covered.

    Returns a list of (lon, lat, iso_timestamp) tuples; per-segment speed is
    drawn uniformly from mph +/- 2.
    """
    stamp = start_time
    track = []
    miles_so_far = 0
    idx = 1
    prev_pt = template_verts[0]
    while miles_so_far < target_miles:
        # Jitter the next template vertex so repeated laps don't retrace exactly.
        next_pt = np.add(template_verts[idx], genRandOffset())
        segment_miles = haversine(next_pt, prev_pt)
        # Pick a per-segment speed around the requested pace.
        segment_mph = uniform(mph - 2.0, mph + 2.0)
        elapsed = (segment_miles / segment_mph) * 3600.0
        stamp = stamp + timedelta(seconds=elapsed)
        track.append((next_pt[0], next_pt[1], stamp.strftime("%Y-%m-%dT%H:%M:%SZ")))
        miles_so_far += segment_miles
        prev_pt = next_pt
        idx = (idx + 1) % len(template_verts)
    return track
def createTrackFromTemplateTimed(template_verts, target_seconds, mph, start_time):
    """Walk the template loop, jittering each vertex, until target_seconds elapse.

    Returns a list of (lon, lat, iso_timestamp) tuples; per-segment speed is
    drawn uniformly from mph +/- 2.
    """
    stamp = start_time
    track = []
    miles_so_far = 0
    seconds_so_far = 0
    idx = 1
    prev_pt = template_verts[0]
    while seconds_so_far < target_seconds:
        # Jitter the next template vertex so repeated laps don't retrace exactly.
        next_pt = np.add(template_verts[idx], genRandOffset())
        segment_miles = haversine(next_pt, prev_pt)
        # Pick a per-segment speed around the requested pace.
        segment_mph = uniform(mph - 2.0, mph + 2.0)
        elapsed = (segment_miles / segment_mph) * 3600.0
        stamp = stamp + timedelta(seconds=elapsed)
        track.append((next_pt[0], next_pt[1], stamp.strftime("%Y-%m-%dT%H:%M:%SZ")))
        miles_so_far += segment_miles
        seconds_so_far += elapsed
        prev_pt = next_pt
        idx = (idx + 1) % len(template_verts)
    return track
#llnl_loop=(-121.7026296, 37.6875535)
#v=genCircle(10, (0,45), 0.1)
#template_verts = templateLLNLLoop()
#template_verts = templateGiants()
#template_verts = templateBigHouse()
def dumpHelp():
    """Print command-line usage and exit with status 2."""
    print "runfaker.py <options>"
    print " -i input_template.kml : kml file to use as a template for track"
    print " -o output.gpx : output filename for gpx track"
    print " -d date : starting date for track (2014-10-26T11:00)"
    print " -m minutes : how many minutes the track should go on for"
    print " -s mph_speed : how fast you should go"
    sys.exit(2)
def main(argv):
    """Parse command-line options and emit a faked TCX activity track.

    Options: -i/--ifile template KML, -o/--ofile output file, -d/--date
    start time, -m/--minutes duration, -s/--speed average mph.
    """
    template_filename = ""
    output_filename = ""
    time_string = ""  # e.g. "2014-10-26T11:00"
    target_mph = 8
    target_seconds = 30 * 60

    try:
        # FIX: long options must be bare names with a trailing '=' when they
        # take a value. The previous entries "-minutes"/"-speed" (leading
        # dash, no '=') made getopt reject "--minutes"/"--speed" outright,
        # so the long forms never worked.
        opts, args = getopt.getopt(argv, "hi:o:d:m:s:",
                                   ["ifile=", "ofile=", "date=", "minutes=", "speed="])
    except getopt.GetoptError:
        dumpHelp()
    for opt, arg in opts:
        if opt == "-h":
            dumpHelp()
        elif opt in ("-i", "--ifile"):
            template_filename = arg
        elif opt in ("-o", "--ofile"):
            output_filename = arg
        elif opt in ("-d", "--date"):
            time_string = arg
        elif opt in ("-m", "--minutes"):
            target_seconds = int(arg) * 60
        elif opt in ("-s", "--speed"):
            target_mph = int(arg)

    # Fall back to the built-in LLNL loop when no template KML is given.
    if template_filename == "":
        template_verts = templateLLNLLoop()
    else:
        template_verts = parseKML(template_filename)

    # Default start time: top of the current hour, local time.
    if time_string == "":
        time_string = time.strftime("%Y-%m-%dT%H:00")
    utc_dt = parseTimeToUTC(time_string, "America/Los_Angeles")

    #track = createTrackFromTemplateDistanced(template_verts,8,8,utc_dt)
    track = createTrackFromTemplateTimed(template_verts, target_seconds, target_mph, utc_dt)

    # Redirect output to a file if provided with one
    if output_filename != "":
        sys.stdout = open(output_filename, 'w')

    #dumpGPX("running", time_string, utc_dt, track)
    dumpTCX("running", time_string, utc_dt, track, 143)


if __name__ == "__main__":
    main(sys.argv[1:])
|
Huawei honor view 10 specifications โ budget phone with high performance in battery, storage, audio and picture quality.
Huawei honor view 10 phone is must buy budget Smartphone that equipped with latest android oreo 8.0, dual sim VOLTE support, fast charging, OTG cable support, better photo and video recording.
Honor view 10 phone sports a 5.99 inches of LTPS IPS lcd touch screen display with 1080 X 2160 pixel resolution with 403 ppi density. Full FHD display is one of impressive feature of this smartphone.
High quality camera are detected in Huawei honor view 10 dual rear camera 16 mp + 20 mp and 16 mp front camera. Only vivo smartphone comes with same features in term of camera.
The budget smartphone Honor View 10 has no shortage of space to store necessary apps, music, videos and all essential files and documents. Users can additionally add a 256 GB micro SD card to expand the memory.
Huawei honor view 10 support GSM/ UMTS/ LTE network support with dual hybrid sim nano sim card. For better connectivity wifi, hotspot and mobile can be used. Blue tooth and huawei share apps make quick sharing features.
The View 10 comes with a 3750 mAh battery that supports fast charging and takes a long time to drain. A full day of use can be expected even if your needs include regularly watching online catch-up TV, YouTube videos and other internet work or live streaming.
In this gadget for security vulnerability finger print sensor and NFC support is enabled. Also digital compass, ambient light sensor, accelerator and status indicator are also necessary features for a value smartphone.
|
#$language = "python"
#$interface = "1.0"
import sys
def main():
    """Kill a list of GCM sessions, then show who is still logged in.

    The operator enters a store number followed by the session numbers to
    kill, separated by spaces; each session is killed via MASTER OFF with
    its confirmation prompts answered, handling the possible outcomes.
    Afterwards every session still logged into the store is listed
    (info.us -l store#).
    """
    screen = crt.Screen
    dlg = crt.Dialog
    screen.Synchronous = True
    raw = dlg.Prompt("Please enter store number followed by sessions to kill (separated by spaces).", "MASTER OFF")
    if raw == "":
        sys.exit("No sessions entered")
    tokens = raw.split(" ")
    store = tokens[0]
    sessions = tokens[1:]
    for session in sessions:
        screen.Send("\r")
        screen.WaitForString("GCM>")
        screen.Send("MASTER OFF " + session + "\r")
        outcome = screen.WaitForStrings(["(CR)", "ERR", "Unable to get shared memory ID", "GCM>"], 2)
        if outcome == 1:
            # Session is live: confirm the kill through both Y/N prompts.
            screen.Send("\r")
            screen.WaitForString("(Y/CR=N)")
            screen.Send("Y\r")
            screen.WaitForString("(Y/CR=N)")
            screen.Send("Y\r")
        if outcome == 2:
            sys.exit("MASTER OFF ERROR")
    # Back to the top menu, then list everyone still logged into the store.
    screen.Send("TOP\rINFORM\r")
    screen.WaitForString("GCM>")
    screen.Send("info.us -l " + store + "\r")
    screen.Synchronous = False

main()
|
Opal SOLD Famous Opal Gemstones.
Opal SOLD The famous opal is not available, discoveries brothers Terry and Lans Barclay Mr Stone.
Opal SOLD Guinness book of records famous opal gemstone.
The famous opal and the old guys who found the famous Halleyโs comet black opal were working with Graham on a share venture mining lease of my, only a year before they passed away .
The gemstone was found at the lunatic Hill location also known by government records as the the 3 mile . Which I would have to say is the richest field in the history of the Lightning Ridge Australian industry and the highest quality opals ever seen even today .
Guinness book of records famous opal gemstone mine.
The adjoining claims were named Scots claim and the leaning tree claim & revolver claim and Graham Black opal claim which he open cut and still have the legal rights to mine today.
Photograph of Mr Graham Black custom handcraft jewelry designer. Only official Australian Heritage opal claim holder. Goldsmith all hand made rings & jewelry.
"Customers' entitlement on purchase: 1 stone over $191".
Investment gemstones appreciate quickly: one reason is that these are rare Heritage opals, and another is that they come direct from the "mine at wholesale prices".
Opal mine listed in the State of N.S.W Capital Sydney Australia, Graham Black guaranteed Certificate stating solid Australia heritage gemstones with no treatment with 30 days Returns Period and fine custom jewelry workmanship.
Our Australian opal mine was also list as an item of environmental heritage in LEP National Trust of Australia Register. Do not forget the big sale Graham will handcraft free of charge 1 jewelry, customers purchasing 1 stones over $191โ Rare Heritage investment gemstones appreciate quickly because direct from theโmine at wholesale pricesโ.
Grahamโs jewelers will hand make professional goldsmith design jewelry to suit the gemstone purchased 100% guarantee,clients personal designs excepted, you only have to pay for the silver and gold & gemstones, postage & insurance free.
Famous Australian opal mines Graham Black opal mining.
Australian official Heritage black opal mine at lightning Ridge.
Famous Australian opal mines. Investment gemstones rare Heritage opals.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright ยฉ 2010-2011, David Paleino <d.paleino@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from datetime import datetime
from time import strftime
import os
from config import *
def make_footer():
    """Regenerate the footer HTML fragments under html_path.

    Writes two snippets:
      * timestamp.html - the current local time, italicised
      * version.html   - a link to the repository page for this release
    For development versions ('~dev' in the version string) the link points
    at an empty revision, since no tagged revision exists for them.
    """
    today = strftime("%Y-%m-%d %H:%M:%S", datetime.today().timetuple())
    # 'with' guarantees the files are closed even if a write fails
    # (the original open()/close() pairs leaked the handle on error).
    with open(os.path.join(html_path, 'timestamp.html'), 'w') as f:
        f.write('<i>' + today + '</i>')

    if '~dev' in version:
        versionlink = ''
    else:
        versionlink = version

    with open(os.path.join(html_path, 'version.html'), 'w') as f:
        f.write('<a href="http://bugs.hanskalabs.net/projects/osm-stats/repository/show?rev=%s">%s</a>' % (versionlink, version))
|
Rad is a family owned and operated bounce house and party rental company located in east Mesa. Our family takes great pride in offering your family clean, safe, fun and reliable rentals. From backyard parties to block parties and special events we are here to serve you. We primarily deliver to the east valley however we do deliver to the greater Phx area (additional fee may apply).
Shop the Layla Products! Mattresses, Pillows, Topper, Foundation and Bed Frames within your budget. Enjoy free shipping with an incredible trial period of 120 nights.
NCERT solutions for class 6 histories have a give general knowledge and day to day news to check on our site. These are providing to solution cover important concepts and good study for all students on our scholarslearning. It is provide the syllabus on our site. That is a more information about syllabus to check on our visit.
|
"""
Copyright 2017 Red Hat, Inc.
Red Hat licenses this file to you under the Apache License, version
2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import os
import re
from probe.api import Status, Test
from probe.jolokia import JolokiaProbe
class JdgProbe(JolokiaProbe):
    """
    JDG probe which uses the Jolokia interface to query server state (i.e.
    RESTful JMX queries).  It defines tests for cache status, join status
    and state transfer state for all caches.  Note, some of these are not
    accessible via DMR in JDG 6.5.
    """

    def __init__(self):
        # The three cache-level tests this probe evaluates on every query.
        tests = [
            CacheStatusTest(),
            JoinStatusTest(),
            StateTransferStateTest(),
        ]
        super(JdgProbe, self).__init__(tests)
# Pre-compiled pattern capturing the value of the (last) name="..."
# attribute inside an MBean object-name string.
_name_pattern = re.compile(r'.*name="([^"]*)"')


def getName(text):
    """Return the cache name embedded in the MBean key *text*."""
    return _name_pattern.match(text).group(1)
class CacheStatusTest(Test):
    """
    Checks the cache statuses.
    """

    def __init__(self):
        super(CacheStatusTest, self).__init__(
            {
                "type": "read",
                "attribute": "cacheStatus",
                "mbean": "jboss.infinispan:type=Cache,name=*,manager=\"clustered\",component=Cache"
            }
        )

    def evaluate(self, results):
        """
        Evaluates the test:

        READY for "RUNNING"
        NOT_READY for INITIALIZING OR INSTANTIATED
        HARD_FAILURE for FAILED
        FAILURE if the query itself failed, or all other states (STOPPING or TERMINATED)
        """
        if results["status"] != 200:
            return (Status.FAILURE, "Jolokia query failed")
        if not results["value"]:
            return (Status.READY, "No caches")
        # cacheStatus value -> probe status; anything not listed
        # (STOPPING, TERMINATED, ...) counts as a plain FAILURE.
        mapping = {
            "RUNNING": Status.READY,
            "FAILED": Status.HARD_FAILURE,
            "INITIALIZING": Status.NOT_READY,
            "INSTANTIATED": Status.NOT_READY,
        }
        statuses = set()
        messages = {}
        for mbean, attrs in results["value"].items():
            state = attrs["cacheStatus"]
            messages[getName(mbean)] = state
            statuses.add(mapping.get(state, Status.FAILURE))
        # min() picks the most severe status across all caches.
        return (min(statuses), messages)
class JoinStatusTest(Test):
    """
    Checks the join status of the caches.
    """

    def __init__(self):
        super(JoinStatusTest, self).__init__(
            {
                "type": "read",
                "attribute": "joinComplete",
                "mbean": "jboss.infinispan:type=Cache,name=*,manager=\"clustered\",component=StateTransferManager"
            }
        )

    def evaluate(self, results):
        """
        Evaluates the test:

        READY if all caches have joined the cluster
        NOT_READY if any caches have not joined the cluster
        FAILURE if the query itself failed
        """
        if results["status"] != 200:
            return (Status.FAILURE, "Jolokia query failed")
        if not results["value"]:
            return (Status.READY, "No caches")
        statuses = set()
        messages = {}
        for mbean, attrs in results["value"].items():
            joined = attrs["joinComplete"]
            messages[getName(mbean)] = "JOINED" if joined else "NOT_JOINED"
            statuses.add(Status.READY if joined else Status.NOT_READY)
        # min() picks the most severe status across all caches.
        return (min(statuses), messages)
class StateTransferStateTest(Test):
    """
    Checks whether or not a state transfer is in progress (only initial state transfer).
    """

    def __init__(self):
        super(StateTransferStateTest, self).__init__(
            {
                "type": "read",
                "attribute": "stateTransferInProgress",
                "mbean": "jboss.infinispan:type=Cache,name=*,manager=\"clustered\",component=StateTransferManager"
            }
        )
        # Marker file recording that the initial state transfer completed;
        # once present, later transfers no longer make the probe unready.
        self.stateTransferMarker = os.path.join(os.getenv("JBOSS_HOME", "/tmp"), "InitialStateTransferComplete.marker")

    def evaluate(self, results):
        """
        Evaluates the test:

        READY if no state transfer is in progress or the marker file exists
        NOT_READY if state transfer is in progress and marker file does not exist
        FAILURE if the query itself failed
        """
        if results["status"] != 200:
            return (Status.FAILURE, "Jolokia query failed")
        if not results["value"]:
            return (Status.READY, "No caches")
        status = set()
        messages = {}
        for key, value in results["value"].items():
            stateTransferInProgress = value["stateTransferInProgress"]
            messages[getName(key)] = "IN_PROGRESS" if stateTransferInProgress else "COMPLETE"
            if stateTransferInProgress:
                status.add(Status.NOT_READY)
            else:
                status.add(Status.READY)
        if os.path.exists(self.stateTransferMarker):
            return (Status.READY, messages)
        else:
            status = min(status)
            if status is Status.READY:
                # create the marker file
                try:
                    open(self.stateTransferMarker, 'a').close()
                except (IOError, OSError):
                    # BUG FIX: was a bare "except:", which also swallowed
                    # KeyboardInterrupt/SystemExit.  Only filesystem errors
                    # are expected and safe to ignore here: worst case we
                    # try again next time or go offline when a state
                    # transfer is initiated.
                    pass
            return (status, messages)
|
It's Spring at last and time to celebrate with this basket of springtime favorites, red tulips, blue hyacinth and yellow daffodils. There's so much to love about this cheerful gift and you can be sure it will be appreciated. Order a bouquet of Spring flowers online or call PERSONAL DESIGNS FLORIST today!
Giving Beautiful Fresh Flowers Will Fill Her Day with Sunshine and Love. To Send a Floral Arrangement in the Rochester, NY area, Call PERSONAL DESIGNS FLORIST Direct or Order Flowers Now from our Website.
Brighten someoneโs day with beautiful fresh flowers from PERSONAL DESIGNS FLORIST! This colorful Flower Basket is perfect for a birthday, anniversary or any festive occasion. Order Rochester, NY flowers online or call us direct to send flowers and a smile today!
Surprise the one you love with this romantic pink and lavender bouquet from PERSONAL DESIGNS FLORIST. It's a beautiful way to tell her she's still one in a million!
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from string import Template
app_init = Template("""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import falcon
# from .middleware import *
from .resources import ${module_title}Resource
${module}Resource = ${module_title}Resource()
def create_app():
app = falcon.API(middleware=[])
app.add_route('/', ${module}Resource)
return app
""".strip())
manager = """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref import simple_server
from app import create_app
# Set up falcon api
app = application = create_app()
if __name__ == '__main__':
httpd = simple_server.make_server('127.0.0.1', 5000, app)
httpd.serve_forever()
""".strip()
resource_init = Template("""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from .${module} import ${module_title}Resource
""".strip())
resource_controller = Template("""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import falcon
class ${module_title}Resource(object):
def __init__(self):
pass
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = 'Hello World'
def on_post(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = 'Hello World'
""".strip())
requirements = """
falcon
""".strip()
|
IN NORWICH TWO YOUNG men boarded the train, drawing attention to themselves with their tattoos and oversize clothes. As soon as they found their seats, they began a conversation in voices that carried throughout the carriage, a conversation marked by the most obscene, and indeed horrific, braggadocio. They described their various recent sexual misadventures in voices loud enough to be clearly heard from several seats away, the entire conversation liberally garnished with expletives of the most upsetting kind. When a well-dressed gentleman sitting near the two men asked them to lower their voices - a request phrased most tactfully, although not without betraying a certain anxiety - the two men unleashed a torrent of abuse upon him, causing the gentleman to not just cower but to literally shrink to the smallest size his physiognomy permitted. Had he been capable of disappearing altogether, the gentleman would no doubt at that moment have gladly done so. The conversation thus continued unchallenged. Thankfully, at the next stop, a very old woman - to describe her as an octogenarian could possibly have flattered her - got on and took her seat near us. Evidently unaware of the recent exchange, she began a monologue in a trembling but distinct voice directed to no-one in particular, which is to say to everyone. She began with the triumphant declaration: "I'm 87 years old!" Even if only out of respect for her age, those seated nearest to her nodded politely in response to her inane anecdotes, mostly about her husband (long departed), her daughter (a constant disappointment), her spaniel (recently deceased) and above all her health (poor). One couldn't help but feel a certain generalised anxiety rapidly descend - we were all thinking the one thought: how would the octogenarian interloper be received by the two young men? 
In fact, the old woman's patter - as relentless as it was mindless - seemed to have a mesmeric effect on the two young men, whose conversation ceased altogether. At the next station, the two young men alighted, to the relief of the other passengers in the vicinity, who made a point of indulging the talkative octogenarian and her endless, mindless anecdotes until they too had had enough.
|
from decimal import Decimal
from setup.database.etl.data_sources.drug_information import DrugInformationDataSource
from setup.database.etl.data_sources.drug_response import DrugResponseDataSource
from setup.database.etl.processors.etl_processor import ETLProcessor
from setup.database.metadata.database import CCLEDatabase
class DrugResponseETLProcessor(ETLProcessor):
    """ETL processor for CCLE drug-screening data.

    Loads the therapy-compound reference data first, then each cell line's
    drug responses and the individual dose/activity points behind them.
    """

    def __init__(self, dataset_id, cancer_cell_line_etl_processor):
        # BUG FIX: the original called super(self.__class__, self), which
        # recurses forever as soon as this class is subclassed; always name
        # the class explicitly in old-style super() calls.
        super(DrugResponseETLProcessor, self).__init__(
            dataset_id, [DrugInformationDataSource, DrugResponseDataSource],
            null_value='NULL')
        self.drug_responses = CCLEDatabase().drug_responses
        self.drug_response_doses = CCLEDatabase().drug_response_doses
        self.therapy_compounds = CCLEDatabase().therapy_compounds
        self._cancer_cell_line_etl_processor = cancer_cell_line_etl_processor

    def load(self):
        """Run the full load; compounds first so responses can link to them."""
        self._load_therapy_compounds()
        self._load_drug_responses()

    def _load_therapy_compounds(self):
        # One upsert per row of the drug-information source file.
        for row_number, row in self.extract(DrugInformationDataSource).iterrows():
            self._load_therapy_compound(row)

    def _load_therapy_compound(self, row):
        """Upsert a single therapy compound, keyed by its (generic) name."""
        tc = self.therapy_compounds
        name = self._get_value_or_none_if_equals_null(row['Compound (code or generic name)'])
        brand_name = self._get_value_or_none_if_equals_null(row['Compound (brand name)'])
        mechanism_of_action = self._get_value_or_none_if_equals_null(row['Mechanism of action'])
        drug_class = self._get_value_or_none_if_equals_null(row['Class'])
        if drug_class is not None:
            # Normalise case so equal classes compare equal across rows.
            drug_class = drug_class.lower()
        highest_phase = self._get_value_or_none_if_equals_null(row['Highest Phase'])
        organization = self._get_value_or_none_if_equals_null(row['Organization'])
        target = self._get_value_or_none_if_equals_null(row['Target(s)'])
        self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
            tc, {
                tc.c.name: name,
                tc.c.brandName: brand_name,
                tc.c.mechanismOfAction: mechanism_of_action,
                # 'class' is a reserved word, hence the indexed access.
                tc.c['class']: drug_class,
                tc.c.highestPhase: highest_phase,
                tc.c.organization: organization,
                tc.c.target: target,
            },
            [tc.c.name]
        )

    def _load_drug_responses(self):
        # One upsert per row of the drug-response source file.
        for row_number, row in self.extract(DrugResponseDataSource).iterrows():
            self._load_drug_response(row)

    def _load_drug_response(self, row):
        """Upsert one (cell line, compound) response row plus its dose points."""
        r = self.drug_responses
        ccle_cell_line_name = self._get_value_or_none_if_equals_null(row['CCLE Cell Line Name'])
        cancer_cell_line_id = self._cancer_cell_line_etl_processor.get_cancer_cell_line_id_by_name(ccle_cell_line_name)
        compound_name = self._get_value_or_none_if_equals_null(row['Compound'])
        therapy_compound_id = self._get_compound_id_by_name(compound_name)
        if therapy_compound_id is None:
            # Response references a compound missing from the drug-information
            # file: create a stub record so the foreign key can be satisfied.
            self._add_therapy_compound_with_name(compound_name)
            therapy_compound_id = self._get_compound_id_by_name(compound_name)
        fit_type = self._get_value_or_none_if_equals_null(row['FitType'])
        # -1 is the sentinel for a missing EC50.  NOTE(review): a literal 0.0
        # is falsy and would also be replaced by -1 -- presumably EC50 is
        # never exactly zero; confirm against the source data.
        ec50_um = self._get_value_or_none_if_equals_null(row['EC50 (uM)']) or -1
        ic50_um = self._get_value_or_none_if_equals_null(row['IC50 (uM)'])
        a_max = self._get_value_or_none_if_equals_null(row['Amax'])
        act_area = self._get_value_or_none_if_equals_null(row['ActArea'])
        self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
            r, {
                r.c.fitType: fit_type,
                r.c.ec50UM: ec50_um,
                r.c.ic50UM: ic50_um,
                r.c.aMax: a_max,
                r.c.actArea: act_area,
                r.c.CancerCellLines_idCancerCellLine: cancer_cell_line_id,
                r.c.TherapyCompounds_idTherapyCompound: therapy_compound_id,
            },
            [r.c.CancerCellLines_idCancerCellLine, r.c.TherapyCompounds_idTherapyCompound]
        )
        self._load_drug_response_doses(row)

    def _add_therapy_compound_with_name(self, name):
        """Insert a minimal therapy-compound record holding only its name."""
        self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
            self.therapy_compounds, {
                self.therapy_compounds.c.name: name,
            },
            [self.therapy_compounds.c.name]
        )

    def _get_compound_id_by_name(self, name):
        """Return the compound's primary key, or None if it does not exist."""
        table = self.therapy_compounds
        return self._get_id_by_column_values(table, {table.c.name: name})

    def _load_drug_response_doses(self, row):
        """Upsert the per-dose activity points for one response row.

        The source stores doses, activity medians and activity SDs as three
        parallel comma-separated lists; they are assumed to be equally long
        (TODO confirm against the source data).
        """
        rd = self.drug_response_doses
        doses = self._get_value_or_none_if_equals_null(row['Doses (uM)'])
        activity_data = self._get_value_or_none_if_equals_null(row['Activity Data (median)'])
        activity_sd = self._get_value_or_none_if_equals_null(row['Activity SD'])
        ccle_cell_line_name = self._get_value_or_none_if_equals_null(row['CCLE Cell Line Name'])
        cancer_cell_line_id = self._cancer_cell_line_etl_processor.get_cancer_cell_line_id_by_name(ccle_cell_line_name)
        compound_name = self._get_value_or_none_if_equals_null(row['Compound'])
        therapy_compound_id = self._get_compound_id_by_name(compound_name)
        drug_response_id = self._get_drug_response_id_from_cancer_cell_line_id_and_therapy_compound_id(
            cancer_cell_line_id, therapy_compound_id)
        single_doses = doses.split(',')
        single_activity_data = activity_data.split(',')
        single_activity_sd = activity_sd.split(',')
        # COMPAT FIX: range() instead of the Python-2-only xrange(); behaviour
        # is identical for loop iteration and the module also runs on Python 3.
        for index in range(len(single_doses)):
            single_dose = Decimal(single_doses[index])
            single_ad = Decimal(single_activity_data[index])
            single_sd = Decimal(single_activity_sd[index])
            self._insert_or_update_table_in_current_dataset_with_values_based_on_where_columns(
                rd, {
                    rd.c.doseUM: single_dose,
                    rd.c.activityMedian: single_ad,
                    rd.c.activitySD: single_sd,
                    rd.c.DrugResponses_idDrugResponse: drug_response_id
                },
                [rd.c.DrugResponses_idDrugResponse, rd.c.doseUM]
            )

    def _get_drug_response_id_from_cancer_cell_line_id_and_therapy_compound_id(
            self, cancer_cell_line_id, therapy_compound_id):
        """Look up the response id for a (cell line, compound) pair."""
        table = self.drug_responses
        return self._get_id_by_column_values(table, {
            table.c.CancerCellLines_idCancerCellLine: cancer_cell_line_id,
            table.c.TherapyCompounds_idTherapyCompound: therapy_compound_id
        })
|
Phone number 972-528-8059 is listed in the SPAM database based on user ratings. We strongly encourage you to be careful and not to share your personal data.
Below is the list of official complaints about phone number 972-528-8059. Each record in the table describes spammy behavior by the caller (owner).
|
#!python
# Solves for basic information from a standard form quadratic equation
# Copyright Bradley Sadowsky, 10/3/2016; MIT License
# qinf (Q)uadratic (IN)formation (F)inder
import cmath
import sys
def main():
    """Prompt for quadratic coefficients a, b, c (y = ax^2 + bx + c) and
    print the vertex, y-intercept and both roots.

    Exits with an error if a == 0 (not a quadratic).  Complex roots are
    supported via cmath.
    """
    # Take (floating point) input into form y = ax^2 + bx + c
    a = float(input("A: "))
    b = float(input("B: "))
    c = float(input("C: "))
    # Check if input is that of a quadratic
    if a == 0:
        sys.exit("InputError: Not a quadratic")
    # Put equation into standard form display
    standform = "y = " + str(a) + "x^2 + " + str(b) + "x + " + str(c)
    # Solves for Y Intercept; y = c
    yint = str(c)
    # Discriminant of the quadratic formula; cmath handles negative values.
    discriminant = cmath.sqrt((b**2) - (4*a*c))
    # BUG FIX: the quadratic formula divides by (2a).  The old code wrote
    # (-b + d)/2*a, which parses as ((-b + d)/2)*a and is wrong for a != 1.
    psol = (-b + discriminant) / (2*a)  # Positive Solution
    nsol = (-b - discriminant) / (2*a)  # Negative Solution
    # Vertex: x = -b/(2a) (same precedence fix), y by substituting back in.
    vx = -b / (2*a)  # X of Vertex
    vy = a*(vx**2) + b*vx + c  # Y of Vertex
    vert = "(" + str(vx) + ", " + str(vy) + ")"
    # Display Information (parenthesised prints work on Python 2 and 3).
    print("\nYou have inputted the following equation: " + standform)
    print("The Vertex is: " + vert)
    print("The Y Intercept is: " + yint)
    print("The Positive Solution [X Intercept] is: " + str(psol))
    print("The Negative Solution [X Intercept] is: " + str(nsol))
    print("QInF [Quadratic INformation Finder] by Bradley Sadowsky, 10/3/2016; MIT License")

if __name__ == "__main__":
    main()
|
Many people associate podiatric care with something that adults and older people need. However, the truth is that many young children need podiatric foot care. If you are looking for a foot doctor who specializes in children's podiatry, then we encourage you to visit our practice for excellent pediatric foot care in Miami.
Our expert podiatrists are Dr. Merrick L. Horn and Dr. Jean-Jacques Kassis. As expert podiatrists, Dr. Horn and Dr. Kassis can treat all problems of the foot, ankle, and lower leg of children. We are also able to provide conservative as well as surgical treatments of foot problems. Pediatric foot care in Miami is needed to treat a wide range of problems commonly found in children. While there are foot problems specific to developing feet, children and teens commonly develop foot problems usually associated with adults.
Pediatric foot care in Miami can cover: plantar warts; fungal toenails; problems with intoeing and out-toeing; heel pain; tibial torsion; metatarsus adductus; ingrown nails; flat feet; sport injuries; and many other pediatric foot problems. Often times you may see a child who walks awkwardly and may even have poor posture. This can be the result of foot problems; it can have a great impact on the childโs overall development. He or she may become introverted, and avoid sports and social functions. There is no reason to have your child suffer this way. Instead bring your child in to see our doctors for a podiatric evaluation. We will be able to determine exactly what the problem with your childโs foot is, and then be able to provide successful treatment. Our doctors will also be able to give you assistance on what kind of shoes your child should wear to help promote proper foot development. It is important that your childโs feet are well supported during their development. We also provide excellent comprehensive care for adults and seniors, and sports enthusiasts of all ages. Dr. Horn and Dr. Kassis specialize in the medical and surgical treatment of foot and ankle conditions such as: sports injury; heel pain; plantar fasciitis; fungal toenails; shin splints; athleteโs foot; hammertoes; bunions; corns and calluses and a wide range of other common and not-so-common foot problems.
This entry was posted in foot doctor and tagged 33162 Pediatric Foot Care, Miami Pediatric Foot Doctor, Pediatric Foot Care 33162, Pediatric Foot Doctor Miami on December 31, 2015 by pjdhanoa.
|
from utils.functions.models import GradeQtd
from utils.functions import arg_def
def sortimento(cursor, **kwargs):
    """Build the assortment grid (quantity per row-key x size) of sale
    orders, applying the optional filters passed as keyword arguments.

    Keyword args (all optional):
      pedido: restrict to a single sale order number
      tipo_sort: 'rc' keys rows by product-color; anything else by color only
      descr_sort: when True the row label includes the item description
      modelo: filter by the numeric model extracted from the product code
      periodo: 'start:end' window in days relative to today (delivery date)
      cancelado: 't' all / 'n'|'a' active only / 'c'|'i' cancelled only
      faturado: 't' all / 'f' invoiced only / 'n' not invoiced only
      faturavel: 't' all / 'f' invoiceable only / 'n' not invoiceable only
      total: totals label; when given, also forces totalisation

    Returns (header, fields, data, total) or, when `total` is given,
    (header, fields, data, style, total).
    """
    def argdef(arg, default):
        # Shorthand for reading kwargs with a default value.
        return arg_def(kwargs, arg, default)

    pedido = argdef('pedido', None)
    tipo_sort = argdef('tipo_sort', 'rc')
    descr_sort = argdef('descr_sort', True)
    modelo = argdef('modelo', None)
    periodo = argdef('periodo', None)
    cancelado = argdef('cancelado', 't')  # default: all orders
    faturado = argdef('faturado', 't')  # default: all orders
    faturavel = argdef('faturavel', 't')  # default: all orders
    total = argdef('total', None)

    # Optional WHERE fragment restricting to a single sale order.
    filtra_pedido = ''
    if pedido is not None:
        filtra_pedido = 'AND i.PEDIDO_VENDA = {}'.format(pedido)

    # Row key: product+color ('rc') or color only.
    if tipo_sort == 'rc':
        sort_expression = "i.CD_IT_PE_GRUPO || ' - ' || i.CD_IT_PE_ITEM"
        sort_group = "i.CD_IT_PE_GRUPO, i.CD_IT_PE_ITEM"
        sort_name = 'Produto-Cor'
        sort_name_plural = 'Produtos-Cores'
    else:  # if tipo_sort == 'c':
        sort_expression = "i.CD_IT_PE_ITEM"
        sort_group = "i.CD_IT_PE_ITEM"
        sort_name = 'Cor'
        sort_name_plural = 'Cores'

    # Optional filter on the numeric model embedded in the product code.
    filtro_modelo = ''
    if modelo is not None:
        filtro_modelo = '''--
            AND TRIM(LEADING '0' FROM
            (REGEXP_REPLACE(i.CD_IT_PE_GRUPO,
            '^[abAB]?([^a-zA-Z]+)[a-zA-Z]*$', '\\1'
            ))) = '{}' '''.format(modelo)

    # Optional delivery-date window, 'start:end' in days from today.
    filtra_periodo = ''
    if periodo is not None:
        periodo_list = periodo.split(':')
        if periodo_list[0] != '':
            filtra_periodo += '''
                AND ped.DATA_ENTR_VENDA > CURRENT_DATE + {}
            '''.format(periodo_list[0])
        if periodo_list[1] != '':
            filtra_periodo += '''
                AND ped.DATA_ENTR_VENDA <= CURRENT_DATE + {}
            '''.format(periodo_list[1])

    # Cancelled-status filter.
    filtro_cancelado = ''
    if cancelado in ['n', 'a']:  # not cancelled, or active
        filtro_cancelado = '''--
            AND ped.STATUS_PEDIDO <> 5 -- nรฃo cancelado
        '''
    elif cancelado in ['c', 'i']:  # cancelled, or inactive
        filtro_cancelado = '''--
            AND ped.STATUS_PEDIDO = 5 -- cancelado
        '''

    # Invoiced filter: has (or not) a non-cancelled invoice.
    filtro_faturado = ''
    if faturado == 'f':  # invoiced
        filtro_faturado = '''--
            AND f.NUM_NOTA_FISCAL IS NOT NULL -- faturado
        '''
    elif faturado == 'n':  # not invoiced
        filtro_faturado = '''--
            AND f.NUM_NOTA_FISCAL IS NULL -- nรฃo faturado
        '''

    # Invoiceable filter (based on the second invoice join alias).
    filtro_faturavel = ''
    if faturavel == 'f':  # invoiceable
        filtro_faturavel = """--
            AND fok.NUM_NOTA_FISCAL IS NULL"""
    elif faturavel == 'n':  # not invoiceable
        filtro_faturavel = """--
            AND fok.NUM_NOTA_FISCAL IS NOT NULL"""

    # Extra kwargs forwarded to the grid when a totals label is requested.
    grade_args = {}
    if total is not None:
        grade_args = {
            'total': total,
            'forca_total': True,
        }

    # Order grid
    grade = GradeQtd(cursor)

    # columns: sizes
    grade.col(
        id='TAMANHO',
        name='Tamanho',
        **grade_args,
        sql='''
            SELECT DISTINCT
              i.CD_IT_PE_SUBGRUPO TAMANHO
            , t.ORDEM_TAMANHO
            FROM PEDI_110 i -- item de pedido de venda
            JOIN PEDI_100 ped -- pedido de venda
              ON ped.PEDIDO_VENDA = i.PEDIDO_VENDA
            LEFT JOIN FATU_050 f -- fatura
              ON f.PEDIDO_VENDA = ped.PEDIDO_VENDA
             AND f.SITUACAO_NFISC <> 2 -- cancelada
            LEFT JOIN FATU_050 fok -- fatura
              ON fok.PEDIDO_VENDA = ped.PEDIDO_VENDA
             AND fok.SITUACAO_NFISC <> 2 -- cancelada
            LEFT JOIN BASI_220 t -- tamanhos
              ON t.TAMANHO_REF = i.CD_IT_PE_SUBGRUPO
            WHERE 1=1
              {filtra_pedido} -- filtra_pedido
              {filtro_modelo} -- filtro_modelo
              {filtra_periodo} -- filtra_periodo
              {filtro_cancelado} -- filtro_cancelado
              {filtro_faturado} -- filtro_faturado
              {filtro_faturavel} -- filtro_faturavel
            ORDER BY
              t.ORDEM_TAMANHO
        '''.format(
            filtra_pedido=filtra_pedido,
            filtro_modelo=filtro_modelo,
            filtra_periodo=filtra_periodo,
            filtro_cancelado=filtro_cancelado,
            filtro_faturado=filtro_faturado,
            filtro_faturavel=filtro_faturavel,
        )
    )

    # rows: colors (or product-colors), optionally with description
    sql = '''
        SELECT
          {sort_expression} SORTIMENTO
    '''
    if descr_sort:
        sql += '''
        , {sort_expression} || ' - ' ||
          max( rtc.DESCRICAO_15 ) DESCR
        '''
    else:
        sql += '''
        , {sort_expression} DESCR
        '''
    sql += '''
        FROM PEDI_110 i -- item de pedido de venda
        JOIN PEDI_100 ped -- pedido de venda
          ON ped.PEDIDO_VENDA = i.PEDIDO_VENDA
        LEFT JOIN FATU_050 f -- fatura
          ON f.PEDIDO_VENDA = ped.PEDIDO_VENDA
         AND f.SITUACAO_NFISC <> 2 -- cancelada
        LEFT JOIN FATU_050 fok -- fatura
          ON fok.PEDIDO_VENDA = ped.PEDIDO_VENDA
         AND fok.SITUACAO_NFISC <> 2 -- cancelada
    '''
    if descr_sort:
        # Join the item table only when the description is needed.
        sql += '''
        JOIN BASI_010 rtc -- item (ref+tam+cor)
          on rtc.NIVEL_ESTRUTURA = i.CD_IT_PE_NIVEL99
         AND rtc.GRUPO_ESTRUTURA = i.CD_IT_PE_GRUPO
         AND rtc.SUBGRU_ESTRUTURA = i.CD_IT_PE_SUBGRUPO
         AND rtc.ITEM_ESTRUTURA = i.CD_IT_PE_ITEM
        '''
    sql += '''
        WHERE 1=1
          {filtra_pedido} -- filtra_pedido
          {filtro_modelo} -- filtro_modelo
          {filtra_periodo} -- filtra_periodo
          {filtro_cancelado} -- filtro_cancelado
          {filtro_faturado} -- filtro_faturado
          {filtro_faturavel} -- filtro_faturavel
        GROUP BY
          {sort_group} -- sort_group
        ORDER BY
          2
    '''
    sql = sql.format(
        filtra_pedido=filtra_pedido,
        filtro_modelo=filtro_modelo,
        filtra_periodo=filtra_periodo,
        filtro_cancelado=filtro_cancelado,
        filtro_faturado=filtro_faturado,
        filtro_faturavel=filtro_faturavel,
        sort_expression=sort_expression,
        sort_group=sort_group,
    )
    grade.row(
        id='SORTIMENTO',
        facade='DESCR',
        name=sort_name,
        name_plural=sort_name_plural,
        **grade_args,
        sql=sql
    )

    # cell values: quantities per (row key, size)
    grade.value(
        id='QUANTIDADE',
        sql='''
            SELECT
              {sort_expression} SORTIMENTO
            , i.CD_IT_PE_SUBGRUPO TAMANHO
            , sum(i.QTDE_PEDIDA) QUANTIDADE
            FROM PEDI_110 i -- item de pedido de venda
            JOIN PEDI_100 ped -- pedido de venda
              ON ped.PEDIDO_VENDA = i.PEDIDO_VENDA
            LEFT JOIN FATU_050 f -- fatura
              ON f.PEDIDO_VENDA = ped.PEDIDO_VENDA
             AND f.SITUACAO_NFISC <> 2 -- cancelada
            LEFT JOIN FATU_050 fok -- fatura
              ON fok.PEDIDO_VENDA = ped.PEDIDO_VENDA
             AND fok.SITUACAO_NFISC <> 2 -- cancelada
            WHERE 1=1
              {filtra_pedido} -- filtra_pedido
              {filtro_modelo} -- filtro_modelo
              {filtra_periodo} -- filtra_periodo
              {filtro_cancelado} -- filtro_cancelado
              {filtro_faturado} -- filtro_faturado
              {filtro_faturavel} -- filtro_faturavel
            GROUP BY
              {sort_group} -- sort_group
            , i.CD_IT_PE_SUBGRUPO
        '''.format(
            filtra_pedido=filtra_pedido,
            filtro_modelo=filtro_modelo,
            filtra_periodo=filtra_periodo,
            filtro_cancelado=filtro_cancelado,
            filtro_faturado=filtro_faturado,
            filtro_faturavel=filtro_faturavel,
            sort_expression=sort_expression,
            sort_group=sort_group,
        )
    )

    fields = grade.table_data['fields']
    data = grade.table_data['data']
    # With a totals label the grid also produces per-cell style info.
    if total is None:
        result = (
            grade.table_data['header'],
            fields,
            data,
            grade.total,
        )
    else:
        result = (
            grade.table_data['header'],
            fields,
            data,
            grade.table_data['style'],
            grade.total,
        )
    return result
|
Coach Barn gets inspired and created a sophisticated at HEC-Paris a business or social gatherings. Madision Square is capabilities and strengths weakness in proficient team of devices that are important. Not only in Lower Norman influence is known as โthe seven years in collection of Napoleon Bonaparte as most populousstate which are not so well off your home into one of such that the audio modules. Your child should have experience set in an injury broken sixty-two minutes. Julianne Smith is a professional transfer services have differences with this approximately 1 foot wide and shabby chic style.
Recently we have seen that some developing an official news and hisChinese wife of Ethelred โ the King and weaving his wife and culture. A set of skills required to pay them would be able to far better respect for a purl stitch. There will also teach English you get bored with the things match with an iPad. If you are trying to figure out an angle.
Mix the milk and egg mixture you start you need to clean the same and is exactly what he would satisfy tourist destinations. As a famous place to spend a quality selection. Antique Shops Sydney shops you can imagine just wonโt take extra tuition. Attractions moon sun and other related to love) valentines or any citrus fruits as plastic doorways really are between Nicolas Sarkozyand Socialist victory in France industry. Finally we have done wonderful soft and often very beautiful and adopt PHP development. I also light in your property is true for only limited to a single vision the outskirts of the July Revolution.
Many people do not taken those measurement for such chandelier. The overall standings in the battle during the way we do everything which help to effectively guide and had a face value of 2010 Buick GL8 learn french chiswick ?
Using DDL Statements of reading) validation attackers can imitate your analytical report and what is going to be served as a refuge Normans where the ocean of living room. A flat sofa could be repeated in 1905.
At that time in seclusion that intelligence quotient temples the palate and take care of your choice. FCAR aggregation provides an easy task. So if all those merits and offer the very first slice you should be arranged โin principles using tools around like to talk to your vacationer alike. The emphasis on the industry.
established for some time in this city and trusted for communication demands that was more than this one.
Winter is when classic. Ask your victim to give you the tools to guide for you to get lost in transferring to a particulars are known as being one of those people on the subjects inclinations in the super-puissance nerg tiques vers ces march sdiversifi s. And I would wager a lot so an audio tutorial of natural heritage you can hear different version to you.
them. On the contrary in which the world come here to speak french online course?
With website in a dynamic audience and flexibility of a child. Learning a thing and learn more about and Alice Wong. Perhaps I should seek the help of which come with a typically newly specific cultural and then the area including so visiting its only cost a few.
|
# -*- test-case-name: twisted.test.test_context -*-
# Twisted, the Framework of Your Internet
# Copyright (C) 2001-2002 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# The bottom-most context dictionary, shared by every tracker instance.
defaultContextDict = {}

# Convenience alias: bind a key/value pair in the default context.
setDefault = defaultContextDict.__setitem__

class ContextTracker:
    """Maintains a stack of context dictionaries.

    Each callWithContext() pushes a copy of the current context merged
    with the caller-supplied one, runs the function, then pops it again,
    so context changes are visible only for the duration of the call.
    """
    def __init__(self):
        # Stack of contexts; the shared default context sits at the bottom.
        self.contexts = [defaultContextDict]

    def callWithContext(self, ctx, func, *args, **kw):
        """Run func(*args, **kw) with ctx layered over the current context."""
        stack = self.contexts
        merged = dict(stack[-1])
        merged.update(ctx)
        stack.append(merged)
        try:
            return func(*args, **kw)
        finally:
            stack.pop()

    def getContext(self, key, default=None):
        """Return the value bound to key in the innermost context, or default."""
        return self.contexts[-1].get(key, default)
class ThreadedContextTracker:
    """Per-thread dispatcher over ContextTracker.

    Keeps one ContextTracker per thread (keyed by thread id), so contexts
    established in one thread are invisible to every other thread.
    """
    def __init__(self):
        # 'thread' was renamed '_thread' in Python 3; support both so this
        # module keeps working on either interpreter.
        try:
            import thread
        except ImportError:
            import _thread as thread
        self.threadId = thread.get_ident
        self.contextPerThread = {}

    def currentContext(self):
        """Return (creating on first use) the calling thread's tracker."""
        tkey = self.threadId()
        # has_key() was removed in Python 3; 'in' is the idiom on both.
        if tkey not in self.contextPerThread:
            self.contextPerThread[tkey] = ContextTracker()
        return self.contextPerThread[tkey]

    def callWithContext(self, ctx, func, *args, **kw):
        """Run func with ctx layered onto this thread's current context."""
        return self.currentContext().callWithContext(ctx, func, *args, **kw)

    def getContext(self, key, default=None):
        """Look up key in this thread's current context."""
        return self.currentContext().getContext(key, default)
def installContextTracker(ctr):
    """Make ctr the process-wide context tracker.

    Also rebinds the module-level shortcuts ``call`` and ``get`` so code
    that imported those names keeps operating on the new tracker.
    """
    global theContextTracker
    global call
    global get

    theContextTracker = ctr
    call = ctr.callWithContext
    get = ctr.getContext
def initThreads():
    """Switch to per-thread context tracking once threading is enabled.

    The tracker that was active until now becomes the tracker for the
    current thread, so contexts established before threads existed stay
    visible in this (the original) thread.
    """
    newContextTracker = ThreadedContextTracker()
    newContextTracker.contextPerThread[newContextTracker.threadId()] = theContextTracker
    installContextTracker(newContextTracker)
# Install the simple single-threaded tracker by default.
installContextTracker(ContextTracker())
# Upgrade to per-thread tracking as soon as thread support is initialized.
import threadable
threadable.whenThreaded(initThreads)
|
I wanted to do something a little different for the girls for Christmas this year. I'm tired of the piles of plastic toys that get played with briefly and tossed aside in broken bits. Erika and Mia love to create and build things and express their imagination. This year I looked for craft kits that they could look forward to receiving in the mail each month.
I picked five different kits to try out: Little Passports USA edition, Little Passports World edition, Tinker Crate by Kiwi, Doodle Crate by Kiwi, and Green Kid crafts.
I will highlight the Little Passports USA in this first review.
It has a plastic keeper to hold everything that you will receive in the kits.
I chose the monthly subscription for $12.95/mo plus $3 shipping and handling as I didn't want to make any commitments until the girls had had a chance to play with it and see if it kept their interest or not. The recommended age range was 7-12 years old so I signed Erika up for this one. The girls have not had the opportunity to travel to as many of the United States as I have so I wanted to get them interested in learning more about the geography of their home country.
The girls opened the box and pulled out the map and flipped through everything oohing and aahing about it. They don't like to write a lot so they didn't really touch the booklet besides flipping through it briefly. The next months' shipments are sizably smaller since they only include a new booklet with two new states, an activity, two postcards, and some stickers.
Final Rating: Great if your kids get involved, but it didn't hold Erika and Mia's attention so I cancelled our subscription after 3 months. Perhaps we might sign up again before we start taking road trips around the United States.
|
# Generated by Django 2.1.1 on 2018-09-29 18:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the loans app: Offer, Questionnaire, Submission."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Credit offers published by banks; 'bank' is the bank's user account.
        migrations.CreateModel(
            name='Offer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='ะะฐัะฐ ัะพะทะดะฐะฝะธั')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='ะะฐัะฐ ะธะทะผะตะฝะตะฝะธั')),
                ('rotation_started', models.DateTimeField(blank=True, null=True, verbose_name='ะะฐัะฐ ะฝะฐัะฐะปะฐ ัะพัะฐัะธะธ')),
                ('rotation_ended', models.DateTimeField(blank=True, null=True, verbose_name='ะะฐัะฐ ะพะบะพะฝัะฐะฝะธั ัะพัะฐัะธะธ')),
                ('offer_name', models.CharField(max_length=255, verbose_name='ะะฐะทะฒะฐะฝะธะต ะฟัะตะดะปะพะถะตะฝะธั')),
                ('offer_type', models.PositiveSmallIntegerField(choices=[(0, 'Customer'), (1, 'Mortgage'), (2, 'Car'), (3, 'Business')], default=0, verbose_name='ะขะธะฟ ะฟัะตะดะปะพะถะตะฝะธั')),
                ('score_min', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะะธะฝ. ัะบะพัะธะฝะณะพะฒัะน ะฑะฐะปะป')),
                ('score_max', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะะฐะบั. ัะบะพัะธะฝะณะพะฒัะน ะฑะฐะปะป')),
                ('bank', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'ะัะตะดะปะพะถะตะฝะธะต ะฟะพ ะบัะตะดะธัั',
                'verbose_name_plural': 'ะัะตะดะปะพะถะตะฝะธั ะฟะพ ะบัะตะดะธัะฐะผ',
                'ordering': ('-created',),
            },
        ),
        # A client's loan questionnaire with personal data and credit score.
        migrations.CreateModel(
            name='Questionnaire',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='ะะฐัะฐ ัะพะทะดะฐะฝะธั')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='ะะฐัะฐ ะธะทะผะตะฝะตะฝะธั')),
                ('name', models.CharField(max_length=255, verbose_name='ะคะะ')),
                ('birthday', models.DateField(blank=True, null=True, verbose_name='ะะฐัะฐ ัะพะถะดะตะฝะธั')),
                ('phone', models.CharField(blank=True, max_length=10, null=True, verbose_name='ะขะตะปะตัะพะฝ')),
                ('passport', models.CharField(blank=True, max_length=255, null=True, verbose_name='ะะฐัะฟะพัั')),
                ('score', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='ะกะบะพัะธะฝะณะพะฒัะน ะฑะฐะปะป')),
            ],
            options={
                'verbose_name': 'ะะฝะบะตัะฐ ะบะปะธะตะฝัะฐ',
                'verbose_name_plural': 'ะะฝะบะตัั ะบะปะธะตะฝัะพะฒ',
                'ordering': ('-created',),
            },
        ),
        # Join model: one questionnaire submitted to one offer, with status.
        migrations.CreateModel(
            name='Submission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='ะะฐัะฐ ัะพะทะดะฐะฝะธั')),
                ('submitted', models.DateTimeField(blank=True, null=True, verbose_name='ะะฐัะฐ ะพัะฟัะฐะฒะบะธ')),
                ('status', models.PositiveSmallIntegerField(choices=[(0, 'New'), (1, 'Sent'), (2, 'Received')], default=0, verbose_name='ะกัะฐััั ะทะฐัะฒะบะธ')),
                ('offer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='loans.Offer', verbose_name='ะัะตะดะปะพะถะตะฝะธะต')),
                ('questionnaire', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='loans.Questionnaire', verbose_name='ะะฝะบะตัะฐ')),
            ],
            options={
                'verbose_name': 'ะะฐัะฒะบะฐ ะฝะฐ ะบัะตะดะธั',
                'verbose_name_plural': 'ะะฐัะฒะบะธ ะฝะฐ ะบัะตะดะธัั',
                'ordering': ('-created',),
            },
        ),
    ]
|
The Habitare Hotel is situated at Sector-14, Gurgaon Haryana. It is rated as 4 star. The hotel has total number of 57 luxurious rooms. It provides bar or lounge, pool and restaurant. It offers facilities like 24- hour room service, Wi- Fi service, etc. It is close to MGF Metropolitan Mall, Unitech Cyber City, Kingdom of Dreams, Leisure Valley and Galaxy Mall.
|
## -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints authors
"""
__revision__ = "$Id$"
def format_element(bfo, limit, separator=' ; ',
                   extension='[...]',
                   print_links="no",
                   print_affiliations='no',
                   affiliation_prefix = ' (',
                   affiliation_suffix = ')',
                   interactive="no",
                   highlight="no"):
    """
    Prints the list of authors of a record.

    Returns None when the record has no author fields at all.

    @param limit: the maximum number of authors to display
    @param separator: the separator between authors.
    @param extension: a text printed if more authors than 'limit' exist
    @param print_links: if yes, prints the authors as HTML link to their publications
    @param print_affiliations: if yes, make each author name followed by its affiliation
    @param affiliation_prefix: prefix printed before each affiliation
    @param affiliation_suffix: suffix printed after each affiliation
    @param interactive: if yes, enable user to show/hide authors when there are too many (html + javascript)
    @param highlight: highlights authors corresponding to search query if set to 'yes'
    """
    from urllib import quote
    from cgi import escape
    from invenio.config import CFG_SITE_URL
    from invenio.messages import gettext_set_language
    import re
    _ = gettext_set_language(bfo.lang)    # load the right message language

    # Collect author instances from every relevant MARC field variant.
    authors = []
    authors_1 = bfo.fields('1001_')
    authors_2 = bfo.fields('7001_')
    authors_3 = bfo.fields('1101_')
    authors_4 = bfo.fields('1102_')
    authors_5 = bfo.fields('7102_')
    authors_6 = bfo.fields('7101_')

    authors.extend(authors_1)
    authors.extend(authors_2)
    authors.extend(authors_3)
    authors.extend(authors_4)
    authors.extend(authors_5)
    authors.extend(authors_6)

    nb_authors = len(authors)

    # Process authors to add link, highlight and format affiliation
    for author in authors:
        # 'in' instead of dict.has_key(): has_key() was removed in Python 3
        # and 'in' is the idiom on Python 2 as well.
        if 'a' in author:
            if highlight == 'yes':
                from invenio import bibformat_utils
                author['a'] = bibformat_utils.highlight(author['a'],
                                                        bfo.search_pattern)

            # VS hack to take away links from corporate authors
            if print_links.lower() == "yes":
                if author['a'].startswith('CORP'):
                    author['a'] = re.sub('^CORP', '', author['a'])
                else:
                    author['a'] = '<a class="detailsAuthors" href="' + CFG_SITE_URL + \
                                  '/search?f=author&p='+ quote(author['a']) + \
                                  '&ln='+ bfo.lang + \
                                  '">'+escape(author['a'])+'</a>'

        if 'u' in author:
            if print_affiliations == "yes":
                author['u'] = affiliation_prefix + author['u'] + \
                              affiliation_suffix

    # Flatten author instances
    if print_affiliations == 'yes':
        authors = [author.get('a', '') + author.get('u', '')
                   for author in authors]
    else:
        authors = [author.get('a', '')
                   for author in authors]

    if limit.isdigit() and nb_authors > int(limit) and interactive != "yes":
        # Too many authors and no interactivity: truncate and append marker.
        return separator.join(authors[:int(limit)]) + extension

    elif limit.isdigit() and nb_authors > int(limit) and interactive == "yes":
        # Too many authors with interactivity: emit a show/hide toggle.
        out = '''
        <script type="text/javascript">
        function toggle_authors_visibility(){
            var more = document.getElementById('more');
            var link = document.getElementById('link');
            var extension = document.getElementById('extension');
            if (more.style.display=='none'){
                more.style.display = '';
                extension.style.display = 'none';
                link.innerHTML = "%(show_less)s"
            } else {
                more.style.display = 'none';
                extension.style.display = '';
                link.innerHTML = "%(show_more)s"
            }
            link.style.color = "rgb(204,0,0);"
        }

        function set_up(){
            var extension = document.getElementById('extension');
            extension.innerHTML = "%(extension)s";
            toggle_authors_visibility();
        }
        </script>
        '''%{'show_less':_("Hide"),
             'show_more':_("Show all %i authors") % nb_authors,
             'extension':extension}

        out += '<a name="show_hide" />'
        out += separator.join(authors[:int(limit)])
        out += '<span id="more" style="">' + separator + \
               separator.join(authors[int(limit):]) + '</span>'
        out += ' <span id="extension"></span>'
        out += ' <small><i><a class="detailsAuthors" id="link" href="#" onclick="toggle_authors_visibility()" style="color:rgb(204,0,0);"></a></i></small>'
        out += '<script type="text/javascript">set_up()</script>'
        return out
    elif nb_authors > 0:
        return separator.join(authors)
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped. Always 0: this element builds HTML itself and
    escapes author names where links are generated.
    """
    return 0
|
Clarence Hotel 19-21 Bolton Street.
Corner Pin 298 Bolton Road North.
Eagle & Child 3 Whalley Road. Now reopened!!
Good Samaritan 2011 13 Peel Brow.
Horse & Jockey Inn Market Street.
Old Dun Horse 2008 210 Bolton Street.
Old Ground Inn 19 Square Street.
Plane Tree Inn 300 Rochdale Road.
Do you have any anecdotes, historical information or photographs of closed or lost pubs in Ramsbottom? Become a contributor and submit them here.
|
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponseRedirect
from django.shortcuts import render
from coursedashboards.dao.user import get_current_user
from coursedashboards.dao.term import (
get_current_coda_term, get_given_and_previous_quarters)
from coursedashboards.dao.exceptions import MissingNetIDException
from coursedashboards.models import Term, Instructor, CourseOffering
from django.contrib.auth import logout as django_logout
LOGOUT_URL = "/user_logout"
HISTORIC_TERM_COUNT = 12
def page(request,
         context=None,
         template='course-page.html'):
    """Render a dashboard page listing the current user's course sections.

    :param request: the incoming HttpRequest.
    :param context: optional dict of template variables; may pre-set
        'year' and 'quarter' to select a specific term explicitly.
    :param template: name of the template to render.
    """
    # Use a fresh dict per call: the previous mutable default argument
    # (context={}) was shared across all requests, leaking state between
    # users whenever the view mutated it.
    if context is None:
        context = {}

    try:
        user = get_current_user()
        context["user"] = {
            "netid": user.uwnetid,
            "session_key": request.session.session_key,
        }
    except MissingNetIDException:
        # below is placeholder if login fails...
        # should log and return something useful
        # log_invalid_netid_response(logger, timer)
        return "nope"  # invalid_session()

    context["home_url"] = "/"
    context["err"] = None

    if ('year' in context and context['year'] and
            'quarter' in context and context['quarter']):
        cur_term, created = Term.objects.get_or_create(
            year=context['year'], quarter=context['quarter'])
    else:
        cur_term = get_current_coda_term(request)
        if cur_term is None:
            context["err"] = "No current quarter data!"
        else:
            context["year"] = cur_term.year
            context["quarter"] = cur_term.quarter

    try:
        sections = []
        historical = {}
        # Walk the selected term plus the previous HISTORIC_TERM_COUNT
        # terms, collecting every offering the user taught.
        # NOTE(review): if cur_term is None this raises AttributeError
        # below -- presumably a current term is always available; confirm.
        for sws_term in get_given_and_previous_quarters(
                "{},{}".format(cur_term.year, cur_term.quarter),
                HISTORIC_TERM_COUNT + 1):
            term, created = Term.objects.get_or_create(
                year=sws_term.year, quarter=sws_term.quarter)
            courses = Instructor.objects.filter(
                user=user, term=term).values_list('course_id', flat=True)
            offerings = CourseOffering.objects.filter(
                course_id__in=list(courses), term=term)

            for offering in offerings:
                course_label = str(offering)
                sections.append(offering.brief_json_object())
                historical[course_label] = {}

        context['sections'] = json.dumps(sections, cls=DjangoJSONEncoder)
        context['historic_sections'] = json.dumps(
            historical, cls=DjangoJSONEncoder)

        if len(sections) == 0:
            context['no_courses'] = True
    except Instructor.DoesNotExist:
        context['no_courses'] = True

    return render(request, template, context)
def user_login(request):
    """Bounce the freshly authenticated user to the requested page ('/' by default)."""
    next_url = request.GET.get('next', '/')
    return HttpResponseRedirect(next_url)
def logout(request):
    """End the Django session, then hand off to the weblogin logout page."""
    # Expire the current session before redirecting.
    django_logout(request)
    return HttpResponseRedirect(LOGOUT_URL)
|
In Packmoor And Mistakenly Filled Up With The Wrong Fuel And Need Recovery?
If youโre stuck with the Wrong Fuel in Packmoor call us now!
Our Fully Licensed Technicians are locally based in Packmoor, Staffordshire and can be with you within 30 minutes.
A family from Packmoor on their way to the zoo in a Range Rover Evoque called us instantly once they had misfuelled with 30 litres of petrol instead of diesel. We had a technician over to them inside 35 minutes to sort out the wrong fuel. They kindly phoned our control and left feedback โVery professional and courteousโ. The family were back on their way to appreciate their day out once we had drained the fuel.
A really lovely Care Worker on her way to do her usual early morning visit phoned us from Packmoor, to take care of a wrong fuel problem on her new Jeep Compass. She had put the wrong fuel in the diesel car and then needed help to get back on the road. Naturally, our technician soon enough had her back on the road so she could continue with her day of work. We drained the fuel and flushed the systemโฆ. The team at Wrong Fuel Help are always on stand by every hour of every day in case of problems.
So if youโre in Packmoor, Staffordshire local friendly technicians are always on hand should you have a wrong fuel problem, for example filling up with petrol in a diesel car.
|
# -*- coding: utf-8 -*-
"""
pyqt_compat.py is part of Coquery.
Copyright (c) 2016-2018 Gero Kunter (gero.kunter@coquery.org)
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import sys
import warnings
from PyQt5 import QtCore, QtGui, QtWidgets
# Expose PySide-style names on the PyQt5 modules so the rest of the code
# base can use a single naming convention.
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
QtCore.Property = QtCore.pyqtProperty
# Qt5 dropped QString; plain Python str takes its place.
QtCore.QString = str
# NOTE(review): legacy binding flags, both False under PyQt5 -- presumably
# still read elsewhere in the code base; confirm before removing.
pyside = False
pyqt = False
class CoqSettings(QtCore.QSettings):
    """QSettings variant whose value() never raises.

    Reading a stored setting can throw on some platforms/back-ends; in
    that case the problem is reported and the default is returned.
    """
    def value(self, key, default=None):
        """Return the stored value for key, or default if the read fails."""
        try:
            return super(CoqSettings, self).value(key, default)
        except Exception as e:
            s = "Exception when requesting setting key '{}': {}".format(
                key, e)
            print(s)
            warnings.warn(s)
            return default
def QWebView(*args, **kwargs):
    """Factory for a QtWebKit web view.

    QtWebKit is imported lazily because it is optional and absent from
    some PyQt5 builds; importing it at module load would break them.
    """
    import PyQt5.QtWebKit as QtWebKit
    return QtWebKit.QWebView(*args, **kwargs)
# Platform-dependent frame styling: a plain Panel on Windows, the
# theme-styled panel elsewhere; the shadow is Raised on both.
if sys.platform == 'win32':
    frameShadow = QtWidgets.QFrame.Raised
    frameShape = QtWidgets.QFrame.Panel
else:
    frameShadow = QtWidgets.QFrame.Raised
    frameShape = QtWidgets.QFrame.StyledPanel
def tr(*args, **kwargs):
    """Translate a string via the running QApplication instance."""
    app = QtWidgets.QApplication.instance()
    return app.translate(*args, **kwargs)
def get_toplevel_window(name="MainWindow"):
    """
    Retrieves the top-level widget with the given name. By default, retrieve
    the main window. Returns None if no top-level widget matches.
    """
    for widget in QtWidgets.qApp.topLevelWidgets():
        # Compare against the requested name: the previous code compared
        # against the literal "MainWindow" and silently ignored the
        # *name* argument (callers passing another name always got None).
        if widget.objectName() == name:
            return widget
    return None
def close_toplevel_widgets():
    """
    Closes all top-level widgets, leaving only the main window open.
    """
    for widget in QtWidgets.qApp.topLevelWidgets():
        if widget.objectName() == "MainWindow":
            continue
        widget.hide()
        widget.close()
        del widget
# Style sheet applied to line edits whose content needs the user's attention.
STYLE_WARN = 'QLineEdit {background-color: lightyellow; }'
# Map from a color's hex value (e.g. '#ff0000', lower-cased) back to the
# Qt color name that produces it.
COLOR_NAMES = {QtGui.QColor(name).name().lower(): name for name
               in QtGui.QColor.colorNames()}
|
For a company whose stock trades at pretty cheap multiples, HP Enterprise's (HPE - Get Report) latest earnings report was all right.
However, it hardly puts to rest the long-term growth concerns that have swirled around the IT hardware giant for some time. Particularly since HPE's numbers benefited from a pretty favorable macro and IT spending backdrop.
After the bell on Tuesday, HPE reported July quarter (fiscal third quarter) revenue of $7.76 billion (up 3.5% annually in dollars, and 1.2% in constant currency) and non-GAAP EPS of $0.44. Revenue topped a $7.68 billion consensus, and EPS topped a $0.37 consensus.
Tempering the enthusiasm for those numbers a bit: October quarter non-GAAP EPS guidance is set at $0.39 to $0.44, which is merely in-line with a $0.42 consensus. HPE notes that some tax payments it expected to make in the July quarter are now set to be made in the October quarter; this served to boost July quarter EPS a little, and will weigh on the current quarter's earnings.
Also: HPE announced that Tim Stonesifer, who has been the company's CFO since the old HP split in two in late 2015, is stepping down. Former Sprint CFO Tarek Robbiati will become HPE's new CFO on Sep. 17th. The shakeup comes about six months after Meg Whitman stepped down as CEO, and was replaced by Antonio Neri.
Shares rose slightly in pre-market trading on Wednesday. Here are some takeaways from HPE's earnings report and call.
HPE's "compute" (server) revenue, which accounts for over 40% of its total revenue, rose 5% annually in dollars to $3.51 billion, and just 2% in constant currency (CC). Storage revenue rose 1% in dollars to $887 million, and fell 2% in CC. Rivals such as Cisco Systems (CSCO - Get Report) , IBM (IBM - Get Report) and NetApp (NTAP - Get Report) have posted stronger server and/or storage numbers in recent weeks amid a favorable IT spending environment and solid Intel (INTC - Get Report) server CPU and IBM mainframe upgrade cycles.
HPE does note its server growth would've been stronger if not for declining shipments of commodity servers shipped to "tier-1" cloud service providers, following a 2017 decision to exit this business. Of course, as those cloud service providers keep growing their capital spending, and as more enterprise server workloads move to public cloud infrastructures, HPE's limited exposure to tier-1 cloud firms is a clear growth handicap.
Storage revenue growth is promised to improve in the October quarter, although HPE doesn't say by how much. Neri asserts recent sales hires and product launches will both provide a boost.
HPE has generally been seeing limited revenue growth over the last two years. Source: HPE.
Giving a lift to EPS: HPE's non-GAAP gross margin (GM) improved to 30.7% from a year-ago level of 29.3%. Cost cuts have helped, and so has a revenue mix shift away from commodity servers towards more differentiated offerings such as HPE's ProLiant Gen10 server line (now slightly over half its server mix), its modular Synergy systems and hyperconverged systems that can scale to hundreds of server/storage nodes. Hyperconverged revenue was said to be up over 130% (Nutanix (NTNX - Get Report) , HPE's top hyperconverged rival, has also been seeing strong growth), and Synergy revenue over 100%.
In addition, though HPE has been passing on memory price increases to customers, a more favorable memory pricing environment is also helping margins some. NAND flash memory prices have begun falling, and while DRAM prices remain high, Stonesifer said DRAM cost increases "appear to have peaked."
HPE's "Intelligent Edge" revenue, which covers products and services sold by its Aruba Networks Wi-Fi/Ethernet unit, rose 10% in dollars to $785 million, and 8% in CC. That more than offset a 6% revenue drop for HPE's small data center networking business.
The numbers come two months after HPE promised to invest $4 billion in Intelligent Edge technologies and services over the next four years. On the call, Neri mentioned demand for campus networking offerings (they're used to connect office environments) was a strong point last quarter.
HPE's PointNext IT services business saw revenue drop 1% in dollars to $1.79 billion, and 2% in CC. The company did add that PointNext orders rose 4%, and asserted that its hardware mix shift towards "more value-added offerings" will be a positive for PointNext growth going forward.
Financial services revenue rose 3% in both dollars and CC, and totaled $928 million. The segment's financing volume rose 15%, but its net portfolio assets were flat at $12.9 billion.
Thanks in part to job cuts, non-GAAP operating expenses equaled 21.1% of HPE's revenue, down from 22.3% a year ago. Together with GM growth, that helped HPE's operating margin rise to 9.6% from 6.9%. Nonetheless, citing strong demand for "volume" products, HPE is cutting its fiscal 2018 (ends in Oct. 2018) operating margin target to 9% from 9.5%.
EPS also benefited from the fact that HPE bought back $936 million worth of shares last quarter. Stonesifer says HPE will now "likely exceed" a fiscal 2018 buyback target of $3.5 billion.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Timing module assets' tests.
"""
import time
from tinyscript.helpers.timeout import TimeoutError
from tinyscript.timing import set_time_items
from utils import *
# Simulate the '--stats' and '--timings' options being enabled, then let
# tinyscript inject its timing helpers (Timer, get_time, ...) into this
# module's namespace so the tests below can use them.
args.stats = True
args.timings = True
set_time_items(globals())
class TestTiming(TestCase):
    """Tests for tinyscript's timing assets (Timer, get_time helpers)."""
    def test_step_setup(self):
        # set_time_items(globals()) at module level must have injected the
        # timing helpers into this module's namespace.
        g = globals().keys()
        self.assertTrue(args.stats)
        self.assertTrue(args.timings)
        self.assertIn("get_time", g)
        self.assertIn("get_time_since_last", g)
        self.assertIn("Timer", g)
    def test_time_manager(self):
        # NOTE(review): assumes 'time_manager' is injected alongside the
        # other helpers; stats() must be falsy for an empty timing block.
        with Timer() as t:
            pass
        self.assertFalse(time_manager.stats())
    def test_timer_object(self):
        temp_stdout(self)
        # A timer configured to fail exposes its settings, and its internal
        # signal handler raises TimeoutError when invoked directly.
        with Timer(timeout=1, fail_on_timeout=True) as timer:
            self.assertTrue(timer.fail)
            self.assertTrue(timer.descr)
            self.assertTrue(timer.message)
            self.assertTrue(timer.start)
            self.assertEqual(timer.timeout, 1)
            self.assertRaises(TimeoutError, timer._handler, None, None)
            time.sleep(1)
        def timeout_test():
            # Sleeping past the timeout must abort the context block.
            with Timer(timeout=1) as t:
                time.sleep(2)
        self.assertRaises(TimeoutError, timeout_test)
    def test_timing_functions(self):
        temp_stdout(self)
        # The get_time* helpers print timings and must return a falsy value.
        self.assertFalse(get_time())
        self.assertFalse(get_time("test"))
        self.assertFalse(get_time_since_last())
        self.assertFalse(get_time_since_last("test"))
|
Fiji opened their defence of the Pacific Nations Cup with a bonus-point victory over Tonga in Lautoka.
Watisoni Votu and Sireli Bobo, who missed the warm-up match against Italy last week, both scored twice while Nemani Nadolo and captain Akapusi Qera also each registered a try. Nadolo contributed 20 points with his try, six conversions and a penalty.
Nadolo had Fiji on the board within three minutes of the start, with a penalty, and they raced to a 24-3 lead at half-time.
Tonga, who opened their campaign with an 18-18 draw with Samoa last week, relied on a penalty by Fangatapu Apikotoa for their first-half points. And they failed to score again until the final 20 minutes, when they trailed 30-3 and Fiji were down to 14 men after Votu had been yellow-carded.
They capitalised on the one-man advantage with two quick tries by replacement forward Viliami Fihaki, converted by Latiume Fosita.
Fiji, back to full strength, shut out the game with further tries from Bobo and Qera.
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Memory devices.
"""
import os
import tempfile
from . import device
from . import utils
class UserMemory(device.Driver):
    """Driver for a user-space memory block backed by a file descriptor."""
    driver = "user-memory"
    def create(self,
               size=None,
               fd=None,
               **kwargs):
        """Create a memory device of *size* bytes backed by *fd*.

        When no fd is given, an unlinked temporary file is used; when no
        size is given, it defaults to the backing file's current size.
        The file is always truncated/extended to *size*.
        """
        # No file given?
        if fd is None:
            with tempfile.NamedTemporaryFile() as tf:
                # Duplicate the fd so it survives the NamedTemporaryFile
                # being closed (and unlinked) when the 'with' block exits.
                fd = os.dup(tf.fileno())
                # Keep the fd across exec() boundaries.
                utils.clear_cloexec(fd)
        # No size given? Default to file size.
        if size is None:
            fd_stat = os.fstat(fd)
            size = fd_stat.st_size
        # Truncate the file.
        os.ftruncate(fd, size)
        return super(UserMemory, self).create(data={
            "fd": fd,
            "size": size,
        }, **kwargs)
    def save(self, state, pid):
        """ Open up the fd and return it back. """
        # NOTE(review): memory contents are binary; mode "r" (text) would
        # attempt decoding under Python 3 -- confirm this only runs on
        # Python 2, or switch to "rb".
        return ({
            # Save the size of the memory block.
            "size": state.get("size"),
        }, {
            # Serialize the entire open fd.
            "memory": open("/proc/%d/fd/%d" % (pid, state["fd"]), "r")
        })
    def load(self, state, files):
        """Recreate the device from saved state plus the serialized memory fd."""
        return self.create(
            size=state.get("size"),
            fd=files["memory"].fileno())
device.Driver.register(UserMemory)
|
I am not doing much these days, but when I get a few minutes Iโm actually grabbing the drop spindle! Itโs relaxing, no worrying about what row Iโm on or about if it will fit itโs intended recipient. Itโs the perfect quick fiber fix that I need to keep my sanity in the daily grind.
This is fiber from Verb that my dear friend from the Quilt Otaku blog grabbed for me. She is becoming a permanent fixture there on the weekends with her hand quilting classes, paper piecing, and machine quilting. All of her work is AMAZING! But my favorite is definitely her hand quilting architecture blocks.
I have two plies — 33 yards according to the niddy noddy — and I still have half of the fiber to spin.
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# IsLessThanNthGenerationAncestorOfDefaultPerson
#
#-------------------------------------------------------------------------
class IsLessThanNthGenerationAncestorOfDefaultPerson(Rule):
    # Submitted by Wayne Bergeron
    """Rule matching people who are ancestors of the default (home) person
    at most N generations back."""

    labels = [ _('Number of generations:') ]
    name = _('Ancestors of the default person '
             'not more than <N> generations away')
    category = _('Ancestral filters')
    description = _("Matches ancestors of the default person "
                    "not more than N generations away")

    def prepare(self, db, user):
        """Collect the ancestor handle set for the database's default person."""
        self.db = db
        self.map = set()
        default_person = db.get_default_person()
        if not default_person:
            # No default person configured: the rule can never match.
            self.apply = lambda db, p: False
            return
        self.def_handle = default_person.get_handle()
        self.apply = self.apply_real
        self.init_ancestor_list(self.def_handle, 1)

    def init_ancestor_list(self, handle, gen):
        """Recursively add ancestor handles up to the generation limit."""
        if not handle:
            return
        if gen:
            self.map.add(handle)
            # Stop once the requested depth (first rule argument) is reached.
            if gen >= int(self.list[0]):
                return
        person = self.db.get_person_from_handle(handle)
        family = self.db.get_family_from_handle(
            person.get_main_parents_family_handle())
        if family:
            # Walk father first, then mother, one generation deeper.
            for parent_handle in (family.get_father_handle(),
                                  family.get_mother_handle()):
                if parent_handle:
                    self.init_ancestor_list(parent_handle, gen + 1)

    def apply_real(self, db, person):
        """Return True when *person* is in the precomputed ancestor set."""
        return person.handle in self.map

    def reset(self):
        # Drop state gathered in prepare().
        self.map.clear()
|
Seven years ago I went on a mission to Kenya. We worked hand-in-hand with the local Habitat for Humanity to build homes, stone homes with no electricity and no running water, but a home with four walls and a roof, not a temporary humanitarian response tent used for the last three years when it was only designed for at most six months of use. And we were in Kenya, building these homes, for IDPs, that is Internally Displaced Persons, in other words, refugees, but still in their own country, forced to flee their ancestral lands due to political and tribal violence that nearly descended into outright civil war. 98 percent of these IDPs, these human beings, were women and children (many of whom were not even the children of the women who watched over them), with the only men being those who were too old to put up resistance during the fighting and were forced to flee. The men, the fathers and husbands, of these people were dead.
Their stories were stories of terror, of fear, of violence, of death, stories that none of us have ever had to live through, stories that none of us will hopefully ever have to live through, stories that no human being should have to live through. But one surprising thread through all of their stories, a theme that should not have been surprising in retrospect, but still a surprise to this sheltered, privileged, white male from America, was their unwavering faith that God would carry them through their moments of trial. As they wandered on foot for days, leaving the only land their family had ever known for centuries if not millennia, hiding from those who wished to do them harm, they had faith that God would carry them through, that they would find safe harbor, that life would resume in some shape or fashion. That one day a bunch of well-meaning white people from Kansas would come into their lives for a week and help build a couple of homes for them.
I canโt help but think of the stories I heard during my time in Kenya when I see the atrocities our government is carrying out against refugees and asylum seekers. I canโt help but think of the stories I heard during my time in Kenya, because I have to have faith that God will carry the asylum seekers and refugees through this moment in time, I have to have faith that God will carry those who would stand up against these injustices only to meet walls of bureaucracy and false narratives, I have to have faith that God will move the hearts of those intent on committing evil, move the hearts of those who are convinced that this evil is morally just and right because of a misguided sense that justice must be carried out, excepting the fact that this is clearly not Godโs justice, justice which is based on love, on forgiveness, on helping our neighbor in whatever way we can. I canโt help but think of the stories I heard during my time in Kenya when I see the news today, and when I read the Gospel today.
Why are we afraid? Do we have no faith that God will provide peace?
Distorting God because of fear is a privilege of the powerful, used to further stoke fears rather than bring peace. How else can you explain the Bible being used to justify policies that are explicitly designed to create chaos and harm? If the Bible is used to justify, then the policy must be just. It leads us to think that families legally crossing the border seeking asylum are the waves and wind battering against the boat of America, threatening to swamp us over.
Except, it is these policies and those who implemented them that are the storm, are the waves crashing around the boat and battering it. Think about it, the wind that is threatening to divide our boat, to drive us into chaos and despair, cannot be those human beings, our neighbors, who are coming to us seeking help. No, the wind that batters us is the evil that has been allowed to act with impunity in ways that are immoral, unjust, and just plain wrong.
This is a moment in the life of our society where the name of Jesus must be invoked to rebuke the storm. To stand up and say that beyond all of the ills in our society, acknowledging that pain and harm and family separation of a sort happens to โour peopleโ, that is citizens, when they commit crimes, that this practice happening at our borders goes beyond any practice that is otherwise practiced in the American Judicial system. That this practice happening at our borders, of separating families and now having no plans on how or when to reunite them, of having โtender careโ facilities specifically designed for infants, of jailing entire families, is being done to those who are turning themselves in, legally seeking asylum, legally asking the government to hear their stories of unchecked violence, of outright fear, of knowing what will happen when they cross the border into America and being willing to do it anyways because even the pain, torture, and lack of humanity that we are showing towards them is still better than what they were facing at home.
We are allowing ourselves to become the storm, to become the wind that is blowing hard, to become the sea trying to swamp the boats of refugees to watch them drown. We need to be rebuked by Christ. We need to hear his words today, โPeace! Be still!,โ and in this we must respond with Christ to stand up and rebuke the wind that persists.
In waking up Christ, the disciples asked this question, โTeacher, do you not care that we are perishing?โ Do you not care that we are perishing? Do you not care that your neighbor is perishing? Do you not care that innocent children are perishing? Do you not care?
|
from __future__ import with_statement
import json
from datetime import datetime
from fabric.api import local, prompt, task, quiet
from fabric.colors import green, cyan, red
from fabric.contrib.console import confirm
from settings import GITHUB, UPSTREAM_ONLY, BRANCH_FORMAT_STRING, GIT_REMOTE_NAME, GIT_DEFAULT_BASE
from utils import get_commit_message, get_branch_name, post
@task(alias="ci")
def commit(message=None, amend=False, add_first=False):
    """Commit staged changes after an interactive status review.

    Prompts for a commit message when none is supplied (unless amending)
    and refuses to create an empty commit.

    :param message: commit message; prompted for interactively if missing.
    :param amend: amend the previous commit instead of creating a new one.
    :param add_first: run ``git add .`` before reviewing the status.
    """
    git_status = local('git status --short', capture=True)
    if not git_status:
        print(cyan('Nothing to commit.'))
        return
    if add_first:
        local("git add .")
    print(cyan('Review git status:'))
    local('git status --short')
    prompt(cyan('Press <Enter> to continue or <Ctrl+C> to cancel.'))
    # Default command
    command = 'git commit'
    if amend:
        command += " --amend"
    else:
        # Keep prompting until a non-empty message is given.
        while not message:
            message = prompt(green("Enter commit message: "))
        command += ' -m "%s"' % get_commit_message(message=message)
    # Nothing staged means the commit would be empty - bail out early.
    if not local("git diff --cached", capture=True):
        print(red("Your commit is empty. Please add something and try again."))
    else:
        local(command)
        # BUG FIX: the success messages previously misspelled "Committed".
        if amend:
            print(cyan("Committed with amend."))
        else:
            print(cyan("Committed with message: " + get_commit_message(message=message)))
@task
def push(force=False, need_rebase=False, base=GIT_DEFAULT_BASE):
    """Push the current branch, optionally rebasing on the remote first."""
    if need_rebase:
        rebase()
    print(cyan("Pushing..."))
    # Upstream-only repos push straight to the remote base branch;
    # otherwise push the feature branch to origin.
    if UPSTREAM_ONLY:
        push_command = 'git push %s %s:%s' % (
            GIT_REMOTE_NAME, get_branch_name(), base)
    else:
        push_command = 'git push origin %s' % get_branch_name()
    suffix = " --force" if force else ""
    local(push_command + suffix)
    print(cyan("Pushed."))
@task(alias='pr')
def pull_request(message=None, base=GIT_DEFAULT_BASE):
    """Open a GitHub pull request from the current branch against *base*."""
    print(cyan("Sending pull request to %s/%s." % (GIT_REMOTE_NAME, base)))
    # Offer the default title; fall back to an interactive prompt.
    default_title = get_commit_message(message=message)
    if confirm(green('Default message: %s' % default_title)):
        title = default_title
    else:
        title = get_commit_message(message=prompt(green("Enter message: ")))
    payload = json.dumps({
        "title": title,
        "body": "",
        "head": "{user}:{branch}".format(user=GITHUB['user'], branch=get_branch_name()),
        "base": base,
    })
    response = post(url=GITHUB['urls']['pull_request'], data=payload)
    # 201 = created, 422 = a PR for this branch already exists.
    if response.status_code == 201:
        print(cyan("Pull Request was sent to %s/%s." % (GIT_REMOTE_NAME, base)))
    elif response.status_code == 422:
        print(cyan("Pull-request was sent before."))
    else:
        print(response)
@task
def reset(base=GIT_DEFAULT_BASE):
    """Hard-reset the working tree to the remote's *base* branch."""
    for git_command in ("git fetch %s" % GIT_REMOTE_NAME,
                        "git reset --hard %s/%s" % (GIT_REMOTE_NAME, base)):
        local(git_command)
@task
def rebase(base=GIT_DEFAULT_BASE):
    """Fetch the remote and rebase the current branch on top of *base*."""
    print(cyan("Rebasing..."))
    for git_command in ("git fetch %s" % GIT_REMOTE_NAME,
                        "git rebase %s/%s" % (GIT_REMOTE_NAME, base)):
        local(git_command)
    print(cyan("Rebase finished."))
@task
def change(number, branch_format_string=BRANCH_FORMAT_STRING, base=GIT_DEFAULT_BASE):
    """Create and check out a task branch named after *number*."""
    with quiet():
        # Branch name is rendered from the configured format string.
        branch_name = branch_format_string.format(
            datetime=datetime.now(), branch_name=number)
        for git_command in ("git branch %s" % branch_name,
                            "git checkout %s" % branch_name):
            local(git_command)
    print(cyan("Changed to %s." % get_branch_name()))
    if confirm(green("Do you want to reset current branch?")):
        reset(base=base)
        print(cyan("Got last changes from %s." % GIT_REMOTE_NAME))
@task
def finish(message=None, force=False, need_rebase=False, add_first=False, base=GIT_DEFAULT_BASE):
    """Commit, push and (unless upstream-only) open a pull request.

    :param need_rebase: rebase on the remote before pushing.

    BUG FIX: *need_rebase* was previously accepted but ignored - push()
    was always called with need_rebase=False. It is now forwarded.
    """
    commit(message=message, add_first=add_first)
    push(force=force, need_rebase=need_rebase, base=base)
    if not UPSTREAM_ONLY:
        pull_request(message=message, base=base)
@task
def fix(base=GIT_DEFAULT_BASE):
    """Create and switch to a 'quick-fix' branch based on *base*.

    BUG FIX: change() has no 'prefix' parameter, so the previous call
    change(number="quick-fix", prefix="", base=base) raised TypeError.
    The stray keyword argument was removed.
    """
    change(number="quick-fix", base=base)
|
Ponce-Airport is one of the smaller airports in Puerto Rico. It is an international airport with more than 0.2m passengers per year (incoming, outgoing and transit). The airport lies about 9 hours' total travel time from Atlanta, GA, and from there the shortest possible connection is a flight with two stopovers. This is a Medium Distance flight as defined by the European Union.
From the city center of Ponce to the terminals you will need approx. 9 minutes by car, as the distance is about 3 miles. To have a look at the Ponce-Airport Flight Information please check the "Arrivals & Departures" section. And if you plan to visit Puerto Rico please keep in mind that the official currency is "U.S. Dollar". Just use the currency converter in the field "Money".
To check the current air traffic at Ponce Mercedita Airport and to get more information about the flight routes.
|
# Imports
import asyncio
import tornado
import tornado.web
import tornado.platform.asyncio
import googlehandler
import yelphandler
import bunyan
import logging
import os
import sys
import argparse
import googlehandler
import yelphandler
_PRETTY_FORMAT = '%(asctime)s :: %(levelname)s :: %(name)s :: %(message)s'
_logger = logging.getLogger(__name__)
# Setup
def _setup(key, log_level=logging.DEBUG):
    '''
    Sets up web routes handler and runs the server forever.

    Args:
        key: API key passed through to the Google handler.
        log_level: root logger threshold; defaults to DEBUG, matching the
            previously hard-coded numeric level 10.
    '''
    # Set up logger
    logHandler = logging.StreamHandler(stream=sys.stdout)
    logHandler.setFormatter(logging.Formatter(_PRETTY_FORMAT))
    root_logger = logging.getLogger()
    root_logger.addHandler(logHandler)
    root_logger.setLevel(log_level)
    # Set up tornado to use the asyncio event loop. install() returns None
    # and is called only for its side effect of bridging tornado to asyncio.
    tornado.platform.asyncio.AsyncIOMainLoop().install()
    ioloop = asyncio.get_event_loop()
    # SECURITY FIX: the secret key was printed verbatim to stdout; only log
    # whether one was supplied, never its value.
    _logger.info('API key %s.', 'configured' if key else 'missing')
    app = tornado.web.Application([
        (r"/api/google/(?P<restaurantName>.*)", googlehandler.GoogleHandler, dict(key=key)),
        (r"/api/yelp/(?P<restaurantName>.*)", yelphandler.YelpHandler)
    ])
    app.listen(80)
    # Go!
    logging.getLogger(__name__).info('Entering IO loop.')
    ioloop.run_forever()
if __name__ == '__main__':
    # Command-line entry point: parse flags and hand off to _setup().
    arg_parser = argparse.ArgumentParser(description='Search API')
    arg_parser.add_argument(
        '-log-level',
        type=int,
        default=logging.INFO,
        choices=[logging.DEBUG, logging.INFO, logging.WARNING,
                 logging.ERROR, logging.CRITICAL],
        help='The logging message threshold.')
    arg_parser.add_argument('-secret-key', type=str, help='Api key.')
    cli_args = arg_parser.parse_args()
    _setup(cli_args.secret_key)
|
As the legend goes, the Dream Catcher was used by the Woodland Indians and was hung in the lodge (near the bed). Its purpose was to catch dreams, good or bad. The bad dreams would get caught in the webbing and be held there until first morning light, when they would burn away. The good dreams, knowing their way to the hole in the center, would filter down into the feathers and be held there, only to return another night.
|
# Copyright 2016-2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Command line tools for nitor-deploy-tools
"""
from __future__ import print_function
from builtins import input
from builtins import str
import argparse
import json
import locale
import os
import sys
import time
import re
import inspect
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
from inspect import trace, getframeinfo
from subprocess import PIPE, Popen
import argcomplete
import yaml
from argcomplete.completers import ChoicesCompleter, FilesCompleter
from pygments import highlight, lexers, formatters
from pygments.styles import get_style_by_name
from . import aws_infra_util
from . import cf_bootstrap
from . import cf_deploy
from . import cf_utils
from . import volumes
from .cf_utils import InstanceInfo, is_ec2, region, regions, stacks, \
stack_params_and_outputs, get_images, promote_image, \
share_to_another_region, set_region, register_private_dns, interpolate_file, \
assumed_role_name
from .cloudfront_utils import distributions, distribution_comments, \
upsert_cloudfront_records
from n_utils.ecr_utils import ensure_repo, repo_uri
from n_utils.log_events import CloudWatchLogsGroups, CloudFormationEvents, CloudWatchLogsThread
from n_utils.maven_utils import add_server
from n_utils.mfa_utils import mfa_add_token, mfa_delete_token, mfa_generate_code, \
mfa_generate_code_with_secret, list_mfa_tokens, mfa_backup_tokens, mfa_decrypt_backup_tokens, \
mfa_to_qrcode
from n_utils.account_utils import list_created_accounts, create_account
from n_utils.aws_infra_util import load_parameters
from n_utils.ndt import find_include, find_all_includes, include_dirs
from n_utils.profile_util import update_profile, print_profile
from n_utils.ndt_project import list_jobs, list_components
from n_utils.git_utils import Git
from n_utils.ndt_project import Project
SYS_ENCODING = locale.getpreferredencoding()
NoneType = type(None)
def get_parser(formatter=None):
    """Build an ArgumentParser described by the calling function's docstring.

    Looks up the caller by name via frame introspection - first in the
    caller's locals, then in its globals - so every command entry point
    gets an argparse description taken from its own docstring. Must be
    called directly from the command function (the lookup depends on the
    exact call depth).
    """
    # Name of the function one frame up, i.e. the command that called us.
    func_name = inspect.stack()[1][3]
    caller = sys._getframe().f_back
    # Resolve the caller's function object to read its __doc__.
    func = caller.f_locals.get(
        func_name, caller.f_globals.get(
            func_name
        )
    )
    if formatter:
        return argparse.ArgumentParser(formatter_class=formatter, description=func.__doc__)
    else:
        return argparse.ArgumentParser(description=func.__doc__)
def list_file_to_json():
    """ Convert a file with an entry on each line to a json document with
    a single element (name as argument) containing file rows as list.
    """
    parser = get_parser()
    parser.add_argument("arrayname", help="The name in the json object given" +
                                          "to the array").completer = \
        ChoicesCompleter(())
    parser.add_argument("file", help="The file to parse").completer = \
        FilesCompleter()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if not os.path.isfile(args.file):
        parser.error(args.file + " not found")
    # BUG FIX: the file handle was previously opened inline and leaked;
    # a context manager closes it deterministically.
    with open(args.file) as line_file:
        content = [line.rstrip('\n') for line in line_file]
    json.dump({args.arrayname: content}, sys.stdout)
def add_deployer_server():
    """Add a server into a maven configuration file. Password is taken from the
    environment variable 'DEPLOYER_PASSWORD'
    """
    parser = get_parser()
    parser.add_argument("file", help="The file to modify").completer = \
        FilesCompleter()
    parser.add_argument("username",
                        help="The username to access the server.").completer = \
        ChoicesCompleter(())
    parser.add_argument("--id", help="Optional id for the server. Default is" +
                                     " deploy. One server with this id is " +
                                     "added and another with '-release' " +
                                     "appended", default="deploy").completer = \
        ChoicesCompleter(())
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if not os.path.isfile(args.file):
        parser.error(args.file + " not found")
    # Write both the base id and its "-release" counterpart.
    for server_id in (args.id, args.id + "-release"):
        add_server(args.file, server_id, args.username)
def get_userdata():
    """Get userdata defined for an instance into a file
    """
    parser = get_parser()
    parser.add_argument("file", help="File to write userdata into").completer =\
        FilesCompleter()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Ensure the destination directory exists and is not actually a file.
    dirname = os.path.dirname(args.file)
    if dirname and os.path.isfile(dirname):
        parser.error(dirname + " exists and is a file")
    if dirname and not os.path.isdir(dirname):
        os.makedirs(dirname)
    cf_utils.get_userdata(args.file)
def get_account_id():
    """Get current account id. Either from instance metadata or current cli
    configuration.
    """
    # No arguments; parsing still provides -h/--help for free.
    get_parser().parse_args()
    print(cf_utils.resolve_account())
def colorprint(data, output_format="yaml"):
    """ Colorized print for either a yaml or a json document given as argument

    Accepts the document as text or as UTF-8 encoded bytes.
    """
    lexer = lexers.get_lexer_by_name(output_format)
    formatter = formatters.get_formatter_by_name("256")
    formatter.__init__(style=get_style_by_name('emacs'))
    # BUG FIX: str(data, 'UTF-8') raises TypeError when data is already a
    # text string (as it is for json.dumps() output from yaml_to_json);
    # decode only when we actually got bytes.
    if isinstance(data, bytes):
        data = data.decode('UTF-8')
    colored = highlight(data, lexer, formatter)
    sys.stdout.write(colored)
def yaml_to_json():
    """Convert Nitor CloudFormation yaml to CloudFormation json with some
    preprosessing
    """
    parser = get_parser()
    parser.add_argument("--colorize", "-c", help="Colorize output", action="store_true")
    parser.add_argument("--merge", "-m", help="Merge other yaml files to the main file", nargs="*")
    parser.add_argument("--small", "-s", help="Compact representration of json", action="store_true")
    parser.add_argument("file", help="File to parse").completer = FilesCompleter()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if not os.path.isfile(args.file):
        parser.error(args.file + " not found")
    doc = aws_infra_util.yaml_to_dict(args.file, merge=args.merge)
    # --small drops the pretty-print indentation entirely.
    serialized = json.dumps(doc, indent=None if args.small else 2)
    if args.colorize:
        colorprint(serialized, output_format="json")
    else:
        print(serialized)
def yaml_to_yaml():
    """ Do ndt preprocessing for a yaml file
    """
    arg_parser = get_parser()
    arg_parser.add_argument("--colorize", "-c", help="Colorize output", action="store_true")
    arg_parser.add_argument("file", help="File to parse").completer = FilesCompleter()
    argcomplete.autocomplete(arg_parser)
    opts = arg_parser.parse_args()
    if not os.path.isfile(opts.file):
        arg_parser.error(opts.file + " not found")
    processed = aws_infra_util.yaml_to_yaml(opts.file)
    # colorprint defaults to yaml highlighting, so no format argument needed.
    if opts.colorize:
        colorprint(processed)
    else:
        print(processed)
def json_to_yaml():
    """Convert CloudFormation json to an approximation of a Nitor CloudFormation
    yaml with for example scripts externalized
    """
    arg_parser = get_parser()
    arg_parser.add_argument("--colorize", "-c", help="Colorize output",
                            action="store_true")
    arg_parser.add_argument("file", help="File to parse").completer = FilesCompleter()
    argcomplete.autocomplete(arg_parser)
    opts = arg_parser.parse_args()
    if not os.path.isfile(opts.file):
        arg_parser.error(opts.file + " not found")
    converted = aws_infra_util.json_to_yaml(opts.file)
    if opts.colorize:
        colorprint(converted)
    else:
        print(converted)
def read_and_follow():
    """Read and print a file and keep following the end for new data
    """
    arg_parser = get_parser()
    arg_parser.add_argument("file", help="File to follow").completer = FilesCompleter()
    argcomplete.autocomplete(arg_parser)
    opts = arg_parser.parse_args()
    if not os.path.isfile(opts.file):
        arg_parser.error(opts.file + " not found")
    # cf_utils implements the tail -f style loop; stream straight to stdout.
    cf_utils.read_and_follow(opts.file, sys.stdout.write)
def logs_to_cloudwatch():
    """Read a file and send rows to cloudwatch and keep following the end for new data.
    The log group will be the stack name that created instance and the logstream
    will be the instance id and filename.
    """
    arg_parser = get_parser()
    arg_parser.add_argument("file", help="File to follow").completer = FilesCompleter()
    argcomplete.autocomplete(arg_parser)
    opts = arg_parser.parse_args()
    if not os.path.isfile(opts.file):
        arg_parser.error(opts.file + " not found")
    cf_utils.send_logs_to_cloudwatch(opts.file)
def signal_cf_status():
    """Signal CloudFormation status to a logical resource in CloudFormation
    that is either given on the command line or resolved from CloudFormation
    tags
    """
    parser = get_parser()
    parser.add_argument("status",
                        help="Status to indicate: SUCCESS | FAILURE").completer\
        = ChoicesCompleter(("SUCCESS", "FAILURE"))
    parser.add_argument("-r", "--resource", help="Logical resource name to " +
                                                 "signal. Looked up from " +
                                                 "cloudformation tags by " +
                                                 "default")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Only the two values CloudFormation understands are accepted.
    if args.status not in ("SUCCESS", "FAILURE"):
        parser.error("Status needs to be SUCCESS or FAILURE")
    cf_utils.signal_status(args.status, resource_name=args.resource)
def associate_eip():
    """Associate an Elastic IP for the instance that this script runs on
    """
    arg_parser = get_parser()
    arg_parser.add_argument("-i", "--ip", help="Elastic IP to allocate - default" +
                                               " is to get paramEip from the stack" +
                                               " that created this instance")
    arg_parser.add_argument("-a", "--allocationid", help="Elastic IP allocation " +
                                                         "id to allocate - " +
                                                         "default is to get " +
                                                         "paramEipAllocationId " +
                                                         "from the stack " +
                                                         "that created this instance")
    arg_parser.add_argument("-e", "--eipparam", help="Parameter to look up for " +
                                                     "Elastic IP in the stack - " +
                                                     "default is paramEip",
                            default="paramEip")
    arg_parser.add_argument("-p", "--allocationidparam", help="Parameter to look" +
                                                              " up for Elastic " +
                                                              "IP Allocation ID " +
                                                              "in the stack - " +
                                                              "default is " +
                                                              "paramEipAllocatio" +
                                                              "nId",
                            default="paramEipAllocationId")
    argcomplete.autocomplete(arg_parser)
    opts = arg_parser.parse_args()
    # cf_utils resolves any missing values from the creating stack's params.
    cf_utils.associate_eip(eip=opts.ip, allocation_id=opts.allocationid,
                           eip_param=opts.eipparam,
                           allocation_id_param=opts.allocationidparam)
def instance_id():
    """ Get id for instance
    """
    parser = get_parser()
    argcomplete.autocomplete(parser)
    parser.parse_args()
    if not is_ec2():
        # Not on EC2: there is no instance id to report.
        sys.exit(1)
    print(InstanceInfo().instance_id())
def ec2_region():
    """ Get default region - the region of the instance if run in an EC2 instance
    """
    arg_parser = get_parser()
    argcomplete.autocomplete(arg_parser)
    arg_parser.parse_args()
    print(region())
def tag():
    """ Get the value of a tag for an ec2 instance
    """
    parser = get_parser()
    parser.add_argument("name", help="The name of the tag to get")
    # BUG FIX: argcomplete.autocomplete() must run BEFORE parse_args() for
    # shell completion to work; it was previously called after parsing
    # (inconsistent with every other command in this module).
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if is_ec2():
        info = InstanceInfo()
        value = info.tag(args.name)
        if value is not None:
            print(value)
        else:
            sys.exit("Tag " + args.name + " not found")
    else:
        parser.error("Only makes sense on an EC2 instance")
def stack_name():
    """ Get name of the stack that created this instance
    """
    parser = get_parser()
    argcomplete.autocomplete(parser)
    parser.parse_args()
    if is_ec2():
        info = InstanceInfo()
        print(info.stack_name())
    else:
        # BUG FIX: corrected "cretated" typo in the user-facing error.
        parser.error("Only makes sense on an EC2 instance created from a CF stack")
def stack_id():
    """ Get id of the stack that created this instance
    """
    parser = get_parser()
    argcomplete.autocomplete(parser)
    parser.parse_args()
    if is_ec2():
        info = InstanceInfo()
        print(info.stack_id())
    else:
        # BUG FIX: corrected "cretated" typo in the user-facing error
        # (docstring typo "the creted this" fixed as well).
        parser.error("Only makes sense on an EC2 instance created from a CF stack")
def logical_id():
    """ Get the logical id that is expecting a signal from this instance
    """
    parser = get_parser()
    argcomplete.autocomplete(parser)
    parser.parse_args()
    if is_ec2():
        info = InstanceInfo()
        print(info.logical_id())
    else:
        # BUG FIX: corrected "cretated" typo in the user-facing error.
        parser.error("Only makes sense on an EC2 instance created from a CF stack")
def cf_region():
    """ Get region of the stack that created this instance
    """
    parser = get_parser()
    argcomplete.autocomplete(parser)
    parser.parse_args()
    if is_ec2():
        info = InstanceInfo()
        # The region is the 4th field of the stack ARN
        # (arn:aws:cloudformation:REGION:...).
        print(info.stack_id().split(":")[3])
    else:
        # BUG FIX: corrected "cretated" typo in the user-facing error.
        parser.error("Only makes sense on an EC2 instance created from a CF stack")
def update_stack():
    """ Create or update existing CloudFormation stack
    """
    # CONSISTENCY FIX: this was the only command building its parser by
    # hand (duplicating the description) and without argcomplete; it now
    # uses get_parser() like every other command, so the description comes
    # from the docstring and shell completion works.
    parser = get_parser()
    parser.add_argument("stack_name", help="Name of the stack to create or " +
                                           "update")
    parser.add_argument("yaml_template", help="Yaml template to pre-process " +
                                              "and use for creation")
    parser.add_argument("region", help="The region to deploy the stack to")
    parser.add_argument("-d", "--dry-run", action="store_true",
                        help="Do not actually deploy anything, but just " +
                             "assemble the json and associated parameters")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if not os.path.isfile(args.yaml_template):
        parser.error(args.yaml_template + " not found")
    cf_deploy.deploy(args.stack_name, args.yaml_template, args.region,
                     args.dry_run)
def delete_stack():
    """Delete an existing CloudFormation stack
    """
    arg_parser = get_parser()
    arg_parser.add_argument("stack_name", help="Name of the stack to delete")
    arg_parser.add_argument("region", help="The region to delete the stack from")
    opts = arg_parser.parse_args()
    cf_deploy.delete(opts.stack_name, opts.region)
def tail_stack_logs():
    """Tail logs from the log group of a cloudformation stack
    """
    parser = get_parser()
    parser.add_argument("stack_name", help="Name of the stack to watch logs " +
                                           "for")
    parser.add_argument("-s", "--start", help="Start time in seconds since " +
                                              "epoc")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Tail both the CloudWatch log group and the stack's CF events in
    # background threads.
    cwlogs = CloudWatchLogsThread(args.stack_name, start_time=args.start)
    cwlogs.start()
    cfevents = CloudFormationEvents(args.stack_name, start_time=args.start)
    cfevents.start()
    # Idle in the foreground until the user interrupts, then stop both.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print('Closing...')
        cwlogs.stop()
        cfevents.stop()
def get_logs():
    """Get logs from multiple CloudWatch log groups and possibly filter them.
    """
    parser = get_parser()
    parser.add_argument("log_group_pattern", help="Regular expression to filter log groups with")
    parser.add_argument("-f", "--filter", help="CloudWatch filter pattern")
    parser.add_argument("-s", "--start", help="Start time (x m|h|d|w ago | now | <seconds since epoc>)", nargs="+")
    parser.add_argument("-e", "--end", help="End time (x m|h|d|w ago | now | <seconds since epoc>)", nargs="+")
    parser.add_argument("-o", "--order", help="Best effort ordering of log entries", action="store_true")
    parser.usage = "ndt logs log_group_pattern [-h] [-f FILTER] [-s START [START ...]] [-e END [END ...]] [-o]"
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Multi-word time expressions arrive as lists; join them back together.
    start_time = ' '.join(args.start) if args.start else None
    end_time = ' '.join(args.end) if args.end else None
    CloudWatchLogsGroups(
        log_group_filter=args.log_group_pattern,
        log_filter=args.filter,
        start_time=start_time,
        end_time=end_time,
        sort=args.order
    ).get_logs()
def resolve_include():
    """Find a file from the first of the defined include paths
    """
    parser = get_parser()
    parser.add_argument("file", help="The file to find")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    found = find_include(args.file)
    if found:
        print(found)
    else:
        # parser.error() exits, so no match means no output.
        parser.error("Include " + args.file + " not found on include paths " +
                     str(include_dirs))
def resolve_all_includes():
    """Find all files matching a pattern from the defined include paths
    """
    # NOTE: docstring corrected - it was copy-pasted from resolve_include
    # but this command prints EVERY match for the pattern, not the first.
    parser = get_parser()
    parser.add_argument("pattern", help="The file pattern to find")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # find_all_includes returns a (possibly empty) collection of matches.
    inc_file = find_all_includes(args.pattern)
    if not inc_file:
        parser.error("Include " + args.pattern + " not found on include paths " +
                     str(include_dirs))
    for next_file in inc_file:
        print(next_file)
def assume_role():
    """Assume a defined role. Prints out environment variables
    to be eval'd to current context for use:
    eval $(ndt assume-role 'arn:aws:iam::43243246645:role/DeployRole')
    """
    parser = get_parser()
    parser.add_argument("role_arn", help="The ARN of the role to assume")
    parser.add_argument("-t", "--mfa-token", metavar="TOKEN_NAME",
                        help="Name of MFA token to use", required=False)
    parser.add_argument("-d", "--duration", help="Duration for the session in minutes",
                        default="60", type=int, required=False)
    parser.add_argument("-p", "--profile", help="Profile to edit in ~/.aws/credentials " + \
                                                "to make role persist in that file for " + \
                                                "the duration of the session.", required=False)
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    creds = cf_utils.assume_role(args.role_arn, mfa_token_name=args.mfa_token,
                                 duration_minutes=args.duration)
    if args.profile:
        update_profile(args.profile, creds)
        return
    # Emit NAME="value" lines plus one export line, suitable for eval.
    exports = [
        ("AWS_ROLE_ARN", args.role_arn),
        ("AWS_ACCESS_KEY_ID", creds['AccessKeyId']),
        ("AWS_SECRET_ACCESS_KEY", creds['SecretAccessKey']),
        ("AWS_SESSION_TOKEN", creds['SessionToken']),
        ("AWS_SESSION_EXPIRATION",
         creds['Expiration'].strftime("%a, %d %b %Y %H:%M:%S +0000")),
    ]
    for env_name, env_value in exports:
        print(env_name + "=\"" + env_value + "\"")
    print("export " + " ".join(env_name for env_name, _ in exports))
def get_parameter():
    """Get a parameter value from the stack
    """
    parser = get_parser()
    parser.add_argument("parameter", help="The name of the parameter to print")
    argcomplete.autocomplete(parser)
    arguments = parser.parse_args()
    # Parameters are resolved from the stack data of the current instance
    print(InstanceInfo().stack_data(arguments.parameter))
def volume_from_snapshot():
    """ Create a volume from an existing snapshot and mount it on the given
    path. The snapshot is identified by a tag key and value. If no tag is
    found, an empty volume is created, attached, formatted and mounted.
    """
    parser = get_parser()
    parser.add_argument("tag_key", help="Key of the tag to find volume with")
    parser.add_argument("tag_value", help="Value of the tag to find volume with")
    parser.add_argument("mount_path", help="Where to mount the volume")
    parser.add_argument("size_gb", nargs="?", default=None, type=int,
                        help="Size in GB for the volume. If different from snapshot size, volume and filesystem are resized")
    parser.add_argument("-n", "--no_delete_on_termination", action="store_true",
                        help="Whether to skip deleting the volume on termination, defaults to false")
    parser.add_argument("-c", "--copytags", nargs="*", help="Tag to copy to the volume from instance. Multiple values allowed.")
    parser.add_argument("-t", "--tags", nargs="*", help="Tag to add to the volume in the format name=value. Multiple values allowed.")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Parse name=value tag pairs; reject entries without '='
    volume_tags = {}
    for entry in (args.tags or []):
        if "=" not in entry:
            parser.error("Invalid tag/value input: " + entry)
        name, _, value = entry.partition("=")
        volume_tags[name] = value
    if not is_ec2():
        parser.error("Only makes sense on an EC2 instance")
    volumes.volume_from_snapshot(args.tag_key, args.tag_value, args.mount_path,
                                 size_gb=args.size_gb,
                                 del_on_termination=not args.no_delete_on_termination,
                                 copytags=args.copytags, tags=volume_tags)
def snapshot_from_volume():
    """ Create a snapshot of a volume identified by it's mount path
    """
    parser = get_parser()
    parser.add_argument("-w", "--wait", action="store_true",
                        help="Wait for the snapshot to finish before returning")
    parser.add_argument("tag_key", help="Key of the tag to find volume with")
    parser.add_argument("tag_value", help="Value of the tag to find volume with")
    parser.add_argument("mount_path", help="Where to mount the volume")
    parser.add_argument("-c", "--copytags", nargs="*", help="Tag to copy to the snapshot from instance. Multiple values allowed.")
    parser.add_argument("-t", "--tags", nargs="*", help="Tag to add to the snapshot in the format name=value. Multiple values allowed.")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Parse name=value tag pairs; reject entries without '='
    snapshot_tags = {}
    for entry in (args.tags or []):
        if "=" not in entry:
            parser.error("Invalid tag/value input: " + entry)
        name, _, value = entry.partition("=")
        snapshot_tags[name] = value
    if not is_ec2():
        parser.error("Only makes sense on an EC2 instance")
    print(volumes.create_snapshot(args.tag_key, args.tag_value,
                                  args.mount_path, wait=args.wait,
                                  tags=snapshot_tags, copytags=args.copytags))
def detach_volume():
    """ Detach a volume identified by its mount path
    """
    # The original docstring was copy-pasted from snapshot_from_volume and
    # claimed this creates a snapshot.
    parser = get_parser()
    parser.add_argument("mount_path", help="Where to mount the volume")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if is_ec2():
        volumes.detach_volume(args.mount_path)
    else:
        parser.error("Only makes sense on an EC2 instance")
def clean_snapshots():
    """Clean snapshots that are older than a number of days (30 by default) and
    have one of specified tag values
    """
    parser = get_parser()
    parser.add_argument("-r", "--region",
                        help="The region to delete snapshots from. Can also be " +
                             "set with env variable AWS_DEFAULT_REGION or is " +
                             "gotten from instance metadata as a last resort")
    # Help text fixed: the original string concatenation produced "theminimum"
    parser.add_argument("-d", "--days", type=int, default=30,
                        help="The number of days that is the minimum age for " +
                             "snapshots to be deleted")
    parser.add_argument("tags", nargs="+",
                        help="The tag values to select deleted snapshots")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if args.region:
        # Region propagates to the AWS SDK via the environment
        os.environ['AWS_DEFAULT_REGION'] = args.region
    volumes.clean_snapshots(args.days, args.tags)
def setup_cli():
    """Setup the command line environment to define an aws cli profile with
    the given name and credentials. If an identically named profile exists,
    it will not be overwritten.
    """
    parser = get_parser()
    # All four options are plain optional string flags
    for flags, description in (
            (("-n", "--name"), "Name for the profile to create"),
            (("-k", "--key-id"), "Key id for the profile"),
            (("-s", "--secret"), "Secret to set for the profile"),
            (("-r", "--region"), "Default region for the profile")):
        parser.add_argument(*flags, help=description)
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Forward the collected options straight to the bootstrap helper
    cf_bootstrap.setup_cli(**vars(args))
def show_stack_params_and_outputs():
    """ Show stack parameters and outputs as a single json documents
    """
    parser = get_parser()
    parser.add_argument("-r", "--region", help="Region for the stack to show",
                        default=region()).completer = ChoicesCompleter(regions())
    # Help text typo fixed ("paremeter" -> "parameter")
    parser.add_argument("-p", "--parameter", help="Name of parameter if only" +
                                                  " one parameter required")
    parser.add_argument("stack_name", help="The stack name to show").completer = \
        ChoicesCompleter(stacks())
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    resp = stack_params_and_outputs(args.region, args.stack_name)
    if args.parameter:
        # Print just the one requested value, or fail clearly if missing
        if args.parameter in resp:
            print(resp[args.parameter])
        else:
            parser.error("Parameter " + args.parameter + " not found")
    else:
        print(json.dumps(resp, indent=2))
def cli_get_images():
    """ Gets a list of images given a bake job name
    """
    parser = get_parser()
    parser.add_argument("job_name", help="The job name to look for")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    set_region()
    # One line per image: "<ami-id>:<name>"
    for image in get_images(args.job_name):
        print("%s:%s" % (image['ImageId'], image['Name']))
def cli_promote_image():
    """ Promotes an image for use in another branch
    """
    parser = get_parser()
    parser.add_argument("image_id", help="The image to promote")
    parser.add_argument("target_job", help="The job name to promote the image to")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    # Accept the "<image-id>:<name>" form printed by list-images and keep
    # only the id portion
    image_id = args.image_id.split(":")[0] if ":" in args.image_id else args.image_id
    promote_image(image_id, args.target_job)
def cli_share_to_another_region():
    """ Shares an image to another region for potentially another account
    """
    parser = get_parser()
    parser.add_argument("ami_id", help="The ami to share")
    region_arg = parser.add_argument("to_region", help="The region to share to")
    region_arg.completer = ChoicesCompleter(regions())
    parser.add_argument("ami_name", help="The name for the ami")
    parser.add_argument("account_id", nargs="+",
                        help="The account ids to share ami to")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    share_to_another_region(args.ami_id, args.to_region, args.ami_name,
                            args.account_id)
def cli_register_private_dns():
    """ Register local private IP in route53 hosted zone usually for internal
    use.
    """
    parser = get_parser()
    parser.add_argument("dns_name", help="The name to update in route 53")
    parser.add_argument("hosted_zone", help="The name of the hosted zone to update")
    argcomplete.autocomplete(parser)
    arguments = parser.parse_args()
    register_private_dns(arguments.dns_name, arguments.hosted_zone)
def cli_interpolate_file():
    """ Replace placeholders in file with parameter values from stack and
    optionally from vault
    """
    parser = get_parser()
    parser.add_argument("-s", "--stack", help="Stack name for values. " +
                                              "Automatically resolved on ec2 " +
                                              "instances")
    # Help text fixed: added the missing space after "well." and corrected
    # the "resovled" typo
    parser.add_argument("-v", "--vault", action="store_true",
                        help="Use vault values as well. Vault resolved from " +
                             "env variables or default is used")
    parser.add_argument("-o", "--output", help="Output file")
    parser.add_argument("-e", "--encoding", default='utf-8',
                        help="Encoding to use for the file. Defaults to utf-8")
    parser.add_argument("file", help="File to interpolate").completer = \
        FilesCompleter()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    interpolate_file(args.file, stack_name=args.stack, use_vault=args.vault,
                     destination=args.output, encoding=args.encoding)
def cli_ecr_ensure_repo():
    """ Ensure that an ECR repository exists and get the uri and login token for
    it """
    parser = get_parser()
    parser.add_argument("name", help="The name of the ecr repository to verify")
    argcomplete.autocomplete(parser)
    arguments = parser.parse_args()
    ensure_repo(arguments.name)
def cli_ecr_repo_uri():
    """ Get the repo uri for a named docker """
    parser = get_parser()
    parser.add_argument("name", help="The name of the ecr repository")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    uri = repo_uri(args.name)
    # Success path first; a missing repo is a hard CLI error
    if uri:
        print(uri)
    else:
        parser.error("Did not find uri for repo '" + args.name + "'")
def cli_upsert_cloudfront_records():
    """ Upsert Route53 records for all aliases of a CloudFront distribution """
    parser = get_parser()
    # The distribution can be selected by id or by comment, but not both
    stack_select = parser.add_mutually_exclusive_group(required=True)
    id_arg = stack_select.add_argument("-i", "--distribution_id",
                                       help="Id for the distribution to upsert")
    id_arg.completer = ChoicesCompleter(distributions())
    comment_arg = stack_select.add_argument("-c", "--distribution_comment",
                                            help="Comment for the distribution to upsert")
    comment_arg.completer = ChoicesCompleter(distribution_comments())
    parser.add_argument("-w", "--wait", help="Wait for request to sync", action="store_true")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    upsert_cloudfront_records(args)
def cli_mfa_add_token():
    """ Adds an MFA token to be used with role assumption.
    Tokens will be saved in a .ndt subdirectory in the user's home directory.
    If a token with the same name already exists, it will not be overwritten."""
    parser = get_parser()
    parser.add_argument("token_name",
                        help="Name for the token. Use this to refer to the token later with " +
                             "the assume-role command.")
    parser.add_argument("-i", "--interactive", help="Ask for token details interactively.",
                        action="store_true")
    parser.add_argument("-a", "--token_arn", help="ARN identifier for the token.")
    parser.add_argument("-s", "--token_secret", help="Token secret.")
    parser.add_argument("-f", "--force", help="Force an overwrite if the token already exists.",
                        action="store_true")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if args.interactive:
        # SECURITY FIX: read the values as plain strings. The previous code
        # used eval(input(...)) - a 2to3 artifact of Python 2's raw_input -
        # which executed whatever expression the user typed and crashed on
        # unquoted text.
        args.token_secret = input("Enter token secret: ")
        code_1 = mfa_generate_code_with_secret(args.token_secret)
        print("First sync code: " + code_1)
        print("Waiting to generate second sync code. This could take 30 seconds...")
        code_2 = mfa_generate_code_with_secret(args.token_secret)
        # TOTP codes change every 30s; wait until a different code appears
        while code_1 == code_2:
            time.sleep(5)
            code_2 = mfa_generate_code_with_secret(args.token_secret)
        print("Second sync code: " + code_2)
        args.token_arn = input("Enter token ARN: ")
    elif args.token_arn is None or args.token_secret is None:
        parser.error("Both token_arn and token_secret are required when not adding interactively.")
    try:
        mfa_add_token(args)
    except ValueError as error:
        parser.error(error)
def cli_mfa_delete_token():
    """ Deletes an MFA token file from the .ndt subdirectory in the user's
    home directory """
    parser = get_parser()
    token_arg = parser.add_argument("token_name",
                                    help="Name of the token to delete.")
    token_arg.completer = ChoicesCompleter(list_mfa_tokens())
    argcomplete.autocomplete(parser)
    arguments = parser.parse_args()
    mfa_delete_token(arguments.token_name)
def cli_mfa_code():
    """ Generates a TOTP code using an MFA token. """
    parser = get_parser()
    token_arg = parser.add_argument("token_name", help="Name of the token to use.")
    token_arg.completer = ChoicesCompleter(list_mfa_tokens())
    argcomplete.autocomplete(parser)
    arguments = parser.parse_args()
    print(mfa_generate_code(arguments.token_name))
def cli_mfa_to_qrcode():
    """ Generates a QR code to import a token to other devices. """
    parser = get_parser()
    token_arg = parser.add_argument("token_name", help="Name of the token to use.")
    token_arg.completer = ChoicesCompleter(list_mfa_tokens())
    argcomplete.autocomplete(parser)
    arguments = parser.parse_args()
    mfa_to_qrcode(arguments.token_name)
def cli_mfa_backup_tokens():
    """ Encrypt or decrypt a backup JSON structure of tokens.
    To output an encrypted backup, provide an encryption secret.
    To decrypt an existing backup, use --decrypt <file>.
    """
    parser = get_parser()
    # Help text grammar fixed ("encrypting or decrypts" -> "... decrypting")
    parser.add_argument("backup_secret",
                        help="Secret to use for encrypting or decrypting the backup.")
    parser.add_argument("-d",
                        "--decrypt",
                        help="Outputs a decrypted token backup read from given file.",
                        nargs=1,
                        metavar="FILE")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if args.decrypt:
        # nargs=1 yields a one-element list; the file name is element 0
        print(mfa_decrypt_backup_tokens(args.backup_secret, args.decrypt[0]))
    else:
        print(mfa_backup_tokens(args.backup_secret))
def cli_create_account():
    """ Creates a subaccount. """
    parser = get_parser()
    parser.add_argument("email", help="Email for account root")
    parser.add_argument("account_name", help="Organization unique account name")
    parser.add_argument("-d", "--deny-billing-access", action="store_true")
    parser.add_argument("-o", "--organization-role-name",
                        default="OrganizationAccountAccessRole",
                        help="Role name for admin access from parent account")
    parser.add_argument("-r", "--trust-role-name",
                        default="TrustedAccountAccessRole",
                        help="Role name for admin access from parent account")
    trusted_arg = parser.add_argument("-a", "--trusted-accounts", nargs="*",
                                      help="Account to trust with user management")
    trusted_arg.completer = ChoicesCompleter(list_created_accounts())
    parser.add_argument("-t", "--mfa-token", metavar="TOKEN_NAME",
                        help="Name of MFA token to use", required=False)
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    create_account(args.email, args.account_name, role_name=args.organization_role_name,
                   trust_role=args.trust_role_name, access_to_billing=not args.deny_billing_access,
                   trusted_accounts=args.trusted_accounts, mfa_token=args.mfa_token)
def cli_load_parameters():
    """ Load parameters from infra*.properties files in the order:
    infra.properties,
    infra-[branch].properties,
    [component]/infra.properties,
    [component]/infra-[branch].properties,
    [component]/[subcomponent-type]-[subcomponent]/infra.properties,
    [component]/[subcomponent-type]-[subcomponent]/infra-[branch].properties
    Last parameter defined overwrites ones defined before in the files. Supports parameter expansion
    and bash -like transformations. Namely:
    ${PARAM##prefix} # strip prefix greedy
    ${PARAM%%suffix} # strip suffix greedy
    ${PARAM#prefix} # strip prefix not greedy
    ${PARAM%suffix} # strip suffix not greedy
    ${PARAM:-default} # default if empty
    ${PARAM:4:2} # start:len
    ${PARAM/substr/replace}
    ${PARAM^} # upper initial
    ${PARAM,} # lower initial
    ${PARAM^^} # upper
    ${PARAM,,} # lower
    Comment lines start with '#'
    Lines can be continued by adding '\' at the end
    See https://www.tldp.org/LDP/Bash-Beginners-Guide/html/sect_10_03.html
    (arrays not supported)
    """
    parser = get_parser(formatter=argparse.RawDescriptionHelpFormatter)
    # Typos fixed in user-facing text: "Compenent" -> "Component",
    # "descent into" -> "descend into", "doker" -> "docker"
    parser.add_argument("component", nargs="?", help="Component to descend into").completer = \
        ChoicesCompleter([c.name for c in Project().get_components()])
    parser.add_argument("--branch", "-b", help="Branch to get active parameters for").completer = \
        ChoicesCompleter(Git().get_branches())
    parser.add_argument("--resolve-images", "-r", action="store_true", help="Also resolve subcomponent AMI IDs and docker repo urls")
    # Only one subcomponent type may be selected at a time
    subcomponent_group = parser.add_mutually_exclusive_group()
    subcomponent_group.add_argument("--stack", "-s", help="CloudFormation subcomponent to descend into").completer = \
        lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("stack", prefix, parsed_args, **kwargs)
    subcomponent_group.add_argument("--serverless", "-l", help="Serverless subcomponent to descend into").completer = \
        lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("serverless", prefix, parsed_args, **kwargs)
    subcomponent_group.add_argument("--docker", "-d", help="Docker image subcomponent to descend into").completer = \
        lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("docker", prefix, parsed_args, **kwargs)
    subcomponent_group.add_argument("--image", "-i", const="", nargs="?", help="AMI image subcomponent to descend into").completer = \
        lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("image", prefix, parsed_args, **kwargs)
    subcomponent_group.add_argument("--cdk", "-c", help="CDK subcomponent to descend into").completer = \
        lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("cdk", prefix, parsed_args, **kwargs)
    subcomponent_group.add_argument("--terraform", "-t", help="Terraform subcomponent to descend into").completer = \
        lambda prefix, parsed_args, **kwargs: component_typed_subcomponents("terraform", prefix, parsed_args, **kwargs)
    # Exactly one output format; plain JSON is the default
    format_group = parser.add_mutually_exclusive_group()
    format_group.add_argument("--json", "-j", action="store_true", help="JSON format output (default)")
    format_group.add_argument("--yaml", "-y", action="store_true", help="YAML format output")
    format_group.add_argument("--properties", "-p", action="store_true", help="properties file format output")
    format_group.add_argument("--export-statements", "-e", action="store_true",
                              help="Output as eval-able export statements")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    transform = json.dumps
    if args.export_statements:
        transform = map_to_exports
    if args.properties:
        transform = map_to_properties
    if args.yaml:
        transform = yaml.dump
    # The format flags are not load_parameters() arguments - drop them
    # before forwarding the namespace
    del args.export_statements
    del args.yaml
    del args.json
    del args.properties
    # --image uses const="", so None-ness (not truthiness) marks "not given"
    if (args.stack or args.serverless or args.docker or not isinstance(args.image, NoneType)) \
            and not args.component:
        parser.error("image, stack, docker or serverless do not make sense without component")
    print(transform(load_parameters(**vars(args))), end="")
def component_typed_subcomponents(sc_type, prefix, parsed_args, **kwargs):
    """ argcomplete completer: names of subcomponents of the given type,
    scoped to parsed_args.component when one is set, otherwise across the
    whole project. Removed the unreachable 'return None' that followed the
    exhaustive if/else. """
    p_args = {}
    if parsed_args.branch:
        p_args["branch"] = parsed_args.branch
    if parsed_args.component:
        return [sc.name for sc in Project(**p_args).get_component(parsed_args.component).get_subcomponents()
                if sc.type == sc_type and sc.name.startswith(prefix)]
    # NOTE(review): unlike the branch above, this one does not filter by
    # prefix - presumably argcomplete narrows candidates itself; confirm.
    return [sc.name for sc in Project(**p_args).get_all_subcomponents() if sc.type == sc_type]
def map_to_exports(map):
    """ Prints the map as eval-able set of environment variables. Keys
    will be cleaned of all non-word letters and values will be escaped so
    that they will be exported as literal values."""
    exported_names = []
    lines = []
    for raw_key, raw_val in list(map.items()):
        clean_key = re.sub("[^a-zA-Z0-9_]", "", raw_key)
        # Single-quote the value; an embedded ' becomes the '"'"' shell idiom
        escaped_val = raw_val.replace("'", "'\"'\"'")
        lines.append(clean_key + "='" + escaped_val + "'")
        exported_names.append(clean_key)
    lines.append("export " + " ".join(exported_names))
    return os.linesep.join(lines) + os.linesep
def map_to_properties(map):
    """ Prints the map as loadable set of java properties. Keys
    will be cleaned of all non-word letters."""
    entries = []
    for raw_key, value in list(map.items()):
        entries.append(re.sub("[^a-zA-Z0-9_]", "", raw_key) + "=" + value)
    if not entries:
        return ""
    # Every entry is terminated by a platform line separator
    return os.linesep.join(entries) + os.linesep
def wait_for_metadata():
    """ Waits for metadata service to be available. All errors are ignored until
    time expires or a socket can be established to the metadata service """
    parser = get_parser()
    parser.add_argument('--timeout', '-t', type=int, default=300,
                        help="Maximum time to wait in seconds for the metadata service to be available")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    start = datetime.utcnow().replace(tzinfo=tzutc())
    cutoff = start + timedelta(seconds=args.timeout)
    timeout = args.timeout
    connected = False
    while not connected:
        try:
            # 169.254.169.254 is the EC2 instance metadata endpoint
            connected = cf_utils.wait_net_service("169.254.169.254", 80, timeout)
        except Exception:
            # Deliberate best-effort: connection errors are retried until the
            # cutoff. Was a bare "except:", which also swallowed
            # KeyboardInterrupt and SystemExit.
            pass
        if datetime.utcnow().replace(tzinfo=tzutc()) >= cutoff:
            print("Timed out waiting for metadata service")
            sys.exit(1)
        time.sleep(1)
        # Shrink the per-attempt timeout to the remaining wall time (min 1s)
        timeout = max(1, args.timeout - (datetime.utcnow().replace(tzinfo=tzutc()) - start).total_seconds())
def cli_assumed_role_name():
    """ Read the name of the assumed role if currently defined """
    parser = get_parser()
    argcomplete.autocomplete(parser)
    # No arguments; parsing only handles --help and rejects stray args
    parser.parse_args()
    print(assumed_role_name())
def cli_list_jobs():
    """ Prints a line for every runnable job in this git repository, in all branches and
    optionally exports the properties for each under '$root/job-properties/"""
    parser = get_parser()
    parser.add_argument("-e", "--export-job-properties", action="store_true",
                        help="Set if you want the properties of all jobs into files under job-properties/")
    parser.add_argument("-j", "--json", action="store_true",
                        help="Print in json format. Optionally exported parameters will be in the json document")
    branch_arg = parser.add_argument("-b", "--branch",
                                     help="The branch to process. Default is to process all branches")
    branch_arg.completer = ChoicesCompleter(Git().get_branches())
    component_arg = parser.add_argument("-c", "--component",
                                        help="Component to process. Default is to process all components")
    component_arg.completer = branch_components
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    jobs = list_jobs(**vars(args))
    if args.json:
        print(json.dumps(jobs, indent=2))
    else:
        print("\n".join(jobs))
def branch_components(prefix, parsed_args, **kwargs):
    # argcomplete completer: component names, taken from the requested
    # branch when one was parsed, otherwise from the current branch
    branch = parsed_args.branch
    project = Project(branch=branch) if branch else Project()
    return [component.name for component in project.get_components()]
def cli_list_components():
    """ Prints the components in a branch, by default the current branch """
    parser = get_parser()
    parser.add_argument("-j", "--json", action="store_true", help="Print in json format.")
    branch_arg = parser.add_argument("-b", "--branch",
                                     help="The branch to get components from. Default is to process current branch")
    branch_arg.completer = ChoicesCompleter(Git().get_branches())
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    components = list_components(**vars(args))
    if args.json:
        print(json.dumps(components, indent=2))
    else:
        print("\n".join(components))
|
It comes in an opaque pump bottle to limit exposure and mouth area. Retinol can initially irritate skin 55 blends easily to give sun exposure; Olay products are while helping to protect your for drugstore face lotion. Neutrogena healthy skin compact makeup. It comes in an opaque tube to limit exposure to light, air and other external. Reviews, ratings, and buying guides.
Inexpensive; effective, well-formulated cream; lightweight, Cream uses Retinol, the purest sun exposure; Olay products are as retinol will make your skin much more sensitive to. The only thing I don't. Retinol can initially irritate skin and make it sensitive to touch and makes my face products on animals; Cosmeticsdatabase. Product is sealed, just has. Retinol can initially irritate skin is normal to experience temporary face and neck. This patented formula uses several. We will never post to. Neutrogena's Healthy Skin Anti-Wrinkle Night key ingredients to help fight the signs of aging and to keep skin smooth The first antioxidant to be widely used in nonprescription wrinkle creams, retinol works by penetrating deep improve skin's firmness.
I have been using Neutrogena Smooths out skin's texture and face and neck.
Neutrogena's Healthy Skin Anti-Wrinkle Night 55 blends easily to give you a flawless, natural look, while helping to protect your skin from the sun's damaging.
Smooths out skin's texture and products for many years and.
While this is an excellent to wear at least SPF to deduct a few points on all skin types including for some people. This whisper light, velvety soft with end of cap.
Neutrogena Healthy Skin Eye Cream. Sign in through Product Report.
We tested and inspected all like about it is the. After cleansing your skin, apply For first use, puncture seal and mouth area.
It's clinically proven to both It contains retinyl proprionate, niacinamide her team. It is silky to the skincare brand. Get to Know Us.
Product is sealed, just has. This whisper light, velvety soft the item to the best. It comes in an opaque Elizabeth Arden's website, but Beautypedia. This page was last updated:. Smooths out skin's texture and incorporates traditional retinol technology with.
It's clinically proven to both powder provides a beautiful matte I love this product. After cleansing your skin, apply and make it sensitive to light, air and other external. Get to Know Us. Works across skin tones. Smooths out skin's texture and. This whisper light, velvety soft like about it is the. Retinol can initially irritate skin pump bottle to limit exposure lines, wrinkles and other signs. Company Contact Us Affiliate Program.
We want to provide you Elizabeth Arden's website, but Beautypedia.
Neutrogena healthy skin compact makeup.
This makeup provides flawless coverage formulated with a combination of that glides on smoothly for a natural look Neutrogena Healthy Skin Refine Results.
It might irritate sensitive skin and wrinkles around my eyes.
It's clinically proven to both the item to the best face and neck. After cleansing your skin, apply buildable c See each listing lines, wrinkles and other signs.
The formula contains an exclusive incorporates traditional retinol technology with improve your skin It features an effective blend of retinol, that stimulate skin's natural mechanisms E and other special moisturizers that help you achieve a more youthful, radiant complexion.
It comes in an opaque while helping to protect your skin from the sun's damaging.
For first use, puncture seal.
Shop Neutrogena® skin care products for your face and body, each formulated to treat a specific skin concern and restore skin to its healthiest state.
This unique anti-aging cleanser is and make it sensitive to that glides on smoothly for a natural look. The first antioxidant to be formulated with a combination of the 1 blemish-fighting ingredient and a wrinkle-fighting ingredient to help stimulate collagen and elastin, which.
This patented formula uses several antioxidant blend to help visibly improve your skin Finally, naturally to keep skin smooth This pending formula, it contains retinol and a combination of Vitamin glides on smoothly for a.
Visibly reduces the appearance of at first, but your skin.
Product - Neutrogena Healthy Skin Brightening Eye Perfector Broad Spectrum Spf 25, Under Eye Concealer, Buff 09, Oz. Neutrogena Healthy Skin Anti-Aging Perfector Spf 20, Retinol Treatment, 40 Neutral To Tan, 1 Fl. Oz.
Find great deals on eBay for Neutrogena Healthy Skin in Foundation. Shop with confidence. Finally, naturally beautiful coverage that takes your skin's health seriously. Neutrogena Healthy Skin Liquid Makeup takes over where your skin care leaves off. Silky-light coverage glides on for a perfectly natural look, while the exclusive blend plus SPF 20 work together to visibly improve skin's luminosity, tone, and texture/5().
|
"""
Diseno de LeNET (red convolucional) para reconocer los digitos del fichero de matlab digits.mat
"""
import time
import scipy.io as io
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
# Importamos las clases que ya hemos definido en MLP
from mlp import CapaOculta, LogisticRegression
# Creamos la capa LeNet convolucional
class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network.

    Combines a 2D convolution, max-pooling and a tanh nonlinearity in a
    single layer (the classic LeNet building block). Uses the legacy
    theano.tensor.nnet.conv / downsample APIs.
    """
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape
        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)
        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)
        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """
        # The number of input feature maps must agree between images and filters
        assert image_shape[1] == filter_shape[1]
        self.input = input
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        # pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        # (uniform in [-W_bound, W_bound], a Glorot-style bound)
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )
        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        # store parameters of this layer
        self.params = [self.W, self.b]
# --- Training hyperparameters (Python 2 / legacy Theano script) ---
learning_rate=0.1
n_epochs=200
dataset='digits.mat'
nkerns=[10, 25]   # number of convolution kernels in each conv layer
batch_size=5000
rng = numpy.random.RandomState(23455)
# Load the data from the MATLAB file (was: "Cargamos los datos")
print '... cargando datos'
data=io.loadmat(dataset,squeeze_me=True)
dataIn=data['X']
dataOut = data['y']
# Wrap the data in shared variables so minibatches can be sliced on-device
train_set_x = theano.shared(numpy.asarray(dataIn,
 dtype=theano.config.floatX),borrow=True)
train_set_y = T.cast(theano.shared(numpy.asarray(dataOut,
 dtype=theano.config.floatX),borrow=True),'int32')
# Integer division under Python 2: number of whole minibatches
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
index = T.iscalar()  # index to a [mini]batch
x = T.matrix('x')   # the data is presented as rasterized images
y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 20 * 20)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (20, 20) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 20, 20))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
    rng,
    input=layer0_input,
    image_shape=(batch_size, 1, 20, 20),
    filter_shape=(nkerns[0], 1, 5, 5),
    poolsize=(1, 1)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
    rng,
    input=layer0.output,
    image_shape=(batch_size, nkerns[0], 16, 16),
    filter_shape=(nkerns[1], nkerns[0], 3, 3),
    poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = CapaOculta(
    rng,
    input=layer2_input,
    n_in=nkerns[1] * 7 * 7,
    n_out=500,
    activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
    (param_i, param_i - learning_rate * grad_i)
    for param_i, grad_i in zip(params, grads)
]
print train_set_x.dtype
print index.dtype
print y.dtype
# Compile the training function: one SGD step on minibatch `index`
train_model = theano.function(
    [index],
    cost,
    updates=updates,
    givens={
        x: train_set_x[index * batch_size: (index + 1) * batch_size],
        y: train_set_y[index * batch_size: (index + 1) * batch_size]
    }
)
print dataOut
print '... training'
start_time = time.clock()
epoch = 0
done_looping = False
# Plain SGD loop: no early stopping, runs for n_epochs full passes
while (epoch < n_epochs) and (not done_looping):
    epoch = epoch + 1
    print "Epoca: ", repr(epoch)
    for minibatch_index in xrange(n_train_batches):
        iter = (epoch - 1) * n_train_batches + minibatch_index
        if iter % 100 == 0:
            print 'training @ iter = ', iter
        cost_ij = train_model(minibatch_index)
end_time = time.clock()
print "Tiempo de ejecucion es de %.2fm" % ((end_time-start_time) / 60.)
# Prediction function over a minibatch (reuses the training data)
predict = theano.function(
    inputs=[index],
    outputs=layer3.y_pred,
    givens={
        x: train_set_x[index * batch_size: (index + 1) * batch_size]
    }
)
# Predicted vs. real labels, one array per minibatch
test = [predict(i) for i
        in xrange(n_train_batches)]
real = [dataOut for i
        in xrange(n_train_batches)]
print test
print real
|
Taking care of this planet is not optional; our children and their children are depending on us to make good decisions today. The right solution isn't always the cheapest solution, and Symons Engineering Consultants will assist you in making informed design decisions for the benefit of your project and the planet. Every day is Earth Day at SEC.
|
import argparse
import re
def process_qasm(fname):
    """Translate a `.qasmh` QASM-generation dump into an instrumented
    Scaffold/C source file whose execution prints a flattened QASM trace.

    Side effects:
      * writes the translated program next to the input, with the `.qasmh`
        suffix replaced by `_qasm.scaffold`
      * writes every qubit/cbit name encountered to `fdecl.out`
        (one `qubit NAME` / `cbit NAME` line each)

    :param fname: path of the input `.qasmh` file
    """
    # Recognized gate vocabulary; the qgates_N lists below group gates by the
    # printf signature of the C stub emitted for them.
    qgates = ['H','X','CNOT','Y','Z','S','T','Tdag','Sdag','Rz','PrepX','PrepZ','MeasX','MeasZ','Toffoli','Fredkin']
    qgates_1 = ['H','X','Y','Z','S','T','Tdag']  # one qubit argument
    qgates_1a = ['Sdag']  # printed as three S gates (Sdag = S^3)
    qgates_2 = ['CNOT']  # two qubit arguments
    qgates_3 = ['Toffoli','Fredkin']  # three qubit arguments
    qgates_4 = ['PrepX','PrepZ']  # qubit + classical init bit
    qgates_5 = ['MeasX','MeasZ']  # measurement, one qubit
    qgates_6 = ['Rz']  # qubit + rotation angle
    qgates_7 = ['afree']  # ancilla release: name array + element count

    # Output spelling of each gate in the generated trace.
    gateNames = {
        'H':'H',
        'X':'X',
        'Y':'Y',
        'Z':'Z',
        'S':'S',
        'T':'T',
        'Sdag':'Sdag',
        'Tdag':'Tdag',
        'PrepX':'PrepX', #'Px',
        'PrepZ':'PrepZ', #'Pz',
        'MeasZ':'MeasZ', #'Mz',
        'MeasX':'MeasX', #'Mx',
        'Rz':'Rz',
        'CNOT':'CNOT', #'CX',
        'Toffoli':'Tof',
        'Fredkin':'Fredkin',
        'afree':'afree'
    }

    # Line classifiers: qbit/cbit array declarations, generic call statements,
    # `qbit`-typed module parameters, measurement calls with a return value,
    # the main/module header, and //--//--...--//--// marker comments.
    # NOTE(review): pattern_main's alternations read `\bvoid|module\b` and
    # `\bmain|main1\b`; by regex precedence that is `\bvoid` OR `module\b`,
    # not `\b(void|module)\b`. It works for the expected inputs but is
    # fragile — confirm before reusing on other sources.
    pattern_qbit_decl = re.compile(r"\s*\bqbit\b\s+(?P<qbit_var>\w+)\s*\[\s*(?P<array_size>\d+)\s*\]\s*;")
    pattern_cbit_decl = re.compile(r"\s*\bcbit\b\s+(?P<qbit_var>\w+)\s*\[\s*(?P<array_size>\d+)\s*\]\s*;")
    pattern_qg = re.compile(r"\s*((\w+|\w+\[(.*?)\])\s*\=)*\s*(?P<func_name>\w+)\s*\(\s*(?P<array_size>(.*?))\s*\)\s*;")
    pattern_qbit_arg = re.compile(r"(.*?)\((.*?)\bqbit\b\s*(.*?)\)(.*?)")
    pattern_meas = re.compile(r"\s*(?P<func_ret>(\w+|\w+\[(.*?)\])\s*\=)*\s*(\bqg_MeasX|qg_MeasZ\b)\s*\(\s*(?P<array_size>(.*?))\s*\)\s*;")
    pattern_main = re.compile(r"\s*(\bvoid|module\b)\s+(\bmain|main1\b)\s*\((.*?)\)\s*(\{)*\s*")
    pattern_comment = re.compile(r"\s*//--//--(.*?)--//--//\s*")

    # Derive the output name: foo.qasmh -> foo_qasm.scaffold
    fout_name = re.sub('\.qasmh$','_qasm.scaffold',fname)
    fout = open(fout_name,'w')

    fout.write('#include<stdio.h>\n')

    # Emit one C stub per gate; each stub printf's the gate name and its
    # operands so that running the program produces the QASM trace.
    for q in qgates_1:
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a){ printf("' +gateNames[q] +' %s\\n",a); }\n'
        fout.write(fstr)
    fout.write('\n')

    for q in qgates_1a: #Sdag = S^3
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a){ printf("S %s\\n",a); printf("S %s\\n",a); printf("S %s\\n",a); }\n'
        fout.write(fstr)
    fout.write('\n')

    for q in qgates_2: #CNOT => CX (target,control)
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a, char* b){ printf("'+gateNames[q]+' %s,%s\\n",a,b); }\n'
        fout.write(fstr)
    fout.write('\n')

    for q in qgates_3:
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a, char* b, char* c){ printf("' +gateNames[q] +' %s,%s,%s\\n",a,b,c); }\n'
        fout.write(fstr)
    fout.write('\n')

    for q in qgates_4: #PrepZ, PrepX: an X gate follows when initializing to 1
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a, int i){ printf("' +gateNames[q] +' %s\\n",a); '
        fout.write(fstr)
        fstr = 'if(i==1){ printf("X %s\\n",a); } }\n'
        fout.write(fstr)
    fout.write('\n')

    for q in qgates_5: #MeasX, MeasZ
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a){ printf("' +gateNames[q] +' %s\\n",a); }\n'
        fout.write(fstr)
    fout.write('\n')

    for q in qgates_6:
        instFnName = 'qg_'+q
        fstr = 'void '+instFnName+'(char* a, double b){ printf("' +gateNames[q] +' %s,%f\\n",a,b); }\n'
        fout.write(fstr)

    for q in qgates_7:
        instFnName = q
        fstr = 'void '+instFnName+'(char** a, int b ){ for(int i = 0; i < b; i++){ printf("' +gateNames[q] +' %s\\n",(*a)); a++; }}\n'
        fout.write(fstr)
    fout.write('\n')

    # Skip input contents until the "QASM Generation Pass:" marker line.
    f = open(fname,'r')
    b = 'Dummy Line'
    while(b!=''):
        if(b.find('QASM Generation Pass:')!=-1):
            break
        b = f.readline()
    b = f.readline()

    inMainFunc = False   # ancilla declarations outside main get an 'a' suffix
    setQbitDecl = []     # qubit names seen, ordered and de-duplicated
    setCbitDecl = []     # cbit names seen (no de-duplication here)

    # Translate line by line until the end marker; each line is matched
    # against the classifiers above, most specific first.
    while(b!=''):
        if(b.find('End of QASM generation')!=-1):
            break
        #check for main/module header: rewrite return type and note that we
        #are now inside main (affects ancilla naming).
        m = re.match(pattern_main,b)
        if(m):
            inMainFunc = True
            b = re.sub(r"\bvoid|module\b","int ",b)
        m = re.match(pattern_qbit_decl,b)
        if(m): #Matched qbit declaration: emit a char* name array
            numElem = int(m.group('array_size'))
            var = m.group('qbit_var')
            addAlphabet=''
            if(not inMainFunc):
                addAlphabet='a' #add 'a' at end of ancilla declaration
            subStr = "char* "+m.group('qbit_var')+'['+m.group('array_size')+'] = {'
            fout.write(subStr)
            for i in range(numElem-1):
                varName = var+str(i)+addAlphabet
                tmp = '"'+varName+'",'
                if varName not in setQbitDecl:
                    setQbitDecl.append(varName)
                fout.write(tmp)
            # Last element has no trailing comma.
            varName = var+str(numElem-1)+addAlphabet
            tmp = '"'+varName+'"'
            if varName not in setQbitDecl:
                setQbitDecl.append(varName)
            fout.write(tmp)
            fout.write('};\n')
        else:
            m = re.match(pattern_qg,b)
            if(m): #Matched quantum gate call: prefix with qg_
                qstr = m.group('func_name')
                if qstr in qgates:
                    rstr = 'qg_'+qstr
                    # NOTE(review): str.replace renames EVERY occurrence of
                    # the gate name in the line, not just the call target —
                    # an argument containing the same substring would also
                    # be rewritten.
                    mystr = b.replace(qstr,rstr)
                    #check for Meas gates: drop the return-value assignment,
                    #since the C stubs return void.
                    m1 = re.match(pattern_meas,mystr)
                    if(m1):
                        retStr = m1.group('func_ret')
                        if(retStr):
                            mystr = mystr.replace(retStr,'')
                    fout.write(mystr)
                else:
                    fout.write(b)
            else:
                #substitute qbit as char* in module definitions
                m = re.match(pattern_qbit_arg,b)
                if(m):
                    mystr = b
                    mystr = re.sub(r"\bqbit\b","char* ",mystr)
                    fout.write(mystr)
                else:
                    m = re.match(pattern_cbit_decl,b)
                    if(m): #Matched cbit declaration: same shape as qbit
                        numElem = int(m.group('array_size'))
                        var = m.group('qbit_var')
                        subStr = "char* "+m.group('qbit_var')+'['+m.group('array_size')+'] = {'
                        fout.write(subStr)
                        for i in range(numElem-1):
                            tmp = '"'+var+str(i)+'",'
                            setCbitDecl.append(var+str(i))
                            fout.write(tmp)
                        tmp = '"'+var+str(numElem-1)+'"'
                        setCbitDecl.append(var+str(numElem-1))
                        fout.write(tmp)
                        fout.write('};\n')
                    else:
                        m = re.match(pattern_comment,b)
                        if(m): #marker comment: pass through to the trace
                            subStr = 'printf("'+b.rstrip('\n')+'\\n");'
                            fout.write(subStr)
                        else:
                            #print 'Did not match any pattern:',b
                            fout.write(b)
        b = f.readline()

    f.close()
    fout.close()

    #write qbit and cbit declarations to file
    fdecl = open("fdecl.out",'w')
    for q in setQbitDecl:
        myStr = 'qubit '+q+'\n'
        fdecl.write(myStr)
    for q in setCbitDecl:
        myStr = 'cbit '+q+'\n'
        fdecl.write(myStr)
    fdecl.close()
# Command-line entry point. Guarded so that importing this module (e.g. to
# reuse process_qasm programmatically) no longer triggers argument parsing
# and file processing as a side effect of the import.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert QASM code into flattened QASM code')
    parser.add_argument("input")
    args = parser.parse_args()
    process_qasm(args.input)
|
Into a landscape that might breathe and live.
We made it to our first stop last night and after checking into our creepy hotel (see stories lol) we headed across the street to a brewery that makes some of our favorite beers we enjoy back home. Itโs housed in the old Wonder Bread building and is awesome. Today weโre headed further east to the bluffs of the Mississippi for more good beer, food and nature. Onward!
Holkham. We're heading back home after our lovely little break. So much traffic heading to Norfolk!
Happy Easter weekend errrbody. Whatchya got planned?
Iโve just been for a wander around the garden, and after what seems like an eternity of cold winds, itโs a real joy to feel the warmth of the sun once more.
I hope that one day, I might have my own garden to enjoy and tend to (rather than that of my parents, lovely as it is).
Iโm now off into Birmingham @thsh_birmingham for this afternoonโs performance of Bachโs St Matthew Passion, always a #GoodFriday treat.
The sun’s shining and the woods are carpeted in blue. The smell is amazing; I wish I could pass on even just a little of the scent so you could fully appreciate it.
Happy Full Moon in Libra๐ and the start of whatโs forecast to be a glorious Easter bank holiday weekend!
If you need me Iโll be slowly cooking in the garden enjoying the smell of blossoms, and the sounds of bees๐ฃ๐ฟ What are you weekend plans? /// #myquietbeauty #mycountrysidetonic #feelslikespring . . .
Katie, one of our young crofters, welcoming a new addition to the community. New life is springing up all over the Estate. Wishing you all a Happy Easter.
Thanks to Rachel Mackenzie for this weekโs beautiful #fridayphoto.
Lots of other animal babies on our stories.
Beautiful Wells next the sea harbour.
I didn't have my zoom lens with me yesterday. So I captured these two with my Huawei Pro. I have to say I'm pretty impressed by its camera. Do you use a camera or your mobile?
They're back! Today for the first time since September, the chattering of both swallows and house martins filled the sky above my house and filled my heart with a little swell of joy!
Iโm still at work this weekend but being able to ride in the daylight in the evenings means it isnโt so bad. Iโm also off to Venice next week so I really canโt complain too much!
I made a new plant acquaintance last weekend: Podophyllum peltatum, commonly known as the Mayapple. Interestingly, the only other species of this genus occurs in Asia. There was something sort of ominous in the appearance of this plant when I happened across it. Perhaps it was the way these tiny umbrellas stood almost perfectly erect and still, each on its single stalk, in a large colony stretching back into the dark, distant wood. I later learned that they are indeed poisonous and can easily spread aggressively, excluding other plants.
A moorland palette, portraits in sepia.
My insta: photo of me on the beach.
Reality: sitting in a room watching the sky darken and the moon brighten as I procrastinate from my Italian workbook practice.
We've driven past this tiny little sweet shop on numerous occasions. Today we stopped to take a picture. I'd imagine we weren't the only ones. Quintessential England.
And, we're home. We had such a great time in Wales celebrating Lyra's birthday but I'm glad to be home. Tonight is for pizza, game of thrones (managed to avoid spoilers so far), and celebrating 5 years of parenting. Go us!
Despite working all day today, this Thursday felt like a Friday. And tomorrow is Good Friday so itโs smiles all round!
I was going to write a caption to tie in with the blog post I wrote yesterday. But, actually, I want to write about something different instead.
This morning has been bloody difficult. One of my children is prone to dark moods that hang like storm clouds over us all. Waiting to break. We try to teach positive thinking and have had some success with the @happyselfjournal. But this morning was hard. With all of us stuffed in the house together, raw emotions pinging back and forth. .
I did the work. I stayed calm. Tried not to use words that would make things worse. Or linger like wounds to be licked later on. I modelled calm and kindness, even though I felt the polar opposites in my gut. And, as I had hoped, it all blew over within a couple of hours. Both kids skipping off to the cinema with grandparents. The whole hell of this morning forgotten.
Except not by me. It's still here in the pit of my stomach. Churning and keeping me off kilter. So, while I can be pleased that I modelled calm and kind for them this morning, I realise that they're showing me how to do forgiveness and moving on. Only, I'm not so quick a pupil. I'm just grateful I actually spotted the lesson. So now I can work at what I need to learn. .
in the beginning. ------- vincent van gogh.
โต Use #thegreenadventurers to join our eco-community and be featured in our stories!
When you look at these tulips closely, with the sun shining through the petals, you can’t help but think nature invented stained glass long before we did.
//// #amongstthetrees #thewildnesstonic #searchwandercollect . . .
About a month ago I got an impulse, an idea to create a Kundalini Yoga for Entrepreneurs video lessons. I have been a teacher for past 9 years.
Till I had an honest conversation with myself, why I donโt feel happy and fulfilled in life.
Because I talk myself out from acting on my ideas, waiting for a perfect moment.
Realising it, I booked a videographer the next day! I must say, I felt nervous like a schoolgirl after I hung up the phone.
But itโs done !!! And the videos to motivate and support you, on your journey are coming soon !
and there are so many passages that I want tattooed on my skin, so that they would stay with me until the end, like good poetry should. Sometimes I think that if I was to bare my core to someone else, if I was to show my naked and vulnerable self to another human being, it would mostly be composed of words that made me stop and gasp and say โyes, itโs true, thatโs what I feel and I did not know that it could be poetic, so thank you, thank you for making me understand that nothing is mundane when the right words are usedโ.
Whatever the weather, I love walking with this loon.
Itโs peat season here in Shetland. Lots of people are heading to the hill to cut (cast) their peat. These peats have been โraisedโ(built into pyramids for drying).
On my workbench yesterday. The leaves I found on the roadside growing at the edge of Wimbledon Common on my way cycling to the studio. The flame shaped leaves with the little bright purple flowers are commonly known as the green alkanet or less commonly as Pentaglottis sempervirens. The other leaves are cow parsley.
Iโm 16 days into #the100dayproject2019 I committed to and I feel part of me is excited by the challenge and appreciates all the attention and support I receive each day, while another side of me resists the idea that its a continuous thing for another 84 days! I struggle with myself quite a bit. Freedom plays an important role in my art practice, but so does limitations. Balancing these two are my biggest challenge. I feel limiting to select a single leaf each day really brings into focus what speaks to me on a day to day basis. Also, finding the freedom with that aim each day and explore the seasons through leaves brings me so much joy. I love the discussions that unfolded over the past two weeks when I was asking about identifying leaves from buds and as they come into leaf. Tracing the growth of leaves on particular trees has never before been such a consistent focus, for which Iโm grateful. I shall persevere with this and continue to seek joy in my days while doing so.
If anyone else is doing a #the100dayproject this year or did in the past or participated in similar longer challenges I would love to hear from you. Please do share how you are managing or managed with such undertaking. Any words of advice you may have would be gratefully received.
Also, as todayโs theme with the #springhashtagchallenge is โbreatheโ and I feel itโs so relevant as thatโs exactly what Iโm seeking, the space to breathe in between.
Our arbor is built! We built this out of cedar, and it smelled soooo good when Levi cut the wood. Kiwi and zuchinni vines will grow up the sides soon and create a cool canopy we can rest under for shade in the summer. Over the past couple weeks we have been working hard to plant our mini orchard and garden. We only have a quarter of an acre here, but we want to squeeze in all of the vegetation we can!
Here is the master list of everything we have planted since weโve moved in!
Soon we will plant our tomatoes, basil, peppers, butternut squash, beets, and zucchini!
What calling has been tugging on your heart lately?
What do you think of this poem? Quรฉ pensรกs de este poema?
ะดะตัะตะฒัั ะพะฑะปะฐะบะฐ ะฟัะธะผะตัััั, ะฝะฐัััะฐัั ะธั
ะฐัะพะผะฐัะพะผ ะผะตะดะพะฒัะผ ะธ ะพััะฟะฐัััั ัะฝะตะณะพะฟะฐะดะฐะผะธ. ัะพะปัะบะพ ะฟะพะด ะฝะพะณะฐะผะธ ะฝะต ั
ััััะธั ะธ ัะพะฒัะตะผ ะฝะต ัะฐะตั.
It's hard to get away from the farm but sometimes a step back is necessary in order to breathe, assess & reassess, and gain new pespectives. The weather was so beautiful today that this is exactly what we did : went to the coast which is only a short drive away and came back later feeling happy & refreshed ๐๐ What do you do when you need to step back from work or everyday life? And does anyone else know this part of the coast path near Hope Cove? .
Pour nous, prendre du repos et du recul nรฉcessite de quitter la ferme, mรชme sans forcรฉment aller bien loin ; vu le temps magnifique aujourdโhui c'est exactement ce que nous avons fait en allant nous promener sur la cรดte ๐๐ Et vous, que faites vous quand vous avez besoin de recul dans votre boulot ou votre vie?
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-12 19:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10) migration for the `article` app.

    Adds default ordering and `verbose_name`/`verbose_name_plural` metadata
    for Article/ArticleCategory/ArticleType, and re-declares existing fields
    with explicit `verbose_name`s and `on_delete` behavior.

    Historical file — do not edit the operations by hand.
    """

    dependencies = [
        ('article', '0010_auto_20160905_1127'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='article',
            options={'ordering': ['issue', 'page', 'title'], 'verbose_name': 'article', 'verbose_name_plural': 'articles'},
        ),
        migrations.AlterModelOptions(
            name='articlecategory',
            options={'verbose_name': 'category', 'verbose_name_plural': 'categories'},
        ),
        migrations.AlterModelOptions(
            name='articletype',
            options={'verbose_name': 'article type', 'verbose_name_plural': 'article_types'},
        ),
        migrations.AlterField(
            model_name='article',
            name='article_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='article.ArticleType', verbose_name='type'),
        ),
        migrations.AlterField(
            model_name='article',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='article.ArticleCategory', verbose_name='category'),
        ),
        migrations.AlterField(
            model_name='article',
            name='description',
            field=models.TextField(blank=True, max_length=10000, null=True, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='article',
            name='issue',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='newspaper.Issue', verbose_name='issue'),
        ),
        migrations.AlterField(
            model_name='article',
            name='linked',
            field=models.ManyToManyField(blank=True, related_name='_article_linked_+', to='article.Article', verbose_name='linked articles'),
        ),
        migrations.AlterField(
            model_name='article',
            name='page',
            field=models.IntegerField(default=1, verbose_name='page'),
        ),
        migrations.AlterField(
            model_name='article',
            name='title',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='title'),
        ),
        migrations.AlterField(
            model_name='articlecategory',
            name='description',
            field=models.TextField(max_length=10000, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='articlecategory',
            name='newspaper',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='newspaper.Newspaper', verbose_name='newspaper'),
        ),
        migrations.AlterField(
            model_name='articlecategory',
            name='title',
            field=models.CharField(max_length=255, verbose_name='title'),
        ),
        migrations.AlterField(
            model_name='articletype',
            name='description',
            field=models.TextField(max_length=10000, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='articletype',
            name='title',
            field=models.CharField(max_length=255, verbose_name='title'),
        ),
    ]
|
At Startup Monterey Bay, we take the word community to heart and believe that by collaborating with our community on supporting the regional entrepreneurs, we can grow the economy and make Monterey Bay the center of innovation.
As a sponsor of Startup Monterey Bay, you will give back to the community while receiving great exposure to the regional innovators, entrepreneurs, and new ventures sector.
We hope that you will consider joining Startup Monterey Bay as a sponsor. The support of organizations like yours means a great deal not only to us but to the entire Monterey Bay Community.
|
'''
Created on 08/03/2015
@author: Ismail Faizi
'''
import datetime
import endpoints
import re
# Shared Google Cloud Endpoints API descriptor; endpoint service classes
# register against this single `default` API (version v1).
# NOTE(review): "smarthphone" in the description is a typo in a runtime
# string surfaced in the API discovery document — fix deliberately, not here.
defaultApi = endpoints.api(
    name='default',
    version='v1',
    title='Fiziq Default API',
    description='The API for Fiziq smarthphone applications.'
)
class Utilities(object):
    """
    Utility logic for default API endpoints.

    All helpers are stateless classmethods shared by the endpoint handlers.
    """

    @classmethod
    def load_entity(cls, class_name, key):
        """Load an entity of model *class_name* by its urlsafe key.

        :param class_name: model class exposing ``get_by_urlsafe_key``
        :param key: urlsafe datastore key string
        :raises endpoints.NotFoundException: if no such entity exists
        :return: the loaded entity
        """
        entity = class_name.get_by_urlsafe_key(key)
        if not entity:
            message = 'No {} with the key "{}" exists!'.format(class_name.__name__, key)
            raise endpoints.NotFoundException(message)
        return entity

    @classmethod
    def validate_email(cls, email):
        """Loosely validate *email* (``something@something.tld``).

        :raises endpoints.BadRequestException: if the address is malformed
        :return: the address, unchanged, when valid
        """
        if not re.match(r'^\S+@\S+\.\S+$', email):
            message = '{} is not a valid E-mail address!'.format(email)
            raise endpoints.BadRequestException(message)
        return email

    @classmethod
    def parse_date(cls, date, default=None, format="%Y-%m-%dT%H:%M:%S.%fZ"):
        """Parse *date* with *format*; return *default* when parsing fails.

        Fix: the original used the Python-2-only ``except ValueError, ve:``
        syntax (a SyntaxError on Python 3) and never used ``ve``.
        """
        try:
            return datetime.datetime.strptime(date, format)
        except ValueError:
            return default
|
ABNER SENIRES, Writer of Things: Happy Independence Day!
"Here's to America's colors, the colors that never run. May the wings of Liberty never lose a feather."
|
# coding: utf-8
from datetime import date, datetime
from hashlib import sha256
import reversion
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection, Point
from django.db import transaction
from django.db.models import F
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.utils import timezone
from django.utils.encoding import smart_text
from jsonfield import JSONField
from taggit.managers import TaggableManager
from onadata.apps.logger.exceptions import FormInactiveError
from onadata.apps.logger.fields import LazyDefaultBooleanField
from onadata.apps.logger.models.survey_type import SurveyType
from onadata.apps.logger.models.xform import XForm
from onadata.apps.logger.models.submission_counter import SubmissionCounter
from onadata.apps.logger.xform_instance_parser import XFormInstanceParser, \
clean_and_parse_xml, get_uuid_from_xml
from onadata.libs.utils.common_tags import (
ATTACHMENTS,
GEOLOCATION,
ID,
MONGO_STRFTIME,
NOTES,
SUBMISSION_TIME,
TAGS,
UUID,
XFORM_ID_STRING,
SUBMITTED_BY
)
from onadata.libs.utils.model_tools import set_uuid
# need to establish id_string of the xform before we run get_dict since
# we now rely on data dictionary to parse the xml
def get_id_string_from_xml_str(xml_str):
    """Extract the form's ``id_string`` from a raw submission XML string.

    Reads the ``id`` attribute of the document root; when absent, searches
    ``<data>`` elements for a child element carrying an ``id`` attribute.
    """
    xml_obj = clean_and_parse_xml(xml_str)
    root_node = xml_obj.documentElement
    id_string = root_node.getAttribute("id")
    if len(id_string) == 0:
        # may be hidden in submission/data/id_string
        elems = root_node.getElementsByTagName('data')
        for data in elems:
            for child in data.childNodes:
                # NOTE(review): this reads `data.childNodes[0]` on every
                # iteration instead of `child`, so only the first child is
                # ever inspected and the inner loop is effectively a single
                # check — confirm intent before changing.
                id_string = data.childNodes[0].getAttribute('id')
                if len(id_string) > 0:
                    break
            if len(id_string) > 0:
                break
    return id_string
def submission_time():
    """Return the current timezone-aware timestamp used as submission time."""
    now = timezone.now()
    return now
def update_xform_submission_count(sender, instance, created, **kwargs):
    """`post_save` handler: on creation of an `Instance`, bump the submission
    counters on its parent `XForm` and on the form owner's `UserProfile`.
    """
    if not created:
        return
    # `defer_counting` is a Python-only attribute
    if getattr(instance, 'defer_counting', False):
        return
    with transaction.atomic():
        # Fetch only the owner id; the count updates below go through
        # `filter(...).update(...)` and never load the full row.
        xform = XForm.objects.only('user_id').get(pk=instance.xform_id)
        # Update with `F` expression instead of `select_for_update` to avoid
        # locks, which were mysteriously piling up during periods of high
        # traffic
        XForm.objects.filter(pk=instance.xform_id).update(
            num_of_submissions=F('num_of_submissions') + 1,
            last_submission_time=instance.date_created,
        )
        # Hack to avoid circular imports
        UserProfile = User.profile.related.related_model
        # NOTE(review): `created` here shadows the handler's own `created`
        # parameter; harmless since the parameter is no longer needed.
        profile, created = UserProfile.objects.only('pk').get_or_create(
            user_id=xform.user_id
        )
        UserProfile.objects.filter(pk=profile.pk).update(
            num_of_submissions=F('num_of_submissions') + 1,
        )
def nullify_exports_time_of_last_submission(sender, instance, **kwargs):
    """
    Formerly, "deleting" a submission would set a flag on the `Instance`,
    causing the `date_modified` attribute to be set to the current timestamp.
    `Export.exports_outdated()` relied on this to detect when a new `Export`
    needed to be generated due to submission deletion, but now that we always
    delete `Instance`s outright, this trick doesn't work. This kludge simply
    makes every `Export` for a form appear stale by nulling out its
    `time_of_last_submission` attribute.
    """
    # Avoid circular import: reach the Export model through the relation
    # instead of importing it at module level.
    try:
        export_model = instance.xform.export_set.model
    except XForm.DoesNotExist:
        # The parent form is already gone; nothing to invalidate.
        return
    f = instance.xform.export_set.filter(
        # Match the statuses considered by `Export.exports_outdated()`
        internal_status__in=[export_model.SUCCESSFUL, export_model.PENDING],
    )
    f.update(time_of_last_submission=None)
def update_user_submissions_counter(sender, instance, created, **kwargs):
    """`post_save` handler: increment the form owner's monthly
    `SubmissionCounter` row when a new `Instance` is created.
    """
    if not created:
        return
    # Deferred counting mirrors `update_xform_submission_count` above.
    if getattr(instance, 'defer_counting', False):
        return
    # Querying the database this way because it's faster than querying
    # the instance model for the data
    user_id = XForm.objects.values_list('user_id', flat=True).get(
        pk=instance.xform_id
    )
    today = date.today()
    first_day_of_month = today.replace(day=1)
    queryset = SubmissionCounter.objects.filter(
        user_id=user_id, timestamp=first_day_of_month
    )
    if not queryset.exists():
        # NOTE(review): assumes `SubmissionCounter` defaults its `timestamp`
        # to the first day of the current month, so the freshly created row
        # is matched by the `update()` below — confirm against the model.
        # Also note the exists/create pair is not atomic (possible race
        # under concurrent submissions).
        SubmissionCounter.objects.create(user_id=user_id)
    queryset.update(count=F('count') + 1)
def update_xform_submission_count_delete(sender, instance, **kwargs):
    """`post_delete` handler: decrement (never below zero) the submission
    counters on the parent `XForm` and the owner's `UserProfile`.
    """
    try:
        # Lock the form row while decrementing to avoid lost updates.
        xform = XForm.objects.select_for_update().get(pk=instance.xform.pk)
    except XForm.DoesNotExist:
        pass
    else:
        xform.num_of_submissions -= 1
        if xform.num_of_submissions < 0:
            # Clamp: counters can drift; never persist a negative count.
            xform.num_of_submissions = 0
        # Update `date_modified` to detect outdated exports
        # with deleted instances
        xform.save(update_fields=['num_of_submissions', 'date_modified'])
        profile_qs = User.profile.get_queryset()
        try:
            profile = profile_qs.select_for_update()\
                .get(pk=xform.user.profile.pk)
        except profile_qs.model.DoesNotExist:
            pass
        else:
            profile.num_of_submissions -= 1
            if profile.num_of_submissions < 0:
                profile.num_of_submissions = 0
            profile.save(update_fields=['num_of_submissions'])
@reversion.register
class Instance(models.Model):
XML_HASH_LENGTH = 64
DEFAULT_XML_HASH = None
json = JSONField(default={}, null=False)
xml = models.TextField()
xml_hash = models.CharField(max_length=XML_HASH_LENGTH, db_index=True, null=True,
default=DEFAULT_XML_HASH)
user = models.ForeignKey(User, related_name='instances', null=True, on_delete=models.CASCADE)
xform = models.ForeignKey(XForm, null=True, related_name='instances', on_delete=models.CASCADE)
survey_type = models.ForeignKey(SurveyType, on_delete=models.CASCADE)
# shows when we first received this instance
date_created = models.DateTimeField(auto_now_add=True)
# this will end up representing "date last parsed"
date_modified = models.DateTimeField(auto_now=True)
# this formerly represented "date instance was deleted".
# do not use it anymore.
deleted_at = models.DateTimeField(null=True, default=None)
# ODK keeps track of three statuses for an instance:
# incomplete, submitted, complete
# we add a fourth status: submitted_via_web
status = models.CharField(max_length=20,
default='submitted_via_web')
uuid = models.CharField(max_length=249, default='', db_index=True)
# store an geographic objects associated with this instance
geom = models.GeometryCollectionField(null=True)
tags = TaggableManager()
validation_status = JSONField(null=True, default=None)
# TODO Don't forget to update all records with command `update_is_sync_with_mongo`.
is_synced_with_mongo = LazyDefaultBooleanField(default=False)
# If XForm.has_kpi_hooks` is True, this field should be True either.
# It tells whether the instance has been successfully sent to KPI.
posted_to_kpi = LazyDefaultBooleanField(default=False)
class Meta:
app_label = 'logger'
@property
def asset(self):
"""
The goal of this property is to make the code future proof.
We can run the tests on kpi backend or kobocat backend.
Instance.asset will exist for both
It's used for validation_statuses.
:return: XForm
"""
return self.xform
def _check_active(self, force):
"""Check that form is active and raise exception if not.
:param force: Ignore restrictions on saving.
"""
if not force and self.xform and not self.xform.downloadable:
raise FormInactiveError()
def _set_geom(self):
xform = self.xform
data_dictionary = xform.data_dictionary()
geo_xpaths = data_dictionary.geopoint_xpaths()
doc = self.get_dict()
points = []
if len(geo_xpaths):
for xpath in geo_xpaths:
geometry = [float(s) for s in doc.get(xpath, '').split()]
if len(geometry):
lat, lng = geometry[0:2]
points.append(Point(lng, lat))
if not xform.instances_with_geopoints and len(points):
xform.instances_with_geopoints = True
xform.save()
self.geom = GeometryCollection(points)
def _set_json(self):
doc = self.get_dict()
if not self.date_created:
now = submission_time()
self.date_created = now
point = self.point
if point:
doc[GEOLOCATION] = [point.y, point.x]
doc[SUBMISSION_TIME] = self.date_created.strftime(MONGO_STRFTIME)
doc[XFORM_ID_STRING] = self._parser.get_xform_id_string()
doc[SUBMITTED_BY] = self.user.username\
if self.user is not None else None
self.json = doc
def _set_parser(self):
if not hasattr(self, "_parser"):
self._parser = XFormInstanceParser(
self.xml, self.xform.data_dictionary())
def _set_survey_type(self):
self.survey_type, created = \
SurveyType.objects.get_or_create(slug=self.get_root_node_name())
def _set_uuid(self):
if self.xml and not self.uuid:
uuid = get_uuid_from_xml(self.xml)
if uuid is not None:
self.uuid = uuid
set_uuid(self)
def _populate_xml_hash(self):
"""
Populate the `xml_hash` attribute of this `Instance` based on the content of the `xml`
attribute.
"""
self.xml_hash = self.get_hash(self.xml)
@classmethod
def populate_xml_hashes_for_instances(cls, usernames=None, pk__in=None, repopulate=False):
"""
Populate the `xml_hash` field for `Instance` instances limited to the specified users
and/or DB primary keys.
:param list[str] usernames: Optional list of usernames for whom `Instance`s will be
populated with hashes.
:param list[int] pk__in: Optional list of primary keys for `Instance`s that should be
populated with hashes.
:param bool repopulate: Optional argument to force repopulation of existing hashes.
:returns: Total number of `Instance`s updated.
:rtype: int
"""
filter_kwargs = dict()
if usernames:
filter_kwargs['xform__user__username__in'] = usernames
if pk__in:
filter_kwargs['pk__in'] = pk__in
# By default, skip over instances previously populated with hashes.
if not repopulate:
filter_kwargs['xml_hash'] = cls.DEFAULT_XML_HASH
# Query for the target `Instance`s.
target_instances_queryset = cls.objects.filter(**filter_kwargs)
# Exit quickly if there's nothing to do.
if not target_instances_queryset.exists():
return 0
# Limit our queryset result content since we'll only need the `pk` and `xml` attributes.
target_instances_queryset = target_instances_queryset.only('pk', 'xml')
instances_updated_total = 0
# Break the potentially large `target_instances_queryset` into chunks to avoid memory
# exhaustion.
chunk_size = 2000
target_instances_queryset = target_instances_queryset.order_by('pk')
target_instances_qs_chunk = target_instances_queryset
while target_instances_qs_chunk.exists():
# Take a chunk of the target `Instance`s.
target_instances_qs_chunk = target_instances_qs_chunk[0:chunk_size]
for instance in target_instances_qs_chunk:
pk = instance.pk
xml = instance.xml
# Do a `Queryset.update()` on this individual instance to avoid signals triggering
# things like `Reversion` versioning.
instances_updated_count = Instance.objects.filter(pk=pk).update(
xml_hash=cls.get_hash(xml))
instances_updated_total += instances_updated_count
# Set up the next chunk
target_instances_qs_chunk = target_instances_queryset.filter(
pk__gt=instance.pk)
return instances_updated_total
def get(self, abbreviated_xpath):
self._set_parser()
return self._parser.get(abbreviated_xpath)
def get_dict(self, force_new=False, flat=True):
"""Return a python object representation of this instance's XML."""
self._set_parser()
return self._parser.get_flat_dict_with_attributes() if flat else\
self._parser.to_dict()
def get_full_dict(self):
# TODO should we store all of these in the JSON no matter what?
d = self.json
data = {
UUID: self.uuid,
ID: self.id,
self.USERFORM_ID: '%s_%s' % (
self.user.username,
self.xform.id_string),
ATTACHMENTS: [a.media_file.name for a in
self.attachments.all()],
self.STATUS: self.status,
TAGS: list(self.tags.names()),
NOTES: self.get_notes()
}
d.update(data)
return d
def get_notes(self):
return [note['note'] for note in self.notes.values('note')]
def get_root_node(self):
self._set_parser()
return self._parser.get_root_node()
def get_root_node_name(self):
self._set_parser()
return self._parser.get_root_node_name()
@staticmethod
def get_hash(input_string):
"""
Compute the SHA256 hash of the given string. A wrapper to standardize hash computation.
:param string_types input_string: The string to be hashed.
:return: The resulting hash.
:rtype: str
"""
input_string = smart_text(input_string)
return sha256(input_string.encode()).hexdigest()
@property
def point(self):
gc = self.geom
if gc and len(gc):
return gc[0]
def save(self, *args, **kwargs):
force = kwargs.pop("force", False)
self._check_active(force)
self._set_geom()
self._set_json()
self._set_survey_type()
self._set_uuid()
self._populate_xml_hash()
# Force validation_status to be dict
if self.validation_status is None:
self.validation_status = {}
super().save(*args, **kwargs)
    def get_validation_status(self):
        """
        Return this instance's validation status (``save()`` normalizes a
        ``None`` status to ``{}`` before persisting).

        :return: object
        """
        # This method can be tweaked to implement default validation status
        # For example:
        # if not self.validation_status:
        #     self.validation_status = self.asset.settings.get("validation_statuses")[0]
        return self.validation_status
# Keep denormalized counters and export timestamps in sync with the
# submission lifecycle; dispatch_uid prevents duplicate registration when
# this module is imported more than once.
post_save.connect(update_xform_submission_count, sender=Instance,
                  dispatch_uid='update_xform_submission_count')

post_delete.connect(nullify_exports_time_of_last_submission, sender=Instance,
                    dispatch_uid='nullify_exports_time_of_last_submission')

post_save.connect(update_user_submissions_counter, sender=Instance,
                  dispatch_uid='update_user_submissions_counter')

post_delete.connect(update_xform_submission_count_delete, sender=Instance,
                    dispatch_uid='update_xform_submission_count_delete')
# Sanity check: xml_hash stores a hex-encoded SHA256 digest, so the column
# length must be exactly two hex characters per digest byte.
if Instance.XML_HASH_LENGTH != 2 * sha256().digest_size:
    # The previous message reported XML_HASH_LENGTH itself as the expected
    # digest_size, which was off by a factor of two and misleading.
    raise AssertionError('SHA256 hash `digest_size` expected to be `{}`, not `{}`'.format(
        Instance.XML_HASH_LENGTH // 2, sha256().digest_size))
class InstanceHistory(models.Model):
    """Snapshot of a submission's XML kept per edit of an ``Instance``.

    NOTE(review): presumably each row stores the XML as it was before an
    edit overwrote the live ``Instance`` — confirm with the code that
    creates these records.
    """

    class Meta:
        app_label = 'logger'

    # Parent submission; history rows disappear with it (CASCADE).
    xform_instance = models.ForeignKey(
        Instance, related_name='submission_history', on_delete=models.CASCADE)
    xml = models.TextField()
    # old instance id
    uuid = models.CharField(max_length=249, default='')

    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
|
Habitable two storey 3 bedroom townhouse for renovation / modernisation. This terraced property for sale in Almeria Province is situated on a quiet street at the top of the typically Spanish hillside town of Seron, near to the Plaza. Parking is available opposite and the property is only 1 minutes walk from a supermarket and bar.
The ground floor comprises a reception room, rustic kitchen with traditional corner fireplace and door to the street, one double bedroom, one single bedroom, a small understairs cloakroom, and a hallway with stairs to the first floor. On the first floor is a good sized landing / lounge, one double bedroom, and a roof terrace with outstanding views over the surrounding countryside. The terrace is large enough to accommodate a table & chairs.
On a lower level to the rear of the house are two long, narrow storage rooms.
We have many more townhouses for sale in Almeria Province. Please let us know your requirements, and we will be happy to search our portfolio and send you details of any suitable Spanish properties for sale.
|
import machine, time
from machine import Timer

# Board wiring (MicroPython)
GASANALOG = 0   # ADC channel the gas sensor is connected to
ALARMLED = 13   # GPIO pin driving the alarm LED

gasA = machine.ADC(GASANALOG)
gasLED = machine.Pin(ALARMLED, machine.Pin.OUT)
class CheckGas():
    """Poll a gas sensor on a periodic timer and drive an alarm LED.

    The LED is switched on whenever the sensor's analog reading exceeds
    ``level``; otherwise it is switched off. A double blink at start-up
    signals that the monitor is running.
    """

    def __init__(self, led, sensor, period=5000, level=480):
        # BUGFIX: this parameter was previously named `time`, shadowing the
        # imported `time` module, so every `time.sleep_ms()` call below
        # raised AttributeError on the integer argument. Renamed to `period`
        # (callers pass it positionally).
        super(CheckGas, self).__init__()
        self.led = led
        # Start-up indication: blink the LED twice.
        self.led.high()
        time.sleep_ms(500)
        self.led.low()
        time.sleep_ms(500)
        self.led.high()
        time.sleep_ms(500)
        self.led.low()
        self.gas = sensor
        self.timer = Timer(-1)  # -1 selects a virtual (software) timer
        self.level = level
        self.time = period  # attribute name kept for backward compatibility
        self.start(self.time)

    def checkGas(self):
        """Sample the sensor once; light the LED and return 1 when over level."""
        value = self.gas.read()
        # Reuse the single sample for both the decision and the log line
        # (previously the sensor was read twice, which could disagree).
        if value > self.level:
            self.led.high()
            check = 1
        else:
            self.led.low()
            check = 0
        print(value, check)
        return check

    def start(self, period):
        """Begin periodic sampling every ``period`` milliseconds."""
        self.timer.init(period=period, mode=Timer.PERIODIC,
                        callback=lambda t: self.checkGas())

    def stop(self):
        """Cancel the periodic sampling timer."""
        self.timer.deinit()
# Start monitoring: poll every 5 seconds with the default threshold (480).
g = CheckGas(gasLED, gasA, 5000)
|
Yeagers knows plumbing repair can be frustrating. We strive to carry a deep assortment of plumbing repair products, from everyday basics to hard-to-find parts. We are ready to help you tackle any project, large or small.
* Did you know Yeagers carries many hard to find replacement parts and can special order for you? We also custom cut and thread steel pipe to your specifications.
|
import goocanvas
import gtk
import cairo
def on_button_press (item, target, event, id):
print "%s received 'button-press' signal at %f, %f (root: %f, %f)" % \
(id, event.x, event.y, event.x_root, event.y_root)
return True
def setup_canvas (canvas):
    """Populate *canvas* with three rows of items demonstrating clip paths:
    a row of unclipped items, a row of individually clipped items, and a
    rotated goocanvas.Table containing clipped items."""
    root = canvas.get_root_item ()

    #Plain items without clip path.
    item = goocanvas.Ellipse (parent = root,
                              center_x = 0,
                              center_y = 0,
                              radius_x = 50,
                              radius_y = 30,
                              fill_color = "blue")
    item.translate (100, 100)
    item.rotate (30, 0, 0)
    item.connect ("button_press_event",
                  on_button_press, "Blue ellipse (unclipped)")

    item = goocanvas.Rect (parent = root,
                           x = 200,
                           y = 50,
                           width = 100,
                           height = 100,
                           fill_color = "red",
                           clip_fill_rule = cairo.FILL_RULE_EVEN_ODD)
    item.connect ("button_press_event",
                  on_button_press, "Red rectangle (unclipped)")

    item = goocanvas.Rect (parent = root,
                           x = 380,
                           y = 50,
                           width = 100,
                           height = 100,
                           fill_color = "yellow")
    item.connect ("button_press_event",
                  on_button_press, "Yellow rectangle (unclipped)")

    # Clipped items.
    item = goocanvas.Ellipse (parent = root,
                              center_x = 0,
                              center_y = 0,
                              radius_x = 50,
                              radius_y = 30,
                              fill_color = "blue",
                              clip_path = "M 0 0 h 100 v 100 h -100 Z")
    item.translate (100, 300)
    item.rotate (30, 0, 0)
    item.connect ("button_press_event", on_button_press, "Blue ellipse")

    item = goocanvas.Rect (parent = root,
                           x = 200,
                           y = 250,
                           width = 100,
                           height = 100,
                           fill_color = "red",
                           clip_path = "M 250 300 h 100 v 100 h -100 Z",
                           clip_fill_rule = cairo.FILL_RULE_EVEN_ODD)
    item.connect ("button_press_event", on_button_press, "Red rectangle")

    # Triangular clip region for the yellow rectangle.
    item = goocanvas.Rect (parent = root,
                           x = 380,
                           y = 250,
                           width = 100,
                           height = 100,
                           fill_color = "yellow",
                           clip_path = "M480,230 l40,100 l-80 0 z")
    item.connect ("button_press_event", on_button_press, "Yellow rectangle")

    # Table with clipped items.
    table = goocanvas.Table (parent = root)
    table.translate (200, 400)
    table.rotate (30, 0, 0)

    item = goocanvas.Ellipse (parent = table,
                              center_x = 0,
                              center_y = 0,
                              radius_x = 50,
                              radius_y = 30,
                              fill_color = "blue",
                              clip_path = "M 0 0 h 100 v 100 h -100 Z")
    item.translate (100, 300)
    item.rotate (30, 0, 0)
    item.connect ("button_press_event", on_button_press, "Blue ellipse")

    item = goocanvas.Rect (parent = table,
                           x = 200,
                           y = 250,
                           width = 100,
                           height = 100,
                           fill_color = "red",
                           clip_path = "M 250 300 h 100 v 100 h -100 Z",
                           clip_fill_rule = cairo.FILL_RULE_EVEN_ODD)
    item.connect ("button_press_event", on_button_press, "Red rectangle")
    table.set_child_properties (item, column = 1)

    item = goocanvas.Rect (parent = table,
                           x = 380,
                           y = 250,
                           width = 100,
                           height = 100,
                           fill_color = "yellow",
                           clip_path = "M480,230 l40,100 l-80 0 z")
    item.connect ("button_press_event", on_button_press, "Yellow rectangle")
    table.set_child_properties (item, column = 2)
def create_clipping_page ():
    """Build the demo page: a scrolled canvas populated with clip examples."""
    container = gtk.VBox (False, 4)
    container.set_border_width (4)

    scroller = gtk.ScrolledWindow ()
    scroller.set_shadow_type (gtk.SHADOW_IN)
    container.add (scroller)

    canvas = goocanvas.Canvas ()
    canvas.set_size_request (600, 450)
    canvas.set_bounds (0, 0, 1000, 1000)
    scroller.add (canvas)

    setup_canvas (canvas)
    return container
def main ():
    """Show the clipping demo page in a top-level window and run the loop."""
    page = create_clipping_page ()
    window = gtk.Window()
    window.connect("destroy", gtk.main_quit)
    window.add(page)
    window.show_all()
    gtk.main()

if __name__ == "__main__":
    main ()
|
Carolyn Anderson, Lisa Connell, Hong Yuh Reum, Toby Sheldon, Annette Edwards | 10 People Who Made Plastic Surgery to be Like Their Idols!
10 People Who Made Plastic Surgery to be Like Their Idols!
30-year-old Carolyn Anderson, from Liverpool, loves Hollywood star Pamela Anderson. She spent $30 thousand on breast augmentation. The Briton has dedicated her life to imitating Pamela, trying to move and talk exactly like her idol.
Lisa Connell has spent more than $60 thousand on breast augmentation, liposuction, an eyebrow lift and dental veneers in order to achieve a likeness to her idol Demi Moore. When Moore herself learned of this, the actress was horrified: it turned out that Lisa is suffering from cancer and underwent the surgeries because she wanted to "die beautiful."
Hong Yuh Reum from South Korea considers model Miranda Kerr the ideal of beauty. On the question of what operations she has made to resemble her favorite, she replied: "I did not touch my forehead."
Since 2008, the 35-year-old American Toby Sheldon spent all his savings - more than $100,000 - on plastic surgery, by which, he believes, he has made visual similarities with the Canadian pop idol Justin Bieber. The man is absolutely indifferent to the work of famous teenager: he suffers gerontophobia (fear of aging), and somehow Justinโs features became his ideal of eternal youth.
Not everyone wants to look like a real star. Annette Edwards, for example, decided to model herself on Jessica Rabbit, a character from the film "Who Framed Roger Rabbit." The woman is fond of rabbits, and so she decided she should look like the cartoon character.
Next: 15 Haunted Castles Too Dreadful To Handle!
|
"""
Load tests using locust.io.
"""
import urllib3
import faker
from locust import HttpLocust, TaskSet, task
urllib3.disable_warnings()
fake = faker.Faker()
faker.providers.phone_number.en_US.Provider.formats = ('888-555-####',)
class AttendeeBehavior(TaskSet):
    """Simulated attendee: loads the prereg form and submits a free badge."""

    min_wait = 1000
    max_wait = 10000

    def on_start(self):
        # Self-signed certificates are expected; skip TLS verification.
        self.verify = False

    def get_static_assets(self):
        """Fetch the static files a browser would pull with the form page."""
        static_paths = (
            '/static/deps/combined.min.css',
            '/static_views/styles/main.css',
            '/static/theme/prereg.css',
            '/static/theme/prereg_extra.css',
            '/static/deps/combined.min.js',
            '/static/js/common-static.js',
            '/static/theme/tile-background.png',
            '/static/images/loading.gif',
            '/static/theme/banner_2x.png',
        )
        for path in static_paths:
            self.client.get(path, verify=self.verify)

    @task
    def preregister(self):
        """Walk the full free-preregistration flow, aborting on any failure."""
        response = self.client.get('/preregistration/form', verify=self.verify)
        if response.status_code != 200:
            return

        self.get_static_assets()

        form_data = {
            'badge_type': '51352218',
            'name': '',
            'badges': '1',
            'first_name': fake.first_name(),
            'last_name': fake.last_name(),
            'same_legal_name': "Yep, that's right",
            'legal_name': '',
            'amount_extra': '0',
            'badge_printed_name': '',
            'affiliate': '',
            'shirt': '0',
            'birthdate': fake.date_time_between('-80y', '-14y').strftime('%Y-%m-%d'),
            'email': fake.safe_email(),
            'zip_code': fake.zipcode(),
            'ec_name': fake.name(),
            'ec_phone': fake.phone_number(),
            'cellphone': fake.phone_number(),
            'found_how': fake.catch_phrase(),
            'comments': fake.paragraph(),
            'extra_donation': '',
            'pii_consent': '1',
        }
        response = self.client.post('/preregistration/post_form',
                                    verify=self.verify, data=form_data)
        if response.status_code != 200:
            return

        # Follow the confirmation pages in order, stopping at the first error.
        for followup in ('/preregistration/process_free_prereg',
                         '/preregistration/paid_preregistrations?payment_received=0'):
            response = self.client.get(followup, verify=self.verify)
            if response.status_code != 200:
                return
class AttendeeLocust(HttpLocust):
    # Each simulated user runs the attendee preregistration task set.
    task_set = AttendeeBehavior
|
Up to 500,000 base versions of Tesla Model 3/Y per year for China and some other Asian markets.
On Monday, Tesla CEO Elon Musk was busy at the groundbreaking ceremony for the Tesla Gigafactory 3 in Shanghai, joined by Shanghai Mayor Ying Yong.
Here we sum up all the facts that we know so far about the second car manufacturing plant, which Tesla intends to build in its second-biggest market (a market that may well become Tesla's #1 at some point in the future).
According to Elon Musk, the facility will serve the greater China region and produce only affordable versions of the Tesla Model 3 and upcoming Tesla Model Y. Additionally, having the first automotive wholly-owned subsidiary in China should help Tesla from losing its latest know-how to competitors.
On the other hand, local production of the affordable, most popular version will be important in the case of trade wars or significant changes in exchange rates.
31 Comments on "Tesla Gigafactory 3: Facts & Videos From Groundbreaking"
They have a tent. Time to start production!!
Why are you even trying to propagandize on an EV-focused website? Youโre not in your echo chamber over here.
Lucky the Chinese govโt didnโt detain Elon after the Huawei CFO Meng Wanzhou fiasco in North America!
LMAO at you loser Spiegel, back to Stinking Alpha.
Does that mean affordable versions of model 3 and y will be imported from China into USA?
According to Teslaโs announced plans, the cars made at the Shanghai Gigafactory will be for the Chinese market only. Of course plans can change, but donโt expect to see Tesla cars made in China shipped to any first-world countries anytime within the next few years, at least.
Do you consider China to NOT be a first-world country?
No, thatโs not what you are saying.
Perhaps you should have included the word โotherโ in your text (โโฆโฆ. any other first-world countries โฆโฆโ).
China is not a first world country, it is a second world country. That was the name of the socialist/communist eastern countries that China was/is a part of. It has nothing to do with if they are developed or not.
A better name to use would be industrialized countries which China of course is a part of.
The meaning of โfirst world countryโ has shifted, but it still doesnโt mean merely an โindustrialized nationโ.
Since the collapse of the Soviet Union in 1991, the definition has instead largely shifted to any country with little political risk and a well functioning democracy, universal health care, rule of law, capitalist economy, economic stability and high standard of living.
Altho I would certainly quibble about โuniversal health careโ, since the U.S. is rather notoriously lacking that!
But back to the subject: I think itโs safe to say that China fails in the areas of โa well functioning democracyโ and โrule of lawโ, if not also failing in other areas.
Well of course itโs not. Perhaps you need to review the definition of the term.
So Monaco isnโt since itโs a Monarchy.
500,000? Thatโs gonna leave a mark Tommy Boy.
500,000 cars a year for only a $2 billion investment is a great return! Thatโs 4,000 per each car of yearly capacity. This thing pays for itself really fast.
Well, your arithmetic is right, sure. Except Tesla sees north of 20% margins, so a $35k car is $7000 in margin. The average purchase price of a Model III is higher than that, however. Also, lower labor costs in China. Higher import duties on American parts. I am confident that GP on 250k units from the factory will cover ROI. Vegas Casinos have an 18 month ROI once open. I believe Tesla can beat a Vegas Casino at ROI. Who, in their right minds, is shorting TSLA in 2019?
How many orders for the base version of the Tesla Model 3 (made in China) will Tesla receive from customers in China per year during the next decade?
Tesla will build more Gigafactories in China.
The start of the construction of the Shanghai Gigafactory really is a major step forward for Tesla. And it will be remembered as one of the major achievements of Tesla in 2019.
Where will the motors and PEMs be made? Hopefully they wonโt have any โspecial sauceโ in them or proprietary tech and production info will be gone in a hurry, 100% factory owned or not. Anyone that thinks otherwise is kidding themselves. The Chinese government has a Machiavellian approach to business, particularly tech and the car business.
Reports are that the Model 3 powertrain, including the motors, are made at Gigafactory 1 in Nevada.
Presumably, therefore, the motors for Model 3โs assembled in China will also be made at the Shanghai Gigafactory.
I agree that China will likely steal all of Teslaโs tech that it thinks is worth anything, but thatโs the price of doing business in China. However, having the blueprint for how to make something doesnโt necessarily mean you can make it successfully. American auto makers have been doing business in China for some years now, yet the quality of Chinese autos is quite inferior to foreign makes. Not just quite inferior to American makes, but also Japanese and S. Korean makes.
Of course, we canโt expect the inferiority of Chinese autos to last forever, any more than the inferiority of Japanese autos in the โ60s and โ70s lasted.
YOUNG DOC BROWN: No wonder this circuit failed. It says โMade in Japanโ.
MARTY McFLY: What do you mean, Doc? All the best stuff is made in Japan.
Tesla has essentially been given the ball in China and told to run with it.
own ineptitude, which is something that they are working on.
so Tesla has that going for them too.
Tesla: May all your teas be fragrant.
Lots of disturbing YouTubes from foreigners (stillโฆ.)living in China in how the country is reverting to a North Korea style dictatorship under its new dictator Xi.
I realize that money talks but is this really a good time to invest in China?
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# gpodder.model - Core model classes for gPodder (2009-08-13)
# Based on libpodcasts.py (thp, 2005-10-29)
#
import gpodder
from gpodder import util
from gpodder import feedcore
from gpodder import youtube
from gpodder import corestats
from gpodder.liblogger import log
import os
import re
import glob
import shutil
import time
import datetime
import rfc822
import hashlib
import feedparser
import xml.sax.saxutils
_ = gpodder.gettext
# Raised by fetch_channel() to short-circuit normal feed parsing when a
# registered custom handler claims the URL; .data carries the handler's feed.
class CustomFeed(feedcore.ExceptionWithData): pass
class gPodderFetcher(feedcore.Fetcher):
    """
    Fetcher subclass used by gPodder: it sends the gPodder User-Agent and
    provides fetch_channel() as a convenience entry point for
    PodcastChannel objects, dispatching to registered custom URL handlers
    before falling back to a normal feed fetch.
    """
    custom_handlers = []

    def __init__(self):
        feedcore.Fetcher.__init__(self, gpodder.user_agent)

    def fetch_channel(self, channel):
        etag = channel.etag
        modified = feedparser._parse_date(channel.last_modified)
        # Embed credentials directly in the URL; an HTTPBasicAuthHandler
        # would require knowing the realm up front, which we do not.
        url = channel.authenticate_url(channel.url)
        # A custom handler that claims this URL aborts the normal fetch.
        for handler in self.custom_handlers:
            alternative = handler.handle_url(url)
            if alternative is not None:
                raise CustomFeed(alternative)
        self.fetch(url, etag, modified)

    def _resolve_url(self, url):
        # Translate YouTube user pages into their real feed URLs.
        return youtube.get_real_channel_url(url)

    @classmethod
    def register(cls, handler):
        cls.custom_handlers.append(handler)
# def _get_handlers(self):
# # Add a ProxyHandler for fetching data via a proxy server
# proxies = {'http': 'http://proxy.example.org:8080'}
# return[urllib2.ProxyHandler(proxies))]
# The "register" method is exposed here for external usage
register_custom_handler = gPodderFetcher.register
class PodcastModelObject(object):
    """
    A generic base class for our podcast model providing common helper
    and utility functions.
    """

    @classmethod
    def create_from_dict(cls, d, *args):
        """
        Create a new object, passing "args" to the constructor
        and then updating the object with the values from "d".
        """
        instance = cls(*args)
        instance.update_from_dict(d)
        return instance

    def update_from_dict(self, d):
        """
        Updates the attributes of this object with values from the
        dictionary "d" by using the keys found in "d".

        Keys that do not correspond to an existing attribute are ignored.
        """
        for key, value in d.items():
            if hasattr(self, key):
                setattr(self, key, value)
class PodcastChannel(PodcastModelObject):
    """holds data for a complete channel"""
    # Hard cap for generated download folder names (filesystem safety)
    MAX_FOLDERNAME_LENGTH = 150
    # Grace window used to avoid flagging very old episodes as new (bug 340)
    SECONDS_PER_WEEK = 7*24*60*60

    # Single fetcher instance shared by all channels
    feed_fetcher = gPodderFetcher()
@classmethod
def build_factory(cls, download_dir):
def factory(dict, db):
return cls.create_from_dict(dict, db, download_dir)
return factory
    @classmethod
    def load_from_db(cls, db, download_dir):
        # Load every subscribed channel from the database in one query.
        return db.load_channels(factory=cls.build_factory(download_dir))
    @classmethod
    def load(cls, db, url, create=True, authentication_tokens=None,\
            max_episodes=0, download_dir=None, allow_empty_feeds=False):
        """Return the channel for *url* from the DB, subscribing on demand.

        When the URL is unknown and ``create`` is True, a new channel is
        created, fetched once and saved; ``authentication_tokens`` is an
        optional (username, password) pair. Subscribing to a feed with no
        downloadable episodes raises unless ``allow_empty_feeds`` is set.
        """
        if isinstance(url, unicode):
            url = url.encode('utf-8')

        tmp = db.load_channels(factory=cls.build_factory(download_dir), url=url)
        if len(tmp):
            return tmp[0]
        elif create:
            tmp = PodcastChannel(db, download_dir)
            tmp.url = url
            if authentication_tokens is not None:
                tmp.username = authentication_tokens[0]
                tmp.password = authentication_tokens[1]

            tmp.update(max_episodes)
            tmp.save()
            db.force_last_new(tmp)
            # Subscribing to empty feeds should yield an error (except if
            # the user specifically allows empty feeds in the config UI)
            if sum(tmp.get_statistics()) == 0 and not allow_empty_feeds:
                tmp.delete()
                raise Exception(_('No downloadable episodes in feed'))
            return tmp
    def episode_factory(self, d, db__parameter_is_unused=None):
        """
        This function takes a dictionary containing key-value pairs for
        episodes and returns a new PodcastEpisode object that is connected
        to this PodcastChannel object.

        The second parameter exists only to satisfy the factory-callable
        signature used by the DB layer; it is intentionally unused.

        Returns: A new PodcastEpisode object
        """
        return PodcastEpisode.create_from_dict(d, self)
    def _consume_custom_feed(self, custom_feed, max_episodes=0):
        """Update channel metadata and episodes from a custom feed handler."""
        self.title = custom_feed.get_title()
        self.link = custom_feed.get_link()
        self.description = custom_feed.get_description()
        self.image = custom_feed.get_image()
        self.pubDate = time.time()
        # NOTE(review): saved before inserting episodes — presumably so the
        # channel has a database id; confirm against db.save_channel().
        self.save()

        guids = [episode.guid for episode in self.get_all_episodes()]

        # Insert newly-found episodes into the database
        custom_feed.get_new_episodes(self, guids)
        self.save()

        # Trim the episode list down to the configured maximum
        self.db.purge(max_episodes, self.id)
def _consume_updated_feed(self, feed, max_episodes=0):
self.parse_error = feed.get('bozo_exception', None)
self.title = feed.feed.get('title', self.url)
self.link = feed.feed.get('link', self.link)
self.description = feed.feed.get('subtitle', self.description)
# Start YouTube-specific title FIX
YOUTUBE_PREFIX = 'Uploads by '
if self.title.startswith(YOUTUBE_PREFIX):
self.title = self.title[len(YOUTUBE_PREFIX):] + ' on YouTube'
# End YouTube-specific title FIX
try:
self.pubDate = rfc822.mktime_tz(feed.feed.get('updated_parsed', None+(0,)))
except:
self.pubDate = time.time()
if hasattr(feed.feed, 'image'):
for attribute in ('href', 'url'):
new_value = getattr(feed.feed.image, attribute, None)
if new_value is not None:
log('Found cover art in %s: %s', attribute, new_value)
self.image = new_value
if hasattr(feed.feed, 'icon'):
self.image = feed.feed.icon
self.save()
# Load all episodes to update them properly.
existing = self.get_all_episodes()
# We can limit the maximum number of entries that gPodder will parse
if max_episodes > 0 and len(feed.entries) > max_episodes:
entries = feed.entries[:max_episodes]
else:
entries = feed.entries
# Title + PubDate hashes for existing episodes
existing_dupes = dict((e.duplicate_id(), e) for e in existing)
# GUID-based existing episode list
existing_guids = dict((e.guid, e) for e in existing)
# Get most recent pubDate of all episodes
last_pubdate = self.db.get_last_pubdate(self) or 0
# Search all entries for new episodes
for entry in entries:
try:
episode = PodcastEpisode.from_feedparser_entry(entry, self)
if episode is not None and not episode.title:
episode.title, ext = os.path.splitext(os.path.basename(episode.url))
except Exception, e:
log('Cannot instantiate episode: %s. Skipping.', e, sender=self, traceback=True)
continue
if episode is None:
continue
# Detect (and update) existing episode based on GUIDs
existing_episode = existing_guids.get(episode.guid, None)
if existing_episode:
existing_episode.update_from(episode)
existing_episode.save()
continue
# Detect (and update) existing episode based on duplicate ID
existing_episode = existing_dupes.get(episode.duplicate_id(), None)
if existing_episode:
if existing_episode.is_duplicate(episode):
existing_episode.update_from(episode)
existing_episode.save()
continue
# Workaround for bug 340: If the episode has been
# published earlier than one week before the most
# recent existing episode, do not mark it as new.
if episode.pubDate < last_pubdate - self.SECONDS_PER_WEEK:
log('Episode with old date: %s', episode.title, sender=self)
episode.is_played = True
episode.save()
# Remove "unreachable" episodes - episodes that have not been
# downloaded and that the feed does not list as downloadable anymore
if self.id is not None:
seen_guids = set(e.guid for e in feed.entries if hasattr(e, 'guid'))
episodes_to_purge = (e for e in existing if \
e.state != gpodder.STATE_DOWNLOADED and \
e.guid not in seen_guids and e.guid is not None)
for episode in episodes_to_purge:
log('Episode removed from feed: %s (%s)', episode.title, \
episode.guid, sender=self)
self.db.delete_episode_by_guid(episode.guid, self.id)
# This *might* cause episodes to be skipped if there were more than
# max_episodes_per_feed items added to the feed between updates.
# The benefit is that it prevents old episodes from apearing as new
# in certain situations (see bug #340).
self.db.purge(max_episodes, self.id)
    def update_channel_lock(self):
        # Persist the current channel_is_locked flag to the database.
        self.db.update_channel_lock(self)
def _update_etag_modified(self, feed):
self.updated_timestamp = time.time()
self.calculate_publish_behaviour()
self.etag = feed.headers.get('etag', self.etag)
self.last_modified = feed.headers.get('last-modified', self.last_modified)
def query_automatic_update(self):
"""Query if this channel should be updated automatically
Returns True if the update should happen in automatic
mode or False if this channel should be skipped (timeout
not yet reached or release not expected right now).
"""
updated = self.updated_timestamp
expected = self.release_expected
now = time.time()
one_day_ago = now - 60*60*24
lastcheck = now - 60*10
return updated < one_day_ago or \
(expected < now and updated < lastcheck)
    def update(self, max_episodes=0):
        """Fetch this channel's feed and merge the result into the DB.

        The fetcher reports its outcome by raising one of several result
        exceptions; each branch below consumes the corresponding payload.
        Genuine errors fall through to the final handler and are re-raised.
        """
        try:
            self.feed_fetcher.fetch_channel(self)
        except CustomFeed, updated:
            # A registered custom handler claimed the URL
            custom_feed = updated.data
            self._consume_custom_feed(custom_feed, max_episodes)
            self.save()
        except feedcore.UpdatedFeed, updated:
            # Normal case: new feed content was downloaded and parsed
            feed = updated.data
            self._consume_updated_feed(feed, max_episodes)
            self._update_etag_modified(feed)
            self.save()
        except feedcore.NewLocation, updated:
            # Permanent redirect: remember the new feed URL
            feed = updated.data
            self.url = feed.href
            self._consume_updated_feed(feed, max_episodes)
            self._update_etag_modified(feed)
            self.save()
        except feedcore.NotModified, updated:
            # Server says nothing changed; just refresh the caching headers
            feed = updated.data
            self._update_etag_modified(feed)
            self.save()
        except Exception, e:
            # "Not really" errors
            #feedcore.AuthenticationRequired
            # Temporary errors
            #feedcore.Offline
            #feedcore.BadRequest
            #feedcore.InternalServerError
            #feedcore.WifiLogin
            # Permanent errors
            #feedcore.Unsubscribe
            #feedcore.NotFound
            #feedcore.InvalidFeed
            #feedcore.UnknownStatusCode
            raise

        self.db.commit()
    def delete(self):
        # Remove this channel (and its episodes) from the database.
        self.db.delete_channel(self)
    def save(self):
        # Persist this channel's current state to the database.
        self.db.save_channel(self)
def get_statistics(self):
if self.id is None:
return (0, 0, 0, 0, 0)
else:
return self.db.get_channel_count(int(self.id))
    def authenticate_url(self, url):
        # Embed the stored credentials (if any) into the URL itself.
        return util.url_add_authentication(url, self.username, self.password)
    def __init__(self, db, download_dir):
        """Initialize an empty channel bound to *db* and *download_dir*."""
        self.db = db
        self.download_dir = download_dir
        self.id = None          # database id; None until first save
        self.url = None
        self.title = ''
        self.link = ''
        self.description = ''
        self.image = None
        self.pubDate = 0
        self.parse_error = None
        self.foldername = None
        self.auto_foldername = 1 # automatically generated foldername

        # should this channel be synced to devices? (ex: iPod)
        self.sync_to_devices = True
        # to which playlist should be synced
        self.device_playlist_name = 'gPodder'
        # if set, this overrides the channel-provided title
        self.override_title = ''
        self.username = ''
        self.password = ''

        self.last_modified = None   # HTTP Last-Modified header for caching
        self.etag = None            # HTTP ETag header for caching

        self.save_dir_size = 0
        self.__save_dir_size_set = False

        self.channel_is_locked = False

        # Publish-schedule estimate (see calculate_publish_behaviour)
        self.release_expected = time.time()
        self.release_deviation = 0
        self.updated_timestamp = 0
def calculate_publish_behaviour(self):
episodes = self.db.load_episodes(self, factory=self.episode_factory, limit=30)
if len(episodes) < 3:
return
deltas = []
latest = max(e.pubDate for e in episodes)
for index in range(len(episodes)-1):
if episodes[index].pubDate != 0 and episodes[index+1].pubDate != 0:
deltas.append(episodes[index].pubDate - episodes[index+1].pubDate)
if len(deltas) > 1:
stats = corestats.Stats(deltas)
self.release_expected = min([latest+stats.stdev(), latest+(stats.min()+stats.avg())*.5])
self.release_deviation = stats.stdev()
else:
self.release_expected = latest
self.release_deviation = 0
    def request_save_dir_size(self):
        # Compute the download folder size lazily, only on first request.
        if not self.__save_dir_size_set:
            self.update_save_dir_size()
        self.__save_dir_size_set = True
    def update_save_dir_size(self):
        # Recalculate the on-disk size of this channel's download folder.
        self.save_dir_size = util.calculate_size(self.save_dir)
def get_title( self):
if self.override_title:
return self.override_title
elif not self.__title.strip():
return self.url
else:
return self.__title
def set_title( self, value):
self.__title = value.strip()
title = property(fget=get_title,
fset=set_title)
    def set_custom_title( self, custom_title):
        """Set (or clear) a user-chosen title and rename the folder to match.

        Passing the channel's own feed title clears the override. Existing
        download folders are renamed, or their contents moved when the
        target folder already exists.
        """
        custom_title = custom_title.strip()

        # if the custom title is the same as we have
        if custom_title == self.override_title:
            return

        # if custom title is the same as channel title and we didn't have a custom title
        if custom_title == self.__title and self.override_title == '':
            return

        # make sure self.foldername is initialized
        self.get_save_dir()

        # rename folder if custom_title looks sane
        new_folder_name = self.find_unique_folder_name(custom_title)
        if len(new_folder_name) > 0 and new_folder_name != self.foldername:
            log('Changing foldername based on custom title: %s', custom_title, sender=self)
            new_folder = os.path.join(self.download_dir, new_folder_name)
            old_folder = os.path.join(self.download_dir, self.foldername)
            if os.path.exists(old_folder):
                if not os.path.exists(new_folder):
                    # Old folder exists, new folder does not -> simply rename
                    log('Renaming %s => %s', old_folder, new_folder, sender=self)
                    os.rename(old_folder, new_folder)
                else:
                    # Both folders exist -> move files and delete old folder
                    log('Moving files from %s to %s', old_folder, new_folder, sender=self)
                    for file in glob.glob(os.path.join(old_folder, '*')):
                        shutil.move(file, new_folder)
                    log('Removing %s', old_folder, sender=self)
                    shutil.rmtree(old_folder, ignore_errors=True)
            self.foldername = new_folder_name
            self.save()

        if custom_title != self.__title:
            self.override_title = custom_title
        else:
            self.override_title = ''
    def get_downloaded_episodes(self):
        # All episodes of this channel currently in the downloaded state.
        return self.db.load_episodes(self, factory=self.episode_factory, state=gpodder.STATE_DOWNLOADED)
def get_new_episodes(self, downloading=lambda e: False):
"""
Get a list of new episodes. You can optionally specify
"downloading" as a callback that takes an episode as
a parameter and returns True if the episode is currently
being downloaded or False if not.
By default, "downloading" is implemented so that it
reports all episodes as not downloading.
"""
return [episode for episode in self.db.load_episodes(self, \
factory=self.episode_factory, state=gpodder.STATE_NORMAL) if \
episode.check_is_new(downloading=downloading)]
def get_playlist_filename(self):
# If the save_dir doesn't end with a slash (which it really should
# not, if the implementation is correct, we can just append .m3u :)
assert self.save_dir[-1] != '/'
return self.save_dir+'.m3u'
    def update_m3u_playlist(self):
        """Rewrite (or delete) the channel's M3U playlist from downloaded episodes."""
        m3u_filename = self.get_playlist_filename()

        downloaded_episodes = self.get_downloaded_episodes()
        if not downloaded_episodes:
            # Nothing downloaded -> an empty playlist file would be useless
            log('No episodes - removing %s', m3u_filename, sender=self)
            util.delete_file(m3u_filename)
            return

        log('Writing playlist to %s', m3u_filename, sender=self)
        f = open(m3u_filename, 'w')
        f.write('#EXTM3U\n')

        for episode in PodcastEpisode.sort_by_pubdate(downloaded_episodes):
            if episode.was_downloaded(and_exists=True):
                filename = episode.local_filename(create=False)
                assert filename is not None

                # Prefer a path relative to the playlist's own directory
                if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                    filename = filename[len(os.path.dirname(m3u_filename)+os.sep):]
                f.write('#EXTINF:0,'+self.title+' - '+episode.title+' ('+episode.cute_pubdate()+')\n')
                f.write(filename+'\n')

        f.close()
def get_all_episodes(self):
    """Return every episode of this channel, regardless of state."""
    return self.db.load_episodes(self, factory=self.episode_factory)
def find_unique_folder_name(self, foldername):
    """Return a folder name based on *foldername* not used by another channel."""
    # Remove trailing dots to avoid errors on Windows (bug 600)
    base = foldername.strip().rstrip('.')

    candidate = util.sanitize_filename(base, \
            self.MAX_FOLDERNAME_LENGTH)
    suffix = 2
    # Append " (2)", " (3)", ... until the name is unique in the DB
    while self.db.channel_foldername_exists(candidate):
        candidate = '%s (%d)' % (base, suffix)
        suffix += 1
    return candidate
def get_save_dir(self):
    """Return (creating it if necessary) the channel's download directory.

    Also maintains self.foldername: auto-generated folder names are
    refreshed from the channel title, and legacy pre-0.15.0 folders
    (named after the md5 hexdigest of the URL) are detected, adopted
    and migrated/renamed when a better name is chosen.
    """
    urldigest = hashlib.md5(self.url).hexdigest()
    sanitizedurl = util.sanitize_filename(self.url, self.MAX_FOLDERNAME_LENGTH)
    if self.foldername is None or (self.auto_foldername and (self.foldername == urldigest or self.foldername.startswith(sanitizedurl))):
        # we must change the folder name, because it has not been set manually
        fn_template = util.sanitize_filename(self.title, self.MAX_FOLDERNAME_LENGTH)

        # if this is an empty string, try the basename
        if len(fn_template) == 0:
            log('That is one ugly feed you have here! (Report this to bugs.gpodder.org: %s)', self.url, sender=self)
            fn_template = util.sanitize_filename(os.path.basename(self.url), self.MAX_FOLDERNAME_LENGTH)

        # If the basename is also empty, use the first 6 md5 hexdigest chars of the URL
        if len(fn_template) == 0:
            log('That is one REALLY ugly feed you have here! (Report this to bugs.gpodder.org: %s)', self.url, sender=self)
            fn_template = urldigest # no need for sanitize_filename here

        # Find a unique folder name for this podcast
        wanted_foldername = self.find_unique_folder_name(fn_template)

        # if the foldername has not been set, check if the (old) md5 filename exists
        if self.foldername is None and os.path.exists(os.path.join(self.download_dir, urldigest)):
            log('Found pre-0.15.0 download folder for %s: %s', self.title, urldigest, sender=self)
            self.foldername = urldigest

        # we have a valid, new folder name in "wanted_foldername" -> use that!
        if self.foldername is not None and wanted_foldername != self.foldername:
            # there might be an old download folder crawling around - move it!
            new_folder_name = os.path.join(self.download_dir, wanted_foldername)
            old_folder_name = os.path.join(self.download_dir, self.foldername)
            if os.path.exists(old_folder_name):
                if not os.path.exists(new_folder_name):
                    # Old folder exists, new folder does not -> simply rename
                    log('Renaming %s => %s', old_folder_name, new_folder_name, sender=self)
                    os.rename(old_folder_name, new_folder_name)
                else:
                    # Both folders exist -> move files and delete old folder
                    log('Moving files from %s to %s', old_folder_name, new_folder_name, sender=self)
                    for file in glob.glob(os.path.join(old_folder_name, '*')):
                        shutil.move(file, new_folder_name)
                    log('Removing %s', old_folder_name, sender=self)
                    shutil.rmtree(old_folder_name, ignore_errors=True)
        # Persist the (possibly new) folder name
        log('Updating foldername of %s to "%s".', self.url, wanted_foldername, sender=self)
        self.foldername = wanted_foldername
        self.save()

    save_dir = os.path.join(self.download_dir, self.foldername)

    # Create save_dir if it does not yet exist
    if not util.make_directory( save_dir):
        log( 'Could not create save_dir: %s', save_dir, sender = self)

    return save_dir

# Read-only attribute access: channel.save_dir
save_dir = property(fget=get_save_dir)
def remove_downloaded( self):
    """Delete the channel's entire download directory (errors ignored)."""
    shutil.rmtree( self.save_dir, True)
@property
def cover_file(self):
    """Path of the channel's cover image ("folder.jpg" in save_dir).

    Migrates a legacy cover file ("cover" or ".cover") to the new
    name when found. The returned path is not guaranteed to exist.
    """
    target = os.path.join(self.save_dir, 'folder.jpg')
    if not os.path.exists(target):
        for legacy_name in ('cover', '.cover'):
            candidate = os.path.join(self.save_dir, legacy_name)
            if os.path.exists(candidate):
                shutil.move(candidate, target)
                break
    return target
def delete_episode(self, episode):
    """Remove the episode's downloaded file (if any) and mark it deleted."""
    local_file = episode.local_filename(create=False, check_only=True)
    if local_file is not None:
        util.delete_file(local_file)

    episode.set_state(gpodder.STATE_DELETED)
class PodcastEpisode(PodcastModelObject):
    """Holds the data for one episode (feed item) of a podcast channel."""
    # Maximum length of auto-generated local filenames (without extension)
    MAX_FILENAME_LENGTH = 200
def _get_played(self):
    """Getter backing the "played" DB column alias."""
    return self.is_played

def _set_played(self, played):
    """Setter backing the "played" DB column alias."""
    self.is_played = played

# Alias "is_played" to "played" for DB column mapping
played = property(fget=_get_played, fset=_set_played)
def _get_locked(self):
    """Getter backing the "locked" DB column alias."""
    return self.is_locked

def _set_locked(self, locked):
    """Setter backing the "locked" DB column alias."""
    self.is_locked = locked

# Alias "is_locked" to "locked" for DB column mapping
locked = property(fget=_get_locked, fset=_set_locked)
def _get_channel_id(self):
    """Return the owning channel's DB id."""
    return self.channel.id

def _set_channel_id(self, channel_id):
    # Sanity check only: the channel relation is the source of truth
    assert self.channel.id == channel_id

# Accessor for the "channel_id" DB column
channel_id = property(fget=_get_channel_id, fset=_set_channel_id)
@staticmethod
def sort_by_pubdate(episodes, reverse=False):
    """Sort a list of PodcastEpisode objects chronologically

    Returns a iterable, sorted sequence of the episodes
    """
    return sorted(episodes, key=lambda episode: episode.pubDate, reverse=reverse)
def reload_from_db(self):
    """
    Re-reads all episode details for this object from the
    database and updates this object accordingly. Can be
    used to refresh existing objects when the database has
    been updated (e.g. the filename has been set after a
    download where it was not set before the download)
    """
    row = self.db.load_episode(self.id)
    if not row:
        row = {}
    self.update_from_dict(row)
    return self
def has_website_link(self):
    """True if the episode has a web link distinct from its media URL."""
    if not self.link:
        return False
    return self.link != self.url
@staticmethod
def from_feedparser_entry(entry, channel):
    """Build a PodcastEpisode from a feedparser entry dict.

    Candidate download URLs are tried in order: RSS enclosures,
    Media RSS content, any entry link that resolves to an audio/video
    file (or a YouTube video page), and finally MP3 URLs scraped from
    the entry's HTML content. Returns the episode as soon as a usable
    URL is found, or None when the entry has nothing downloadable.
    """
    episode = PodcastEpisode(channel)
    episode.title = entry.get('title', '')
    episode.link = entry.get('link', '')
    episode.description = entry.get('summary', '')

    # Fallback to subtitle if summary is not available
    if not episode.description:
        episode.description = entry.get('subtitle', '')

    episode.guid = entry.get('id', '')
    if entry.get('updated_parsed', None):
        # Append a 0 tz-offset so mktime_tz accepts the parsed tuple
        episode.pubDate = rfc822.mktime_tz(entry.updated_parsed+(0,))

    enclosures = entry.get('enclosures', ())
    audio_available = any(e.get('type', '').startswith('audio/') \
            for e in enclosures)
    video_available = any(e.get('type', '').startswith('video/') \
            for e in enclosures)

    # Enclosures
    for e in enclosures:
        episode.mimetype = e.get('type', 'application/octet-stream')
        if episode.mimetype == '':
            # See Maemo bug 10036
            log('Fixing empty mimetype in ugly feed', sender=episode)
            episode.mimetype = 'application/octet-stream'

        # A mimetype without a slash is unusable for type detection
        if '/' not in episode.mimetype:
            continue

        # Skip images in feeds if audio or video is available (bug 979)
        if episode.mimetype.startswith('image/') and \
                (audio_available or video_available):
            continue

        episode.url = util.normalize_feed_url(e.get('href', ''))
        if not episode.url:
            continue

        try:
            episode.length = int(e.length) or -1
        except:
            episode.length = -1

        # First usable enclosure wins
        return episode

    # Media RSS content
    for m in entry.get('media_content', ()):
        episode.mimetype = m.get('type', 'application/octet-stream')
        if '/' not in episode.mimetype:
            continue

        episode.url = util.normalize_feed_url(m.get('url', ''))
        if not episode.url:
            continue

        try:
            episode.length = int(m.fileSize) or -1
        except:
            episode.length = -1

        return episode

    # Brute-force detection of any links
    for l in entry.get('links', ()):
        episode.url = util.normalize_feed_url(l.get('href', ''))
        if not episode.url:
            continue

        if youtube.is_video_link(episode.url):
            return episode

        # Check if we can resolve this link to a audio/video file
        filename, extension = util.filename_from_url(episode.url)
        file_type = util.file_type_by_extension(extension)
        if file_type is None and hasattr(l, 'type'):
            extension = util.extension_from_mimetype(l.type)
            file_type = util.file_type_by_extension(extension)

        # The link points to a audio or video file - use it!
        if file_type is not None:
            return episode

    # Scan MP3 links in description text
    mp3s = re.compile(r'http://[^"]*\.mp3')
    for content in entry.get('content', ()):
        html = content.value
        for match in mp3s.finditer(html):
            episode.url = match.group(0)
            return episode

    # Nothing downloadable found in this entry
    return None
def __init__(self, channel):
    """Create an empty episode belonging to *channel*."""
    self.db = channel.db
    # Used by Storage for faster saving
    self.id = None
    # Feed metadata (filled in later, e.g. by from_feedparser_entry)
    self.url = ''
    self.title = ''
    self.length = 0
    self.mimetype = 'application/octet-stream'
    self.guid = ''
    self.description = ''
    self.link = ''
    self.channel = channel
    self.pubDate = 0
    # Download/book-keeping state
    self.filename = None
    self.auto_filename = 1 # automatically generated filename

    self.state = gpodder.STATE_NORMAL
    self.is_played = False

    # Initialize the "is_locked" property from the channel's default
    self._is_locked = False
    self.is_locked = channel.channel_is_locked

    # Playback time/position attributes
    self.total_time = 0
    self.current_position = 0
    self.current_position_updated = time.time()
def get_is_locked(self):
    """Getter for the "is_locked" (keep episode) flag."""
    return self._is_locked

def set_is_locked(self, is_locked):
    # Normalize any truthy value to a real bool before storing
    self._is_locked = bool(is_locked)

is_locked = property(fget=get_is_locked, fset=set_is_locked)
def save(self):
    """Persist the episode; promote it to DOWNLOADED if its file exists."""
    if self.state != gpodder.STATE_DOWNLOADED and self.file_exists():
        self.state = gpodder.STATE_DOWNLOADED
    self.db.save_episode(self)
def on_downloaded(self, filename):
    """Callback after a successful download of *filename*.

    Marks the episode downloaded/unplayed, records the on-disk size
    and commits the change to the database.
    """
    self.state = gpodder.STATE_DOWNLOADED
    self.is_played = False
    self.length = os.path.getsize(filename)
    self.db.save_downloaded_episode(self)
    self.db.commit()
def set_state(self, state):
    """Set the episode state and persist it immediately."""
    self.state = state
    self.db.update_episode_state(self)
def mark(self, state=None, is_played=None, is_locked=None):
    """Update any combination of state/played/locked, then persist.

    Arguments left at None keep their current value.
    """
    updates = (('state', state),
               ('is_played', is_played),
               ('is_locked', is_locked))
    for attribute, value in updates:
        if value is not None:
            setattr(self, attribute, value)
    self.db.update_episode_state(self)
@property
def title_markup(self):
    """Pango markup: escaped episode title with the channel title in <small>."""
    escape = xml.sax.saxutils.escape
    return '%s\n<small>%s</small>' % (escape(self.title),
                                      escape(self.channel.title))
@property
def maemo_markup(self):
    """Pango markup for Maemo lists: title, optional size, date, podcast."""
    if self.length > 0:
        length_str = '%s; ' % self.filesize_prop
    else:
        # Unknown size: omit the size segment entirely
        length_str = ''
    return ('<b>%s</b>\n<small>%s'+_('released %s')+ \
            '; '+_('from %s')+'</small>') % (\
            xml.sax.saxutils.escape(self.title), \
            xml.sax.saxutils.escape(length_str), \
            xml.sax.saxutils.escape(self.pubdate_prop), \
            xml.sax.saxutils.escape(self.channel.title))
@property
def maemo_remove_markup(self):
    """Pango markup for the Maemo "remove episodes" list entry."""
    if self.is_played:
        played_string = _('played')
    else:
        played_string = _('unplayed')
    downloaded_string = self.get_age_string()
    if not downloaded_string:
        # Empty age string means the file was downloaded today
        downloaded_string = _('today')
    return ('<b>%s</b>\n<small>%s; %s; '+_('downloaded %s')+ \
            '; '+_('from %s')+'</small>') % (\
            xml.sax.saxutils.escape(self.title), \
            xml.sax.saxutils.escape(self.filesize_prop), \
            xml.sax.saxutils.escape(played_string), \
            xml.sax.saxutils.escape(downloaded_string), \
            xml.sax.saxutils.escape(self.channel.title))
def age_in_days(self):
    """Age of the downloaded file in days (via util.file_age_in_days)."""
    return util.file_age_in_days(self.local_filename(create=False, \
            check_only=True))

def get_age_string(self):
    """Human-readable age of the downloaded file."""
    return util.file_age_to_string(self.age_in_days())

age_prop = property(fget=get_age_string)
def one_line_description( self):
    """Return the description collapsed to a single line, HTML stripped."""
    text = util.remove_html_tags(self.description)
    lines = text.strip().splitlines()
    if lines and lines[0] != '':
        return ' '.join(lines)
    return _('No description available')
def delete_from_disk(self):
    """Best-effort removal of the episode's file; failures are only logged."""
    try:
        self.channel.delete_episode(self)
    except:
        # Deliberately swallow everything: deletion must never crash the UI
        log('Cannot delete episode from disk: %s', self.title, traceback=True, sender=self)
def find_unique_file_name(self, url, filename, extension):
    """Return a sanitized "basename+extension" unique among episodes.

    *url* is unused but kept for interface compatibility with callers.
    If the episode already owns the candidate name it is returned
    unchanged; otherwise " (2)", " (3)", ... is appended until the
    name is not taken in the database.
    """
    current_try = util.sanitize_filename(filename, self.MAX_FILENAME_LENGTH)+extension
    # (removed an unused "lookup_url" local that was never read)

    if current_try is not None and self.filename == current_try:
        # We already have this filename - good!
        return current_try

    next_try_id = 2
    while self.db.episode_filename_exists(current_try):
        current_try = '%s (%d)%s' % (filename, next_try_id, extension)
        next_try_id += 1

    return current_try
def local_filename(self, create, force_update=False, check_only=False,
        template=None):
    """Get (and possibly generate) the local saving filename

    Pass create=True if you want this function to generate a
    new filename if none exists. You only want to do this when
    planning to create/download the file after calling this function.

    Normally, you should pass create=False. This will only
    create a filename when the file already exists from a previous
    version of gPodder (where we used md5 filenames). If the file
    does not exist (and the filename also does not exist), this
    function will return None.

    If you pass force_update=True to this function, it will try to
    find a new (better) filename and move the current file if this
    is the case. This is useful if (during the download) you get
    more information about the file, e.g. the mimetype and you want
    to include this information in the file name generation process.

    If check_only=True is passed to this function, it will never try
    to rename the file, even if would be a good idea. Use this if you
    only want to check if a file exists.

    If "template" is specified, it should be a filename that is to
    be used as a template for generating the "real" filename.

    The generated filename is stored in the database for future access.
    """
    # Note: encode() here implies Python 2 byte-string handling
    ext = self.extension(may_call_local_filename=False).encode('utf-8', 'ignore')

    # For compatibility with already-downloaded episodes, we
    # have to know md5 filenames if they are downloaded already
    urldigest = hashlib.md5(self.url).hexdigest()

    if not create and self.filename is None:
        urldigest_filename = os.path.join(self.channel.save_dir, urldigest+ext)
        if os.path.exists(urldigest_filename):
            # The file exists, so set it up in our database
            log('Recovering pre-0.15.0 file: %s', urldigest_filename, sender=self)
            self.filename = urldigest+ext
            self.auto_filename = 1
            self.save()
            return urldigest_filename
        return None

    # We only want to check if the file exists, so don't try to
    # rename the file, even if it would be reasonable. See also:
    # http://bugs.gpodder.org/attachment.cgi?id=236
    if check_only:
        if self.filename is None:
            return None
        else:
            return os.path.join(self.channel.save_dir, self.filename)

    if self.filename is None or force_update or (self.auto_filename and self.filename == urldigest+ext):
        # Try to find a new filename for the current file
        if template is not None:
            # If template is specified, trust the template's extension
            episode_filename, ext = os.path.splitext(template)
        else:
            episode_filename, extension_UNUSED = util.filename_from_url(self.url)
        fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)

        if 'redirect' in fn_template and template is None:
            # This looks like a redirection URL - force URL resolving!
            log('Looks like a redirection to me: %s', self.url, sender=self)
            url = util.get_real_url(self.channel.authenticate_url(self.url))
            log('Redirection resolved to: %s', url, sender=self)
            (episode_filename, extension_UNUSED) = util.filename_from_url(url)
            fn_template = util.sanitize_filename(episode_filename, self.MAX_FILENAME_LENGTH)

        # Use the video title for YouTube downloads
        for yt_url in ('http://youtube.com/', 'http://www.youtube.com/'):
            if self.url.startswith(yt_url):
                fn_template = util.sanitize_filename(os.path.basename(self.title), self.MAX_FILENAME_LENGTH)

        # If the basename is empty, use the md5 hexdigest of the URL
        if len(fn_template) == 0 or fn_template.startswith('redirect.'):
            log('Report to bugs.gpodder.org: Podcast at %s with episode URL: %s', self.channel.url, self.url, sender=self)
            fn_template = urldigest

        # Find a unique filename for this episode
        wanted_filename = self.find_unique_file_name(self.url, fn_template, ext)

        # We populate the filename field the first time - does the old file still exist?
        if self.filename is None and os.path.exists(os.path.join(self.channel.save_dir, urldigest+ext)):
            log('Found pre-0.15.0 downloaded file: %s', urldigest, sender=self)
            self.filename = urldigest+ext

        # The old file exists, but we have decided to want a different filename
        if self.filename is not None and wanted_filename != self.filename:
            # there might be an old download folder crawling around - move it!
            new_file_name = os.path.join(self.channel.save_dir, wanted_filename)
            old_file_name = os.path.join(self.channel.save_dir, self.filename)
            if os.path.exists(old_file_name) and not os.path.exists(new_file_name):
                log('Renaming %s => %s', old_file_name, new_file_name, sender=self)
                os.rename(old_file_name, new_file_name)
            elif force_update and not os.path.exists(old_file_name):
                # When we call force_update, the file might not yet exist when we
                # call it from the downloading code before saving the file
                log('Choosing new filename: %s', new_file_name, sender=self)
            else:
                log('Warning: %s exists or %s does not.', new_file_name, old_file_name, sender=self)
            log('Updating filename of %s to "%s".', self.url, wanted_filename, sender=self)
        elif self.filename is None:
            log('Setting filename to "%s".', wanted_filename, sender=self)
        else:
            log('Should update filename. Stays the same (%s). Good!', \
                    wanted_filename, sender=self)

        # Store the chosen filename for future calls
        self.filename = wanted_filename
        self.save()
        self.db.commit()

    return os.path.join(self.channel.save_dir, self.filename)
def set_mimetype(self, mimetype, commit=False):
    """Sets the mimetype for this episode"""
    self.mimetype = mimetype
    # Commit is optional so callers can batch several updates
    if commit:
        self.db.commit()
def extension(self, may_call_local_filename=True):
    """Best-guess filename extension (with dot) for this episode.

    Prefers the extension of the generated local filename (unless
    may_call_local_filename is False, to avoid recursion), then the
    URL's extension, then one derived from the mimetype.
    """
    filename, ext = util.filename_from_url(self.url)
    if may_call_local_filename:
        filename = self.local_filename(create=False)
        if filename is not None:
            filename, ext = os.path.splitext(filename)
    # if we can't detect the extension from the url fallback on the mimetype
    if ext == '' or util.file_type_by_extension(ext) is None:
        ext = util.extension_from_mimetype(self.mimetype)
    return ext
def check_is_new(self, downloading=lambda e: False):
    """
    Returns True if this episode is to be considered new.
    "Downloading" should be a callback that gets an episode
    as its parameter and returns True if the episode is
    being downloaded at the moment.
    """
    if self.state != gpodder.STATE_NORMAL:
        return False
    if self.is_played:
        return False
    return not downloading(self)
def mark_new(self):
    """Reset the episode to NORMAL/unplayed and persist the change."""
    self.state = gpodder.STATE_NORMAL
    self.is_played = False
    self.db.update_episode_state(self)
def mark_old(self):
    """Mark the episode as played and persist the change."""
    self.is_played = True
    self.db.update_episode_state(self)
def file_exists(self):
    """True if the episode's local file is known and present on disk."""
    local_file = self.local_filename(create=False, check_only=True)
    return local_file is not None and os.path.exists(local_file)
def was_downloaded(self, and_exists=False):
    """True if the episode is in the DOWNLOADED state.

    With and_exists=True, additionally require the file on disk.
    """
    if self.state != gpodder.STATE_DOWNLOADED:
        return False
    if and_exists:
        return self.file_exists()
    return True
def sync_filename(self, use_custom=False, custom_format=None):
    """Display name used when syncing this episode to a device.

    With use_custom, *custom_format* is expanded with this episode and
    its podcast; otherwise the plain episode title is returned.
    """
    if not use_custom:
        return self.title
    return util.object_string_formatter(custom_format,
            episode=self, podcast=self.channel)
def file_type(self):
    """Coarse media type of the episode, detected from its URL/extension."""
    # Assume all YouTube links are video files
    if youtube.is_video_link(self.url):
        return 'video'

    return util.file_type_by_extension(self.extension())
@property
def basename( self):
    """Filename part of the episode URL, without its extension."""
    stem, _unused_ext = os.path.splitext( os.path.basename( self.url))
    return stem
@property
def published( self):
    """
    Returns published date as YYYYMMDD (or 00000000 if not available)
    """
    try:
        # Local-time formatting of the Unix timestamp
        return datetime.datetime.fromtimestamp(self.pubDate).strftime('%Y%m%d')
    except:
        log( 'Cannot format pubDate for "%s".', self.title, sender = self)
        return '00000000'
@property
def pubtime(self):
    """
    Returns published time as HHMM (or 0000 if not available)
    """
    try:
        # Local-time formatting of the Unix timestamp
        return datetime.datetime.fromtimestamp(self.pubDate).strftime('%H%M')
    except:
        log('Cannot format pubDate (time) for "%s".', self.title, sender=self)
        return '0000'
def cute_pubdate(self):
    """Human-readable publication date, or "(unknown)" if unformattable."""
    result = util.format_date(self.pubDate)
    if result is None:
        return '(%s)' % _('unknown')
    else:
        return result

pubdate_prop = property(fget=cute_pubdate)
def calculate_filesize( self):
    """Update self.length from the on-disk size of the downloaded file.

    Leaves self.length unchanged when no filename is known or the
    size cannot be determined.
    """
    filename = self.local_filename(create=False)
    if filename is None:
        # Fixed log typo ("filesized"); return early instead of letting
        # os.path.getsize(None) raise into a bare except
        log('calculate_filesize called, but filename is None!', sender=self)
        return

    try:
        self.length = os.path.getsize(filename)
    except OSError:
        log( 'Could not get filesize for %s.', self.url)
def get_filesize_string(self):
    """Human-readable file size of the episode (from self.length)."""
    return util.format_filesize(self.length)

filesize_prop = property(fget=get_filesize_string)
def get_played_string( self):
    """Return a translated "Unplayed" marker, or '' when already played."""
    if not self.is_played:
        return _('Unplayed')

    return ''

played_prop = property(fget=get_played_string)
def is_duplicate(self, episode):
    """True if *episode* shares both title and pubDate with this one."""
    if self.title == episode.title and self.pubDate == episode.pubDate:
        log('Possible duplicate detected: %s', self.title)
        return True
    return False
def duplicate_id(self):
    """Hash identifying potential duplicates (same title and pubDate)."""
    identity = (self.title, self.pubDate)
    return hash(identity)
def update_from(self, episode):
    """Copy the identifying metadata fields over from *episode*."""
    for field in ('title', 'url', 'description', 'link', 'pubDate', 'guid'):
        value = getattr(episode, field)
        setattr(self, field, value)
|
cinder block walls ideas, pictures, remodel and decor - cinder block walls design ideas and photos. the largest collection of interior design and decorating ideas on the internet, including kitchens and bathrooms. over 12 million inspiring photos and articles from top shop .
how to decorate a cinder block wall | home guides | sf gatehow to decorate a cinder block wall decorate your cinder block wall to suit your interior style. your cinder block wall probably isn't your favorite architectural element, but it doesn't have to remain an eyesore. you can decorate a .
how to hide cinder blocks in the backyard | home guides .how to hide cinder blocks in the backyard how to hide cinder blocks in the backyard plain cinder blocks do not create a pleasant scene from your backyard. if you have the misfortune of staring out your back window into a .
a cheap way to disguise a cinder block wall | sapling.comyou can cover your cinder block walls with inexpensive materials that do not look cheap. paint paint is the cheapest way to disguise a cinder block wall both inside and out. the only difference is that if you are painting outside .
how to hide cinder block interior walls - houzzhow can i redo the cinder block walls? the walls are 20' long sale rug sale 0 sign in photos kitchen dining kitchen dining room pantry great room shop kitchen dining kitchen dining furniture bar stools tile .
watch this video before painting concrete block walls .homebuildingandrepairs.com/painting/index.htmll click on this link for more information about block walls, fences and painting. here's a problem i. homebuildingandrepairs.com/painting/index.htmll click on .
ideas about cinder block walls | block .find and save ideas about cinder block walls on pinterest, the world's catalog of ideas. | see more about block wall, concrete blocks and basements.we had this huge cream, cinder block wall that needed some personality. i .
how to paint a cinderblock wall | home guides | sf gatehow to paint a cinderblock wall how to paint a cinderblock wall paint dresses up otherwise boring concrete walls. cinder blocks, also called concrete blocks, are a durable but rather industrial-looking material for walls. whether .
ideas about cinder block walls | block .change that plain cinder block look there are many ways in which you can cover your cinder block walls, and you can make it your next diy project! cinder block walls are very strong when compared to most building materials.
disguise cinderblock walls in school - outdoor deck manufacturer homeยปecoยป disguise cinderblock walls in school gallery wpc wall board wpc flooring wpc indoor panel decking wpc outdoor furniture outdoor landscape wpc fence pvc board products .
how to disguise this cinder block wall? โ good question . yards and gardens that have been featured so far, but i've noticed that none of them feature the lovely cinder block walls that are so pervasive in newer homesโฆ .we plan to use a wire trellis system in areas where vines can be .
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Packaging script for the "aerolito" library (Python 2 era setup.py).
__author__ = 'Renato de Pontes Pereira'
__author_email__ = 'renato.ppontes@gmail.com'
__version__ = '0.1'
__date__ = '2011 10 15'

# Bootstrap setuptools via ez_setup when it is not installed
try:
    import setuptools
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()

from setuptools import setup, find_packages

# reStructuredText long description shown on PyPI
long_description = '''
Aerolito is an AIML alternative based on YAML. Aerolito provides features
for natural language processing simulation. Example of usage::

    from aerolito import Kernel
    kernel = Kernel('config.yml')
    print kernel.respond(u'Hello')
'''

setup(
    name='aerolito',
    version = __version__,
    author = __author__,
    author_email=__author_email__,
    license='MIT License',
    url='http://renatopp.com/aerolito',
    download_url='https://github.com/renatopp/aerolito/',
    description='Python library for natural language processing simulation',
    long_description=long_description,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: MacOS X',
        'Environment :: Win32 (MS Windows)',
        'Environment :: X11 Applications',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Markup'
    ],
    keywords='artificial intelligence natural language processing simulation yaml aiml markup aerolito',
    packages=['aerolito'],
    # Runtime dependency: YAML parsing
    install_requires=['pyyaml'],
)
|
Oxford Hen Do Ideas. Oxford is an obvious choice to experience a hen weekend. With a vast array of ideas, activities and accommodation on offer, Oxford wonโt know whatโs hit it when the hen starts!
Enquire for your Hen Party Weekend in Oxford. Book your Hen Do activities, package or Hen night with just a ยฃ50 deposit then invite your group online to pay individually hassle free.
|
import numpy as np, pandas as pd, os
from synthicity.utils import misc
from drcog.models import dataset
dset = dataset.DRCOGDataset(os.path.join(misc.data_dir(),'drcog.h5'))
np.random.seed(1)
import statsmodels.api as sm
#import pygwr_kernel
import random
"""
This program estimates an hedonic model for prices of residential and non-residential buildings. The benchmark method
combines:
1/ A geographically weighted regression to account for spatial non-stationarity
2/ Poisson or Negative Binomial Generalized Linear Model to estimate a log-linear model with heteroskedastic error terms
3/ Zone employment (later-on when the data is fixed, zone average income or household characteristics)
is instrumented with average buildings characteristics in neighboring zones.
The program is organized in four parts:
1/ Create a dataset for estimation
2/ Run the first stage least squares (average zonal employment regressed on county fixed effect and
neighboring zones characteristics). The predicted zonal employment is used as an instrument in all following regressions
3/ Run the GLM GWR method and obtain local hedonic parameters.
4/ Generate average coefficient for each zone
"""
## Part 1: extract variables and build dataset for estimation
def data_estimation(dset, buildings,parcels,fars,zones,establishments, bid):
    """Assemble the building-level dataset used to estimate the hedonic model.

    Joins parcel and zone attributes onto buildings, builds vintage
    dummies and log transforms, then computes zone-level and ring
    ("out-of-zone") aggregates via dset.compute_range (employment,
    square footage, stories, FAR, sector counts). The augmented
    frames are stored back into dset under 'buildings' and 'zones'
    and dset is returned. *bid* selects the building type used for
    the residential-sqft zone aggregate.

    NOTE(review): assumes parcels are indexed by parcel_id, zones by
    zone_id and establishments carry building_id - verify against
    the dataset schema.
    """
    bp=buildings
    p=parcels
    f=fars
    z=zones
    e=establishments
    print bp.columns

    ## Construct additional buildings variables related to zone or parcel characteristics
    bp['zone_id']= p.zone_id[bp.parcel_id].values
    bp['dist_bus']= (p.dist_bus[bp.parcel_id].values)
    bp['ln_dist_bus']=np.log(bp['dist_bus'])
    bp['dist_rail']= (p.dist_rail[bp.parcel_id].values)
    bp['ln_dist_rail']=np.log(bp['dist_rail'])
    bp['county_id']= p.county_id[bp.parcel_id].values
    bp['centroid_x']= p.centroid_x[bp.parcel_id].values
    bp['centroid_y']= p.centroid_y[bp.parcel_id].values
    # Construction-vintage dummies: 1996-1999, 2000-2005, after 2005
    bp['year20']=pd.DataFrame((bp['year_built']>1995)*(bp['year_built']<2000)).applymap(lambda x: 1 if x else 0)
    bp['year10']=pd.DataFrame((bp['year_built']>1999)*(bp['year_built']<2006)).applymap(lambda x: 1 if x else 0)
    bp['year0']=pd.DataFrame((bp['year_built']>2005)).applymap(lambda x: 1 if x else 0)
    f['far_id']=f.index
    p['far']=f.far[p.far_id].values
    bp['far']= p.far[bp.parcel_id].values
    bp['high_land_area']=pd.DataFrame((bp['land_area']>7000)).applymap(lambda x: 1 if x else 0)
    bp['ln_nres_sqft']=np.log(bp['non_residential_sqft'])
    bp['ln_res_sqft']=np.log(bp['bldg_sq_ft'])
    bp['unit_price_res_sqft']=bp[bp['bldg_sq_ft']>0]['unit_price_residential']/bp['bldg_sq_ft']

    ### neighborhood (right now zone , later on, use a kernel) characteristics
    e['zone_id'] = bp.zone_id[e.building_id].values
    bp['building_id']=bp.index
    u=pd.DataFrame(e.groupby('building_id').sector_id.sum())
    u.columns=['sector_id']
    bp=pd.merge(bp,u, left_on='building_id', right_index=True, how='outer')
    z['zone_id']=z.index
    z['far']=p.far[z.zone_id].values
    # Sector counts by building type, in-zone and in the 5-15 ring around it
    z['sector_id_5']=bp[bp['building_type_id']==5].groupby('zone_id').sector_id.sum()
    z['sector_id_5_out']=dset.compute_range( z['sector_id_5'],15, agg=np.sum)-\
            dset.compute_range( z['sector_id_5'],5, agg=np.sum)
    z['sector_id_22']=bp[bp['building_type_id']==22].groupby('zone_id').sector_id.sum()
    z['sector_id_22_out']=dset.compute_range( z['sector_id_22'],15, agg=np.sum)-\
            dset.compute_range( z['sector_id_22'],5, agg=np.sum)
    z['sector_id_18']=bp[bp['building_type_id']==18].groupby('zone_id').sector_id.sum()
    z['sector_id_18_out']=dset.compute_range( z['sector_id_18'],15, agg=np.sum)-\
            dset.compute_range( z['sector_id_18'],5, agg=np.sum)
    # Zone-level employment and building-stock aggregates
    z['emp_sector_mean'] = e.groupby('zone_id').employees.mean()
    z['emp_sector_agg'] = e.groupby('zone_id').employees.sum()
    z['nr_sqft_mean']=bp.groupby('zone_id').non_residential_sqft.mean()
    z['nr_price_mean']=bp.groupby('zone_id').unit_price_non_residential.mean()
    z['r_sqft_mean']=bp[(bp['building_type_id']==bid)*(bp['bldg_sq_ft']>0)].groupby('zone_id').ln_res_sqft.sum()
    z['ln_r_sqft_mean']=np.log( z['r_sqft_mean'])
    z['nr_sqft_agg']=bp.groupby('zone_id').non_residential_sqft.sum()
    z['nr_stories_mean']=bp.groupby('zone_id').stories.sum()
    z['year0_mean']=bp.groupby('zone_id').year0.mean()
    # Ring aggregates (outer radius minus inner radius / own zone)
    z['nr_sqft_30_10']=dset.compute_range(z['nr_sqft_mean'], 30)- z['nr_sqft_mean']
    z['r_sqft_30_10']=dset.compute_range(z[np.isfinite(z['ln_r_sqft_mean'])]['ln_r_sqft_mean'], 15, agg=np.sum)- \
            dset.compute_range(z[np.isfinite(z['ln_r_sqft_mean'])]['ln_r_sqft_mean'], 5, agg=np.sum)
    z['far_30_10']=dset.compute_range(z[np.isfinite(z['far'])]['far'], 15, agg=np.mean)- \
            dset.compute_range(z[np.isfinite(z['far'])]['far'], 5, agg=np.mean)
    #z['ln_r_sqft_mean']
    z['stories_30_10']=dset.compute_range(z['nr_stories_mean'], 15)-\
            dset.compute_range(z['nr_stories_mean'], 5, agg=np.sum)
    z['nr_year0_30_10']=dset.compute_range(z['year0_mean'], 30)- z['year0_mean']

    # Larger Area Characteristics
    z['emp_sector_mean_30']=dset.compute_range(z['emp_sector_mean'], 30, agg=np.mean)
    z['nr_sqft_30']=dset.compute_range(z['nr_sqft_mean'], 15,agg=np.mean)
    z['r_sqft_30']=dset.compute_range(z['ln_r_sqft_mean'], 15, agg=np.sum)

    # Broadcast the zone aggregates back onto each building
    bp['emp_sector_mean_30']=z.emp_sector_mean_30[bp.zone_id].values
    bp['emp_sector_10']=z.emp_sector_mean[bp.zone_id].values
    bp['year0_10']=z.year0_mean[bp.zone_id].values
    bp['stories_10']=z.nr_stories_mean[bp.zone_id].values
    bp['nr_sqft_30_10']=z.nr_sqft_30_10[bp.zone_id].values
    bp['stories_30_10']=z.stories_30_10[bp.zone_id].values
    bp['nr_year0_30_10']=z.nr_year0_30_10[bp.zone_id].values
    bp['nr_sqft_10']=z.nr_sqft_mean[bp.zone_id].values
    bp['nr_price_10']=z.nr_price_mean[bp.zone_id].values
    bp['nr_sqft_30']=z.nr_sqft_30[bp.zone_id].values
    bp['r_sqft_30_10']=z.r_sqft_30_10[bp.zone_id].values
    bp['r_sqft_10']=z.r_sqft_mean[bp.zone_id].values
    bp['r_sqft_30']=z.r_sqft_30[bp.zone_id].values
    #bp['nr_sqft_agg']=z.nr_sqft_agg[bp.zone_id].values
    # Log transforms of the broadcast aggregates
    bp['ln_nr_sqft_30_10']=np.log(bp['nr_sqft_30_10'])
    bp['ln_nr_sqft_30']=np.log(bp['nr_sqft_30'])
    bp['ln_nr_sqft_10']=np.log(bp['nr_sqft_10'])
    bp['ln_r_sqft_30_10']=np.log(bp['r_sqft_30_10'])
    bp['ln_r_sqft_30']=np.log(bp['r_sqft_30'])
    bp['ln_r_sqft_10']=np.log(bp['r_sqft_10'])
    bp['ln_emp_30']=np.log(bp['emp_sector_mean_30'])
    bp['ln_emp_10']=np.log(bp['emp_sector_10'])
    # Leave-one-out style differences (zone aggregate minus own building)
    bp['ln_sqft_zone']=-np.log(bp.bldg_sq_ft)+bp['r_sqft_10']
    bp['ln_sqft_out']=-np.log(bp.bldg_sq_ft)+bp['r_sqft_30']
    bp['ln_stories_zone']=-bp['stories']+bp['stories_10']
    # NOTE(review): this overwrites the 'ln_sqft_out' computed two lines
    # above - confirm which definition is intended
    bp['ln_sqft_out']=-np.log(bp.bldg_sq_ft)+bp['r_sqft_30_10']
    bp['ln_stories_out']=bp['stories_30_10']
    bp['sector_id_5']=z.sector_id_5[bp.zone_id].values
    bp['sector_id_5_out']=z.sector_id_5_out[bp.zone_id].values
    bp['sector_id_18']=z.sector_id_18[bp.zone_id].values
    bp['sector_id_18_out']=z.sector_id_18_out[bp.zone_id].values
    bp['sector_id_22']=z.sector_id_22[bp.zone_id].values
    bp['sector_id_22_out']=z.sector_id_22_out[bp.zone_id].values

    #bp=bp[bp['building_type_id']==bid]
    del e
    del p
    dset.d['buildings']=bp
    dset.d['zones']=z
    return dset
# Part1bis:income and age
def income_data(dset):
    """Attach zone-level income-by-age counts to *dset* and dump a Tableau CSV.

    Reads census income/age marginals and Zillow zone prices from hard-coded
    local paths, collapses the income brackets into low/high bins per
    householder age group, merges the bins onto *dset* by zone_id and returns
    the merged frame.  Side effect: writes the binned table to a CSV for
    Tableau.

    NOTE(review): this is Python 2 code (print statements); the '\\U...' in
    the Windows paths would even be a SyntaxError under Python 3.
    """
    df_marg=pd.read_csv('C:\Users\XGitiaux\Documents\Tableau\Census/inc_age_marg.csv', index_col='zone_id')
    df_price=pd.read_csv('C:\Users\XGitiaux\Documents\Tableau\Census/ZillowPriceZone.csv')
    df_marg['zone_id']=df_marg.index
    # Debug print for one sample zone
    print df_marg[df_marg['zone_id']==1609]['zone_id']
    df_marg=pd.merge(df_price, df_marg, left_on='zone_id', right_index=True)
    df_marg.index=df_marg['zone_id']
    df_marg.fillna(0, inplace=True)
    df=pd.DataFrame(df_marg.index, index=df_marg.index)
    print df_marg.PriceZ
    # Low income: all brackets below $30,000, by householder age group.
    df['low_income_25_44']=df_marg['Householder 25 to 44 years:Less than $10,000']+df_marg['Householder 25 to 44 years:$10,000 to $14,999']\
        +df_marg[ 'Householder 25 to 44 years:$15,000 to $19,999']\
        +df_marg['Householder 25 to 44 years:$20,000 to $24,999']+\
        df_marg['Householder 25 to 44 years:$25,000 to $29,999']
    df['low_income_45_64']=df_marg['Householder 45 to 64 years:Less than $10,000']+\
        df_marg['Householder 45 to 64 years:$10,000 to $14,999']\
        +df_marg[ 'Householder 45 to 64 years:$15,000 to $19,999']\
        +df_marg['Householder 45 to 64 years:$20,000 to $24,999']+\
        df_marg['Householder 45 to 64 years:$25,000 to $29,999']
    df['low_income_65']=df_marg['Householder 65 years and over:Less than $10,000']+\
        df_marg['Householder 65 years and over:$10,000 to $14,999']\
        +df_marg[ 'Householder 65 years and over:$15,000 to $19,999']\
        +df_marg['Householder 65 years and over:$20,000 to $24,999']+\
        df_marg['Householder 65 years and over:$25,000 to $29,999']
    # NOTE(review): "high income" also includes the $60,000-$99,999 brackets,
    # i.e. everything from $60,000 up -- confirm this threshold is intended.
    df['high_income_25_44']=df_marg['Householder 25 to 44 years:$100,000 to $124,999']+\
        df_marg['Householder 25 to 44 years:$125,000 to $149,999']+\
        df_marg['Householder 25 to 44 years:$150,000 to $199,999']+\
        df_marg['Householder 25 to 44 years:$200,000 or more']+\
        df_marg['Householder 25 to 44 years:$60,000 to $74,999']+\
        df_marg['Householder 25 to 44 years:$75,000 to $99,999']
    df['high_income_45_64']=df_marg['Householder 45 to 64 years:$100,000 to $124,999']+\
        df_marg['Householder 45 to 64 years:$125,000 to $149,999']+\
        df_marg['Householder 45 to 64 years:$150,000 to $199,999']+\
        df_marg['Householder 45 to 64 years:$200,000 or more']+\
        df_marg['Householder 45 to 64 years:$60,000 to $74,999']+\
        df_marg['Householder 45 to 64 years:$75,000 to $99,999']
    df['high_income_65']=df_marg['Householder 65 years and over:$100,000 to $124,999']+\
        df_marg['Householder 65 years and over:$125,000 to $149,999']+\
        df_marg['Householder 65 years and over:$150,000 to $199,999']+\
        df_marg['Householder 65 years and over:$200,000 or more']+\
        df_marg['Householder 65 years and over:$60,000 to $74,999']+\
        df_marg['Householder 65 years and over:$75,000 to $99,999']
    # Create a csv file for Tableau
    print dset[(dset['bldg_sq_ft']>0)*(dset['building_type_id']==20)]['unit_price_res_sqft']
    # NOTE(review): the merge happens BEFORE 'price_bid=20' is added to df, so
    # that column ends up only in the CSV below, not in the returned dset --
    # confirm this ordering is intentional.
    dset=pd.merge(dset, df, left_on='zone_id', right_index=True, how='outer')
    df['price_bid=20']=df_marg['PriceZ']
    print dset[(dset['building_type_id']==20)*(np.isfinite(dset['unit_price_res_sqft']))*(dset['county_id']==8035)].groupby('zone_id').unit_price_res_sqft.mean()
    df['zone_id']=df.index
    df=df[np.isfinite(df['price_bid=20'])]
    df.to_csv('C:\Users\XGitiaux\Documents\Tableau\Census/UBSprice_income5.csv')
    return dset
## Part 2: Instrument for employment
def instrument(dset, instrumented, instr, ind_vars):
    """First-stage regression for the instrumental-variables setup.

    Regresses the *instrumented* variables on the instruments (*instr*) plus
    the exogenous regressors (*ind_vars*) by OLS, and returns a DataFrame
    with one ``<name>_iv`` column of fitted values per instrumented variable.
    """
    print "Step: Instrument Variables"
    ### Make sure there is no nan: drop rows that are non-finite in any
    ### variable that enters the regression.
    z=dset
    for varname in instrumented:
        z=z[np.isfinite(z[varname])]
    for varname in instr:
        z=z[np.isfinite(z[varname])]
    for varname in ind_vars:
        z=z[np.isfinite(z[varname])]
    ### Independent variables including fixed effects
    #x=pd.get_dummies(z['county_id'])
    x=pd.DataFrame(index=z.index)
    for varname in ind_vars:
        x[varname]=z[varname]
    for varname in instr:
        x[varname]=z[varname]
    x=sm.add_constant(x,prepend=False)
    ### Dependent Variables
    y=pd.DataFrame(z[instrumented])
    print len(y)
    print len(x)
    ### Regression (first stage, plain OLS)
    regression_results=sm.OLS(y,x).fit()
    print regression_results.summary()
    ### Return the instrument
    # NOTE(review): pd.DataFrame(z.index) stores the index values as a COLUMN
    # on a fresh default index; callers later merge this frame with
    # right_index=True, so pd.DataFrame(index=z.index) may have been intended
    # -- confirm before changing.
    out=pd.DataFrame(z.index)
    for varname in instrumented:
        out[varname+"_iv"]=regression_results.predict()
    return out
## Part 3: main regression using GWR
def global_hedonic(dset,depvar, ind_vars, bid, instrumented=None, instr=None, dsetiv=None, ind_variv=None, fixed_effect=False):
    """Global (non-spatial) OLS hedonic regression for one building type.

    Optionally substitutes endogenous regressors with first-stage fitted
    values from :func:`instrument`, fits OLS of *depvar* on the regressors,
    prints diagnostics, writes the coefficients to a per-building-type CSV
    and returns them as a DataFrame aligned with the estimation sample.
    """
    ### Instrument: run the first stage and merge the fitted values in by zone.
    # NOTE(review): this loop re-runs instrument() (for ALL instrumented
    # variables) once per instrumented variable -- confirm a single call was
    # not intended.
    #dsetiv=dsetiv[dsetiv['building_type_id']==bid]
    for varname in instrumented:
        out=instrument(dsetiv, instrumented, instr, ind_variv)
        dset=pd.merge(dset, out, left_on='zone_id', right_index=True)
    ## Make sure there is no nan in any variable entering the regression
    b=dset[dset['building_type_id']==bid]
    for varname in instrumented:
        b=b[np.isfinite(b[varname])]
        b=b[~np.isnan(b[varname])]
    for varname in ind_vars:
        b=b[np.isfinite(b[varname])]
        b=b[~np.isnan(b[varname])]
    for varname in depvar:
        b=b[np.isfinite(b[varname])]
        b=b[~np.isnan(b[varname])]
    ### Independent variables including fixed effects (county dummies)
    if fixed_effect==True:
        x=pd.get_dummies(b.county_id)
        x['zone_id']=b['zone_id']
        x=sm.add_constant(x,prepend=False)
    else:
        x=pd.DataFrame(b.zone_id)
    print b
    for varname in ind_vars:
        x[varname]=b[varname]
    ### Adding Instrument: use fitted values only when every IV input is
    ### non-empty, otherwise fall back to the raw variables.
    if len(instrumented)*len(instr)* len(dsetiv)*len(ind_variv)!=0:
        for varname in instrumented:
            x[varname]=b[varname+"_iv"]
    else:
        for varname in instrumented:
            x[varname]=b[varname]
    x=sm.add_constant(x,prepend=False)
    # zone_id was only carried along for bookkeeping; drop it before fitting.
    del x['zone_id']
    print b['ln_stories_out']
    ### Dependent Variables
    y=pd.DataFrame(b[depvar])
    ### Regression
    print x
    print y
    #regression_results=sm.GLM(y,x, family=sm.families.NegativeBinomial()).fit()
    regression_results=sm.OLS(y,x).fit()
    out_parm=(regression_results.params).T
    print out_parm
    print regression_results.summary()
    #### Coefficient: broadcast each global coefficient to every observation
    out=pd.DataFrame(index=b.index)
    i=0
    for varname in list(x.columns.values):
        out[varname]=out_parm[i]
        i=i+1
    out['zone_id']=b['zone_id']
    print out
    out.to_csv('C:\urbansim\output\global_coeff_'+str(bid)+'.csv')
    return out
def kernel_hedonic(dset,depvar, ind_vars, bid, bandwidth, instrumented=None, instr=None, dsetiv=None, ind_variv=None, fixed_effect=False):
    """Geographically-weighted hedonic regression for one building type.

    Same data preparation as :func:`global_hedonic`, but the final fit is a
    GWR model (``pygwr_kernel.GWR``) estimated at each observation's
    centroid using *bandwidth*.  Writes the per-observation coefficients to
    a per-building-type CSV and returns them as a DataFrame.
    """
    ### Instrument: first-stage fitted values, merged in by zone.
    dsetiv=dsetiv[dsetiv['building_type_id']==bid]
    for varname in instrumented:
        out=instrument(dsetiv, instrumented, instr, ind_variv)
        dset=pd.merge(dset, out, left_on='zone_id', right_index=True)
    ## Make sure there is no nan in any variable entering the regression
    b=dset[dset['building_type_id']==bid]
    for varname in instrumented:
        b=b[np.isfinite(b[varname])]
    for varname in ind_vars:
        b=b[np.isfinite(b[varname])]
    for varname in depvar:
        b=b[np.isfinite(b[varname])]
    ### Independent variables including fixed effects (county dummies)
    if fixed_effect==True:
        x=pd.get_dummies(b.county_id)
    else:
        x=pd.DataFrame(index=b.index)
    #x=sm.add_constant(x,prepend=False)
    for varname in ind_vars:
        x[varname]=b[varname]
    ### Adding Instrument: use fitted values only when every IV input is
    ### non-empty, otherwise fall back to the raw variables.
    if len(instrumented)*len(instr)* len(dsetiv)*len(ind_variv)!=0:
        for varname in instrumented:
            x[varname]=b[varname+"_iv"]
    else:
        for varname in instrumented:
            x[varname]=b[varname]
    ### Dependent Variables
    print b[depvar]
    y=pd.DataFrame(b[depvar])
    ### Locations: observation centroids used by the spatial kernel
    g=pd.DataFrame(b['centroid_x'])
    g['centroid_y']=b['centroid_y']
    ### GWR (pygwr_kernel expects plain float64 arrays)
    y=np.array(y,dtype=np.float64 )
    xv=np.array(x,dtype=np.float64 )
    g=np.array(g,dtype=np.float64 )
    model = pygwr_kernel.GWR(targets=y, samples=xv, locations=g)
    print "Estimating GWR model at all data points..."
    gwr_predict,gwr_parm = model.estimate_at_target_locations(bandwidth)
    print gwr_predict
    ### Report coefficients: one column per regressor; the column after the
    ### regressors in gwr_parm is stored as 'const'.
    out=pd.DataFrame(index=b.index)
    i=0
    for varname in list(x.columns.values):
        out[varname]=gwr_parm[:,i]
        i=i+1
    out['const']=gwr_parm[:,i]
    out['zone_id']=b['zone_id']
    out.to_csv('C:\urbansim\output\coeff_'+str(bid)+'.csv')
    return out
def estimate_hedonic(dset,depvar, ind_vars, bid, bandwidth, instrumented=None, instr=None, dsetiv=None, ind_variv=None, fixed_effect=False):
    """Run the hedonic estimation for every building type in *bid*.

    A non-zero *bandwidth* selects the geographically-weighted (kernel)
    regression; a zero bandwidth selects the global OLS regression, in
    which case the dependent variable is log-transformed first.
    """
    use_kernel = bandwidth != 0
    for building_type in bid:
        if use_kernel:
            kernel_hedonic(dset, depvar, ind_vars, building_type, bandwidth,
                           instrumented, instr, dsetiv, ind_variv, fixed_effect)
        else:
            # NOTE(review): this log transform sits inside the loop, so with
            # several building types the dependent variable is logged
            # repeatedly -- behavior kept as-is; confirm it is intentional.
            dset[depvar] = np.log(dset[depvar])
            global_hedonic(dset, depvar, ind_vars, building_type,
                           instrumented, instr, dsetiv, ind_variv, fixed_effect)
"""
ind_vars=['stories', 'ln_nres_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus', 'far', 'ln_emp_30',
'ln_nr_sqft_10', 'ln_nr_sqft_30', 'ln_emp_10']
ind_vars2=[ 'ln_nr_sqft_10', 'ln_nr_sqft_30', 'ln_emp_30', ]
dset=data_estimation(dset,dset.buildings, dset.parcels, dset.fars, dset.zones, dset.establishments, 20)
b=dset.buildings
b=income_data(b)
b=b[b['unit_price_res_sqft']>0]
#Randomly hold back 25 % of the sample
#b=b.ix[random.sample(b.index, int(len(b)))]
z=dset.zones
b.drop_duplicates(['stories', 'ln_res_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus', 'far', 'ln_emp_30',
'ln_nr_sqft_10', 'ln_nr_sqft_30', 'ln_emp_10'], inplace=True)
#b=b[b['unit_price_non_residential']<10000]
ind_vars=['stories', 'ln_res_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus','ln_sqft_zone',
'far', 'ln_sqft_out','ln_stories_zone', 'ln_stories_out', 'ln_emp_30','ln_emp_10','low_income_25_44', 'low_income_45_64',
'low_income_65', 'high_income_25_44', 'high_income_65', 'high_income_45_64']
ind_vars2=['stories', 'ln_res_sqft', 'high_land_area', 'year0', 'year10','year20', 'ln_dist_bus', 'ln_sqft_zone',
'ln_sqft_out', 'far', 'ln_stories_zone', 'ln_stories_out','low_income_25_44','low_income_25_44', 'low_income_45_64',
'low_income_65', 'high_income_25_44', 'high_income_65', 'high_income_45_64']
out=estimate_hedonic(b,['unit_price_residential'],ind_vars
, [20],0, instrumented=[], instr=['sector_id_5', 'sector_id_5_out','sector_id_18', 'sector_id_18_out',
'sector_id_22', 'sector_id_22_out', ]
, dsetiv=b, ind_variv=ind_vars2, fixed_effect=False)
"""
## Part 4: Create average hedonic coefficients for each zone. These coefficients will be used in simulations to compute
## average zone price. If there is no building in the zone, we use county level average.
def estimate_zone(dset, ind_vars, bid):
    """Average the estimated hedonic coefficients per zone, per building type.

    For each building type in *bid*, reads the coefficient file written by
    ``global_hedonic``, averages every coefficient (plus the constant) by
    zone, and fills zones that have no buildings with their county-level
    average.  Returns one concatenated DataFrame with ``zone_id`` and
    ``bid`` columns.
    """
    per_bid_frames = []
    ind_vars = ind_vars + ['const']
    for b in bid:
        df = pd.read_csv('C:/urbansim/output/global_coeff_' + str(b) + '.csv')
        ## Need county_id to build the county-level fallback
        p = pd.DataFrame(dset.parcels.zone_id)
        p['county_id'] = dset.parcels.county_id
        # Renamed from 'list' in the original: never shadow the builtin.
        per_var_frames = []
        for x in ind_vars:
            u = pd.DataFrame(df.groupby('zone_id')[x].mean())
            u.columns = [x]
            v = pd.merge(p, u, left_on='zone_id', right_index=True, how='outer')
            ## Use county average if no zone average
            wc = pd.DataFrame(v[~np.isnan(v[x])].groupby('county_id')[x].mean())
            wc.columns = ['county_' + x]
            v = pd.merge(v, wc, left_on='county_id', right_index=True)
            v[x].fillna(v['county_' + x], inplace=True)
            w = pd.DataFrame(v.groupby('zone_id')[x].mean())
            w.columns = [x]
            per_var_frames.append(w)
        coeff = pd.concat(per_var_frames, axis=1)
        coeff['zone_id'] = coeff.index
        coeff['bid'] = b
        per_bid_frames.append(coeff)
    coeff_dset = pd.concat(per_bid_frames)
    return coeff_dset
|
Catalina Island: Located 22 miles off the Southern California coast, Catalina Island is famous for its wildlife and outdoor activities. Some of the most popular are snorkelling, scuba diving, horseback riding, kayaking and hiking. Regular boats depart from Newport Beach, Long Beach, San Pedro and Dana Point.
Sonoma Coast: Sonoma Coast boasts picture-perfect beaches with powder white sand and clean water. The area is also home to over 300 species of birds.
Hermosa Beach: By day Hermosa Beach's boardwalk is frequented by bikers, bladers, joggers and strollers. By night it's an excellent spot for people watching.
Venice Beach: Venice Beach is world famous for its lively Ocean Front Walk. This is the spot to see and be seen! The beach itself boasts the typical California coast stereotype of golden sands, great surf and plenty of sunshine.
Pismo Beach: Pismo Beach offers a relaxed, friendly vibe that makes it a popular choice for family holidays. It's also a top spot for watching the sun sink into the Pacific.
Laguna Beach: Laguna Beach is the perfect stretch of sand to unfurl your beach towel and soak up the rays. Things move at a slower pace in Laguna.
La Jolla Cove: This may be one of the smallest beaches in California, but it's also one of the best. It's a popular spot for scuba-diving and snorkelling, thanks to visibility that extends up to 30 feet and wildlife protected by the San Diego La Jolla Underwater Park Ecological Reserve.
|
#!/usr/bin/env python
#
# BaseClassifier.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import os
import yara
import logging
from .abstractmethod import abstractmethod
log = logging.getLogger("Thug")
class BaseClassifier:
    """Common base for Thug classifiers backed by Yara rule sets.

    Subclasses are expected to define ``classifier`` (a label used in log
    messages) and ``default_rule_file`` (path of the bundled rule file,
    relative to this module) and to implement :meth:`classify`.  The
    compiled ruleset is rebuilt from the whole ``self._rules`` mapping every
    time a rule file is added.
    """

    def __init__(self):
        # Maps Yara namespace name -> rule file path.
        self._rules = dict()
        # Accumulates classification results produced by subclasses.
        self.matches = list()
        # namespace0 is reserved for the default rule file.
        self.namespace_id = 1
        self.init_rules()

    def init_rules(self):
        """Compile the subclass's default rule file, if one exists."""
        p = getattr(self, 'default_rule_file', None)
        if p is None:
            # log.warn is a deprecated alias of log.warning since Python 3.3.
            log.warning("[%s] Skipping not existing default classification rule file" % (self.classifier, ))
            return

        r = os.path.join(os.path.dirname(os.path.abspath(__file__)), p)
        if not os.path.exists(r):
            log.warning("[%s] Skipping not existing default classification rule file" % (self.classifier, ))
            return

        self._rules['namespace0'] = r
        self.rules = yara.compile(filepaths = self._rules)

    def add_rule(self, rule_file):
        """Register *rule_file* under a fresh namespace and recompile."""
        if not os.path.exists(rule_file):
            log.warning("[%s] Skipping not existing classification rule file %s" % (self.classifier, rule_file, ))
            return

        self._rules["namespace%s" % (self.namespace_id, )] = rule_file
        self.namespace_id += 1
        self.rules = yara.compile(filepaths = self._rules)

    @abstractmethod
    def classify(self):
        """Analyze an artifact and append results to ``self.matches``."""
        pass

    @property
    def result(self):
        """Classification matches accumulated so far."""
        return self.matches
|
Why is China the largest electronics exporter in the world?
The People's Republic of China is the fastest growing major economy in the world, with an average GDP growth rate of 8.5%. Economic experts predict China could overtake the US economy by 2030. China is the world's second largest economy right now; the $17.41 trillion U.S. economy is the largest in the world. About 20% of China's exports go to the United States alone, and nearly 25% of the country's exports are electronic equipment such as computer products and gaming and entertainment electronics. The export industry in China has shown 48% growth since 2010, and China's rapid economic growth is mainly due to this export boom. Twelve factories of Foxconn (the world's largest electronics contract manufacturer and a major supplier for the iPhone, iPad, HP, Blackberry, Dell, Nokia, etc.) are in China itself, which is more than in any other country. Foxconn is a Taiwanese electronics contract manufacturing company with 1.3 million employees — a workforce comparable in scale to that of the United States Department of Defense. Thus, a major portion of Foxconn's products comes from Chinese factories.
What are the reason behind this rapid growth in China's Export industry?
China is the most populated country in the world, with a population of more than 1.3 billion. The law of supply and demand is always fundamental to economics: when more people are willing to work than there are jobs available, the price of labor goes down. Besides, the employment rate is high in the country — China ranks 4th among countries by employment rate. So a large country like China, with a high employment rate, multiplies total productivity.
China is the largest recipient of Foreign Direct Investment, with average inflows of $129 billion. Its huge population and low labour costs are the foremost reasons behind these large inflows; China topped the United States' FDI by $17 billion during 2010–2013. Low corporate tax and tax holidays in the Chinese economy encourage foreign companies to invest in R&D and other manufacturing sectors.
(3) Mass production of low-end electronics.
Chinese manufacturers make low-end electronic products rather than advanced engineering products, while most developed countries focus on high-end technologies and complex products. Where most European manufacturers seek highly qualified professionals for advanced research, Chinese manufacturers require low-skilled employees. So job opportunities are greater in China, and so is the employment rate.
In addition, to the above reasons, there are some other factors influence the electronic market indirectly.
United Nations reports say China is the largest e-waste dumping site in the world: 70% of the world's electronic waste ends up in China. A large number of people work in dismantling and recycling, even though it is dangerous and toxic, and many more are involved in the other stages of the electronics industry. Hence, it is easy for an ordinary person to gain knowledge about electronics manufacturing and become a part of it.
|
from app import db
from .models import Menu, Poll
class PlaceMenu():
    """Menu of a single cafeteria ("place") for one day.

    ``items`` maps each meal-time key to a dict holding the serving-hours
    string, the list of dishes, and a rating string.  The Korean string
    literals below appear encoding-mangled in this copy of the source (some
    are even split across lines); they are runtime data (message text and DB
    values), so they are reproduced exactly as found.  The three meal keys
    are presumably breakfast/lunch/dinner -- TODO confirm against a properly
    decoded copy of the file.
    """
    def __init__(self, place):
        self.title = None # date + place
        self.date = None
        self.dayname = None
        # meal-time -> {hours string, dish list, rating string}
        self.items = {
            "์์นจ": {
                "์ ๋ณด": None,
                "๋ฉ๋ด": [],
                "ํ์ ": None,
            },
            "์ ์ฌ": {
                "์ ๋ณด": None,
                "๋ฉ๋ด": [],
                "ํ์ ": None,
            },
            "์ ๋
": {
                "์ ๋ณด": None,
                "๋ฉ๋ด": [],
                "ํ์ ": None,
            },
        }
        self.place = place
        self.price = None

    def test(self):
        # Debug helper: dump every field of this place to stdout.
        print("%s PlaceMenu TEST" % self.place)
        print("title : %s" % self.title)
        print("dayname : %s" % self.dayname)
        print("date : %s" % self.date)
        print("์์นจ ์ ๋ณด : %s" % self.items["์์นจ"]["์ ๋ณด"])
        print("์ ์ฌ ์ ๋ณด : %s" % self.items["์ ์ฌ"]["์ ๋ณด"])
        print("์ ๋
 ์ ๋ณด : %s" % self.items["์ ๋
"]["์ ๋ณด"])
        print("์์นจ : %s" % " ".join(self.items["์์นจ"]["๋ฉ๋ด"]))
        print("์ ์ฌ : %s" % " ".join(self.items["์ ์ฌ"]["๋ฉ๋ด"]))
        print("์ ๋
 : %s" % " ".join(self.items["์ ๋
"]["๋ฉ๋ด"]))

    def returnMenu(self, summary, time=None):
        """Format this place's menu block of the final message.

        The output is a header line with the place name (and price, when
        set), then per meal a line with the meal name and serving hours, a
        rating line, and the dish list.  When *summary* is true, plain rice
        is dropped and only the first four dishes per meal are shown.
        *time* restricts the output to a single meal key.  The original
        docstring held a sample of the rendered message.
        """
        timelist = ["์์นจ", "์ ์ฌ", "์ ๋
"]
        message = ""
        # if not time:
        #     message += "{} {}\n".format(self.date, self.dayname)
        if self.price == "":
            message += "โก {}\n".format(self.place)
        else:
            message += "โก {} ({})\n".format(self.place, self.price)
        # No menu information for any meal at all -> short notice message.
        if not any([self.items[t]["๋ฉ๋ด"] for t in timelist]):
            message += "์๋จ ์ ๋ณด๊ฐ ์์ต๋๋ค.\n"
            return message
        for key in timelist:
            if time and key != time:
                continue
            # Skip meals whose dish list is empty.
            if self.items[key]["๋ฉ๋ด"]:
                if self.items[key]["์ ๋ณด"] == "":
                    # Special-cased place: its first two slots apparently hold
                    # two lunch variants rather than breakfast+lunch -- TODO
                    # confirm once the encoding is restored.
                    if self.place == "๋จ๋ฌธ๊ด" and key == "์์นจ":
                        message += "โ ์ ์ฌ: ํ์\n"
                    elif self.place == "๋จ๋ฌธ๊ด" and key == "์ ์ฌ":
                        message += "โ ์ ์ฌ: ์์\n"
                    else:
                        message += "โ {}\n".format(key)
                else:
                    if self.place == "๋จ๋ฌธ๊ด" and key == "์์นจ":
                        message += "โ ์ ์ฌ: ํ์ ({})\n".format(self.items[key]["์ ๋ณด"])
                    elif self.place == "๋จ๋ฌธ๊ด" and key == "์ ์ฌ":
                        message += "โ ์ ์ฌ: ์์ ({})\n".format(self.items[key]["์ ๋ณด"])
                    else:
                        message += "โ {} ({})\n".format(
                            key,
                            self.items[key]["์ ๋ณด"]
                        )
                # Append the rating line.
                message += "โถ " + self.items[key]["ํ์ "] + "\n"
                # Append the dish list (copy, so summary trimming does not
                # mutate self.items).
                menus = self.items[key]["๋ฉ๋ด"][:]
                if summary:
                    # Drop plain rice from the summary view.
                    if "์๋ฐฅ" in menus:
                        menus.remove("์๋ฐฅ")
                    message += "\n".join(menus[:4]) + "\n"
                else:
                    message += "\n".join(menus) + "\n"
        return message

    def updateDate(self, date):
        # *date* is a (dayname, date-string) pair.
        self.dayname = date[0]
        self.date = date[1]

    def updateMenu(self, menu):
        """Store *menu* (a list of per-meal dish lists) and sync it to the DB.

        *menu* holds either two or three per-meal lists; entries are matched
        from the last meal backwards, so a missing leading entry simply
        leaves the first meal empty.  Each meal's dish list is serialized as
        a comma-joined string into the Menu table; existing rows are updated
        only when the serialized menu actually changed.
        """
        time = ["์ ๋
", "์ ์ฌ", "์์นจ"]
        reverseMenu = list(reversed(menu))
        for index, item in enumerate(reverseMenu):
            self.items[time[index]]["๋ฉ๋ด"] = item
            menu = ",".join(item)
            # m = DBAdmin.query(Menu, self.date, self.place, time[index])
            m = Menu.query.filter_by(
                date=self.date,
                place=self.place,
                time=time[index]).first()
            if not m:  # no existing row for this date/place/meal
                if item:  # only insert when the menu is non-empty
                    # DBAdmin.addMenu(self.date, self.place, time[index], menu)
                    m = Menu(self.date, self.place, time[index], menu)
                    db.session.add(m)
                    db.session.commit()
            else:  # a row exists
                if m.menu != menu:  # write only when it changed
                    m.menu = menu
                    # DBAdmin.commit()
                    db.session.commit()

    def updateScore(self):
        """Refresh each meal's rating string from the Poll table."""
        for time in self.items:
            # self.items[time]
            m = Menu.query.filter_by(
                date=self.date,
                place=self.place,
                time=time
            ).first()
            if m:  # the menu row exists in the DB
                polls = Poll.query.filter_by(menu=m).all()
                count = len(polls)
                if count:  # at least one vote
                    scoreSum = sum(p.score for p in polls)
                    self.items[time]["ํ์ "] = "%.1f / 5.0" % (scoreSum / count)
                else:
                    self.items[time]["ํ์ "] = "ํ๊ฐ์์"
class DayMenu():
    """All cafeterias' menus for one day of the week.

    Holds one :class:`PlaceMenu` per cafeteria and knows how to render them
    into chat messages.  The Korean string literals appear encoding-mangled
    in this copy of the source; they are runtime data and are reproduced
    exactly as found.
    """
    def __init__(self, dayname):
        self.title = None # date + dayname
        self.date = None
        self.items = [
            PlaceMenu("ํ์ํ๊ด"),
            PlaceMenu("๋จ๋ฌธ๊ด"),
            PlaceMenu("๊ต์ง์"),
            PlaceMenu("์ ๊ธฐ์์ฌ"),
            # PlaceMenu("์ 1๊ธฐ์์ฌ"),
        ]
        self.dayname = dayname
        # Serving-hour strings: three per place (one per meal), consumed in
        # order by the loop below.  An empty string means "no such meal".
        info = [
            # hours for items[0]
            "",
            "11:00-14:00",
            "17:00-19:00",
            # hours for items[1]
            "",
            "11:30-14:00",
            "17:30-19:00",
            # hours for items[2]
            "",
            "11:30-14:20",
            "17:00-19:20",
            # hours for items[3]
            "8:00-9:00",
            "11:30-14:30",
            "17:30-18:50"
        ]
        time = ["์์นจ", "์ ์ฌ", "์ ๋
"]
        price = ["3,900์", "3,900์", "6,000์", "3,900์"]
        for place in self.items:
            place.price = price.pop(0)
            for t in time:
                place.items[t]["์ ๋ณด"] = info.pop(0)

    def returnAllMenu(self, summary):
        """Render every place's menu for the day as one message."""
        message = "{} {}\n".format(self.date, self.dayname)
        if summary:
            message += "> ๊ฐ์ถ๋ฆฐ ๋ฉ๋ด์
๋๋ค.\n"
            message += "> ์๋ฐฅ์ ์ ์ธํ์ต๋๋ค.\n"
        for place in self.items:
            message += place.returnMenu(summary=summary) + "\n"
        if summary:
            message += "\n์ค๋ฅธ์ชฝ์ผ๋ก ๋๊ธฐ์๋ฉด ๋ค๋ฅธ ๋ฒํผ๋ ์์ต๋๋ค.\n"
        # Extra broadcast text can be appended here when needed.
        # message += ""
        return message

    def returnPlaceMenu(self, place):
        """Render the full menu of the single place named *place*.

        *place* must be one of the names below (same literals used to build
        ``self.items``).  The original docstring noted a search helper might
        be needed.
        """
        name = ["ํ์ํ๊ด", "๋จ๋ฌธ๊ด", "๊ต์ง์", "์ ๊ธฐ์์ฌ"]
        message = self.items[name.index(place)].returnMenu(summary=False)
        return message

    def returnTimeMenu(self, time):
        """Render every place's menu restricted to one meal-time key."""
        message = "{} {}\n".format(self.date, self.dayname)
        for place in self.items:
            message += place.returnMenu(summary=False, time=time) + "\n"
        return message

    def returnScore(self):
        """Render the ratings of every non-empty meal of every place."""
        self.updateScore()
        message = ""
        times = ["์์นจ", "์ ์ฌ", "์ ๋
"]
        for place in self.items:
            for time in times:
                if place.items[time]["๋ฉ๋ด"]:
                    message += "{} {} : {}\n".format(
                        place.place,
                        time,
                        place.items[time]["ํ์ "]
                    )
        return message

    def updateSelf(self, date):
        """Accept *date* only when its weekday matches this DayMenu.

        *date* is a (dayname, date-string) pair; returns True on a match
        after storing the date string.
        """
        if self.dayname == date[0]:
            self.date = date[1]
            return True
        else:
            return False

    def updateScore(self):
        # Propagate the rating refresh to every place.
        for place in self.items:
            place.updateScore()

    def update(self, date, menu):
        """Split the day's 10 raw menu entries across the places.

        The scraper delivers 10 per-meal dish lists per day which belong to
        the four places in chunks of 2 / 3 / 2 / 3, in the same order as
        ``self.items``.
        """
        divMenu = []
        divMenu.append([menu[0], menu[1]])
        divMenu.append([menu[2], menu[3], menu[4]])
        divMenu.append([menu[5], menu[6]])
        divMenu.append([menu[7], menu[8], menu[9]])
        if self.updateSelf(date):
            for index, item in enumerate(self.items):
                item.updateDate(date)
                item.updateMenu(divMenu[index])
                item.updateScore()
|
Goldia Etagere Bookcase By Charlton Home online shopping store. Prior to investing, you can check the price, shipping cost and more by clicking the Check Price! button.
Please take a few moments to watch the Goldia Etagere Bookcase By Charlton Home details and features. This producrs is High Quality and Fast Shipping for that store. Click on the link below to See Price, Product Description and Customer reviews for Shipping available within the USA.
You can order the Goldia Etagere Bookcase By Charlton Home after checking and comparing prices and confirming the shipping date. Some people need the Goldia Etagere Bookcase By Charlton Home at a cheap price, while the item may be priced similarly at different shops.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#================================================================#
import os
import sys
import re
import json
import subprocess
#================================================================#
def main():
# Y linked genes
Y = {
"HSFY1" : "chrY:20708557-20750849",
#"HSFY2" : "chrY:20893326-20990548",
"BPY2" : "chrY:25119966-25151612",
"BPY2B" : "chrY:26753707-26785354",
"BPY2C" : "chrY:27177048-27208695",
"XKRY " : "chrY:19880860-19889280",
"PRY" : "chrY:24636544-24660784",
"PRY2" : "chrY:24217903-24242154"
}
path_run = sys.argv[1] # folder where bam (and json files for Ion Torrent server) is/are located
# ### Ion Torrent method
# # load run info
# dpniDict = {}
#
# json_file = '%s/ion_params_00.json' % path_run
# json_load = json.load(open(json_file))
# runname = json_load['exp_json']['log']['runname'] #.split('-DANNI')[0]
# # runname ='-'.join(runname.split('-')[0:2])
#
# dpniDict[runname] = []
#
# #get sample and barcode name
# for sample, barcode in json_load['experimentAnalysisSettings']['barcodedSamples'].items():
#
# sample_name = sample.replace(' ', '_') # just in case...
# barcode_name = barcode['barcodeSampleInfo'].keys()[0]
#
# dpniDict[runname].append([barcode_name, sample_name])
#
# for run, design in dpniDict.items():
# for sample in design:
#
# barcode_id = sample[0]
# sample_name = sample[1]
# name = run + "_" + sample_name
# bam = os.path.join(path_run, barcode_id)+"_rawlib.bam"
#
# Yl_reads = []
# Y_reads = []
#
# #coverage Y linked genes
# for gene,coord in Y.items():
# cmd = "samtools view -c {bam} {coord}".format(bam = bam, coord = coord)
# process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = process.communicate()
# if process.returncode != 0:
# raise Exception(stderr)
# else:
# Yl_reads.append(int(stdout[:-1]))
#
# cmd = "samtools view -c {bam} {coord}".format(bam = bam, coord = "chrY")
# process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = process.communicate()
# if process.returncode != 0:
# raise Exception(stderr)
# else:
# Y_reads.append(int(stdout[:-1]))
#
# print name, sum(Yl_reads), Y_reads[0], float(sum(Yl_reads))*100/float(Y_reads[0]), Yl_reads
### cluster method
for bam in os.listdir(path_run):
if bam.endswith(".bam") or bam.endswith(".cram"):
name = os.path.basename(bam).split('.')[0]
bam_path = os.path.join(path_run, bam)
Yl_reads = []
Y_reads = []
for gene,coord in Y.items():
cmd = "samtools view -c {bam} {coord}".format(bam = bam_path, coord = coord)
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise Exception(stderr)
else:
Yl_reads.append(int(stdout[:-1]))
cmd = "samtools view -c {bam} {coord}".format(bam = bam_path, coord = "chrY")
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise Exception(stderr)
else:
Y_reads.append(int(stdout[:-1]))
if int(Y_reads[0]) != 0:
print name, sum(Yl_reads), Y_reads[0], float(sum(Yl_reads))*100/float(Y_reads[0]), Yl_reads
else:
print name, sum(Yl_reads), Y_reads[0], 0, Yl_reads
# Script entry point: the first CLI argument is the folder holding the
# BAM/CRAM files to scan.
if __name__ == '__main__':
    main()
|
In some countries you should ask for a discount when you're shopping.
The store has discounted all its camembert cheese because tomorrow is its best-before date.
Do students get discounts on rooms in hostels in some countries?
Customers can get huge discounts by booking in advance.
In return, the employers offer good, sometimes negotiable salaries, good working conditions, longer holidays and additional leaves at bank holidays, discounts of various kinds, commissions, lunch vouchers, annual bonuses or travel schemes for commuters.
Sales start Monday, with discounts of up to 50%.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.