text
stringlengths 29
850k
|
|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training
def _changing_default_center_bias():
  """Logs a deprecation warning about the `enable_centered_bias` default.

  Called when a caller leaves `enable_centered_bias=None`, to announce that
  the implicit default (True) will flip to False after 2016-10-09.
  """
  # Bug fix: the original message concatenated "...by default." directly
  # into "Instructions..." with no separator; add the missing newline.
  logging.warn(
      "Change warning: default value of `enable_centered_bias` will change"
      " after 2016-10-09. It will be disabled by default.\n"
      "Instructions for keeping existing behaviour:\n"
      "Explicitly set `enable_centered_bias` to 'True' if you want to keep "
      "existing behaviour.")
# TODO(ispir): Increase test coverage
class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
  """An estimator for TensorFlow Linear and DNN joined training models.

  Input of `fit`, `train`, and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
    if `weight_column_name` is not `None`, a feature with
      `key=weight_column_name` whose value is a `Tensor`.
    for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               target_column,
               model_dir=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=True,
               config=None,
               feature_engineering_fn=None):
    """Initializes a _DNNLinearCombinedBaseEstimator instance.

    Args:
      target_column: A _TargetColumn object.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set should be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True will use a single (possibly partitioned)
        variable to store all weights for the linear model. More efficient if
        there are many columns, however requires all columns are sparse and
        have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set should be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and
        returns features and targets which will be fed
        into the model.

    Raises:
      ValueError: If both linear_feature_columns and dnn_features_columns are
        empty at the same time.
    """
    super(_DNNLinearCombinedBaseEstimator, self).__init__(
        model_dir=model_dir, config=config)
    num_ps_replicas = config.num_ps_replicas if config else 0
    # Linear and DNN sub-models are built through composable_model helpers;
    # each one owns its own optimizer and gradient clipping.
    self._linear_model = composable_model.LinearComposableModel(
        num_label_columns=target_column.num_label_columns,
        optimizer=linear_optimizer,
        _joint_weights=_joint_linear_weights,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas)
    # The DNN part is optional: it is only constructed when hidden units
    # are specified.
    self._dnn_model = composable_model.DNNComposableModel(
        num_label_columns=target_column.num_label_columns,
        hidden_units=dnn_hidden_units,
        optimizer=dnn_optimizer,
        activation_fn=dnn_activation_fn,
        dropout=dnn_dropout,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None
    self._linear_feature_columns = linear_feature_columns
    self._linear_optimizer = linear_optimizer
    self._dnn_feature_columns = dnn_feature_columns
    self._dnn_hidden_units = dnn_hidden_units
    # Name of the graph collection the centered-bias variable is added to;
    # _centered_bias_step() retrieves it through this collection.
    self._centered_bias_weight_collection = "centered_bias"
    self._enable_centered_bias = enable_centered_bias
    self._target_column = target_column
    # Default feature engineering is the identity function.
    self._feature_engineering_fn = (
        feature_engineering_fn or
        (lambda features, targets: (features, targets)))

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def linear_weights_(self):
    """Returns weights per feature of the linear part."""
    return self._linear_model.get_weights(model_dir=self._model_dir)

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def linear_bias_(self):
    """Returns bias of the linear part."""
    # NOTE(review): adds the centered bias unconditionally; presumably the
    # "centered_bias_weight" variable only exists when enable_centered_bias
    # was True — confirm before relying on this accessor.
    return (self._linear_model.get_bias(model_dir=self._model_dir) +
            self.get_variable_value("centered_bias_weight"))

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def dnn_weights_(self):
    """Returns weights of deep neural network part."""
    return self._dnn_model.get_weights(model_dir=self._model_dir)

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def dnn_bias_(self):
    """Returns bias of deep neural network part."""
    # The centered bias is appended as an extra list element here (the DNN
    # biases come back as a list, one entry per layer).
    return (self._dnn_model.get_bias(model_dir=self._model_dir) +
            [self.get_variable_value("centered_bias_weight")])

  def _get_target_column(self):
    """Returns the target column of this Estimator."""
    return self._target_column

  def _get_feature_dict(self, features):
    """Wraps a bare feature `Tensor` into a dict keyed by empty string."""
    if isinstance(features, dict):
      return features
    return {"": features}

  def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = contrib_variables.get_global_step()
    assert global_step
    features = self._get_feature_dict(features)
    features, targets = self._feature_engineering_fn(features, targets)
    logits = self._logits(features, is_training=True)
    if self._enable_centered_bias:
      centered_bias_step = [self._centered_bias_step(targets, features)]
    else:
      centered_bias_step = []
    # The centered-bias update must run before the loss used for the main
    # train step is computed, hence the control dependency.
    with ops.control_dependencies(centered_bias_step):
      training_loss = self._target_column.training_loss(logits, targets,
                                                        features)
      weighted_average_loss = self._target_column.loss(logits, targets,
                                                       features)
    logging_ops.scalar_summary("loss", weighted_average_loss)
    linear_train_step = self._linear_model.get_train_step(training_loss)
    dnn_train_step = (self._dnn_model.get_train_step(training_loss) if
                      self._dnn_model else [])
    # Only bump the global step after both sub-model updates have run.
    with ops.control_dependencies(linear_train_step + dnn_train_step):
      with ops.get_default_graph().colocate_with(global_step):
        return state_ops.assign_add(global_step, 1).op, weighted_average_loss

  def _get_eval_ops(self, features, targets, metrics=None):
    """See base class."""
    features = self._get_feature_dict(features)
    features, targets = self._feature_engineering_fn(features, targets)
    logits = self._logits(features)
    return self._target_column.get_eval_ops(features, logits, targets, metrics)

  def _get_predict_ops(self, features):
    """See base class."""
    features = self._get_feature_dict(features)
    # No targets at predict time; the feature engineering fn still runs so
    # features get the same transformation as in training.
    features, _ = self._feature_engineering_fn(features, None)
    logits = self._logits(features)
    return self._target_column.logits_to_predictions(logits, proba=True)

  @deprecated(
      "2016-09-23",
      "The signature of the input_fn accepted by export is changing to be "
      "consistent with what's used by tf.Learn Estimator's train/evaluate, "
      "which makes this function useless. This will be removed after the "
      "deprecation date.")
  def _get_feature_ops_from_example(self, examples_batch):
    """Parses a batch of serialized tf.Examples into the model's features."""
    column_types = layers.create_feature_spec_for_parsing((
        self._get_linear_feature_columns() or []) + (
            self._get_dnn_feature_columns() or []))
    features = parsing_ops.parse_example(examples_batch, column_types)
    return features

  def _get_linear_feature_columns(self):
    """Returns validated, de-duplicated linear columns (or None)."""
    if not self._linear_feature_columns:
      return None
    feature_column_ops.check_feature_columns(self._linear_feature_columns)
    # Sorted for a deterministic column order across runs.
    return sorted(set(self._linear_feature_columns), key=lambda x: x.key)

  def _get_dnn_feature_columns(self):
    """Returns validated, de-duplicated DNN columns (or None)."""
    if not self._dnn_feature_columns:
      return None
    feature_column_ops.check_feature_columns(self._dnn_feature_columns)
    return sorted(set(self._dnn_feature_columns), key=lambda x: x.key)

  def _dnn_logits(self, features, is_training):
    """Builds and returns the logits of the DNN sub-model."""
    return self._dnn_model.build_model(
        features, self._dnn_feature_columns, is_training)

  def _linear_logits(self, features, is_training):
    """Builds and returns the logits of the linear sub-model."""
    return self._linear_model.build_model(
        features, self._linear_feature_columns, is_training)

  def _centered_bias(self):
    """Creates the centered-bias variable and its summaries."""
    centered_bias = variables.Variable(
        array_ops.zeros([self._target_column.num_label_columns]),
        # Registered in a named collection so _centered_bias_step can find it.
        collections=[self._centered_bias_weight_collection,
                     ops.GraphKeys.VARIABLES],
        name="centered_bias_weight")
    logging_ops.scalar_summary(
        ["centered_bias_%d" % cb for cb in range(
            self._target_column.num_label_columns)],
        array_ops.reshape(centered_bias, [-1]))
    return centered_bias

  def _centered_bias_step(self, targets, features):
    """Returns the op that trains only the centered-bias variable."""
    # The collection holds exactly the one variable created in
    # _centered_bias(); [0] below picks it out of the returned list.
    centered_bias = ops.get_collection(self._centered_bias_weight_collection)
    batch_size = array_ops.shape(targets)[0]
    # Broadcast the bias to per-example logits so the target column's loss
    # can be evaluated against it alone.
    logits = array_ops.reshape(
        array_ops.tile(centered_bias[0], [batch_size]),
        [batch_size, self._target_column.num_label_columns])
    with ops.name_scope(None, "centered_bias", (targets, features)):
      training_loss = self._target_column.training_loss(
          logits, targets, features)
    # Learn central bias by an optimizer. 0.1 is a convervative lr for a
    # single variable.
    return training.AdagradOptimizer(0.1).minimize(
        training_loss, var_list=centered_bias)

  def _logits(self, features, is_training=False):
    """Returns combined logits: linear + DNN (+ centered bias if enabled)."""
    linear_feature_columns = self._get_linear_feature_columns()
    dnn_feature_columns = self._get_dnn_feature_columns()
    if not (linear_feature_columns or dnn_feature_columns):
      raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                       "should be defined.")
    if linear_feature_columns and dnn_feature_columns:
      logits = (self._linear_logits(features, is_training) +
                self._dnn_logits(features, is_training))
    elif dnn_feature_columns:
      logits = self._dnn_logits(features, is_training)
    else:
      logits = self._linear_logits(features, is_training)
    if self._enable_centered_bias:
      return nn.bias_add(logits, self._centered_bias())
    else:
      return logits
class DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):
  """A classifier for TensorFlow Linear and DNN joined training models.

  Example:

  ```python
  education = sparse_column_with_hash_bucket(column_name="education",
                                             hash_bucket_size=1000)
  occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)
  education_x_occupation = crossed_column(columns=[education, occupation],
                                          hash_bucket_size=10000)
  education_emb = embedding_column(sparse_id_column=education, dimension=16,
                                   combiner="sum")
  occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                    combiner="sum")

  estimator = DNNLinearCombinedClassifier(
      # common settings
      n_classes=n_classes,
      weight_column_name=weight_column_name,
      # wide settings
      linear_feature_columns=[education_x_occupation],
      linear_optimizer=tf.train.FtrlOptimizer(...),
      # deep settings
      dnn_feature_columns=[education_emb, occupation_emb],
      dnn_hidden_units=[1000, 500, 100],
      dnn_optimizer=tf.train.AdagradOptimizer(...))

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
    if `weight_column_name` is not `None`, a feature with
      `key=weight_column_name` whose value is a `Tensor`.
    for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               config=None,
               feature_engineering_fn=None):
    """Constructs a DNNLinearCombinedClassifier instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of target classes. Default is binary classification.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and
        returns features and targets which will be fed
        into the model.

    Raises:
      ValueError: If `n_classes` < 2.
      ValueError: If both `linear_feature_columns` and `dnn_features_columns`
        are empty at the same time.
    """
    if n_classes < 2:
      raise ValueError("n_classes should be greater than 1. Given: {}".format(
          n_classes))
    # `None` means the caller did not choose: keep the old default (True)
    # but warn that the default will change.
    if enable_centered_bias is None:
      enable_centered_bias = True
      _changing_default_center_bias()
    target_column = layers.multi_class_target(
        n_classes=n_classes,
        weight_column_name=weight_column_name)
    super(DNNLinearCombinedClassifier, self).__init__(
        model_dir=model_dir,
        linear_feature_columns=linear_feature_columns,
        linear_optimizer=linear_optimizer,
        _joint_linear_weights=_joint_linear_weights,
        dnn_feature_columns=dnn_feature_columns,
        dnn_optimizer=dnn_optimizer,
        dnn_hidden_units=dnn_hidden_units,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        gradient_clip_norm=gradient_clip_norm,
        enable_centered_bias=enable_centered_bias,
        target_column=target_column,
        config=config,
        feature_engineering_fn=feature_engineering_fn)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=False):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes (or an iterable of predicted classes if
      as_iterable is True).
    """
    predictions = self.predict_proba(
        x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)
    if as_iterable:
      # Each yielded `p` is the probability vector of a single example,
      # hence argmax over axis 0.
      return (np.argmax(p, axis=0) for p in predictions)
    else:
      # Batched result: argmax over the class axis.
      return np.argmax(predictions, axis=1)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(
      self, x=None, input_fn=None, batch_size=None, as_iterable=False):
    """Returns prediction probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities (or an iterable of predicted
      probabilities if as_iterable is True).
    """
    # The base class `predict` already returns probabilities because
    # _get_predict_ops uses proba=True.
    return super(DNNLinearCombinedClassifier, self).predict(
        x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)
class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
  """A regressor for TensorFlow Linear and DNN joined training models.

  Example:

  ```python
  education = sparse_column_with_hash_bucket(column_name="education",
                                             hash_bucket_size=1000)
  occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)
  education_x_occupation = crossed_column(columns=[education, occupation],
                                          hash_bucket_size=10000)
  education_emb = embedding_column(sparse_id_column=education, dimension=16,
                                   combiner="sum")
  occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
                                    combiner="sum")

  estimator = DNNLinearCombinedRegressor(
      # common settings
      weight_column_name=weight_column_name,
      # wide settings
      linear_feature_columns=[education_x_occupation],
      linear_optimizer=tf.train.FtrlOptimizer(...),
      # deep settings
      dnn_feature_columns=[education_emb, occupation_emb],
      dnn_hidden_units=[1000, 500, 100],
      dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))

  # To apply L1 and L2 regularization, you can set optimizers as follows:
  tf.train.ProximalAdagradOptimizer(
      learning_rate=0.1,
      l1_regularization_strength=0.001,
      l2_regularization_strength=0.001)
  # It is same for FtrlOptimizer.

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.train(input_fn_train)
  estimator.evaluate(input_fn_eval)
  estimator.predict(x)
  ```

  Input of `fit`, `train`, and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
    if `weight_column_name` is not `None`, a feature with
      `key=weight_column_name` whose value is a `Tensor`.
    for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               model_dir=None,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=None,
               target_dimension=1,
               config=None,
               feature_engineering_fn=None):
    """Initializes a DNNLinearCombinedRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires that all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If None,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      target_dimension: TODO(zakaria): dimension of the target for multilabels.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        targets which are the output of `input_fn` and
        returns features and targets which will be fed
        into the model.

    Raises:
      ValueError: If both linear_feature_columns and dnn_features_columns are
        empty at the same time.
    """
    # `None` means the caller did not choose: keep the old default (True)
    # but warn that the default will change.
    if enable_centered_bias is None:
      enable_centered_bias = True
      _changing_default_center_bias()
    target_column = layers.regression_target(
        weight_column_name=weight_column_name,
        target_dimension=target_dimension)
    super(DNNLinearCombinedRegressor, self).__init__(
        model_dir=model_dir,
        linear_feature_columns=linear_feature_columns,
        linear_optimizer=linear_optimizer,
        _joint_linear_weights=_joint_linear_weights,
        dnn_feature_columns=dnn_feature_columns,
        dnn_optimizer=dnn_optimizer,
        dnn_hidden_units=dnn_hidden_units,
        dnn_activation_fn=dnn_activation_fn,
        dnn_dropout=dnn_dropout,
        gradient_clip_norm=gradient_clip_norm,
        enable_centered_bias=enable_centered_bias,
        target_column=target_column,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
|
K.K. talks about recording the classic Judas Priest album, British Steel. Watch the clip below.
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Description: File system resilience testing application
# Author: Hubert Kario <hubert@kario.pl>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright (c) 2015 Hubert Kario. All rights reserved.
#
# This copyrighted material is made available to anyone wishing
# to use, modify, copy, or redistribute it subject to the terms
# and conditions of the GNU General Public License version 2.
#
# This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""Methods to fragment list of writes."""
from .write import Write
class Fragmenter(object):
    """Split a sequence of writes into sector-sized fragments."""

    def __init__(self, sector_size=512):
        """
        Create an object.

        @param sector_size: maximum size of the generated fragments
        """
        self.sector_size = sector_size

    def fragment(self, writes):
        """
        Return a generator with fragmented Write objects from passed writes.

        Each input write is cut into consecutive pieces of at most
        `self.sector_size` bytes, preserving the original offsets.

        @param writes: list of Write objects
        """
        for source in writes:
            remaining = source.data
            position = source.offset
            while remaining:
                piece = Write(position, remaining[:self.sector_size])
                position += len(piece.data)
                remaining = remaining[self.sector_size:]
                yield piece
|
Sooner Recon offers a full-range of commercial roofing services.
Our experienced professionals offer roof repair, roof replacement, and new construction roofing. We are experienced in multiple commercial roofing techniques including TPO, Spray coating, Metal roofing and various rolled roofing applications. Sooner Recon knows that commercial buildings present their own unique set of requirements such as scheduling, timing, parking, clean-up, and tenant issues. We pre-plan closely with building owners and property managers to help ensure each installation goes as smoothly as it can. We also work hand-in-hand with our product suppliers to ensure proper specifications are met, and to solve any problems that the roof project may encounter ahead of time. Sooner Recon offers daily project updates with our clients, so you can be assured that the job is progressing as planned. Protect your investment with a new roof today!
Our spray on roof coatings are CRRC® rated and ENERGY STAR® listed highly elastomeric roof coatings. These spray on coatings offer exceptional durability and UV stability which extends the life of your roof. Attractive bright white reduces energy consumption. Available in five colors (white, beige, tan, light grey, dark grey).
Available in twelve popular colors including standard white. Light-colored surfaces have been shown to reflect the sun’s rays which reduces air-conditioning costs. Attractive ceramic granule surface cap sheets are a resilient non-woven polyester membrane mat coated with weather resistant polymer modified asphalt.
Let us help save you time and money. We are experienced in working directly with your insurance agent and adjuster to maximize the benefits available to you under your policy. Because of our expertise in working on claims, we are often able to secure additional funds for our clients that they were not able to access on their own.
Sooner Recon is 100% committed to our clients’ satisfaction.
You can be confident that we provide the best in value, quality products, courteous and experienced roofing professionals, and unparalleled customer service before, during, and after your roofing job.
|
# One import per line, grouped stdlib / third-party (PEP 8).
import os
import subprocess

import setuptools

# Construct the redo input files, including redo.version, if we're
# starting from the original redo source dir. If we're running
# from the python pip package, the files already exist, so we
# skip this step.
mydir = os.path.dirname(__file__)
script = os.path.join(mydir, 'do')
verfile = os.path.join(mydir, 'redo/version/_version.py')
if os.path.exists(script) and not os.path.exists(verfile):
    # Running the './do' script generates redo/version/_version.py.
    subprocess.check_call([script])
import redo.version
def read(fname):
    """Return the contents of *fname*, resolved relative to this script's dir.

    Uses a context manager so the file handle is closed promptly instead of
    being leaked until garbage collection (the original left it open).
    """
    with open(os.path.join(mydir, fname)) as f:
        return f.read()
# FIXME: we probably need to build redo/sh on the target system, somehow.
setuptools.setup(
name = 'redo-tools',
version = redo.version.TAG.replace('-', '+', 1),
python_requires='>=2.7',
author = 'Avery Pennarun',
author_email = 'apenwarr@gmail.com',
description = ('djb redo: a recursive, general purpose build system.'),
long_description=read('README.md'),
long_description_content_type='text/markdown',
license = 'Apache',
keywords = 'redo redo-ifchange make dependencies build system compiler',
url = 'https://github.com/apenwarr/redo',
packages = setuptools.find_packages(),
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Topic :: Software Development :: Build Tools',
'Topic :: Utilities',
],
entry_points = {
'console_scripts': [
'redo=redo.cmd_redo:main',
'redo-always=redo.cmd_always:main',
'redo-ifchange=redo.cmd_ifchange:main',
'redo-ifcreate=redo.cmd_ifcreate:main',
'redo-log=redo.cmd_log:main',
'redo-ood=redo.cmd_ood:main',
'redo-sources=redo.cmd_sources:main',
'redo-stamp=redo.cmd_stamp:main',
'redo-targets=redo.cmd_targets:main',
'redo-unlocked=redo.cmd_unlocked:main',
'redo-whichdo=redo.cmd_whichdo:main',
],
},
)
|
body parts for kids worksheets word correction worksheet costume design middle school.
body parts for kids worksheets teaching them words of the worksheet to teach engineering design process middle school.
body parts for kids worksheets 5 vocabulary matching worksheet 1 free kindergarten science of the design.
body parts for kids worksheets worksheet to teach of the experimental design high school answers.
body parts for kids worksheets worksheet kindergarten choice image human preschoolers preschool design process middle school.
body parts for kids worksheets preschool luxury our site free printable.
body parts for kids worksheets major scissors human systems worksheet of the design tab in excel 2016 mac.
body parts for kids worksheets preschool inspirational kindergarten unique a to z full color textbook lovely awesome desi.
body parts for kids worksheets preschool fresh worksheet design process middle school.
body parts for kids worksheets simple coloring pages preschool adults in collection of worksheet beautiful how to learn 8 steps design technolog.
body parts for kids worksheets in worksheet 6 free kindergarten all download and on engineering design pr.
body parts for kids worksheets preschool human preschoolers themed worksheet.
body parts for kids worksheets worksheet can use as a dictionary to label designed early writers.
body parts for kids worksheets download by tablet desktop original size back to inspirational image of preschool lovely 9 best science.
body parts for kids worksheets easy to color kindergarten worksheet human anatomy diagram picture child development d.
body parts for kids worksheets coloring page of a turkey sheets fish worksheet kindergarten free printable made by teachers lesson reading turke.
body parts for kids worksheets human printable puzzle animal kindergarten worksheet math can use as a dictionary to label engineering design p.
body parts for kids worksheets medium size of grade 1 science simple activities human math gorgeous printable weather animals projects d.
body parts for kids worksheets posters the kindergarten blog pictures of preschool design tab in excel.
body parts for kids worksheets of the kindergarten related worksheet on engineering design process.
body parts for kids worksheets leaning of bird worksheet stock vector royalty free design process middle school.
body parts for kids worksheets human free grade phonics the designing contingencies.
body parts for kids worksheets kindergarten free printable my games worksheet design high school.
body parts for kids worksheets word correction worksheet design technology ks2.
body parts for kids worksheets preschool awesome 9 best images on new coloring es kindergarten image web design students.
body parts for kids worksheets preschool science design tab in excel 2016 greyed out.
body parts for kids worksheets free printable kindergarten i think this would be great students to get designing.
|
# -*- coding: UTF-8 -*-
"""
Kodi resolveurl plugin
Copyright (C) 2016 alifrezser
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import re
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class IndavideoResolver(ResolveUrl):
    """Resolves indavideo.hu links to direct media URLs.

    Queries indavideo's JSON player endpoint for video metadata.  If the
    first lookup fails, the embed hash is scraped from the video page and
    the lookup is retried with it.
    """
    name = "indavideo"
    domains = ["indavideo.hu"]
    # Raw string so the regex escapes are explicit (value unchanged).
    pattern = r'(?://|\.)(indavideo\.hu)/(?:player/video|video)/([0-9A-Za-z-_]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Return a playable URL for media_id; raise ResolverError on failure."""
        web_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.FF_USER_AGENT}
        html = self.net.http_GET(web_url, headers=headers).content
        data = json.loads(html)

        if data['success'] == '0':
            # Direct lookup failed: scrape the embed hash from the public
            # video page and retry the API call with it.
            # (renamed from `hash`, which shadowed the builtin)
            html = self.net.http_GET('http://indavideo.hu/video/%s' % media_id).content
            emb_hash = re.search(r'emb_hash.+?value\s*=\s*"([^"]+)', html)
            if not emb_hash:
                raise ResolverError('File not found')
            web_url = self.get_url(host, emb_hash.group(1))
            html = self.net.http_GET(web_url).content
            data = json.loads(html)

        if data['success'] == '1':
            video_files = data['data']['video_files']
            if not video_files:
                raise ResolverError('File removed')

            tokens = data['data']['filesh']
            sources = []
            if isinstance(video_files, dict):
                video_files = video_files.values()
            for video_url in video_files:
                # The quality (e.g. 360, 720) is embedded in the file name.
                match = re.search(r'\.(\d+)\.mp4', video_url)
                if match:
                    sources.append((match.group(1), video_url))
            # Append the per-quality access token to each URL.
            sources = [(quality, url + '&token=%s' % tokens[quality])
                       for quality, url in sources]
            try:
                sources = list(set(sources))
            except Exception:  # narrow from bare except; dedup is best-effort
                pass
            # Highest quality first.
            sources = sorted(sources, key=lambda x: x[0])[::-1]
            return helpers.pick_source(sources)

        raise ResolverError('File not found')

    def get_url(self, host, media_id):
        return 'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % (media_id)
|
Well Done and a Big Thank You to Everyone - you spent the Grand Total of £855 during Book Fair Week and raised £512 in free books for our school!
We really enjoyed having the book fair in school this week. In class, we all had a World Book Day book to read and had lots of fun taking part in activities linked to the books. You can see our class displays, based on the books that we read, in the hall.
Our funny-faced monster competition was great fun too as we all designed a monster in the style of Tom Gates.
Sophie, Erin, Kate, Jayke, Nikki, Tanisha and Lucy Jo.
They were able to choose a book from the book fair as their prize.
Throughout the week, all of the staff moved class for a shared story time. The children enjoyed hearing some of the favourite stories that staff brought with them to share.
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with user settings related views."""
from django import forms
from django import http
from melange.logic import settings as settings_logic
from melange.request import access
from melange.views.helper import urls
from soc.logic import cleaning
from soc.views import base
from soc.views.helper import url_patterns
class UserSettingsForm(forms.Form):
  """Form used to view and update per-user page settings."""

  view_as = forms.CharField()

  def clean_view_as(self):
    """Validates view_as and maps it to the corresponding user's key."""
    user = cleaning.clean_existing_user('view_as')(self)
    if user:
      return user.key
    return None
class UserSettings(base.RequestHandler):
  """View to list and set all user settings for the page."""

  # Restricted to developers only.
  access_checker = access.DEVELOPER_ACCESS_CHECKER

  def djangoURLPatterns(self):
    """See base.RequestHandler.djangoURLPatterns for specification."""
    return [
        url_patterns.url(
            r'site', r'settings/user/%s$' % url_patterns.USER,
            self, name=urls.UrlNames.USER_SETTINGS)
    ]

  def templatePath(self):
    """See base.RequestHandler.templatePath for specification."""
    return 'melange/settings/user_settings.html'

  def context(self, data, check, mutator):
    """See base.RequestHandler.context for specification."""
    user_settings = settings_logic.getUserSettings(data.url_ndb_user.key)
    initial = {}
    if user_settings.view_as is not None:
      # Pre-populate the form with the stored view_as id.
      # NOTE(review): view_as presumably selects a user to view the site
      # as; confirm against settings_logic.
      initial['view_as'] = user_settings.view_as.id()
    # On GET, data.POST is empty so the form is unbound (data=None).
    return {'form': UserSettingsForm(data=data.POST or None, initial=initial)}

  def post(self, data, check, mutator):
    """See base.RequestHandler.post for specification."""
    form = UserSettingsForm(data=data.POST)
    if form.is_valid():
      # An empty cleaned value is normalized to None before storing.
      view_as = form.cleaned_data['view_as'] or None
      settings_logic.setUserSettings(data.url_ndb_user.key, view_as=view_as)
      return http.HttpResponseRedirect(data.request.get_full_path())
    else:
      # TODO(nathaniel): problematic self-use.
      return self.get(data, check, mutator)
|
I am very excited to be taking this step into the breeding world as I will be mentored by the very best in the business – Bon’s breeder, Pat Swallows of Topmast Goldens and Gayle Watkins of Gaylan’s Goldens, who bred my boy, Dice, and Bon’s dam, Abbi.
Pat has been incredibly helpful so far, and I have learned so much from her. Pat has been active in Goldens since 1972, so she brings a wealth of experience and knowledge to this breeding.
Gayle has actively shown and bred multi-purpose golden retrievers since 1979 and most recently, has founded Avidog® International, LLC (www.avidog.com), a company that focuses on empowering, inspiring, and encouraging breeders and owners to transform the way they whelp and raise their puppies. Gayle has developed a unique, state-of-the-art puppy rearing and development program that gives all of our pups, no matter their future, the best possible start in life. The Avidog® puppy rearing system is carefully designed, from conception to placement, to bring out the best in every puppy. It mixes science, nature, and experience in a balanced blend of mental, physical, and social developmental opportunities with nutritional support. Furthermore, before pups are placed, they are thoroughly assessed on conformation (structure, type and athleticism), temperament, and working ability, using a comprehensive evaluation process that seeks to give every owner the ideal puppy for them and provide them a training plan for their individual puppy. Pups will be raised using this ground-breaking puppy raising and evaluation techniques developed by Avidog International/Gaylan’s Golden Retrievers and followed by Topmast Golden Retrievers.
The wording of this breeding philosophy is not mine, and I would like to thank Gayle and Pat for their guidance and for allowing me to share their words here.
Our litters are of tremendous importance to us and all of our efforts will be concentrated into caring for and socializing our pups. As I start into this venture, my goal at Audeo Goldens, as it is at Gaylan’s and Topmast, is to produce beautiful, companion hunters that are able and willing to compete in all aspects of Golden activities.
At Audeo, our first concern is longevity; we simply do not accept that golden retrievers should live only 10 years, and we will be doing all we can to turn the tide. After longevity, we will balance temperament, conformation, working ability, and general health. We will breed only Golden Retrievers who are intelligent, confident, and loving. We are equally concerned about the serious genetic problems facing Golden Retrievers so, at a minimum, all of our breeding dogs must test clear for inherited orthopedic, eye, and heart diseases. We are also concerned with allergies, cancer, breeding or whelping problems, seizures, and swallowing disorders.
In conjunction with physical and mental soundness, we seek to produce good looking, typey Golden Retrievers with intelligence, drive, and birdiness. Achievement of these goals will be measured in the hunting field, as well as the obedience and agility rings, tracking fields, and other performance events.
Audeo puppies will undergo a well thought-out development program during their first nine weeks that has been developed by Gaylan’s Goldens. They will be handled and socialized from birth, with daily individual attention.
Between four and seventeen days, this daily attention will include Gaylan’s Early Scent Introduction (ESI) program and the U.S. Military’s Early Neurological Stimulation exercises (see Dr. Carmen Battaglia's article in the AKC Gazette, May 95 and Golden Retriever News). This early neurological stimulation has been shown to improve cardiovascular performance, strengthen heartbeats, strengthen adrenal glands, increase stress tolerance, and increase resistance to disease. In tests of learning, stimulated pups were found to be more active and were more exploratory than their non-stimulated littermates, over which they were dominant in competitive situations. Secondary effects on learning have also been noted, including increased calmness, decreased errors, and reduced distress signals when stressed.
The pups will be socialized with adults, children, other dogs, and cats. By the time they go home between eight and nine weeks, they will have been introduced to game birds, had daily walks in the woods, started their housebreaking and simple obedience training, and been in crates and on multiple car rides. If the weather permits, the pups will also be introduced to water.
Our goal is to carefully place each pup with a well-suited family, ideally in a competition, hunting, and/or working home. We will make every effort to match each owner or family with the pup that suits him or her. To do this, we will gather information from prospective owners about their lifestyle, experience in dogs, and expectations for their pup. We will also keep careful records of each pup's reactions and personality to assist in placement. During their eighth week, we will evaluate the puppies' temperaments, using the Avidog Puppy Evaluation Test (APET), a test that Gayle has have developed. This test will enable us to make the best match between puppies and owners, as well as to help new owners develop a training plan for their pup’s first year. In addition, we will do hunting, structural, and conformation assessments.
We will have a long-term commitment to all of the puppies we breed. We will expect to remain in contact with our puppy owners throughout the dog's life (and often beyond). We provide information on the successes and health of each pup's parents and relatives, as well as suggestions on training, showing, and, if appropriate, breeding. Owners have access to an online group where owners of our dogs can chat, discuss training and other issues, and brag about their pup.
Our commitment to producing healthy, stable pups demands that we collect genetic and temperament information from all dogs of our breeding. For this reason, we require that all of our pups be screened for genetic diseases at 24 months of age at the owner's expense. This screening provides important health information to owners and includes OFA hip and elbow x-rays, an eye exam by a certified veterinary ophthalmologist, and a heart exam by a board-certified cardiologist. We also urge owners to commit to annual eye exams for the life of their dog due to the growing presence of pigmentary uveitis in Goldens.
Despite our best efforts, we acknowledge that dog breeders are creating living beings and there is no way to produce perfect dogs. Even in dogs from the most conscientious breeders, health issues and physical flaws must be expected by breeders and buyers alike. While we carefully screen all breeding stock, the ambiguity surrounding the environmental and hereditary causes of hip and elbow dysplasia, cataracts, SAS, epilepsy, thyroid, pigmentary uveitis, progressive retinal atrophy, and other genetic diseases make a guarantee impossible. As breeders, we feel our requirement is to take responsibility for the dogs we have produced and we do so by warranting our dogs against genetic defect, offering a refund of half the purchase price for any dog debilitated by one of these diseases. Similarly, if any puppy purchased specifically for breeding has a disqualifying fault under CKC/AKC standards, we will reimburse the owner half the purchase price or offer a pup from a future breeding at half price. We do require that all of these affected dogs be neutered prior to sending the refund, but they do not need to be returned to us. However, we will always take any dogs of our breeding back at any time for any reason.
Before being placed in their new homes, the puppies will be examined by our veterinarian and have their first set of vaccinations, as calculated by a nanogram we will run on their dam to verify optimal timing of this first vaccination. They will be microchipped. They will be examined by a board-certified veterinary ophthalmologist and a board-certified veterinary cardiologist.
|
# Copyright 2016-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import utils_tests
import trappy
class TestSystrace(utils_tests.SetupDirectory):
    """Tests parsing of traces embedded in a systrace HTML file."""

    def __init__(self, *args, **kwargs):
        # Copy the systrace fixture into the test directory as trace.html.
        super(TestSystrace, self).__init__(
            [("trace_systrace.html", "trace.html")],
            *args,
            **kwargs)

    def test_systrace_html(self):
        """Tests parsing of a systrace embedded textual trace """

        events = ["sched_switch", "sched_wakeup", "trace_event_clock_sync"]
        trace = trappy.SysTrace("trace.html", events=events)

        self.assertTrue(hasattr(trace, "sched_switch"))
        # assertEqual: assertEquals is a deprecated alias (removed in
        # recent Python versions).
        self.assertEqual(len(trace.sched_switch.data_frame), 4)
        self.assertTrue("prev_comm" in trace.sched_switch.data_frame.columns)

        self.assertTrue(hasattr(trace, "sched_wakeup"))
        self.assertEqual(len(trace.sched_wakeup.data_frame), 4)
        self.assertTrue("target_cpu" in trace.sched_wakeup.data_frame.columns)

        self.assertTrue(hasattr(trace, "trace_event_clock_sync"))
        self.assertEqual(len(trace.trace_event_clock_sync.data_frame), 1)
        self.assertTrue("realtime_ts" in trace.trace_event_clock_sync.data_frame.columns)

    def test_cpu_counting(self):
        """SysTrace traces know the number of cpus"""
        trace = trappy.SysTrace("trace.html")

        self.assertTrue(hasattr(trace, "_cpus"))
        self.assertEqual(trace._cpus, 3)
class TestLegacySystrace(utils_tests.SetupDirectory):
    """Tests parsing of traces embedded in a legacy systrace HTML file."""

    def __init__(self, *args, **kwargs):
        # Copy the legacy systrace fixture into the test directory.
        super(TestLegacySystrace, self).__init__(
            [("trace_legacy_systrace.html", "trace.html")],
            *args,
            **kwargs)

    def test_systrace_html(self):
        """Tests parsing of a legacy systrace embedded textual trace """

        events = ["sched_switch", "sched_wakeup", "sched_contrib_scale_f"]
        trace = trappy.SysTrace("trace.html", events=events)

        self.assertTrue(hasattr(trace, "sched_switch"))
        # assertEqual: assertEquals is a deprecated alias (removed in
        # recent Python versions).
        self.assertEqual(len(trace.sched_switch.data_frame), 3)
        self.assertTrue("prev_comm" in trace.sched_switch.data_frame.columns)

        self.assertTrue(hasattr(trace, "sched_wakeup"))
        self.assertEqual(len(trace.sched_wakeup.data_frame), 2)
        self.assertTrue("target_cpu" in trace.sched_wakeup.data_frame.columns)

        self.assertTrue(hasattr(trace, "sched_contrib_scale_f"))
        self.assertEqual(len(trace.sched_contrib_scale_f.data_frame), 2)
        self.assertTrue("freq_scale_factor" in trace.sched_contrib_scale_f.data_frame.columns)

    def test_cpu_counting(self):
        """In a legacy SysTrace trace, trappy gets the number of cpus"""
        trace = trappy.SysTrace("trace.html")

        self.assertTrue(hasattr(trace, "_cpus"))
        self.assertEqual(trace._cpus, 8)
|
Who said that it’s hard to buy gifts for guys? That may have been true once, but not anymore, thanks to our terrific James Squire Beer Hamper. After all, what man can resist beer?
|
import numpy as np
from collections import OrderedDict
def linear_sum_assignment(costs, solver=None):
    """Solve a linear sum assignment problem (LSA).

    For large datasets solving the minimum cost assignment becomes the
    dominant runtime part, so several solvers are supported out of the box
    (currently lapsolver, scipy, ortools, munkres).

    Params
    ------
    costs : np.array
        numpy matrix containing costs. Use NaN/Inf values for unassignable
        row/column pairs.

    Kwargs
    ------
    solver : callable or str, optional
        When str: name of solver to use.
        When callable: function to invoke
        When None: uses first available solver
    """
    chosen = solver or default_solver

    if isinstance(chosen, str):
        # Map a solver name onto its registered implementation.
        chosen = solver_map.get(chosen, None)

    assert callable(chosen), 'Invalid LAP solver.'
    return chosen(costs)
def lsa_solve_scipy(costs):
    """Solves the LSA problem using the scipy library."""
    from scipy.optimize import linear_sum_assignment as scipy_solve

    # scipy.optimize.linear_sum_assignment can run forever when an entire
    # row/column is infinite or NaN, so replace every non-finite entry in a
    # copy with a finite sentinel guaranteed to exceed any real cost.  The
    # + 1 keeps the sentinel above the maximum even when that maximum is 0.
    not_finite = ~np.isfinite(costs)
    if not_finite.any():
        costs = costs.copy()
        finite_vals = costs[~not_finite]
        sentinel = 2 * finite_vals.max() + 1 if finite_vals.shape[0] > 0 else 1.
        costs[not_finite] = sentinel

    return scipy_solve(costs)
def lsa_solve_lapsolver(costs):
    """Solves the LSA problem using the lapsolver library."""
    # Delegate directly; no cost-matrix preprocessing is done here.
    from lapsolver import solve_dense
    return solve_dense(costs)
def lsa_solve_munkres(costs):
    """Solves the LSA problem using the Munkres library."""
    from munkres import Munkres, DISALLOWED

    solver = Munkres()
    costs = costs.copy()
    not_finite = ~np.isfinite(costs)
    if not_finite.any():
        # Munkres models forbidden assignments with its DISALLOWED marker,
        # which requires an object (non-numeric) matrix.
        costs = costs.astype(object)
        costs[not_finite] = DISALLOWED

    pairs = np.array(solver.compute(costs), dtype=np.int64)
    return pairs[:, 0], pairs[:, 1]
def lsa_solve_ortools(costs):
    """Solves the LSA problem using Google's optimization tools."""
    from ortools.graph import pywrapgraph

    # Google OR tools only support integer costs. Here's our attempt
    # to convert from floating point to integer:
    #
    # We search for the minimum difference between any two costs and
    # compute the first non-zero digit after the decimal place. Then
    # we compute a factor,f, that scales all costs so that the difference
    # is integer representable in the first digit.
    #
    # Example: min-diff is 0.001, then first non-zero digit place -3, so
    # we scale by 1e3.
    #
    # For small min-diffs and large costs in general there is a chance of
    # overflowing.
    valid = np.isfinite(costs)
    min_e = -8  # cap: never scale by more than 1e8
    unique = np.unique(costs[valid])

    if unique.shape[0] == 1:
        min_diff = unique[0]
    elif unique.shape[0] > 1:
        min_diff = np.diff(unique).min()
    else:
        min_diff = 1

    min_diff_e = 0
    if min_diff != 0.0:
        # Decimal exponent of the smallest cost difference; the -1
        # adjustment keeps fractional diffs in the right digit place.
        min_diff_e = int(np.log10(np.abs(min_diff)))
        if min_diff_e < 0:
            min_diff_e -= 1

    e = min(max(min_e, min_diff_e), 0)
    f = 10**abs(e)

    assignment = pywrapgraph.LinearSumAssignment()
    # Only finite (assignable) pairs become arcs in the graph.
    for r in range(costs.shape[0]):
        for c in range(costs.shape[1]):
            if valid[r,c]:
                assignment.AddArcWithCost(r, c, int(costs[r,c]*f))

    if assignment.Solve() != assignment.OPTIMAL:
        # Infeasible/failed under or-tools; fall back to the scipy solver.
        return linear_sum_assignment(costs, solver='scipy')

    if assignment.NumNodes() == 0:
        return np.array([], dtype=np.int64), np.array([], dtype=np.int64)

    pairings = []
    for i in range(assignment.NumNodes()):
        pairings.append([i, assignment.RightMate(i)])

    indices = np.array(pairings, dtype=np.int64)
    return indices[:,0], indices[:,1]
def lsa_solve_lapjv(costs):
    """Solves the LSA problem using the lap.lapjv implementation."""
    from lap import lapjv

    not_finite = ~np.isfinite(costs)
    if not_finite.any():
        # Replace unassignable entries in a copy with a finite sentinel
        # larger than any real cost (+ 1 covers an all-zero max).
        costs = costs.copy()
        finite_vals = costs[~not_finite]
        sentinel = 2 * finite_vals.max() + 1 if finite_vals.shape[0] > 0 else 1.
        costs[not_finite] = sentinel

    row_to_col = lapjv(costs, return_cost=False, extend_cost=True)[0]
    pairs = np.array((range(costs.shape[0]), row_to_col), dtype=np.int64).T
    # Drop rows that ended up unassigned (marked -1 by lapjv).
    pairs = pairs[pairs[:, 1] != -1]
    return pairs[:, 0], pairs[:, 1]
def init_standard_solvers():
    """Detect installed LAP solver packages and register them.

    Populates the module-level globals:
      solver_map        -- maps solver name to its solve function
      available_solvers -- names whose backing package is importable
      default_solver    -- first available solver name, or None

    Emits a RuntimeWarning when no solver package is installed.
    """
    # `import importlib` alone does not guarantee the `util` submodule is
    # loaded; import it explicitly (replaces the redundant
    # `import importlib` + `from importlib import util` pair).
    import importlib.util
    import warnings

    global available_solvers, default_solver, solver_map

    # Preference order: first importable entry becomes the default.
    solvers = [
        ('lapsolver', lsa_solve_lapsolver),
        ('lap', lsa_solve_lapjv),
        ('scipy', lsa_solve_scipy),
        ('munkres', lsa_solve_munkres),
        ('ortools', lsa_solve_ortools),
    ]

    solver_map = dict(solvers)

    available_solvers = [s[0] for s in solvers if importlib.util.find_spec(s[0]) is not None]
    if len(available_solvers) == 0:
        default_solver = None
        warnings.warn('No standard LAP solvers found. Consider `pip install lapsolver` or `pip install scipy`', category=RuntimeWarning)
    else:
        default_solver = available_solvers[0]
init_standard_solvers()

from contextlib import contextmanager

@contextmanager
def set_default_solver(newsolver):
    '''Change the default solver within context.

    Intended usage

        costs = ...
        mysolver = lambda x: ... # solver code that returns pairings

        with lap.set_default_solver(mysolver):
            rids, cids = lap.linear_sum_assignment(costs)

    Params
    ------
    newsolver : callable or str
        new solver function
    '''
    global default_solver

    previous = default_solver
    default_solver = newsolver
    try:
        yield
    finally:
        # Restore the previous default even if the body raised.
        default_solver = previous
|
From all of us here at Spare Space Storage, we want to wish you a Merry Christmas! During the holidays we feel the utmost gratitude for our tenants, partners, service providers, and staff. This year has been a whirlwind of excitement and achievement for us and we hope it has been for you as well. Though we know there are lulls throughout the highs, thank you for sticking with us through it all.
Merry Christmas, Happy Hanukkah, and Best Wishes in the New Year!
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Unit tests for TokenResolver functionality"""
# pylint: disable=invalid-name
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from tokenresolver import TokenResolver
xbmc = __import__('xbmc')
xbmcaddon = __import__('xbmcaddon')
xbmcgui = __import__('xbmcgui')
xbmcplugin = __import__('xbmcplugin')
xbmcvfs = __import__('xbmcvfs')
addon = xbmcaddon.Addon()
class TestTokenResolver(unittest.TestCase):
    """TestCase class"""

    # Shared resolver under test (one instance for all test methods).
    _tokenresolver = TokenResolver()
    # Original add-on credentials, captured in setUp and restored in
    # tearDown so individual tests may overwrite them safely.
    username = None
    password = None

    def setUp(self):
        """Build up function for TestCase class"""
        # Save the original credentials (username and password)
        self.username = addon.settings['username']
        self.password = addon.settings['password']

    def tearDown(self):
        """Clean up function for TestCase class"""
        # Restore the original credentials
        addon.settings['username'] = self.username
        addon.settings['password'] = self.password

    def test_refresh_login(self):
        """Test refreshing login"""
        self._tokenresolver.refresh_login()

    def test_cleanup_userdata(self):
        """Test cleaning up userdata"""
        self._tokenresolver.cleanup_userdata()

    def test_successful_login(self):
        """Test successful login"""
        # Uses the real credentials from the add-on settings.
        self.username = addon.settings['username']
        self.password = addon.settings['password']
        self._tokenresolver.login(refresh=False)

    def test_invalid_login(self):
        """Test invalid login"""
        # Deliberately wrong credentials; tearDown restores the originals.
        addon.settings['username'] = 'foo'
        addon.settings['password'] = 'bar'
        self._tokenresolver.login(refresh=False)

    def test_missing_username(self):
        """Test missing username"""
        addon.settings['username'] = ''
        addon.settings['password'] = self.password
        self._tokenresolver.login(refresh=True)
        self._tokenresolver.login(refresh=False)

    def test_missing_password(self):
        """Test missing password"""
        addon.settings['username'] = self.username
        addon.settings['password'] = ''
        self._tokenresolver.login(refresh=True)
        self._tokenresolver.login(refresh=False)
if __name__ == '__main__':
unittest.main()
|
On the last floor of our highest building, the penthouse offers unique panoramic views over Great Bay.
Get 1 additional night FREE for 4 to 6 nights stay!
Get 2 additional nights FREE for 7 or more nights stay!
Summer April 16th - October 15th. Fall October 16th - December 15th. Winter December 16th - April 15th.
European plan (no breakfast) with extra 15% service charge and 5% government tax.
$ 55.00 per additional guest in Summer, and $ 75.00 in Fall and Winter.
Prices are per room per night and subject to change without notice.
|
"""
This module contains a video driver for the terasic LT24
LCD display ...
"""
from __future__ import division
import myhdl
from myhdl import Signal, intbv, enum, always_seq, concat
from .lt24intf import LT24Interface
from .lt24lcd_init_sequence import init_sequence, build_init_rom
from .lt24lcd_driver import lt24lcd_driver
@myhdl.block
def lt24lcd(glbl, vmem, lcd):
    """ A video display driver for the terasic LT24 LCD display.

    This driver reads pixels from the VideoMemory interface and transfers
    them to the LT24 display. This hardware module (component) will also
    perform the initial display configuration.

    (arguments == ports)
    Arguments:
        glbl (Global): global signals, clock, reset, enable, etc.
        vmem (VideoMemory): video memory interface, the driver will read
            pixels from this interface.
        lcd (LT24Interface): The external LT24 interface.

    Parameters:
        None

    Pixel format is RGB 5-6-5 (8080-system 16bit parallel bus).
    """
    assert isinstance(lcd, LT24Interface)
    resolution, refresh_rate = (240, 320), 60
    number_of_pixels = resolution[0] * resolution[1]

    # local references to signals in interfaces
    clock, reset = glbl.clock, glbl.reset

    # make sure the user timer is configured
    assert glbl.tick_user is not None

    # write out a new VMEM to the LCD display, a write cycle
    # consists of putting the video data on the bus and latching
    # with the `wrx` signal. Init (write once) the column and
    # page addresses (cmd = 2A, 2B) then write mem (2C)
    states = enum(
        'init_wait_reset', # wait for the controller to reset the LCD
        'init_start', # start the display init sequence
        'init_start_cmd', # send a command, part of the display seq
        'init_next', # determine if another command
        'write_cmd_start', # command subroutine
        'write_cmd', # command subroutine
        'display_update_start', # update the display
        'display_update_start_p', # delay for command ack
        'display_update', # update the display
        'display_update_next', # wait for driver to ack pixel xfered
        'display_update_end' # end of display update
    )
    state = Signal(states.init_wait_reset)
    # NOTE(review): state_prev is registered every cycle but not otherwise
    # read in this block -- presumably kept for debug/trace; confirm.
    state_prev = Signal(states.init_wait_reset)
    cmd = Signal(intbv(0)[8:])
    return_state = Signal(states.init_wait_reset)

    num_hor_pxl, num_ver_pxl = resolution
    print("resolution {}x{} = {} number of pixes".format(
        num_hor_pxl, num_ver_pxl, number_of_pixels))
    # current pixel coordinates (horizontal and vertical counters);
    # plain intbv (not Signal): updated in-place within the always block
    hcnt = intbv(0, min=0, max=num_hor_pxl)
    vcnt = intbv(0, min=0, max=num_ver_pxl)

    # signals to start a new command transaction to the LCD
    datalen = Signal(intbv(0, min=0, max=number_of_pixels+1))
    data = Signal(intbv(0)[16:])
    datasent = Signal(bool(0))
    datalast = Signal(bool(0))
    cmd_in_progress = Signal(bool(0))

    # --------------------------------------------------------
    # LCD driver
    gdrv = lt24lcd_driver(glbl, lcd, cmd, datalen, data,
                          datasent, datalast, cmd_in_progress)

    # --------------------------------------------------------
    # build the display init sequence ROM
    rom, romlen, maxpause = build_init_rom(init_sequence)
    offset = Signal(intbv(0, min=0, max=romlen+1))
    pause = Signal(intbv(0, min=0, max=maxpause+1))

    # --------------------------------------------------------
    # state-machine
    @always_seq(clock.posedge, reset=reset)
    def beh_state_machine():
        state_prev.next = state

        if state == states.init_wait_reset:
            if lcd.reset_complete:
                state.next = states.init_start

        elif state == states.init_start:
            # ROM layout per entry: total length, pause, then the
            # command byte followed by its data bytes.
            v = rom[offset]
            # @todo: change the table to only contain the number of
            # bytes to be transferred
            datalen.next = v - 3
            p = rom[offset+1]
            pause.next = p
            offset.next = offset + 2
            state.next = states.init_start_cmd

        elif state == states.init_start_cmd:
            v = rom[offset]
            cmd.next = v
            if datalen > 0:
                # first data byte goes out with the command
                v = rom[offset+1]
                data.next = v
                offset.next = offset + 2
            else:
                offset.next = offset + 1
            state.next = states.write_cmd_start
            return_state.next = states.init_next

        elif state == states.init_next:
            if pause == 0:
                if offset == romlen:
                    # init sequence exhausted, begin display refresh loop
                    state.next = states.display_update_start
                else:
                    state.next = states.init_start
            elif glbl.tick_ms:
                # pause is measured in milliseconds
                pause.next = pause - 1

        elif state == states.write_cmd_start:
            state.next = states.write_cmd

        elif state == states.write_cmd:
            if cmd_in_progress:
                if datasent and not datalast:
                    # stream the next ROM byte to the driver
                    v = rom[offset]
                    data.next = v
                    offset.next = offset+1
            else:
                # driver finished the command; return to caller state
                cmd.next = 0
                state.next = return_state

        elif state == states.display_update_start:
            if glbl.tick_user:
                # start a full-frame memory write (command 0x2C)
                cmd.next = 0x2C
                state.next = states.display_update_start_p
                datalen.next = number_of_pixels

        elif state == states.display_update_start_p:
            state.next =states.display_update

        elif state == states.display_update:
            assert cmd_in_progress
            # NOTE(review): the frame wraps as soon as vcnt hits the last
            # row, before hcnt completes that row -- confirm intended.
            if vcnt == num_ver_pxl-1:
                hcnt[:] = 0
                vcnt[:] = 0
            elif hcnt == num_hor_pxl-1:
                hcnt[:] = 0
                vcnt[:] = vcnt + 1
            else:
                hcnt[:] = hcnt + 1

            # this will be the pixel for the next write cycle
            vmem.hpxl.next = hcnt
            vmem.vpxl.next = vcnt

            # this is the pixel for the current write cycle
            if hcnt == 0 and vcnt == 0:
                cmd.next = 0
                state.next = states.display_update_end
            else:
                # pack RGB 5-6-5 into the 16-bit data bus
                data.next = concat(vmem.red, vmem.green, vmem.blue)
                state.next = states.display_update_next

        elif state == states.display_update_next:
            if cmd_in_progress:
                if datasent and not datalast:
                    state.next = states.display_update
            else:
                cmd.next = 0
                state.next = states.display_update_end

        elif state == states.display_update_end:
            # wait till the driver ack the command completion
            if not cmd_in_progress:
                state.next = states.display_update_start

    return myhdl.instances()
|
Wood texture created with the Clouds filter as a base.
The reverse of the leather began to take on a life of its own too!
Once the embroidery was complete the leather was damped on the front with an atomiser and then paste applied to the back.
I could then stick down the leather joints and infill the insides of the boards.
The entire Photoshop document is selected.
Press Cmd/Ctrl+T for free transform, Hold down the Shift key and drag out from the corner.
In addition to my main work table, I wanted a clear space where I can roll out large pieces of leather, pattern paper, etc and do any cutting.
Give an interior a colorful, patterned wallpaper and your audience may react quite differently than they did for a wooden interior.
purple texture by omcdanie on deviantart .
Wood grunge texture.Natural Dark Wooden Background.Vector template. Grunge vector texture.
Begin with a texture, you must be in RGB mode. I have another tutorial under textures that shows you how to create this rock surface.
Announcing the Go Media building texture collection! - Go Media™ · Creativity at work!
As you can see, the collection covers a pretty wide array of texture types, and goes from super subtle to sledgehammer.
Pattern White Roses, 1152x864 pixels : Wallpapers tagged .
|
import sys
sys.path.append('..')
import datetime
from bson.objectid import ObjectId
import petrarch_ud
import PETRreader
formatted = [{u'language': u'english',
u'title': u'6 killed in attacks in Iraqi capital Friday',
u'url': u'http://www.menafn.com/1094827896/6-killed-in-attacks-in-Iraqi-capital-Friday?src=RSS',
u'stanford': 1,
u'content': "Ukraine ratified a sweeping agreement with the European Union on Tuesday.",
u'source': u'menafn_iraq',
u'parsed_sents': ["""1 Ukraine Ukraine PROPN NNP Number=Sing 2 nsubj _ _
2 ratified ratify VERB VBD Mood=Ind|Tense=Past|VerbForm=Fin 0 root _ _
3 a a DET DT Definite=Ind|PronType=Art 5 det _ _
4 sweeping sweeping ADJ JJ Degree=Pos 5 amod _ _
5 agreement agreement NOUN NN Number=Sing 2 dobj _ _
6 with with ADP IN _ 9 case _ _
7 the the DET DT Definite=Def|PronType=Art 9 det _ _
8 European european PROPN NNP Number=Sing 9 compound _ _
9 Union Union PROPN NNP Number=Sing 5 nmod _ _
10 on on ADP IN _ 11 case _ _
11 Tuesday Tuesday PROPN NNP Number=Sing 2 nmod _ _
12 . . PUNCT . _ 2 punct _ _"""],
u'date': u'160626',
u'date_added': datetime.datetime(2016, 6, 26, 19, 0, 17, 640000),
u'_id': ObjectId('57702641172ab87eb7dc98fa')}]
def test_petr_formatted_to_results():
    """Smoke-test: run the PETRARCH-UD pipeline on the pre-parsed fixture.

    The expected-results comparison is currently disabled; for now this only
    checks that the pipeline runs without raising, and prints its output.
    """
    petr_ud_results = petrarch_ud.run_pipeline(formatted, write_output=False,
                                               parsed=True)
    print(petr_ud_results)
    #assert petr_ud_results == correct1_results
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_petr_formatted_to_results()
|
When forward-thinking Scottish independent school, Glenalmond College, needed to update their IT set up, they decided to ‘Go Google’ in both the classroom and in the back office.
Introducing Gmail and G Suite for Education for all administrative staff and teachers provided a flexible and collaborative working environment that allowed teachers to share resources and best practice. With some on-site training from Google Cloud Partner Refractiv, the Glenalmond staff have adopted all the G Suite tools, including Drive, and have expanded their use of Google Classroom as a virtual learning environment. Combining the two has created a fast and simple way to engage students in collaborative learning both at school and at home.
The decision to adopt Google as our main platform has been a game changer in terms of our teaching and learning: pupils are able to work more collaboratively with each other and with staff, and pupils are encouraged to take greater responsibility for their own learning and development.
WHILE YOU’RE HERE: We can help you set up or migrate to G Suite. Get in touch to find out how we can make the transition smooth and hassle free and support you afterwards with great training, security and helpdesk advice.
|
# -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import sys
from io import StringIO
def pyname_join(seq):
    """Join an iterable of identifier parts into a dotted python path."""
    return ".".join(seq)
def is_pkgdir(dirpath):
    """Return True if *dirpath* is a directory containing an ``__init__.py``."""
    init_file = os.path.join(dirpath, "__init__.py")
    return os.path.isdir(dirpath) and os.path.isfile(init_file)
def drop_pyext(pathname):
    """Return *pathname* with a trailing ``.py`` extension removed."""
    pattern = re.compile(r"\.py$")
    return pattern.sub("", pathname)
def mod_split(modname):
    """Split a dotted module path at its *last* dot into (package, name).

    Raises ValueError when *modname* contains no dot.
    """
    match = re.match(r"^(.+)\.(.*)$", modname)
    if match is None:
        raise ValueError("invalid python path identifier")
    return match.groups()
def is_empty_generator(generator):
    """Return True when *generator* yields nothing.

    NOTE: this consumes the generator's next item when one exists.
    """
    _sentinel = object()
    return next(generator, _sentinel) is _sentinel
class AutoEnum(Enum):
    """Enum base whose members auto-number themselves 1, 2, 3, ...

    Subclasses declare members with an empty value (e.g. ``RED = ()``);
    ``__new__`` then assigns the next free integer automatically.
    """
    def __new__(cls):
        # next free value: one past the number of already-defined members
        value = len(cls.__members__) + 1
        obj = object.__new__(cls)
        obj._value_ = value
        return obj
def mkdir_p(path):
    """Create *path* (including parents) like ``mkdir -p``; no error if it exists.

    NOTE(review): FileExistsError is also swallowed when *path* exists as a
    regular file — later code treating it as a directory would then fail;
    confirm that is acceptable.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        pass
@contextmanager
def protect_cwd(dirpath=None):
    """Run the enclosed block in *dirpath* (when given) and always restore
    the original working directory on exit."""
    original_cwd = os.getcwd()
    if dirpath is not None:
        os.chdir(dirpath)
    try:
        yield
    finally:
        os.chdir(original_cwd)
def safe_getcwd():
    """Return the current working directory, or None if it no longer exists.

    os.getcwd() raises FileNotFoundError when the process' cwd has been
    deleted (e.g. removed by another process).
    """
    try:
        return os.getcwd()
    except FileNotFoundError:
        return None
@contextmanager
def silent_stderr():
    """Swallow everything written to ``sys.stderr`` inside the block."""
    real_stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        yield
    finally:
        sys.stderr = real_stderr
def ensure_trailing_slash(path):
    """Return *path* guaranteed to end with a single ``/`` appended if missing."""
    return path if path.endswith("/") else path + "/"
def issubdir(filepath, dirpath):
    """Return True when *filepath* lies underneath directory *dirpath*."""
    prefix = dirpath if dirpath.endswith("/") else dirpath + "/"
    return filepath.startswith(prefix)
|
We’ve been on the fence about Scruffy for a long time. Last Sunday, I heard loud cackling from the coop. Always worried about pythons, I rushed over to see a large brown shape, most likely an eagle, flapping away from a branch on the tree overhead. Inside the run, Scruffy was standing on top of a stool, cawing at the top of his voice. All the hens were inside, in the henhouse. I felt so proud of my dapper cockerel protecting his ladies, that I forgot his vileness for a while.
But not for long, because eagles were not the only thing Scruffy was protecting his ladies against. The next morning, when I left the run and forgot to walk out backwards, or look into his eyes admonishingly, he attacked me from the back immediately. Thankfully, I managed to slam the door right into his sharp beak, but needless to say, I did not particularly want to go back in again after. I was not alone in being less than amused with his temper that morning: through the wire mesh I observed Scruffy chasing the girls all across the run, in a way I was not sure was meant to be romantic, but in any case was pretty violent, and not at all appreciated by the ladies.
Roel said we had to do something, and we had to do it now. Scruffy, obviously, had other ideas about that, and we spent a good fifteen minutes chasing him around the garden – that roo can run! I won’t get into details about what happened after, it suffices to say nobody enjoyed it, but it was swift and yes, the rumours about headless (or broken-necked in this case) chickens are very much true.
Some people might find it distasteful that I post a photo of my stewed rooster, but you know what, many of my friends post photos of their food on social media, including fowl, fried, roasted, cooked, or with rice. Do you know what I find distasteful? Industrial, or broiler, chickens that are raised by the tens of thousands in windowless barns, that grow so fat so quick they can’t walk, that have wounds on their legs from sitting in their own manure all day, and that are cooped up so tightly together they peck each others backs bold from boredom and frustration, with an aggression that exceeds Scruffy’s on his worst days. The air they breathe full of ammonia and faeces, makes them suffer from respiratory illnesses. There is more to say about that, but I think you get the picture.
Having kept chickens for years now, I know what sociable animals they are, how they like to dig in the sand, climb on roofs and benches, and huddle up comfortably together. Chickens are fairly stupid, but not stupid enough to endure what is happening in those industrial farms.
So yes, we kill and eat our own rooster, who grew up pampered in a large run, with six wives, and plenty of food, fresh air, sand and clean water. And yes, I proudly present this lovely dish of ‘Rooster in Prosecco’ (well, we happened to have an open bottle, and were out of red wine). And I have to admit, Scruffy was much nicer stewed than he was in real life.
Jullie haan had het goed, beter inderdaad dan al die hoenderen in kleine hokjes.
Thanks for sharing this amazing blogpost here. It was highly inspirational and an interesting read to follow. Do share more stuff, would love to hear from you.
|
# boost_experiment.py
#
# Author : James Mnatzaganian
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 10/13/15
#
# Description : Study the boost.
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 James Mnatzaganian
"""
Study the boost.
G{packagetree mHTM}
"""
__docformat__ = 'epytext'
# Native imports
import cPickle, random, csv, os, time, json
# Third party imports
import numpy as np
import bottleneck as bn
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
# Program imports
from mHTM.region import SPRegion
from mHTM.plot import plot_error, compute_err
def make_data(p, nitems=100, width=100, density=0.9, seed=123456789):
    """
    Make the dataset.
    @param p: the full path to where the dataset should be created.
    @param nitems: The number of items to create.
    @param width: The size of the input.
    @param density: The percentage of active bits.
    @param seed: The random number seed.
    """
    # Initialization
    # Seed both RNGs: np.random draws the bulk of the indexes below, while
    # random.randint tops up duplicates removed by the set.
    random.seed(seed)
    np.random.seed(seed)
    nactive = int(width * density)
    # Build the dataset
    ds = np.zeros((nitems, width), dtype='bool')
    for i in xrange(nitems):
        # randint may repeat indexes; the set dedupes, then refill one at a
        # time until exactly nactive unique bit positions are chosen
        indexes = set(np.random.randint(0, width, nactive))
        while len(indexes) != nactive:
            indexes.add(random.randint(0, width - 1))
        ds[i][list(indexes)] = True
    # Write the file
    with open(p, 'wb') as f:
        cPickle.dump(ds, f, cPickle.HIGHEST_PROTOCOL)
def load_data(p):
    """
    Get the dataset.
    @param p: the full path to the dataset.
    @return: The unpickled dataset (boolean numpy array written by make_data).
    """
    with open(p, 'rb') as f:
        return cPickle.load(f)
def _phase3(self):
    """
    Normal phase 3, but with tracking the boost changes. Double commented lines
    are new.
    """
    # Update permanences
    self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
        self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
    if self.disable_boost is False:
        # Update the boosting mechanisms
        if self.global_inhibition:
            # one shared minimum duty cycle across all columns
            min_dc = np.zeros(self.ncolumns)
            min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
        else:
            # per-column minimum based on each column's neighborhood
            min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)
        ## Save pre-overlap boost info
        # NOTE(review): boost is a python list; 'boost != self.boost' below
        # relies on numpy broadcasting the comparison elementwise — confirm
        # self.boost is an ndarray.
        boost = list(self.boost)
        # Update boost
        self._update_active_duty_cycle()
        self._update_boost(min_dc)
        self._update_overlap_duty_cycle()
        ## Write out overlap boost changes
        ## (fraction of columns whose boost changed this iteration)
        with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
            writer = csv.writer(f)
            writer.writerow([self.iter, bn.nanmean(boost != self.boost)])
        # Boost permanences
        mask = self.overlap_dc < min_dc
        mask.resize(self.ncolumns, 1)
        self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
        ## Write out permanence boost info
        with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
                as f:
            writer = csv.writer(f)
            writer.writerow([self.iter, bn.nanmean(mask)])
    # Trim synapses
    if self.trim is not False:
        self.p[self.p < self.trim] = 0
def main(ds, p, ncols=2048, duty_cycle=100, nepochs=10, global_inhibition=True,
         seed=123456789):
    """
    Run an experiment.
    @param ds: The dataset.
    @param p: The full path to the directory to save the results.
    @param ncols: The number of columns.
    @param duty_cycle: The duty cycle.
    @param nepochs: The number of epochs.
    @param global_inhibition: If True use global inhibition otherwise use local
    inhibition.
    @param seed: The random seed.
    """
    # Get some parameters
    ninputs = ds.shape[1]
    density = np.sum(ds[0]) / float(ninputs)
    # Make the directory if it doesn't exist
    try:
        os.makedirs(p)
    except OSError:
        pass
    # Initializations
    np.random.seed(seed)
    kargs = {
        'ninputs': ninputs,
        'ncolumns': ncols,
        'nsynapses': 40,
        'random_permanence': True,
        'pinc':0.03, 'pdec':0.05,
        'seg_th': 15,
        'nactive': int(0.02 * ncols),
        'duty_cycle': duty_cycle,
        'max_boost': 10,
        'global_inhibition': global_inhibition,
        'trim': 1e-4
    }
    # Create the region
    # Monkey-patch SPRegion so every instance runs the instrumented _phase3
    # defined above (which logs boost changes to CSV)
    delattr(SPRegion, '_phase3')
    setattr(SPRegion, '_phase3', _phase3)
    sp = SPRegion(**kargs)
    # extra attributes consumed by _phase3's CSV logging
    sp.iter, sp.out_path = 1, p
    # Train the region
    t = time.time()
    for i in xrange(nepochs):
        for j, x in enumerate(ds):
            sp.execute(x)
            sp.iter += 1
    t = time.time() - t
    # Dump the details
    kargs['density'] = density
    kargs['seed'] = seed
    kargs['nepochs'] = nepochs
    kargs['time'] = t
    with open(os.path.join(p, 'details.json'), 'wb') as f:
        f.write(json.dumps(kargs, sort_keys=True, indent=4,
            separators=(',', ': ')))
def vary_density(bp, global_inhibition=True):
    """
    Vary the density level.
    @param bp: The base path.
    @param global_inhibition: If True use global inhibition otherwise use local
    inhibition.
    """
    # density_levels = np.arange(1, 100, 1)
    # NOTE(review): the sweep starts at 28 — presumably levels 1-27 were
    # produced by an earlier run of the commented-out full range; confirm.
    density_levels = np.arange(28, 100, 1)
    for density in density_levels:
        print density
        p = os.path.join(bp, str(density))
        p2 = os.path.join(p, 'data.pkl')
        try:
            os.makedirs(p)
        except OSError:
            pass
        make_data(p2, density=density/100., seed=123456789)
        # Repeat for good results
        # (10 seeded repetitions run in parallel across all cores)
        Parallel(n_jobs=-1)(delayed(main)(load_data(p2),
            os.path.join(p, str(i)), global_inhibition=global_inhibition,
            seed=i) for i in xrange(10))
def vary_dutycycle(bp, ds, global_inhibition=True):
    """
    Vary the duty cycles.
    @param bp: The base path.
    @param ds: The dataset to use.
    @param global_inhibition: If True use global inhibition otherwise use local
    inhibition.
    """
    duty_cycles = (1, 10, 100, 1000, 10000)
    try:
        os.makedirs(bp)
    except OSError:
        pass
    for dc in duty_cycles:
        # banner so runs are easy to spot in the console output
        print '\n\n\n --------{0}-------- \n\n\n'.format(dc)
        p = os.path.join(bp, str(dc))
        main(ds, p, duty_cycle=dc, nepochs=1,
            global_inhibition=global_inhibition)
def plot_density_results(bp, bp2=None):
    """
    Average the results.
    @param bp: The base path.
    @param bp2: The second base path. When given, a two-panel figure is made
    (global inhibition on top, local inhibition below) and saved to
    'boost_sparseness.png'.
    """
    def average(p):
        """
        Compute the average activations for each density.
        @param p: The path to the file.
        @return: The average (as a percentage).
        """
        with open(p, 'rb') as f:
            reader = csv.reader(f)
            data = []
            for row in reader:
                data.append(float(row[1]))
        return np.mean(data) * 100
    def get_data(p):
        """
        Get the data for a single run.
        @param p: The path.
        @return: A tuple containing the overlap and permanences.
        """
        overlap, permanence = [], []
        for d in os.listdir(p):
            npath = os.path.join(p, d)
            if os.path.isdir(npath):
                overlap.append(average(os.path.join(npath,
                    'overlap_boost.csv')))
                permanence.append(average(os.path.join(npath,
                    'permanence_boost.csv')))
        return np.array(overlap), np.array(permanence)
    def get_all_data(bp):
        """
        Get the data for all runs.
        @param bp: The base path.
        @return: A tuple containing the sparsity, overlap, and permanences.
        """
        overlap, permanence, sparsity = [], [], []
        # directory names are density levels; sparsity = 100% - density
        for d in sorted([int(x) for x in os.listdir(bp)]):
            sparsity.append((1 - (d / 100.)) * 100)
            o, p = get_data(os.path.join(bp, str(d)))
            overlap.append(o)
            permanence.append(p)
        # reverse so the series run from low to high sparsity
        return np.array(sparsity[::-1]), np.array(overlap[::-1]), \
            np.array(permanence[::-1])
    def make_plot_params(sparsity, overlap, permanence, title=None):
        """
        Generate the parameters for the plot.
        @param sparsity: The sparsity array.
        @param overlap: The overlap array.
        @param permanence: The permanence array.
        @param title: The title for the plot.
        @return: A dictionary with the parameters.
        """
        return {'x_series':(sparsity, sparsity),
            'y_series':(np.median(overlap, 1), np.median(permanence, 1)),
            'series_names':('Overlap Boosting', 'Permanence Boosting'),
            'y_errs':(compute_err(overlap), compute_err(permanence)),
            'xlim':(0, 100), 'ylim':(0, 45), 'title':title
            }
    data = get_all_data(bp)
    if bp2 is None:
        plot_error(**make_plot_params(*data))
    else:
        # Make main plot
        # (an invisible frame axis used only for the shared axis labels)
        fig = plt.figure(figsize=(21, 20), facecolor='white')
        ax = fig.add_subplot(111)
        ax.spines['top'].set_color('none')
        ax.spines['bottom'].set_color('none')
        ax.spines['left'].set_color('none')
        ax.spines['right'].set_color('none')
        ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
            right='off')
        ax.set_xlabel('Sparsity [%]')
        ax.set_ylabel('% Columns Boosted')
        # Make subplots
        ax1 = fig.add_subplot(211)
        plot_error(show=False, legend=False, ax=ax1, **make_plot_params(*data,
            title='Global Inhibition'))
        data2 = get_all_data(bp2)
        ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
        plot_error(show=False, ax=ax2, **make_plot_params(*data2,
            title='Local Inhibition'))
        # Save it
        # plt.show()
        plt.subplots_adjust(bottom=0.15, hspace=0.3)
        plt.savefig('boost_sparseness.png', format='png',
            facecolor=fig.get_facecolor(), edgecolor='none')
def plot_single_run(bp1, bp2):
    """
    Create an error plot for a single run.
    @param bp1: The base path for global inhibition results.
    @param bp2: The base path for local inhibition results.
    Saves the figure to 'boost_permanence.png'.
    """
    def read(p):
        """
        Read in the data.
        @param p: The path to the file to read.
        @return: The results (as percentages).
        """
        with open(p, 'rb') as f:
            reader = csv.reader(f)
            data = []
            for row in reader:
                data.append(float(row[1]))
        return np.array(data) * 100
    def get_data(p):
        """
        Get all of the results.
        @param p: The directory to obtain the data in.
        @return: The results.
        """
        permanence = []
        for d in os.listdir(p):
            npath = os.path.join(p, d)
            if os.path.isdir(npath):
                permanence.append(read(os.path.join(npath,
                    'permanence_boost.csv')))
        return np.array(permanence)
    # Get the data
    data = [get_data(bp1)]
    data.append(get_data(bp2))
    # Build the series
    # (x axis is simply the iteration index)
    x_series = (np.arange(data[0].shape[1]), )
    # Make the main plot
    # (an invisible frame axis used only for the shared axis labels)
    fig = plt.figure(figsize=(21, 20), facecolor='white')
    ax = fig.add_subplot(111)
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
        right='off')
    ax.set_xlabel('Iteration')
    ax.set_ylabel('% Columns Boosted')
    # Make subplots
    ax1 = fig.add_subplot(211)
    plot_error(show=False, legend=False, ax=ax1, title='Global Inhibition',
        x_series=x_series, y_series=(np.median(data[0], 0), ),
        y_errs=(compute_err(data[0], axis=0),), xlim=(0, 200), ylim=(0, 100))
    ax2 = fig.add_subplot(212, sharex=ax1, sharey=ax1)
    plot_error(show=False, ax=ax2, title='Local Inhibition', legend=False,
        x_series=x_series, y_series=(np.median(data[1], 0), ),
        y_errs=(compute_err(data[1], axis=0),), xlim=(0, 200), ylim=(0, 100))
    # Save it
    # plt.show()
    plt.subplots_adjust(bottom=0.15, hspace=0.3)
    plt.savefig('boost_permanence.png', format='png',
        facecolor=fig.get_facecolor(), edgecolor='none')
if __name__ == '__main__':
    # Params
    base_dir = os.path.join(os.path.expanduser('~'), 'scratch')
    p1 = os.path.join(base_dir, 'boost_experiments-global-2')
    p2 = os.path.join(base_dir, 'boost_experiments-local-2')
    # Experiment
    vary_density(p1, True)
    vary_density(p2, False)
    plot_density_results(p1, p2)
    # Plot the permanence-boost trace for one density level.
    # NOTE(review): vary_density currently sweeps 28-99, so directory '26'
    # presumably comes from an earlier run of the full 1-99 sweep — confirm
    # it exists before running.
    density = '26'
    plot_single_run(os.path.join(p1, density), os.path.join(p2, density))
|
Found Round & About | Sharing, Telling, Photographing – All For You!
Looking through my photos to see what challenges I might participate in, I found photos that brought back some great memories. I'll share a few with you today.
Hanging in Cali with my girl – she was only 10!
My beautiful Tori. She was sick.
|
# -*- coding: utf-8 -*-
# Copyright 2008 Luis Falcon <lfalcon@gnusolidario.org>
# Copyright 2016 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/gpl.html).
from datetime import datetime
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
    """Extend ``res.partner`` with a link to clinical patients (``clv.patient``)."""

    _inherit = 'res.partner'

    # new partner type so a partner record can be flagged as a patient
    type = fields.Selection(selection_add=[
        ('clv.patient', 'Patient'),
    ])
    # alias = fields.Char(
    #     string='Alias',
    #     help='Common name that the Partner is referred',
    # )
    # patients attached to this partner or to any of its child contacts
    # (computed, see _compute_patient_ids_and_count)
    patient_ids = fields.One2many(
        string='Related Patients',
        comodel_name='clv.patient',
        compute='_compute_patient_ids_and_count',
    )
    count_patients = fields.Integer(
        compute='_compute_patient_ids_and_count',
    )
    # birthdate_date = fields.Date(
    #     string='Birthdate',
    # )
    # gender = fields.Selection([
    #     ('male', 'Male'),
    #     ('female', 'Female'),
    #     ('other', 'Other'),
    # ])
    # weight = fields.Float()
    # weight_uom = fields.Many2one(
    #     string="Weight UoM",
    #     comodel_name="product.uom",
    #     default=lambda s: s.env['res.lang'].default_uom_by_category('Weight'),
    #     domain=lambda self: [('category_id', '=',
    #                           self.env.ref('product.product_uom_categ_kgm').id)
    #                          ]
    # )

    @api.multi
    def _get_clv_entity(self):
        """Return the clv.* record bound to this partner, based on its type."""
        self.ensure_one()
        # type values for clinical entities are prefixed with 'clv'
        if self.type and self.type[:3] == 'clv':
            return self.env[self.type].search([
                ('partner_id', '=', self.id),
            ])

    @api.multi
    def _compute_patient_ids_and_count(self):
        """Compute related patients and their count for each partner."""
        for record in self:
            # 'child_of' also picks up patients attached to child contacts
            patients = self.env['clv.patient'].search([
                ('partner_id', 'child_of', record.id),
            ])
            record.count_patients = len(patients)
            record.patient_ids = [(6, 0, patients.ids)]

    # @api.multi
    # @api.constrains('birthdate_date')
    # def _check_birthdate_date(self):
    #     """ It will not allow birthdates in the future. """
    #     now = datetime.now()
    #     for record in self:
    #         if not record.birthdate_date:
    #             continue
    #         birthdate = fields.Datetime.from_string(record.birthdate_date)
    #         if birthdate > now:
    #             raise ValidationError(_(
    #                 'Partners cannot be born in the future.',
    #             ))

    @api.model
    def create(self, vals):
        """ It overrides create to bind appropriate clv entity. """
        if all((
            vals.get('type', '').startswith('clv.'),
            not self.env.context.get('clv_entity_no_create'),
        )):
            # delegate creation to the clv model (which creates the partner
            # itself); the context flag prevents infinite recursion back here
            model = self.env[vals['type']].with_context(
                clv_entity_no_create=True,
            )
            clv_entity = model.create(vals)
            return clv_entity.partner_id
        return super().create(vals)
|
The terrific digital photography below, is part of 2020 Ford Fusion Interior, Exterior and Review write-up which is classed as within Wallpaper, 2020 ford fusion rumors, 2020 ford fusion active, 2020 ford fusion pictures and published at August 3rd, 2018 06:48:22 AM by admin.
The 2020 Ford Fusion will get a new exterior coloration, Satin Steel Gray Metallic. Moreover, hatchback fashions are actually available with the diesel engine. The 2020 Ford Fusion obtained a 5-star total security ranking from the NHTSA (out of a attainable five stars). In IIHS evaluations, the 2020 Ford Fusion Interior, Exterior and Review has solely been tested in side and reasonable entrance overlap influence checks where it obtained a Good score (Good is the highest potential rating). It additionally obtained a Primary ranking on the IIHS’ front crash prevention check for having an out there ahead collision warning system (Superior is the very best rating for the front crash prevention take a look at).
This is the story of the new 2020 Ford Fusion, actually. It’s packed with options and demonstrates a classy mechanical prowess, but it’s particular about things. It’s clinical and precise, in some methods to a fault, but balanced with obvious worth and features. Just like the hit man, the 2020 Ford Fusion is not any nonsense up front, however there’s an underlying warmth and need to please when you get to comprehend it.
Here is imperative clue on 2020 ford fusion rumors. We have the cool assets for 2020 ford fusion rumors. Check it out for yourself! You can acquire The 2020 Ford Fusion Interior guide and read the latest 2020 Ford Fusion Interior, Exterior and Review in here.
|
# -*- coding: utf-8 -*-
"""
Peony Clients
:class:`BasePeonyClient` only handles requests while
:class:`PeonyClient` adds some methods that could help when using
the Twitter APIs, with a method to upload a media
"""
import asyncio
import io
from contextlib import suppress
import logging
try:
from asyncio.exceptions import CancelledError
except ImportError: # pragma: no cover
from concurrent.futures import CancelledError
from urllib.parse import urlparse
import aiohttp
from . import data_processing, exceptions, general, oauth, utils
from .api import APIPath, StreamingAPIPath
from .commands import EventStreams, task
from .exceptions import PeonyUnavailableMethod
from .oauth import OAuth1Headers
from .stream import StreamResponse
logger = logging.getLogger(__name__)
class MetaPeonyClient(type):

    def __new__(cls, name, bases, attrs, **kwargs):
        """ put the :class:`~peony.commands.tasks.Task`s in the right place """
        tasks = {'tasks': set()}
        # inherit the tasks declared on every base class
        for base in bases:
            if hasattr(base, '_tasks'):
                for key, value in base._tasks.items():
                    tasks[key] |= value
        # collect the tasks declared in this class' own body
        for attr in attrs.values():
            if isinstance(attr, task):
                tasks['tasks'].add(attr)
        attrs['_tasks'] = tasks
        # each class gets its own, fresh stream container
        attrs['_streams'] = EventStreams()
        return super().__new__(cls, name, bases, attrs)
class BasePeonyClient(metaclass=MetaPeonyClient):
    """
    Access the Twitter API easily
    You can create tasks by decorating a function from a child
    class with :class:`peony.task`
    You also attach a :class:`EventStream` to a subclass using
    the :func:`event_stream` of the subclass
    After creating an instance of the child class you will be able
    to run all the tasks easily by executing :func:`get_tasks`
    Parameters
    ----------
    streaming_apis : iterable, optional
        Iterable containing the streaming APIs subdomains
    base_url : str, optional
        Format of the url for all the requests
    api_version : str, optional
        Default API version
    suffix : str, optional
        Default suffix of API endpoints
    loads : function, optional
        Function used to load JSON data
    error_handler : function, optional
        Requests decorator
    session : aiohttp.ClientSession, optional
        Session to use to make requests
    proxy : str
        Proxy used with every request
    compression : bool, optional
        Activate data compression on every requests, defaults to True
    user_agent : str, optional
        Set a custom user agent header
    encoding : str, optional
        text encoding of the response from the server
    loop : event loop, optional
        An event loop, if not specified :func:`asyncio.get_event_loop`
        is called
    """

    def __init__(self,
                 consumer_key=None,
                 consumer_secret=None,
                 access_token=None,
                 access_token_secret=None,
                 bearer_token=None,
                 auth=None,
                 headers=None,
                 streaming_apis=None,
                 base_url=None,
                 api_version=None,
                 suffix='.json',
                 loads=data_processing.loads,
                 error_handler=utils.DefaultErrorHandler,
                 session=None,
                 proxy=None,
                 compression=True,
                 user_agent=None,
                 encoding=None,
                 loop=None,
                 **kwargs):
        # fall back to library-wide defaults for unset options
        if streaming_apis is None:
            self.streaming_apis = general.streaming_apis
        else:
            self.streaming_apis = streaming_apis
        if base_url is None:
            self.base_url = general.twitter_base_api_url
        else:
            self.base_url = base_url
        if api_version is None:
            self.api_version = general.twitter_api_version
        else:
            self.api_version = api_version
        if auth is None:
            auth = OAuth1Headers
        self.proxy = proxy
        self._suffix = suffix
        self.error_handler = error_handler
        self.encoding = encoding
        if encoding is not None:
            # bind the requested encoding once so later calls need not pass it
            def _loads(*args, **kwargs):
                return loads(*args, encoding=encoding, **kwargs)
            self._loads = _loads
        else:
            self._loads = loads
        self.loop = asyncio.get_event_loop() if loop is None else loop
        self._session = session
        # remember whether the session was user-provided: only sessions
        # created by peony itself are closed in :meth:`close`
        self._user_session = session is not None
        self._gathered_tasks = None
        if consumer_key is None or consumer_secret is None:
            raise TypeError("missing 2 required arguments: 'consumer_key' "
                            "and 'consumer_secret'")
        # all the possible args required by headers in :mod:`peony.oauth`
        kwargs = {
            'consumer_key': consumer_key,
            'consumer_secret': consumer_secret,
            'access_token': access_token,
            'access_token_secret': access_token_secret,
            'bearer_token': bearer_token,
            'compression': compression,
            'user_agent': user_agent,
            'headers': headers,
            'client': self
        }
        # get the args needed by the auth parameter on initialization
        args = utils.get_args(auth.__init__, skip=1)
        # keep only the arguments required by auth on init
        kwargs = {key: value for key, value in kwargs.items()
                  if key in args}
        self.headers = auth(**kwargs)
        # session creation is deferred to a task so it happens on the loop
        self.setup = self.loop.create_task(self._setup())

    async def _setup(self):
        # lazily create a session when the user did not provide one
        if self._session is None:
            logger.debug("Creating session")
            self._session = aiohttp.ClientSession()

    @staticmethod
    def _get_base_url(base_url, api, version):
        """
        create the base url for the api
        Parameters
        ----------
        base_url : str
            format of the base_url using {api} and {version}
        api : str
            name of the api to use
        version : str
            version of the api
        Returns
        -------
        str
            the base url of the api you want to use
        """
        # NOTE(review): format_args is built but never used — the final
        # base_url.format(...) call below does the substitution directly;
        # consider removing the dead dict.
        format_args = {}
        if "{api}" in base_url:
            if api == "":
                base_url = base_url.replace('{api}.', '')
            else:
                format_args['api'] = api
        if "{version}" in base_url:
            if version == "":
                base_url = base_url.replace('/{version}', '')
            else:
                format_args['version'] = version
        return base_url.format(api=api, version=version)

    def __getitem__(self, values):
        """
        Access the api you want
        This permits the use of any API you could know about
        For most api you only need to type
        >>> self[api]  # api is the api you want to access
        You can specify a custom api version using the syntax
        >>> self[api, version]  # version is the api version as a str
        For more complex requests
        >>> self[api, version, suffix, base_url]
        Returns
        -------
        .api.BaseAPIPath
            To access an API endpoint
        """
        defaults = None, self.api_version, self._suffix, self.base_url
        keys = ['api', 'version', 'suffix', 'base_url']
        if isinstance(values, dict):
            # set values in the right order
            values = [values.get(key, defaults[i])
                      for i, key in enumerate(keys)]
        elif isinstance(values, set):
            raise TypeError('Cannot use a set to access an api, '
                            'please use a dict, a tuple or a list instead')
        elif isinstance(values, str):
            # a bare string is just the api name; everything else defaults
            values = [values, *defaults[1:]]
        elif isinstance(values, tuple):
            if len(values) < len(keys):
                # right-pad with None so each position gets its default
                padding = (None,) * (len(keys) - len(values))
                values += padding
            values = [default if value is None else value
                      for value, default in zip(values, defaults)
                      if (value, default) != (None, None)]
        else:
            raise TypeError("Could not create an endpoint from an object of "
                            "type " + values.__class__.__name__)
        api, version, suffix, base_url = values
        base_url = self._get_base_url(base_url, api, version)
        # use StreamingAPIPath if subdomain is in self.streaming_apis
        if api in self.streaming_apis:
            return StreamingAPIPath([base_url], suffix=suffix, client=self)
        else:
            return APIPath([base_url], suffix=suffix, client=self)

    # attribute access is an alias for item access: client.api == client['api']
    __getattr__ = __getitem__

    def __del__(self):
        # best-effort cleanup when the client is garbage-collected
        if self.loop.is_closed():  # pragma: no cover
            pass
        elif self.loop.is_running():
            self.loop.create_task(self.close())
        else:
            self.loop.run_until_complete(self.close())

    async def request(self, method, url, future,
                      headers=None,
                      session=None,
                      encoding=None,
                      **kwargs):
        """
        Make requests to the REST API
        Parameters
        ----------
        future : asyncio.Future
            Future used to return the response
        method : str
            Method to be used by the request
        url : str
            URL of the resource
        headers : .oauth.PeonyHeaders
            Custom headers (doesn't overwrite `Authorization` headers)
        session : aiohttp.ClientSession, optional
            Client session used to make the request
        Returns
        -------
        data.PeonyResponse
            Response to the request
        """
        # make sure the session exists before using it
        await self.setup
        # prepare request arguments, particularly the headers
        req_kwargs = await self.headers.prepare_request(
            method=method,
            url=url,
            headers=headers,
            proxy=self.proxy,
            **kwargs
        )
        if encoding is None:
            encoding = self.encoding
        session = session if (session is not None) else self._session
        logger.debug("making request with parameters: %s" % req_kwargs)
        async with session.request(**req_kwargs) as response:
            if response.status < 400:
                data = await data_processing.read(response, self._loads,
                                                  encoding=encoding)
                # the result is delivered through the supplied future
                future.set_result(data_processing.PeonyResponse(
                    data=data,
                    headers=response.headers,
                    url=response.url,
                    request=req_kwargs
                ))
            else:  # throw exception if status is not 2xx
                await exceptions.throw(response, loads=self._loads,
                                       encoding=encoding, url=url)

    def stream_request(self, method, url, headers=None, _session=None,
                       *args, **kwargs):
        """
        Make requests to the Streaming API
        Parameters
        ----------
        method : str
            Method to be used by the request
        url : str
            URL of the resource
        headers : dict
            Custom headers (doesn't overwrite `Authorization` headers)
        _session : aiohttp.ClientSession, optional
            The session to use for this specific request, the session
            given as argument of :meth:`__init__` is used by default
        Returns
        -------
        .stream.StreamResponse
            Stream context for the request
        """
        return StreamResponse(
            method=method,
            url=url,
            client=self,
            headers=headers,
            session=_session,
            proxy=self.proxy,
            **kwargs
        )

    @classmethod
    def event_stream(cls, event_stream):
        """ Decorator to attach an event stream to the class """
        cls._streams.append(event_stream)
        return event_stream

    def _get_tasks(self):
        # instantiate each declared task for this client instance
        return [task(self) for task in self._tasks['tasks']]

    def get_tasks(self):
        """
        Get the tasks attached to the instance
        Returns
        -------
        list
            List of tasks (:class:`asyncio.Task`)
        """
        tasks = self._get_tasks()
        tasks.extend(self._streams.get_tasks(self))
        return tasks

    async def run_tasks(self):
        """ Run the tasks attached to the instance """
        tasks = self.get_tasks()
        # NOTE(review): the explicit loop= argument to asyncio.gather was
        # removed in Python 3.10 — confirm the supported python versions.
        self._gathered_tasks = asyncio.gather(*tasks, loop=self.loop)
        try:
            await self._gathered_tasks
        except CancelledError:
            pass

    async def arun(self):
        try:
            await self.run_tasks()
        except KeyboardInterrupt:
            pass
        finally:
            # always release the session and cancel pending work
            await self.close()

    def run(self):
        """ Run the tasks attached to the instance """
        self.loop.run_until_complete(self.arun())

    def _get_close_tasks(self):
        tasks = []
        # cancel setup
        if isinstance(self.setup, asyncio.Future):
            if not self.setup.done():
                async def cancel_setup():
                    self.setup.cancel()
                    try:
                        await self.setup
                    except CancelledError:  # pragma: no cover
                        pass
                tasks.append(self.loop.create_task(cancel_setup()))
        # close currently running tasks
        if self._gathered_tasks is not None:
            async def cancel_tasks():
                self._gathered_tasks.cancel()
                try:
                    await self._gathered_tasks
                except CancelledError:
                    pass
            tasks.append(self.loop.create_task(cancel_tasks()))
        return tasks

    async def close(self):
        """ properly close the client """
        tasks = self._get_close_tasks()
        if tasks:
            await asyncio.wait(tasks)
        # close the session only if it was created by peony
        if not self._user_session and self._session is not None:
            with suppress(TypeError, AttributeError):
                await self._session.close()
            self._session = None

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
class PeonyClient(BasePeonyClient):
"""
A client with some useful methods for most usages
"""
def __init__(self, *args, **kwargs):
    """Create the client and eagerly schedule the lookup of its own user."""
    super().__init__(*args, **kwargs)
    # self.user is an asyncio.Task; await it to get the credentials response
    self.user = self.loop.create_task(self._get_user())
async def _get_user(self, init=False):
"""
create a ``user`` attribute with the response of the endpoint
https://api.twitter.com/1.1/account/verify_credentials.json
"""
api = self['api', general.twitter_api_version,
".json", general.twitter_base_api_url]
if isinstance(self.headers, oauth.OAuth1Headers):
return await api.account.verify_credentials.get()
raise PeonyUnavailableMethod("user attribute is only available with "
"OAuth 1 authentification.")
def _get_close_tasks(self):
tasks = super()._get_close_tasks()
if not self.user.done():
async def cancel_user():
self.user.cancel()
try:
await self.user
except CancelledError: # pragma: no cover
pass
tasks.append(self.loop.create_task(cancel_user()))
return tasks
async def _chunked_upload(self, media, media_size,
path=None,
media_type=None,
media_category=None,
chunk_size=2**20,
**params):
"""
upload media in chunks
Parameters
----------
media : file object
a file object of the media
media_size : int
size of the media
path : str, optional
filename of the media
media_type : str, optional
mime type of the media
media_category : str, optional
twitter media category, must be used with ``media_type``
chunk_size : int, optional
size of a chunk in bytes
params : dict, optional
additional parameters of the request
Returns
-------
.data_processing.PeonyResponse
Response of the request
"""
if isinstance(media, bytes):
media = io.BytesIO(media)
chunk = media.read(chunk_size)
is_coro = asyncio.iscoroutine(chunk)
if is_coro:
chunk = await chunk
if media_type is None:
media_metadata = await utils.get_media_metadata(chunk, path)
media_type, media_category = media_metadata
elif media_category is None:
media_category = utils.get_category(media_type)
response = await self.upload.media.upload.post(
command="INIT",
total_bytes=media_size,
media_type=media_type,
media_category=media_category,
**params
)
media_id = response['media_id']
i = 0
while chunk:
req = self.upload.media.upload.post(command="APPEND",
media_id=media_id,
media=chunk,
segment_index=i)
if is_coro:
chunk, _ = await asyncio.gather(media.read(chunk_size), req)
else:
await req
chunk = media.read(chunk_size)
i += 1
status = await self.upload.media.upload.post(command="FINALIZE",
media_id=media_id)
if 'processing_info' in status:
while status['processing_info'].get('state') != "succeeded":
processing_info = status['processing_info']
if processing_info.get('state') == "failed":
error = processing_info.get('error', {})
message = error.get('message', str(status))
raise exceptions.MediaProcessingError(data=status,
message=message,
**params)
delay = processing_info['check_after_secs']
await asyncio.sleep(delay)
status = await self.upload.media.upload.get(
command="STATUS",
media_id=media_id,
**params
)
return response
async def upload_media(self, file_,
media_type=None,
media_category=None,
chunked=None,
size_limit=3 * (1024**2),
**params):
"""
upload a media file on twitter
Parameters
----------
file_ : str or pathlib.Path or file
Path to the file or file object
media_type : str, optional
mime type of the media
media_category : str, optional
Twitter's media category of the media, must be used with
``media_type``
chunked : bool, optional
If True, force the use of the chunked upload for the media
size_limit : int, optional
If set, the media will be sent using a multipart upload if
its size is over ``size_limit`` bytes
params : dict
parameters used when making the request
Returns
-------
.data_processing.PeonyResponse
Response of the request
"""
if isinstance(file_, str):
url = urlparse(file_)
if url.scheme.startswith('http'):
media = await self._session.get(file_)
else:
path = urlparse(file_).path.strip(" \"'")
media = await utils.execute(open(path, 'rb'))
elif hasattr(file_, 'read') or isinstance(file_, bytes):
media = file_
else:
raise TypeError("upload_media input must be a file object or a "
"filename or binary data or an aiohttp request")
media_size = await utils.get_size(media)
if chunked is not None:
size_test = False
else:
size_test = media_size > size_limit
if isinstance(media, aiohttp.ClientResponse):
# send the content of the response
media = media.content
if chunked or (size_test and chunked is None):
args = media, media_size, file_, media_type, media_category
response = await self._chunked_upload(*args, **params)
else:
response = await self.upload.media.upload.post(media=media,
**params)
if not hasattr(file_, 'read') and not getattr(media, 'closed', True):
media.close()
return response
|
Manchester United opened their academy season with a 3-3 draw against Derby County. Below, see the goals as Mason Greenwood impressed.
An injury-time Derby County equaliser denied Manchester United’s Under-18s victory in the opening league game of the season, at the Aon Training Complex.
The hosts looked to be heading for all three points in Neil Ryan’s first game in charge, thanks to a Mason Greenwood brace and a Brandon Williams strike, before a late penalty ensured the game finished 3-3.
United started well in the opening exchanges and controlled much of the play without creating many chances.
However, Greenwood, who was part of the senior squad’s pre-season tour of the USA, was soon causing chaos in the Derby defence. Anthony Elanga’s high press resulted in the ball falling to Greenwood and the United striker drew a smart save from Derby goalkeeper Sam French, who did well to narrow the angle.
However, he was left rooted to the spot soon after as Greenwood broke the deadlock. The United man collected the ball from deep, drove at the Derby backline and, after a few neat step-overs, launched a fierce shot into the bottom left-hand corner of the goal.
United were well on top at this stage and created a number of half-chances, the best of which was Dylan Levitt’s curling effort from outside the area, that was headed clear by an opposition defender.
When the next goal did come, it was the result of a rare Derby attack. An inch-perfect cross from the left-hand side found an eagerly-waiting Matthews and the smallest player on the pitch nodded the ball in to bring Derby back on level terms.
United nearly responded immediately and, just a few seconds after kick-off, Greenwood hit the post. From an acute angle, he managed to fire the ball goalwards and was unlucky to see it rebound off the woodwork.
The Reds could have gone in ahead at half-time after being awarded a penalty just seconds before the whistle. Dion McGhee was taken down on the corner of the box and the referee rightly pointed for a spot-kick.
However, Greenwood’s low effort was saved by Rams stopper French, who held onto the ball and drew the first half to a close.
United started fantastically after the interval and forced French into making two early saves – the first from Greenwood’s side-footed effort and the second after a dipping shot from Largie Ramazani.
The home side made their dominance pay minutes later as Williams scored the Reds’ second of the game. The defender picked up the ball at the edge of the area and, after bypassing two Derby defenders, squeezed a well-placed strike into the away goal.
United reacted well to the setback and Greenwood continued to cause havoc in the Rams defence, winning a corner for the Reds after Aliou Traore’s dribble and cross set him up in the area.
From the resultant set-piece, United nearly found the back of the net again, but Bernard’s header was agonisingly cleared off the line.
United boss Ryan introduced Charlie McCann and Ben Hockenhull for McGhee and Arnau Puigmal in the second half and, after somewhat fortuitously surviving Stretton’s effort which hit the post, the Reds scored a third 11 minutes from time.
Ramazani’s superb ball from deep played in Greenwood, who emphatically bagged his second of the game and looked to have secured all three points for the home side. The striker coolly slotted away the chance with his left foot on a day when he ran the opposition defence ragged.
United could have been out of sight had McCann, Ramazani or Greenwood converted late chances, and the Reds were ultimately made to pay as County found an injury-time equaliser.
A mis-timed tackle from Hockenhull presented the visitors with a late spot-kick, the third of the game, which Whittaker slotted in to ensure both sides shared the points.
United: Carney; Puigmal (Hockenhull 67), Laird, Bernard, Williams; McGhee (McCann 62), Traore, Levitt; Elanga, Greenwood, Ramazani. Subs not used: Thompson, Neville, Helm.
|
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import testing
from sqlalchemy.util import function_named
from test.orm import _base
from sqlalchemy.test.schema import Table, Column
class BaseObject(object):
    """Minimal base class that sets attributes from keyword arguments."""
    def __init__(self, *args, **kwargs):
        # dict.items() works on both Python 2 and 3; the original used
        # the Python-2-only iteritems()
        for key, value in kwargs.items():
            setattr(self, key, value)
class Publication(BaseObject):
    """Publication row; attributes are set via BaseObject's kwargs."""
    pass
class Issue(BaseObject):
    """Issue row, linked to a Publication by the mappers below."""
    pass
class Location(BaseObject):
    """Location row; its ``name`` property lazily resolves or creates
    the associated LocationName row."""
    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, str(getattr(self, 'issue_id', None)), repr(str(self._name.name)))
    def _get_name(self):
        return self._name
    def _set_name(self, name):
        """Resolve ``name`` to an existing, pending, or new LocationName."""
        session = create_session()
        s = session.query(LocationName).filter(LocationName.name==name).first()
        session.expunge_all()
        if s is not None:
            self._name = s
            return
        # reuse a pending LocationName with the same name, if any;
        # for/else replaces the original ``found`` boolean flag
        for i in session.new:
            if isinstance(i, LocationName) and i.name == name:
                self._name = i
                break
        else:
            self._name = LocationName(name=name)
    name = property(_get_name, _set_name)
class LocationName(BaseObject):
    """Lookup-table row for location names."""
    def __repr__(self):
        # NOTE(review): the name value is not included in the repr —
        # confirm whether that is intended
        return "%s()" % (self.__class__.__name__)
class PageSize(BaseObject):
    """Page-dimension record with a human-readable representation."""
    def __repr__(self):
        cls_name = self.__class__.__name__
        return "%s(%sx%s, %s)" % (cls_name, self.width, self.height, self.name)
class Magazine(BaseObject):
    """Magazine row, tied to a Location and a PageSize."""
    def __repr__(self):
        parts = (self.__class__.__name__, repr(self.location), repr(self.size))
        return "%s(%s, %s)" % parts
class Page(BaseObject):
    """Base of the page hierarchy (polymorphic identity 'p')."""
    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__, self.page_no)
class MagazinePage(Page):
    """Page that belongs to a Magazine (polymorphic identity 'm')."""
    def __repr__(self):
        return "{0}({1}, {2!r})".format(self.__class__.__name__,
                                        self.page_no, self.magazine)
class ClassifiedPage(MagazinePage):
    """Deepest page subclass (polymorphic identity 'c')."""
    pass
class MagazineTest(_base.MappedTest):
    """Schema fixture for the polymorphic Magazine/Page round-trip tests.

    The test methods themselves are attached dynamically by
    generate_round_trip_test() below.
    """
    @classmethod
    def define_tables(cls, metadata):
        # the tables are exposed as module globals so the generated
        # round-trip tests can build their mappers against them
        global publication_table, issue_table, location_table, location_name_table, magazine_table, \
            page_table, magazine_page_table, classified_page_table, page_size_table
        publication_table = Table('publication', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(45), default=''),
        )
        # one publication -> many issues
        issue_table = Table('issue', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('publication_id', Integer, ForeignKey('publication.id')),
            Column('issue', Integer),
        )
        # one issue -> many locations; each location references a name row
        location_table = Table('location', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('issue_id', Integer, ForeignKey('issue.id')),
            Column('ref', CHAR(3), default=''),
            Column('location_name_id', Integer, ForeignKey('location_name.id')),
        )
        location_name_table = Table('location_name', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(45), default=''),
        )
        magazine_table = Table('magazine', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('location_id', Integer, ForeignKey('location.id')),
            Column('page_size_id', Integer, ForeignKey('page_size.id')),
        )
        # joined-table inheritance: page -> magazine_page -> classified_page,
        # discriminated by the single-character 'type' column
        page_table = Table('page', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('page_no', Integer),
            Column('type', CHAR(1), default='p'),
        )
        magazine_page_table = Table('magazine_page', metadata,
            Column('page_id', Integer, ForeignKey('page.id'), primary_key=True),
            Column('magazine_id', Integer, ForeignKey('magazine.id')),
            Column('orders', Text, default=''),
        )
        classified_page_table = Table('classified_page', metadata,
            Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True),
            Column('titles', String(45), default=''),
        )
        page_size_table = Table('page_size', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('width', Integer),
            Column('height', Integer),
            Column('name', String(45), default=''),
        )
def generate_round_trip_test(use_unions=False, use_joins=False):
def test_roundtrip(self):
publication_mapper = mapper(Publication, publication_table)
issue_mapper = mapper(Issue, issue_table, properties = {
'publication': relationship(Publication, backref=backref('issues', cascade="all, delete-orphan")),
})
location_name_mapper = mapper(LocationName, location_name_table)
location_mapper = mapper(Location, location_table, properties = {
'issue': relationship(Issue, backref=backref('locations', lazy='joined', cascade="all, delete-orphan")),
'_name': relationship(LocationName),
})
page_size_mapper = mapper(PageSize, page_size_table)
magazine_mapper = mapper(Magazine, magazine_table, properties = {
'location': relationship(Location, backref=backref('magazine', uselist=False)),
'size': relationship(PageSize),
})
if use_unions:
page_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
'p': page_table.select(page_table.c.type=='p'),
}, None, 'page_join')
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_join.c.type, polymorphic_identity='p')
elif use_joins:
page_join = page_table.outerjoin(magazine_page_table).outerjoin(classified_page_table)
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_table.c.type, polymorphic_identity='p')
else:
page_mapper = mapper(Page, page_table, polymorphic_on=page_table.c.type, polymorphic_identity='p')
if use_unions:
magazine_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
}, None, 'page_join')
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=magazine_join.c.page_no))
})
elif use_joins:
magazine_join = page_table.join(magazine_page_table).outerjoin(classified_page_table)
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
else:
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
classified_page_mapper = mapper(ClassifiedPage,
classified_page_table,
inherits=magazine_page_mapper,
polymorphic_identity='c',
primary_key=[page_table.c.id])
session = create_session()
pub = Publication(name='Test')
issue = Issue(issue=46,publication=pub)
location = Location(ref='ABC',name='London',issue=issue)
page_size = PageSize(name='A4',width=210,height=297)
magazine = Magazine(location=location,size=page_size)
page = ClassifiedPage(magazine=magazine,page_no=1)
page2 = MagazinePage(magazine=magazine,page_no=2)
page3 = ClassifiedPage(magazine=magazine,page_no=3)
session.add(pub)
session.flush()
print [x for x in session]
session.expunge_all()
session.flush()
session.expunge_all()
p = session.query(Publication).filter(Publication.name=="Test").one()
print p.issues[0].locations[0].magazine.pages
print [page, page2, page3]
assert repr(p.issues[0].locations[0].magazine.pages) == repr([page, page2, page3]), repr(p.issues[0].locations[0].magazine.pages)
test_roundtrip = function_named(
test_roundtrip, "test_%s" % (not use_union and (use_joins and "joins" or "select") or "unions"))
setattr(MagazineTest, test_roundtrip.__name__, test_roundtrip)
# generate one round-trip test per polymorphic-mapping strategy
# NOTE(review): generate_round_trip_test's test-name computation reads
# the module-level ``use_union`` variable bound by this loop — keep the
# loop variable names in sync with that function
for (use_union, use_join) in [(True, False), (False, True), (False, False)]:
    generate_round_trip_test(use_union, use_join)
|
After many months of development, we are pleased to announce the release of uhexen2-shanjaq 1.5.9.1. Thanks to sezero, Shanjaq, spike, and samkod the Hexen II community has some awesome new features. These features include BSP2 support, mission pack auto-loading, fixed dynamic darklight effects for GL, new script functionality, ZDoom-style multi-mod loading, 44.1k audio, and a new Hexen II protocol.
Since the launch of this website, dedicated server hosting has been hit and miss. Ideally I’d love to host a few servers that remain online 24/7. Right now the main server usually goes up during the evening hours. I want to change this and will invest if we can get some people interested in playing online (currently we have about 3 or 4). If anyone actually reads the news headlines and is interested in playing, please sign up on the forum. Servers will include HWDM, DM w/ bots, CTF, RK, and Peanut.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from udemy import __title__
from udemy import __version__
from udemy import __author__
from udemy import __email__
# from distutils.core import setup
from setuptools import setup, find_packages
# absolute path of the directory containing setup.py
here = os.path.abspath(os.path.dirname(__file__))
# the long description is read from the README shipped with the project
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
setup(
    name=__title__,
    version=__version__,
    author=__author__,
    author_email=__email__,
    description='My short description for my project. ',
    long_description=long_description,
    url='https://github.com/udemy-course/udemy',
    packages=find_packages(exclude=('tests',)),
    install_requires=['requests'],
    # console entry point: the ``udemy-cli`` command runs udemy.cli:main
    entry_points='''
        [console_scripts]
        udemy-cli=udemy.cli:main
    ''',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        # 'License :: OSI Approved :: ISC License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy']
)
|
Manager Resume picture is in the category Resume and can be used for individual and noncommercial purposes, because all trademarks referenced herein are the properties of their respective owners. In this post we give you some pictures about manager resume, manager resume bullet points, manager resume description, manager resume examples 2018, manager resume keywords, manager resume objective, manager resume objective sample, manager resume samples, manager resume summary, manager resume summary examples, Manager Resume. If you want to view an image in full size, just click the image in the gallery below.
Manager resume templates Canreklonecco Assistant Managers Resume Examples Created by Pros MyPerfectResume Operations Manager Resume Sample Resume Genius Restaurant Manager Resume Template 28 Free Word PDF Document Restaurant assistant manager resume templates CV Example job Office Manager Resume Sample Tips Resume Genius Assistant Retail Manager Resume Examples Free to Try Today Facilities manager resume samples Canreklonecco Unforgettable Assistant Restaurant Manager Resume Examples to Stand Assistant Manager Resume Sample Monster Manager Resume.
|
import cgi
import os
import cStringIO as StringIO

from django.conf import settings
from django.core.paginator import Paginator, PageNotAnInteger
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.template import Context
from django.template.loader import get_template
from django.views.generic import View

import xhtml2pdf.pisa as pisa

from baseapp.models import School,Block,Class_Studying,Academic_Year,District
from students.models import Child_detail
def render_to_pdf(template_src, context_dict, filename):
    """Render a Django template to PDF and return it as an attachment.

    Parameters
    ----------
    template_src : str
        Template name resolved by Django's template loader.
    context_dict : dict
        Context used to render the template.
    filename : str
        Attachment filename (without the ``.pdf`` extension).
    """
    template = get_template(template_src)
    context = Context(context_dict)
    html = template.render(context)
    result = StringIO.StringIO()
    pdf = pisa.pisaDocument(
        StringIO.StringIO(html.encode("UTF-8")), result, link_callback=fetch_resources)
    if not pdf.err:
        # content_type is the long-supported spelling; the old mimetype
        # keyword was removed in Django 1.7
        outfile = HttpResponse(result.getvalue(), content_type="application/pdf")
        outfile['Content-Disposition'] = 'attachment; filename=' + \
            filename + '.pdf'
        return outfile
    # was ``http.HttpResponse`` — ``http`` is never imported; use the
    # HttpResponse imported at the top of this module
    return HttpResponse('We had some error on report generation<pre>%s</pre>' % cgi.escape(html))
def fetch_resources(uri, rel):
    """xhtml2pdf link callback: map a MEDIA_URL uri to a filesystem path.

    NOTE(review): relies on ``os`` and Django's ``settings`` being
    importable at module scope — confirm they are imported in this file.
    """
    path = os.path.join(
        settings.MEDIA_ROOT, uri.replace(settings.MEDIA_URL, ""))
    return path
def download_child_profile(request,ch_id):
    """Export a single child's profile as a PDF attachment.

    ``child``, ``pagesize`` and ``title`` reach the template through
    locals(), so these local names are part of the template contract.
    """
    child = Child_detail.objects.get(id=ch_id)
    pagesize = 'A4'
    title = 'Child Profile'
    return render_to_pdf('download_child_profile.html', locals(), 'Child_Profile')
class ReportViewBasic(View):
    """Render the basic report form (GET) and export child-profile PDFs
    (POST), scoping the selectable schools/blocks/districts by role.

    NOTE(review): user_category_id values are compared literally below;
    presumably 2/5 = block level, 3/6/7/8/12/13/14 = district level,
    4/9/10/11/15/16/17 = state level, anything else = school level —
    confirm against the Account model.
    """
    def get(self,request,**kwargs):
        # block-level user: schools restricted to the user's block
        if request.user.account.user_category_id == 2 or request.user.account.user_category_id == 5:
            school_list = School.objects.filter(block_id=request.user.account.associated_with).order_by('school_name')
            return render(request,'report_list_basic.html',{'school_list':school_list})
        # district-level user: blocks and schools of the user's district
        elif request.user.account.user_category_id == 3 or request.user.account.user_category_id == 6 or request.user.account.user_category_id == 7 or request.user.account.user_category_id == 8 or request.user.account.user_category_id == 12 or request.user.account.user_category_id == 13 or request.user.account.user_category_id == 14:
            block_list = Block.objects.filter(district_id=request.user.account.associated_with).order_by('block_name')
            school_list = School.objects.filter(district_id=request.user.account.associated_with).order_by('school_name')
            return render(request,'report_list_basic.html',{'school_list':school_list,'block_list':block_list})
        # state-level user: all districts, blocks and schools
        elif request.user.account.user_category_id == 4 or request.user.account.user_category_id == 9 or request.user.account.user_category_id == 10 or request.user.account.user_category_id == 11 or request.user.account.user_category_id == 15 or request.user.account.user_category_id == 16 or request.user.account.user_category_id == 17:
            district_list = District.objects.all().order_by('district_name')
            block_list = Block.objects.all().order_by('block_name')
            school_list = School.objects.all().order_by('school_name')
            return render(request,'report_list_basic.html',{'school_list':school_list,'block_list':block_list,'district_list':district_list})
        # any other category (e.g. school level): render without lists
        return render(request,'report_list_basic.html',locals())
    def post(self,request,**kwargs):
        # NOTE(review): both guards below test "class_studying"; the
        # second was presumably meant to test request.POST["academic_year"]
        # (copy-paste) — confirm.  These locals feed the PDF template
        # through locals(), so their names matter.
        if request.POST["class_studying"] == "all":
            class_studying=request.POST["class_studying"]
        if request.POST["class_studying"] == "all":
            academic_year=request.POST["academic_year"]
        # block-level user: filter by the selected school, narrowing by
        # class and/or academic year unless "all" was selected
        if request.user.account.user_category_id == 2 or request.user.account.user_category_id == 5:
            school_list = School.objects.filter(block_id=request.user.account.associated_with).order_by('school_name')
            school_id = request.POST["school_list"]
            if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"])
            elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"],class_studying=request.POST["class_studying"])
            elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
                student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"],academic_year_id=request.POST["academic_year"])
            else:
                student_detail = Child_detail.objects.filter(school_id=request.POST["school_list"],class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
            return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
        # district-level user: additionally constrain by the chosen block
        elif request.user.account.user_category_id == 3 or request.user.account.user_category_id == 6 or request.user.account.user_category_id == 7 or request.user.account.user_category_id == 8 or request.user.account.user_category_id == 12 or request.user.account.user_category_id == 13 or request.user.account.user_category_id == 14:
            block_list = Block.objects.filter(district_id=request.user.account.associated_with).order_by('block_name')
            school_list = School.objects.filter(district_id=request.user.account.associated_with).order_by('school_name')
            school_id = request.POST["school_list"]
            block_id = request.POST["block_list"]
            if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"])
            elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"])
            elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],academic_year_id=request.POST["academic_year"])
            else:
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
            return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
        # state-level user: same filters over the full data set
        elif request.user.account.user_category_id == 4 or request.user.account.user_category_id == 9 or request.user.account.user_category_id == 10 or request.user.account.user_category_id == 11 or request.user.account.user_category_id == 15 or request.user.account.user_category_id == 16 or request.user.account.user_category_id == 17:
            district_list = District.objects.all().order_by('district_name')
            block_list = Block.objects.all().order_by('block_name')
            school_list = School.objects.all().order_by('school_name')
            school_id = request.POST["school_list"]
            block_id = request.POST["block_list"]
            district_id = request.POST["district_list"]
            if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"])
            elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"])
            elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],academic_year_id=request.POST["academic_year"])
            else:
                student_detail = Child_detail.objects.filter(block_id = request.POST["block_list"],school_id=request.POST["school_list"],class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
            return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
        # school-level (or any other) user: scope to the user's own school
        else:
            school_id = request.user.account.associated_with
            if request.POST["class_studying"] == 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(school_id=school_id)
            elif request.POST["class_studying"] != 'all' and request.POST["academic_year"] == 'all':
                student_detail = Child_detail.objects.filter(school_id=school_id,class_studying=request.POST["class_studying"])
            elif request.POST["class_studying"] == 'all' and request.POST["academic_year"] != 'all':
                student_detail = Child_detail.objects.filter(school_id=school_id,academic_year_id=request.POST["academic_year"])
            else:
                student_detail = Child_detail.objects.filter(school_id=school_id,class_studying=request.POST["class_studying"],academic_year_id=request.POST["academic_year"])
            # NOTE(review): these .get(id=...) lookups fail when "all" is
            # posted for class or year — confirm intended behaviour
            cls_stud = Class_Studying.objects.get(id=request.POST["class_studying"])
            class_study = cls_stud.class_studying
            acad_yr = Academic_Year.objects.get(id=request.POST["academic_year"])
            aca_year = acad_yr.academic_year
            schl_name = School.objects.get(id=school_id)
            school_name = schl_name.school_name
            return render_to_pdf('download_child_profile_basic.html',locals(),'Child_Profile')
        # NOTE(review): unreachable — every branch above returns
        pagesize = 'A4'
        title = 'Child Profile'
        return render(request,'report_list_basic.html',locals())
|
This image gallery named as Welding Jig Table Industrial Welding Table Baileigh might be ideal solution for your decoration. As the author has insert this image into Metal Welding Table post section, would ease you to find the perfect match of you decor desire.
Use two clean cloths when you re-oil the dining table and chairs. Use the first cloth to apply oil to the table and chairs. Leave the oil to sit for 10 minutes. Then use the second cloth to wipe the oil off. It may be a hassle to do this and it will make your elbows greasy, but it will surely keep your welding jig table industrial welding table baileigh shining and looking great.
Table is one of the most essential part in a house, this is the place where the occupant particularly who with all these families use. Therefore, the layout, furniture and the others table stuff must cover all the needed without loss its appearance and visual appeal. Among so many alternatives which are accessible, metal table will be a part that produces the table appear unusual and allow it to be even more comfortable than before.
The easiest KIWOTE is by simply following the motif and select the ideal colour. Apart from that, where and how you put the furniture along with the other substance also will affect. This will ascertain the result of the design.
welding jig table industrial welding table baileigh are necessary to maintain our table. A table that doesn’t have at least a dining table and chairs isn’t a perfect table. As we know that a table and chairs are available in different styles, colors, sizes, and shapes, so it seems easy for us to choose. In reality, it isn’t that easy. We need advice to buy them. Bear in mind, to purchase them would be like to get investments, right? They are things that we don’t buy daily. That’s the reason why we need to buy the ideal ones for table carefully. It is always suggested to buy table tables together with chairs. Below is the guidance to select and buy a table table with seats.
Others welding jig table industrial welding table baileighis by creating the island cooking area, this will probably be helpful to have special area for the cooking. However, the island is not just the built in but also the table can be used for your own island. The additional table for eat will be good despite just applying built in for cleaning or cooking area. This will be quite nice and comfy for cooking while hanging outside.
Whenever you’re confused to get table appliance packages, welding jig table industrial welding table baileigh are the very best choice for you personally. You’ll find a lot of table appliance packs you may select in Lowes. Not merely it, but you’ll get many rewards by getting Lowes table home equipment. Here some tips to you regarding Lowes table appliances packs.
Some people desire to design their tables by themselves. Given that table could be their favorite place in order they want it to as suitable with their requirements and preferences as possible. You are able to search welding jig table industrial welding table baileigh and also a lot of inspirations will probably be given. Here will be the metal table suggestions for the inspiration. Layout your table with feel on texture contrast. You are able to combine and match textures, or you may additionally employing the same texture with unique layouts. You may choose exactly the same and exact gems for your table ground tile that are completed in various way and unite them as one.
|
from astropy.table import Table
import pyradex
import numpy as np

# One Radex instance is created up front and re-used for every grid point;
# per-point parameters are passed on each call below.
R = pyradex.Radex(column=1e16, abundance=1e-4, species='ph2co-h2')

fortho = 0.75  # ortho-H2 fraction used to split the collider density

# Grid axes: line width (FWHM), H2 density, column density, temperature.
nFWHM = 5
FWHMmin = 0.5   # km/s (originally misspelled `FHWMmin` and never used)
FWHMmax = 5     # km/s (originally misspelled `FHWMmx` and never used)
nDens = 21
nlower = 2      # log10 of the lowest H2 density
nupper = 6      # log10 of the highest H2 density
nCol = 41
Nlower = 10     # log10 of the lowest column density
Nupper = 16     # log10 of the highest column density
nTemp = 31
Tlower = 10     # NOTE(review): not actually used below; `Temps` spans
Tupper = 300    #   10 - ~316 K (logspace uses 2.5, not log10(300)=2.48) -- confirm intent.

Temps = np.logspace(1, 2.5, nTemp)
Cols = 1e1**np.linspace(Nlower, Nupper, nCol)
Densities = 1e1**(np.linspace(nlower, nupper, nDens))
# Same values as the original hard-coded logspace(log10(0.5), log10(5), nFWHM),
# but now driven by the named constants above.
FWHM = np.logspace(np.log10(FWHMmin), np.log10(FWHMmax), nFWHM)

outtable = Table(names=['Tex_303_202', 'Tex_322_221', 'Tex_321_220',
                        'tau_303_202', 'tau_322_221', 'tau_321_220',
                        'Temperature', 'Column', 'nH2', 'FWHM'])

for T in Temps:
    for N in Cols:
        for n in Densities:
            for dV in FWHM:
                Tlvg = R(collider_densities={'oH2': n*fortho, 'pH2': (1-fortho)*n},
                         column=N, abundance=1e-9, species='ph2co-h2',
                         temperature=T, deltav=dV)
                outtable.add_row()
                # Rows 2, 9 and 12 of the pyradex result presumably
                # correspond to the 3_03-2_02, 3_22-2_21 and 3_21-2_20
                # transitions (per the column names) -- TODO confirm
                # against the ph2co-h2 LAMDA file ordering.
                outtable[-1]['Tex_303_202'] = Tlvg[2]['Tex']
                outtable[-1]['tau_303_202'] = Tlvg[2]['tau']
                outtable[-1]['Tex_322_221'] = Tlvg[9]['Tex']
                outtable[-1]['tau_322_221'] = Tlvg[9]['tau']
                outtable[-1]['Tex_321_220'] = Tlvg[12]['Tex']
                outtable[-1]['tau_321_220'] = Tlvg[12]['tau']
                outtable[-1]['Temperature'] = T
                outtable[-1]['Column'] = N
                outtable[-1]['nH2'] = n
                outtable[-1]['FWHM'] = dV

outtable.write('ph2cogrid.fits', format='fits', overwrite=True)
|
For the second year running, Sweden and Singapore have been ranked as doing the best job of using technology to boost their economies.
The list is dominated by Nordic countries: Sweden ranked highest, with Finland (third), Denmark (seventh) and Norway (ninth) also ranking highly. The rest of the top ten includes the Asian nations Singapore, Taiwan and South Korea, along with the US (fifth) and Canada (eighth).
The rankings are based on three factors: the overall “environment” for information and communications technology (taking into account both infrastructure and regulation); how ready individuals, businesses and governments are to make best use of technology; and how well the available tech is actually used.
In practice this takes into account a wide range of factors, from literacy rates to the relative cost of a mobile phone line, and from software piracy rates to the number of patent applications.
Ironically, Libya suffered the biggest decline in the rankings (falling 23 places), with Egypt also dropping. It doesn’t appear this is directly related to closures of Internet access during political protests (when your banking system can’t get online, it’s safe to say your economy isn’t making the best use of technology), but may reflect some of the tensions that eventually spilled over into protest and conflict.
Chad takes bottom place in the list: while the lower rankings are dominated by African nations, there are also several South American countries, with Bolivia just three places from the bottom.
Why the Nordic countries rank so highly is open to question (one theory has it that with some elements of the figures based on local surveys, results may be influenced by a cheery disposition.) The most common assessments are that it is a combination of high standards of education, and a particular approach to government involvement in technology: paying highly to set up infrastructure but only lightly regulating the subsequent use. Indeed, it’s notable that third-placed Finland was the first country in the world to declare that access to a 1MBps broadband service was a legal right for all citizens.
Well, us Swiss people here are quite happy of our 4th place, wouldn't you be?
But less happy that you forgot us….
I'm jealous of residents of Switzerland in just about all ways possible. When you're done with the Hadron Collider, work on a teleportation device so I can visit.
The Nordic nations have like 99% literacy rates.
|
"""This module tests the service CRUD functionality
"""
import unittest
import tempfile
import shutil
import datetime
from dc24_ingester_platform.service import ingesterdb, repodb
from jcudc24ingesterapi.models.locations import Region, Location
from jcudc24ingesterapi.models.dataset import Dataset
from jcudc24ingesterapi.schemas.data_entry_schemas import DataEntrySchema
from jcudc24ingesterapi.schemas.metadata_schemas import DatasetMetadataSchema, DataEntryMetadataSchema
from jcudc24ingesterapi.schemas.data_types import FileDataType, String, Double
from jcudc24ingesterapi.ingester_platform_api import UnitOfWork
from jcudc24ingesterapi.models.data_sources import PullDataSource,\
DatasetDataSource
from jcudc24ingesterapi.models.sampling import PeriodicSampling
from jcudc24ingesterapi.models.data_entry import DataEntry
from jcudc24ingesterapi.ingester_exceptions import InvalidObjectError,\
StaleObjectError, PersistenceError
class TestServiceModels(unittest.TestCase):
    """Exercises CRUD behaviour of the ingester service layer.

    Each test runs against fresh in-memory SQLite databases plus a
    temporary file store, created in setUp and destroyed in tearDown.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        # Fresh repository + service per test so tests cannot interact.
        self.files = tempfile.mkdtemp()
        self.repo = repodb.RepositoryDB({"db":"sqlite://", "files":self.files})
        self.service = ingesterdb.IngesterServiceDB("sqlite://", self.repo)

    def tearDown(self):
        del self.service
        del self.repo
        shutil.rmtree(self.files)

    def test_data_types(self):
        """End-to-end check: schema/location/dataset persistence, schema-type
        validation, active-dataset queries, search, and data entry ingest."""
        schema1 = DatasetMetadataSchema("schema1")
        schema1.addAttr(FileDataType("file"))
        schema1a = self.service.persist(schema1)
        self.assertEqual(1, len(schema1a.attrs))
        schema2 = DataEntrySchema("schema2")
        schema2.addAttr(FileDataType("file"))
        schema2.addAttr(Double("x"))
        schema2a = self.service.persist(schema2)
        loc = Location(10.0, 11.0)
        loca = self.service.persist(loc)
        dataset = Dataset()
        dataset.schema = schema1a.id
        dataset.location = loca.id
        # We're trying to use a dataset_metadata schema, so this should fail
        self.assertRaises(ValueError, self.service.persist, dataset)
        dataset.schema = schema2a.id
        # Now we're using the correct type of schema
        dataset1a = self.service.persist(dataset)
        dataset1b = self.service.get_dataset(dataset1a.id)
        self.assertEqual(dataset1a.id, dataset1b.id)
        self.assertDictEqual(dataset1a.__dict__, dataset1b.__dict__)
        # Update and add a data source
        dataset1b.data_source = PullDataSource("http://www.abc.net.au", None, recursive=False, field="file", processing_script="TEST", sampling=PeriodicSampling(10000))
        dataset1b.enabled = True
        dataset1c = self.service.persist(dataset1b)
        self.assertNotEqual(None, dataset1c.data_source)
        self.assertEqual("TEST", dataset1c.data_source.processing_script)
        self.assertNotEqual(None, dataset1c.data_source.sampling)
        datasets = self.service.get_active_datasets()
        self.assertEqual(1, len(datasets))
        self.assertNotEqual(None, datasets[0].data_source)
        self.assertEqual("TEST", datasets[0].data_source.processing_script)
        self.assertNotEqual(None, datasets[0].data_source.sampling)
        # Test with criteria
        datasets = self.service.get_active_datasets(kind="pull_data_source")
        self.assertEqual(1, len(datasets))
        datasets = self.service.get_active_datasets(kind="push_data_source")
        self.assertEqual(0, len(datasets))
        schema1b = self.service.get_schema(schema1a.id)
        self.assertEqual(schema1a.id, schema1b.id)
        datasets = self.service.search("dataset")
        self.assertEqual(1, len(datasets))
        schemas = self.service.search("data_entry_schema")
        self.assertEqual(1, len(schemas))
        schemas = self.service.search("dataset_metadata_schema")
        self.assertEqual(1, len(schemas))
        locs = self.service.search("location")
        self.assertEqual(1, len(locs))
        # Test ingest
        data_entry_1 = DataEntry(dataset1b.id, datetime.datetime.now())
        data_entry_1['x'] = 27.8
        data_entry_1 = self.service.persist(data_entry_1)
        self.assertIsNotNone(data_entry_1.id)

    def test_region(self):
        """A region persists along with its list of points."""
        #{"class":"region", "name": "Region1", "region_points":[(1, 1), (1, 2)]}
        region1 = Region("Region 1")
        region1.region_points = [(1, 1), (1, 2)]
        region1a = self.service.persist(region1)
        self.assertEqual(2, len(region1a.region_points), "Not 2 region points")

#    def test_unit(self):
#        unit = {"insert":[{"id":-2, "class":"dataset", "location":-1, "schema": -3, "data_source":{"class":"test", "param1":"1", "param2":"2"}, "sampling":{"class":"schedule1", "param1":"1", "param2":"2"}},
#            {"id":-1, "latitude":30, "longitude": 20, "class":"location"},
#            {"id":-3, "attributes":[{"name":"file", "class":"file"}], "class":"data_entry_schema"}], "delete":[], "update":[], "enable":[], "disable":[]}
#        unit2 = self.service.commit(unit, None)
#        for obj in unit2:
#            if obj["class"] == "location":
#                self.assertEquals(obj["correlationid"], -1)
#            elif obj["class"] == "dataset":
#                self.assertEquals(obj["correlationid"], -2)

    def test_schema_persistence(self):
        """This test creates a simple schema hierarchy, and tests updates, etc"""
        schema1 = DataEntrySchema("base1")
        schema1.addAttr(FileDataType("file"))
        schema1 = self.service.persist(schema1)
        self.assertGreater(schema1.id, 0, "ID does not appear valid")
        self.assertEqual(1, len(schema1.attrs))
        schema2 = DataEntrySchema("child1")
        schema2.addAttr(FileDataType("file2"))
        schema2.extends.append(schema1.id)
        schema2 = self.service.persist(schema2)
        self.assertGreater(schema2.id, 0, "ID does not appear valid")
        # Only the child's own attribute is reported; inherited ones are not.
        self.assertEqual(1, len(schema2.attrs))
        self.assertEqual("file2", schema2.attrs["file2"].name)

    def test_schema_persistence_unit(self):
        """Persists a schema hierarchy through a unit of work and checks
        that committed objects get real IDs and keep their correlation IDs."""
        unit = UnitOfWork(None)
        ids = []
        schema1 = DataEntrySchema("base1")
        schema1.addAttr(FileDataType("file"))
        ids.append(unit.post(schema1))
        schema2 = DataEntrySchema("child1")
        schema2.addAttr(FileDataType("file2"))
        # NOTE(review): this appends schema1.id (unset before commit) rather
        # than the correlation id returned by unit.post -- confirm intended.
        schema2.extends.append(schema1.id)
        ids.append(unit.post(schema2))
        ret = self.service.commit(unit, None)
        for obj in ret:
            self.assertGreater(obj.id, 0)
            self.assertIn(obj.correlationid, ids)

    def test_schema_persistence_clash(self):
        """This test creates a simple schema hierarchy, that has a field name clash"""
        schema1 = DataEntrySchema("base1")
        schema1.addAttr(FileDataType("file"))
        schema1 = self.service.persist(schema1)
        self.assertGreater(schema1.id, 0, "ID does not appear valid")
        self.assertEqual(1, len(schema1.attrs))
        schema2 = DataEntrySchema("child1")
        schema2.addAttr(FileDataType("file"))
        schema2.extends.append(schema1.id)
        # The child re-declares "file", which the parent already defines.
        self.assertRaises(PersistenceError, self.service.persist, schema2)

    def test_state_persistence(self):
        """Test that the state of samplers and data sources can be persisted."""
        sampler_state = self.service.get_sampler_state(1)
        self.assertEqual(0, len(sampler_state))
        self.service.persist_sampler_state(1, {"test":"abc","test2":123})
        sampler_state = self.service.get_sampler_state(1)
        self.assertEqual(2, len(sampler_state))
        self.assertEqual("abc", sampler_state["test"])
        # Values round-trip as strings, hence "123" not 123.
        self.assertEqual("123", sampler_state["test2"])
        del sampler_state["test"]
        sampler_state["test2"] = "xyz"
        self.service.persist_sampler_state(1, sampler_state)
        sampler_state = self.service.get_sampler_state(1)
        self.assertEqual(1, len(sampler_state))
        self.assertEqual("xyz", sampler_state["test2"])
        # Now test the same thing on the data source state
        data_source_state = self.service.get_data_source_state(1)
        self.assertEqual(0, len(data_source_state))
        self.service.persist_data_source_state(1, {"test":"abc","test2":123})
        data_source_state = self.service.get_data_source_state(1)
        self.assertEqual(2, len(data_source_state))
        self.assertEqual("abc", data_source_state["test"])
        self.assertEqual("123", data_source_state["test2"])
        del data_source_state["test"]
        data_source_state["test2"] = "xyz"
        self.service.persist_data_source_state(1, data_source_state)
        data_source_state = self.service.get_data_source_state(1)
        self.assertEqual(1, len(data_source_state))
        self.assertEqual("xyz", data_source_state["test2"])

    def test_dataset_data_source_unit(self):
        """Commits two datasets where the second sources from the first, and
        verifies the data source's dataset_id is rewritten to the real ID."""
        unit = UnitOfWork(None)
        schema1 = DataEntrySchema("base1")
        schema1.addAttr(FileDataType("file"))
        schema_id = unit.post(schema1)
        loc = Location(10.0, 11.0)
        loc.name = "Location"
        loc_id = unit.post(loc)
        dataset1 = Dataset()
        dataset1.schema = schema_id
        dataset1.location = loc_id
        dataset1_id = unit.post(dataset1)
        dataset2 = Dataset()
        dataset2.schema = schema_id
        dataset2.location = loc_id
        dataset2.data_source = DatasetDataSource(dataset1_id, "")
        dataset2_id = unit.post(dataset2)
        ret = self.service.commit(unit, None)
        found = False
        for r in ret:
            if isinstance(r, Dataset) and dataset1_id == r.correlationid:
                dataset1_id = r.id
            elif isinstance(r, Dataset) and dataset2_id == r.correlationid:
                self.assertEqual(dataset1_id, r.data_source.dataset_id, "Data source dataset_id was not updated")
                found = True
        self.assertTrue(found, "Didn't find the dataset with the dataset data source")

    def test_region_persist(self):
        """Test that the region persists correctly, including version numbering, and that
        region points are correctly updated"""
        region = Region("Region 1")
        region.region_points = [(1, 1), (1, 2)]
        region1 = self.service.persist(region)
        self.assertEqual(1, region1.version)
        region1.version = 0
        self.assertRaises(StaleObjectError, self.service.persist, region1)
        region1.version = 1
        region1.region_points = [(99,100)]
        region2 = self.service.persist(region1)
        self.assertEqual(2, region2.version)
        self.assertEqual(1, len(region2.region_points))
        self.assertEqual((99, 100), region2.region_points[0])

    def test_location_persist(self):
        """Location version increments on persist; stale versions are rejected."""
        loc = Location(10.0, 11.0)
        loc.name = "Location"
        loc1 = self.service.persist(loc)
        self.assertEqual(1, loc1.version)
        loc1.version = 0
        self.assertRaises(StaleObjectError, self.service.persist, loc1)
        loc1.version = 1
        loc2 = self.service.persist(loc1)
        self.assertEqual(2, loc2.version)

    def test_schema_persist(self):
        """Schemas are immutable once persisted: any re-persist fails."""
        schema = DataEntrySchema("base1")
        schema.addAttr(FileDataType("file"))
        schema1 = self.service.persist(schema)
        self.assertEqual(1, schema1.version)
        schema1.version = 0
        self.assertRaises(PersistenceError, self.service.persist, schema1)
        schema1.version = 1
        # Even with the correct version, updating a schema is not allowed.
        self.assertRaises(PersistenceError, self.service.persist, schema1)

    def test_dataset_persist(self):
        """Dataset version increments on persist; stale versions are rejected."""
        schema = DataEntrySchema("base1")
        schema.addAttr(FileDataType("file"))
        schema = self.service.persist(schema)
        loc = Location(10.0, 11.0)
        loc.name = "Location"
        loc = self.service.persist(loc)
        dataset = Dataset()
        dataset.schema = schema.id
        dataset.location = loc.id
        dataset1 = self.service.persist(dataset)
        self.assertEqual(1, dataset1.version)
        dataset1.version = 0
        self.assertRaises(StaleObjectError, self.service.persist, dataset1)
        dataset1.version = 1
        dataset2 = self.service.persist(dataset1)
        self.assertEqual(2, dataset2.version)
# Allow this test module to be executed directly as a script.
if __name__ == "__main__":
    unittest.main()
|
Children's Film Festival Press Meet Photos. Mukesh Khanna of Shaktimaan fame was the main guest at the event, and he was seen striking his Shaktimaan pose several times with the kids. It was a press meet about where to inaugurate the media center, and it has now been decided that it will happen at L V Prasad Imax.
|
# Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Public API of this module. `NotFoundException` is defined below but was
# missing from the original list; it is added so wildcard imports and API
# documentation pick it up.
__all__ = [
    'U2fException',
    'BadInputException',
    'NotFoundException',
    'NoEligibleDevicesException',
    'DeviceCompromisedException'
]
class U2fException(Exception):
    """Base error for U2F operations.

    Carries an HTTP-style ``status_code`` and a numeric ``code`` (both
    overridden by subclasses), plus the human-readable message and optional
    extra data supplied at construction time.
    """
    status_code = 400
    code = -1

    def __init__(self, message, data=None):
        # Forward both arguments to Exception so str() and .args keep the
        # standard behaviour.
        super(U2fException, self).__init__(message, data)
        self.data = data
        self.message = message
class BadInputException(U2fException):
    """Raised when the caller supplied malformed or invalid input."""
    code = 10
class NotFoundException(BadInputException):
    """Bad input referring to something that does not exist (HTTP 404)."""
    status_code = 404
class NoEligibleDevicesException(U2fException):
    """Raised when no registered device is eligible for the operation."""
    code = 11
class DeviceCompromisedException(U2fException):
    """Raised when a device is suspected to be compromised (e.g. counter
    mismatch)."""
    code = 12
|
Lunch menu, à la carte dinner, or just have a beer.
The Pension Petit is centrally located in Bratislava, next to the Presidential Palace and 300 m from St. Michael's Gate in the Old Town. It houses LePetit Cafe & Bar, and offers free WiFi in the cafe and in the lobby. Each accommodation unit has a private bathroom, an electric kettle and cable TV. Towels and bed linens are provided.
The Bratislava Castle, the Danube river and all other tourist attractions can be reached on foot. The Petit can be reached from the Bratislava Main Train Station and the Mlynske Nivy Central Bus Station by public transport within 10 minutes. Parking is available at a location 200 m from the property. It can be arranged upon request and at an additional charge. Stare Mesto is a great choice for travellers interested in food, old town exploring and walking.
Everything what you need for your business stay.
Relax and discover city with your friends, love or family.
One double bed and two single beds. Best for family or party friends.
Do you need park your car? Also no problem.
Enjoy our breakfast, lunch menu or à la carte dinner. Not hungry? Don't worry — we also have hot and cold beverages.
If you come to our pension before 2pm you can leave your luggage at the reception.
If needed, you can leave your luggage at the reception.
Please note that all our rooms and premises are strictly non-smoking areas. The exceptions are the balconies and the terrace on the ground floor, where smoking is allowed.
If you drive with your car to our pension please use adress: PANENSKA 36. Pension Petit is behind the corner to the right next to SUBWAY RESTAURANT.
We do not recommend that our guests take a taxi directly from the station. Always call one using a phone number either from advertisements (there are plenty in the area of both stations) or use the number displayed on taxi cars parked in front of the station. This may not make sense to you, but otherwise they would charge you much more, especially since you are a foreigner. The cost from the train station should be max. 5€, from the bus station max. 8€. Your destination will be "Panenska 36", as mentioned above.
If you will be arriving by plane, we can offer you a taxi pick-up by our hotel limousine service to you. All you need to do is to let us know your flight number and its arrival time. Price from the Bratislava airport is 20€. In case you would like our driver to pick you up from the Vienna airport the price will be 65€.
If you will be arriving by train to main train station, take a bus no. 93 until "Hodzovo namestie" (2 stops). Now the "Bernard pub" should be right in front of you. Pension Petit is 15m on the left from this pub.
© 2017 Pension Petit. All Rights Reserved.
|
#!/usr/bin/env python
'''
Copyright (C) 2012-2013 Kevin Breen.
This file is part of the Maildb web application
See the 'LICENSE' File for copying permission.
'''
# All The Hashing Functions will be in here somewhere
import os
import sys
import hashlib
from core.common import Dictionary
class MailHash():
    """Collection of hashing helpers for mail parts and files.

    All Hash* methods take raw bytes and return the lowercase hex digest.
    """

    def HashMD5(self, part_data):
        """Return the hex MD5 digest of part_data (bytes)."""
        md5_hash = hashlib.md5()
        md5_hash.update(part_data)
        return md5_hash.hexdigest()

    def HashSha1(self, part_data):
        """Return the hex SHA-1 digest of part_data (bytes)."""
        sha1_hash = hashlib.sha1()
        sha1_hash.update(part_data)
        return sha1_hash.hexdigest()

    def HashSha256(self, part_data):
        """Return the hex SHA-256 digest of part_data (bytes)."""
        sha256_hash = hashlib.sha256()
        sha256_hash.update(part_data)
        return sha256_hash.hexdigest()

    def HashSha512(self, part_data):
        """Return the hex SHA-512 digest of part_data (bytes)."""
        sha512_hash = hashlib.sha512()
        sha512_hash.update(part_data)
        return sha512_hash.hexdigest()

    def Hashssdeep(self, part_data):
        """Return the ssdeep fuzzy hash of part_data.

        Imported lazily so the rest of the class works without the
        third-party `ssdeep` package installed.
        """
        import ssdeep
        deep = ssdeep.hash(part_data)
        return deep

    def fileMD5(self, filePath):
        """Return the hex MD5 digest of a file, read in 8 KiB chunks.

        Uses a `with` block so the file handle is always closed (the
        original implementation leaked the handle).
        """
        m = hashlib.md5()
        with open(filePath, 'rb') as fh:
            while True:
                data = fh.read(8192)
                if not data:
                    break
                m.update(data)
        return m.hexdigest()
|
5. Do I have to receive marketing information from Precision?
You may request us to provide access to your personal information or to request its correction by calling the Customer Service team on 09 377 5451 (Auckland) or 04 384 8400 (Wellington), or email us on [email protected]. We aim to respond to most requests within 14 days, or if the request is more detailed, within 30 days. Precision may recover from you its reasonable cost of supplying you with access to your personal information.
8. Is the information Precision has about me secure?
Precision has security measures in place to protect personal information from loss, misuse, interference and from unauthorised access, modification or disclosure.
|
"""2D fiber-to-chip grating coupler optimization code.
This is a simple spins example that optimizes a fiber-to-chip grating coupler
for the SOI platform. See Su et al. Optics Express (2018) for details.
To run an optimization:
$ python3 grating.py run save-folder
To view results:
$ python3 grating.py view save-folder
To see optimization status quickly:
$ python3 grating.py view_quick save-folder
To resume an optimization:
$ python3 grating.py resume save-folder
To generate a GDS file of the grating:
$ python3 grating.py gen_gds save-folder
"""
import os
import pickle
import shutil
import gdspy
import numpy as np
from typing import List, NamedTuple, Tuple
# `spins.invdes.problem_graph` contains the high-level spins code.
from spins.invdes import problem_graph
# Import module for handling processing optimization logs.
from spins.invdes.problem_graph import log_tools
# `spins.invdes.problem_graph.optplan` contains the optimization plan schema.
from spins.invdes.problem_graph import optplan
from spins.invdes.problem_graph import workspace
# If `True`, also minimize the back-reflection.
MINIMIZE_BACKREFLECTION = False
# If `True`, runs an additional `cont_iters` of continuous optimization with
# a discreteness permittivity biasing penalty added.
# Fine-tuning the `initial_value` of `disc_scaling` may be necessary depending
# on application and the number of wavelengths optimized.
DISCRETENESS_PENALTY = True
def run_opt(save_folder: str, grating_len: float, wg_width: float) -> None:
    """Main optimization script.

    This function sets up the optimization and executes it.

    Args:
        save_folder: Location to save the optimization data.
        grating_len: Length of the grating coupler and design region
            (nm, presumably -- same units as the thicknesses below; confirm).
        wg_width: Width of the waveguide (same units as `grating_len`).
    """
    # Raises FileExistsError if `save_folder` already exists, so a previous
    # run is never silently overwritten.
    os.makedirs(save_folder)

    wg_thickness = 220

    sim_space = create_sim_space(
        "sim_fg.gds",
        "sim_bg.gds",
        grating_len=grating_len,
        box_thickness=2000,
        wg_thickness=wg_thickness,
        etch_frac=0.5,
        wg_width=wg_width)
    obj, monitors = create_objective(
        sim_space, wg_thickness=wg_thickness, grating_len=grating_len)
    # 50 continuous iterations, 200 discrete iterations, 80 nm min feature.
    trans_list = create_transformations(
        obj, monitors, 50, 200, sim_space, min_feature=80)
    plan = optplan.OptimizationPlan(transformations=trans_list)

    # Save the optimization plan so we have an exact record of all the
    # parameters.
    with open(os.path.join(save_folder, "optplan.json"), "w") as fp:
        fp.write(optplan.dumps(plan))
    # Copy over the GDS files.
    shutil.copyfile("sim_fg.gds", os.path.join(save_folder, "sim_fg.gds"))
    shutil.copyfile("sim_bg.gds", os.path.join(save_folder, "sim_bg.gds"))

    # Execute the optimization and indicate that the current folder (".") is
    # the project folder. The project folder is the root folder for any
    # auxiliary files (e.g. GDS files).
    problem_graph.run_plan(plan, ".", save_folder=save_folder)

    # Generate the GDS file.
    gen_gds(save_folder, grating_len, wg_width)
def create_sim_space(
        gds_fg_name: str,
        gds_bg_name: str,
        grating_len: float = 12000,
        etch_frac: float = 0.5,
        box_thickness: float = 2000,
        wg_width: float = 12000,
        wg_thickness: float = 220,
        buffer_len: float = 1500,
        dx: int = 40,
        num_pmls: int = 10,
        visualize: bool = False,
) -> optplan.SimulationSpace:
    """Creates the simulation space.

    The simulation space contains information about the boundary conditions,
    gridding, and design region of the simulation.

    Args:
        gds_fg_name: Location to save foreground GDS.
        gds_bg_name: Location to save background GDS.
        grating_len: Length of the grating coupler and design region.
        etch_frac: Etch fraction of the grating. 1.0 indicates a fully-etched
            grating.
        box_thickness: Thickness of BOX layer in nm.
        wg_thickness: Thickness of the waveguide.
        wg_width: Width of the waveguide.
        buffer_len: Buffer distance to put between grating and the end of the
            simulation region. This excludes PMLs.
        dx: Grid spacing to use.
        num_pmls: Number of PML layers to use on each side.
        visualize: If `True`, draws the polygons of the GDS file.

    Returns:
        A `SimulationSpace` description.
    """
    # NOTE(review): the default wg_width (12000) equals the default
    # grating_len -- confirm this is intentional.
    # Calculate the simulation size, including PMLs
    # NOTE(review): only one PML span (dx * num_pmls) is added even though
    # PMLs sit on both sides of each axis -- confirm intended.
    sim_size = [
        grating_len + 2 * buffer_len + dx * num_pmls,
        wg_width + 2 * buffer_len + dx * num_pmls
    ]
    # First, we use `gdspy` to draw the waveguides and shapes that we would
    # like to use. Instead of programmatically generating a GDS file using
    # `gdspy`, we could also simply provide a GDS file (e.g. drawn using
    # KLayout).

    # Declare some constants to represent the different layers.
    LAYER_SILICON_ETCHED = 100
    LAYER_SILICON_NONETCHED = 101

    # Create rectangles corresponding to the waveguide, the BOX layer, and the
    # design region. We extend the rectangles outside the simulation region
    # by multiplying locations by a factor of 1.1.

    # We distinguish between the top part of the waveguide (which is etched)
    # and the bottom part of the waveguide (which is not etched).
    waveguide_top = gdspy.Rectangle((-1.1 * sim_size[0] / 2, -wg_width / 2),
                                    (-grating_len / 2, wg_width / 2),
                                    LAYER_SILICON_ETCHED)
    waveguide_bottom = gdspy.Rectangle((-1.1 * sim_size[0] / 2, -wg_width / 2),
                                       (grating_len / 2, wg_width / 2),
                                       LAYER_SILICON_NONETCHED)
    design_region = gdspy.Rectangle((-grating_len / 2, -wg_width / 2),
                                    (grating_len / 2, wg_width / 2),
                                    LAYER_SILICON_ETCHED)

    # Generate the foreground and background GDS files.
    gds_fg = gdspy.Cell("FOREGROUND", exclude_from_current=True)
    gds_fg.add(waveguide_top)
    gds_fg.add(waveguide_bottom)
    gds_fg.add(design_region)

    # The background (baseline) structure omits the design region rectangle.
    gds_bg = gdspy.Cell("BACKGROUND", exclude_from_current=True)
    gds_bg.add(waveguide_top)
    gds_bg.add(waveguide_bottom)

    gdspy.write_gds(gds_fg_name, [gds_fg], unit=1e-9, precision=1e-9)
    gdspy.write_gds(gds_bg_name, [gds_bg], unit=1e-9, precision=1e-9)

    if visualize:
        gdspy.LayoutViewer(cells=[gds_fg])
        gdspy.LayoutViewer(cells=[gds_bg])

    # The BOX layer/silicon device interface is set at `z = 0`.
    #
    # Describe materials in each layer.
    # We actually have four material layers:
    # 1) Silicon substrate
    # 2) Silicon oxide BOX layer
    # 3) Bottom part of grating that is not etched
    # 4) Top part of grating that can be etched.
    #
    # The last two layers put together properly describe a partial etch.
    #
    # Note that the layer numbering in the GDS file is arbitrary. In our case,
    # layer 100 and 101 correspond to actual structure. Layer 300 is a dummy
    # layer; it is used for layers that only have one material (i.e. the
    # background and foreground indices are identical) so the actual structure
    # used does not matter.
    stack = [
        optplan.GdsMaterialStackLayer(
            foreground=optplan.Material(mat_name="Si"),
            background=optplan.Material(mat_name="Si"),
            # Note that layer number here does not actually matter because
            # the foreground and background are the same material.
            gds_layer=[300, 0],
            extents=[-10000, -box_thickness],
        ),
        optplan.GdsMaterialStackLayer(
            foreground=optplan.Material(mat_name="SiO2"),
            background=optplan.Material(mat_name="SiO2"),
            gds_layer=[300, 0],
            extents=[-box_thickness, 0],
        ),
    ]
    # If `etch-frac` is 1, then we do not need two separate layers.
    if etch_frac != 1:
        stack.append(
            optplan.GdsMaterialStackLayer(
                foreground=optplan.Material(mat_name="Si"),
                background=optplan.Material(mat_name="SiO2"),
                gds_layer=[LAYER_SILICON_NONETCHED, 0],
                extents=[0, wg_thickness * (1 - etch_frac)],
            ))
    stack.append(
        optplan.GdsMaterialStackLayer(
            foreground=optplan.Material(mat_name="Si"),
            background=optplan.Material(mat_name="SiO2"),
            gds_layer=[LAYER_SILICON_ETCHED, 0],
            extents=[wg_thickness * (1 - etch_frac), wg_thickness],
        ))

    mat_stack = optplan.GdsMaterialStack(
        # Any region of the simulation that is not specified is filled with
        # oxide.
        background=optplan.Material(mat_name="SiO2"),
        stack=stack,
    )

    # Vertical simulation extents: 1000 nm below the BOX, 1500 nm above the
    # waveguide.
    sim_z_start = -box_thickness - 1000
    sim_z_end = wg_thickness + 1500

    # Create a simulation space for both continuous and discrete optimization.
    simspace = optplan.SimulationSpace(
        name="simspace",
        mesh=optplan.UniformMesh(dx=dx),
        eps_fg=optplan.GdsEps(gds=gds_fg_name, mat_stack=mat_stack),
        eps_bg=optplan.GdsEps(gds=gds_bg_name, mat_stack=mat_stack),
        # Note that we explicitly set the simulation region. Anything
        # in the GDS file outside of the simulation extents will not be drawn.
        sim_region=optplan.Box3d(
            center=[0, 0, (sim_z_start + sim_z_end) / 2],
            extents=[sim_size[0], dx, sim_z_end - sim_z_start],
        ),
        selection_matrix_type="uniform",
        # PMLs are applied on x- and z-axes. No PMLs are applied along y-axis
        # because it is the axis of translational symmetry.
        pml_thickness=[num_pmls, num_pmls, 0, 0, num_pmls, num_pmls],
    )

    if visualize:
        # To visualize permittivity distribution, we actually have to
        # construct the simulation space object.
        import matplotlib.pyplot as plt
        from spins.invdes.problem_graph.simspace import get_fg_and_bg

        context = workspace.Workspace()
        eps_fg, eps_bg = get_fg_and_bg(context.get_object(simspace), wlen=1550)

        def plot(x):
            plt.imshow(np.abs(x)[:, 0, :].T.squeeze(), origin="lower")

        plt.figure()
        plt.subplot(3, 1, 1)
        plot(eps_fg[2])
        plt.title("eps_fg")

        plt.subplot(3, 1, 2)
        plot(eps_bg[2])
        plt.title("eps_bg")

        plt.subplot(3, 1, 3)
        plot(eps_fg[2] - eps_bg[2])
        plt.title("design region")
        plt.show()
    return simspace
def create_objective(
        sim_space: optplan.SimulationSpace,
        wg_thickness: float,
        grating_len: float,
) -> Tuple[optplan.Function, List[optplan.Monitor]]:
    """Creates an objective function.

    The objective function is what is minimized during the optimization.

    Args:
        sim_space: The simulation space description.
        wg_thickness: Thickness of waveguide.
        grating_len: Length of grating.

    Returns:
        A tuple `(obj, monitor_list)` where `obj` is an objective function that
        tries to maximize the coupling efficiency of the grating coupler and
        `monitor_list` is a list of monitors (values to keep track of during
        the optimization).
    """
    # Keep track of metrics and fields that we want to monitor.
    monitor_list = []
    objectives = []

    # Set wavelengths to optimize over
    wlens = [1550]
    for wlen in wlens:
        epsilon = optplan.Epsilon(
            simulation_space=sim_space,
            wavelength=wlen,
        )
        # Append to monitor list for each wavelength
        monitor_list.append(
            optplan.FieldMonitor(name="mon_eps_" + str(wlen), function=epsilon))

        # Add a Gaussian source that is angled at 10 degrees.
        sim = optplan.FdfdSimulation(
            source=optplan.GaussianSource(
                polarization_angle=0,
                theta=np.deg2rad(-10),
                psi=np.pi / 2,
                # Source sits 700 nm above the top of the waveguide.
                center=[0, 0, wg_thickness + 700],
                extents=[14000, 14000, 0],
                normal=[0, 0, -1],
                power=1,
                w0=5200,
                normalize_by_sim=True,
            ),
            solver="local_direct",
            wavelength=wlen,
            simulation_space=sim_space,
            epsilon=epsilon,
        )
        monitor_list.append(
            optplan.FieldMonitor(
                name="mon_field_" + str(wlen),
                function=sim,
                normal=[0, 1, 0],
                center=[0, 0, 0],
            ))

        # Overlap with the fundamental waveguide mode, 1000 nm beyond the
        # end of the grating, to measure coupled power.
        wg_overlap = optplan.WaveguideModeOverlap(
            center=[-grating_len / 2 - 1000, 0, wg_thickness / 2],
            extents=[0.0, 1500, 1500.0],
            mode_num=0,
            normal=[-1.0, 0.0, 0.0],
            power=1.0,
        )
        power = optplan.abs(
            optplan.Overlap(simulation=sim, overlap=wg_overlap))**2
        monitor_list.append(
            optplan.SimpleMonitor(
                name="mon_power_" + str(wlen), function=power))

        if not MINIMIZE_BACKREFLECTION:
            # Spins minimizes the objective function, so to make `power`
            # maximized, we minimize `1 - power`.
            obj = 1 - power
        else:
            # TODO: Use a Gaussian overlap to calculate power emitted by grating
            # so we only need one simulation to handle backreflection and
            # transmission.
            refl_sim = optplan.FdfdSimulation(
                source=optplan.WaveguideModeSource(
                    center=wg_overlap.center,
                    extents=wg_overlap.extents,
                    mode_num=0,
                    normal=[1, 0, 0],
                    power=1.0,
                ),
                solver="local_direct",
                wavelength=wlen,
                simulation_space=sim_space,
                epsilon=epsilon,
            )
            refl_power = optplan.abs(
                optplan.Overlap(simulation=refl_sim, overlap=wg_overlap))**2
            monitor_list.append(
                optplan.SimpleMonitor(
                    name="mon_refl_power_" + str(wlen), function=refl_power))

            # We now have two sub-objectives: Maximize transmission and minimize
            # back-reflection, so we must define an objective that sets the
            # appropriate tradeoff between transmission and back-reflection.
            # Here, we choose the simplest objective to do this, but you can
            # use SPINS functions to design more elaborate objectives.
            obj = (1 - power) + 4 * refl_power

        objectives.append(obj)

    # Sum the per-wavelength objectives into the total objective.
    obj = sum(objectives)

    return obj, monitor_list
def create_transformations(
        obj: optplan.Function,
        monitors: List[optplan.Monitor],
        cont_iters: int,
        disc_iters: int,
        sim_space: optplan.SimulationSpaceBase,
        min_feature: float = 100,
        cont_to_disc_factor: float = 1.1,
) -> List[optplan.Transformation]:
    """Creates a list of transformations for the optimization.

    The grating coupler optimization proceeds as follows:
    1) Continuous optimization whereby each pixel can vary between device and
       background permittivity.
    2) Discretization whereby the continuous pixel parametrization is
       transformed into a discrete grating (Note that L2D is implemented here).
    3) Further optimization of the discrete grating by moving the grating
       edges.

    Args:
        obj: The objective function to minimize.
        monitors: List of monitors to keep track of.
        cont_iters: Number of iterations to run in continuous optimization.
        disc_iters: Number of iterations to run in discrete optimization.
        sim_space: Simulation space to use.
        min_feature: Minimum feature size in nanometers.
        cont_to_disc_factor: Discretize the continuous grating with feature size
            constraint of `min_feature * cont_to_disc_factor`.
            `cont_to_disc_factor > 1` gives discrete optimization more wiggle
            room.

    Returns:
        A list of transformations.
    """
    # Setup empty transformation list.
    trans_list = []
    # First do continuous relaxation optimization: every pixel may take any
    # value between 0 (background) and 1 (device permittivity).
    cont_param = optplan.PixelParametrization(
        simulation_space=sim_space,
        init_method=optplan.UniformInitializer(min_val=0, max_val=1))
    trans_list.append(
        optplan.Transformation(
            name="opt_cont",
            parametrization=cont_param,
            transformation=optplan.ScipyOptimizerTransformation(
                optimizer="L-BFGS-B",
                objective=obj,
                monitor_lists=optplan.ScipyOptimizerMonitorList(
                    callback_monitors=monitors,
                    start_monitors=monitors,
                    end_monitors=monitors),
                optimization_options=optplan.ScipyOptimizerOptions(
                    maxiter=cont_iters),
            ),
        ))
    # If true, do another round of continuous optimization with a discreteness
    # bias. NOTE: `DISCRETENESS_PENALTY` is a module-level flag defined
    # elsewhere in this file.
    if DISCRETENESS_PENALTY:
        # Define parameters necessary to normalize discrete penalty term.
        obj_val_param = optplan.Parameter(
            name="param_obj_final_val", initial_value=1.0)
        obj_val_param_abs = optplan.abs(obj_val_param)
        discrete_penalty_val = optplan.Parameter(
            name="param_discrete_penalty_val", initial_value=1.0)
        discrete_penalty_val_abs = optplan.abs(discrete_penalty_val)
        # Initial value of scaling is arbitrary and set for specific problem.
        disc_scaling = optplan.Parameter(
            name="discrete_scaling", initial_value=5)
        # Scale the penalty term relative to the magnitude of the objective at
        # the end of the previous (continuous) stage.
        normalization = disc_scaling * obj_val_param_abs / discrete_penalty_val_abs
        obj_disc = obj + optplan.DiscretePenalty() * normalization
        trans_list.append(
            optplan.Transformation(
                name="opt_cont_disc",
                # Capture the objective and penalty values on the continuous
                # parametrization so `normalization` above is meaningful.
                parameter_list=[
                    optplan.SetParam(
                        parameter=obj_val_param,
                        function=obj,
                        parametrization=cont_param),
                    optplan.SetParam(
                        parameter=discrete_penalty_val,
                        function=optplan.DiscretePenalty(),
                        parametrization=cont_param)
                ],
                parametrization=cont_param,
                transformation=optplan.ScipyOptimizerTransformation(
                    optimizer="L-BFGS-B",
                    objective=obj_disc,
                    monitor_lists=optplan.ScipyOptimizerMonitorList(
                        callback_monitors=monitors,
                        start_monitors=monitors,
                        end_monitors=monitors),
                    optimization_options=optplan.ScipyOptimizerOptions(
                        maxiter=cont_iters),
                )))
    # Discretize. Note we add a little bit of wiggle room by discretizing with
    # a slightly larger feature size than what our target is (by factor of
    # `cont_to_disc_factor`). This is to give the optimization a bit more wiggle
    # room later on.
    disc_param = optplan.GratingParametrization(
        simulation_space=sim_space, inverted=True)
    trans_list.append(
        optplan.Transformation(
            name="cont_to_disc",
            parametrization=disc_param,
            transformation=optplan.GratingEdgeFitTransformation(
                parametrization=cont_param,
                min_feature=cont_to_disc_factor * min_feature)))
    # Discrete optimization: move grating edges subject to the fabrication
    # minimum-feature-size constraint.
    trans_list.append(
        optplan.Transformation(
            name="opt_disc",
            parametrization=disc_param,
            transformation=optplan.ScipyOptimizerTransformation(
                optimizer="SLSQP",
                objective=obj,
                constraints_ineq=[
                    optplan.GratingFeatureConstraint(
                        min_feature_size=min_feature,
                        simulation_space=sim_space,
                        boundary_constraint_scale=1.0,
                    )
                ],
                monitor_lists=optplan.ScipyOptimizerMonitorList(
                    callback_monitors=monitors,
                    start_monitors=monitors,
                    end_monitors=monitors),
                optimization_options=optplan.ScipyOptimizerOptions(
                    maxiter=disc_iters),
            ),
        ))
    return trans_list
def view_opt(save_folder: str) -> None:
    """Shows the result of the optimization.

    This runs the auto-plotter to plot all the relevant data.
    See `examples/wdm2` IPython notebook for more details on how to process
    the optimization logs.

    Args:
        save_folder: Location where the log files are saved.
    """
    # The monitor description file lives next to this module.
    spec_file = os.path.join(os.path.dirname(__file__), "monitor_spec.yml")
    descriptions = log_tools.load_from_yml(spec_file)
    # Gather every log in the folder into one data frame and plot it.
    frame = log_tools.create_log_data_frame(log_tools.load_all_logs(save_folder))
    log_tools.plot_monitor_data(frame, descriptions)
def view_opt_quick(save_folder: str) -> None:
    """Prints the current result of the optimization.

    Unlike `view_opt`, which plots fields and optimization trajectories,
    `view_opt_quick` prints out scalar monitors in the latest log file. This
    is useful for having a quick look into the state of the optimization.

    Args:
        save_folder: Location where the log files are saved.
    """
    with open(workspace.get_latest_log_file(save_folder), "rb") as fp:
        log_data = pickle.load(fp)
    for key, data in log_data["monitor_data"].items():
        if np.isscalar(data):
            # `np.isscalar` is also True for plain Python numbers, which have
            # no `.squeeze()` method (the old `data.squeeze()` raised
            # AttributeError for them); a scalar needs no squeezing anyway.
            print("{}: {}".format(key, data))
def resume_opt(save_folder: str) -> None:
    """Resumes a stopped optimization.

    This restarts an optimization that was stopped prematurely. Note that
    resuming an optimization will not lead the exact same results as if the
    optimization were finished the first time around.

    Args:
        save_folder: Location where log files are saved. It is assumed that
            the optimization plan is also saved there.
    """
    # Deserialize the saved optimization plan.
    plan_file = os.path.join(save_folder, "optplan.json")
    with open(plan_file) as fp:
        plan = optplan.loads(fp.read())
    # Re-run the plan; `resume=True` makes it pick up where it stopped.
    problem_graph.run_plan(plan, ".", save_folder=save_folder, resume=True)
def gen_gds(save_folder: str, grating_len: float, wg_width: float) -> None:
    """Generates a GDS file of the grating.

    Args:
        save_folder: Location where log files are saved. It is assumed that
            the optimization plan is also saved there.
        grating_len: Length of the grating.
        wg_width: Width of the grating/bus waveguide.
    """
    # Load the optimization plan.
    with open(os.path.join(save_folder, "optplan.json")) as fp:
        plan = optplan.loads(fp.read())
    # Grid cell size (nm) of the simulation space of the final transformation.
    dx = plan.transformations[-1].parametrization.simulation_space.mesh.dx
    # Load the data from the latest log file.
    with open(workspace.get_latest_log_file(save_folder), "rb") as fp:
        log_data = pickle.load(fp)
    if log_data["transformation"] != plan.transformations[-1].name:
        raise ValueError("Optimization did not run until completion.")
    # Edge positions are stored in grid-cell units; convert to nanometers.
    coords = log_data["parametrization"]["vector"] * dx
    if plan.transformations[-1].parametrization.inverted:
        # An inverted grating needs explicit outer edges at 0 and grating_len.
        # NOTE(review): `np.insert(..., -1, ...)` places `grating_len` *before*
        # the last element rather than appending it — confirm this ordering is
        # intended.
        coords = np.insert(coords, 0, 0, axis=0)
        coords = np.insert(coords, -1, grating_len, axis=0)
    # `coords` now contains the location of the grating edges. Now draw a
    # series of rectangles to represent the grating.
    grating_poly = []
    for i in range(0, len(coords), 2):
        grating_poly.append(
            ((coords[i], -wg_width / 2), (coords[i], wg_width / 2),
             (coords[i + 1], wg_width / 2), (coords[i + 1], -wg_width / 2)))
    # Save the grating to `grating.gds`.
    grating = gdspy.Cell("GRATING", exclude_from_current=True)
    grating.add(gdspy.PolygonSet(grating_poly, 100))
    gdspy.write_gds(
        os.path.join(save_folder, "grating.gds"), [grating],
        unit=1.0e-9,
        precision=1.0e-9)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # Fix: the help text previously omitted the "view_quick" choice even
    # though it is an accepted action.
    parser.add_argument(
        "action",
        choices=("run", "view", "view_quick", "resume", "gen_gds"),
        help="Must be either \"run\" to run an optimization, \"view\" to "
        "view the results, \"view_quick\" to print the latest scalar "
        "monitor values, \"resume\" to resume an optimization, or "
        "\"gen_gds\" to generate the grating GDS file.")
    parser.add_argument(
        "save_folder", help="Folder containing optimization logs.")

    # Grating geometry in nanometers.
    # NOTE(review): `wg_width` equals `grating_len` here — presumably because
    # this example optimizes a line grating spanning the full width; confirm.
    grating_len = 12000
    wg_width = 12000

    args = parser.parse_args()
    if args.action == "run":
        run_opt(args.save_folder, grating_len=grating_len, wg_width=wg_width)
    elif args.action == "view":
        view_opt(args.save_folder)
    elif args.action == "view_quick":
        view_opt_quick(args.save_folder)
    elif args.action == "resume":
        resume_opt(args.save_folder)
    elif args.action == "gen_gds":
        gen_gds(args.save_folder, grating_len=grating_len, wg_width=wg_width)
|
This innovative diet is the result of years of extensive research, formulated as a nutritional solution for pets with severe food sensitivities. Its clinically proven and highly palatable formula contains a protein source that is broken down to an amino acid level, so it can be absorbed in the digestive tract with reduced risk of triggering an immune response. Optimal amounts of B vitamins and amino acids reinforce your dog’s skin barrier. Omega-3 fatty acids EPA and DHA help promote healthy skin and coat. An exclusive blend of fibers helps support healthy digestion in dogs prone to digestive upsets, including diarrhea. And with strict manufacturing procedures to help avoid cross contamination, you can confidently feed Royal Canin Ultamino to your dog for a happy, healthy life.
Corn starch, hydrolyzed poultry by-products aggregate, coconut oil, vegetable oil, potassium phosphate, natural flavors, powdered cellulose, calcium carbonate, sodium silico aluminate, dried chicory root, L-tyrosine, fructooligosaccharides, fish oil, choline chloride, L-lysine, vitamins [DL-alpha tocopherol acetate (source of vitamin E), L-ascorbyl-2-polyphosphate (source of vitamin C), niacin supplement, D-calcium pantothenate, biotin, pyridoxine hydrochloride (vitamin B6), riboflavin supplement, vitamin B12 supplement, thiamine mononitrate (vitamin B1), vitamin A acetate, folic acid, vitamin D3 supplement], DL-methionine, taurine, L-tryptophan, histidine, trace minerals [zinc proteinate, zinc oxide, manganese proteinate, ferrous sulfate, manganous oxide, copper sulfate, sodium selenite, copper proteinate], marigold extract (Tagetes erecta L.), rosemary extract, preserved with mixed tocopherols and citric acid, magnesium oxide, potassium iodate.
I'm a CVT and my dog Cooper was diagnosed with Inflammatory Bowel Disease and this food helped save his life (he was having weight loss, chronic vomiting & diarrhea. He's still currently on his drug therapy but this food helped with his recovery and he loves it! I hope in the future they come out with a canned version. Have been with Royal Canin for 5+ years and love their company.
Thank you for sharing Cooper's success story with us!
My dog has been on this product for three years. Her IBD issues are being managed beautifully.
This food saved my GoldenDoodle from chronic ear infections and paw licking as well as white fur discoloration. Thank God our vet prescribed this after trying a few other avenues of treatment. Nala has been ear infection free for 7 + years and her fur is as white as snow. Love this product!!!
Great to hear that Nala has done so well with the Ultamino.
One year ago, our 6 yr old beagle developed some skin and questionable issues, possibly allergies to foods. After months of feeding Ultamino, his coat shines beautifully, his digestion is reliable, his stools are consistent. Although we have determined that his occasional rashes may not be food related, we continue to feed him Ultamino because of the positive effects and nutritional support he receives. And add it to a combination of the Royal Canin Beagle dry food. We have added the Ultamino into our 2 yr old German Shepherd's Royal Canin German Shepherd dry food also as a mix and supplement. Both dogs have beautiful coats and consistent stool habits and easy weight control.
Excellent! Thank you for the valuable feedback!
I like the red, white and blue color scheme!
I have been feeding Royal Canin Ultamino to my American Cocker Spaniel for 4 years now. She has terrible food allergies and we tried everything!! She went on a very basic diet of steamed sweet potatoes for two weeks to clear up the allergies and then we began adding ingredients to the sweet potatoes slowly to see what gave her a reaction. We eliminated so many things and were suspecting it had something to do with the way her body breaks down proteins. Our vet recommended trying the Royal Canin food and we were ecstatic to find that it agreed with Gracie's system!! We have been feeding it ever since!
I’ve had my dog on Ultamino for about three years and it’s been a lifesaver to my handsome pitbull who is allergic to EVERYTHING! It’s the only food that doesn’t break him all out. Thank you Royal Canin!
My 10 year old Basenji has many health issues and after starting her on this food within a couple of weeks she was able to move around much better. I haven't seen her this healthy in years and I attribute it to this food as we haven't changed anything else in her diet or medications. And more importantly she loves it!
So, my Maddy dog was diagnosed with IBD and we had tried all the proteins and mixtures to help settle her poor tummy! Diarrhea and vomiting every other day.. it was heart wrenching. Vet suggested the ultamino and she had a great reaction (meaning no reaction)!! It’s pretty expensive but she’s worth every penny!
i fed this to our 4-legged kids for some time - until we lost molly & connie and fred got sick ... so our vet put him on royal canin gastro... which he really liked !!! thanks for ALWAYS being there for us and our 'kids' !!!!
A vet recommended this food for our highly allergic dog and we are glad we tried it. We tried every food out there that was recommended for dogs with allergies and skin problems and they all made him worse. He was on and off antibiotics for a long time trying to fight this infection. Then we decided to change his food one last time to the Ultamino and in a couple of weeks he started clearing up. It has been 10 months and he is the happiest and healthiest he has ever been.
|
"""
Forking TCP Servers stuff based on book material (but not 1:1)
"""
import socket
import argparse
import time
import os
from multiprocessing import Process
from functools import partial
def args_handle(handlers, string):
    """Argparse ``type`` callback: map a handler name to its callable.

    :param handlers: mapping of handler name -> handler callable
    :param string: the value supplied on the command line
    :return: the handler registered under ``string``
    :raises argparse.ArgumentTypeError: if ``string`` is not a known handler
    """
    try:
        # EAFP with a single dict lookup instead of the previous
        # `in handlers.keys()` membership test followed by `.get()`.
        return handlers[string]
    except KeyError:
        raise argparse.ArgumentTypeError("Invalid server type provided")
def parse_arguments():
    """
    Parse command line arguments.

    :return: argparse.Namespace object holding the arguments
    """
    # Collect every module-level handler function (named `handle_*`).
    handlers = {name: fn for name, fn in globals().items()
                if name.startswith('handle_')}
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=51150,
                        help='The port on which the server will listen')
    parser.add_argument('--mproc', type=int, default=10,
                        help='The maximum allowed clients / processes at a given time')
    parser.add_argument('--type', default='handle_fixed_request',
                        type=partial(args_handle, handlers),
                        help='The server type: ' + ', '.join(handlers.keys()))
    return parser.parse_args()
def handle_fixed_request(connection, address, size=512):
    """
    Fixed size request handler.

    Reads at most `size` bytes from `connection`, echoing each received chunk
    to stdout, then closes the connection and reports how long it was open.

    :param connection: the socket / connection object received
    :param address: the remote address
    :param size: The maximum size of each request
    """
    start = time.time()
    # Sockets yield bytes, so accumulate bytes (the old str accumulator broke
    # `total_data += data` under Python 3).
    total_data = b''
    try:
        while len(total_data) < size:
            data = connection.recv(size - len(total_data))
            if not data:
                # Peer closed the connection before `size` bytes arrived.
                break
            print("[SERVER | PID {0}]: {1}".format(os.getpid(), data.rstrip()))
            total_data += data
    except Exception as e:
        # `e.message` was Python 2 only; printing the exception works everywhere.
        print("Error ", e)
    finally:
        connection.close()
        end = time.time() - start
        print("[SERVER]: {0} closed connection after {1:.2f} seconds".format(address, end))
def handle_http_request(connection, address):
    """
    Deadly naive and simple HTTP handler.

    Reads until it sees a CRLF, splits the received data into a header part
    and a body part, validates the request line, and parses the header lines
    into a dict.

    :param connection: The socket
    :param address: The remote-end address
    """
    REQUIRED_HEADERS = ['Content-Length']  # NOTE(review): currently unused
    SUPPORTED_METHODS = ['GET', 'POST']
    HTTP_VERSIONS = ['HTTP/1.1']
    headers = dict()
    headers_raw = ''
    body = ''
    while True:
        h = connection.recv(1024)
        if not h:
            # Peer closed the connection before any CRLF was seen.
            break
        elif '\r\n' in h:
            # Everything up to the *last* CRLF in this chunk is treated as
            # headers, the remainder as body.
            # NOTE(review): assumes Python 2, where recv() returns str; under
            # Python 3 recv() yields bytes and these str comparisons would
            # fail. Also the header/body boundary in HTTP is '\r\n\r\n', not a
            # single CRLF — confirm intent.
            crlf_idx = h.rfind('\r\n')
            headers_raw += h[:crlf_idx]
            body = h[crlf_idx:]
            break
        headers_raw += h
    # Parse Headers
    request_line = headers_raw.split('\n')[0].split()
    # TODO: Validate the resource element
    if len(request_line) != 3 or request_line[0] not in SUPPORTED_METHODS or request_line[2] not in HTTP_VERSIONS:
        print("[ERROR]: Invalid HTTP request line: " + ' '.join(request_line))
        return
    # Header lines look like 'Name: value'; build a name -> value dict.
    headers = {e.split(':')[0].strip():e.split(':')[1].strip() for e in headers_raw.splitlines()[1:]}
    print(headers)
    # Get body
def server(port, mproc, server_type):
    """
    Accept TCP connections on `port`, forking one handler process per client.

    :param port: TCP port to bind on all interfaces
    :param mproc: listen backlog (rough cap on pending clients)
    :param server_type: handler invoked as server_type(connection, address)
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', port))
    s.listen(mproc)
    print("[SERVER]: Listening on {0}".format(s.getsockname()))
    while True:
        (connection, address) = s.accept()
        print("[SERVER]: Connection established with {0}".format(address))
        process = Process(target=server_type, args=(connection, address))
        process.daemon = True
        process.start()
        # Fix: the child process owns the socket now; close the parent's copy
        # so the descriptor is released and the connection actually terminates
        # when the child closes it (previously the parent leaked one fd per
        # client).
        connection.close()
if __name__ == "__main__":
    # Entry point: parse CLI options, then serve forever (Ctrl-C to stop).
    args = parse_arguments()
    server(port=args.port, mproc=args.mproc, server_type=args.type)
|
ElizabethMD Jewelry designs Understated elegance, that is the first thing that comes to mind when I see the jewelry at Elizabeth MD, you know the Audrey Hepburn type of understated elegance. Simple lines, beautiful metals and stones, equals simply lovely.
From her blog @ http://elizabethmdesigns.blogspot.com/ . I love designing one of a kind jewelry for one of a kind women. I love using stones and metals and creating beautiful wearable art. It is created by hand with silver, Swarovski crystals, and natural stones. I love creating custom one of a kind jewelry pieces.
I'm thinking these need to be matched-up with my vintage Western suede jacket!
And... this bracelet to go with the earrings !!!!
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import time
from math import log, sqrt
from random import choice
from six.moves import range
class Stat(object):
    """Accumulated playout value and visit count for one game state."""
    __slots__ = ('value', 'visits')
    def __init__(self, value=0.0, visits=0):
        self.value = value
        self.visits = visits
    def __repr__(self):
        # Format via attribute access on a single positional argument.
        return u"Stat(value={0.value}, visits={0.visits})".format(self)
class UCT(object):
    """Monte Carlo tree search agent using the UCB1 selection rule (UCT).

    Subclasses are expected to provide:
      * a ``name`` class attribute (reported in ``get_action`` extras),
      * an ``action_template`` format string for printing per-action stats,
      * ``self.end_values`` (set in their ``__init__``): a callable mapping a
        terminal state to a dict of per-player outcome values, and
      * ``calculate_action_values``.
    """
    def __init__(self, board, **kwargs):
        # Game adapter: supplies rules (legal actions, transitions, players).
        self.board = board
        # Compact representations of every state of the real game so far.
        self.history = []
        # Maps compact state -> Stat(value, visits) gathered from playouts.
        self.stats = {}
        # Deepest tree level reached by any simulation (for reporting).
        self.max_depth = 0
        # Extra diagnostic data returned alongside the chosen action.
        self.data = {}
        # Wall-clock budget, in seconds, for one get_action() call.
        self.calculation_time = float(kwargs.get('time', 30))
        # Hard cap on the number of actions in a single playout.
        self.max_actions = int(kwargs.get('max_actions', 1000))
        # Exploration constant, increase for more exploratory actions,
        # decrease to prefer actions with known higher win rates.
        self.C = float(kwargs.get('C', 1.4))
    def update(self, state):
        """Record a move played in the real game (stored in compact form)."""
        self.history.append(self.board.to_compact_state(state))
    def display(self, state, action):
        """Delegate rendering of a state/action pair to the board."""
        return self.board.display(state, action)
    def winner_message(self, winners):
        """Delegate formatting of the game result to the board."""
        return self.board.winner_message(winners)
    def get_action(self):
        # Causes the AI to calculate the best action from the
        # current game state and return it.
        self.max_depth = 0
        # `self.name` is supplied by subclasses -- see class docstring.
        self.data = {'C': self.C, 'max_actions': self.max_actions, 'name': self.name}
        self.stats.clear()
        state = self.history[-1]
        player = self.board.current_player(state)
        legal = self.board.legal_actions(state)
        # Bail out early if there is no real choice to be made.
        if not legal:
            return {'type': 'action', 'message': None, 'extras': self.data.copy()}
        if len(legal) == 1:
            return {
                'type': 'action',
                'message': self.board.to_json_action(legal[0]),
                'extras': self.data.copy(),
            }
        # Run as many simulations as the wall-clock budget allows.
        games = 0
        begin = time.time()
        while time.time() - begin < self.calculation_time:
            self.run_simulation()
            games += 1
        # Display the number of calls of `run_simulation` and the
        # time elapsed.
        self.data.update(games=games, max_depth=self.max_depth,
                         time=str(time.time() - begin))
        print(self.data['games'], self.data['time'])
        print("Maximum depth searched:", self.max_depth)
        # Store and display the stats for each possible action.
        self.data['actions'] = self.calculate_action_values(self.history, player, legal)
        for m in self.data['actions']:
            print(self.action_template.format(**m))
        # Return the action with the highest average value.
        return {
            'type': 'action',
            'message': self.board.to_json_action(self.data['actions'][0]['action']),
            'extras': self.data.copy(),
        }
    def run_simulation(self):
        # Plays out a "random" game from the current position,
        # then updates the statistics tables with the result.
        # A bit of an optimization here, so we have a local
        # variable lookup instead of an attribute access each loop.
        C, stats = self.C, self.stats
        visited_states = []
        history_copy = self.history[:]
        state = history_copy[-1]
        # `expand` stays True until the first state not yet in `stats`; only
        # that frontier layer gets new Stat entries in this playout.
        expand = True
        for t in range(1, self.max_actions + 1):
            legal = self.board.legal_actions(state)
            actions_states = [(a, self.board.next_state(history_copy, a)) for a in legal]
            if expand and not all(S in stats for a, S in actions_states):
                # Expansion phase: register unseen successors, then switch to
                # pure random rollout for the rest of this playout.
                stats.update((S, Stat()) for a, S in actions_states if S not in stats)
                expand = False
                if t > self.max_depth:
                    self.max_depth = t
            if expand:
                # If we have stats on all of the legal actions here, use UCB1.
                actions_states = [(a, S, stats[S]) for a, S in actions_states]
                log_total = log(sum(e.visits for a, S, e in actions_states) or 1)
                values_actions = [
                    (a, S, (e.value / (e.visits or 1)) + C * sqrt(log_total / (e.visits or 1)))
                    for a, S, e in actions_states
                ]
                max_value = max(v for _, _, v in values_actions)
                # Filter down to only those actions with maximum value under UCB1.
                actions_states = [(a, S) for a, S, v in values_actions if v == max_value]
            # Pick uniformly among the candidates (best under UCB1, or all
            # legal moves during the random-rollout phase).
            action, state = choice(actions_states)
            visited_states.append(state)
            history_copy.append(state)
            if self.board.is_ended(state):
                break
        # Back-propagation
        end_values = self.end_values(state)
        for state in visited_states:
            if state not in stats:
                continue
            S = stats[state]
            S.visits += 1
            # Credit the outcome to the player who moved *into* this state.
            S.value += end_values[self.board.previous_player(state)]
class UCTWins(UCT):
    """UCT variant that back-propagates win/loss values from the board."""
    name = "jrb.mcts.uct"
    action_template = "{action}: {percent:.2f}% ({wins} / {plays})"
    def __init__(self, board, **kwargs):
        super(UCTWins, self).__init__(board, **kwargs)
        self.end_values = board.win_values
    def calculate_action_values(self, history, player, legal):
        """Return per-action win statistics, best (highest win rate) first."""
        entries = []
        for action in legal:
            state = self.board.next_state(history, action)
            stat = self.stats[state]
            plays = stat.visits or 1
            entries.append({
                'action': action,
                'percent': 100 * stat.value / plays,
                'wins': stat.value,
                'plays': stat.visits,
            })
        entries.sort(key=lambda x: (x['percent'], x['plays']), reverse=True)
        return entries
class UCTValues(UCT):
    """UCT variant that back-propagates raw point values from the board."""
    name = "jrb.mcts.uctv"
    action_template = "{action}: {average:.1f} ({sum} / {plays})"
    def __init__(self, board, **kwargs):
        super(UCTValues, self).__init__(board, **kwargs)
        self.end_values = board.points_values
    def calculate_action_values(self, history, player, legal):
        """Return per-action point statistics, best (highest average) first."""
        entries = []
        for action in legal:
            state = self.board.next_state(history, action)
            stat = self.stats[state]
            entries.append({
                'action': action,
                'average': stat.value / (stat.visits or 1),
                'sum': stat.value,
                'plays': stat.visits,
            })
        entries.sort(key=lambda x: (x['average'], x['plays']), reverse=True)
        return entries
|
If Only I Could Now..
Well, as expected, I didn’t get the 5 things (oh yea, and about 40 sub-things) done today that I had planned on. However, I did take a big chunk out of it. I did clean the old place, making my hands raw. There is really less than one carload left at the old apartment now. Picking that up tomorrow at about 9:30am and then dropping off the keys with the manager of the building. We officially have the place until the 31st but I want to give them a chance to rent it out to someone else so we may get some of that last month’s rent back.
The office setup is next on the agenda tomorrow. I hope to have it up and running so that I can deliver the stuff I was supposed to deliver yesterday and my debian packages. We got a nice bedframe at IKEA today and I set it up, it took far too long but it will be good for us I think to have. Now we have one more thing we should get rid of - the foundation of the bed we used to use. It’s probably about $10 worth of injection-molded plastic, but Sleep Number sells it for $300, so we can probably get rid of it in short order.
After the office, it’s unpacking for the rest of everything. We have a general layout of what we want the apartment to look like, but we’re having to change it because the rooms aren’t quite the dimensions that we had planned on originally. Also, I think we’re going to have to move the light somewhere else, because it’s not going to jibe well with the recliner part of the couch.
|
from botstory.ast import story_context
from botstory.middlewares import any, option, sticker, text
from bson.objectid import ObjectId
import datetime
import emoji
import logging
import os
import random
import re
from todo import orm, pagination_list, reflection
from todo.lists import lists_document
from todo.tasks import \
task_creation_stories, \
task_details_renderer, \
task_state_stories, \
task_story_helper, \
tasks_document
logger = logging.getLogger(__name__)
logger.debug('parse stories')
# User-facing help text. The ':alias:' tokens are emoji shortcodes that are
# expanded into real emoji below.
SHORT_HELP = 'Short Help:\n' \
             '===========\n' \
             '\n' \
             ':white_check_mark: Please give me few names of tasks (command: add new task)\n' \
             ':white_check_mark: In any time when you work with your task you can change its status ' \
             'from open :arrow_right: in progress :arrow_right: done ' \
             '(commands: start, stop, done, reopen)\n' \
             ':white_check_mark: list all your tasks (command: list)\n' \
             ':white_check_mark: details about task (command: last details)\n' \
             ':white_check_mark: work with last task (command: start last, stop last, done last, ' \
             'reopen last, last task, remove last)\n' \
             ':white_check_mark: change all tasks at once (commands: start all, stop all, ' \
             'done all, reopen all, remove all)\n' \
             '\n' \
             'All my source could be found here:\n' \
             'https://github.com/botstory/todo-bot/, feedback and PRs are welcomed!'
# Same help text with the emoji aliases rendered to unicode emoji.
SHORT_HELP_EMOJI = emoji.emojize(SHORT_HELP, use_aliases=True)
def setup(story):
pagination_list.setup(story)
task_state_stories.setup(story)
@story.on_start()
def on_start_story():
"""
User just pressed `get started` button so we can greet him
"""
@story.part()
async def greetings(message):
logger.info('greetings')
await story.say('Nice to see you here!\n'
'My goal is to help you with your list of tasks.',
user=message['user'])
await story.say(SHORT_HELP_EMOJI,
user=message['user'])
await story.ask('let\'s begin!',
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}],
user=message['user'])
@story.on(text.text.EqualCaseIgnore('all'))
def list_of_lists_story():
@story.part()
async def show_list_of_stories(ctx):
logger.info('list of tasks')
# TODO: remove return solve one test, but why?
return await pagination_list.pagination_loop(
list_title='All lists:',
target_document=reflection.class_to_str(lists_document.ListDocument),
title_field='name',
page_length=os.environ.get('LIST_PAGE_LENGTH', 4),
**ctx,
)
@story.on([
option.Equal('LIST_TASKS_NEW_FIRST'),
text.Match('^list(( all)? tasks)?', flags=re.IGNORECASE),
text.EqualCaseIgnore('todo'),
])
def list_of_tasks_story():
@story.part()
async def list_of_tasks(ctx):
logger.info('list of tasks')
# TODO: should filter the last one
return await pagination_list.pagination_loop(
ctx,
subtitle_renderer=reflection.class_to_str(tasks_document.task_details_renderer),
list_title='List of actual tasks:',
list_type='template',
page_length=os.environ.get('LIST_PAGE_LENGTH', 4),
target_document=reflection.class_to_str(tasks_document.TaskDocument),
title_field='description',
)
@story.on(text.text.EqualCaseIgnore('new list'))
def new_list_tasks_story():
@story.part()
async def ask_name(message):
logger.info('new list')
return await story.ask(
'You are about to create new list of tasks.\nWhat is the name of it?',
user=message['user'],
)
@story.part()
async def create_list(ctx):
logger.info('create list')
list_name = text.get_raw_text(ctx)
new_list = await lists_document.ListDocument(**{
'user_id': ctx['user']['_id'],
'name': list_name,
'created_at': datetime.datetime.now(),
'updated_at': datetime.datetime.now(),
}).save()
await story.say('You\'ve just created list of tasks: '
'`{}`.\n'
'Now you can add tasks to it.'.format(list_name), user=ctx['user'])
@story.on([
option.Equal('REMOVE_LAST_TASK'),
text.Match('delete last', flags=re.IGNORECASE),
text.Match('drop last', flags=re.IGNORECASE),
text.Match('forget about last', flags=re.IGNORECASE),
text.Match('kill last', flags=re.IGNORECASE),
text.Match('remove (last|next)', flags=re.IGNORECASE),
])
def remove_last_task_story():
@story.part()
async def remove_last_task(ctx):
logger.info('remove last task')
try:
last_task = await task_story_helper.last_task(ctx)
desc = last_task.description
logger.debug('going to remove task `{}`'.format(desc))
await tasks_document.TaskDocument.objects({
'_id': last_task._id,
}).delete_one()
msg = emoji.emojize(':ok: task `{}` was removed'.format(desc), use_aliases=True)
logger.info(msg)
await story.ask(msg,
quick_replies=[{
'title': 'remove next',
'payload': 'REMOVE_LAST_TASK',
}, {
'title': 'next details',
'payload': 'LAST_TASK_DETAILS',
}, {
'title': 'add task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
except orm.errors.DoesNotExist:
logger.warning('user doesnt have tickets to remove')
await story.ask(emoji.emojize(
'You don\'t have any tickets yet.\n'
':information_source: Please send my few words about it and I will add it to your TODO list.',
use_aliases=True,
),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}],
user=ctx['user'],
)
@story.on(option.Match('REMOVE_TASK_(.+)'))
def remove_task_story():
@story.part()
async def try_to_remove_task(ctx):
task_id = story_context.get_message_data(ctx, 'option', 'matches')[0]
try:
task = await tasks_document.TaskDocument.objects.find_one({
'_id': ObjectId(task_id),
})
await tasks_document.TaskDocument.objects({
'_id': task._id,
}).delete_one()
await story.ask(emoji.emojize(':ok: Task `{}` was deleted', use_aliases=True).format(task.description),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
except orm.errors.DoesNotExist:
await story.ask(emoji.emojize(':confused: Can\'t find task.\n'
'It seems that it was already removed.', use_aliases=True),
quick_replies=[{
'title': 'add new task',
'payload': 'ADD_NEW_TASK',
}, {
'title': 'list tasks',
'payload': 'LIST_TASKS_NEW_FIRST',
},
],
user=ctx['user'])
# Triggered by the REMOVE_ALL_TASKS quick-reply payload or the
# "delete/drop/forget/kill/remove all [tasks]" text commands.
@story.on([
    option.Equal('REMOVE_ALL_TASKS'),
    text.Match('delete all(?: tasks)?(?: jobs)?', flags=re.IGNORECASE),
    text.Match('drop all(?: tasks)?', flags=re.IGNORECASE),
    text.Match('forget all(?: tasks)?', flags=re.IGNORECASE),
    text.Match('kill all(?: tasks)?', flags=re.IGNORECASE),
    text.Match('remove all(?: tasks)?', flags=re.IGNORECASE),
])
def remove_all_tasks_story():
    """Two-step story: ask for confirmation, then drop all of the user's tasks."""
    @story.part()
    async def ask_whether_user_really_want_to_remove_all_tasks(ctx):
        # Step 1: ask the user to confirm the destructive operation.
        logger.info('ask whether remove all tasks or not')
        return await story.ask(emoji.emojize(
            ':question: Do you really want to remove all your tasks '
            'of current list?',
            use_aliases=True,
        ), quick_replies=[{
            'title': 'Sure, remove all!',
            'payload': 'CONFIRM_REMOVE_ALL'
        }, {
            'title': 'Nope',
            'payload': 'REFUSE_REMOVE_ALL'
        }], user=ctx['user'])

    # Step 2: runs only when the reply matches one of the confirmation
    # patterns (payload, like-sticker, or free-text agreement).
    @story.case([
        option.Equal('CONFIRM_REMOVE_ALL'),
        sticker.Like(),
        text.Match('confirm', flags=re.IGNORECASE),
        text.Match('ok', flags=re.IGNORECASE),
        text.Match('(.*)remove(.*)', flags=re.IGNORECASE),
        text.Match('sure(.*)', flags=re.IGNORECASE),
        text.Match('yeah', flags=re.IGNORECASE),
        text.Match('yes', flags=re.IGNORECASE),
    ])
    def confirm_to_remove_all():
        @story.part()
        async def remove_all_tasks(ctx):
            # Bulk-delete every task owned by this user and report the count.
            logger.info('remove all tasks')
            tasks_count = await tasks_document.TaskDocument.objects({
                'user_id': ctx['user']['_id'],
            }).delete()
            msg = emoji.emojize(':ok: {} tasks were removed'.format(tasks_count), use_aliases=True)
            logger.info(msg)
            await story.ask(msg,
                            quick_replies=[{
                                'title': 'remove next',
                                'payload': 'REMOVE_LAST_TASK',
                            }, {
                                'title': 'next details',
                                'payload': 'LAST_TASK_DETAILS',
                            }, {
                                'title': 'add task',
                                'payload': 'ADD_NEW_TASK',
                            }, {
                                'title': 'list',
                                'payload': 'LIST_TASKS_NEW_FIRST',
                            },
                            ],
                            user=ctx['user'])
# Free-text removal: "delete/drop/forget about/kill/remove <target>".
# The first capture group is the name of the thing to remove.
@story.on([
    text.Match('delete (.*)', flags=re.IGNORECASE),
    text.Match('drop (.*)', flags=re.IGNORECASE),
    text.Match('forget about (.*)', flags=re.IGNORECASE),
    text.Match('kill (.*)', flags=re.IGNORECASE),
    text.Match('remove (.*)', flags=re.IGNORECASE),
])
def remove_something_story():
    """Got a request to remove something that may be a task or a whole list."""
    @story.part()
    async def remove_list_or_task(ctx):
        """Try deleting a task by description first, then a list by name."""
        logger.info('remove list or task')
        target = story_context.get_message_data(ctx)['text']['matches'][0]
        logger.info('target {}'.format(target))
        logger.debug('try to remove task {}'.format(target))
        count = await tasks_document.TaskDocument.objects({
            'description': target,
            'user_id': ctx['user']['_id'],
        }).delete()
        logger.info('remove {} lists'.format(count))
        if count > 0:
            await story.say(emoji.emojize(':ok: Task `{}` was removed'.format(target), use_aliases=True),
                            user=ctx['user'])
            return
        # No task matched - fall back to removing a list with that name.
        logger.debug('try to remove list {}'.format(target))
        count = await lists_document.ListDocument.objects({
            'name': target,
            'user_id': ctx['user']['_id'],
        }).delete()
        logger.info('remove {} lists'.format(count))
        if count > 0:
            await story.say(emoji.emojize(':ok: List `{}` was removed'.format(target), use_aliases=True),
                            user=ctx['user'])
            return
        # Neither a task nor a list matched the given name.
        await story.say(emoji.emojize(':confused: We can\'t find `{}` what do you want to remove?'.format(target),
                                      use_aliases=True),
                        user=ctx['user'])
# "more about <task>" / "see <task>" - show a task matched by description.
@story.on([
    text.Match('more about(.+)', flags=re.IGNORECASE),
    text.Match('see(.+)', flags=re.IGNORECASE),
])
def task_details_story_by_text_match():
    @story.part()
    async def send_task_details(ctx):
        """Render details of the single task whose description equals the query."""
        query = story_context.get_message_data(ctx, 'text', 'matches')[0].strip()
        try:
            task = await tasks_document.TaskDocument.objects.find({
                'description': query,
            })
            if len(task) == 1:
                await task_details_renderer.render(story, ctx['user'], task[0])
            else:
                # NOTE(review): zero or multiple matches are silently ignored.
                pass
                # TODO: tell the user about ambiguity / no match
        except orm.errors.DoesNotExist:
            # TODO: tell the user the task could not be found
            pass
# Quick-reply payload "TASK_DETAILS_<id>" - show task details by ObjectId.
@story.on(option.Match('TASK_DETAILS_(.+)'))
def task_details_story_by_option_match():
    @story.part()
    async def send_task_details_back(ctx):
        """Look the task up by the id embedded in the payload and render it."""
        task_id = story_context.get_message_data(ctx, 'option', 'matches')[0]
        try:
            task = await tasks_document.TaskDocument.objects.find_one({
                '_id': ObjectId(task_id),
            })
            await task_details_renderer.render(story, ctx['user'], task)
        except orm.errors.DoesNotExist:
            # Task vanished since the quick reply was shown - apologise and
            # offer the usual follow-up actions.
            await story.ask(emoji.emojize(
                ':confused: Can\'t find task details.',
                use_aliases=True),
                quick_replies=[{
                    'title': 'add new task',
                    'payload': 'ADD_NEW_TASK',
                }, {
                    'title': 'list tasks',
                    'payload': 'LIST_TASKS_NEW_FIRST',
                }],
                user=ctx['user'])
# Show the details of the most recent ("last") task.
@story.on([
    option.Equal('LAST_TASK_DETAILS'),
    text.Match('last(?: task)?', flags=re.IGNORECASE),
    text.Match('next (details|task)', flags=re.IGNORECASE),
    text.Match('^(task )?details', flags=re.IGNORECASE),
])
def last_task_story():
    @story.part()
    async def send_last_task_details(ctx):
        """Render the last task, or prompt to create one if there is none yet."""
        try:
            await task_details_renderer.render(story, ctx['user'],
                                               task=await task_story_helper.last_task(ctx))
        except orm.errors.DoesNotExist:
            await story.ask('There is no last task yet. Please add few.',
                            user=ctx['user'],
                            quick_replies=[{
                                'title': 'Add New Task',
                                'payload': 'ADD_NEW_TASK'
                            }])
# Help/about triggers: ABOUT_ME payload, "?", "/?", "-h", "--help",
# "help [me]" and "what can I do here?".
@story.on([
    option.Equal('ABOUT_ME'),
    text.Equal('?'),
    text.Equal('/?'),
    text.EqualCaseIgnore('-h'),
    text.EqualCaseIgnore('--help'),
    text.Match('help( me)?', flags=re.IGNORECASE),
    text.EqualCaseIgnore('what can I do here?'),
])
def about_me_story():
    @story.part()
    async def say_about_me(ctx):
        """Send the short help text with the two main quick replies."""
        await story.ask(SHORT_HELP_EMOJI,
                        user=ctx['user'],
                        quick_replies=[{
                            'title': 'add new task',
                            'payload': 'ADD_NEW_TASK',
                        }, {
                            'title': 'list tasks',
                            'payload': 'LIST_TASKS_NEW_FIRST',
                        }])
# React to a "like" sticker with a random friendly reply.
@story.on(receive=sticker.Like())
def like_story():
    @story.part()
    async def test_message(ctx):
        # One emoji alias or phrase is picked at random per message.
        msgs = [':wink:', ':heart_eyes:', ':smirk:', ':wink:', 'Thanks!', 'I like you too!']
        await story.ask(emoji.emojize(random.choice(msgs), use_aliases=True),
                        quick_replies=[{
                            'title': 'add new task',
                            'payload': 'ADD_NEW_TASK',
                        }, {
                            'title': 'list tasks',
                            'payload': 'LIST_TASKS_NEW_FIRST',
                        }],
                        user=ctx['user'])
# Register the task-creation story handlers on the same story dispatcher.
task_creation_stories.setup(story)
# Catch-all: any message no earlier story matched.
@story.on(receive=any.Any())
def any_story():
    """
    And all the rest messages as well
    """
    @story.part()
    async def something_else(message):
        """Apologise and suggest the two main quick actions."""
        logger.info('something_else')
        await story.ask(
            emoji.emojize(':confused: Sorry I don\'t know, how to react on such message yet.\n'
                          'Here are few things that you can do quickly',
                          use_aliases=True),
            quick_replies=[{
                'title': 'add new task',
                'payload': 'ADD_NEW_TASK',
            }, {
                'title': 'list tasks',
                'payload': 'LIST_TASKS_NEW_FIRST',
            }],
            user=message['user'])
|
Readers' critiques or analyses of specific news reports from Morning Edition, All Things Considered, or Weekend Edition Saturday or Sunday.
Good plan, Myt, with your sanity-preserving management of NPR's nonsensical world.
NPR's favorite lifer Earth Mom, Susan Stamberg, swung on down to (socialist) Uruguay on this morn's ME, making an ersatz pilgrimage to the writer/philosopher Eduardo Galeano. Well, it's ersatz because SS isn't there to sit at his feet, but to maybe put him in his place for having impressed Hugo Chavez - and now Obama (maybe), with his seminal book, 'Open Veins of Latin America'.
Now, listening to Galeano (she actually lets him talk a bit - and she's damn lucky that he speaks elegant English), is the first time I myself have heard him. (I must admit, NPR actually INTRODUCED me to Galeano - but it was the WAY they did it that prompts my critique...!) Anyway, it took about two sentences to realize that he is obviously a wise, poetic, and profound man, whose courage and guts in truthful offerings is beyond doubt. But because he has been so truthful through the years, he has naturally become a target for those who don't exactly like his truths.
I lack the will to fully dissect SS's treatment of him, but her obvious (and predictable) approach demands BS-calling. After branding him a 'lefty', she figures he's just a dreamy old duffer, probably loony, and the best way to handle him is to go the Gringo route and put him in his place: a little cabinet of wistfulness, so as to disempower what is obviously a fearful notion to her - and NPR: the power of words. She is oh, so gentle, but when you think about it, thoroughly condescending and dismissive.
Stamberg, and NPR in general, never seem to grasp the value of Latin American literariness - the injections of fantasy, wit, drama, and zest, that make the stale DC-centricism look like cheap white bread. And after the segment was over, the sense of embarrassment in the voices of Inskreep and Renaay was palpable. They not only just don't get it, they are too dainty to see any possible value in it ('it' being cultural realities that might rival their own narcissistic beliefs). Plus, they're unspeakably jealous, of course.
To me, Galeano is a great comfort: an impeccably sane voice, and best approached via non-NPR avenues. We need more Galeanos and fewer NPRs.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 by Pablo Martín <goinnn@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Initial code got from http://djangosnippets.org/users/danielroseman/
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Return the text of a file located relative to this setup script.

    The path components in *rnames are joined under the directory that
    contains setup.py.  A context manager guarantees the file handle is
    closed even if reading fails (the original leaked the handle).
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
        return f.read()
# Package metadata for django-multiselectfield; long_description is built
# from README.rst and CHANGES.rst so PyPI mirrors the repository docs.
setup(
    name="django-multiselectfield",
    version="0.1.2",
    author="Pablo Martin",
    author_email="goinnn@gmail.com",
    description="Django multiple select field",
    long_description=(read('README.rst') + '\n\n' + read('CHANGES.rst')),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    license="LGPL 3",
    keywords="django,multiple,select,field,choices",
    url='https://github.com/goinnn/django-multiselectfield',
    packages=find_packages(),
    include_package_data=True,
    # zip_safe=False so Django can find templates/static files on disk.
    zip_safe=False,
)
|
This inspirational rug will be a perfect addition to any room. Children will learn the alphabet while being reminded of the love that God has for them. Uppercase letters surround the border while the verse, John 3:16, is featured in the center.
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
GeoHealth
A QGIS plugin
-------------------
begin : 2014-08-20
copyright : (C) 2014 by Etienne Trimaille
email : etienne@trimaille.eu
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.core import QgsSpatialIndex, QgsFeatureRequest, QgsGeometry, Qgis
class LayerIndex(object):
    """Check an intersection between a QgsGeometry and a QgsVectorLayer.

    A QgsSpatialIndex narrows candidates by bounding box; exact geometry
    intersection is then verified feature by feature.
    """

    def __init__(self, layer):
        self.__layer = layer
        # QGIS >= 2.7 builds the spatial index straight from an iterator;
        # older releases require inserting features one at a time.
        if Qgis.QGIS_VERSION_INT >= 20700:
            self.__index = QgsSpatialIndex(layer.getFeatures())
        else:
            self.__index = QgsSpatialIndex()
            for feature in layer.getFeatures():
                self.__index.insertFeature(feature)

    def __feature_by_id(self, feature_id):
        # Fetch a single feature from the layer by its id.
        request = QgsFeatureRequest().setFilterFid(feature_id)
        return next(self.__layer.getFeatures(request))

    def contains(self, point):
        """Return true if the point intersects the layer."""
        for candidate_id in self.__index.intersects(point.boundingBox()):
            feature = self.__feature_by_id(candidate_id)
            if point.intersects(QgsGeometry(feature.geometry())):
                return True
        return False

    def count_intersection(self, buffer_geom, nb):
        """Return true if the buffer intersects at least `nb` entities."""
        hits = 0
        for candidate_id in self.__index.intersects(buffer_geom.boundingBox()):
            feature = self.__feature_by_id(candidate_id)
            if buffer_geom.intersects(QgsGeometry(feature.geometry())):
                hits += 1
            # Checked every iteration (as in the original), so nb <= 0
            # returns True as soon as any candidate exists.
            if hits >= nb:
                return True
        return False
|
CHILD ABUSE Abuse of a child by another person in a sexual, physical or emotional manner.
ELDER ABUSE Sexual, financial, physical, or emotional abuse of an elderly person by another, or conduct resulting in harm.
RELIGIOUS ABUSE Abuse of a person by a member of a religious organisation, or abuse committed as a result of a ritual permitted by that organisation, particularly where the religious person uses their position in order to exert control.
PUBLIC BODY Abuse committed by a person employed or managed by a public body or the negligence of an employee of a public body which leads to abuse taking place.
DOMESTIC ABUSE the abuse of one partner within an intimate or family relationship. It is the repeated, random and habitual use of intimidation to control a partner. The abuse can be physical, emotional, psychological, financial or sexual.
HUMAN RIGHTS Cases involving a breach of Human Rights as defined by the Human Rights Act 1998.
SEXUAL EXPLOITATION Defined as a type of sexual abuse in which children are sexually exploited for money, power or status.
INSTITUTIONAL ABUSE Institutional abuse is the mistreatment, abuse or neglect of an adult at risk by a regime or individuals.
RITUAL ABUSE Defined as a severe form of abuse of children, adolescents and adults, consisting of physical, sexual, emotional and spiritual abuse involving the use of rituals or ceremonial abuse.
It has become increasingly recognised that children and other vulnerable members of Society including adults with learning disabilities are vulnerable to abuse. This can take the form of for example physical, sexual, emotional or psychological abuse.
Abuse can be due to neglect, inappropriate restraint, the provision of inappropriate services and treatment, or the withholding of appropriate services and treatment.
Abuse can potentially occur wherever there is a power imbalance in a relationship, for example, in Residential Care Homes, respite and day care organisations and in Schools.
Any person who has experienced abuse may be entitled to claim financial compensation. In many cases, compensation could be recovered by pursuing a claim for compensation in the Civil Courts.
In many instances where a person has suffered an injury as a result of a crime of violence, that person may be entitled to receive compensation from the Criminal Injuries Compensation Authority (CICA). Often however, the amount awarded can be relatively small.
|
import logging
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.websocket
import math
import os
import json
import constants
from decorators import new_cursor
class Web_handler(tornado.web.RequestHandler):
    """Main page: renders the port-group editor and saves posted configs."""

    def get(self):
        # Render the editor with all known groups plus the ports of the
        # currently selected group (if a group_id query param is given).
        self.render(
            'ports.html',
            GROUPED_PORTS=constants.GROUPED_PORTS,
            CONNECTIONS_PER_PORT=constants.CONNECTIONS_PER_PORT,
            PORT_COLORS=constants.PORT_COLORS,
            groups=self.groups(),
            selected_group=self.selected_group(),
            ports=json.dumps(self.ports()),
        )

    def ports(self):
        """Return the port rows of the requested group as a list of dicts.

        Returns [] when no group_id query argument was supplied.
        """
        group_id = self.get_argument('group_id', None)
        if not group_id:
            return []
        with new_cursor() as c:
            rows = c.execute(
                'SELECT * FROM ports WHERE group_id=?',
                (group_id,)
            ).fetchall()
        data = []
        for row in rows:
            data.append({
                'port': row['port'],
                'connection': row['connection'],
                # 'enabled' is stored as 'Y'/'N' text in SQLite.
                'enabled': True if row['enabled'] == 'Y' else False,
                'delay': row['delay'],
            })
        return data

    def selected_group(self):
        """Return the row of the group named by ?group_id, or None."""
        group_id = self.get_argument('group_id', None)
        if not group_id:
            return
        return self.group_by_id(group_id)

    def group_by_id(self, group_id):
        """Fetch a single group row by primary key."""
        with new_cursor() as c:
            return c.execute(
                'SELECT * FROM groups WHERE id=?',
                (group_id,)
            ).fetchone()

    def parse_delay(self, delay):
        """Coerce a user-supplied delay to int; empty/invalid input -> 0."""
        if not delay:
            return 0
        try:
            return int(delay)
        except ValueError:
            return 0

    def post(self):
        """Create/replace the posted group's port configuration, then redirect."""
        name = self.get_argument('groupname')
        if not name:
            self.redirect('/')
            return
        with new_cursor() as c:
            # Reuse an existing group with the same name, or create it.
            group = c.execute(
                'SELECT * FROM groups WHERE name=?;', (name,)
            ).fetchone()
            if group:
                group_id = group['id']
            else:
                c.execute('INSERT INTO groups (name) VALUES (?);', (name,))
                group_id = c.lastrowid
            data = json.loads(self.get_argument('json'))
            pdata = []
            for d in data:
                pdata.append(
                    (
                        group_id,
                        d['port'],
                        d['connection'],
                        'Y' if d['enabled'] else 'N',
                        self.parse_delay(d['delay']),
                    )
                )
            # Replace the group's ports wholesale: delete then bulk insert.
            c.execute('DELETE FROM ports WHERE group_id=?;', (group_id,))
            c.executemany(
                '''
                INSERT INTO ports
                (group_id, port, connection, enabled, delay)
                VALUES
                (?, ?, ?, ?, ?)
                ''',
                pdata
            )
        self.redirect('/?group_id={}'.format(group_id))
        return

    def groups(self):
        """Return all groups ordered by name."""
        with new_cursor() as c:
            return c.execute(
                'SELECT * FROM groups ORDER BY name ASC;'
            ).fetchall();
class Firing_progress_handler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint that broadcasts firing progress to all clients."""

    clients = []  # every currently-open websocket connection (class-wide)

    @classmethod
    def send_message(cls, message):
        """Push `message` to each connected client."""
        for client in cls.clients:
            client.write_message(message)

    def open(self):
        Firing_progress_handler.clients.append(self)

    def on_message(self, message):
        # Clients only listen; inbound messages are ignored.
        pass

    def on_close(self):
        Firing_progress_handler.clients.remove(self)
class Fire_handler(tornado.web.RequestHandler):
    """Starts (or cancels) a firing run in a single background thread.

    The in-flight thread is tracked on the class itself so concurrent
    POSTs cannot start two runs at once.
    """

    t = None  # the currently running firing thread, if any

    def get(self):
        pass

    def post(self):
        # Imported lazily so the module can load without firing hardware.
        import threading
        import fire
        cancel = self.get_argument('cancel', None)
        if cancel:
            # Cooperative cancellation: the fire module polls this flag.
            fire.stop = True
            return
        if Fire_handler.t and Fire_handler.t.isAlive():
            # A firing run is already in progress - ignore this request.
            return
        # BUG FIX: `args` must be a tuple. The original passed the bare
        # string `(self.get_argument('group_id'))`, which threading.Thread
        # iterates character-by-character when calling the target.
        Fire_handler.t = threading.Thread(
            target=fire.fire,
            args=(self.get_argument('group_id'),),
        )
        fire.stop = False
        Fire_handler.t.daemon = True
        Fire_handler.t.start()
def main():
    """Create the DB schema, build the Tornado app, and serve on port 8000."""
    # init_db() creates tables as a side effect and returns nothing, so its
    # result is deliberately not bound (the original assigned it to an
    # unused `con` variable, which misleadingly suggested a connection).
    init_db(constants.DATABASE_FILE)
    application = tornado.web.Application(
        [
            (r'/', Web_handler),
            (r'/firing-progress', Firing_progress_handler),
            (r'/fire', Fire_handler),
        ],
        debug=True,
        xsrf_cookies=False,
        autoescape=None,
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
        static_path=os.path.join(os.path.dirname(__file__), 'static'),
    )
    application.listen(8000)
    tornado.options.parse_command_line()
    # Blocks forever serving requests.
    tornado.ioloop.IOLoop.instance().start()
def init_db(db_file):
    """Create the `groups` and `ports` tables if they do not already exist.

    NOTE(review): the `db_file` parameter is currently unused - new_cursor()
    presumably opens the configured database itself; confirm and either wire
    the argument through or drop it.  Returns None.
    """
    with new_cursor() as c:
        c.execute('''
            CREATE TABLE IF NOT EXISTS groups (
                id INTEGER PRIMARY KEY,
                name TEXT
            );
        ''')
        c.execute('''
            CREATE TABLE IF NOT EXISTS ports (
                group_id INT NOT NULL,
                port INT NOT NULL,
                connection INT NOT NULL,
                enabled TEXT,
                delay INT DEFAULT 0,
                PRIMARY KEY (group_id, port, connection)
            );
        ''')
# Script entry point.
if __name__ == '__main__':
    main()
|
The Château de Hohbarr is a medieval castle, first built in 1100, above the city of Saverne in what is now the French département of Bas-Rhin. It was built on sandstone rock 460m above the valley of Zorn and the plain of Alsace. Because of this, it has been called the eye of Alsace.
|
# -*- coding:utf-8 -*-
"""
Script to see if the model is like the model of [1]_ by plotting the network
frequency against the oscillation rate.
References
----------
.. [1] Fourcaud-Trocmé, N., Courtiol, E., Buonviso, N., & Voegtlin, T. (2011).
Stability of fast oscillations in the mammalian olfactory bulb: experiments
and modeling. Journal of physiology, Paris, 105(1-3), 59–70.
doi:10.1016/j.jphysparis.2011.07.009
"""
import tables
import numpy as np
import matplotlib.pyplot as plt
from h5manager import get_all_attrs
def plot_netw_freq(db_filename, point_color, label):
    """Scatter the network FFT peak (FFTMAX) against the input excitatory
    conductance g_Ein0 for every simulation stored in the HDF5 database."""
    h5db = tables.openFile(db_filename)
    # Pull (g_Ein0, FFTMAX) for each simulation in one pass.
    wanted = (('paramset', '_v_attrs', 'Input', 'g_Ein0'),
              ('results', '_v_attrs', 'FFTMAX', 0))
    values = np.array(get_all_attrs(h5db, wanted))
    plt.plot(values[:, 0], values[:, 1], ' .', color=point_color, label=label)
    plt.legend(loc="upper left")
    h5db.close()
def plot_freqs(db_filename, point_color, label):
    """Plot mitral firing rate against network frequency."""
    db = tables.openFile(db_filename)
    # Get the spike arrays, the FFT peak and the common parameter set.
    attrs_list = (('results', 'spikes_it'),
                  ('results', '_v_attrs', 'FFTMAX', 0),
                  ('paramset', '_v_attrs', 'Common'))
    attrs = get_all_attrs(db, attrs_list)
    # Common parameters are taken from the first simulation only - assumes
    # they are identical across simulations (IndexError if the DB is empty).
    ps_common = attrs[0][2]
    n_mitral = ps_common['N_mitral']
    simu_length = ps_common['simu_length']
    burnin = ps_common['burnin']
    # Compute the spiking rate for each simulation
    sim_values = np.ndarray((len(attrs), 2))
    for ind_simu, simu in enumerate(attrs):
        # presumably spikes_it rows are (neuron index, spike time) - row 1
        # holds the spike times; TODO confirm against the writer.
        spike_times = simu[0].read()[1]
        sim_values[ind_simu][0] = get_spiking_rate(spike_times, n_mitral,
                                                   simu_length, burnin)
        sim_values[ind_simu][1] = simu[1]  # FFTMAX already computed
    # Plot the values
    plt.plot(sim_values[:, 0], sim_values[:, 1], ' .', color=point_color,
             label=label)
    plt.legend()
    # Close the DB
    db.close()
def get_spiking_rate(spike_times, n_mitral, simu_length, burnin):
    """Return the population-average spiking rate after the burn-in period.

    `spike_times` is a numpy array of spike timestamps; spikes that occur
    during the first `burnin` seconds are excluded from the count.
    """
    n_spikes = float((spike_times > burnin).sum())
    return n_spikes / (n_mitral * (simu_length - burnin))
def main():
    """Build the two summary figures for the beta and gamma databases:
    network frequency vs. input conductance, then network frequency vs.
    mitral firing rate.  Blocks in plt.show() until closed."""
    # Get the data
    filename_beta = "data/db40_beta_1pop_fig_netw_freq_multiproc.h5"
    filename_gamma = "data/db40_gamma_1pop_fig_netw_freq.h5"
    # Build network frequency figure
    plt.figure()
    plot_netw_freq(filename_beta, 'blue', "beta")
    plot_netw_freq(filename_gamma, 'red', "gamma")
    plt.xlabel("Input excitatory conductance $g_{Ein0}$ (S $m^{-2}$)")
    plt.ylabel("Network frequency $f$ (Hz)")
    # Build freq vs. freq figure
    plt.figure()
    plot_freqs(filename_beta, 'blue', "beta")
    plot_freqs(filename_gamma, 'red', "gamma")
    plt.xlabel("Mitral firing rate $\\nu_0$")
    plt.ylabel("Network frequency $f$ (Hz)")
    plt.show()
if __name__ == '__main__':
    # NOTE(review): main() returns None, so `res` is always None.
    res = main()
|
In the first part of today’s episode, Rev. Bryan Wolfmueller talks about how baptism is right next door to salvation whenever it’s talked about in the Bible. Guest Rev. Todd Wilken joins in to talk about what it means to be broken, why we talk about brokenness, and how this talk can be dangerous.
Rev. Todd Wilken, host of Issues, Etc and Assistant Pastor at Trinity Lutheran Church in Millstadt, Illinois, joins host Rev. Bryan Wolfmueller, Senior Pastor at Hope Lutheran Church in Aurora, Colorado, and author of the book Has American Christianity Failed?, to talk about curious topics to excite the imagination, equip the mind, and comfort the soul with God’s ordering of the world in the Law and Gospel. Tweet your questions to @bwolfmueller or send questions at wolfmueller.co.
|
#!/usr/bin/python
"""
Note: You can test SA_Mapper.py and sa_reducer.py by themselves
using the following line in the console:
cat sa_input | python SA_Mapper.py | sort | python sa_reducer.py
sa_input is an example input file created for S3 by SQ_Worker.py
"""
import json
import time
import sys
#from sentimentML.ML_builder import ML_builder
#from datum_box import box_tweet
from SentimentAnalysis import NB, LR
from sentimentML import ML_builder
# DBox = None
# Datum_Integers = {'positive': 1, 'neutral': 0, 'negative': -1}
SVM = None
def _setup_SVM():
    """Build the module-global SVM sentiment classifier (expensive; run once)."""
    global SVM
    SVM = ML_builder.SVM_builder()
    SVM.SVM_build()
def _setup_DatumBox():
    """Placeholder: DatumBox setup is currently disabled (see commented code)."""
    pass
    # global DBox
    # Datum_api_key = os.getenv('DATUM')
    # DBox = DatumBox(Datum_api_key)
def setup_SA():
    """Initialise every sentiment-analysis backend before mapping begins."""
    _setup_SVM()
    _setup_DatumBox()
def run_SA(tweet, ret_dict=None):
    """Run every sentiment analyzer on a tweet and collect the results.

    `tweet` is a (tweet_id, text) pair.  A caller-supplied `ret_dict` is
    updated in place and returned; otherwise a fresh dict is created.
    """
    if ret_dict is None:
        ret_dict = {}
    # BUG FIX: the original rebound ret_dict to a brand-new dict here,
    # which silently discarded any dictionary passed in by the caller.
    ret_dict['tweet_id'] = tweet[0]
    _run_LR_SA(tweet, ret_dict)
    _run_NB_SA(tweet, ret_dict)
    _run_SVM_SA(tweet, ret_dict)
    _run_DatumBox(tweet, ret_dict)
    return ret_dict
def _run_LR_SA(tweet, ret_dict):
    """Logistic regression: store label, class probabilities and timing."""
    t1 = time.time()
    results, probs = LR.predict(tweet[1])
    t2 = time.time()
    ret_dict['LR_SENT'] = results
    ret_dict['LR_NEG_PROB'] = probs[0]
    ret_dict['LR_POS_PROB'] = probs[1]
    ret_dict['LR_EXEC_TIME'] = t2 - t1
    # ret_dict is mutated in place; returned as well for convenience.
    return ret_dict
def _run_NB_SA(tweet, ret_dict):
    """Naive Bayes: store label, class probabilities and timing."""
    t1 = time.time()
    results, probs = NB.predict(tweet[1])
    t2 = time.time()
    ret_dict['NB_SENT'] = results
    ret_dict['NB_NEG_PROB'] = probs[0]
    ret_dict['NB_POS_PROB'] = probs[1]
    ret_dict['NB_EXEC_TIME'] = t2 - t1
    # ret_dict is mutated in place; returned as well for convenience.
    return ret_dict
def _run_SVM_SA(tweet, ret_dict):
    """SVM: store label, class probabilities and timing.

    Requires setup_SA() to have built the module-global SVM first.
    """
    t1 = time.time()
    result = SVM.Predict(tweet[1])
    t2 = time.time()
    ret_dict['SVM_SENT'] = result[0]
    ret_dict['SVM_NEG_PROB'] = result[1][0]
    ret_dict['SVM_POS_PROB'] = result[1][1]
    ret_dict['SVM_EXEC_TIME'] = t2 - t1
    # Previously hard-coded stub values, kept for reference:
    # ret_dict['SVM_SENT'] = 1
    # ret_dict['SVM_NEG_PROB'] = 0.3
    # ret_dict['SVM_POS_PROB'] = 0.89
    # ret_dict['SVM_EXEC_TIME'] = 0.424
    return ret_dict
def _run_DatumBox(tweet, ret_dict):
    """DatumBox backend is disabled: emit sentinel values (-2 sentiment,
    -1 probabilities/timing) so downstream consumers see a fixed schema."""
    # t1 = time.time()
    # result = box_tweet(tweet[1])
    # t2 = time.time()
    # ret_dict['DatumBox_SENT'] = result
    # ret_dict['DatumBox_NEG_PROB'] = -1
    # ret_dict['DatumBox_POS_PROB'] = -1
    # ret_dict['DatumBox_EXEC_TIME'] = t2 - t1
    ret_dict['DatumBox_SENT'] = -2
    ret_dict['DatumBox_NEG_PROB'] = -1
    ret_dict['DatumBox_POS_PROB'] = -1
    ret_dict['DatumBox_EXEC_TIME'] = -1
    return ret_dict
def main(argv):
    """Streaming-mapper entry point: read one JSON tweet per stdin line,
    run all sentiment analyzers, and print one lower-cased JSON record.

    Note: this file is Python 2 (print statement below).
    """
    setup_SA()
    for line in sys.stdin:
        try:
            tweet = json.loads(line)
        except Exception:
            pass  # skip this tweet
        else:
            # Run all analyzers and emit the combined record.
            delicious_payload = json.dumps(run_SA(tweet))
            print delicious_payload.lower()
            # print str(tweet[0]) + '\t' + '1'
if __name__ == "__main__":
main(sys.argv)
|
Funding has been agreed on a prime Build to Rent (BTR) development site in Birmingham, Legal & General announced recently.
The site, known as Newhall Square, will cost £53 million and is the sixth UK city in which Legal & General has invested in BTR. This latest investment now brings the number of BTR units in its pipeline to more than 1,700, following previous investments in Salford, Leeds, Bristol, Bath and Walthamstow.
The proposed development – which has planning permission for 220 residential units and 7,500 sq ft of ground floor commercial space – will also include a public square and 61 car parking spaces. A forward funding deal, with residential developer Spitfire Bespoke Homes, has been agreed to construct Legal & General’s latest investment project.
Newhall Square is located between Birmingham’s Central Business District and the retail/leisure hub of the Jewellery Quarter. It’s right in the heart of Birmingham’s city centre and the new apartments, once completed, will benefit from fine canal-side views.
Birmingham – often viewed as the UK’s unofficial second city – is currently undergoing significant regeneration, with more capital currently ploughed into infrastructure projects than any other European city. One of these – the Paradise development, a mixed-use scheme set to deliver 1.8m sq ft of new office, retail and leisure space, as well as generating 12,000 new jobs – is only five minutes’ walk from the new BTR development.
The demand from tenants in Birmingham is high and growing by the year, helped by a rapidly expanding population. Some 1.1m people currently call Birmingham home – up by 100,000 since 2004 – and population growth in this part of the UK is currently the third fastest in the country, only bettered by London and Bristol.
It’s also home to one of Europe's most youth-heavy populations, with those aged under 30 accounting for around 40% of all residents. Despite this, the amount of housing stock available is at a record low and worsened by low levels of house-building in recent years.
As a result, the private rented sector – and BTR in particular – is becoming an increasingly important cog in the property industry machine, especially where younger tenants are concerned.
Legal & General is pumping in approximately £1bn to the BTR sector, ‘to help provide the UK’s population with high quality, affordable living at all stages in their life cycle’.
“This development, in a highly sought after area of Birmingham, links the industrial heritage of the Jewellery Quarter with the modern business district,” Dan Batterton, BTR fund manager at LGIM Real Assets, said.
|
__copyright__ = """ Copyright (c) 2010-2011 Torsten Schmits
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import logging
import threading
import pygame
class FrameCounter(threading.Thread):
    """Runs a thread that calls flip() repeatedly, which waits for
    vsync and thus indicates real display redraws.

    `frame` counts completed flips; lock() / last_interval let callers
    measure how many redraws happened since a reference point.
    """

    def __init__(self, flag):
        threading.Thread.__init__(self)
        # NOTE(review): run() tests `while self._flag` - if callers pass a
        # threading.Event, bool(Event) is always True; confirm the intended
        # type of `flag` (a plain truthy/falsy sentinel works as expected).
        self._flag = flag
        self.frame = 0
        self._locked_frame = 0

    def run(self):
        try:
            while self._flag:
                self.step()
        except pygame.error as e:
            # BUG FIX: the original called unicode(e), which raises
            # NameError on Python 3; str() works on both interpreters.
            logging.getLogger('FrameCounter').error(str(e))

    def step(self):
        """Wait for one vsync'd buffer swap, then count it."""
        self.sync()
        self.frame += 1

    def sync(self):
        # flip() blocks until the display actually redraws (vsync).
        pygame.display.flip()

    def lock(self):
        """Remember the current frame as the reference for last_interval."""
        self._locked_frame = self.frame

    @property
    def last_interval(self):
        """Number of frames counted since the last lock() call."""
        return self.frame - self._locked_frame
Courses may include creativity in learning, child observation, health and nutrition education, early literacy and special needs children, as well as factors that affect the job duties, pay, and educational requirements.
This week, Rickert gave some detailed testimony about problems with the test being given to 11- and 12-year-olds, and concluded it by saying: “I have the greatest job. I am a teacher. extra time.
View all teaching and education jobs Professional development. Newly qualified teachers (NQTs) serve a three-term induction (full-time equivalent of one school year) or probationary period of assessment, usually completed in a single school year, upon successful completion of qualified teacher status (QTS) being confirmed.
Read on to learn more about the job duties of a special education teacher. Schools offering Emotional & Behavioral Disorders degrees can also be found in.
But it’s my job. Tracy Helms, a special education teacher assistant, is among those assigned to help Caitlin. Her picture is in Caitlin’s main binder, just like all of Caitlin’s other teachers and.
The Illinois Alliance of Administrators of Special Education (IAASE) is a non-profit special education administrator organization that focuses its efforts on promoting educational opportunities for all students including those with exceptionalities.
. County Board of Education. Job Descriptions. INTERRELATED TEACHER ( RESOURCE). Valid Georgia certification on Special Education. EVALUATION:.
you are here: Browse / Job Descriptions / Teacher Job Description. Other types of teachers include special education teachers, who work with students with a.
up to 40 lbs Work in a team based environment Support lead teachers with duties as needed Benefits: Weekly pay Training Health… Kelly Educational Staffing.
The BLS predicted that demand for kindergarten teachers will be highest in the South and West, and that job growth would be highest in rural and urban areas and lowest in suburban areas. Because.
But don’t blame teachers. These data, for reasons both good and bad, reveal a dirty little secret about American education. In many districts. let alone to base the job description on that.
a special education teacher assistant whose picture – just like all of Caitlin’s other teachers and helpers – is in Caitlin’s.
Think you have what it takes to work as a special education teacher assistant? JobHero has a comprehensive job description so you can learn more!
Special Education Programs and Services Children develop and learn at different rates. Some have difficulty learning to read and write and need the assistance of a special education program.
These responses are consistent with research showing that the best job-embedded professional. So the conditions teachers in the “Teachers Know Best” study described as desirable represent a good.
The extra teachers. The final job description was not decided yet and would depend on public consultation. Decisions were also yet to be made about the coordinator-pupil ratio for urban and rural.
Special educational needs teachers play a key role in supporting young people who need extra help and guidance with their learning. As a special educational.
Mar 23, 2018. This page contains a detailed job description along with duties statements for a Special Education Teacher.
Many schools report a demand for math, science and special education teachers, as well as those who can support English as a second language. Teachers with these specialties are likely to have.
Special Education Services Department 5700 Arlington Avenue Riverside, California 92504 Telephone: (951) 352-1200 FAX: (951) 328-2511 Riverside Unified School District is committed to meeting the educational needs of the exceptional child.
What does a Special Education Teacher do, how to become a Special Education Teacher and Job Description. Career Outlook, Degree Requirement, and.
Lafayette Education Foundation is seeking an Executive Director to lead the organization. Current director Etienna Wright is taking a position with her church. “I have been with LEF for over six years.
The Teacher Keys Effectiveness System (TKES) is a common evaluation system designed for building teacher effectiveness and ensuring consistency and comparability throughout the state.
Aug 12, 2011. JOB DESCRIPTION. This is the experienced level. The employee performs a full range of professional special education teaching assignments.
It has been over 40 years since the job description of principal was issued by the Department of. children who have special educational needs. In last month’s budget the department announced an.
When applying for jobs special education teachers may notice that “attention to detail” is listed as one of the requirements for the application. Deadline-oriented.
But her rollback of this regulation confirms her original statement: She thinks protecting the rights of children with disabilities is not in her job description. group of students is identified.
Study the job posting and the college’s Web site, looking particularly for information about its teaching philosophy as represented in the mission statement, its learning goals, a description.
Do you have what it takes to enjoy a career teaching special education?. As a student moves through the school year, it'll be your job to update the IEP in. State requirements vary but student teaching often lasts between one and two years.
How to write a Secretary job description. Your job description is the first touchpoint between your company and your new hire. With millions of people searching for jobs on Indeed each month, a great job description can help you attract the most qualified candidates to your open position.
Susan TerLouw takes a proactive approach to fostering collaboration with her students’ parents. “I have found texting to be an amazing way to get connected with parents,” says the high school special education teacher.
So I’m always very hesitant every day when I do it, but it’s my job. a special education teacher assistant whose picture – just like all of Caitlin’s other teachers and helpers – is in Caitlin’s.
Resource room teachers are also known as special education teachers. Middle School Special Education Teacher: Salary, Job Duties and Outlook.
Browse 153059 VIRTUAL SPECIAL EDUCATION TEACHER Jobs ($37K-$59K). in a school setting Responsibilities: * The Special Education Teacher will be.
Qualifications. Unless your first degree is a Bachelor of Education (BEd) or a BA/BSc with Qualified Teacher Status (QTS), it is essential to gain QTS or, in Scotland, to have achieved the Standard for Provisional Registration (SPR), or be a fully-registered teacher in.
Special Education Teacher. New Hanover County Schools. Job Description. Class: Certified. Dept: School. TITLE: Special Education Teacher.
Jul 12, 2018. New Special Education Teacher jobs added daily. Primary Responsibilities: Plan for appropriate learning experiences for students based on.
The Pennsylvania Department of Education has requested job descriptions, grading policies and the district. The academy was being run by Treena Watson, a former teacher who became an administrator.
Susan TerLouw takes a proactive approach to fostering collaboration with her students’ parents. “I have found texting to be an amazing way to get connected with parents,” says the high school special education teacher. “After not having calls returned, I tried texting and got immediate.
|
# pylint: disable=bad-option-value,wrong-import-position
# We need to disable the import position checks because of the windows check that we need to do below
import copy
import logging
import os
import platform
import stat
import sys
import click
import gitlint
from gitlint.lint import GitLinter
from gitlint.config import LintConfigBuilder, LintConfigError, LintConfigGenerator
from gitlint.git import GitContext, GitContextError, git_version
from gitlint import hooks
from gitlint.shell import shell
from gitlint.utils import LOG_FORMAT
from gitlint.exception import GitlintError
# Error codes
# gitlint communicates the number of violations via the process exit code,
# so the "real" error conditions are placed at the very top of the 0-255 range.
MAX_VIOLATION_ERROR_CODE = 252
USAGE_ERROR_CODE = 253
GIT_CONTEXT_ERROR_CODE = 254
CONFIG_ERROR_CODE = 255

# Default config file name, looked up in the current working directory
DEFAULT_CONFIG_FILE = ".gitlint"
# -n: disable swap files. This fixes a vim error on windows (E303: Unable to open swap file for <path>)
DEFAULT_COMMIT_MSG_EDITOR = "vim -n"

# Since we use the return code to denote the amount of errors, we need to change the default click usage error code
click.UsageError.exit_code = USAGE_ERROR_CODE

# We don't use logging.getLogger(__main__) here because that will cause DEBUG output to be lost
# when invoking gitlint as a python module (python -m gitlint.cli)
LOG = logging.getLogger("gitlint.cli")
class GitLintUsageError(GitlintError):
    """Raised when gitlint is invoked in a way it does not support."""
def setup_logging():
    """Configure the "gitlint" root logger: stderr stream handler with gitlint's
    log format, ERROR level by default (raised to DEBUG via --debug)."""
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))

    gitlint_logger = logging.getLogger("gitlint")
    # The gitlint root logger handles everything itself, don't bubble up
    gitlint_logger.propagate = False
    gitlint_logger.addHandler(stream_handler)
    gitlint_logger.setLevel(logging.ERROR)
def log_system_info():
    """Log platform, python, git and gitlint version info at DEBUG level.

    Called from ``cli`` when --debug is passed, to make bug reports actionable.
    """
    LOG.debug("Platform: %s", platform.platform())
    LOG.debug("Python version: %s", sys.version)
    LOG.debug("Git version: %s", git_version())
    LOG.debug("Gitlint version: %s", gitlint.__version__)
    # GITLINT_USE_SH_LIB toggles the underlying shell implementation (see gitlint.shell)
    LOG.debug("GITLINT_USE_SH_LIB: %s", os.environ.get("GITLINT_USE_SH_LIB", "[NOT SET]"))
    LOG.debug("DEFAULT_ENCODING: %s", gitlint.utils.DEFAULT_ENCODING)
def build_config(  # pylint: disable=too-many-arguments
    target, config_path, c, extra_path, ignore, contrib, ignore_stdin, staged, verbose, silent, debug
):
    """Create a LintConfig object from commandline parameters.

    Precedence (lowest to highest): config file, -c string flags,
    convenience flags (--ignore, --contrib, ...).

    Returns a (config, config_builder) tuple.
    """
    builder = LintConfigBuilder()

    # First, load default config or config from configfile
    if config_path:
        builder.set_from_config_file(config_path)
    elif os.path.exists(DEFAULT_CONFIG_FILE):
        builder.set_from_config_file(DEFAULT_CONFIG_FILE)

    # Then process any commandline configuration flags
    builder.set_config_from_string_list(c)

    # Finally, overwrite with any convenience commandline flags
    for option_name, option_value in (('ignore', ignore), ('contrib', contrib),
                                      ('ignore-stdin', ignore_stdin)):
        if option_value:
            builder.set_option('general', option_name, option_value)

    # --silent wins over any -v count
    if silent:
        builder.set_option('general', 'verbosity', 0)
    elif verbose > 0:
        builder.set_option('general', 'verbosity', verbose)

    for option_name, option_value in (('extra-path', extra_path), ('target', target),
                                      ('debug', debug), ('staged', staged)):
        if option_value:
            builder.set_option('general', option_name, option_value)

    return builder.build(), builder
def get_stdin_data():
    """Return the data piped to stdin as a string, or False when nothing was piped.

    STDIN is one of three things: an interactive TTY, a (named) pipe
    (stat.S_ISFIFO) or a regular file (stat.S_ISREG). Data can only be piped
    to gitlint in the latter two cases. However, in CI runners (Jenkins,
    Gitlab, ...) STDIN is often a pipe/file with *nothing* in it, so we must
    actually attempt the read and treat an empty result as "no stdin data"
    so that callers fall back to reading from the local repository.
    """
    stdin_mode = os.fstat(sys.stdin.fileno()).st_mode
    if not (stat.S_ISFIFO(stdin_mode) or stat.S_ISREG(stdin_mode)):
        # Interactive terminal (or exotic device): nothing is being piped to us
        return False

    piped_data = sys.stdin.read()
    # Empty piped data is treated the same as no piped data at all
    return str(piped_data) if piped_data else False
def build_git_context(lint_config, msg_filename, refspec):
    """Build a GitContext from the highest-precedence input source.

    Precedence: --msg-filename, then stdin (unless ignored), then the local repo.
    Raises GitLintUsageError when --staged is combined with reading from the repo.
    """
    # Pick the GitContext factory to use whenever a custom message is passed in
    if lint_config.staged:
        LOG.debug("Fetching additional meta-data from staged commit")
        from_commit_msg = lambda message: GitContext.from_staged_commit(message, lint_config.target)  # noqa
    else:
        from_commit_msg = GitContext.from_commit_msg

    # 1. Any data specified via --msg-filename
    if msg_filename:
        LOG.debug("Using --msg-filename.")
        return from_commit_msg(str(msg_filename.read()))

    # 2. Any data sent to stdin (unless stdin is being ignored)
    if not lint_config.ignore_stdin:
        stdin_input = get_stdin_data()
        if stdin_input:
            LOG.debug("Stdin data: '%s'", stdin_input)
            LOG.debug("Stdin detected and not ignored. Using as input.")
            return from_commit_msg(stdin_input)

    # 3. Fallback to reading from local repository; --staged makes no sense here
    if lint_config.staged:
        raise GitLintUsageError("The 'staged' option (--staged) can only be used when using '--msg-filename' or "
                                "when piping data to gitlint via stdin.")

    LOG.debug("No --msg-filename flag, no or empty data passed to stdin. Using the local repo.")
    return GitContext.from_local_repository(lint_config.target, refspec)
def handle_gitlint_error(ctx, exc):
    """Print a message for a known gitlint exception and exit with its error code.

    Note: ``ctx.exit`` raises, so at most one handler runs.
    """
    # (exception type, message template, exit code) — order matters, most specific first
    handlers = (
        (GitContextError, "{0}", GIT_CONTEXT_ERROR_CODE),
        (GitLintUsageError, "Error: {0}", USAGE_ERROR_CODE),
        (LintConfigError, "Config Error: {0}", CONFIG_ERROR_CODE),
    )
    for exc_type, message_template, exit_code in handlers:
        if isinstance(exc, exc_type):
            click.echo(message_template.format(exc))
            ctx.exit(exit_code)
            break
class ContextObj:
    """Plain data holder shared between Click commands via ``ctx.obj``."""

    def __init__(self, config, config_builder, refspec, msg_filename, gitcontext=None):
        # gitcontext starts out as None; the `lint` command fills it in so that
        # later commands (e.g. run-hook) can reuse it.
        self.gitcontext = gitcontext
        self.config = config
        self.config_builder = config_builder
        self.refspec = refspec
        self.msg_filename = msg_filename
# Top-level click group: running plain `gitlint` (no sub-command) invokes `lint`.
# Every option can also be set via its GITLINT_* environment variable.
@click.group(invoke_without_command=True, context_settings={'max_content_width': 120},
             epilog="When no COMMAND is specified, gitlint defaults to 'gitlint lint'.")
@click.option('--target', envvar='GITLINT_TARGET',
              type=click.Path(exists=True, resolve_path=True, file_okay=False, readable=True),
              help="Path of the target git repository. [default: current working directory]")
@click.option('-C', '--config', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True),
              help=f"Config file location [default: {DEFAULT_CONFIG_FILE}]")
@click.option('-c', multiple=True,
              help="Config flags in format <rule>.<option>=<value> (e.g.: -c T1.line-length=80). " +
                   "Flag can be used multiple times to set multiple config values.")  # pylint: disable=bad-continuation
@click.option('--commits', envvar='GITLINT_COMMITS', default=None, help="The range of commits to lint. [default: HEAD]")
@click.option('-e', '--extra-path', envvar='GITLINT_EXTRA_PATH',
              help="Path to a directory or python module with extra user-defined rules",
              type=click.Path(exists=True, resolve_path=True, readable=True))
@click.option('--ignore', envvar='GITLINT_IGNORE', default="", help="Ignore rules (comma-separated by id or name).")
@click.option('--contrib', envvar='GITLINT_CONTRIB', default="",
              help="Contrib rules to enable (comma-separated by id or name).")
@click.option('--msg-filename', type=click.File(), help="Path to a file containing a commit-msg.")
@click.option('--ignore-stdin', envvar='GITLINT_IGNORE_STDIN', is_flag=True,
              help="Ignore any stdin data. Useful for running in CI server.")
@click.option('--staged', envvar='GITLINT_STAGED', is_flag=True,
              help="Read staged commit meta-info from the local repository.")
@click.option('-v', '--verbose', envvar='GITLINT_VERBOSITY', count=True, default=0,
              help="Verbosity, more v's for more verbose output (e.g.: -v, -vv, -vvv). [default: -vvv]", )
@click.option('-s', '--silent', envvar='GITLINT_SILENT', is_flag=True,
              help="Silent mode (no output). Takes precedence over -v, -vv, -vvv.")
@click.option('-d', '--debug', envvar='GITLINT_DEBUG', help="Enable debugging output.", is_flag=True)
@click.version_option(version=gitlint.__version__)
@click.pass_context
def cli(  # pylint: disable=too-many-arguments
        ctx, target, config, c, commits, extra_path, ignore, contrib,
        msg_filename, ignore_stdin, staged, verbose, silent, debug,
):
    """ Git lint tool, checks your git commit messages for styling issues

        Documentation: http://jorisroovers.github.io/gitlint
    """
    try:
        # --debug switches the whole "gitlint" logger tree to DEBUG
        if debug:
            logging.getLogger("gitlint").setLevel(logging.DEBUG)
            LOG.debug("To report issues, please visit https://github.com/jorisroovers/gitlint/issues")
            log_system_info()

        # Get the lint config from the commandline parameters and
        # store it in the context (click allows storing an arbitrary object in ctx.obj).
        config, config_builder = build_config(target, config, c, extra_path, ignore, contrib,
                                              ignore_stdin, staged, verbose, silent, debug)
        LOG.debug("Configuration\n%s", config)

        ctx.obj = ContextObj(config, config_builder, commits, msg_filename)

        # If no subcommand is specified, then just lint
        if ctx.invoked_subcommand is None:
            ctx.invoke(lint)

    except GitlintError as e:
        # Known gitlint errors are reported and mapped to a dedicated exit code
        handle_gitlint_error(ctx, e)
@cli.command("lint")
@click.pass_context
def lint(ctx):
    """ Lints a git repository [default command] """
    lint_config = ctx.obj.config
    refspec = ctx.obj.refspec
    msg_filename = ctx.obj.msg_filename
    gitcontext = build_git_context(lint_config, msg_filename, refspec)
    # Set gitcontext in the click context, so we can use it in commands that are run after this one;
    # in particular, this is used by run-hook
    ctx.obj.gitcontext = gitcontext

    number_of_commits = len(gitcontext.commits)
    # Exit if we don't have commits in the specified range. Use a 0 exit code, since a popular use-case is one
    # where users are using --commits in a check job to check the commit messages inside a CI job. By returning 0, we
    # ensure that these jobs don't fail if for whatever reason the specified commit range is empty.
    if number_of_commits == 0:
        LOG.debug(u'No commits in range "%s"', refspec)
        ctx.exit(0)

    LOG.debug(u'Linting %d commit(s)', number_of_commits)
    general_config_builder = ctx.obj.config_builder
    last_commit = gitcontext.commits[-1]

    # Let's get linting!
    first_violation = True
    exit_code = 0
    for commit in gitcontext.commits:
        # Build a config_builder taking into account the commit specific config (if any)
        config_builder = general_config_builder.clone()
        config_builder.set_config_from_commit(commit)

        # Create a deepcopy from the original config, so we have a unique config object per commit
        # This is important for configuration rules to be able to modify the config on a per commit basis
        commit_config = config_builder.build(copy.deepcopy(lint_config))

        # Actually do the linting
        linter = GitLinter(commit_config)
        violations = linter.lint(commit)
        # exit code equals the total number of violations in all commits
        exit_code += len(violations)
        if violations:
            # Display the commit hash & new lines intelligently: prefix a newline
            # unless this is the very first violating commit (and not the last one)
            if number_of_commits > 1 and commit.sha:
                linter.display.e("{0}Commit {1}:".format(
                    "\n" if not first_violation or commit is last_commit else "",
                    commit.sha[:10]
                ))
            linter.print_violations(violations)
            first_violation = False

    # cap actual max exit code because bash doesn't like exit codes larger than 255:
    # http://tldp.org/LDP/abs/html/exitcodes.html
    exit_code = min(MAX_VIOLATION_ERROR_CODE, exit_code)
    LOG.debug("Exit Code = %s", exit_code)
    ctx.exit(exit_code)
@cli.command("install-hook")
@click.pass_context
def install_hook(ctx):
    """ Install gitlint as a git commit-msg hook. """
    lint_config = ctx.obj.config
    try:
        hooks.GitHookInstaller.install_commit_msg_hook(lint_config)
        hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
    except hooks.GitHookInstallerError as err:
        # Installation problems (e.g. hook already exists) are reported on stderr
        click.echo(err, err=True)
        ctx.exit(GIT_CONTEXT_ERROR_CODE)

    click.echo(f"Successfully installed gitlint commit-msg hook in {hook_path}")
    ctx.exit(0)
@cli.command("uninstall-hook")
@click.pass_context
def uninstall_hook(ctx):
    """ Uninstall gitlint commit-msg hook. """
    lint_config = ctx.obj.config
    try:
        hooks.GitHookInstaller.uninstall_commit_msg_hook(lint_config)
        hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
    except hooks.GitHookInstallerError as err:
        # Uninstall problems (e.g. not a gitlint hook) are reported on stderr
        click.echo(err, err=True)
        ctx.exit(GIT_CONTEXT_ERROR_CODE)

    click.echo(f"Successfully uninstalled gitlint commit-msg hook from {hook_path}")
    ctx.exit(0)
@cli.command("run-hook")
@click.pass_context
def run_hook(ctx):
    """ Runs the gitlint commit-msg hook. """
    # Loop until the commit message passes linting or the user explicitly
    # accepts/declines it. `lint` always terminates via ctx.exit, so the
    # interesting control flow lives in the click.exceptions.Exit handler.
    exit_code = 1
    while exit_code > 0:
        try:
            click.echo("gitlint: checking commit message...")
            ctx.invoke(lint)
        except GitlintError as e:
            handle_gitlint_error(ctx, e)
        except click.exceptions.Exit as e:
            # Flush stderr and stdout, this resolves an issue with output ordering in Cygwin
            sys.stderr.flush()
            sys.stdout.flush()

            exit_code = e.exit_code
            if exit_code == 0:
                click.echo("gitlint: " + click.style("OK", fg='green') + " (no violations in commit message)")
                continue

            click.echo("-----------------------------------------------")
            click.echo("gitlint: " + click.style("Your commit message contains violations.", fg='red'))

            # Ask the user what to do: accept, decline or edit the message
            value = None
            while value not in ["y", "n", "e"]:
                click.echo("Continue with commit anyways (this keeps the current commit message)? "
                           "[y(es)/n(no)/e(dit)] ", nl=False)

                # Ideally, we'd want to use click.getchar() or click.prompt() to get user's input here instead of
                # input(). However, those functions currently don't support getting answers from stdin.
                # This wouldn't be a huge issue since this is unlikely to occur in the real world,
                # were it not that we use a stdin to pipe answers into gitlint in our integration tests.
                # If that ever changes, we can revisit this.
                # Related click pointers:
                # - https://github.com/pallets/click/issues/1370
                # - https://github.com/pallets/click/pull/1372
                # - From https://click.palletsprojects.com/en/7.x/utils/#getting-characters-from-terminal
                #   Note that this function will always read from the terminal, even if stdin is instead a pipe.
                value = input()

            if value == "y":
                # Accept as-is: exit 0 so git proceeds with the commit
                LOG.debug("run-hook: commit message accepted")
                exit_code = 0
            elif value == "e":
                # Re-edit the message in $EDITOR (or vim), then loop and re-lint
                LOG.debug("run-hook: editing commit message")
                msg_filename = ctx.obj.msg_filename
                if msg_filename:
                    msg_filename.seek(0)
                    editor = os.environ.get("EDITOR", DEFAULT_COMMIT_MSG_EDITOR)
                    msg_filename_path = os.path.realpath(msg_filename.name)
                    LOG.debug("run-hook: %s %s", editor, msg_filename_path)
                    shell(editor + " " + msg_filename_path)
                else:
                    click.echo("Editing only possible when --msg-filename is specified.")
                    ctx.exit(exit_code)
            elif value == "n":
                # Decline: non-zero exit aborts the commit; echo the message so it isn't lost
                LOG.debug("run-hook: commit message declined")
                click.echo("Commit aborted.")
                click.echo("Your commit message: ")
                click.echo("-----------------------------------------------")
                click.echo(ctx.obj.gitcontext.commits[0].message.full)
                click.echo("-----------------------------------------------")
                ctx.exit(exit_code)

    ctx.exit(exit_code)
@cli.command("generate-config")
@click.pass_context
def generate_config(ctx):
    """ Generates a sample gitlint config file. """
    prompted = click.prompt('Please specify a location for the sample gitlint config file',
                            default=DEFAULT_CONFIG_FILE)
    path = os.path.realpath(prompted)
    parent_dir = os.path.dirname(path)

    # Refuse to generate into a missing directory or over an existing file
    if not os.path.exists(parent_dir):
        click.echo(f"Error: Directory '{parent_dir}' does not exist.", err=True)
        ctx.exit(USAGE_ERROR_CODE)
    if os.path.exists(path):
        click.echo(f"Error: File \"{path}\" already exists.", err=True)
        ctx.exit(USAGE_ERROR_CODE)

    LintConfigGenerator.generate_config(path)
    click.echo(f"Successfully generated {path}")
    ctx.exit(0)
# Let's Party!
# Configure logging at import time, so it is in place no matter how gitlint is invoked.
setup_logging()
if __name__ == "__main__":
    # pylint: disable=no-value-for-parameter
    cli()  # pragma: no cover
|
When I saw this I just knew it would be so much better. Great build quality, great that I now have a tap to drain the wort, and great that with the extra capacity I am no longer in fear of boil-overs.
I like SS BrewTech stuff. Great quality. Very professional - I also have a mini brew bucket which is another great bit of kit.
This is a professional piece of kit, with a solid well built feel. I bought in preference to a boiler with element for ease of cleaning (although mainly to be used as a sparge water heater) and freedom from electrical problems (cutting out). I have bought a separate induction table on which it fits perfectly. Only one thing to be aware it is 5.5 US gallons (not imperial) and the tap is 1/2" US thread not BSP.
|
# -*- coding: utf-8 -*-
"""plWordNet DB queries."""
from sqlalchemy import orm
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import label
from pyrelaxmapper.plwn.models import (Parameter, LexicalUnit, Synset, SynsetRelation,
RelationType, UnitSynset, LexicalRelation)
# TODO: This isn't the proper version number...
def version(session):
    """Query plWordNet for format version."""
    program_version = session.query(Parameter).filter_by(name='programversion').first().value
    # The stored parameter value ends with the version number, e.g. "... 2.1";
    # take everything after the last space (the whole string if there is none).
    return program_version.rsplit(' ', 1)[-1]
def reltypes(session, types=None):
    """Query for all relation types.

    NOTE(review): the previous docstring claimed this queries hipernyms and
    filters by ``types`` — it does neither. All RelationType rows are
    returned unfiltered and ``types`` is ignored.

    Parameters
    ----------
    session : orm.session.Session
    types : list
        Currently unused; kept for interface compatibility.
    """
    return (session.query(RelationType)
            )
def reltypes_pwn_plwn(session):
    """Query plWN for PWN-plWN relation types (ids only)."""
    query = session.query(RelationType.id_)
    query = query.filter(RelationType.name.like('%plWN%'))
    # Don't take potential candidates ('po_pa', 'po_ap'), only certain ones
    return query.filter(~RelationType.shortcut.in_(['po_pa', 'po_ap']))
def pwn_mappings(session, pos=None, pos_en=None):
    """Query plWN for already mapped synsets between plWN and PWN.

    Selects: Polish synset id, English synset id, English unitsstr and POS.
    Source: Polish - Target (child): English
    RelationType: selects only plWN-PWN mappings
        does not take 'po_pa, po_ap' relation types.
    POS: defaults select nouns only (pos=[2], pos_en=[6]).

    Parameters
    ----------
    session : orm.session.Session
    pos : list of int
        Polish POS codes to keep (default [2]).
    pos_en : list of int
        English POS codes to keep (default [6]).
    """
    if not pos:
        pos = [2]
    if not pos_en:
        pos_en = [6]
    # Certain (non-potential) plWN<->PWN mapping relation types only
    rel_types = reltypes_pwn_plwn(session)
    # Aliases: the un-aliased Synset/LexicalUnit refer to the Polish synset and
    # the English lexical unit respectively; aliases cover the other sides.
    syns_en = orm.aliased(Synset)        # English synset (child of the relation)
    uas_pl = orm.aliased(UnitSynset)     # unit-synset link on the Polish side
    lunit_pl = orm.aliased(LexicalUnit)  # Polish lexical unit (POS filter)
    return (session.query(label('pl_uid', Synset.id_), label('en_uid', syns_en.id_),
                          syns_en.unitsstr, LexicalUnit.pos)
            .join(SynsetRelation, Synset.id_ == SynsetRelation.parent_id)
            .join(syns_en, SynsetRelation.child_id == syns_en.id_)
            .join(UnitSynset, syns_en.id_ == UnitSynset.syn_id)
            .join(LexicalUnit, UnitSynset.lex_id == LexicalUnit.id_)
            .join(uas_pl, Synset.id_ == uas_pl.syn_id)
            .join(lunit_pl, uas_pl.lex_id == lunit_pl.id_)
            .join(RelationType, SynsetRelation.rel_id == RelationType.id_)
            .filter(RelationType.id_.in_(rel_types))
            .filter(LexicalUnit.pos.in_(pos_en))
            .filter(lunit_pl.pos.in_(pos))
            # group_by collapses the one-row-per-lexical-unit fan-out of the joins
            .group_by(Synset.id_, syns_en.id_, syns_en.unitsstr, LexicalUnit.pos)
            .order_by(Synset.id_)
            )
def lunits(session, pos=None):
    """Query for lexical units, their lemma and POS.

    Parameters
    ----------
    session : orm.session.Session
    pos : list
        Parts of speech to select (default [2])

    Returns
    -------
    orm.query.Query
        Lexical units ordered by id.
    """
    selected_pos = pos if pos else [2]
    return (session.query(LexicalUnit)
            .filter(LexicalUnit.pos.in_(selected_pos))
            .order_by(LexicalUnit.id_))
def synsets(session, pos=None):
    """Query for synsets, concatenated ids and unit indexes of their LUs.

    Parameters
    ----------
    session : orm.session.Session
    pos : list
        Parts of speech to select (default [2])
    """
    if not pos:
        pos = [2]
    # group_concat folds all lexical units belonging to a synset into a single
    # comma-separated value per synset row.
    return (session.query(Synset.id_, Synset.definition,
                          label('lex_ids', func.group_concat(UnitSynset.lex_id)),
                          label('unitindexes', func.group_concat(UnitSynset.unitindex))
                          )
            .join(UnitSynset)
            .join(LexicalUnit)
            .filter(LexicalUnit.pos.in_(pos))
            .order_by(Synset.id_)
            .group_by(Synset.id_)
            )
def synset_relations(session, types, pos=None):
    """Query for synset relations (parent id, child id, relation id).

    Parameters
    ----------
    session : orm.session.Session
    types : list or scalar
        Relation type id(s) to keep; when falsy, all types are kept.
    pos : list or scalar
        Parts of speech of the parent synset's lexical units; when falsy,
        no POS filtering (and no extra joins) is done.
    """
    query = (session.query(SynsetRelation.parent_id, SynsetRelation.child_id,
                           SynsetRelation.rel_id)
             .order_by(SynsetRelation.parent_id)
             )
    if types:
        # Accept a scalar as a convenience
        types = types if isinstance(types, list) else [types]
        query = query.filter(SynsetRelation.rel_id.in_(types))
    if pos:
        pos = pos if isinstance(pos, list) else [pos]
        # POS lives on the lexical units, so join through UnitSynset;
        # group_by removes duplicates caused by multi-unit synsets.
        query = (query
                 .join(UnitSynset, SynsetRelation.parent_id == UnitSynset.syn_id)
                 .join(LexicalUnit)
                 .filter(LexicalUnit.pos.in_(pos))
                 .group_by(SynsetRelation.parent_id, SynsetRelation.child_id,
                           SynsetRelation.rel_id)
                 )
    return query
def lexical_relations(session, reltypes, pos=None):
    """Query for lexical relations (parent id, child id, relation id).

    Parameters
    ----------
    session : orm.session.Session
    reltypes : list or scalar
        RelationType id(s) to select; matches the relation's own type or its
        parent type. When falsy, all relation types are kept.
    pos : list or scalar
        Parts of speech to extract. If empty, extract all.
    """
    query = (session.query(LexicalRelation.parent_id, LexicalRelation.child_id,
                           LexicalRelation.rel_id)
             .order_by(LexicalRelation.parent_id)
             )
    if reltypes:
        reltypes = reltypes if isinstance(reltypes, list) else [reltypes]
        # Match either the relation type itself or its parent type
        query = (query
                 .join(RelationType)
                 .filter(RelationType.id_.in_(reltypes) |
                         RelationType.parent_id.in_(reltypes)))
    if pos:
        pos = pos if isinstance(pos, list) else [pos]
        # POS filter goes through the parent lexical unit
        query = (query
                 .join(LexicalUnit, LexicalRelation.parent_id == LexicalUnit.id_)
                 .filter(LexicalUnit.pos.in_(pos))
                 .group_by(LexicalRelation.parent_id, LexicalRelation.child_id,
                           LexicalRelation.rel_id)
                 )
    return query
|
P&E CONSULTING, INC. is a leading Human Resources Consulting Company committed to the most positive & effective HR Solution for our worldwide clients in and out of Korea.
Executive Secretary, Admin. Officer, etc.
Mechanical, Metallurgical, Mining, Electronic, Electrical, Mechatronic, Automotive, Space Aviation, Water, Air, Energy, Shipbuilding, Plant/Factory Automation, Motors, Architecture, Geo, HVAC, EPC & Plant engineering, Precision, etc.
IT and ICT; Semiconductor, LCD, LED, Mobile, R&D, e-Business, Security, Hardware, Middleware, Software, System Integration, MIS, etc.
Chemical, Pharmaceutical, Bio-chemical, Food ingredients, Medical, Medical Device, Hospital, Energy Alternatives, Film, Gas, Oil, Cosmetic, Lubricant, Adhesives, Petro-Chemical, Polymer & Fluids, Paint, UV Coating, Metallurgical, Resin, Fluoro-Polymers, Textiles, etc.
Marketing, Advertisement, PR, IR, etc.
|
import casadi as ca
import pylab as pl
import casiopeia as cp
import os
# (Model and data taken from: Diehl, Moritz: Course on System Identification,
# exercise 7, SYSCOP, IMTEK, University of Freiburg, 2014/2015)

# Defining constant problem parameters:
#
# - m: representing the ball of the mass in kg
# - L: the length of the pendulum bar in meters
# - g: the gravity constant in m/s^2
# - psi: the actuation angle of the manuver in radians, which stays
#   constant for this problem

m = 1.0
L = 3.0
g = 9.81

# psi = pl.pi / 2.0
# Much smaller actuation angle than the commented-out pi/2 maneuver above
psi = pl.pi / (180.0 * 2)

# System: 2 states, 1 control, 1 unknown parameter
x = ca.MX.sym("x", 2)
p = ca.MX.sym("p", 1)
u = ca.MX.sym("u", 1)

# f = ca.vertcat([x[1], p[0]/(m*(L**2))*(u-x[0]) - g/L * pl.sin(x[0])])
# Linearized dynamics: sin(x[0]) replaced by x[0] (cf. the commented-out
# nonlinear version above)
f = ca.vertcat(x[1], p[0]/(m*(L**2))*(u-x[0]) - g/L * x[0])

# Both states are measured directly
phi = x

system = cp.system.System(x = x, u = u, p = p, f = f, phi = phi)

# Columns of data_pendulum.txt: time, first measurement, second measurement
data = pl.loadtxt('data_pendulum.txt')
time_points = data[:500, 0]

numeas = data[:500, 1]
wmeas = data[:500, 2]
N = time_points.size
ydata = pl.array([numeas,wmeas])
# Constant actuation over all N-1 control intervals
udata = [psi] * (N-1)

# "True" parameter value used to generate the reference simulation
ptrue = [3.0]

sim_true = cp.sim.Simulation(system, ptrue)
sim_true.run_system_simulation(time_points = time_points, \
    x0 = ydata[:, 0], udata = udata)

# pl.figure()
# pl.plot(time_points, pl.squeeze(sim_true.simulation_results[0,:]))
# pl.plot(time_points, pl.squeeze(sim_true.simulation_results[1,:]))
# pl.show()
# Monte-Carlo study: repeatedly add gaussian noise N~(0, sigma^2) to the true
# simulation, estimate the parameter, and collect the estimates.
p_test = []

sigma = 0.1
# Weights: inverse of the measurement variance, one weight per measurement
wv = (1. / sigma**2) * pl.ones(ydata.shape)

repetitions = 100

for k in range(repetitions):

    y_randn = sim_true.simulation_results + \
        sigma * (pl.randn(*sim_true.simulation_results.shape))

    # The noisy data double as both the state initial guess and the measurements
    pe_test = cp.pe.LSq(system = system, time_points = time_points,
        udata = udata, xinit = y_randn, ydata = y_randn, wv = wv, pinit = 1)

    pe_test.run_parameter_estimation()

    p_test.append(pe_test.estimated_parameters)

# Sample statistics over all repetitions (population std, ddof=0)
p_mean = pl.mean(p_test)
p_std = pl.std(p_test, ddof=0)

# Covariance matrix of the *last* experiment only, for comparison with the
# sample standard deviation computed above
pe_test.compute_covariance_matrix()
pe_test.print_estimation_results()
# Generate report
print("\np_mean = " + str(ca.DM(p_mean)))
print("phat_last_exp = " + str(ca.DM(pe_test.estimated_parameters)))
print("\np_sd = " + str(ca.DM(p_std)))
print("sd_from_covmat = " + str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
print("beta = " + str(pe_test.beta))
print("\ndelta_abs_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
print("delta_rel_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)))
fname = os.path.basename(__file__)[:-3] + ".rst"
report = open(fname, "w")
report.write( \
'''Concept test: covariance matrix computation
===========================================
Simulate system. Then: add gaussian noise N~(0, sigma^2), estimate,
store estimated parameter, repeat.
.. code-block:: python
y_randn = sim_true.simulation_results + sigma * \
(np.random.randn(*sim_true.estimated_parameters.shape))
Afterwards, compute standard deviation of estimated parameters,
and compare to single covariance matrix computation done in PECas.
''')
prob = "ODE, 2 states, 1 control, 1 param, (pendulum linear)"
report.write(prob)
report.write("\n" + "-" * len(prob) + "\n\n.. code-block:: python")
report.write( \
'''.. code-block:: python
---------------------- casiopeia system definition -----------------------
The system is a dynamic system defined by a set of
explicit ODEs xdot which establish the system state x:
xdot = f(t, u, x, p, we, wu)
and by an output function phi which sets the system measurements:
y = phi(t, x, p).
Particularly, the system has:
1 inputs u
1 parameters p
2 states x
2 outputs phi
Where xdot is defined by:
xdot[0] = x[1]
xdot[1] = (((p/9)*(u-x[0]))-(3.27*x[0]))
And where phi is defined by:
y[0] = x[0]
y[1] = x[1]
''')
report.write("\n**Test results:**\n\n.. code-block:: python")
report.write("\n\n repetitions = " + str(repetitions))
report.write("\n sigma = " + str(sigma))
report.write("\n\n p_true = " + str(ca.DM(ptrue)))
report.write("\n\n p_mean = " + str(ca.DM(p_mean)))
report.write("\n phat_last_exp = " + \
str(ca.DM(pe_test.estimated_parameters)))
report.write("\n\n p_sd = " + str(ca.DM(p_std)))
report.write("\n sd_from_covmat = " \
+ str(ca.diag(ca.sqrt(pe_test.covariance_matrix))))
report.write("\n beta = " + str(pe_test.beta))
report.write("\n\n delta_abs_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix)))))
report.write("\n delta_rel_sd = " + str(ca.fabs(ca.DM(p_std) - \
ca.diag(ca.sqrt(pe_test.covariance_matrix))) / ca.DM(p_std)) \
+ "\n")
report.close()
# Best-effort PDF rendering of the report; rst2pdf is an external tool and
# may not be installed.
try:
    # NOTE(review): os.system itself rarely raises — a missing rst2pdf just
    # yields a nonzero exit status, which we deliberately ignore here.
    os.system("rst2pdf " + fname)
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt (e.g.
    # Ctrl-C) are no longer swallowed; best-effort behavior is preserved.
    print("Generating PDF report failed, is rst2pdf installed correctly?")
|
The vuln writeups always mention passphrase, does the vuln affect WPA2 in infrastructure mode (with Radius/802.1x), as they don't rely on passphrases as such?
Would MAC Address filtering, available on most home WiFi units, assist in some way?
Quoting Steven: Would MAC Address filtering, available on most home WiFi units, assist in some way?
JFTR: how do you expect this (trivial to bypass) feature of your access point to impair an attackers ability to send arbitrary WiFi packets from his equipment to any client connected to your access point? The "ether" is a shared medium!
No. Too much labor, too easy to subvert. Look up the use of Ethernet Locally Administered Addresses (LAA). A widely available, standard feature of Ethernet networking can be used to set a local MAC address to any desired value, including the address of a computer observed communicating on your network. Filter bypassed.
I agree that in the WiFi medium an attacker could just send an attack directly to a client bypassing the AP. However, MAC Address filtering on the AP should prevent access into the wired portion of the network even if the attacker is able to obtain the encryption key. Again, looking for ways to limit the attack not prevent it all together.
To my knowledge, MAC address filtering really doesn't stop ANY attack, never mind this one.
Quoting Steven: I agree that in the WiFi medium an attacker could just send an attack directly to a client bypassing the AP. However, MAC Address filtering on the AP should prevent access into the wired portion of the network even if the attacker is able to obtain the encryption key. Again, looking for ways to limit the attack not prevent it all together.
Which part of "NO!" alias MAC address filtering prevents NOTHING is so hard to understand?
Simplified: KRACK lets the client reuse a (zeroed) encryption key.
Is Protected EAP (WPA2 Enterprise) as vulnerable? My reading of the description would indicate yes, but I am far from an expert. Thanks.
Does this affect WPA2 Enterprise mode deployments (with Radius/802.1x), since they don't use passphrases?
>and if they do it is typically more for billing than to protect user traffic.
Are you sure about this? wigle.net is showing only 7% of their database using WPA, while 59.8% are using WPA2, and 20% are unknown. From what I see in my neck of the woods WPA2 is pretty common for 'public' WiFi access.
Enterprise mode is affected as well. This vulnerability affects the handshake that is used to setup the keys, which is the same in "Passpharase" and "Enterprise" mode.
Regarding "Public Accesspoints". I am refering to things like Hotel/Coffee House hotspots, not to "access points seen in the public".
Anyone found a decent list of vendors that have released patches so far?
I've heard a rumour that Windows 10 has a patch but can't substantiate it.
Microsoft has released patches for Win10/7 in the Oct release.
Quoting PhilBAR: To my knowledge, MAC address filtering really doesn't stop ANY attack, never mind this one.
It'll stop a casual attacker, but anyone worth their salt will know how to spoof a MAC address.
We're using WPA2 with AES/PSK on our Cisco WL APs. I'm not sure if it's AES-based CCMP or not & I don't see an option to turn CCMP on either.
So my question is are we less vulnerable? Can anybody confirm?
Quoting AAInfoSec: We're using WPA2 with AES/PSK on our Cisco WL APs. I'm not sure if it's AES-based CCMP or not & I don't see an option to turn CCMP on either.
Only Cisco and your knowledge of your config can tell you how vulnerable you are. If it is AES, then it is AES-CCMP. AES-CCMP is vulnerable to several forms of the attack. Severity depends on the platform. The paper is only 16 pages. Check Table 3 for a quick summary, but definitely devote the time to understand the impacts of the whole paper.
It is one attack method affecting 4 handshakes, and 3 data confidentiality protocols that vary according to platform. Impacts include decryption, traffic modification, and traffic injection. Some only affect the wireless network, some permit traffic injection to the entire network.
Side Note: I am also concerned with the GCMP risk, especially bidirectional traffic injection. Even when limited to just broadcast/ multicast traffic, the number of autoconfiguration, homenet, zeroconf, even dhcp, IPv6 RA, name resolution. Lots of things to think about.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ontime
# Software to download the schedule for all public bus lines in Curitiba.
#
# Copyright (C) 2011 by Diego W. Antunes <devlware@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import urllib2
from urllib2 import Request, urlopen, URLError, HTTPError
from BeautifulSoup import BeautifulSoup
import os
from os.path import join, getsize
import tempfile
import random
import thread, time
import threading
import string
import sqlite3
import getopt, sys
import hashlib
import datetime
__version__ = "1.0"
__author__ = 'Diego W. Antunes'
__license__ = 'MIT'
class Config(object):
    """Static configuration: URBS site URLs, day codes, and sqlite DDL."""
    baseurl = 'http://www.urbs.curitiba.pr.gov.br'
    horariourl = 'PORTAL/tabelahorario/'
    captchaurl = 'PORTAL/tabelahorario/cap.php'
    silent = False

    # Day-of-service codes used by the site's web form ("0".."3").
    DIAS = ["todos", "util", "sabado", "domingo"]
    #SENTIDOS = ["ida", "volta"]
    #PERIODOS = ["manha", "entrepico", "tarde"]
    DICT_DIAS = dict(zip("0123", DIAS))
    #DICT_SENTIDOS = dict(zip("IV", SENTIDOS))

    # Default sqlite database file name.
    database = 'ontime.sqlite'

    # DDL executed by IMBDataBase at start-up.
    CreateCaptchaCode = 'CREATE TABLE IF NOT EXISTS CaptchaCode \
(pk INTEGER PRIMARY KEY NOT NULL, shasum TEXT, code TEXT)'
    CreateCaptchaSha1 = 'CREATE TABLE IF NOT EXISTS CaptchaSha1 \
(pk INTEGER PRIMARY KEY NOT NULL, fn TEXT, shasum TEXT, size INTEGER, occurrences INTEGER)'
    CreateSchedule = 'CREATE TABLE IF NOT EXISTS Schedule \
(pk INTEGER PRIMARY KEY NOT NULL, time TEXT, hasElevator INTEGER)'
    # Bug fix: a comma was missing between the pointName and validity column
    # definitions ("pointName TEXT validity TEXT"), so sqlite folded
    # "validity" into pointName's type and the table lost that column.
    CreatePoint = 'CREATE TABLE IF NOT EXISTS Point \
(pk INTEGER PRIMARY KEY NOT NULL, pointName TEXT, validity TEXT, weekDay INTEGER)'
    CreateLine = 'CREATE TABLE IF NOT EXISTS Line \
(pk INTEGER PRIMARY KEY NOT NULL, lineName TEXT)'
class OntimeException(Exception):
    """Raised when the captcha download/decode workflow fails."""
    pass
class Schedule(object):
    """A single scheduled departure time for a bus line."""

    def __init__(self):
        """Initialize an empty schedule entry.

        Bug fix: the original body listed the bare names ``pk`` and
        ``time`` (NameError at instantiation) instead of creating
        instance attributes.
        """
        self.pk = None           # database primary key
        self.time = None         # departure time as text
        self.hasElevator = None  # truthy if the bus offers wheelchair access
class Point(object):
    """A bus stop (point) with its validity period and week-day code."""

    def __init__(self, weekDay=None):
        """Initialize a point; ``weekDay`` may be supplied up front.

        Bug fix: the original body listed bare names (``pk`` etc.) and
        referenced an undefined ``weekDay`` — it raised NameError on any
        instantiation. The new optional parameter is backward-compatible
        (calling ``Point()`` still works).
        """
        self.pk = None          # database primary key
        self.pointName = None   # stop name as shown on the site
        self.validity = None    # validity period of this timetable
        self.scheduleID = None  # foreign key into Schedule
        self.setWeekDay(weekDay)

    def setWeekDay(self, day):
        """Store the week-day code (see Config.DICT_DIAS) for this point."""
        self._weekDay = day
class Line(object):
    """A bus line identified by a numeric code and a display name."""

    def __init__(self, pk, lineName=None):
        """Create a line with code ``pk`` and optional ``lineName``.

        Bug fix: the original body listed ``self._pk`` / ``self._lineName``
        as bare expressions (never assigning the constructor arguments).
        """
        self._data = None       # parsed payload, populated elsewhere
        self.setPk(pk)
        self.setLineName(lineName)

    def setPk(self, aCode):
        # Bug fix: the original ``def setPk(self, aCode)`` was missing the
        # trailing colon — a syntax error.
        self._pk = aCode

    def setLineName(self, line):
        self._lineName = line

    def data(self):
        """Return the cached data payload (None until populated)."""
        return self._data
class IMBDataBase(Config):
    """sqlite3-backed storage for captcha hashes, codes and schedules."""

    _conn = None
    _cursor = None

    def __init__(self, database=None):
        """Open the database and make sure every table exists.

        :param database: path to the sqlite file; defaults to
            ``Config.database``. Bug fix: ``main()`` already passed a path
            here but the old constructor accepted none; the DDL constants
            are also now resolved via ``Config`` (the bare names previously
            raised NameError), and the Py2-only ``except X, e`` syntax was
            modernized.
        """
        self._conn = sqlite3.connect(database if database else Config.database)
        self._cursor = self._conn.cursor()
        try:
            # Create all the tables necessary to the project
            self._cursor.execute(Config.CreateCaptchaSha1)
            self._cursor.execute(Config.CreateCaptchaCode)
            self._cursor.execute(Config.CreateSchedule)
            self._cursor.execute(Config.CreatePoint)
            self._cursor.execute(Config.CreateLine)
        except sqlite3.Error as e:
            print("Could not create table...", e.args[0])
            sys.exit(1)
        try:
            self._conn.commit()
        except sqlite3.Error as e:
            print("Could not commit table creation...", e.args[0])

    def saveData(self, fn, sha, size):
        """Record a captcha image hash, bumping its counter if known.

        :param fn: image file name
        :param sha: sha1 hex digest of the image bytes
        :param size: file size in bytes
        """
        try:
            self._cursor.execute(
                'SELECT pk, occurrences FROM CaptchaSha1 WHERE shasum = ?',
                (sha, ))
            row = self._cursor.fetchone()
            if row:
                pk, occ = row
                try:
                    self._cursor.execute(
                        'UPDATE CaptchaSha1 SET occurrences = ? WHERE pk = ?',
                        (occ + 1, pk))
                    self._conn.commit()
                except sqlite3.Error as e:
                    print("An error occurred:", e.args[0])
                    sys.exit(1)
            else:
                try:
                    self._cursor.execute(
                        'INSERT INTO CaptchaSha1 (fn, shasum, size, occurrences) values (?, ?, ?, ?)',
                        (fn, sha, size, 1))
                    self._conn.commit()
                except sqlite3.Error as e:
                    print("An error occurred:", e.args[0])
                    sys.exit(1)
        except sqlite3.Error as e:
            print("An error occurred:", e.args[0])
            sys.exit(2)

    def closeDB(self):
        """Close the cursor and the underlying connection."""
        self._cursor.close()
        self._conn.close()
#class MyThread(threading.Thread):
class MyClass(ScheduleLine):
    """Downloads and parses URBS timetable pages.

    NOTE(review): ``ScheduleLine`` is not defined anywhere in this file
    (a ``MyThread(threading.Thread)`` base is commented out just above),
    so this class cannot currently be imported — confirm the intended
    base class.
    """
    def __init__(self):
        """NOTE(review): calls ``self.getName()`` (a Thread API) and passes
        undefined names ``lineName``/``weekDay``/``captchaCode`` to the
        undefined base — this looks like a half-finished conversion from a
        Thread subclass."""
        print "%s started!" % self.getName()
        ScheduleLine.__init__(self, lineName, weekDay, captchaCode)

    def run(self):
        """Install an urllib2 opener with cookie and debug handlers."""
        cookie = urllib2.HTTPCookieProcessor()
        debug = urllib2.HTTPHandler()
        self._opener = urllib2.build_opener(debug, cookie)
        # NOTE(review): ``baseurl`` is unqualified; presumably Config.baseurl.
        self._baseurl = baseurl
        self._data = { 'info' : [] }
        urllib2.install_opener(self._opener)

    def request(self, data = None):
        """Method used to request server/carrier data."""
        # NOTE(review): ``url`` and ``handler`` are undefined in this scope;
        # they were probably meant to be parameters of this method.
        final = self._baseurl + '/' + url
        request = urllib2.Request(final)
        request.add_header('User-Agent', "Ontime/%s" % __version__)
        request.add_header('Accept-Encoding', 'gzip')
        if data is not None:
            request.add_data(data)
        descriptor = self._opener.open(request)
        data = descriptor.read()
        descriptor.close()
        soup = BeautifulSoup(data)
        handler(soup)

    def getCaptcha(self, data = None):
        """Fetch one captcha image and look up its known code by sha1 hash."""
        # NOTE(review): ``captchaurl`` is unqualified; presumably
        # Config.captchaurl (and it is a relative URL, missing baseurl).
        req = urllib2.Request(captchaurl)
        try:
            response = urllib2.urlopen(req)
        except URLError, e:
            if hasattr(e, 'reason'):
                print('We failed to reach a server.')
                print('Reason: ', e.reason)
            elif hasattr(e, 'code'):
                print('The server couldn\'t fulfill the request.')
                print('Error code: ', e.code)
            else:
                print('no problems found')
        imgData = response.read()
        # Random 10-character file name for the downloaded image.
        imgFilename = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10)) + '.png'
        imgFileString = str(imgData)
        h = hashlib.sha1()
        h.update(imgFileString)
        fileHash = h.hexdigest()
        # NOTE(review): ``self._cursor`` is never created on this class; it
        # belongs to IMBDataBase — confirm how the DB handle is shared.
        self._cursor.execute('SELECT code FROM CaptchaCode WHERE shasum = ?', (fileHash, ))
        self.captchaCode = self._cursor.fetchone()[0]
        if not self.captchaCode:
            return None
        return self.captchaCode

    def _parseMenu(self, soup):
        """Parse the line and day-type <select> menus from the page."""
        box = soup.find('select')
        # NOTE(review): the ``if`` branch below is empty, which is a syntax
        # error in Python — it needs at least ``pass`` (or inverted logic).
        if box is None:
        else:
            boxd = box.findAll()
        menu = soup.find(id="cboLinha")
        menuOps = menu.findAll("option")
        a = []
        b = []
        for i in menuOps:
            a.append(i.contents[0])
            b.append(i.attrs[0][1])
        """
        Code to store the information in the database
        for i in range(len(a)):
            cursor.execute('INSERT INTO Line (lineName, pk) values (?, ?)', (a[i], int(str(b[i]))))
        """
        tipoDia = soup.find(id="cboTipoDia")
        opcoes = tipoDia.findAll("option") # returns a list
        for i in opcoes:
            print i.contents
            print i.attrs[0][1]
        # how to get the number of an option
        a[1].attrs[0][1]
        # the returned value
        u'528'
def usage():
    """Build the command-line help text for this script."""
    help_lines = [
        "Usage: %s" % sys.argv[0],
        "-d\t--database\tUses a specific <database>",
        "-o\t--download",
        "-r\t--repetition\tDefines the number of repetitions",
        "-h\t--help\t\tThis help",
    ]
    return "\n".join(help_lines)
def download(rep):
    """Spawn ``rep`` threads that each download one captcha image.

    NOTE(review): ``MyThread`` is undefined — the Thread subclass near the
    top of this section is commented out — so this function currently
    raises NameError when called. Confirm which class should be spawned.

    :param rep: number of download threads to start
    """
    home = os.path.abspath(os.environ['HOME'])
    dirName = join(home, 'tmp', 'img')
    if os.path.exists(dirName):
        os.chdir(dirName)
    else:
        # The target directory must already exist; bail out otherwise.
        sys.exit(1)
    # run the easy stuff, create a thread and make it download an captcha image
    i = 0
    for x in range(rep):
        # startTime = datetime.datetime.now()
        mythread = MyThread(name = "Thread-%d" % (x + 1))
        mythread.start()
        # Throttle: pause 3 seconds after every ~50 spawned threads.
        if i > 50:
            time.sleep(3)
            i = 0
        i += 1
def parseImgFile(dbHandler):
    """Hash every captcha image under ~/tmp/img and record it via dbHandler."""
    imgDir = join(os.path.abspath(os.environ['HOME']), 'tmp', 'img')
    if not os.path.exists(imgDir):
        print(imgDir + 'is not available')
        sys.exit(1)
    for entry in os.listdir(imgDir):
        path = join(imgDir, entry)
        handle = open(path, 'rb')
        digest = hashlib.sha1()
        digest.update(handle.read())
        hexdigest = digest.hexdigest()
        size = getsize(path)
        handle.close()
        dbHandler.saveData(str(entry), str(hexdigest), size)
    dbHandler.closeDB()
def main():
    """Parse command-line options and dispatch the requested actions."""
    database = None
    repetition = 0
    down = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hod:r:",
                                   ["help", "download", "database=", "repetition="])
    except getopt.GetoptError as err:
        print(err)
        print(usage())
        sys.exit(2)
    for option, value in opts:
        if option in ('-h', '--help'):
            print(usage())
            sys.exit(0)
        elif option in ('-o', '--download'):
            down = True
        elif option in ('-r', '--repetition'):
            # Bug fix: getopt delivers the value as a string, and the old
            # code compared that string against the int 0 (always truthy in
            # Py2, TypeError in Py3); convert once here.
            repetition = int(value)
        elif option in ('-d', '--database'):
            database = value
        else:
            assert False, "unhandled option"
    # download the image files
    if repetition > 0 and down:
        download(repetition)
    # if a database was set, handle the downloaded files
    if database:
        myDB = IMBDataBase(database)
        parseImgFile(myDB)
# Script entry point.
if __name__ == '__main__':
    main()
|
On this website we recommend many pictures about Xtreme Garage Shelving that we have collected from various sites, such as medquest-lic.com, and of course what we recommend are the most excellent pictures for Xtreme Garage Shelving. If you like a picture on our website, please do not hesitate to visit again and get inspiration from our website.
And if you want to see more images more we recommend the gallery below, you can see the picture as a reference design from your Xtreme Garage Shelving.
Thank you for seeing gallery of Xtreme Garage Shelving, we would be very happy if you come back.
Similar Keyword Xtreme Garage Shelving : xtreme garage modular shelving, xtreme garage shelving, xtreme garage shelving assembly instructions, xtreme garage shelving instructions, xtreme garage shelving model 136 5127, xtreme garage shelving model 136 5160, xtreme garage shelving review, xtreme garage shelving reviews, xtreme garage shelving system, and more.
|
"""histogram"""
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
from pandas.io.sql import read_sql
from matplotlib.font_manager import FontProperties
from pyiem import network
from pyiem.plot import figure, get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
# Use OrderedDict to keep webform select in this same order!
_MONTH_KEYS = (
    "all", "spring", "fall", "winter", "summer",
    "jan", "feb", "mar", "apr", "may", "jun",
    "jul", "aug", "sep", "oct", "nov", "dec",
)
_MONTH_LABELS = (
    "No Month/Season Limit", "Spring (MAM)", "Fall (SON)",
    "Winter (DJF)", "Summer (JJA)",
    "January", "February", "March", "April", "May", "June",
    "July", "August", "September", "October", "November", "December",
)
MDICT = OrderedDict(zip(_MONTH_KEYS, _MONTH_LABELS))


def get_description():
    """Return a dict describing how to call this plotter."""
    desc = {
        "data": True,
        "description": """This chart displays a histogram of daily high
    and low temperatures for a station of your choice. If you optionally
    choose to overlay a given year's data and select winter, the year of
    the December is used for the plot. For example, the winter of 2017 is
    Dec 2017 thru Feb 2018. The plot details the temperature bin with the
    highest frequency.""",
        "arguments": [
            {
                "type": "station",
                "name": "station",
                "default": "IA0200",
                "label": "Select Station:",
                "network": "IACLIMATE",
            },
            {
                "type": "int",
                "name": "binsize",
                "default": "10",
                "label": "Histogram Bin Size:",
            },
            {
                "type": "select",
                "name": "month",
                "default": "all",
                "label": "Month Limiter",
                "options": MDICT,
            },
            {
                "type": "year",
                "optional": True,
                "default": datetime.date.today().year,
                "label": "Optional: Overlay Observations for given year",
                "name": "year",
            },
            {
                "type": "cmap",
                "name": "cmap",
                "default": "Blues",
                "label": "Color Ramp:",
            },
        ],
    }
    return desc
def plotter(fdict):
    """Build the daily high/low temperature histogram figure.

    Returns a (matplotlib Figure, pandas DataFrame) pair, as expected by
    the pyiem autoplot framework.
    """
    pgconn = get_dbconn("coop")
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    binsize = ctx["binsize"]
    month = ctx["month"]
    year = ctx.get("year")
    # Daily climate data is partitioned into per-state tables.
    table = "alldata_%s" % (station[:2],)
    nt = network.Table("%sCLIMATE" % (station[:2],))
    if month == "all":
        months = range(1, 13)
    elif month == "fall":
        months = [9, 10, 11]
    elif month == "winter":
        months = [12, 1, 2]
    elif month == "spring":
        months = [3, 4, 5]
    elif month == "summer":
        months = [6, 7, 8]
    else:
        # Resolve a month abbreviation ("jan".."dec") to its number.
        ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # make sure it is length two for the trick below in SQL
        months = [ts.month, 999]
    ddf = read_sql(
        f"SELECT high, low, year, month from {table} WHERE station = %s "
        "and year > 1892 and high >= low and month in %s",
        pgconn,
        params=(station, tuple(months)),
        index_col=None,
    )
    if ddf.empty:
        raise NoDataFound("No Data Found.")
    ddf["range"] = ddf["high"] - ddf["low"]
    # 2-D histogram of low (x) vs high (y) temperature.
    xbins = np.arange(ddf["low"].min() - 3, ddf["low"].max() + 3, binsize)
    ybins = np.arange(ddf["high"].min() - 3, ddf["high"].max() + 3, binsize)
    hist, xedges, yedges = np.histogram2d(
        ddf["low"], ddf["high"], [xbins, ybins]
    )
    rows = []
    for i, xedge in enumerate(xedges[:-1]):
        for j, yedge in enumerate(yedges[:-1]):
            rows.append(dict(high=yedge, low=xedge, count=hist[i, j]))
    df = pd.DataFrame(rows)
    ab = nt.sts[station]["archive_begin"]
    if ab is None:
        raise NoDataFound("Unknown station metadata.")
    # Convert raw counts to days-per-year; mask bins seen less than once.
    years = float(datetime.datetime.now().year - ab.year)
    hist = np.ma.array(hist / years)
    hist.mask = np.where(hist < (1.0 / years), True, False)
    ar = np.argwhere(hist.max() == hist)
    title = f"[{station}] {nt.sts[station]['name']}"
    subtitle = (
        "Daily High vs Low Temperature Histogram + Range between Low + High "
        f"(month={month.upper()})"
    )
    fig = figure(title=title, subtitle=subtitle)
    # Upper-right inset: 1-D histogram + CDF of the daily range.
    kax = fig.add_axes([0.65, 0.5, 0.3, 0.36])
    kax.grid(True)
    kax.text(
        0.02,
        1.02,
        "Daily Temperature Range Histogram + CDF",
        transform=kax.transAxes,
        bbox=dict(color="tan"),
        va="bottom",
    )
    kax.hist(ddf["range"].values, density=True, color="lightgreen")
    kax.set_ylabel("Density")
    kax2 = kax.twinx()
    kax2.set_ylabel("Cumulative Density")
    # NOTE(review): cumulative=100 is merely a truthy value here —
    # matplotlib accumulates left-to-right for any non-False value;
    # confirm ``cumulative=True`` was not intended.
    kax2.hist(
        ddf["range"].values,
        density=True,
        cumulative=100,
        histtype="step",
        color="k",
    )
    kax.set_xlim((kax.get_xlim()[0], ddf["range"].max()))
    # Table of Percentiles
    ranks = ddf["range"].quantile(np.arange(0, 1.0001, 0.0025))
    xpos = 0.62
    ypos = 0.37
    fig.text(
        0.65,
        ypos + 0.03,
        "Daily Temperature Range Percentiles",
        bbox=dict(color="tan"),
    )
    fig.text(xpos - 0.01, ypos - 0.01, "Percentile Value")
    ypos -= 0.01
    monofont = FontProperties(family="monospace")
    for (q, val) in ranks.iteritems():
        # Keep the table readable: only the tails plus every 10th percentile.
        if 0.02 < q < 0.98 and (q * 100.0 % 10) != 0:
            continue
        if q > 0.1 and int(q * 100) in [20, 90]:
            # Start a new column of the table.
            xpos += 0.13
            ypos = 0.37
            fig.text(xpos - 0.01, ypos - 0.01, "Percentile Value")
            ypos -= 0.01
        ypos -= 0.025
        label = f"{q * 100:-6g} {val:-6.0f}"
        fig.text(xpos, ypos, label, fontproperties=monofont)
    # Main panel: the 2-D histogram as a colour mesh.
    ax = fig.add_axes([0.07, 0.17, 0.5, 0.73])
    res = ax.pcolormesh(xedges, yedges, hist.T, cmap=get_cmap(ctx["cmap"]))
    cax = fig.add_axes([0.07, 0.08, 0.5, 0.01])
    fig.colorbar(res, label="Days per Year", orientation="horizontal", cax=cax)
    ax.grid(True)
    ax.set_ylabel(r"High Temperature $^{\circ}\mathrm{F}$")
    ax.set_xlabel(r"Low Temperature $^{\circ}\mathrm{F}$")
    xmax = ar[0][0]
    ymax = ar[0][1]
    ax.text(
        0.65,
        0.15,
        ("Largest Frequency: %.1d days\n" "High: %.0d-%.0d Low: %.0d-%.0d")
        % (
            hist[xmax, ymax],
            yedges[ymax],
            yedges[ymax + 1],
            xedges[xmax],
            xedges[xmax + 1],
        ),
        ha="center",
        va="center",
        transform=ax.transAxes,
        bbox=dict(color="white"),
    )
    # Annotate the freezing line on either axis when it is in range.
    if ddf["high"].min() < 32:
        ax.axhline(32, linestyle="-", lw=1, color="k")
        ax.text(
            ax.get_xlim()[1],
            32,
            r"32$^\circ$F",
            va="center",
            ha="right",
            color="white",
            bbox=dict(color="k"),
            fontsize=8,
        )
    if ddf["low"].min() < 32:
        ax.axvline(32, linestyle="-", lw=1, color="k")
        ax.text(
            32,
            ax.get_ylim()[1],
            r"32$^\circ$F",
            va="top",
            ha="center",
            color="white",
            bbox=dict(facecolor="k", edgecolor="none"),
            fontsize=8,
        )
    if year:
        label = str(year)
        if month == "winter":
            # Winter seasons are labelled by their December's year, so
            # shift Jan/Feb observations back one year before filtering.
            ddf["year"] = (
                ddf[((ddf["month"] == 1) | (ddf["month"] == 2))]["year"] - 1
            )
            label = "Dec %s - Feb %s" % (year, year + 1)
        ddf2 = ddf[ddf["year"] == year]
        ax.scatter(
            ddf2["low"],
            ddf2["high"],
            marker="o",
            s=30,
            label=label,
            edgecolor="yellow",
            facecolor="red",
        )
        ax.legend()
    return fig, df
# Allow running this autoplot directly as a quick smoke test.
if __name__ == "__main__":
    plotter(dict())
|
James Robert "Bob" Beach, 87, passed away peacefully on January 18, 2018 in Cary, North Carolina.
Bob was born in Oradell, New Jersey on November 26, 1930 to Raymond and Olive Beach. The youngest of four children, Bob was an adventurous child that enjoyed the outdoors and sports. He graduated from Dwight Morrow High School in Englewood, New Jersey and continued his education at Pennsylvania Military College, earning his degree in 1953.
Bob married Pat Seibert on June 20, 1953. He joined the United States Army, where he trained and became a pilot, earning the rank of Captain. He discovered his passion for flying and his love of planes, which continued throughout his life. Following his time in the Army, Bob's career took his family all over the country prior to becoming the Vice President at the United Jersey Bank in Paramus, New Jersey, where he oversaw their Aircraft Department.
Described as a very loving and altruistic person, Bob enjoyed volunteering in his community and was a member of Rotary International. Bob and Pat retired to the beach community of Quonochontaug, Rhode Island. He enjoyed outdoor activities including sailing, kayaking, clamming, and swimming. He loved spending time with friends and his family. He cherished visits with his children and grandchildren.
Bob is survived by his wife of 64 years, Pat; two children, Becky Enterline and her husband Dave of Raleigh, NC, and Jim Beach and his wife Melinda of Anderson, SC; and four grandchildren, Katherine Wood and her husband Ryan of Houston, TX, Robert Enterline and his wife Olivia of Raleigh, NC, Tripp and Alec Beach of Anderson, SC. Bob was preceded in death by his parents, Raymond and Olive; three siblings: Bill, Ann, and Pat; and his precious dog, Chloe.
Bob will truly be missed by all who knew him. A memorial service will be held on Saturday, February 3 at 2:00 p.m. at Brown-Wynne Funeral Home in Cary, NC. In lieu of flowers, memorial donations may be given to The Rotary Foundation. Please send donations to Newman Aguilar, 909 Demerius Street, Durham, North Carolina 27701 and note that the donation is made "In Memory of Bob Beach".
|
#!/usr/bin/python
import time
import random
import sys
import alsaaudio
import wave
import sys
import struct
import math
from dotstar import Adafruit_DotStar
numpixels = 60 # Number of LEDs in strip

# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24

# Idle and "lightning flash" colours (0xRRGGBB) and brightness levels (0-255).
defaultColor = 0x0000FF
defaultBright = 32
flashColor = 0xF0F0FF
flashBright = 255

strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.setBrightness(defaultBright)
strip.begin() # Initialize pins for output

# RMS thresholds that trigger / clear the lightning effect.
hi_thres = 200
low_thres = 100
lightning = False
def main():
    """Play a thunder WAV file and flash the LED strip on loud passages."""
    global strip
    global lightning
    sounds = [0, 0, 0]  # rolling window of recent RMS values
    channels = 2
    rate = 44100
    size = 1024
    out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
    out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    out_stream.setchannels(channels)
    out_stream.setrate(rate)
    out_stream.setperiodsize(size)
    strip.setBrightness(defaultBright)
    setAllLEDS(strip, [defaultColor])
    strip.show()
    thunderfiles = ['thunder.wav']
    while True:
        curfile = random.choice(thunderfiles)
        curstream = open(curfile, "rb")
        data = curstream.read(size)
        tstart = 0
        while data:
            tstart += 1
            out_stream.write(data)
            data = curstream.read(size)
            if not data:
                # Bug fix: the final read returns an empty chunk; rms()
                # divided by zero when analysing it.
                break
            rmsval = rms(data)
            sounds.append(rmsval)
            sounds.pop(0)
            sounds_avg = sum(sounds) / len(sounds)
            print(sounds_avg)
            if sounds_avg > hi_thres and not lightning:
                strip.setBrightness(flashBright)
                setAllLEDS(strip, [flashColor])
                lightning = True
            if sounds_avg < low_thres and lightning:
                strip.setBrightness(defaultBright)
                # Bug fix: the original passed [defaultBright] (an integer
                # brightness, 32) as the colour list; restore defaultColor.
                setAllLEDS(strip, [defaultColor])
                lightning = False
        curstream.close()
        # NOTE: exits after the first file finishes, by design of the
        # original script (the outer "while True" never loops twice).
        sys.exit(0)
def setAllLEDS(strip, colorlist):
    """Paint every pixel, cycling through colorlist, then latch the strip."""
    palette_size = len(colorlist)
    for position in range(numpixels):
        strip.setPixelColor(position, colorlist[position % palette_size])
    strip.show()
def rms(frame):
    """Return the scaled root-mean-square amplitude of a 16-bit PCM frame.

    :param frame: raw signed 16-bit sample bytes (native endianness)
    :return: RMS of the normalised samples scaled by 10000;
        0.0 for an empty frame
    """
    SHORT_NORMALIZE = 1.0 / 32768.0  # map int16 range onto [-1, 1)
    swidth = 2  # bytes per 16-bit sample
    count = len(frame) // swidth
    if count == 0:
        # Bug fix: the caller's final read can be empty, which previously
        # raised ZeroDivisionError here.
        return 0.0
    shorts = struct.unpack("%dh" % count, frame)
    sum_squares = 0.0
    for sample in shorts:
        n = sample * SHORT_NORMALIZE
        sum_squares += n * n
    return math.sqrt(sum_squares / count) * 10000
# Script entry point.
if __name__ == "__main__":
    main()
|
No it doesn't have coffee in it but it goes with coffee.
I put cold cranberry sauce on the top of the batter and the batter burst through the middle and the moisture from the cranberries made the cake quite wet. I think it is better to keep jam like fruits on the bottom so that the cake has nothing to hinder its rise. It was very good but took a long time to bake.
For 1 pie dish sized coffee-cake.
Take out a very small scoop of batter after it has soaked on the counter, before adding anything in the morning. Mix this with 1-2 c raw nuts, just until barely covered and 2 T washed raw sugar and a sprinkle of salt. Set aside to sprinkle across the top of the batter before you put it in the oven.
Pour the batter over the top, stirring just a small bit into the fruit so it becomes part of the cake.
Spread the batter coated nuts over the top of that.
Bake at convection bake 375* about 35-45 minutes.
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import yaml
from cliff.formatters import yaml_format
from cliff.tests import base
from cliff.tests import test_columns
import mock
class TestYAMLFormatter(base.TestBase):
    """Tests for cliff's YAML formatter (emit_one / emit_list)."""

    def test_format_one(self):
        """A single row should round-trip through YAML as a mapping."""
        sf = yaml_format.YAMLFormatter()
        c = ('a', 'b', 'c', 'd')
        d = ('A', 'B', 'C', '"escape me"')
        expected = {
            'a': 'A',
            'b': 'B',
            'c': 'C',
            'd': '"escape me"'
        }
        output = six.StringIO()
        args = mock.Mock()
        sf.emit_one(c, d, output, args)
        actual = yaml.safe_load(output.getvalue())
        self.assertEqual(expected, actual)

    def test_formattablecolumn_one(self):
        """A formattable column should serialize its machine-readable form."""
        sf = yaml_format.YAMLFormatter()
        c = ('a', 'b', 'c', 'd')
        d = ('A', 'B', 'C', test_columns.FauxColumn(['the', 'value']))
        expected = {
            'a': 'A',
            'b': 'B',
            'c': 'C',
            'd': ['the', 'value'],
        }
        args = mock.Mock()
        sf.add_argument_group(args)
        args.noindent = True
        output = six.StringIO()
        sf.emit_one(c, d, output, args)
        # Bug fix: removed a leftover debug print of the emitted line count.
        actual = yaml.safe_load(output.getvalue())
        self.assertEqual(expected, actual)

    def test_list(self):
        """Multiple rows should serialize to a YAML list of mappings."""
        sf = yaml_format.YAMLFormatter()
        c = ('a', 'b', 'c')
        d = (
            ('A1', 'B1', 'C1'),
            ('A2', 'B2', 'C2'),
            ('A3', 'B3', 'C3')
        )
        expected = [
            {'a': 'A1', 'b': 'B1', 'c': 'C1'},
            {'a': 'A2', 'b': 'B2', 'c': 'C2'},
            {'a': 'A3', 'b': 'B3', 'c': 'C3'}
        ]
        output = six.StringIO()
        args = mock.Mock()
        sf.add_argument_group(args)
        sf.emit_list(c, d, output, args)
        actual = yaml.safe_load(output.getvalue())
        self.assertEqual(expected, actual)

    def test_formattablecolumn_list(self):
        """Formattable columns inside a list should serialize likewise."""
        sf = yaml_format.YAMLFormatter()
        c = ('a', 'b', 'c')
        d = (
            ('A1', 'B1', test_columns.FauxColumn(['the', 'value'])),
        )
        expected = [
            {'a': 'A1', 'b': 'B1', 'c': ['the', 'value']},
        ]
        args = mock.Mock()
        sf.add_argument_group(args)
        args.noindent = True
        output = six.StringIO()
        sf.emit_list(c, d, output, args)
        actual = yaml.safe_load(output.getvalue())
        self.assertEqual(expected, actual)
|
Congratulations Bryan J. Soukup, Esq.!
Congratulations to Bryan J. Soukup, Esq. as he joins the national leadership team as Vice President, Government and Public Affairs. He will lead the Society's efforts to advocate for the interior design profession at the federal and state level.
|
"""
Copyright (c) 2014 Russell Nakamura
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class UnitNames(object):
    """
    Namespace of decimal (SI) unit-name strings.

    Each magnitude has a short name (``kbits``) and a long alias
    (``kilobits``) bound to the same string.
    """
    __slots__ = ()
    # bit-based units
    bits = "bits"
    kbits = "Kbits"
    kilobits = kbits
    mbits = "Mbits"
    megabits = mbits
    gbits = "Gbits"
    gigabits = gbits
    tbits = "Tbits"
    terabits = tbits
    pbits = "Pbits"
    petabits = pbits
    ebits = "Ebits"
    exabits = ebits
    zbits = "Zbits"
    zettabits = zbits
    ybits = "Ybits"
    yottabits = ybits
    # byte-based units
    bytes = "Bytes"
    kbytes = "KBytes"
    kilobytes = kbytes
    mbytes = "MBytes"
    megabytes = mbytes
    gbytes = "GBytes"
    gigabytes = gbytes
    tbytes = "TBytes"
    terabytes = tbytes
    pbytes = "PBytes"
    petabytes = pbytes
    ebytes = "EBytes"
    exabytes = ebytes
    zbytes = "ZBytes"
    zettabytes = zbytes
    ybytes = "YBytes"
    yottabytes = ybytes
class BinaryUnitNames(object):
    """
    Namespace for binary (IEC) unit-name strings, plus the SI-style
    labels iperf uses for its base-2 byte units.
    """
    # bit-based units
    bits = "bits"
    bibits = "bibits"
    kibibits = "kibibits"
    mebibits = "mebibits"
    gibibits = "gibibits"
    tebibits = "tebibits"
    pebibits = "pebibits"
    exbibits = "exbibits"
    zebibits = "zebibits"
    yobibits = "yobibits"
    # byte-based units (note: lower-case, unlike UnitNames.bytes)
    bytes = "bytes"
    bibytes = "bibytes"
    kibibytes = "kibibytes"
    mebibytes = "mebibytes"
    gibibytes = "gibibytes"
    tebibytes = "tebibytes"
    pebibytes = "pebibytes"
    exbibytes = "exbibytes"
    zebibytes = "zebibytes"
    yobibytes = "yobibytes"
    # iperf reports base-2 byte quantities with SI-style labels
    iperf_bytes = "Bytes"
    iperf_kibibytes = "KBytes"
    iperf_mebibytes = "MBytes"
    iperf_gibibytes = "GBytes"
    iperf_tebibytes = "TBytes"
    iperf_pebibytes = "PBytes"
    iperf_exbibytes = "EBytes"
    iperf_zebibytes = "ZBytes"
    iperf_yobibytes = "YBytes"
# end BinaryUnitNames
IDENTITY = 1        # no-op conversion factor
ONE = 1.0           # float one; forces true division below
BYTE = 8            # bits per byte
TO_BYTE = ONE/BYTE  # bits -> bytes multiplier (0.125)
class BaseConverter(dict):
    """
    A creator of unit-conversion dictionaries

    After ``build_conversions`` runs, ``self[from_units][to_units]`` holds
    the factor such that ``new = old * self[from_units][to_units]``.
    """
    def __init__(self, to_units, kilo_prefix):
        """
        base_converter constructor

        :param:

         - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes)
         - `kilo_prefix`: kilo multiplier matching type of units (e.g. 10**3 or 2**10)
        """
        self.to_units = to_units
        self.kilo_prefix = kilo_prefix
        # caches for the lazily built conversion tables below
        self._prefix_conversions = None
        self._bits_to_bytes = None
        self._bytes_to_bits = None
        # split the to_units list for later
        self.bit_conversions = self.byte_conversions = len(to_units)//2
        self.bit_units = to_units[:self.bit_conversions]
        self.byte_units = to_units[self.byte_conversions:]
        return

    @property
    def prefix_conversions(self):
        """
        List of lists of prefix conversions

        Row ``i`` converts a value carrying the i-th prefix to each other
        prefix of the same base unit (bits->bits or bytes->bytes).
        """
        if self._prefix_conversions is None:
            # start with list that assumes value has no prefix
            # this list is for 'bits' or 'bytes'
            # the values will be 1, 1/kilo, 1/mega, etc.
            start_list = [self.kilo_prefix**(-power)
                          for power in range(self.bit_conversions)]
            self._prefix_conversions = self.conversions(conversion_factor=1,
                                                        start_list=start_list)
        return self._prefix_conversions

    @property
    def bits_to_bytes(self):
        """
        List of conversions for bits to bytes

        Same table shape as ``prefix_conversions``, scaled by 1/8.
        """
        if self._bits_to_bytes is None:
            self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)
        return self._bits_to_bytes

    @property
    def bytes_to_bits(self):
        """
        list of conversions for bytes to bits

        Same table shape as ``prefix_conversions``, scaled by 8.
        """
        if self._bytes_to_bits is None:
            self._bytes_to_bits = self.conversions(conversion_factor=BYTE)
        return self._bytes_to_bits

    def conversions(self, conversion_factor, start_list=None):
        """
        Creates the converter-lists

        :param:

         - `conversion_factor`: multiplier for values (8 or 1/8, or 1)
         - `start_list`: if given, use to start the conversion-list

        :return: list of conversion_lists
        """
        if start_list is None:
            # assume that prefix_conversions exists (not safe, but...)
            start_list = self.prefix_conversions[0]
        # start with byte_factor times the base conversions (1, 1/kilo, etc.)
        converter_list = [[conversion_factor * conversion
                           for conversion in start_list]]
        for previous in range(self.bit_conversions - 1):
            # 'pop' last item from previous list
            # and prepend one higher-power conversion
            next_conversions = ([self.kilo_prefix**(previous+1) * conversion_factor] +
                                converter_list[previous][:-1])
            converter_list.append(next_conversions)
        return converter_list

    def build_conversions(self):
        """
        builds the dictionary

        Each name in ``to_units`` becomes a key whose value maps every
        other unit name to its multiplicative conversion factor.
        """
        # from bits to bits or bytes
        for index, units in enumerate(self.bit_units):
            self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] +
                                        self.bits_to_bytes[index])))
        # from bytes to bits or bytes
        for index, units in enumerate(self.byte_units):
            self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] +
                                        self.prefix_conversions[index])))
        return
# end class BaseConverter
# Decimal bit units, smallest to largest; the index order must match the
# prefix powers BaseConverter generates.
bit_units = [UnitNames.bits,
             UnitNames.kbits,
             UnitNames.mbits,
             UnitNames.gbits,
             UnitNames.terabits,
             UnitNames.petabits,
             UnitNames.exabits,
             UnitNames.zettabits,
             UnitNames.yottabits]

# Decimal byte units in the same order as bit_units.
byte_units = [UnitNames.bytes,
              UnitNames.kbytes,
              UnitNames.mbytes,
              UnitNames.gbytes,
              UnitNames.terabytes,
              UnitNames.petabytes,
              UnitNames.exabytes,
              UnitNames.zettabytes,
              UnitNames.yottabytes]

# Combined target-unit list: first half bits, second half bytes
# (the split BaseConverter relies on).
decimal_to_units = bit_units + byte_units

KILO = 10**3  # decimal (SI) kilo prefix
class UnitConverter(BaseConverter):
    """
    The UnitConverter makes conversions based on a base-10 system

    Usage::

        converted = old * UnitConverter()[old_units][new_units]
    """
    def __init__(self):
        super(UnitConverter, self).__init__(to_units=decimal_to_units,
                                            kilo_prefix=KILO)
        # populate the lookup table eagerly
        self.build_conversions()
        return
# end class UnitConverter

# Alias emphasising the base-10 (decimal) semantics.
DecimalUnitConverter = UnitConverter
# Binary (IEC) bit units, smallest to largest.
to_bits = [BinaryUnitNames.bits,
           BinaryUnitNames.kibibits,
           BinaryUnitNames.mebibits,
           BinaryUnitNames.gibibits,
           BinaryUnitNames.tebibits,
           BinaryUnitNames.pebibits,
           BinaryUnitNames.exbibits,
           BinaryUnitNames.zebibits,
           BinaryUnitNames.yobibits]

# Binary (IEC) byte units in the same order.
to_bytes = [BinaryUnitNames.bytes,
            BinaryUnitNames.kibibytes,
            BinaryUnitNames.mebibytes,
            BinaryUnitNames.gibibytes,
            BinaryUnitNames.tebibytes,
            BinaryUnitNames.pebibytes,
            BinaryUnitNames.exbibytes,
            BinaryUnitNames.zebibytes,
            BinaryUnitNames.yobibytes]

# Target-unit list for the binary converter: bits first, then bytes.
binary_to_units = to_bits + to_bytes

KIBI = 2**10  # binary (IEC) kilo prefix
class BinaryUnitconverter(BaseConverter):
    """
    The BinaryUnitconverter is a conversion lookup table for binary data

    Usage::

       converted = old * UnitConverter[old units][new units]

    Use class UnitNames to get valid unit names
    """
    def __init__(self):
        super(BinaryUnitconverter, self).__init__(to_units=binary_to_units,
                                                  kilo_prefix=KIBI)
        # populate the lookup table eagerly
        self.build_conversions()
        return
# end class BinaryUnitConverter
# NOTE(review): this rebinds the module-level ``to_bits``/``to_bytes`` names
# defined above for the plain binary converter — an artifact of the notebook
# export this module appears to be; the earlier lists survive inside
# ``binary_to_units``.
to_bits = [BinaryUnitNames.bits,
           BinaryUnitNames.kibibits,
           BinaryUnitNames.mebibits,
           BinaryUnitNames.gibibits,
           BinaryUnitNames.tebibits,
           BinaryUnitNames.pebibits,
           BinaryUnitNames.exbibits,
           BinaryUnitNames.zebibits,
           BinaryUnitNames.yobibits]

# iperf-flavoured byte units: base-2 magnitudes with SI-style labels.
to_bytes = [BinaryUnitNames.iperf_bytes,
            BinaryUnitNames.iperf_kibibytes,
            BinaryUnitNames.iperf_mebibytes,
            BinaryUnitNames.iperf_gibibytes,
            BinaryUnitNames.iperf_tebibytes,
            BinaryUnitNames.iperf_pebibytes,
            BinaryUnitNames.iperf_exbibytes,
            BinaryUnitNames.iperf_zebibytes,
            BinaryUnitNames.iperf_yobibytes]

# Target-unit list for the iperf converter: bits first, then bytes.
iperf_binary_to_units = to_bits + to_bytes
class IperfbinaryConverter(BaseConverter):
    """
    The IperfbinaryConverter is a conversion lookup table for binary data

    Usage::

       converter = IperfbinaryConverter()
       converted = old * converter[old units][new units]

    Use class UnitNames to get valid unit names
    """
    def __init__(self):
        super(IperfbinaryConverter, self).__init__(to_units=iperf_binary_to_units,
                                                   kilo_prefix=KIBI)
        # populate the lookup table eagerly
        self.build_conversions()
        return
# end class BinaryUnitConverter
# NOTE(review): these demo cells compare __name__ to "__builtin__", which was
# only true when the file executed inside an old (Python 2 era) IPython
# notebook context; as a normal script or import they never run — confirm
# whether "__main__" was intended.
if __name__ == "__builtin__":
    unit_converter = UnitConverter()
    bits = 10**6
    converted = bits * unit_converter['bits']['Mbits']
    print("{0} Mbits".format(converted))

if __name__ == "__builtin__":
    binary_converter = BinaryUnitconverter()
    MBytes = 1
    bits = MBytes * binary_converter[BinaryUnitNames.mebibytes][UnitNames.bits]
    print("{0:,} bits".format(bits))

if __name__ == '__builtin__':
    mbits = bits * unit_converter[UnitNames.bits][UnitNames.mbits]
    print('{0} Mbits'.format(mbits))
|
Dr. Roslyn F. Schneider was previously the Global Patient Affairs Lead driving patient-centricity within Pfizer's Medical Organization. She received her BS from the Sophie Davis School of Biomedical Education of the City College of New York, her MD from Mount Sinai School of Medicine, and her MSc in Pharmaceutical Medicine from Hibernia College. Before joining Pfizer, she provided direct patient care at Beth Israel Medical Center NY, directed specialty programs for doctors in training, and was a Clinical Professor of Medicine at the Albert Einstein College of Medicine.
Dr. Schneider is a fellow of both the American College of Physicians and the American College of Chest Physicians where she has served on the Ethics Committee and the Clinical Research Network. She is a member of the Board of Trustees for the Physician Assistants Foundation and has served as a corporate board member of the Healthcare Businesswomen's Association.
Acute vs. Chronic Cough: What Is It Really Trying To Tell You?
Breast Self-Exams: Should You Do Them?
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
Follow statistics of my keystrokes
==================================
"""
# Created Wed Sep 16 18:40:15 2015
# Author: Óscar Nájera
from __future__ import division, absolute_import, print_function
import re
import os
import collections
import argparse
parser = argparse.ArgumentParser(description='Key press statistics')
parser.add_argument('-file', default=os.path.expanduser('~/keylog'),
help='Key pressing log file')
parser.add_argument('-txt', action='store_true',
help='is it a text file?')
parser.add_argument('-chr', action='store_true',
help='Count the shift chording ')
arguments = parser.parse_args()
with open(arguments.file, 'r') as keyshom:
data = keyshom.read()
if not arguments.txt:
kdata = re.findall(r'KeyPress.*?\[(\w+)\]', data)
if arguments.chr:
print('yo')
kdata = re.findall(r'KeyPress.*?\[(\w+)\].*?\[Shift.*?\]', data)
collstat = collections.Counter(kdata)
print('Most typed characters')
for i, (char, count) in enumerate(collstat.most_common()):
print(i, char, count)
if arguments.txt:
pair_data = re.findall(r'(\w.)', kdata) + re.findall(r'(.\w)', kdata)
else:
pair_data = list(zip(kdata[:-1], kdata[1:]))
pair_stat = collections.Counter(pair_data)
print('Most recurrent key successions')
for i, (pair, count) in enumerate(pair_stat.most_common(50)):
print(i, pair, count)
|
Thank you so much for the connection. I have been in contact with the person and she is sending the mouse.
What a great concept you have, quite clever. There is a zebra, a tiger, an elephant and a mouse in the series.
If you want to bring back some old memories, Susie found a site where you can watch and download the original Saturday Morning Cartoons Beech-Nut commercial, which has these striped animals in it.
Now we need to find the GREEN STRIPED TIGER that Phyl is looking for.
I’m looking for a green striped stuffed animal from Beech Nut Fruit stripe gum. I’m sorry I don’t have a photo. These were stuffed animals maybe in the 60’s for Fruit Stripe Gum. There were 4 animals…a zebra, and maybe a horse and an elephant. I know my green (lime) striped one was a tiger. I named him Toenail, and he spoke thru me in a funny voice. I would love to find another.
Susie has emailed me to ask for help finding the Orange Striped Mouse in this series. She writes:.
I believe there were four animals to choose from when you sent in your money and the gum wrappers. Mine was an orange stripe mouse with big ears that had wire in them so you could shape them.
I ran your search, Sonya, but I could not find it.
That’s it, Michael. Were these for sale? I’ll get the photo regardless.
They weren’t for sale, they’re from a website that has pictures of vintage cereal boxes and premiums.
Thanks for the page Michael. I think she would enjoy knowing their names.
#060808-4: I am not certain if Phyl was successful in finding his green tiger but I have all 4-striped animals, in the original boxes, from 1965 from Beech-Nut Baby foods in New York. I am looking to sell all of them, and would like to know if you are interested.
Just saw your post and was wondering if you still have any of the striped animals for sale and how much?
Yes, we still have the mouse, horse and elephant in original packing. $40/each. Let me know.
Hi I just found this post in a search. Is the elephant still available?
Hi. I’m looking for the tiger. If anyone still has one I would be very much interested!!! Thanks!!!
Do you still have the orange stripe mouse? This was my brothers favorite toy from that time. I would like to find one for him for his birthday.
We have sold all but the red and white horse from the 60’s Beech Nut Gum collection. So please keep us in mind if anyone is looking for it.
|
import math
import time
def prime_list(lower, upper):
    """Return ``[2, 3, 5, 7]`` plus every prime ``n`` with ``lower <= n < upper``.

    The seed list is kept for backward compatibility; callers are
    expected to pass ``lower > 7`` so the seed and the scanned range do
    not overlap.  Candidates are tested by trial division up to
    ``sqrt(n)``.
    """
    p_ls = [2, 3, 5, 7]
    # Start the scan at an odd number: the original stepped by 2 from
    # `lower` itself, so an even `lower` scanned only even numbers and
    # found no primes at all -- fixed.
    start = lower if lower % 2 else lower + 1
    for n in range(start, upper, 2):
        # Odd divisors suffice since n is odd.
        for d in range(3, int(math.sqrt(n)) + 1, 2):
            if n % d == 0:
                break
        else:
            p_ls.append(n)
    return p_ls
def is_prime(x):
    """Return True if ``x`` is prime, using trial division by odd numbers.

    Fixes over the original: 2 is now reported prime (the even-number
    test used to run first and reject it), and values below 2 --
    including negatives, 0 and 1 -- are now reported composite (the
    divisor loop never ran for them, so they were reported prime).
    """
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    d = 3
    upper = int(x ** 0.5) + 1
    while d <= upper:
        if x % d == 0:
            return False
        d += 2
    return True
def prime_generator():
    """Yield the primes 5, 7, 11, 13, ... indefinitely.

    2 and 3 are intentionally skipped; the caller seeds its prime table
    with them and only asks this generator for the rest.
    """
    candidate = 5
    while True:
        # Trial-divide by every d in [3, sqrt(candidate)].
        for divisor in range(3, int(candidate ** 0.5 + 1)):
            if candidate % divisor == 0:
                break
        else:
            yield candidate
        candidate += 2
# Project Euler 50 driver: find the prime below 1,000,001 that is the sum
# of the longest run of consecutive primes.
start = time.time()
gen = prime_generator()
primes = [2, 3]
n = 5
longest = 0  # length of the best run found so far
prime = 0    # the prime that best run sums to
while n < 1000001:
    if not is_prime(n):
        n += 2
        continue
    # Grow the consecutive-prime table until it reaches n; since n is
    # prime, the table then ends exactly at n.
    while primes[-1] < n:
        primes.append(next(gen))
    # Slide the start index l over the table, looking for a run of
    # consecutive primes that sums exactly to n.
    # NOTE(review): l starts at 1 after the first increment, so runs
    # beginning with primes[0] == 2 are never tested -- kept as-is to
    # preserve the original behaviour.
    keep_checking_num = True
    l = 0
    while keep_checking_num:
        l += 1
        length = 0
        total = 0
        for i in range(l, len(primes)):
            total += primes[i]
            length += 1
            if total > n:
                break
        if total == n:
            # The search always terminates here: once l reaches the last
            # index, total == primes[-1] == n with length 1.
            if length > longest:
                longest = length
                prime = n
                print(prime)
            keep_checking_num = False
    n += 2
print(longest, prime)
print(time.time() - start)
# The original file ended with a syntactically invalid, unfinished
# fragment (`for i in range(primes): ... if sum(primes)`) that broke
# parsing of the whole module; it has been removed.
|
This entry was posted on Monday, October 29th, 2012 at 10:32 am and tagged with 2012, acquaintances, benefits, class, conditions, convenience, desire, desteni, elegance, elitism, envy, etiquette, evil, friendship, journey to life, luxury, marlenlife, money, politics, social stratus, spoiled brats, stuck up, wealth, wishing for happiness and posted in Behavior, concern, cultural values, Desteni, education, ego, elitism, energy, Equal Rights = Equal Responsibilities, favoritism, freechoice, freespiritism, god, good, human nature, idealism, insanity, Journey To Life, liberation, Life, mind, mind control, mindsplit, money, Money Limitation, motivation, parental patterns, personality design, positivity, power, Sculpting in Time, self esteem, self image, self reflection, special, thoughts, value, world equality, world s ystem, worth. You can follow any responses to this entry through the RSS 2.0 feed.
Me identifico totalmente. De repente toda clase de sensaciones y recuerdos en los que me encontraba pensado y comportándome de la misma manera.
|
"""Shared functions and other definitions."""
from __future__ import absolute_import
import collections
import os
import platform
import random
import shutil
import string
import sys
import tempfile
from functools import partial # pylint: disable=unused-import
from typing import (IO, Any, AnyStr, Callable, # pylint: disable=unused-import
Dict, Iterable, List, MutableMapping, MutableSequence,
Optional, Union)
import pkg_resources
from mypy_extensions import TypedDict
from schema_salad.utils import json_dump, json_dumps # pylint: disable=unused-import
from six.moves import urllib, zip_longest
from typing_extensions import Deque, Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
# no imports from cwltool allowed
if os.name == 'posix':
if sys.version_info < (3, 5):
import subprocess32 as subprocess # pylint: disable=unused-import
else:
import subprocess # pylint: disable=unused-import
else:
import subprocess # type: ignore
# Default container image used on Windows hosts (provides a POSIX shell).
windows_default_container_id = "frolvlad/alpine-bash"

# Shape of a CWL "Directory" literal as handled by these utilities.
Directory = TypedDict('Directory',
                      {'class': Text, 'listing': List[Dict[Text, Text]],
                       'basename': Text})

# System temp directory with a trailing separator, e.g. "/tmp/".
DEFAULT_TMP_PREFIX = tempfile.gettempdir() + os.path.sep

# Child processes to be terminated when the tool shuts down.
processes_to_kill = collections.deque()  # type: Deque[subprocess.Popen]
def versionstring():
    # type: () -> Text
    """Return '<program> <version>' for the running cwltool.

    Falls back to 'unknown version' when the package metadata cannot be
    resolved.
    """
    pkg = pkg_resources.require("cwltool")
    version = pkg[0].version if pkg else "unknown version"
    return u"%s %s" % (sys.argv[0], version)
def aslist(l):  # type: (Any) -> MutableSequence[Any]
    """Return ``l`` unchanged if it is already a mutable sequence;
    otherwise wrap it in a one-element list."""
    return l if isinstance(l, MutableSequence) else [l]
def copytree_with_merge(src, dst):  # type: (Text, Text) -> None
    """Recursively copy ``src`` into ``dst``, merging with any existing
    tree instead of failing like ``shutil.copytree`` does.

    Directory metadata is copied only when the destination directory is
    newly created; files are copied with ``shutil.copy2``.
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
        shutil.copystat(src, dst)
    for entry in os.listdir(src):
        source_path = os.path.join(src, entry)
        dest_path = os.path.join(dst, entry)
        if os.path.isdir(source_path):
            copytree_with_merge(source_path, dest_path)
        else:
            shutil.copy2(source_path, dest_path)
def docker_windows_path_adjust(path):
    # type: (Optional[Text]) -> Optional[Text]
    r"""Convert a Windows path into the Unix-style form Docker expects.

    Changes only Windows paths so that they can be appropriately passed
    to the ``docker run`` command, as Docker treats them as Unix paths.
    Example: ``C:\Users\foo`` becomes ``/C/Users/foo`` (Docker for
    Windows) or ``/c/Users/foo`` (Docker Toolbox).  On non-Windows hosts,
    or for ``None``, the input is returned unchanged.
    """
    if path is not None and onWindows():
        split = path.split(':')
        if len(split) == 2:
            if platform.win32_ver()[0] in ('7', '8'):  # type: ignore
                # Windows 7/8 implies Docker Toolbox, which expects a
                # lowercase drive letter.
                split[0] = split[0].lower()
            else:
                split[0] = split[0].capitalize()
                # Docker for Windows uses uppercase windows Drive letters
            path = ':'.join(split)
        # Strip the drive colon and normalise separators, then ensure a
        # leading slash.
        path = path.replace(':', '').replace('\\', '/')
        return path if path[0] == '/' else '/' + path
    return path
def docker_windows_reverse_path_adjust(path):
    # type: (Text) -> (Text)
    r"""Convert a Docker (Unix-style) path back into a Windows path.

    Example: ``/C/Users/foo`` becomes ``C:\Users\foo``.  Only applies on
    Windows hosts; elsewhere (or for ``None``) the path is returned
    unchanged.

    :raises ValueError: on Windows, if the path does not start with
        ``/`` (i.e. is not a Docker-style absolute path).
    """
    if path is not None and onWindows():
        if path[0] == '/':
            path = path[1:]
        else:
            raise ValueError("not a docker path")
        splitpath = path.split('/')
        # The first component is the drive letter; restore its colon.
        splitpath[0] = splitpath[0]+':'
        return '\\'.join(splitpath)
    return path
def docker_windows_reverse_fileuri_adjust(fileuri):
    # type: (Text) -> (Text)
    r"""Re-insert the drive-letter colon into a Docker-style file URI.

    On Docker-on-Windows, file URIs do not contain ``:`` in the path.
    To convert such a URI back to a Windows-compatible one, a ``:`` is
    added after the drive letter, so ``file:///E/var`` becomes
    ``file:///E:/var``.  On non-Windows hosts the URI is returned
    unchanged.

    :raises ValueError: on Windows, if the URI scheme is not ``file``.
    """
    if fileuri is not None and onWindows():
        if urllib.parse.urlsplit(fileuri).scheme == "file":
            filesplit = fileuri.split("/")
            # filesplit[3] is the drive letter in file:///E/...
            if filesplit[3][-1] != ':':
                filesplit[3] = filesplit[3]+':'
                return '/'.join(filesplit)
            return fileuri
        raise ValueError("not a file URI")
    return fileuri
def onWindows():
    # type: () -> (bool)
    """Return True when the current host OS is Windows."""
    is_windows = os.name == 'nt'
    return is_windows
def convert_pathsep_to_unix(path):  # type: (Text) -> (Text)
    """Replace backslash separators with forward slashes on Windows.

    ``os.path.join`` produces backslash-joined paths on Windows, but
    paths handed to Docker must use ``/``.  On non-Windows hosts, or for
    ``None``, the path is returned untouched.
    """
    if path is None or not onWindows():
        return path
    return path.replace('\\', '/')
def cmp_like_py2(dict1, dict2):  # type: (Dict[Text, Any], Dict[Text, Any]) -> int
    """Compare two records by their "position" lists, Python-2 style.

    Python 3 refuses to order ``str`` against ``int``; this comparator
    restores the py2 ordering for heterogeneous lists of ``int`` and
    ``str``:

    * element-wise comparison, first difference decides;
    * a shorter list that is a prefix of the longer one sorts first;
    * if either differing element is a ``str``, both are compared as
      strings (so any ``str`` sorts after any ``int``, as in py2).

    Returns -1, 0 or 1, suitable for ``functools.cmp_to_key``.
    """
    left, right = dict1["position"], dict2["position"]
    for a, b in zip_longest(left, right):
        if a == b:
            continue
        if a is None:
            # Left list exhausted first: it sorts earlier.
            return -1
        if b is None:
            # Right list exhausted first: left sorts later.
            return 1
        if isinstance(a, str) or isinstance(b, str):
            return 1 if str(a) > str(b) else -1
        return 1 if a > b else -1
    return 0
def bytes2str_in_dicts(inp  # type: Union[MutableMapping[Text, Any], MutableSequence[Any], Any]
                       ):
    # type: (...) -> Union[Text, MutableSequence[Any], MutableMapping[Text, Any]]
    """Recursively decode every ``bytes`` value in ``inp`` to text, in place.

    Mappings and sequences are walked and mutated; ``bytes`` leaves are
    returned UTF-8 decoded; every other value passes through untouched.
    """
    if isinstance(inp, MutableMapping):
        for key in inp:
            inp[key] = bytes2str_in_dicts(inp[key])
        return inp
    if isinstance(inp, MutableSequence):
        for index, item in enumerate(inp):
            inp[index] = bytes2str_in_dicts(item)
        return inp
    if isinstance(inp, bytes):
        return inp.decode('utf-8')
    return inp
def visit_class(rec, cls, op):
    # type: (Any, Iterable, Union[Callable[..., Any], partial[Any]]) -> None
    """Recursively apply ``op`` to every mapping in ``rec`` whose
    "class" field is one of ``cls``.

    A matching mapping is visited before its children (pre-order).
    """
    if isinstance(rec, MutableMapping):
        if "class" in rec and rec.get("class") in cls:
            op(rec)
        for key in rec:
            visit_class(rec[key], cls, op)
    if isinstance(rec, MutableSequence):
        for entry in rec:
            visit_class(entry, cls, op)
def visit_field(rec, field, op):
    # type: (Any, Iterable, Union[Callable[..., Any], partial[Any]]) -> None
    """Recursively replace ``rec[field]`` with ``op(rec[field])`` in
    every mapping found inside ``rec`` that carries that field."""
    if isinstance(rec, MutableMapping):
        if field in rec:
            rec[field] = op(rec[field])
        for key in rec:
            visit_field(rec[key], field, op)
    if isinstance(rec, MutableSequence):
        for entry in rec:
            visit_field(entry, field, op)
def random_outdir():  # type: () -> Text
    """Return the random output directory name for this process.

    The '/XXXXXX' name is generated once and cached as a function
    attribute, so every subsequent call returns the same value.
    """
    cached = getattr(random_outdir, 'outdir', None)
    if cached is None:
        letters = (random.choice(string.ascii_letters) for _ in range(6))
        cached = '/' + ''.join(letters)
        random_outdir.outdir = cached  # type: ignore
    return cached  # type: ignore
|
Access, download, and save income tax forms for Virginia Tax Help. Click on the form name for additional details and access to prior year forms.
Virginia Schedule Adj Instructions Find Virginia schedule adj instructions at eSmart Tax today. E-file your state and federal tax returns with us and receive the biggest refund guaranteed!
Virginia Form 760-Pmt Instructions Find Virginia form 760-pmt instructions at eSmart Tax today. E-file your state and federal tax returns with us and receive the biggest refund guaranteed!
Virginia Form 760 Instructions Find Virginia form 760 instructions at eSmart Tax today. E-file your state and federal tax returns with us and receive the biggest refund guaranteed!
|
# class linearregressionresultsviewer:
# registered = True # Value to define db operator
#
# def __init__(self):
# self.n = 0
# self.mydata = dict()
# self.variablenames = []
#
# def step(self, *args):
# # if self.n == 0:
# # print args, len(args)
# # self.noofvariables = args[4]
# # self.noofclusters = args[5]
# try:
# self.variablenames.append(str(args[0]))
# self.mydata[(args[0])] = str(args[1]), str(args[2]), str(args[3]), str(args[4])
# self.n += 1
# # if self.n <= self.noofvariables :
# # self.variablenames.append(str(args[1]))
# except (ValueError, TypeError):
# raise
#
# def final(self):
# yield ('linearregressionresult',)
#
# myresult = "{\"resources\": [{\"name\": \"linear-regression\", \"profile\": \"tabular-data-resource\", \
# \"data\": [[\"variable\", \"estimate\", \"standard_error\", \"t-value\", \"p-value\"]"
# if len(self.variablenames) != 0:
# myresult += ","
# for i in xrange(len(self.variablenames)):
# myresult += "[\"" + str(self.variablenames[i]) + "\","
# # row=[]
# # row.append(self.variablenames[i])
# for j in xrange(4):
# myresult += "\"" + str(self.mydata[(self.variablenames[i])][j]) + "\""
# if j < 3:
# myresult += ","
# # row.append(self.mydata[(self.variablenames[i])][j])
# # myresult+= str(row)
# if i < len(self.variablenames) - 1:
# myresult += "],"
#
# if len(self.variablenames) != 0:
# myresult += "]"
#
# myresult += "],\"schema\": { \"fields\": [{\"name\": \"variable\", \"type\": \"string\"}, \
# {\"name\": \"estimate\", \"type\": \"number\"},{\"name\": \"standard_error\", \"type\": \"number\"}, \
# {\"name\": \"t-value\", \"type\": \"number\"}, {\"name\": \"p-value\", \"type\": \"string\"}] } }]}"
#
# yield (myresult,)
#
#
# if not ('.' in __name__):
# """
# This is needed to be able to test the function, put it at the end of every
# new function you create
# """
# import sys
# from functions import *
#
# testfunction()
# if __name__ == "__main__":
# reload(sys)
# sys.setdefaultencoding('utf-8')
# import doctest
#
# doctest.testmod()
|
OTAN is accepting applications for its two academies: the Technology Integration Mentor Academy (TIMAC) and the Online Teaching Academy (OTAC), but the deadline is fast approaching. These are year-long projects that focus on integrating technology into the classroom, and effective practices in online teaching and learning, respectively. Get your application in now!
OCTAE is Working to Get Everyone On!
Important organizational changes have taken place within the California Department of Education. With the retirement of Dr. Patricia Terry, manager of the Adult Education Office (AEO), Superintendent of Public Instruction Tom Torlakson has made the decision to move the AEO out of the Coordinated Student Support and Adult Education Division (CSSAED) under the direction of Gordon Jackson and into the ... Complete article..
For a May 19, 2014 post, Ashley Cronin and the team collected resources from Edutopia and the Web to help us navigate the possibilities of blended learning, an approach that combines face-to-face and technology-mediated learning, with some student control over time, path, place, or pace.
New Web-based Class Activity for September!
Are you preparing your students for the high school equivalency test? Looking for non-fiction reading material at multiple reading levels? Do you want to integrate the College and Career Readiness standards into your classroom? Could you use current articles for your ESL students who are reading at a 4th grade level or higher? Newsela is a Web site containing articles on non-fiction topics. But it is much more.
OTAN offers short, online workshops to meet the demands of busy adult educators. Check the California Adult Education Professional Development calendar for our online workshops as we gear up for another exciting year!
Virtual Nerd Mobile Math by Pearson Education is a free app that provides on-the-go access to Virtual Nerd's extensive video library. Interactive tutorial videos review fundamental math concepts in middle school and high school. App is designed for iPhone and iPad, but may also work with an iPod touch. Requires iOS 6.0 or later.
This site from Pearson offers over 1,500 video lessons covering Middle Grades Math through Algebra 2. The instructor is easy to follow and this might be a great resource for learners who struggle with basic math concepts. It would also be a great resource for the "flipped" classroom. After identifying a learners' math level, assign videos to watch at home and then any questions can be answered in class. Also see the Virtual Nerd Mobile Math app (above) for on-the-go learning.
The Dallas Fed has created interactive whiteboard activities for use in the high school classroom. The new lessons are part of the popular Everyday Economics series and Building Wealth in the Classroom. Download the .notebook files and accompanying PDF procedure documents at the site. Their Economic Education-Classroom Resources section has many more resources for teaching financial literacy. You may want to explore them, too.
English Current is a free ESL Web site for students and teachers of English. The lessons here are designed to take up about 1 hr of class time. Lessons focus on speaking (50%), reading comprehension (30%), and vocabulary building (20%). Good for English conversation lessons. How to use English Current . Some lessons have printable worksheets for those without Internet access. There is a lot of variety in topics. It has lists of conversation questions, phrasal verbs, and work-related idioms, as well as exercises on many language learning topics. For Intermediate-Advanced learners.
Are you looking for teaching materials or lessons that will help prepare your students for the GED while addressing a real health issue affecting their lives and the lives of their family and friends? Then try these three new health literacy lessons designed for ABE/GED instruction which provide compelling information about the hazards of tobacco use while teaching basic literacy skills. Each lesson comes complete with lesson overview, multiple learning activities, teacher notes, and answer key.
Offers a variety of study, practice, and tests on geography in the USA and across the world. Good activities for learning the states and their capitals as well as world geography. Also offers very basic online math practice (addition, subtraction, multiplication, division), including printable worksheets. Suitable for EL Civics, ESL, ABE, and ASE classes.
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_igw
short_description: Manage an AWS VPC Internet gateway
description:
- Manage an AWS VPC Internet gateway
version_added: "2.0"
author: Robert Estelle (@erydo)
options:
vpc_id:
description:
- The VPC ID for the VPC in which to manage the Internet Gateway.
required: true
default: null
state:
description:
- Create or terminate the IGW
required: false
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
# Usage examples consumed by `ansible-doc`; must remain valid YAML.
# Fixed the garbled sentence "The Internet Gateway ID is can be accessed".
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
ec2_vpc_igw:
  vpc_id: vpc-abcdefgh
  state: present
register: igw
'''
# NOTE(review): removed a stray `if __name__ != '__main__': raise` that sat
# between these imports -- a bare `raise` with no active exception raises
# RuntimeError, so merely importing this module (as Ansible does) crashed.
try:
    import boto.ec2
    import boto.vpc
    from boto.exception import EC2ResponseError
    HAS_BOTO = True
except ImportError:
    # boto is optional at import time; main() fails with a clear message
    # when it is missing.
    HAS_BOTO = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
class AnsibleIGWException(Exception):
    """Raised when an Internet Gateway operation cannot be completed."""
def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
    """Detach and delete every Internet Gateway attached to ``vpc_id``.

    :param vpc_conn: boto VPC connection.
    :param vpc_id: ID of the VPC whose gateways should be removed.
    :param check_mode: when True, only report whether a change would be
        made; do not touch AWS.
    :returns: dict with a ``changed`` flag.
    :raises AnsibleIGWException: when AWS rejects a detach or delete.
    """
    igws = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})
    if not igws:
        return {'changed': False}
    if check_mode:
        return {'changed': True}
    for igw in igws:
        try:
            # A gateway must be detached from the VPC before it can be deleted.
            vpc_conn.detach_internet_gateway(igw.id, vpc_id)
            vpc_conn.delete_internet_gateway(igw.id)
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to delete Internet Gateway, error: {0}'.format(e))
    return {'changed': True}
def ensure_igw_present(vpc_conn, vpc_id, check_mode):
    """Create and attach an Internet Gateway to ``vpc_id`` if none exists.

    :param vpc_conn: boto VPC connection.
    :param vpc_id: ID of the VPC that should have a gateway.
    :param check_mode: when True, only report whether a change would be
        made; do not touch AWS.
    :returns: dict with a ``changed`` flag and the ``gateway_id``
        (None in check mode when one would be created).
    :raises AnsibleIGWException: if AWS reports more than one attached
        gateway, or if creation/attachment fails.
    """
    igws = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})
    if len(igws) > 1:
        raise AnsibleIGWException(
            'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
            .format(vpc_id))
    if igws:
        return {'changed': False, 'gateway_id': igws[0].id}
    else:
        if check_mode:
            return {'changed': True, 'gateway_id': None}
        try:
            igw = vpc_conn.create_internet_gateway()
            vpc_conn.attach_internet_gateway(igw.id, vpc_id)
            return {'changed': True, 'gateway_id': igw.id}
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to create Internet Gateway, error: {0}'.format(e))
def main():
    """Module entry point: parse arguments, connect to AWS and converge
    the VPC's Internet Gateway to the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id = dict(required=True),
            state = dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        # fail_json exits the process, so `connection` is always bound
        # when the code below runs.
        module.fail_json(msg="region must be specified")
    vpc_id = module.params.get('vpc_id')
    state = module.params.get('state', 'present')
    try:
        if state == 'present':
            result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
    except AnsibleIGWException as e:
        module.fail_json(msg=str(e))
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
I am looking for small group tours of Ireland. I would prefer the group size to be no more than 20. So far, I have found Vagabond and Wolfhound Tours. If anyone has traveled with either of these tour companies, I would be interested in your opinion. Of course, if anyone has info on any other small group tour operators for Ireland, please share your experience. Thanks so much!
|
from model.contact import Contact
from model.group import Group
from fixture.orm import ORMFixture
import random
# Direct DB access, used to verify UI actions independently of the app UI.
db = ORMFixture(host='127.0.0.1', name='addressbook', user='root', password='')


def test_add_contact_to_group(app):
    """Add a random contact to a random group and verify via the DB.

    Preconditions are created on the fly: at least one group must exist,
    and at least one contact must not yet belong to the chosen group.
    """
    old_groups = db.get_group_list()
    if len(old_groups) == 0:
        app.group.create(Group(name="New_group"))
    groups = db.get_group_list()
    group = random.choice(groups)
    old_contacts_in_group = db.get_contacts_in_group(group)
    # Ensure at least one contact exists outside the chosen group.
    if len(db.get_contact_list()) == 0 or len(db.get_contact_list()) == len(old_contacts_in_group):
        app.contact.fill_new(Contact(firstname="NEWSContact"))
    contacts = db.get_contacts_not_in_group(group)
    contact = random.choice(contacts)
    app.contact.add_contact_in_group(contact, group)
    new_contact_in_group = db.get_contacts_in_group(group)
    # Exactly one contact was added...
    assert len(old_contacts_in_group) + 1 == len(new_contact_in_group)
    # ...and it is the one we picked.
    old_contacts_in_group.append(contact)
    assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contact_in_group, key=Contact.id_or_max)
|
All returns must be accompanied by a Return Merchandise Authorization form. Returns and/or exchanges will not be accepted unless authorization has been issued by findpowercord.com . To receive an RMA (Returns Merchandise Authorization), please send an email to admin@findpowercord.com with your order number and the details of the issue you are experiencing with your item.
You should expect to receive your refund within four weeks of giving your package to the return shipper, however, in many cases you will receive a refund more quickly. This time period includes the transit time for us to receive your return from the shipper( 5 to 10 business days), the time it takes us to process your return once we receive it(3 to 5 business days), and the time it takes your bank to process our refund request ( 5 to 10 business days).
If you need to return an item, simply login to your account, view the order using the "Complete Orders" link under the My Account menu and click the Return Item(s) button, we'll notify you via e-mail of your refund once we've received and processed the returned item.
|
# coding=utf-8
"""Homogeneous storage system.
.. moduleauthor:: Stéphane Vialette <vialette@gmail.com>
"""
from .storagesystem import StorageSystem
from .storagesystemsnapshotcontroller import SuspendedStorageSystemSnapshotController
from .storagesystemexception import StorageSystemException
class HomogeneousStorageSystem(StorageSystem):
    """A storage system whose storage units all share the same capacity
    and number of cpus."""

    # storage system type
    TYPE = "homogeneous"

    def __init__(self, capacity, cpu, transfer_time_manager, name=None):
        """Initialize this homogeneous storage system.

        :param capacity: The capacity of each storage unit of this storage system.
        :type capacity: Numeric.
        :param cpu: The number of cpu of each storage unit of this storage system.
        :type cpu: int.
        :param transfer_time_manager: Manager used to compute transfer times.
        :param name: An optional name for this storage system.
        :type name: string.
        :raises StorageSystemException: if capacity is non-positive or
            cpu is non-positive or not an integer.
        """
        # Name the class explicitly in super(): the original used
        # `super(self.__class__, self)`, which recurses infinitely as
        # soon as this class is subclassed.
        super(HomogeneousStorageSystem, self).__init__(transfer_time_manager, name)
        # capacity
        if capacity <= 0:
            raise StorageSystemException("non-positive capacity '{}'".format(capacity))
        self._capacity = capacity
        # cpu
        if cpu <= 0:
            raise StorageSystemException("non-positive cpu '{}'".format(cpu))
        if not isinstance(cpu, int):
            raise StorageSystemException("non-integer cpu '{}'".format(cpu))
        self._cpu = cpu

    @property
    def capacity(self):
        """Return the capacity of each storage unit."""
        return self._capacity

    @property
    def cpu(self):
        """Return the number of cpu of each storage unit."""
        return self._cpu

    def add_storage_unit(self, environment, storage_unit_name=None):
        """Add a new storage unit (with this system's uniform capacity
        and cpu count) to this storage system."""
        return super(HomogeneousStorageSystem, self).add_storage_unit(
            environment, self.capacity, self.cpu, storage_unit_name)
def homogeneous_storage_system_builder(environment, number_of_storage_units, capacity, cpu, transfer_time_manager,
                                       name=None,
                                       storage_unit_names = None):
    """Convenient function to create an homogeneous storage system.

    :param environment: The simulation environment the storage units live in.
    :param number_of_storage_units: The number of storage units to be created.
    :type number_of_storage_units: int.
    :param capacity: The capacity of each storage unit.
    :type capacity: Numeric.
    :param cpu: The number of cpu of each storage unit.
    :type cpu: int.
    :param transfer_time_manager: Manager used to compute transfer times.
    :param name: An optional name for this storage system.
    :type name: string.
    :param storage_unit_names: An optional list of names for the storage units;
        when given, its length must equal ``number_of_storage_units``.
    :type storage_unit_names: [string].
    :raises StorageSystemException: on an invalid unit count or a
        name list of the wrong length.
    """
    # number of storage units
    if not isinstance(number_of_storage_units, int):
        raise StorageSystemException("non-integer number of storage units")
    if number_of_storage_units < 0:
        raise StorageSystemException("negative number of storage units")
    # take care of storage unit names
    if storage_unit_names is not None:
        if len(storage_unit_names) != number_of_storage_units:
            msg = "bad number of storage unit names, expected {} got {}".format(number_of_storage_units, len(storage_unit_names))
            raise StorageSystemException(msg)
    # create the storage system
    homogeneous_storage_system = HomogeneousStorageSystem(capacity, cpu, transfer_time_manager, name)
    # suspend the storage system snapshot controller while we are adding
    # the storage units.
    with SuspendedStorageSystemSnapshotController(homogeneous_storage_system):
        # add the storage units to the storage system
        for i in range(number_of_storage_units):
            storage_unit_name = None
            if storage_unit_names is not None:
                storage_unit_name = storage_unit_names[i]
            homogeneous_storage_system.add_storage_unit(environment, storage_unit_name)
    # let the snapshot controller know about the new storage units
    homogeneous_storage_system.force_snapshot(environment)
    # return back to the caller the new storage system
    return homogeneous_storage_system
|
Use these instructor’s helps to guide Pathfinders in completing their class requirements. Copyright 2016.
Use these instructor’s helps to guide Pathfinders in completing their class requirements. Copyright 2014.
Each student needs one card for the class they are completing. The cards serve as an individual guide or completion record for the class requirements. Copyright 2014.
|
"""
********************************************************************************
artists
********************************************************************************
.. currentmodule:: compas_ghpython.artists
.. rst-class:: lead
Artists for visualising (painting) COMPAS objects with GHPython.
Artists convert COMPAS objects to Rhino geometry and data.
.. code-block:: python
pass
----
Geometry Artists
================
.. autosummary::
:toctree: generated/
:nosignatures:
CircleArtist
FrameArtist
LineArtist
PointArtist
PolylineArtist
Datastructure Artists
=====================
.. autosummary::
:toctree: generated/
:nosignatures:
MeshArtist
NetworkArtist
VolMeshArtist
Robot Artist
============
.. autosummary::
:toctree: generated/
:nosignatures:
RobotModelArtist
Base Classes
============
.. autosummary::
:toctree: generated/
:nosignatures:
BaseArtist
PrimitiveArtist
ShapeArtist
"""
from __future__ import absolute_import
from ._artist import BaseArtist
from ._primitiveartist import PrimitiveArtist
from ._shapeartist import ShapeArtist
from .circleartist import CircleArtist
from .frameartist import FrameArtist
from .lineartist import LineArtist
from .pointartist import PointArtist
from .polylineartist import PolylineArtist
from .meshartist import MeshArtist
from .networkartist import NetworkArtist
from .volmeshartist import VolMeshArtist
from .robotmodelartist import RobotModelArtist
# Public API of compas_ghpython.artists; keep in sync with the module
# docstring's autosummary sections above.
__all__ = [
    'BaseArtist',
    'PrimitiveArtist',
    'ShapeArtist',
    'CircleArtist',
    'FrameArtist',
    'LineArtist',
    'PointArtist',
    'PolylineArtist',
    'MeshArtist',
    'NetworkArtist',
    'VolMeshArtist',
    'RobotModelArtist'
]
|
Graphic design is needed in every social media campaign. This is an important element that contributes to the success of digital marketing. The graphic design should be as creative as possible with original designs to attract targeted consumers.
Graphic design is needed by those who want to market their brands and businesses online. The design aims to boost promotion so that the marketing goals will be achieved. It is also useful to polish branding to open more marketplace. As one of the best graphic design companies in Nigeria, AdHang knows very well what is needed to create a creative and attractive social media campaign. To get the best results, AdHang uses effective methods such as banner adverts sized appropriately, online ad links, and strategic digital marketing that draws on various components of digital marketing.
The creative works offered by AdHang include online banner design, article writing, video production, posters, website design, social media campaigns, and headlines writing. As the best creative agency in Nigeria, AdHang comes with a lot of things to offer to the customers. The company provides the best digital marketing experts in Nigeria with more than 10 years of creative experience. The clients also will be able to have access to a bunch of creativity that will help them to connect brands to the target customers. Marketing ideas created are truly creative and originals to meet the customers’ satisfaction. With relatively cheaper prices, customers can get the best practices and international standards in all creative works.
“AdHang’s creative team is not only graphic designers and content producers, instead talented professionals comprise creative strategists branding consultants that will help your brand connects with its target audiences in Nigeria,” said the company’s representative.
AdHang is a digital marketing agency that focuses on obtaining Search Engine Optimization (SEO) services in Nigeria. As one of the best online advertising agencies in the country, the company offers a complete digital marketing package consisting of a search engines, social media, influencers, mobile and content marketing, as well as internet display. With the support of SEO experts and years of experience, AdHang has transformed into a giant digital marketing agency in Nigeria. For more info, please visit https://www.adhang.com/.
Office: D 07 Century mall, St.Patrick's junction, Ojo Alaba international market, Lagos, Nigeria.
|
import unittest
from ascetic import validators
from ascetic.databases import databases
from ascetic.models import Model
from ascetic.relations import ForeignKey
# Module-level placeholders for the model classes. The real classes are
# created lazily in TestCompositeRelation.setUpClass() (after the database
# tables exist) and installed here via globals(), so test methods can refer
# to Author and Book by name.
Author = Book = None
class TestCompositeRelation(unittest.TestCase):
    """Integration tests for composite (multi-column) keys.

    Author and Book both use the composite primary key (id, lang); Book
    references Author through the composite foreign key
    (author_id, lang) -> (id, lang).  Requires a configured 'default'
    database connection from ascetic.databases.
    """

    maxDiff = None

    # Engine-specific DDL, keyed by databases['default'].engine.
    create_sql = {
        'postgresql': """
            DROP TABLE IF EXISTS ascetic_composite_author CASCADE;
            CREATE TABLE ascetic_composite_author (
                id integer NOT NULL,
                lang VARCHAR(6) NOT NULL,
                first_name VARCHAR(40) NOT NULL,
                last_name VARCHAR(40) NOT NULL,
                bio TEXT,
                PRIMARY KEY (id, lang)
            );
            DROP TABLE IF EXISTS ascetic_composite_book CASCADE;
            CREATE TABLE ascetic_composite_book (
                id integer NOT NULL,
                lang VARCHAR(6) NOT NULL,
                title VARCHAR(255),
                author_id integer,
                PRIMARY KEY (id, lang),
                FOREIGN KEY (author_id, lang) REFERENCES ascetic_composite_author (id, lang) ON DELETE CASCADE
            );
        """,
        'mysql': """
            DROP TABLE IF EXISTS ascetic_composite_author CASCADE;
            CREATE TABLE ascetic_composite_author (
                id INT(11) NOT NULL,
                lang VARCHAR(6) NOT NULL,
                first_name VARCHAR(40) NOT NULL,
                last_name VARCHAR(40) NOT NULL,
                bio TEXT,
                PRIMARY KEY (id, lang)
            );
            DROP TABLE IF EXISTS ascetic_composite_book CASCADE;
            CREATE TABLE ascetic_composite_book (
                id INT(11) NOT NULL,
                lang VARCHAR(6) NOT NULL,
                title VARCHAR(255),
                author_id INT(11),
                PRIMARY KEY (id, lang),
                FOREIGN KEY (author_id, lang) REFERENCES ascetic_composite_author (id, lang)
            );
        """,
        'sqlite3': """
            DROP TABLE IF EXISTS ascetic_composite_author;
            CREATE TABLE ascetic_composite_author (
                id INTEGER NOT NULL,
                lang VARCHAR(6) NOT NULL,
                first_name VARCHAR(40) NOT NULL,
                last_name VARCHAR(40) NOT NULL,
                bio TEXT,
                PRIMARY KEY (id, lang)
            );
            DROP TABLE IF EXISTS ascetic_composite_book;
            CREATE TABLE ascetic_composite_book (
                id INTEGER NOT NULL,
                lang VARCHAR(6) NOT NULL,
                title VARCHAR(255),
                author_id INT(11),
                PRIMARY KEY (id, lang),
                FOREIGN KEY (author_id, lang) REFERENCES ascetic_composite_author (id, lang)
            );
        """
    }

    @classmethod
    def create_models(cls):
        """Create the Author/Book model classes and return them keyed by name.

        The classes are created here (not at module import time) so the
        tables already exist when the mappers are set up.
        """
        class Author(Model):
            class Mapper(object):
                db_table = 'ascetic_composite_author'
                defaults = {'bio': 'No bio available'}
                validations = {
                    'first_name': validators.Length(),
                    # A tuple combines several validators; the lambda returns
                    # an error message string when validation fails.
                    'last_name': (validators.Length(),
                                  lambda x: x != 'BadGuy!' or 'Bad last name',),
                }

        class Book(Model):
            # Composite FK: both columns of the pair must match the
            # referenced composite PK.
            author = ForeignKey(Author,
                                related_field=('id', 'lang'),
                                field=('author_id', 'lang'),
                                related_name='books')

            class Mapper(object):
                db_table = 'ascetic_composite_book'

        # Return an explicit mapping rather than locals(): locals() also
        # contains 'cls', which setUpClass() would then leak into globals().
        return {'Author': Author, 'Book': Book}

    @classmethod
    def setUpClass(cls):
        """Create the tables and publish the model classes as module globals."""
        db = databases['default']
        db.cursor().execute(cls.create_sql[db.engine])
        for model_name, model in cls.create_models().items():
            globals()[model_name] = model

    def setUp(self):
        """Disable the identity map and wipe both tables before each test."""
        db = databases['default']
        db.identity_map.disable()
        for table in ('ascetic_composite_author', 'ascetic_composite_book'):
            db.execute('DELETE FROM {0}'.format(db.qn(table)))

    def test_model(self):
        """Round-trip save/load through composite PKs and the composite FK."""
        author = Author(
            id=1,
            lang='en',
            first_name='First name',
            last_name='Last name',
        )
        self.assertIn('first_name', dir(author))
        self.assertIn('last_name', dir(author))
        author.save()
        author_pk = (1, 'en')
        author = Author.get(author_pk)
        self.assertEqual(author.pk, author_pk)

        book = Book(
            id=5,
            lang='en',
            title='Book title'
        )
        book.author = author
        book.save()
        book_pk = (5, 'en')
        book = Book.get(book_pk)
        self.assertEqual(book.pk, book_pk)
        # Forward FK access resolves to the composite author PK...
        self.assertEqual(book.author.pk, author_pk)
        # ...and the reverse relation ('books') resolves back to the book.
        author = Author.get(author_pk)
        self.assertEqual(author.books[0].pk, book_pk)
|
Florence is the capital city of the Italian region of Tuscany. It is known as the "cradle of the Renaissance" (la culla del Rinascimento) because of its monuments, churches, and other historic buildings. It has also been called the Athens of the Middle Ages. The domed cathedral of the city, built by Filippo Brunelleschi and known as the Duomo, is the best-known site in Florence. The historic center of the city was named a UNESCO World Heritage Site in 1982 and Forbes magazine ranks Florence as one of the most beautiful cities in the world.
Whatever your reason for visiting the Florence area, Villaway.com can help you find the perfect Florence area luxury vacation rental. Our properties range from 3 bedrooms and 3 baths, accommodating 6, up to 10 bedrooms and 10 baths, accommodating up to 20. These luxury Florence area vacation properties are very diverse, so we are able to offer properties in the city or country, apartments to manors, modern to classic. Regardless of location or size, our guests can expect the same caliber of indulgence at any of our luxury Florence area vacation villas. A small sampling of amenities includes expansive views of the surrounding area, the craftsmanship of local artisans and artists, sweeping living rooms, gourmet fully equipped kitchens, and extravagant master suites with separate sitting areas and private verandas. Master baths feature dual vanities, large Jacuzzi tubs and separate oversized walk-in showers.
The Santa Maria del Fiore, the domed cathedral often called the Duomo, is known as the world’s largest masonry dome. This majestic cathedral features 600 years worth of incredible architecture and works of art. The Ponte Vecchio is one of Florence’s most photographed bridges. It spans the Arno River and is noted for its three segmented arches. The bridge was first built by the Etruscans and later rebuilt in the 14th century. The Uffizi Gallery is regarded as one of the world’s greatest art museums. The former palace was built in 1560 and later evolved into the gallery. It has been open to the public since 1765.
|
import asposeslidescloud
from asposeslidescloud.SlidesApi import SlidesApi
from asposeslidescloud.SlidesApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
# Aspose Cloud credentials and endpoint.
apiKey = "XXXXX"  # specify App Key
appSid = "XXXXX"  # specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../../data/"

# Instantiate the Aspose Storage API SDK client.
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)

# Instantiate the Aspose Slides API SDK client.
api_client = asposeslidescloud.ApiClient.ApiClient(apiKey, appSid, True)
slidesApi = SlidesApi(api_client)

# Input presentation name and the configured third-party storage to use.
name = "sample-input.pptx"
storage = "AsposeDropboxStorage"

try:
    # Upload the file to the third-party cloud storage.
    response = storageApi.PutCreate(name, data_folder + name, storage=storage)
    # Invoke Aspose.Slides Cloud to delete all slides from the presentation.
    response = slidesApi.DeleteSlidesCleanSlidesList(name, storage=storage)
    if response.Status == "OK":
        # Download the cleaned presentation from the third-party storage
        # and stream it to a local file in binary chunks.
        response = storageApi.GetDownload(Path=name, storage=storage)
        outfilename = "c:/temp/" + name
        with open(outfilename, 'wb') as f:
            for chunk in response.InputStream:
                f.write(chunk)
except ApiException as ex:
    # Parenthesized print is valid under both Python 2 and Python 3
    # (the original used Python-2-only print statements).
    print("ApiException:")
    print("Code:" + str(ex.code))
    print("Message:" + ex.message)
|
2. Tell your driver "Liquor World" on Paradise and Harmon Corner. Map is below.
|
import math

from crispy_forms.bootstrap import FormActions, AppendedText, StrictButton, InlineField
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Button, Field, Hidden, HTML, Div
from django import forms
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import resolve, reverse
from django.db import models
from django.db.models import F, ExpressionWrapper, FloatField, IntegerField, CharField, Case, When, Sum, Func, Min, Q
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.encoding import python_2_unicode_compatible

from cooking.helpers import prepareContext
from cooking.models import Ingredient
from cooking.inventory.inventory import inventory_data, add_to_inventory
def project_shopping_list_data(proj):
    """Build the shopping-list queryset for a project.

    Annotates every Ingredient used by the project's meals with exact
    quantities/prices (scaled from each receipe's default person count to
    the actual meal person count) and "effective" quantities/prices
    (rounded up to whole buying units).

    NOTE(review): the +0.5-then-ROUND trick on `buying_count` implements
    round-half-up; presumably intended as a ceil-like rounding — confirm
    against the CSV/listing consumers.
    """
    return Ingredient.objects.filter(receipe__meal__project=proj).annotate(
        # Copy ri.measurement for easier access
        measurement=F('receipe_ingredient__measurement'),
        # Also copy ri.remarks for easier access
        mr_remarks=F('receipe_ingredient__remarks'),
        # Exact price = (mr.person_count / r.default_person_count) * i.price
        exact_price_tmp=ExpressionWrapper((F('receipe__meal_receipe__person_count') / F('receipe__default_person_count')) * F('price'), output_field=FloatField()),
        # Exact amount in buying units; converts via calculation units when
        # the receipe specifies the ingredient in calculation measurement.
        exact_amount_tmp=Case(
            When(buying_measurement=F('receipe_ingredient__measurement'),
                 then=(F('receipe_ingredient__amount') / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
            When(calculation_measurement=F('receipe_ingredient__measurement'),
                 then=(((F('receipe_ingredient__amount') / F('calculation_quantity')) * F('buying_quantity')) / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
            default=0,
            output_field=FloatField()),
        # Mirror of the above expressed in calculation units; NULL when the
        # ingredient has no calculation measurement.
        exact_calculation_amount_tmp=Case(
            When(calculation_measurement__isnull=True,
                 then=None),
            When(buying_measurement=F('receipe_ingredient__measurement'),
                 then=(((F('receipe_ingredient__amount') / F('buying_quantity')) * F('calculation_quantity')) / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
            When(calculation_measurement=F('receipe_ingredient__measurement'),
                 then=(F('receipe_ingredient__amount') / F('receipe__default_person_count')) * F('receipe__meal_receipe__person_count')),
            default=None,
            output_field=FloatField()),
    ).annotate(
        # Aggregate across all meals/receipes that use the ingredient.
        exact_amount=Sum('exact_amount_tmp'),
        first_occurrence=Min('receipe__meal__time'),
    ).annotate(
        exact_calculation_amount=Case(When(calculation_measurement__isnull=False, then=F('exact_amount') / F('buying_quantity') * F('calculation_quantity')),
                                      default=None,
                                      output_field=FloatField()),
        exact_buying_count=(F('exact_amount') / F('buying_quantity')),
        buying_count=Func((F('exact_amount') / F('buying_quantity')) + 0.5, function='ROUND'),
    ).annotate(
        # "Effective" values: what you actually buy (whole units).
        effective_amount=F('buying_count') * F('buying_quantity'),
        effective_calculation_amount=F('buying_count') * F('calculation_quantity'),
        effective_price=ExpressionWrapper(F('buying_count') * F('price'), output_field=FloatField()),
        #).values('first_occurrence', 'name', 'id', 'buying_measurement', 'buying_quantity', 'calculation_measurement', 'calculation_quantity', 'exact_amount', 'exact_calculation_amount', 'effective_amount', 'effective_calculation_amount', 'remarks', 'effective_price', 'buying_count', 'price'
    )
def subtract_inventory(proj, shopping_list):
    """Reduce the shopping list by what the project already holds in inventory.

    Mutates the list entries in place and returns only those that still
    need to be bought (exact buying count above a small epsilon).
    """
    stock = list(inventory_data(proj))
    entries = list(shopping_list)
    for entry in entries:
        for held in (s for s in stock if s.ingredient.id == entry.id):
            # Deduct what is already in stock, in buying units.
            entry.exact_buying_count -= held.exact_buying_count
            #print('Subtracting ' + str(inv.amount) + inv.measurement + ' from ' + item.name)
            #inventory.remove(inv) # for optimization remove this element
        # Re-derive the dependent quantities from the adjusted buying count.
        # I most propably forgot something here
        entry.exact_amount = entry.exact_buying_count * entry.buying_quantity
        if entry.calculation_measurement:
            entry.exact_calculation_amount = entry.exact_buying_count * entry.calculation_quantity
        entry.buying_count = math.ceil(entry.exact_buying_count)
        entry.effective_amount = entry.buying_count * entry.buying_quantity
        if entry.calculation_measurement:
            entry.effective_calculation_amount = entry.buying_count * entry.calculation_quantity
        entry.effective_price = entry.buying_count * float(entry.price)
    return [e for e in entries if e.exact_buying_count > 0.000001]
@login_required
def project_shopping_list(request):
    """Render the active project's shopping list, optionally net of inventory."""
    context = prepareContext(request)
    if 'active_project' not in context:
        return redirect('cooking:projects')

    # Inventory mode can be toggled via query parameters; defaults to on.
    if 'activate_inventory' in request.GET:
        request.session['inventory_active'] = True
    elif 'deactivate_inventory' in request.GET:
        request.session['inventory_active'] = False
    elif 'inventory_active' not in request.session:
        request.session['inventory_active'] = True

    # Optionally move the outstanding list into the inventory first.
    if request.session['inventory_active'] and 'send_to_inventory' in request.GET:
        outstanding = subtract_inventory(
            context['active_project'],
            project_shopping_list_data(context['active_project']))
        for entry in outstanding:
            add_to_inventory(context['active_project'], entry)

    shopping = project_shopping_list_data(context['active_project'])
    if request.session['inventory_active']:
        shopping = subtract_inventory(context['active_project'], shopping)
    context['shopping_list'] = shopping
    #context['total_exact_price'] = context['shopping_list'].aggregate(tp=Sum('exact_price')).get('tp')
    context['total_effective_price'] = sum(float(e.effective_price) for e in shopping)
    context['pagetitle'] = 'Shopping List'
    context['inventory_active'] = request.session['inventory_active']
    return render(request, 'listings/shopping_list.html', context)
@login_required
def project_shopping_list_csv(request):
    """Export the active project's shopping list as a CSV attachment.

    Fix: HttpResponse was used but never imported in this module (runtime
    NameError); it is now imported from django.http at the top of the file.
    NOTE(review): UnicodeWriter and conv_measurement are also referenced
    without an import here — presumably they live in cooking.helpers;
    verify and import them as well.
    """
    context = prepareContext(request)
    if 'active_project' not in context:
        return redirect('cooking:projects')

    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="shoppinglist.csv"'
    writer = UnicodeWriter(response)
    writer.writerow(['First Use', 'Ingredient', 'Exact Amount 1', '', 'Exact Amount 2', '',
                     'Effective Amount 1', '', 'Effective Amount 2', '',
                     'Buying Count', 'Effective Price', 'Remarks'])

    if 'inventory_active' not in request.session:
        request.session['inventory_active'] = True
    shoppinglist = project_shopping_list_data(context['active_project'])
    if request.session['inventory_active']:
        shoppinglist = subtract_inventory(context['active_project'], shoppinglist)

    for item in shoppinglist:
        # Skip rows that are fully covered by inventory.
        if item.exact_amount > 0:
            writer.writerow([item.first_occurrence,
                             item.name,
                             item.exact_amount,
                             conv_measurement(item.buying_measurement, item.exact_amount),
                             item.exact_calculation_amount,
                             conv_measurement(item.calculation_measurement, item.exact_calculation_amount),
                             item.effective_amount,
                             conv_measurement(item.buying_measurement, item.effective_amount),
                             item.effective_calculation_amount,
                             conv_measurement(item.calculation_measurement, item.effective_calculation_amount),
                             item.buying_count,
                             item.effective_price,
                             item.remarks])
    return response
|
Marine preserves double risk of drowning - KUAM.com-KUAM News: On Air. Online. On Demand.
Guam - A Centers for Disease Control and Prevention, National Institute for Occupational Safety and Health has released the findings of a study that show a direct correlation between the enforcement of marine preserves and the risk of drowning for indigenous fishermen.
The risk of drowning more than doubled for Chamorro fishermen after marine preserve areas were enforced on the island a decade ago. That was the major finding of a study released by the centers for disease control and prevention, national institute for occupational safety and health.
"It's something we've always known in the fishing community. Our fishing community has dwindled," said Manny Duenas, president of the Guam Fishermen's Co-op. "We used to be 1,000 fishermen, now we're less than 200 fishermen." CDC officials spent about three weeks on Guam last year conducting the study that found that indigenous fishermen used to fish on the western and southern coasts of Guam, but when the preserves were established, they became more reliant on fishing on the East Coast - increasing their exposure to more hazardous conditions resulting in a higher risk of drowning.
Duenas stresses that fishermen aren't against the preserves, but would like the regulations to be amended, telling KUAM News, "If you see the impact to this small population doubling, then there's really a concern that someone should address in the government and visit the community and see what's really going on and causing our demise. The value of these preserves - is it worth the bodies?"
The stringent regulations in traditional fishing areas at the Tumon Bay, Piti Bomb Holes, Sasa Bay, Achang Reef Flat and Pati Point preserves have resulted in fishermen heading to the eastern side of the island. "We respect the preserves and we believe in what it's supposed to do," Duenas noted. "But at the same time we also have to put a balance between human life and what the environment will provide for us as it has for thousands of years.
Local fishermen are drafting new rules to change the island's five marine preserves into culturally managed areas that they say would promote traditional fishing practices.
"That way, our older adults, before they leave they can teach us the safety factors. If you look all the drownings and deaths, they're mostly young people and I hate to see the next generation, any of my children suffer a consequence because of our inability to teach them properly. And I think those skills, whether you're teaching in education or you're teaching in real life. Those skills are detrimental to survival of the people and its culture," he shared.
Other fishermen we spoke with today suggested the government issue fishing permits or allow fishing in selected preserves at certain times around the year. The Department of Agriculture did not return KUAM's calls for comment on the CDC study.
Meanwhile, the main sponsor of the bill that created the marine preserves, former senator Joanne Brown, says she has not seen the study but questions its validity and intent. She says the preserves were established to protect dwindling marine resources.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.