text stringlengths 0 1.05M | meta dict |
|---|---|
"""An enumerated type :class:`Direction` whose elements are the four cardinal directions:
``NORTH``, ``SOUTH``, ``EAST``, and ``WEST``.
A :class:`Direction` knows what :class:`Direction` is to the left, the right, or opposite itself
using the methods :func:`left`, :func:`right`, and :func:`opposite`.
Usage::
direction = Direction.NORTH
direction.left() # => Direction.WEST
direction.right() # => Direction.EAST
direction.opposite() # => Direction.SOUTH
"""
import enum
@enum.unique
class Direction(enum.Enum):
    """Represent the four compass directions."""
    NORTH = 0
    EAST = 1
    SOUTH = 2
    WEST = 3

    def left(self):
        """Return the direction 90 degrees counterclockwise from this one."""
        # -1 mod 4 wraps NORTH (0) around to WEST (3).
        return Direction((self.value - 1) % 4)

    def right(self):
        """Return the direction 90 degrees clockwise from this one."""
        return Direction((self.value + 1) % 4)

    def opposite(self):
        """Return the direction 180 degrees from this one."""
        return Direction((self.value + 2) % 4)

    def __repr__(self):
        # Render as 'Direction.NAME' instead of the default '<Direction.NAME: value>'.
        return '{}.{}'.format(type(self).__name__, self.name)


__all__ = ['Direction']
| {
"repo_name": "sredmond/acmpy",
"path": "campy/util/direction.py",
"copies": "1",
"size": "1253",
"license": "mit",
"hash": 4175504047089450500,
"line_mean": 29.325,
"line_max": 96,
"alpha_frac": 0.584197925,
"autogenerated": false,
"ratio": 3.6744868035190614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9740670238903586,
"avg_score": 0.003602897923094968,
"num_lines": 40
} |
"""An Env holds the environment context that a pod is running in."""
import time
from protorpc import messages
from grow.common import urls
class Name(object):
    """Namespace for the known environment name constants."""
    DEV = 'dev'  # Local development environment.
class EnvConfig(messages.Message):
    """Serializable configuration describing the environment a pod runs in.

    Field numbers are part of the protorpc wire format; do not renumber
    existing fields.
    """
    host = messages.StringField(1)
    scheme = messages.StringField(2)
    port = messages.IntegerField(3)
    name = messages.StringField(4)
    cached = messages.BooleanField(5, default=True)
    fingerprint = messages.StringField(6)
    dev = messages.BooleanField(7, default=False)
class Env(object):
    """Runtime environment context built from an :class:`EnvConfig`.

    The host/port/scheme accessors read from and write through to the
    wrapped config object, so the two views stay in sync.
    """

    def __init__(self, config):
        self.config = config
        self.name = config.name
        self.cached = config.cached
        # Fall back to the current epoch second so an unset fingerprint
        # still yields a value that changes between builds.
        self.fingerprint = config.fingerprint or str(int(time.time()))

    def __repr__(self):
        return '<Env: {}>'.format(self.url)

    @property
    def host(self):
        # Default to localhost when the config leaves the host unset.
        return self.config.host or 'localhost'

    @host.setter
    def host(self, value):
        self.config.host = value

    @property
    def port(self):
        return self.config.port

    @port.setter
    def port(self, value):
        self.config.port = value

    @property
    def scheme(self):
        return self.config.scheme

    @scheme.setter
    def scheme(self, value):
        self.config.scheme = value

    @property
    def dev(self):
        return self.config.dev

    @property
    def url(self):
        """Root URL for this environment as a `urls.Url`."""
        return urls.Url(
            path='/',
            host=self.host,
            port=self.port,
            scheme=self.scheme)

    def to_wsgi_env(self):
        """Build a minimal WSGI environ dict describing this environment."""
        return {
            'REQUEST_METHOD': 'GET',
            'SERVER_NAME': self.host,
            'SERVER_PORT': str(self.port),
            'wsgi.url_scheme': self.scheme,
        }
| {
"repo_name": "grow/pygrow",
"path": "grow/pods/env.py",
"copies": "2",
"size": "1705",
"license": "mit",
"hash": 5168747951569636000,
"line_mean": 21.7333333333,
"line_max": 70,
"alpha_frac": 0.5964809384,
"autogenerated": false,
"ratio": 3.9559164733178656,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 75
} |
""" A nested hash-map/dictionary object for Python """
import collections
import collections.abc
try:
    basestring  # Python 2: the common base of str and unicode.
except NameError:
    # Python 3: `str` is the only text type.
    basestring = str

__author__ = "Nick Stanisha <github.com/nickstanisha>"
__version__ = "0.1"
# Public API of the module. Previously this listed the nonexistent name
# 'nested_keys', which made `from nesteddict import *` fail; the actual
# path-listing helper defined in this module is `paths`.
__all__ = ['NestedDict', 'leaf_values', 'paths', 'to_nested_dict']
def _dfs_generator(dictionary):
if not isinstance(dictionary, dict):
raise TypeError("Unsupported type '{}'".format(type(dictionary).__name__))
stack = [(val, [key]) for key, val in dictionary.items()]
while stack:
d, path = stack.pop()
if isinstance(d, dict):
stack.extend([(val, path + [key]) for key, val in d.items()])
else:
yield tuple(path), d
def to_nested_dict(dictionary):
    """ Casts a given `dict` as a `NestedDict` (does not change the contents of the original `dict`)

    Returns
    -------
    d : NestedDict
        The contents of the original `dict`, in a `NestedDict` object
    """
    converted = NestedDict()
    # Replaying every leaf assignment rebuilds the full nested structure.
    for key_path, leaf in _dfs_generator(dictionary):
        converted[key_path] = leaf
    return converted
def paths(dictionary):
    """ Return tuples representing paths to the bottom of a dict

    Returns
    -------
    out : list[tuple]
        A list of tuples representing paths retrieved in depth-first order from the dict

    Examples
    --------
    >>> d = {1: {2: {3: 4, 4: 5}}, 2: {3: {4: 5}}}
    >>> print(paths(d))
    [(2, 3, 4), (1, 2, 4), (1, 2, 3)]
    """
    # NOTE: the docstring example previously called the nonexistent name
    # `nested_keys`; this function is the one exported as `paths`.
    return [path for path, _ in _dfs_generator(dictionary)]
def leaf_values(dictionary):
    """ Returns the values at the bottom of a dictionary

    Returns
    -------
    out : list
        A list of the bottom-most values in a dictionary, retrieved in depth-first order

    Examples
    --------
    >>> d = {1: {2: {3: 4, 4: 5}}, 2: {3: {4: 5}}}
    >>> print(leaf_values(d))
    [5, 5, 4]
    """
    return [leaf for _, leaf in _dfs_generator(dictionary)]
class NestedDict(dict):
    """ An object representing a dictionary of dictionaries of dictionaries ...

    In order to avoid code like this

    >>> if 'a' in dictionary:
    ...     if 'b' in dictionary['a']
    ...         dictionary['a']['b']['c'] = 3
    ...     else:
    ...         dictionary['a']['b'] = {'c': 3}
    ... else:
    ...     dictionary['a'] = {'b': {'c': 3}}

    NestedDict enables the following syntax

    >>> nested_dictionary['a', 'b', 'c'] = 3

    A defaultdict could be used to accomplish a similar goal, but only to
    a finite depth specified at the time of construction

    >>> # Nested dictionary of depth 4
    >>> d = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))

    NestedDict is able to handle nested dictionaries of arbitrary depth.
    Additionally, since NestedDict extends `dict`, it prints nicely to the
    console by default

    >>> my_nested_dict
    {1: {2: 3}}
    """
    def __init__(self, *args, **kwargs):
        """Build a NestedDict, deep-converting a plain `dict` argument.

        A plain (non-NestedDict) `dict` first positional argument is replayed
        leaf-by-leaf so every nesting level becomes a NestedDict; anything
        else is forwarded to `dict.__init__` unchanged.
        """
        if args and isinstance(args[0], dict) and not isinstance(args[0], NestedDict):
            for path, val in _dfs_generator(args[0]):
                self[path] = val
        else:
            super(NestedDict, self).__init__(*args, **kwargs)

    @staticmethod
    def _split_key(key):
        """Split `key` into ``(first_step, remaining_path)``.

        Tuples/lists are interpreted as paths into the nested structure.
        `str` and `bytes` are sequences too but are treated as atomic keys
        (previously only text strings were excluded, so on Python 3 a
        `bytes` key was wrongly split into its integer elements).
        """
        # collections.Sequence was removed in Python 3.10; the ABC lives in
        # collections.abc.
        if isinstance(key, collections.abc.Sequence) and not isinstance(key, (str, bytes)):
            return key[0], key[1:]
        else:
            return key, []

    def __getitem__(self, key):
        """Fetch a value, following `key` as a path when it is a sequence."""
        cur_key, downstream = self._split_key(key)
        if downstream:
            return super(NestedDict, self).__getitem__(cur_key)[downstream]
        else:
            return super(NestedDict, self).__getitem__(cur_key)

    def __setitem__(self, key, value):
        """Assign a value, creating intermediate NestedDicts along the path."""
        cur_key, downstream = self._split_key(key)
        if downstream:
            # Overwrite any non-NestedDict intermediate so the path can extend.
            if cur_key not in self or not isinstance(super(NestedDict, self).__getitem__(cur_key), NestedDict):
                super(NestedDict, self).__setitem__(cur_key, NestedDict())
            super(NestedDict, self).__getitem__(cur_key)[downstream] = value
        else:
            # Plain dict values are deep-converted to preserve the invariant
            # that every nested mapping is a NestedDict.
            if isinstance(value, dict) and not isinstance(value, NestedDict):
                super(NestedDict, self).__setitem__(cur_key, NestedDict(value))
            else:
                super(NestedDict, self).__setitem__(cur_key, value)

    def __delitem__(self, key):
        """Delete a value, following `key` as a path when it is a sequence."""
        if isinstance(key, collections.abc.Sequence) and not isinstance(key, (str, bytes)):
            upstream, cur_key = key[:-1], key[-1]
            d = self[upstream] if upstream else self
            super(NestedDict, d).__delitem__(cur_key)
        else:
            super(NestedDict, self).__delitem__(key)

    def get(self, key, default=None):
        """ A short-circuit to `dict.get`, will not parse tuples into a path before applying changes

        Examples
        --------
        >>> v = d[(1, 2, 3),]  # will raise if the key (1, 2, 3) does not exist in d
        >>> v = d.get((1, 2, 3))  # will return `None` if the key (1, 2, 3) does not exist in d
        """
        try:
            return super(NestedDict, self).__getitem__(key)
        except KeyError:
            return default

    def set(self, key, value):
        """ A short-circuit to `dict.__setitem__`, will not parse tuples into a path before applying changes

        Examples
        --------
        >>> # The following are equivalent
        >>> d = NestedDict()
        >>> d[(1, 2, 3),] = 4
        >>> d[[(1, 2, 3)]] = 4
        >>> d.set((1, 2, 3), 4)
        """
        return super(NestedDict, self).__setitem__(key, value)

    def delete(self, key):
        """ A short-circuit to `dict.__delitem__`, will not parse tuples into a path before applying changes

        Examples
        --------
        >>> # The following are equivalent
        >>> d = NestedDict()
        >>> del d[(1, 2, 3),]
        >>> d.delete((1, 2, 3))
        """
        return super(NestedDict, self).__delitem__(key)

    def leaf_values(self):
        """ Return the values at the bottom of a nested dict (Analogous to `dict.values`)

        Returns
        -------
        out : list
            A list of leaf values retrieved in depth-first order from the NestedDict
        """
        return [val for _, val in _dfs_generator(self)]

    def paths(self):
        """ Return tuples representing paths to the bottom of a nested dict (Analogous to `dict.keys`)

        Returns
        -------
        out : list[tuple]
            A list of tuples representing paths retrieved in depth-first order from the NestedDict
        """
        return [path for path, _ in _dfs_generator(self)]

    def nested_update(self, obj):
        """ Works like `dict.update` except only the leaf values of the supplied dictionary are
        used to update `self`

        Examples
        --------
        >>> print(d)
        {1: {2: {3: {4: 5, 5: 6}}}, 2: {3: 5, 4: 16}}
        >>> print(e)
        {1: {2: {3: {5: 7}}}, 2: {5: 1}}
        >>> d.nested_update(e)
        >>> print(d)
        {1: {2: {3: {4: 5, 5: 7}}}, 2: {3: 5, 4: 16, 5: 1}}
        """
        for path, val in _dfs_generator(obj):
            self[path] = val
| {
"repo_name": "nickstanisha/nesteddict",
"path": "nesteddict.py",
"copies": "1",
"size": "7539",
"license": "mit",
"hash": 7077487431594029000,
"line_mean": 33.2681818182,
"line_max": 135,
"alpha_frac": 0.531768139,
"autogenerated": false,
"ratio": 3.9846723044397465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009956667686843652,
"num_lines": 220
} |
"""An estimator that learns to ensemble.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet import core
from adanet.autoensemble.common import _GeneratorFromCandidatePool
import tensorflow.compat.v2 as tf
class AutoEnsembleEstimator(core.Estimator):  # pylint: disable=g-classes-have-attributes
  # pyformat: disable
  """A :class:`tf.estimator.Estimator` that learns to ensemble models.

  Specifically, it learns to ensemble models from a candidate pool using the
  Adanet algorithm.

  .. code-block:: python

    # A simple example of learning to ensemble linear and neural network
    # models.

    import adanet
    import tensorflow as tf

    feature_columns = ...

    head = MultiClassHead(n_classes=10)

    # Learn to ensemble linear and DNN models.
    estimator = adanet.AutoEnsembleEstimator(
        head=head,
        candidate_pool=lambda config: {
            "linear":
                tf.estimator.LinearEstimator(
                    head=head,
                    feature_columns=feature_columns,
                    config=config,
                    optimizer=...),
            "dnn":
                tf.estimator.DNNEstimator(
                    head=head,
                    feature_columns=feature_columns,
                    config=config,
                    optimizer=...,
                    hidden_units=[1000, 500, 100])},
        max_iteration_steps=50)

    # Input builders
    def input_fn_train:
      # Returns tf.data.Dataset of (x, y) tuple where y represents label's
      # class index.
      pass
    def input_fn_eval:
      # Returns tf.data.Dataset of (x, y) tuple where y represents label's
      # class index.
      pass
    def input_fn_predict:
      # Returns tf.data.Dataset of (x, None) tuple.
      pass

    estimator.train(input_fn=input_fn_train, steps=100)
    metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
    predictions = estimator.predict(input_fn=input_fn_predict)

  Or to train candidate subestimators on different training data subsets:

  .. code-block:: python

    train_data_files = [...]

    # Learn to ensemble linear and DNN models.
    estimator = adanet.AutoEnsembleEstimator(
        head=head,
        candidate_pool=lambda config: {
            "linear":
                adanet.AutoEnsembleSubestimator(
                    tf.estimator.LinearEstimator(
                        head=head,
                        feature_columns=feature_columns,
                        config=config,
                        optimizer=...),
                    make_train_input_fn(train_data_files[:-1])),
            "dnn":
                adanet.AutoEnsembleSubestimator(
                    tf.estimator.DNNEstimator(
                        head=head,
                        feature_columns=feature_columns,
                        config=config,
                        optimizer=...,
                        hidden_units=[1000, 500, 100]),
                    make_train_input_fn(train_data_files[0:]))},
        max_iteration_steps=50)

    estimator.train(input_fn=make_train_input_fn(train_data_files), steps=100)

  Args:
    head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
      evaluation metrics for every candidate.
    candidate_pool: List of :class:`tf.estimator.Estimator` and
      :class:`AutoEnsembleSubestimator` objects, or dict of string name to
      :class:`tf.estimator.Estimator` and :class:`AutoEnsembleSubestimator`
      objects that are candidate subestimators to ensemble at each iteration.
      The order does not directly affect which candidates will be included in
      the final ensemble, but will affect the name of the candidate. When using
      a dict, the string key becomes the candidate subestimator's name.
      Alternatively, this argument can be a function that takes a `config`
      argument and returns the aforementioned values in case the
      objects need to be re-instantiated at each adanet iteration.
    max_iteration_steps: Total number of steps for which to train candidates per
      iteration. If `OutOfRange` or `StopIteration` occurs in the middle,
      training stops before `max_iteration_steps` steps.
    logits_fn: A function for fetching the subnetwork logits from a
      :class:`tf.estimator.EstimatorSpec`, which should obey the following
      signature:
      - `Args`: Can only have following argument:
        - estimator_spec: The candidate's :class:`tf.estimator.EstimatorSpec`.
      - `Returns`: Logits :class:`tf.Tensor` or dict of string to logits
        :class:`tf.Tensor` (for multi-head) for the candidate subnetwork
        extracted from the given `estimator_spec`. When `None`, it will
        default to returning `estimator_spec.predictions` when they are a
        :class:`tf.Tensor` or the :class:`tf.Tensor` for the key 'logits' when
        they are a dict of string to :class:`tf.Tensor`.
    last_layer_fn: An optional function for fetching the subnetwork last_layer
      from a :class:`tf.estimator.EstimatorSpec`, which should obey the
      following signature:
      - `Args`: Can only have following argument:
        - estimator_spec: The candidate's :class:`tf.estimator.EstimatorSpec`.
      - `Returns`: Last layer :class:`tf.Tensor` or dict of string to last
        layer :class:`tf.Tensor` (for multi-head) for the candidate subnetwork
        extracted from the given `estimator_spec`. The last_layer can be used
        for learning ensembles or exporting them as embeddings.
        When `None`, it will default to using the logits as the last_layer.
    ensemblers: See :class:`adanet.Estimator`.
    ensemble_strategies: See :class:`adanet.Estimator`.
    evaluator: See :class:`adanet.Estimator`.
    metric_fn: See :class:`adanet.Estimator`.
    force_grow: See :class:`adanet.Estimator`.
    adanet_loss_decay: See :class:`adanet.Estimator`.
    worker_wait_timeout_secs: See :class:`adanet.Estimator`.
    model_dir: See :class:`adanet.Estimator`.
    config: See :class:`adanet.Estimator`.
    debug: See :class:`adanet.Estimator`.
    enable_ensemble_summaries: See :class:`adanet.Estimator`.
    enable_subnetwork_summaries: See :class:`adanet.Estimator`.
    global_step_combiner_fn: See :class:`adanet.Estimator`.
    max_iterations: See :class:`adanet.Estimator`.
    replay_config: See :class:`adanet.Estimator`.
    **kwargs: Extra keyword args passed to the parent.

  Returns:
    An :class:`adanet.AutoEnsembleEstimator` instance.

  Raises:
    ValueError: If any of the candidates in `candidate_pool` are not
      :class:`tf.estimator.Estimator` instances.
  """
  # pyformat: enable

  def __init__(self,
               head,
               candidate_pool,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               logits_fn=None,
               last_layer_fn=None,
               evaluator=None,
               metric_fn=None,
               force_grow=False,
               adanet_loss_decay=.9,
               worker_wait_timeout_secs=7200,
               model_dir=None,
               config=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               replay_config=None,
               **kwargs):
    # Adapt the user-supplied candidate pool to AdaNet's subnetwork generator
    # interface; logits_fn/last_layer_fn control how tensors are extracted
    # from each candidate's EstimatorSpec.
    subnetwork_generator = _GeneratorFromCandidatePool(candidate_pool,
                                                       logits_fn, last_layer_fn)
    # All remaining configuration is forwarded unchanged to adanet.Estimator.
    super(AutoEnsembleEstimator, self).__init__(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=max_iteration_steps,
        ensemblers=ensemblers,
        ensemble_strategies=ensemble_strategies,
        evaluator=evaluator,
        metric_fn=metric_fn,
        force_grow=force_grow,
        adanet_loss_decay=adanet_loss_decay,
        worker_wait_timeout_secs=worker_wait_timeout_secs,
        model_dir=model_dir,
        config=config,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        global_step_combiner_fn=global_step_combiner_fn,
        max_iterations=max_iterations,
        replay_config=replay_config,
        **kwargs)
class AutoEnsembleTPUEstimator(core.TPUEstimator):  # pylint: disable=g-classes-have-attributes
  # pyformat: disable
  """A :class:`tf.estimator.tpu.TPUEstimator` that learns to ensemble models.

  Specifically, it learns to ensemble models from a candidate pool using the
  Adanet algorithm.

  This estimator is capable of training and evaluating on TPU. It can ensemble
  both :class:`tf.estimator.tpu.TPUEstimator` candidates as well as regular
  :class:`tf.estimator.Estimator` candidates, as long as these candidates are
  TPU compatible.

  Note the following restrictions compared to AutoEnsembleEstimator:
    * All candidates must wrap their optimizers with a
      :class:`tf.tpu.CrossShardOptimizer`.
    * The `input_fn` must expose a `params` argument.
    * The `model_fn` of :class:`tf.estimator.tpu.TPUEstimator` candidates must
      also expose a `params` argument.

  WARNING: This Estimator is a work in progress and the API could change at any
  moment. May not support all AutoEnsembleEstimator features.

  .. code-block:: python

    # A simple example of learning to ensemble linear and neural network
    # models on TPU.

    import adanet
    import tensorflow as tf

    feature_columns = ...

    head = MultiClassHead(n_classes=10)

    # Learn to ensemble linear and DNN models.
    estimator = adanet.AutoEnsembleTPUEstimator(
        head=head,
        candidate_pool=lambda config: {
            "linear":
                tf.estimator.LinearEstimator(
                    head=head,
                    feature_columns=feature_columns,
                    config=config,
                    optimizer=tf.tpu.CrossShardOptimizer(...)),
            "dnn":
                tf.estimator.DNNEstimator(
                    head=head,
                    feature_columns=feature_columns,
                    config=config,
                    optimizer=tf.tpu.CrossShardOptimizer(...),
                    hidden_units=[1000, 500, 100])},
        max_iteration_steps=50)

    # Input builders
    def input_fn_train(params):
      # Returns tf.data.Dataset of (x, y) tuple where y represents label's
      # class index.
      pass
    def input_fn_eval(params):
      # Returns tf.data.Dataset of (x, y) tuple where y represents label's
      # class index.
      pass
    def input_fn_predict():
      # Returns tf.data.Dataset of (x, None) tuple.
      pass

    estimator.train(input_fn=input_fn_train, steps=100)
    metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
    predictions = estimator.predict(input_fn=input_fn_predict)

  Args:
    head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
      evaluation metrics for every candidate.
    candidate_pool: List of :class:`tf.estimator.tpu.TPUEstimator` and
      :class:`AutoEnsembleSubestimator` objects, or dict of string name to
      :class:`tf.estimator.tpu.TPUEstimator` and
      :class:`AutoEnsembleSubestimator` objects that are candidate subestimators
      to ensemble at each iteration. The order does not directly affect which
      candidates will be included in the final ensemble, but will affect the
      name of the candidate. When using a dict, the string key becomes the
      candidate subestimator's name. Alternatively, this argument can be a
      function that takes a `config` argument and returns the aforementioned
      values in case the objects need to be re-instantiated at each adanet
      iteration.
    max_iteration_steps: See :class:`adanet.Estimator`.
    logits_fn: A function for fetching the subnetwork logits from a
      :class:`tf.estimator.EstimatorSpec`, which should obey the following
      signature:
      - `Args`: Can only have following argument:
        - estimator_spec: The candidate's :class:`tf.estimator.EstimatorSpec`.
      - `Returns`: Logits :class:`tf.Tensor` or dict of string to logits
        :class:`tf.Tensor` (for multi-head) for the candidate subnetwork
        extracted from the given `estimator_spec`. When `None`, it will
        default to returning `estimator_spec.predictions` when they are a
        :class:`tf.Tensor` or the :class:`tf.Tensor` for the key 'logits' when
        they are a dict of string to :class:`tf.Tensor`.
    last_layer_fn: An optional function for fetching the subnetwork last_layer
      from a :class:`tf.estimator.EstimatorSpec`, which should obey the
      following signature:
      - `Args`: Can only have following argument:
        - estimator_spec: The candidate's :class:`tf.estimator.EstimatorSpec`.
      - `Returns`: Last layer :class:`tf.Tensor` or dict of string to last
        layer :class:`tf.Tensor` (for multi-head) for the candidate subnetwork
        extracted from the given `estimator_spec`. The last_layer can be used
        for learning ensembles or exporting them as embeddings.
        When `None`, it will default to using the logits as the last_layer.
    ensemblers: See :class:`adanet.Estimator`.
    ensemble_strategies: See :class:`adanet.Estimator`.
    evaluator: See :class:`adanet.Estimator`.
    metric_fn: See :class:`adanet.Estimator`.
    force_grow: See :class:`adanet.Estimator`.
    adanet_loss_decay: See :class:`adanet.Estimator`.
    model_dir: See :class:`adanet.Estimator`.
    config: See :class:`adanet.Estimator`.
    use_tpu: See :class:`adanet.Estimator`.
    eval_on_tpu: See :class:`adanet.Estimator`.
    export_to_tpu: See :class:`adanet.Estimator`.
    train_batch_size: See :class:`adanet.Estimator`.
    eval_batch_size: See :class:`adanet.Estimator`.
    embedding_config_spec: See :class:`adanet.Estimator`.
    debug: See :class:`adanet.Estimator`.
    enable_ensemble_summaries: See :class:`adanet.Estimator`.
    enable_subnetwork_summaries: See :class:`adanet.Estimator`.
    global_step_combiner_fn: See :class:`adanet.Estimator`.
    max_iterations: See :class:`adanet.Estimator`.
    replay_config: See :class:`adanet.Estimator`.
    **kwargs: Extra keyword args passed to the parent.

  Returns:
    An :class:`adanet.AutoEnsembleTPUEstimator` instance.

  Raises:
    ValueError: If any of the candidates in `candidate_pool` are not
      :class:`tf.estimator.Estimator` instances.
  """
  # pyformat: enable

  def __init__(self,
               head,
               candidate_pool,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               logits_fn=None,
               last_layer_fn=None,
               evaluator=None,
               metric_fn=None,
               force_grow=False,
               adanet_loss_decay=.9,
               model_dir=None,
               config=None,
               use_tpu=True,
               eval_on_tpu=True,
               export_to_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None,
               embedding_config_spec=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               replay_config=None,
               **kwargs):
    # Adapt the user-supplied candidate pool to AdaNet's subnetwork generator
    # interface; logits_fn/last_layer_fn control how tensors are extracted
    # from each candidate's EstimatorSpec.
    subnetwork_generator = _GeneratorFromCandidatePool(candidate_pool,
                                                       logits_fn, last_layer_fn)
    # All remaining configuration is forwarded unchanged to adanet.TPUEstimator.
    super(AutoEnsembleTPUEstimator, self).__init__(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=max_iteration_steps,
        ensemblers=ensemblers,
        ensemble_strategies=ensemble_strategies,
        evaluator=evaluator,
        metric_fn=metric_fn,
        force_grow=force_grow,
        adanet_loss_decay=adanet_loss_decay,
        model_dir=model_dir,
        config=config,
        use_tpu=use_tpu,
        eval_on_tpu=eval_on_tpu,
        export_to_tpu=export_to_tpu,
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        predict_batch_size=predict_batch_size,
        embedding_config_spec=embedding_config_spec,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        global_step_combiner_fn=global_step_combiner_fn,
        max_iterations=max_iterations,
        replay_config=replay_config,
        **kwargs)
| {
"repo_name": "tensorflow/adanet",
"path": "adanet/autoensemble/estimator.py",
"copies": "1",
"size": "17628",
"license": "apache-2.0",
"hash": -3537677981760136700,
"line_mean": 41.5797101449,
"line_max": 95,
"alpha_frac": 0.6358066712,
"autogenerated": false,
"ratio": 4.173295454545454,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011843282758123655,
"num_lines": 414
} |
"""An Ethereum client simulator that provides instant results and quick
feedback during development involving smart contracts.
https://github.com/ConsenSys/eth-testrpc
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
from testrpc import __version__
here = path.abspath(path.dirname(__file__))

# Use the repository README verbatim as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='eth-testrpc',
    # Single-sourced from testrpc/__init__.py so the package and PyPI agree.
    version=__version__,
    description='An Ethereum simulator for aiding smart contract development.',
    long_description=long_description,
    url='https://github.com/ConsenSys/eth-testrpc',
    author='ConsenSys',
    author_email='info@consensys.net',
    license='MIT',
    # Trove classifiers describing maturity, audience, and supported Pythons.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='ethereum blockchain development testing',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=[
        'jsonrpclib',
        'serpent',
        'ethereum',
    ],
    # Installs a `testrpc` console command that runs testrpc.__main__:main.
    entry_points={
        'console_scripts': [
            'testrpc=testrpc.__main__:main',
        ],
    }
)
| {
"repo_name": "Firescar96/eth-testrpc",
"path": "setup.py",
"copies": "2",
"size": "1429",
"license": "mit",
"hash": -5830539315146508000,
"line_mean": 30.0652173913,
"line_max": 79,
"alpha_frac": 0.6417074878,
"autogenerated": false,
"ratio": 4.014044943820225,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655752431620225,
"avg_score": null,
"num_lines": null
} |
"""An Ethereum client simulator that provides instant results and quick
feedback during development involving smart contracts.
https://github.com/pipermerriam/eth-testrpc
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# The repository README doubles as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Trove classifiers describing audience, license, and supported Pythons.
CLASSIFIERS = [
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Testing',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
]

INSTALL_REQUIRES = [
    'Werkzeug>=0.11.10',
    'click>=6.6',
    'ethereum>=1.6.1,<2.0.0',
    'json-rpc>=1.10.3',
    'rlp>=0.4.7,<=0.6.0',
]

setup(
    name='eth-testrpc',
    version="1.3.5",
    description='An Ethereum simulator for aiding smart contract development.',
    long_description=long_description,
    url='https://github.com/pipermerriam/eth-testrpc',
    author='Piper Merriam',
    author_email='pipermerriam@gmail.com',
    license='MIT',
    classifiers=CLASSIFIERS,
    keywords='ethereum blockchain development testing',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=INSTALL_REQUIRES,
    # Optional gevent-based concurrency support: pip install eth-testrpc[gevent]
    extras_require={
        'gevent': [
            "gevent>=1.1.1,<1.2.0",
        ],
    },
    # Installs a `testrpc-py` console command that runs testrpc.cli:runserver.
    entry_points={
        'console_scripts': [
            'testrpc-py=testrpc.cli:runserver',
        ],
    }
)
| {
"repo_name": "ConsenSys/eth-testrpc",
"path": "setup.py",
"copies": "3",
"size": "1640",
"license": "mit",
"hash": 6005074619318827000,
"line_mean": 29.9433962264,
"line_max": 79,
"alpha_frac": 0.6085365854,
"autogenerated": false,
"ratio": 3.5964912280701755,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 53
} |
"""A NetworkRegularizer that targets inference latency."""
from typing import List, Optional, Type
from morph_net.framework import batch_norm_source_op_handler
from morph_net.framework import conv2d_transpose_source_op_handler as conv2d_transpose_handler
from morph_net.framework import conv_source_op_handler as conv_handler
from morph_net.framework import generic_regularizers
from morph_net.framework import matmul_source_op_handler as matmul_handler
from morph_net.framework import op_handler_decorator
from morph_net.framework import op_handlers
from morph_net.framework import op_regularizer_manager as orm
from morph_net.network_regularizers import cost_calculator
from morph_net.network_regularizers import logistic_sigmoid_regularizer
from morph_net.network_regularizers import resource_function
import tensorflow.compat.v1 as tf
class LogisticSigmoidLatencyRegularizer(
    logistic_sigmoid_regularizer.LogisticSigmoidRegularizer):
  """A LogisticSigmoidRegularizer that targets Latency.

  Args:
    output_boundary: An OpRegularizer will be created for all these
      operations, and recursively for all ops they depend on via data
      dependency that does not involve ops from input_boundary.
    batch_size: Integer batch size to calculate cost/loss for.
    regularize_on_mask: Bool. If True uses the binary mask as the
      regularization vector. Else uses the probability vector.
    alive_threshold: Float. Threshold below which values are considered dead.
      This can be used both when mask_as_alive_vector is True and then the
      threshold is used to binarize the sampled values and
      when mask_as_alive_vector is False, and then the threshold is on the
      channel probability.
    mask_as_alive_vector: Bool. If True use the thresholded sampled mask
      as the alive vector. Else, use thresholded probabilities from the
      logits.
    regularizer_decorator: A string, the name of the regularizer decorators to
      use. Supported decorators are listed in
      op_regularizer_decorator.SUPPORTED_DECORATORS.
    decorator_parameters: A dictionary of parameters to pass to the decorator
      factory. To be used only with decorators that requires parameters,
      otherwise use None.
    input_boundary: A list of ops that represent the input boundary of the
      subgraph being regularized (input boundary is not regularized).
    force_group: List of regex for ops that should be force-grouped. Each
      regex corresponds to a separate group. Use '|' operator to specify
      multiple patterns in a single regex. See op_regularizer_manager for more
      detail.
    regularizer_blacklist: List of regex for ops that should not be
      regularized. See op_regularizer_manager for more detail.
  """

  def __init__(self,
               output_boundary: List[tf.Operation],
               hardware,
               batch_size=1,
               regularize_on_mask=True,
               alive_threshold=0.1,
               mask_as_alive_vector=True,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group=None,
               regularizer_blacklist=None):
    # Record the latency parameters before delegating to the base
    # initializer; `get_calculator` needs them, and the base class
    # presumably calls it during setup, so the original order is kept.
    self._hardware = hardware
    self._batch_size = batch_size
    base_options = dict(
        output_boundary=output_boundary,
        regularize_on_mask=regularize_on_mask,
        alive_threshold=alive_threshold,
        mask_as_alive_vector=mask_as_alive_vector,
        regularizer_decorator=regularizer_decorator,
        decorator_parameters=decorator_parameters,
        input_boundary=input_boundary,
        force_group=force_group,
        regularizer_blacklist=regularizer_blacklist)
    super().__init__(**base_options)

  def get_calculator(self):
    """Build a CostCalculator that scores latency on the target hardware."""
    latency_fn = resource_function.latency_function_factory(
        self._hardware, self._batch_size)
    return cost_calculator.CostCalculator(self._manager, latency_fn)

  @property
  def name(self):
    return 'LogisticSigmoidLatency'

  @property
  def cost_name(self):
    # e.g. 'V100 Latency' for hardware='V100'.
    return self._hardware + ' Latency'
class GammaLatencyRegularizer(generic_regularizers.NetworkRegularizer):
  """A NetworkRegularizer that targets latency using Gamma L1."""

  def __init__(self,
               output_boundary: List[tf.Operation],
               gamma_threshold,
               hardware,
               batch_size=1,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group=None,
               regularizer_blacklist=None) -> None:
    """Creates a GammaLatencyRegularizer object.

    Latency cost and regularization loss are calculated for the given
    hardware platform.

    Args:
      output_boundary: Ops for which an OpRegularizer is created; the same
        happens recursively for every op they depend on via data
        dependencies, stopping at input_boundary.
      gamma_threshold: Float used as the 'gamma_threshold' of every
        GammaL1Regularizer created by this class.
      hardware: String name of the hardware platform to target. Must be a
        key from resource_function.PEAK_COMPUTE.
      batch_size: Integer batch size used when computing cost/loss.
      regularizer_decorator: A string, the name of the regularizer decorator
        to use; supported decorators are listed in
        op_regularizer_decorator.SUPPORTED_DECORATORS.
      decorator_parameters: Dict of parameters for the decorator factory,
        or None for decorators that take no parameters.
      input_boundary: Ops forming the input boundary of the regularized
        subgraph (the boundary itself is not regularized).
      force_group: List of regexes of ops to force-group; one group per
        regex, with '|' combining several patterns in one regex. See
        op_regularizer_manager for more detail.
      regularizer_blacklist: List of regexes of ops to exclude from
        regularization. See op_regularizer_manager for more detail.
    """
    # Batch-norm gammas are the regularization sources.
    bn_handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
        gamma_threshold)
    if regularizer_decorator:
      bn_handler = op_handler_decorator.OpHandlerDecorator(
          bn_handler, regularizer_decorator, decorator_parameters)
    handlers = op_handlers.get_gamma_op_handler_dict()
    for fused_bn_op in ('FusedBatchNorm', 'FusedBatchNormV2',
                        'FusedBatchNormV3'):
      handlers[fused_bn_op] = bn_handler
    self._manager = orm.OpRegularizerManager(
        output_boundary, handlers, input_boundary=input_boundary,
        force_group=force_group, regularizer_blacklist=regularizer_blacklist)
    latency_fn = resource_function.latency_function_factory(
        hardware, batch_size)
    self._calculator = cost_calculator.CostCalculator(self._manager,
                                                      latency_fn)
    self._hardware = hardware

  def get_regularization_term(self, ops=None):
    """Returns the regularization loss, optionally restricted to `ops`."""
    return self._calculator.get_regularization_term(ops)

  def get_cost(self, ops=None):
    """Returns the estimated latency cost, optionally restricted to `ops`."""
    return self._calculator.get_cost(ops)

  @property
  def op_regularizer_manager(self):
    return self._manager

  @property
  def name(self):
    return 'Latency'

  @property
  def cost_name(self):
    return self._hardware + ' Latency'
class GroupLassoLatencyRegularizer(generic_regularizers.NetworkRegularizer):
  """A NetworkRegularizer that targets Latency using L1 group lasso."""

  def __init__(self,
               output_boundary: List[tf.Operation],
               threshold,
               hardware,
               batch_size=1,
               l1_fraction=0,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group=None,
               regularizer_blacklist=None):
    """Creates a GroupLassoLatencyRegularizer object.

    Latency cost and regularization loss are calculated for the given
    hardware platform.

    Args:
      output_boundary: An OpRegularizer will be created for all these
        operations, and recursively for all ops they depend on via data
        dependency that does not involve ops from input_boundary.
      threshold: A float scalar, will be used as a 'threshold' for all
        regularizer instances created by this class.
      hardware: String name of hardware platform to target. Must be a key
        from resource_function.PEAK_COMPUTE.
      batch_size: Integer batch size to calculate cost/loss for.
      l1_fraction: Relative weight of L1 in L1 + L2 regularization.
      regularizer_decorator: A class of OpRegularizer decorator to use.
      decorator_parameters: A dictionary of parameters to pass to the
        decorator factory. To be used only with decorators that require
        parameters, otherwise use None.
      input_boundary: A list of ops that represent the input boundary of the
        subgraph being regularized (input boundary is not regularized).
      force_group: List of regex for ops that should be force-grouped. Each
        regex corresponds to a separate group. Use '|' operator to specify
        multiple patterns in a single regex. See op_regularizer_manager for
        more detail.
      regularizer_blacklist: List of regex for ops that should not be
        regularized. See op_regularizer_manager for more detail.
    """
    # Group-lasso sources: convolutions and matmuls contribute their own
    # weight-norm regularizers.
    custom_handlers = {
        'Conv2D':
            conv_handler.ConvSourceOpHandler(threshold, l1_fraction),
        'Conv3D':
            conv_handler.ConvSourceOpHandler(threshold, l1_fraction),
        'Conv2DBackpropInput':
            conv2d_transpose_handler.Conv2DTransposeSourceOpHandler(
                threshold, l1_fraction),
        'MatMul':
            matmul_handler.MatMulSourceOpHandler(threshold, l1_fraction)
    }
    if regularizer_decorator:
      for key in custom_handlers:
        custom_handlers[key] = op_handler_decorator.OpHandlerDecorator(
            custom_handlers[key], regularizer_decorator, decorator_parameters)
    op_handler_dict = op_handlers.get_group_lasso_op_handler_dict()
    op_handler_dict.update(custom_handlers)
    self._manager = orm.OpRegularizerManager(
        output_boundary,
        op_handler_dict,
        input_boundary=input_boundary,
        force_group=force_group,
        regularizer_blacklist=regularizer_blacklist)
    self._calculator = cost_calculator.CostCalculator(
        self._manager,
        resource_function.latency_function_factory(hardware, batch_size))
    self._hardware = hardware

  def get_regularization_term(self, ops=None):
    """Returns the regularization loss, optionally restricted to `ops`."""
    return self._calculator.get_regularization_term(ops)

  def get_cost(self, ops=None):
    """Returns the estimated latency cost, optionally restricted to `ops`."""
    return self._calculator.get_cost(ops)

  @property
  def op_regularizer_manager(self):
    return self._manager

  @property
  def name(self):
    return 'Latency'

  @property
  def cost_name(self):
    return self._hardware + ' Latency'
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/network_regularizers/latency_regularizer.py",
"copies": "1",
"size": "11194",
"license": "apache-2.0",
"hash": 7609016438589566000,
"line_mean": 41.241509434,
"line_max": 94,
"alpha_frac": 0.6931391817,
"autogenerated": false,
"ratio": 4.41577909270217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.560891827440217,
"avg_score": null,
"num_lines": null
} |
"""A NetworkRegularizer that targets the number of FLOPs."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
from typing import List, Optional, Type
from morph_net.framework import batch_norm_source_op_handler
from morph_net.framework import conv2d_transpose_source_op_handler as conv2d_transpose_handler
from morph_net.framework import conv_source_op_handler as conv_handler
from morph_net.framework import generic_regularizers
from morph_net.framework import matmul_source_op_handler as matmul_handler
from morph_net.framework import op_handler_decorator
from morph_net.framework import op_handlers
from morph_net.framework import op_regularizer_manager as orm
from morph_net.network_regularizers import cost_calculator
from morph_net.network_regularizers import logistic_sigmoid_regularizer
from morph_net.network_regularizers import resource_function
import tensorflow.compat.v1 as tf
class LogisticSigmoidFlopsRegularizer(
    logistic_sigmoid_regularizer.LogisticSigmoidRegularizer):
  """A LogisticSigmoidRegularizer whose targeted cost is FLOPs."""

  def get_calculator(self):
    """Returns a CostCalculator that measures FLOPs."""
    return cost_calculator.CostCalculator(self._manager,
                                          resource_function.flop_function)

  @property
  def name(self):
    """Human-readable name of this regularizer."""
    return 'LogisticSigmoidFlops'

  @property
  def cost_name(self):
    """Name of the targeted cost."""
    return 'FLOPs'
class GammaFlopsRegularizer(generic_regularizers.NetworkRegularizer):
  """A NetworkRegularizer that targets FLOPs using Gamma L1 as OpRegularizer."""

  def __init__(self,
               output_boundary: List[tf.Operation],
               gamma_threshold,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group=None,
               regularizer_blacklist=None):
    """Creates a GammaFlopsRegularizer object.

    Args:
      output_boundary: Ops for which an OpRegularizer is created; the same
        happens recursively for every op they depend on via data
        dependencies, stopping at input_boundary.
      gamma_threshold: Float used as the 'gamma_threshold' of every
        GammaL1Regularizer created by this class.
      regularizer_decorator: A class of OpRegularizer decorator to use.
      decorator_parameters: Dict of parameters for the decorator factory,
        or None for decorators that take no parameters.
      input_boundary: Ops forming the input boundary of the regularized
        subgraph (the boundary itself is not regularized).
      force_group: List of regexes of ops to force-group; one group per
        regex, with '|' combining several patterns in one regex. See
        op_regularizer_manager for more detail.
      regularizer_blacklist: List of regexes of ops to exclude from
        regularization. See op_regularizer_manager for more detail.
    """
    # Batch-norm gammas are the regularization sources.
    bn_handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
        gamma_threshold)
    if regularizer_decorator:
      bn_handler = op_handler_decorator.OpHandlerDecorator(
          bn_handler, regularizer_decorator, decorator_parameters)
    handlers = op_handlers.get_gamma_op_handler_dict()
    for fused_bn_op in ('FusedBatchNorm', 'FusedBatchNormV2',
                        'FusedBatchNormV3'):
      handlers[fused_bn_op] = bn_handler
    self._manager = orm.OpRegularizerManager(
        output_boundary, handlers, input_boundary=input_boundary,
        force_group=force_group, regularizer_blacklist=regularizer_blacklist)
    self._calculator = cost_calculator.CostCalculator(
        self._manager, resource_function.flop_function)

  def get_regularization_term(self, ops=None):
    """Returns the regularization loss, optionally restricted to `ops`."""
    return self._calculator.get_regularization_term(ops)

  def get_cost(self, ops=None):
    """Returns the estimated FLOP cost, optionally restricted to `ops`."""
    return self._calculator.get_cost(ops)

  @property
  def op_regularizer_manager(self):
    return self._manager

  @property
  def name(self):
    return 'GammaFlops'

  @property
  def cost_name(self):
    return 'FLOPs'
class GroupLassoFlopsRegularizer(generic_regularizers.NetworkRegularizer):
  """A NetworkRegularizer that targets FLOPs using L1 group lasso."""

  def __init__(self,
               output_boundary: List[tf.Operation],
               threshold,
               l1_fraction=0,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group=None,
               regularizer_blacklist=None):
    """Creates a GroupLassoFlopsRegularizer object.

    Args:
      output_boundary: Ops for which an OpRegularizer is created; the same
        happens recursively for every op they depend on via data
        dependencies, stopping at input_boundary.
      threshold: Float used as the 'threshold' of every regularizer
        instance created by this class.
      l1_fraction: Relative weight of L1 in L1 + L2 regularization.
      regularizer_decorator: A class of OpRegularizer decorator to use.
      decorator_parameters: Dict of parameters for the decorator factory,
        or None for decorators that take no parameters.
      input_boundary: Ops forming the input boundary of the regularized
        subgraph (the boundary itself is not regularized).
      force_group: List of regexes of ops to force-group; one group per
        regex, with '|' combining several patterns in one regex. See
        op_regularizer_manager for more detail.
      regularizer_blacklist: List of regexes of ops to exclude from
        regularization. See op_regularizer_manager for more detail.
    """
    # Group-lasso sources: convolutions and matmuls contribute their own
    # weight-norm regularizers.
    custom_handlers = {
        'Conv2D': conv_handler.ConvSourceOpHandler(threshold, l1_fraction),
        'Conv3D': conv_handler.ConvSourceOpHandler(threshold, l1_fraction),
        'Conv2DBackpropInput':
            conv2d_transpose_handler.Conv2DTransposeSourceOpHandler(
                threshold, l1_fraction),
        'MatMul': matmul_handler.MatMulSourceOpHandler(threshold, l1_fraction),
    }
    if regularizer_decorator:
      custom_handlers = {
          op_type: op_handler_decorator.OpHandlerDecorator(
              handler, regularizer_decorator, decorator_parameters)
          for op_type, handler in custom_handlers.items()
      }
    op_handler_dict = op_handlers.get_group_lasso_op_handler_dict()
    op_handler_dict.update(custom_handlers)
    self._manager = orm.OpRegularizerManager(
        output_boundary,
        op_handler_dict,
        input_boundary=input_boundary,
        force_group=force_group,
        regularizer_blacklist=regularizer_blacklist)
    self._calculator = cost_calculator.CostCalculator(
        self._manager, resource_function.flop_function)

  def get_regularization_term(self, ops=None):
    """Returns the regularization loss, optionally restricted to `ops`."""
    return self._calculator.get_regularization_term(ops)

  def get_cost(self, ops=None):
    """Returns the estimated FLOP cost, optionally restricted to `ops`."""
    return self._calculator.get_cost(ops)

  @property
  def op_regularizer_manager(self):
    return self._manager

  @property
  def name(self):
    return 'GroupLassoFlops'

  @property
  def cost_name(self):
    return 'FLOPs'
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/network_regularizers/flop_regularizer.py",
"copies": "1",
"size": "7684",
"license": "apache-2.0",
"hash": -4737877778328226000,
"line_mean": 39.0208333333,
"line_max": 94,
"alpha_frac": 0.7048412285,
"autogenerated": false,
"ratio": 4.2359426681367145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007330906353183832,
"num_lines": 192
} |
"""A NetworkRegularizer that targets the number of weights in the model."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
from typing import List, Optional, Text, Type
from morph_net.framework import batch_norm_source_op_handler
from morph_net.framework import conv2d_transpose_source_op_handler as conv2d_transpose_handler
from morph_net.framework import conv_source_op_handler as conv_handler
from morph_net.framework import generic_regularizers
from morph_net.framework import matmul_source_op_handler as matmul_handler
from morph_net.framework import op_handler_decorator
from morph_net.framework import op_handlers
from morph_net.framework import op_regularizer_manager as orm
from morph_net.network_regularizers import cost_calculator
from morph_net.network_regularizers import logistic_sigmoid_regularizer
from morph_net.network_regularizers import resource_function
import tensorflow.compat.v1 as tf
class LogisticSigmoidModelSizeRegularizer(
    logistic_sigmoid_regularizer.LogisticSigmoidRegularizer):
  """A LogisticSigmoidRegularizer whose targeted cost is model size."""

  def get_calculator(self):
    """Returns a CostCalculator that measures model size (weight count)."""
    return cost_calculator.CostCalculator(
        self._manager, resource_function.model_size_function)

  @property
  def name(self):
    """Human-readable name of this regularizer."""
    return 'LogisticSigmoidModelSize'

  @property
  def cost_name(self):
    """Name of the targeted cost."""
    return 'ModelSize'
class GammaModelSizeRegularizer(generic_regularizers.NetworkRegularizer):
  """A NetworkRegularizer that targets model size using Gamma L1."""

  def __init__(self,
               output_boundary: List[tf.Operation],
               gamma_threshold,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group=None,
               regularizer_blacklist=None):
    """Creates a GammaModelSizeRegularizer object.

    Args:
      output_boundary: Ops for which an OpRegularizer is created; the same
        happens recursively for every op they depend on via data
        dependencies, stopping at input_boundary.
      gamma_threshold: Float used as the 'gamma_threshold' of every
        GammaL1Regularizer created by this class.
      regularizer_decorator: A string, the name of the regularizer decorator
        to use; supported decorators are listed in
        op_regularizer_decorator.SUPPORTED_DECORATORS.
      decorator_parameters: Dict of parameters for the decorator factory,
        or None for decorators that take no parameters.
      input_boundary: Ops forming the input boundary of the regularized
        subgraph (the boundary itself is not regularized).
      force_group: List of regexes of ops to force-group; one group per
        regex, with '|' combining several patterns in one regex. See
        op_regularizer_manager for more detail.
      regularizer_blacklist: List of regexes of ops to exclude from
        regularization. See op_regularizer_manager for more detail.
    """
    # Batch-norm gammas are the regularization sources.
    bn_handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
        gamma_threshold)
    if regularizer_decorator:
      bn_handler = op_handler_decorator.OpHandlerDecorator(
          bn_handler, regularizer_decorator, decorator_parameters)
    handlers = op_handlers.get_gamma_op_handler_dict()
    for fused_bn_op in ('FusedBatchNorm', 'FusedBatchNormV2',
                        'FusedBatchNormV3'):
      handlers[fused_bn_op] = bn_handler
    self._manager = orm.OpRegularizerManager(
        output_boundary, handlers, input_boundary=input_boundary,
        force_group=force_group, regularizer_blacklist=regularizer_blacklist)
    self._calculator = cost_calculator.CostCalculator(
        self._manager, resource_function.model_size_function)

  def get_regularization_term(self, ops=None):
    """Returns the regularization loss, optionally restricted to `ops`."""
    return self._calculator.get_regularization_term(ops)

  def get_cost(self, ops=None):
    """Returns the estimated model size, optionally restricted to `ops`."""
    return self._calculator.get_cost(ops)

  @property
  def op_regularizer_manager(self):
    return self._manager

  @property
  def name(self):
    return 'GammaModelSize'

  @property
  def cost_name(self):
    return 'ModelSize'
class GroupLassoModelSizeRegularizer(generic_regularizers.NetworkRegularizer):
  """A NetworkRegularizer that targets model size using L1 group lasso."""

  def __init__(self,
               output_boundary: List[tf.Operation],
               threshold,
               l1_fraction=0.0,
               regularizer_decorator: Optional[Type[
                   generic_regularizers.OpRegularizer]] = None,
               decorator_parameters=None,
               input_boundary: Optional[List[tf.Operation]] = None,
               force_group: Optional[List[Text]] = None,
               regularizer_blacklist: Optional[List[Text]] = None):
    """Creates a GroupLassoModelSizeRegularizer object.

    Args:
      output_boundary: Ops for which an OpRegularizer is created; the same
        happens recursively for every op they depend on via data
        dependencies, stopping at input_boundary.
      threshold: Float used as the 'threshold' of every regularizer
        instance created by this class.
      l1_fraction: Float; relative weight of L1 in L1 + L2 regularization.
      regularizer_decorator: A class of OpRegularizer decorator to use.
      decorator_parameters: Dict of parameters for the decorator factory,
        or None for decorators that take no parameters.
      input_boundary: Ops forming the input boundary of the regularized
        subgraph (the boundary itself is not regularized).
      force_group: List of regexes of ops to force-group; one group per
        regex, with '|' combining several patterns in one regex. See
        op_regularizer_manager for more detail.
      regularizer_blacklist: List of regexes of ops to exclude from
        regularization. See op_regularizer_manager for more detail.
    """
    # Group-lasso sources: convolutions and matmuls contribute their own
    # weight-norm regularizers.
    custom_handlers = {
        'Conv2D': conv_handler.ConvSourceOpHandler(threshold, l1_fraction),
        'Conv3D': conv_handler.ConvSourceOpHandler(threshold, l1_fraction),
        'Conv2DBackpropInput':
            conv2d_transpose_handler.Conv2DTransposeSourceOpHandler(
                threshold, l1_fraction),
        'MatMul': matmul_handler.MatMulSourceOpHandler(threshold, l1_fraction),
    }
    if regularizer_decorator:
      custom_handlers = {
          op_type: op_handler_decorator.OpHandlerDecorator(
              handler, regularizer_decorator, decorator_parameters)
          for op_type, handler in custom_handlers.items()
      }
    op_handler_dict = op_handlers.get_group_lasso_op_handler_dict()
    op_handler_dict.update(custom_handlers)
    self._manager = orm.OpRegularizerManager(
        output_boundary,
        op_handler_dict,
        input_boundary=input_boundary,
        force_group=force_group,
        regularizer_blacklist=regularizer_blacklist)
    self._calculator = cost_calculator.CostCalculator(
        self._manager, resource_function.model_size_function)

  def get_regularization_term(self, ops=None):
    """Returns the regularization loss, optionally restricted to `ops`."""
    return self._calculator.get_regularization_term(ops)

  def get_cost(self, ops=None):
    """Returns the estimated model size, optionally restricted to `ops`."""
    return self._calculator.get_cost(ops)

  @property
  def op_regularizer_manager(self):
    return self._manager

  @property
  def name(self):
    return 'GroupLassoModelSize'

  @property
  def cost_name(self):
    return 'ModelSize'
| {
"repo_name": "google-research/morph-net",
"path": "morph_net/network_regularizers/model_size_regularizer.py",
"copies": "1",
"size": "7932",
"license": "apache-2.0",
"hash": 4083665041272447000,
"line_mean": 40.3125,
"line_max": 94,
"alpha_frac": 0.7081442259,
"autogenerated": false,
"ratio": 4.303852414541509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5511996640441509,
"avg_score": null,
"num_lines": null
} |
"""A network socket data source."""
import logging
import socket
from .base import DataSourceError
from .socket import SocketDataSource
LOG = logging.getLogger(__name__)
class NetworkDataSource(SocketDataSource):
    """A data source reading from a network socket, as implemented
    in the openxc-vehicle-simulator .
    """
    DEFAULT_PORT = 50001

    def __init__(self, host=None, port=None, **kwargs):
        """Open a TCP connection to the vehicle data network socket.

        Kwargs:
            host - optionally override the default network host (default is
                the local machine)
            port - optionally override the default network port (default is
                50001)
            log_mode - optionally record or print logs from the network
                source

        Raises:
            DataSourceError if the socket connection cannot be opened.
        """
        super(NetworkDataSource, self).__init__(**kwargs)
        # Fall back to the local machine / default port when not specified.
        self.host = host or socket.gethostbyname(socket.gethostname())
        self.port = int(port or self.DEFAULT_PORT)

        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.connect((self.host, self.port))
        except (OSError, socket.error) as e:
            message = ("Unable to open socket connection at "
                       "%s:%s: %s" % (self.host, self.port, e))
            raise DataSourceError(message)
        else:
            LOG.debug("Opened socket connection at %s:%s", self.host, self.port)
| {
"repo_name": "openxc/openxc-python",
"path": "openxc/sources/network.py",
"copies": "1",
"size": "1472",
"license": "bsd-3-clause",
"hash": -8358752840784691000,
"line_mean": 34.0476190476,
"line_max": 90,
"alpha_frac": 0.640625,
"autogenerated": false,
"ratio": 4.380952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016361003834708177,
"num_lines": 42
} |
""" A neural chatbot using sequence to sequence model with
attentional decoder.
This is based on Google Translate Tensorflow model
https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/
"""
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
from model import ChatBotModel
import config
import data
def _get_random_bucket(train_buckets_scale):
""" Get a random bucket from which to choose a training sample """
rand = random.random()
return min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > rand])
def _assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks):
""" Assert that the encoder inputs, decoder inputs, and decoder masks are
of the expected lengths """
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(decoder_masks) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_masks), decoder_size))
def run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, forward_only):
    """Run one step of training or evaluation.

    @forward_only: boolean value to decide whether a backward path should be
    created. forward_only is set to True when you just want to evaluate on
    the test set, or when you want the bot to be in chat mode.

    Returns:
        A (gradient_norm, loss, outputs) tuple; gradient_norm is None when
        forward_only is True, and outputs is None otherwise.
    """
    encoder_size, decoder_size = config.BUCKETS[bucket_id]
    _assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks)

    # input feed: encoder inputs, decoder inputs, target_weights, as provided.
    # `range` (not the Python-2-only `xrange`) keeps this Python 3 compatible.
    input_feed = {}
    for step in range(encoder_size):
        input_feed[model.encoder_inputs[step].name] = encoder_inputs[step]
    for step in range(decoder_size):
        input_feed[model.decoder_inputs[step].name] = decoder_inputs[step]
        input_feed[model.decoder_masks[step].name] = decoder_masks[step]

    # The decoder is fed one extra all-zero target to mark the sequence end.
    last_target = model.decoder_inputs[decoder_size].name
    input_feed[last_target] = np.zeros([model.batch_size], dtype=np.int32)

    # output feed: depends on whether we do a backward step or not.
    if not forward_only:
        output_feed = [model.train_ops[bucket_id],       # update op that does SGD.
                       model.gradient_norms[bucket_id],  # gradient norm.
                       model.losses[bucket_id]]          # loss for this batch.
    else:
        output_feed = [model.losses[bucket_id]]          # loss for this batch.
        for step in range(decoder_size):                 # output logits.
            output_feed.append(model.outputs[bucket_id][step])

    outputs = sess.run(output_feed, input_feed)
    if not forward_only:
        return outputs[1], outputs[2], None  # Gradient norm, loss, no outputs.
    else:
        return None, outputs[0], outputs[1:]  # No gradient norm, loss, outputs.
def _get_buckets():
    """Load the dataset into buckets based on their lengths.

    Returns:
        test_buckets, data_buckets: bucketed test and train samples.
        train_buckets_scale: the cumulative fractions (ending at 1.0) that
            help choose a random bucket later on, with probability
            proportional to bucket size.
    """
    test_buckets = data.load_data('test_ids.enc', 'test_ids.dec')
    data_buckets = data.load_data('train_ids.enc', 'train_ids.dec')
    # `range` (not the Python-2-only `xrange`) keeps this Python 3 compatible.
    train_bucket_sizes = [len(data_buckets[b]) for b in range(len(config.BUCKETS))]
    print("Number of samples in each bucket:\n", train_bucket_sizes)
    train_total_size = sum(train_bucket_sizes)
    # list of increasing numbers from 0 to 1 that we'll use to select a bucket.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in range(len(train_bucket_sizes))]
    print("Bucket scale:\n", train_buckets_scale)
    return test_buckets, data_buckets, train_buckets_scale
def _get_skip_step(iteration):
""" How many steps should the model train before it saves all the weights. """
if iteration < 100:
return 30
return 100
def _check_restore_parameters(sess, saver):
    """Restore previously trained parameters into `sess` if a checkpoint exists."""
    checkpoint_dir = os.path.dirname(config.CPT_PATH + '/checkpoint')
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
        print("Initializing fresh parameters for the Chatbot")
        return
    print("Loading parameters for the Chatbot")
    saver.restore(sess, ckpt.model_checkpoint_path)
def _eval_test_set(sess, model, test_buckets):
    """Evaluate the model on the test set, printing per-bucket loss and time.

    Args:
        sess: the tf.Session to run in.
        model: the ChatBotModel whose graph has been built.
        test_buckets: bucketed test samples, as returned by _get_buckets().
    """
    # `range` (not the Python-2-only `xrange`) keeps this Python 3 compatible.
    for bucket_id in range(len(config.BUCKETS)):
        if len(test_buckets[bucket_id]) == 0:
            print(" Test: empty bucket %d" % (bucket_id))
            continue
        start = time.time()
        encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(test_buckets[bucket_id],
                                                                       bucket_id,
                                                                       batch_size=config.BATCH_SIZE)
        # forward_only=True: evaluation never runs the backward pass.
        _, step_loss, _ = run_step(sess, model, encoder_inputs, decoder_inputs,
                                   decoder_masks, bucket_id, True)
        print('Test bucket {}: loss {}, time {}'.format(bucket_id, step_loss, time.time() - start))
def train():
    """Train the bot.

    Builds the full graph (including the backward path), restores any
    existing checkpoint, then loops forever: sample a bucket, run one
    training step, and periodically print the loss, save a checkpoint, and
    evaluate on the test set.
    """
    test_buckets, data_buckets, train_buckets_scale = _get_buckets()
    # in train mode, we need to create the backward path, so forward_only is False
    model = ChatBotModel(False, config.BATCH_SIZE)
    model.build_graph()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        print('Running session')
        sess.run(tf.global_variables_initializer())
        # sess.run(tf.initialize_all_variables())
        _check_restore_parameters(sess, saver)

        # Resume the iteration count from the restored global step (0 when
        # starting fresh).
        iteration = model.global_step.eval()
        total_loss = 0
        while True:
            skip_step = _get_skip_step(iteration)
            # Pick a bucket with probability proportional to its size.
            bucket_id = _get_random_bucket(train_buckets_scale)
            encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(data_buckets[bucket_id],
                                                                           bucket_id,
                                                                           batch_size=config.BATCH_SIZE)
            start = time.time()
            _, step_loss, _ = run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, False)
            total_loss += step_loss
            iteration += 1

            if iteration % skip_step == 0:
                # Report the loss averaged over the last skip_step iterations,
                # then checkpoint.
                print('Iter {}: loss {}, time {}'.format(iteration, total_loss/skip_step, time.time() - start))
                start = time.time()
                total_loss = 0
                saver.save(sess, os.path.join(config.CPT_PATH, 'chatbot'), global_step=model.global_step)
                if iteration % (10 * skip_step) == 0:
                    # Run evals on development set and print their loss
                    _eval_test_set(sess, model, test_buckets)
                    start = time.time()
                sys.stdout.flush()
def _get_user_input():
    """Print a prompt and return one raw line read from standard input."""
    sys.stdout.write("> ")
    sys.stdout.flush()
    return sys.stdin.readline()
def _find_right_bucket(length):
    """Return the id of the smallest bucket that can hold an encoder input
    of the given length.

    Raises ValueError (from min of an empty sequence) if no bucket is large
    enough; callers check the length against the largest bucket first.
    """
    # `range` (not the Python-2-only `xrange`) keeps this Python 3 compatible.
    return min(b for b in range(len(config.BUCKETS))
               if config.BUCKETS[b][0] >= length)
def _construct_response(output_logits, inv_dec_vocab):
    """ Construct a response to the user's encoder input.

    @output_logits: the outputs from sequence to sequence wrapper.
    output_logits is decoder_size np array, each of dim 1 x DEC_VOCAB

    This is a greedy decoder - outputs are just argmaxes of output_logits.
    """
    # Greedy decoding: pick the most likely token id at every decoder step.
    # (A leftover debug print of the raw logits was removed here.)
    outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
    # If there is an EOS symbol in outputs, cut them at that point.
    if config.EOS_ID in outputs:
        outputs = outputs[:outputs.index(config.EOS_ID)]
    # Map token ids back through the inverse vocabulary and join them.
    return "".join([inv_dec_vocab[output] for output in outputs])
def chat():
    """Interactive chat mode: read lines from stdin and print the bot's reply.

    In chat/test mode we don't need to create the backward path; the model
    is built with forward_only=True and batch_size=1. The conversation is
    appended to the output file configured in config.OUTPUT_FILE.
    """
    _, enc_vocab = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.enc'))
    inv_dec_vocab, _ = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.dec'))

    model = ChatBotModel(True, batch_size=1)
    model.build_graph()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # sess.run(tf.initialize_all_variables())
        _check_restore_parameters(sess, saver)
        output_file = open(os.path.join(config.PROCESSED_PATH, config.OUTPUT_FILE), 'a+')
        # Decode from standard input.
        max_length = config.BUCKETS[-1][0]
        print('Welcome to TensorBro. Say something. Enter to exit. Max length is', max_length)
        while True:
            line = _get_user_input()
            # Strip the single trailing newline left by readline().
            if len(line) > 0 and line[-1] == '\n':
                line = line[:-1]
            # An empty line ends the chat session.
            if line == '':
                break
            output_file.write('HUMAN ++++ ' + line + '\n')
            # Get token-ids for the input sentence.
            # token_ids = data.sentence2id(enc_vocab, str(line))
            # NOTE(review): decode() assumes `line` is a Python 2 byte string;
            # under Python 3 readline() returns str and this raises — confirm
            # the target interpreter.
            token_ids = data.sentence2id(enc_vocab, line.decode('utf-8'))
            if (len(token_ids) > max_length):
                print('Max length I can handle is:', max_length)
                line = _get_user_input()
                continue
            # Which bucket does it belong to?
            bucket_id = _find_right_bucket(len(token_ids))
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, decoder_masks = data.get_batch([(token_ids, [])],
                                                                           bucket_id,
                                                                           batch_size=1)
            # Get output logits for the sentence.
            _, _, output_logits = run_step(sess, model, encoder_inputs, decoder_inputs,
                                           decoder_masks, bucket_id, True)
            response = _construct_response(output_logits, inv_dec_vocab)
            print(response)
            output_file.write('BOT ++++ ' + response + '\n')
        output_file.write('=============================================\n')
        output_file.close()
def main():
    """Entry point: make sure data and checkpoint folders exist, then run the
    requested mode ('train' or 'chat')."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--mode', choices={'train', 'chat'},
                            default='train', help="mode. if not specified, it's in the train mode")
    parsed = arg_parser.parse_args()

    # Prepare the raw data once, on first run.
    if not os.path.isdir(config.PROCESSED_PATH):
        data.prepare_raw_data()
        data.process_data()
    print('Data ready!')
    # create checkpoints folder if there isn't one already
    data.make_dir(config.CPT_PATH)

    # Dispatch on the selected mode.
    handlers = {'train': train, 'chat': chat}
    handler = handlers.get(parsed.mode)
    if handler is not None:
        handler()


if __name__ == '__main__':
    main()
| {
"repo_name": "JackJiang1989/Wechat_Bot",
"path": "chatbot/chatbot.py",
"copies": "1",
"size": "11546",
"license": "mit",
"hash": 3097117237450880000,
"line_mean": 44.1015625,
"line_max": 116,
"alpha_frac": 0.5996015936,
"autogenerated": false,
"ratio": 3.89935832489024,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9973999681631303,
"avg_score": 0.0049920473717873625,
"num_lines": 256
} |
""" A Neural Network Module
Written by: Hayg Astourian
Last Modified: 9/15/2013 """
import math
import numpy as np
def logistic(val):
    """The logistic (sigmoid) function, used as a limiter.

    Returns 1 / (1 + exp(-val)), computed with the numerically stable
    two-branch form: the naive expression raises OverflowError for large
    negative inputs (math.exp overflows around exp(710)).
    """
    if val >= 0:
        return 1 / (1 + math.exp(-val))
    # For negative val, exp(val) <= 1 and cannot overflow.
    e = math.exp(val)
    return e / (1 + e)
class Layer(object):
    """One layer of the neural network.

    A layer is just its incoming weight matrix: one row per node in this
    layer, one column per node in the previous layer, so row i holds the
    weight vector of node i.
    """

    def __init__(self, num_nodes, num_nodes_in_prev_layer):
        # Weights start uniformly at random in [0, 1).
        self.weights_arr = np.random.rand(num_nodes, num_nodes_in_prev_layer)

    def get_weights(self):
        """Return the (num_nodes x num_nodes_in_prev_layer) weight matrix."""
        return self.weights_arr
class NeuralNetwork(object):
    """A feed-forward neural network.

    It can be trained and then used to approximate the function that
    generated the training data.
    """

    def __init__(self, nodes_per_layer, num_layers, num_outputs, limiter):
        self.nodes_per_layer = nodes_per_layer
        # Hidden layers are square: each connects nodes_per_layer nodes to the
        # nodes_per_layer nodes of the layer before it.
        self.layers = [Layer(nodes_per_layer, nodes_per_layer)
                       for _ in range(num_layers)]
        self.outputs = Layer(num_outputs, nodes_per_layer)
        self.limiter = np.vectorize(limiter)

    def predict(self, signal):
        """Return the value of the hypothesis at the given input signal."""
        activations = signal
        for hidden in self.layers:
            # The limited weighted sums become the input for the next layer.
            activations = self.limiter(np.dot(hidden.get_weights(), activations))
            assert activations.shape == (self.nodes_per_layer,)
        # m x n output weights times n-vector of activations -> m outputs.
        return self.limiter(np.dot(self.outputs.get_weights(), activations))

    def back_propagate(self, delta):
        """Run back propagation using delta as the error signal (not yet implemented)."""
        pass

    def train(self, feature_vectors, labels, error_threshold, max_iterations):
        """Fit the network to the provided examples and labels.

        Training stops when either the error drops below error_threshold or
        max_iterations passes over the data have been made.
        """
        assert len(feature_vectors) == len(labels)
        iterations = 0
        delta = 100000
        while delta > error_threshold and iterations < max_iterations:
            for example, label in zip(feature_vectors, labels):
                # example is an n x 1 feature vector; label is a 0 or 1
                delta = (self.predict(example) - label) ** 2
                self.back_propagate(delta)
            iterations += 1
| {
"repo_name": "copacetic/nn",
"path": "nn.py",
"copies": "1",
"size": "2575",
"license": "mit",
"hash": 8807891319045363000,
"line_mean": 41.9166666667,
"line_max": 80,
"alpha_frac": 0.6240776699,
"autogenerated": false,
"ratio": 4.05511811023622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027846510699908564,
"num_lines": 60
} |
""" A neural network trainer with various options: rmsprop, nerterov, momentum etc.
This trainer is used for multiple types of neural nets: dbns and cnns and it is designed
to be adaptable to other types of networks as well.
"""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
import theano
from theano import tensor as T
import common
import debug
DEBUG = False
theanoFloat = theano.config.floatX
class BatchTrainer(object):
  """
  Abstract class used to define updates on the parameters of neural networks
  during training.

  Subclasses must call the constructor of this class in their constructors, and
  have to define their cost function using a method called 'cost'.

  Supports momentum updates and nesterov updates, both with rmsprop
  or without (see the TrainingOptions class for more details in the available
  training options. Also supports L1 and L2 weight decay.
  """

  def __init__(self, params, weights, training_options):
    """Set up shared optimization state.

    Args:
      params: list of theano shared variables to optimize.
      weights: subset of params subject to L1/L2 weight decay (may be None).
      training_options: options object describing how to train.
    """
    self.params = params
    self.training_options = training_options
    self.weights = weights if weights else []

    # Required for momentum and rmsprop: one shared accumulator per parameter
    # holding the previous update (momentum) and the running mean of squared
    # gradients (rmsprop), both zero-initialized with the parameter's shape.
    self.oldUpdates = []
    self.oldMeanSquares = []
    for param in params:
      oldDParam = theano.shared(value=np.zeros(shape=param.shape.eval(),
                                               dtype=theanoFloat),
                                name='oldDParam')
      self.oldUpdates += [oldDParam]
      oldMeanSquare = theano.shared(value=np.zeros(shape=param.shape.eval(),
                                                   dtype=theanoFloat),
                                    name='oldMeanSquare')
      self.oldMeanSquares += [oldMeanSquare]

  def trainFixedEpochs(self, x, y, data, labels, maxEpochs):
    """Train for exactly maxEpochs passes over the data, with no validation.

    Ctrl-C aborts training early; the network keeps the state it reached.
    If save_best_weights is set, the lowest-training-error weights seen are
    restored at the end.
    """
    training_options = self.training_options
    trainModel = self._makeTrainFunction(x, y, data, labels)
    epochTrainingErrors = []

    # NOTE(review): this uses shape[1] for the number of examples, while the
    # validation-based loops below use shape[0] -- confirm the expected data
    # layout for this code path.
    nrMiniBatchesTrain = max(data.shape.eval()[1] / training_options.miniBatchSize, 1)

    best_training_error = np.inf
    bestWeights = None
    bestBiases = None
    bestEpoch = 0
    save_best_weights = training_options.save_best_weights

    try:
      for epoch in xrange(maxEpochs):
        print "epoch " + str(epoch)
        momentum = training_options.momentumForEpochFunction(training_options.momentumMax, epoch)
        sum_error = 0.0
        for batchNr in xrange(nrMiniBatchesTrain):
          trainError = trainModel(batchNr, momentum) / training_options.miniBatchSize
          sum_error += trainError
        mean_error = sum_error / nrMiniBatchesTrain
        if save_best_weights:
          if mean_error < best_training_error:
            best_training_error = mean_error
            # Save the weights which are the best ones
            # NOTE(review): this saves a *reference* to self.weights, not a
            # copy; if the same objects are updated in place later, the "best"
            # snapshot is not actually preserved -- verify.
            bestWeights = self.weights
            bestBiases = self.biases
            bestEpoch = epoch
        print "training error " + str(mean_error)
        epochTrainingErrors += [mean_error]
    except KeyboardInterrupt:
      print "you have interrupted training"
      print "we will continue testing with the state of the network as it is"

    # Restore the best weights/biases tracked during training, if any.
    if save_best_weights:
      if bestWeights is not None and bestBiases is not None:
        self.weights = bestWeights
        self.biases = bestBiases

    print "number of epochs"
    print epoch + 1

  def trainWithValidation(self, x, y, data, labels, validationData, validationLabels,
                          classificationCost, maxEpochs, validation_criteria):
    """Train with early stopping, dispatching on validation_criteria.

    'patience' keeps extending training while validation error improves;
    'consecutive_decrease' stops after repeated validation-error increases.
    """
    if validation_criteria == "patience":
      self._trainModelPatience(x, y, data, labels, validationData, validationLabels, classificationCost, maxEpochs)
    elif validation_criteria == "consecutive_decrease":
      self._trainLoopWithValidation(x, y, data, labels, validationData, validationLabels, classificationCost, maxEpochs)
    else:
      raise Exception("unknown validation validation_criteria: " + str(validation_criteria))

  def _trainLoopWithValidation(self, x, y, data, labels, validationData, validationLabels,
                               classificationCost, maxEpochs):
    """Early stopping after 8 consecutive epochs of rising validation error.

    Validation error is measured once per epoch. Plots training vs validation
    error curves at the end.
    """
    lastValidationError = np.inf
    consecutive_decrease_error_count = 0.0
    epoch = 0
    training_options = self.training_options
    save_best_weights = training_options.save_best_weights
    miniBatchSize = training_options.miniBatchSize

    nrMiniBatchesTrain = max(data.shape.eval()[0] / miniBatchSize, 1)
    # Validate with larger batches (up to 10x) to reduce the number of calls.
    miniBatchValidateSize = min(validationData.shape.eval()[0], miniBatchSize * 10)
    nrMiniBatchesValidate = max(validationData.shape.eval()[0] / miniBatchValidateSize, 1)

    trainModel = self._makeTrainFunction(x, y, data, labels)
    validateModel = self._makeValidateModelFunction(
        x, y, validationData, validationLabels, classificationCost, miniBatchValidateSize)
    # Same cost on the training data but without the dropout training path,
    # used only for monitoring/plotting.
    trainNoDropout = self._makeValidateModelFunction(
        x, y, data, labels, classificationCost, miniBatchSize)

    validationErrors = []
    trainingErrors = []
    trainingErrorsNoDropout = []

    bestValidationError = np.inf
    bestWeights = None
    bestBiases = None
    bestEpoch = 0

    try:
      while epoch < maxEpochs and consecutive_decrease_error_count < 8:
        print "epoch " + str(epoch)

        momentum = self.training_options.momentumForEpochFunction(training_options.momentumMax, epoch)

        sumErrors = 0.0
        sumErrorsNoDropout = 0.0
        for batchNr in xrange(nrMiniBatchesTrain):
          sumErrors += trainModel(batchNr, momentum) / miniBatchSize
          sumErrorsNoDropout += trainNoDropout(batchNr) / miniBatchSize

        trainingErrors += [sumErrors / nrMiniBatchesTrain]
        trainingErrorsNoDropout += [sumErrorsNoDropout / nrMiniBatchesTrain]

        meanValidations = map(validateModel, xrange(nrMiniBatchesValidate))
        meanValidationError = sum(meanValidations) / len(meanValidations)
        validationErrors += [meanValidationError]

        if save_best_weights:
          if meanValidationError < bestValidationError:
            bestValidationError = meanValidationError
            # Save the weights which are the best ones
            # NOTE(review): stores references, not copies (see
            # trainFixedEpochs); also this method never restores the saved
            # best weights afterwards, unlike _trainModelPatience -- verify
            # whether that is intentional.
            bestWeights = self.weights
            bestBiases = self.biases
            bestEpoch = epoch

        # Count consecutive epochs in which validation error got worse.
        consecutive_decrease_error_count = consecutive_decrease_error_count + 1 if meanValidationError > lastValidationError else 0
        lastValidationError = meanValidationError

        epoch += 1
    except KeyboardInterrupt:
      print "you have interrupted training"
      print "we will continue testing with the state of the network as it is"

    # TODO: flag for plotting
    common.plotTrainingAndValidationErros(trainingErrors, validationErrors)
    common.plotTrainingAndValidationErros(trainingErrorsNoDropout, validationErrors)

    print "number of epochs"
    print epoch + 1

  def _trainModelPatience(self, x, y, data, labels, validationData, validationLabels,
                          classificationCost, maxEpochs):
    """Early stopping with the "patience" criterion.

    Validation error is measured after every mini-batch; each improvement
    extends the training budget (patience) multiplicatively. Training stops
    when the iteration count exceeds the current patience, then the best
    weights seen are restored (if tracking is enabled).
    """
    training_options = self.training_options
    save_best_weights = training_options.save_best_weights
    miniBatchSize = training_options.miniBatchSize

    nrMiniBatchesTrain = max(data.shape.eval()[0] / miniBatchSize, 1)
    # Validate with larger batches (up to 10x) to reduce the number of calls.
    miniBatchValidateSize = min(validationData.shape.eval()[0], miniBatchSize * 10)
    nrMiniBatchesValidate = max(validationData.shape.eval()[0] / miniBatchValidateSize, 1)

    trainModel = self._makeTrainFunction(x, y, data, labels)
    validateModel = self._makeValidateModelFunction(
        x, y, validationData, validationLabels, classificationCost, miniBatchValidateSize)
    trainNoDropout = self._makeValidateModelFunction(
        x, y, data, labels, classificationCost, miniBatchSize)

    epoch = 0
    doneTraining = False
    patience = 10 * nrMiniBatchesTrain  # do at least 10 passes trough the data no matter what
    patienceIncrease = 2  # Increase our patience up to patience * patienceIncrease

    bestValidationError = np.inf
    bestWeights = None
    bestBiases = None
    bestEpoch = 0

    validationErrors = []
    trainingErrors = []
    trainingErrorNoDropout = []

    try:
      while (epoch < maxEpochs) and not doneTraining:
        # Train the net with all data
        print "epoch " + str(epoch)

        momentum = training_options.momentumForEpochFunction(training_options.momentumMax, epoch)

        for batchNr in xrange(nrMiniBatchesTrain):
          # Global iteration counter across epochs, compared against patience.
          iteration = epoch * nrMiniBatchesTrain + batchNr
          trainingErrorBatch = trainModel(batchNr, momentum) / training_options.miniBatchSize

          meanValidations = map(validateModel, xrange(nrMiniBatchesValidate))
          meanValidationError = sum(meanValidations) / len(meanValidations)

          if meanValidationError < bestValidationError:
            print "increasing patience, still improving during training..."
            patience = max(patience, iteration * patienceIncrease)
            bestValidationError = meanValidationError
            if save_best_weights:
              # Save the weights which are the best ones
              # NOTE(review): stores references, not copies -- see
              # trainFixedEpochs.
              bestWeights = self.weights
              bestBiases = self.biases
              bestEpoch = epoch

          validationErrors += [meanValidationError]
          trainingErrors += [trainingErrorBatch]
          trainingErrorNoDropout += [trainNoDropout(batchNr)]

          if patience <= iteration:
            doneTraining = True

        epoch += 1
    except KeyboardInterrupt:
      print "you have interrupted training"
      print "we will continue testing with the state of the network as it is"

    # TODO: double check
    # Restore the best weights/biases tracked during training, if any.
    if save_best_weights:
      if bestWeights is not None and bestBiases is not None:
        self.weights = bestWeights
        self.biases = bestBiases

    common.plotTrainingAndValidationErros(trainingErrors, validationErrors)
    common.plotTrainingAndValidationErros(trainingErrorNoDropout, validationErrors)

    print "number of epochs"
    print epoch

  # TODO: document cost
  def _makeValidateModelFunction(self, x, y, data, labels, cost, miniBatchSize):
    """Compile a theano function returning the mean cost on one mini-batch.

    The returned function takes a mini-batch index and evaluates cost(y) on
    the corresponding slice of data/labels (no parameter updates).
    """
    miniBatchIndex = T.lscalar()

    return theano.function(
        inputs=[miniBatchIndex],
        outputs=T.mean(cost(y)),
        givens={
            x: data[miniBatchIndex * miniBatchSize:(miniBatchIndex + 1) * miniBatchSize],
            y: labels[miniBatchIndex * miniBatchSize:(miniBatchIndex + 1) * miniBatchSize]})

  def _makeTrainFunction(self, x, y, data, labels):
    """Compile the function that performs one parameter update per mini-batch.

    Adds L1/L2 weight decay terms to the subclass-defined cost, then builds
    either nesterov or plain momentum updates (optionally with rmsprop).
    Returns a callable trainModel(miniBatchIndex, momentum) -> batch error.
    """
    error = T.sum(self.cost(y))

    training_options = self.training_options

    # Weight decay regularization over the decayable weights only.
    for w in self.weights:
      error += training_options.weightDecayL1 * T.sum(abs(w))
      error += training_options.weightDecayL2 * T.sum(w ** 2)

    miniBatchIndex = T.lscalar()
    momentum = T.fscalar()

    if DEBUG:
      # NaN-detection mode for debugging; excludes optimizations that would
      # hide the node that produced the NaN.
      mode = theano.compile.MonitorMode(post_func=debug.detect_nan).excluding(
          'local_elemwise_fusion', 'inplace')
    else:
      mode = None

    if training_options.nesterov:
      # Nesterov: first jump in the direction of the accumulated momentum,
      # then correct with the gradient evaluated at the jumped-to position.
      # This requires two compiled functions, applied in sequence.
      preDeltaUpdates, updates = self._buildUpdatesNesterov(error, training_options, momentum)
      updateParamsWithMomentum = theano.function(
          inputs=[momentum],
          outputs=[],
          updates=preDeltaUpdates,
          mode=mode)
      updateParamsWithGradient = theano.function(
          inputs=[miniBatchIndex, momentum],
          outputs=error,
          updates=updates,
          givens={
              x: data[miniBatchIndex * training_options.miniBatchSize:(miniBatchIndex + 1) * training_options.miniBatchSize],
              y: labels[miniBatchIndex * training_options.miniBatchSize:(miniBatchIndex + 1) * training_options.miniBatchSize]},
          mode=mode)

      def trainModel(miniBatchIndex, momentum):
        updateParamsWithMomentum(momentum)
        return updateParamsWithGradient(miniBatchIndex, momentum)

    else:
      updates = self._buildUpdatesSimpleMomentum(error, training_options, momentum)
      trainModel = theano.function(
          inputs=[miniBatchIndex, momentum],
          outputs=error,
          updates=updates,
          mode=mode,
          givens={
              x: data[miniBatchIndex * training_options.miniBatchSize:(miniBatchIndex + 1) * training_options.miniBatchSize],
              y: labels[miniBatchIndex * training_options.miniBatchSize:(miniBatchIndex + 1) * training_options.miniBatchSize]})

    # returns the function that trains the model
    return trainModel

  def _buildUpdatesNesterov(self, error, training_options, momentum):
    """Build the two-phase nesterov update lists.

    Returns (preDeltaUpdates, updates): the first moves each parameter along
    its accumulated momentum, the second applies the (optionally rmsprop
    scaled) gradient step and refreshes the momentum accumulators.
    """
    # Optionally scale the learning rate down as momentum grows, keeping the
    # effective step size roughly constant.
    if training_options.momentumFactorForLearningRate:
      lrFactor = np.float32(1.0) - momentum
    else:
      lrFactor = np.float32(1.0)

    preDeltaUpdates = []
    for param, oldUpdate in zip(self.params, self.oldUpdates):
      preDeltaUpdates.append((param, param + momentum * oldUpdate))

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs
    # NOTE: gradients are taken at the momentum-jumped position because the
    # preDeltaUpdates function runs before this one.
    deltaParams = T.grad(error, self.params)
    updates = []
    parametersTuples = zip(self.params,
                           deltaParams,
                           self.oldUpdates,
                           self.oldMeanSquares)

    for param, delta, oldUpdate, oldMeanSquare in parametersTuples:
      if training_options.rmsprop:
        # Exponential moving average of squared gradients; the epsilon keeps
        # the division numerically safe.
        meanSquare = 0.9 * oldMeanSquare + 0.1 * delta ** 2
        paramUpdate = - lrFactor * training_options.batchLearningRate * delta / T.sqrt(meanSquare + 1e-8)
        updates.append((oldMeanSquare, meanSquare))
      else:
        paramUpdate = - lrFactor * training_options.batchLearningRate * delta
      newParam = param + paramUpdate
      updates.append((param, newParam))
      # Full velocity (momentum jump + gradient step) kept for the next round.
      updates.append((oldUpdate, momentum * oldUpdate + paramUpdate))

    return preDeltaUpdates, updates

  def _buildUpdatesSimpleMomentum(self, error, training_options, momentum):
    """Build classical momentum updates (optionally rmsprop scaled).

    Each parameter update is momentum * previous-update plus the scaled
    negative gradient; returns the theano updates list.
    """
    # Optionally scale the learning rate down as momentum grows.
    if training_options.momentumFactorForLearningRate:
      lrFactor = np.float32(1.0) - momentum
    else:
      lrFactor = np.float32(1.0)

    deltaParams = T.grad(error, self.params)
    updates = []
    parametersTuples = zip(self.params,
                           deltaParams,
                           self.oldUpdates,
                           self.oldMeanSquares)

    for param, delta, oldUpdate, oldMeanSquare in parametersTuples:
      paramUpdate = momentum * oldUpdate
      if training_options.rmsprop:
        # Exponential moving average of squared gradients; the epsilon keeps
        # the division numerically safe.
        meanSquare = 0.9 * oldMeanSquare + 0.1 * delta ** 2
        paramUpdate += - lrFactor * training_options.batchLearningRate * delta / T.sqrt(meanSquare + 1e-8)
        updates.append((oldMeanSquare, meanSquare))
      else:
        paramUpdate += - lrFactor * training_options.batchLearningRate * delta
      newParam = param + paramUpdate
      updates.append((param, newParam))
      updates.append((oldUpdate, paramUpdate))

    return updates
| {
"repo_name": "mihaelacr/pydeeplearn",
"path": "code/lib/batchtrainer.py",
"copies": "1",
"size": "14670",
"license": "bsd-3-clause",
"hash": 8395465300915853000,
"line_mean": 37.6052631579,
"line_max": 131,
"alpha_frac": 0.6777096115,
"autogenerated": false,
"ratio": 4.17948717948718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006473113502083931,
"num_lines": 380
} |
"""A neuron model based on a biologically inspired neuronal architecture."""
import logging
from math import pi, exp, log
import random
from collections import namedtuple, OrderedDict
from threading import Thread
import numpy as np
import cv2
from matplotlib.pyplot import figure, plot, axis, show, subplots_adjust, title, xlabel, ylabel, axhline
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from .util.quadtree import Rect, QuadTree
# Different neuron distributions, used to position neurons within a population
Uniform = namedtuple('Uniform', ['low', 'high'])  # a uniform distribution over the half-open interval [low, high)
MultivariateUniform = namedtuple('MultivariateUniform', ['lows', 'highs'])  # a uniform distribution in multiple dimensions
Normal = namedtuple('Normal', ['mu', 'sigma'])  # a normal distribution, defined by mean (mu) and std. dev. (sigma)
MultivariateNormal = namedtuple('MultivariateNormal', ['mu', 'cov'])  # a multivariate normal distribution, defined by mean vector (mu) of length N and covariance matrix (cov) of size NxN
SymmetricNormal = namedtuple('SymmetricNormal', Normal._fields + ('center',))  # radially-symmetric normal distribution around a center point
SymmetricLogNormal = namedtuple('SymmetricLogNormal', SymmetricNormal._fields)  # radially-symmetric log-normal distribution around a center point

# Neuron membrane potential levels
threshold_potential = -0.055  # volts; level that triggers action potential
action_potential_peak = 0.04  # volts; maximum potential reached during an action potential event
action_potential_trough = Normal(-0.08, 0.001)  # volts; minimum potential reached due to hyperpolarization during an action potential event

# Synaptic strength distribution (TODO find out from literature what these values should be)
synaptic_strength = Normal(0.011, 0.001)  # essentially volts (mean, s.d.); potential transmitted to post-synaptic neuron when the pre-synaptic neuron undergoes an action potential event

# Timing, decay and dynamic action potential parameters
self_depolarization_rate = 0.75  # volts per sec.; rate at which potential rises during an action potential event
refractory_period = 0.1  # secs.; minimum time between two action potentials (should be an emergent effect when simulating action potential in detail)
min_update_time = 0.025  # secs.; minimum time between updates
synapse_inhibition_period = 0.5  # secs.; duration for which the effect of inhibition lasts at a synapse
neuron_inhibition_period = 1.25  # secs.; duration for which the effect of inhibition lasts in a neuron

# Graph (plotting) parameters
neuron_plot_colors = 'bgrcmy'
population_plot_colors = cm.jet(np.random.uniform(0.0, 1.0, 10))  #['darkblue', 'darkgreen', 'darkred', 'darkmagenta', 'olive', 'coral', ]  # TODO find a better color-map
inhibitory_connection_color = 'red'
class GrowthCone:
  """Models the cone-shaped region a neuron's projection can grow into.

  The cone opens along a unit direction vector; its radius at distance L from
  the anchor is spreadFactor * L, up to a maximum length maxLength.
  """

  default_maxLength = 15.0  # maximum length a projection with these growth cone parameters can grow to
  default_spreadFactor = 0.5  # radius at length L = spreadFactor * L

  def __init__(self, direction, maxLength=default_maxLength, spreadFactor=default_spreadFactor):
    self.direction = direction  # unit vector
    self.maxLength = maxLength
    self.spreadFactor = spreadFactor

  def score(self, anchor, target):
    """Return the probability that this growth cone starting at anchor would reach target.

    1.0 on the cone's axis, falling off linearly to 0.0 at the cone surface;
    0.0 behind the anchor or beyond maxLength.
    """
    dist_vec = target - anchor
    projection = np.dot(self.direction, dist_vec)  # length of dist_vec along the cone axis
    if projection < 0.0 or projection > self.maxLength:
      return 0.0
    # BUG FIX: clamp at zero before sqrt -- floating-point error can make
    # norm^2 - projection^2 slightly negative when target lies (almost)
    # exactly on the cone axis, which previously produced a NaN score.
    perp_distance = np.sqrt(max(np.linalg.norm(dist_vec, ord=2) ** 2 - projection ** 2, 0.0))
    perp_limit = projection * self.spreadFactor
    return (perp_limit - perp_distance) / perp_limit if perp_distance < perp_limit else 0.0

  def getTerminationRect(self, anchor):
    """Given an anchor, return the approximate region of termination as a rectangle."""
    radius = self.maxLength * self.spreadFactor
    return Rect(anchor[0] - radius, anchor[1] - radius, anchor[0] + radius, anchor[1] + radius)

  def __str__(self):
    return "GrowthCone: {{ direction: {}, maxLength: {}, spreadFactor: {} }}".format(self.direction, self.maxLength, self.spreadFactor)
class Synapse:
  """A directed connection from a pre-synaptic to a post-synaptic neuron.

  An optional gatekeeper neuron may temporarily inhibit transmission across
  this synapse.
  """

  def __init__(self, pre, post, strength=None, gatekeeper=None):
    self.pre = pre
    self.post = post
    # Draw a random strength from the population distribution unless given.
    if strength is None:
      strength = np.random.normal(synaptic_strength.mu, synaptic_strength.sigma)
    self.strength = strength
    self.gatekeeper = gatekeeper  # NOTE do we need to store this here?
    # TODO Implement learning/boosting of synaptic strength
    # Synapse-level inhibition state
    self.isInhibited = False
    self.uninhibitAt = -1.0

  def _refreshInhibition(self, timeNow):
    # Clear an expired inhibition before deciding whether to transmit.
    if self.isInhibited and self.uninhibitAt <= timeNow:
      self.isInhibited = False

  def transmitActionPotential(self, timeNow):
    """Deliver this synapse's full strength to the post-synaptic neuron."""
    self._refreshInhibition(timeNow)
    if not self.isInhibited:
      self.post.accumulate(self.strength)

  def transmitGradedPotential(self, timeNow):
    """Deliver a strength-scaled graded potential from the pre-synaptic neuron."""
    self._refreshInhibition(timeNow)
    if not self.isInhibited:
      # TODO Figure out how to quantize graded potential transmission
      self.post.accumulate((self.pre.potential - self.pre.resting_potential.mu) * self.strength)

  def inhibit(self, timeNow, duration=synapse_inhibition_period):
    """Block transmission across this synapse until timeNow + duration."""
    self.isInhibited = True
    self.uninhibitAt = timeNow + duration
class Neuron(object):
  """A simple excitable neuron cell with synaptic connections, potential accumulation and decay functionality."""

  id_ctr = 0  # auto-incremented counter to assign unique IDs to instances
  _str_attrs = ['id', 'location', 'potential']  # which attributes to include in string representation; subclasses can override this

  resting_potential = Normal(-0.07, 0.001)  # volts (mean, s.d.); resting / equillibrium potential
  potential_decay = 1.0  # per-sec.; rate at which potential decays trying to reach equillibrium
  p_factor = 1.0  # factor used to scale update probability
  min_p = 0.15  # minimum update probability, to prevent starving; maximum is implicitly 1.0

  def __init__(self, location, timeNow):
    """Create a neuron at the given 3-space location, at simulation time timeNow."""
    self.id = Neuron.id_ctr
    Neuron.id_ctr += 1
    self.location = location  # location in 3-space
    self.timeLastFired = self.timeLastUpdated = self.timeCurrent = timeNow
    self.deltaTime = 0.0
    self.potential = np.random.normal(self.resting_potential.mu, self.resting_potential.sigma)  # current membrane potential
    self.potentialLastUpdated = self.potential  # last computed potential, useful for calculating rate of change
    self.potentialAccumulated = 0.0  # potential accumulated from synaptic inputs
    self.p = np.random.uniform(0.0, 0.25)  # update probability: [0, 1] (actually, no need to clip at 1)
    self.synapses = list()  # outgoing synapses (this neuron is pre-synaptic)
    self.gatedNeurons = list()  # neurons this neuron inhibits when it fires
    self.gatedSynapses = list()  # synapses this neuron inhibits when it fires
    # Neuron-level inhibition
    self.isInhibited = False
    self.uninhibitAt = -1.0
    self.timeLastPlotted = self.timeCurrent  # [graph]
    self.potentialLastPlotted = self.potential  # [graph]
    self.plotColor = neuron_plot_colors[self.id % len(neuron_plot_colors)]  # [graph]
    self.inhibitoryConnectionColor = inhibitory_connection_color  # [graph]

  def synapseWith(self, neuron, strength=None, gatekeeper=None):
    """Create a synapse from this neuron to the given neuron, optionally gated by a gatekeeper."""
    s = Synapse(self, neuron, strength, gatekeeper)
    self.synapses.append(s)
    if gatekeeper is not None:
      gatekeeper.gateSynapse(s)

  def gateNeuron(self, neuron):
    """Register a neuron to be inhibited whenever this neuron fires."""
    self.gatedNeurons.append(neuron)

  def gateSynapse(self, synapse):
    """Register a synapse to be inhibited whenever this neuron fires."""
    self.gatedSynapses.append(synapse)

  def accumulate(self, deltaPotential):
    """Add incoming synaptic potential; integrated on the next update."""
    self.potentialAccumulated += deltaPotential

  def updateWithP(self, timeNow):
    """Run update() with probability self.p (stochastic update scheduling)."""
    if self.p >= random.random():
      self.update(timeNow)

  def update(self, timeNow):
    """Advance the neuron to timeNow: handle inhibition, then potential and p."""
    if self.isInhibited and self.uninhibitAt <= timeNow:
      self.isInhibited = False
      # NOTE potentialAccumulated is not reset here so that it can start responding immediately

    if self.isInhibited:
      self.potentialAccumulated = 0.0  # lose any potential gained in this period
    else:
      self.timeCurrent = timeNow
      self.deltaTime = self.timeCurrent - self.timeLastUpdated
      if self.deltaTime < min_update_time:
        return  # skip updates that come too close together

      self.updatePotential()
      self.updateP()
      self.potentialLastUpdated = self.potential
      self.timeLastUpdated = self.timeCurrent

  def updatePotential(self):
    """Advance membrane potential by deltaTime: fire at peak, decay, integrate inputs, check threshold."""
    # Fire action potential, if we've reached peak
    if self.potential >= action_potential_peak:
      self.fireActionPotential()
      self.timeLastFired = self.timeCurrent
      self.potential = np.random.normal(action_potential_trough.mu, action_potential_trough.sigma)  # repolarization/falling phase (instantaneous)

    # Decay potential
    #self.potential = self.resting_potential.mu + (self.potential - self.resting_potential.mu) * exp(-self.potential_decay * self.deltaTime)  # exponential decay
    self.potential -= self.potential_decay * (self.potential - self.resting_potential.mu) * self.deltaTime  # approximated exponential decay

    # Accumulate/integrate incoming potentials
    self.potential += self.potentialAccumulated  # integrate signals accumulated from neighbors
    self.potentialAccumulated = 0.0  # reset accumulator (don't want to double count!)

    # Check for action potential event (only outside the refractory period)
    if self.potential > threshold_potential and (self.timeCurrent - self.timeLastFired) >= refractory_period:
      self.actionPotential()

    #print self.id, self.timeCurrent, self.potential  # [log: potential]
    # TODO This is the ideal point to gather potential observation; "Fire action potential" step should come immediately after this (instead of at the beginning of updatePotential) in order to prevent any posible delays
    # TODO Implement neuron-level inhibition (?)

  def updateP(self):
    """Recompute update probability from the recent rate of change of potential."""
    self.p = np.clip(self.p_factor * abs(self.potential - self.potentialLastUpdated) / self.deltaTime, self.min_p, 1.0)
    #if self.p > 1.0: self.p = 1.0  # no need to clip at 1 because of the way this is used

  def actionPotential_approximate(self):
    """Depolarize instantaneously to the action potential peak."""
    # Action potential - approximate method: Instantaneous rise
    self.potential = action_potential_peak  # depolarization/rising phase (instantaneous)

  def actionPotential_accurate(self):
    """Depolarize gradually towards the peak (harder to do in real time)."""
    # Action potential - accurate method: Gradual rise (harder to do in real time)
    #print "[SELF-DEPOLARIZATION]"
    #self.potential += self_depolarization_rate * self.deltaTime  # contant depolarization
    self.potential += (action_potential_peak + 0.02 - self.potential) * 10 * self_depolarization_rate * self.deltaTime  # smoothed depolarization, hackish

  actionPotential = actionPotential_approximate  # pick _accurate for more realistic action potential dynamics

  def fireActionPotential(self):
    """Transmit a spike over all synapses and inhibit any gated neurons/synapses."""
    # Fire action potential to neighbor neurons through axon (TODO introduce transmission delay?)
    #print "Neuron.fireActionPotential() [{}]".format(self.id)  # [debug]
    for synapse in self.synapses:
      synapse.transmitActionPotential(self.timeCurrent)
    for neuron in self.gatedNeurons:
      neuron.inhibit(self.timeCurrent)
    for synapse in self.gatedSynapses:
      synapse.inhibit(self.timeCurrent)

  def sendGradedPotential(self):
    """Transmit the current (sub-threshold) potential over all synapses."""
    # Send graded potential to neighbor neurons through axon
    #print "Neuron.sendGradedPotential() [{}]".format(self.id)  # [debug]
    for synapse in self.synapses:
      synapse.transmitGradedPotential(self.timeCurrent)

  def inhibit(self, timeNow, duration=neuron_inhibition_period):
    """Suppress this neuron's activity until timeNow + duration and reset its potential."""
    #print "Neuron.inhibit(): timeNow = {}, duration = {}".format(timeNow, duration)
    self.isInhibited = True
    self.uninhibitAt = timeNow + duration
    self.potentialLastUpdated = self.potential = np.random.normal(self.resting_potential.mu, self.resting_potential.sigma)  # reset potential
    self.timeLastUpdated = timeNow

  def plot(self):
    """Append the latest potential segment to the current matplotlib plot."""
    plot((self.timeLastPlotted, self.timeCurrent), (self.potentialLastPlotted, self.potential), self.plotColor)  # [graph]
    self.timeLastPlotted = self.timeCurrent
    self.potentialLastPlotted = self.potential

  def __str__(self):
    return "{}: {{ {} }}".format(self.__class__.__name__, ", ".join("{}: {}".format(attr, getattr(self, attr)) for attr in self.__class__._str_attrs))
class Population(object):
default_bounds = np.float32([[-50.0, -50.0, -5.0], [50.0, 50.0, 5.0]])
default_distribution = MultivariateNormal(mu=np.float32([0.0, 0.0, 0.0]), cov=(np.float32([400, 400, 4]) * np.identity(3, dtype=np.float32)))
id_ctr = 0 # auto-incremented counter to assign unique IDs to instances
def __init__(self, numNeurons=1000, timeNow=0.0, neuronTypes=[Neuron], bounds=default_bounds, neuronLocations=None, distribution=default_distribution, **kwargs):
self.id = Population.id_ctr
Population.id_ctr += 1
self.numNeurons = numNeurons
self.timeNow = timeNow
self.neuronTypes = neuronTypes
self.bounds = bounds
self.center = (self.bounds[0] + self.bounds[1]) / 2
self.distribution = distribution
self.isConnected = False
self.plotColor = population_plot_colors[self.id % len(population_plot_colors)] # [graph]
self.inhibitoryConnectionColor = inhibitory_connection_color # [graph]
self.logger = logging.getLogger(self.__class__.__name__) # we could use "{}.{}".format(self.__class__.__name__, self.id) instead, but that'll create separate loggers for each Population
self.logger.info("Creating {}".format(self))
self.logger.debug("Bounds: x: {}, y: {}, z: {}".format(self.bounds[:,0], self.bounds[:,1], self.bounds[:,2]))
# * Designate neuron locations
if neuronLocations is not None:
self.neuronLocations = neuronLocations
else:
self.neuronLocations = []
if isinstance(self.distribution, MultivariateUniform):
# NOTE self.distribution has to be a 3-channel MultivariateUniform, even if the third channel is a constant (low=high)
self.neuronLocations = np.column_stack([
np.random.uniform(self.distribution.lows[0], self.distribution.highs[0], self.numNeurons),
np.random.uniform(self.distribution.lows[1], self.distribution.highs[1], self.numNeurons),
np.random.uniform(self.distribution.lows[2], self.distribution.highs[2], self.numNeurons)])
#self.logger.debug("MultivariateUniform array shape: {}".format(self.neuronLocations.shape))
elif isinstance(self.distribution, MultivariateNormal):
#self.logger.debug("Distribution: mu: {}, cov: {}".format(self.distribution.mu, self.distribution.cov)) # ugly
self.neuronLocations = np.random.multivariate_normal(self.distribution.mu, self.distribution.cov, self.numNeurons)
elif isinstance(self.distribution, SymmetricNormal):
thetas = np.random.uniform(pi, -pi, self.numNeurons) # symmetric in any direction around Z axis
rads = np.random.normal(self.distribution.mu, self.distribution.sigma, self.numNeurons) # varies radially
xLocs, yLocs = cv2.polarToCart(rads, thetas)
zLocs = np.repeat(np.float32([self.distribution.center[2]]), self.numNeurons).reshape((self.numNeurons, 1)) # constant z, repeated as a column vector
#self.logger.debug("SymmetricNormal array shapes:- x: {}, y: {}, z: {}".format(xLocs.shape, yLocs.shape, zLocs.shape))
self.neuronLocations = np.column_stack([
self.distribution.center[0] + xLocs,
self.distribution.center[1] + yLocs,
zLocs]) # build Nx3 numpy array
elif isinstance(self.distribution, SymmetricLogNormal):
thetas = np.random.uniform(pi, -pi, self.numNeurons) # symmetric in any direction around Z axis
rads = np.random.lognormal(self.distribution.mu, self.distribution.sigma, self.numNeurons) # varies radially
xLocs, yLocs = cv2.polarToCart(rads, thetas)
zLocs = np.repeat(np.float32([self.distribution.center[2]]), self.numNeurons).reshape((self.numNeurons, 1)) # constant z, repeated as a column vector
#self.logger.debug("SymmetricLogNormal array shapes:- x: {}, y: {}, z: {}".format(xLocs.shape, yLocs.shape, zLocs.shape))
self.neuronLocations = np.column_stack([
self.distribution.center[0] + xLocs,
self.distribution.center[1] + yLocs,
zLocs]) # build Nx3 numpy array
else:
raise ValueError("Unknown distribution type: {}".format(type(self.distribution)))
# TODO Include (non-central) F distribution (suitable for rods)
# Clip (clamp) neuron locations that are outside bounds
np.clip(self.neuronLocations[:, 0], self.bounds[0, 0], self.bounds[1, 0], out=self.neuronLocations[:, 0])
np.clip(self.neuronLocations[:, 1], self.bounds[0, 1], self.bounds[1, 1], out=self.neuronLocations[:, 1])
#print "Out-of-bounds neuron locations:", [loc for loc in self.neuronLocations if not ((self.bounds[0, 0] <= loc[0] <= self.bounds[1, 0]) and (self.bounds[0, 1] <= loc[1] <= self.bounds[1, 1]))] # [debug]
#print "Neuron locations:\n", self.neuronLocations # [debug]
# * Create neurons
self.neurons = self.numNeurons * [None]
self.neuronPlotColors = self.numNeurons * [None]
for i in xrange(self.numNeurons):
self.neurons[i] = random.choice(self.neuronTypes)(self.neuronLocations[i], self.timeNow, **kwargs)
self.neuronPlotColors[i] = self.neurons[i].plotColor
# * Build spatial index using quadtree (assuming neurons are roughly in a layer)
boundingRect = (self.bounds[0, 0], self.bounds[0, 1], self.bounds[1, 0], self.bounds[1, 1])
self.qtree = QuadTree(self.neurons, depth=int(log(self.numNeurons, 2)), bounding_rect=boundingRect)
# TODO Move this to Projection
def connectWith(self, population, maxConnectionsPerNeuron, growthCone=None, allowSelfConnections=False):
    """Synapse neurons of this population onto neurons of a target population.

    :param population: target Population (may be this same population).
    :param maxConnectionsPerNeuron: upper bound on synapses created per pre-neuron.
    :param growthCone: GrowthCone used to locate and score candidate targets;
        if None, one is built pointing from this population's center to the
        target's, with reach twice the center-to-center distance.
    :param allowSelfConnections: if False, a neuron never synapses onto itself
        (relevant when target population is this population).

    Side effects: sets self.growthCone, self.numSynapses,
    self.numDisconnectedNeurons and self.isConnected.
    """
    if growthCone is not None:
        self.growthCone = growthCone
    else:
        # Default growth cone aimed at the target population's center
        growthConeDirection = population.center - self.center
        growthConeLength = np.linalg.norm(growthConeDirection, ord=2)
        growthConeDirection /= growthConeLength  # need a unit vector
        self.growthCone = GrowthCone(growthConeDirection, maxLength=growthConeLength * 2.0, spreadFactor=1)
    self.numSynapses = 0
    self.numDisconnectedNeurons = 0
    # * For each neuron in this population
    for a in self.neurons:
        # ** Compute search rectangle in target population to select candidate neurons
        rect = self.growthCone.getTerminationRect(a.location)
        # ** Find candidate neurons from the other population
        candidates = []
        for b in population.qtree.hit(rect):  # optimized spatial range query
            if a == b and not allowSelfConnections: continue  # skip connecting to self, in case target population is same as this population
            growthConeScore = self.growthCone.score(a.location, b.location)
            if growthConeScore > 0.1:  # NOTE(review): 0.1 looks like an empirical cut-off -- confirm
                candidates.append((growthConeScore, b))
        # ** Sort candidates based on scores, and pick top n (TODO: Add some probabilistic noise?)
        candidates.sort(key=lambda pair: pair[0], reverse=True)
        for i in xrange(min(maxConnectionsPerNeuron, len(candidates))):
            a.synapseWith(candidates[i][1])  # TODO: Use score as synaptic strength?
        self.numSynapses += len(a.synapses)
        if not a.synapses:
            self.numDisconnectedNeurons += 1
    self.logger.debug("Pre: {}, post: {}, #synapses: {}, (avg.: {} per pre-neuron), #disconnected: {}".format(len(self.neurons), len(population.neurons), self.numSynapses, float(self.numSynapses) / len(self.neurons), self.numDisconnectedNeurons))
    self.isConnected = True
def plotNeuronLocations3D(self, ax=None, showConnections=True, showInhibitoryConnections=False, populationColor=None, connectionColor=None, inhibitoryConnectionColor=None, equalScaleZ=False):
    """Scatter-plot this population's neurons (and optionally connections) in 3D.

    :param ax: existing 3D axes to draw into; if None, a standalone figure is
        created, scaled to self.bounds and shown (blocking).
    :param showConnections: draw one line per synapse (only if isConnected).
    :param showInhibitoryConnections: also draw lines to gated neurons.
    :param populationColor: single colour for all neurons; None uses the
        per-neuron colours in self.neuronPlotColors.
    :param connectionColor: colour for synapse lines; None uses each
        pre-neuron's plotColor.
    :param inhibitoryConnectionColor: colour for inhibitory lines; None uses
        each neuron's inhibitoryConnectionColor.
    :param equalScaleZ: in standalone mode, scale Z like X and Y.
    """
    standalone = False
    if ax is None:
        standalone = True
        fig = figure()
        ax = fig.gca(projection='3d')
    self.logger.debug("Population {}: showConnections: {}, showInhibitoryConnections: {}, populationColor: {}, connectionColor: {}, inhibitoryConnectionColor: {}".format(self.id, showConnections, showInhibitoryConnections, populationColor, connectionColor, inhibitoryConnectionColor))
    ax.scatter(self.neuronLocations[:,0], self.neuronLocations[:,1], self.neuronLocations[:,2], c=(self.neuronPlotColors if populationColor is None else populationColor))
    if showConnections and self.isConnected:
        for n in self.neurons:
            #frm = n.location
            #to = n.location + self.growthCone.maxLength * self.growthCone.direction
            #ax.plot((frm[0], to[0]), (frm[1], to[1]), (frm[2], to[2])) # [debug: draw growth cone vector]
            #print "Population.plotNeuronLocations3D(): {} {} @ ({:.2f}, {:.2f}): {} synapses, {} gated neurons".format(n.__class__.__name__, n.id, n.location[0], n.location[0], len(n.synapses), len(n.gatedNeurons)) # [debug]
            for s in n.synapses:
                ax.plot((n.location[0], s.post.location[0]), (n.location[1], s.post.location[1]), (n.location[2], s.post.location[2]), c=(n.plotColor if connectionColor is None else connectionColor), alpha=0.75)
            if showInhibitoryConnections: # TODO also add gatedSynapses, if being used in framework
                for t in n.gatedNeurons:
                    ax.plot((n.location[0], t.location[0]), (n.location[1], t.location[1]), (n.location[2], t.location[2]), c=(n.inhibitoryConnectionColor if inhibitoryConnectionColor is None else inhibitoryConnectionColor), alpha=0.75)
    if standalone: # TODO prevent code duplication
        # Build a cubic-ish viewport around the population bounds
        plot_bounds = self.bounds
        plot_sizes = (plot_bounds[1] - plot_bounds[0])
        max_plot_size = max(plot_sizes)
        plot_centers = (plot_bounds[0] + plot_bounds[1]) / 2
        x_bounds = [plot_centers[0] - max_plot_size / 2, plot_centers[0] + max_plot_size / 2]
        y_bounds = [plot_centers[1] - max_plot_size / 2, plot_centers[1] + max_plot_size / 2]
        if equalScaleZ:
            z_bounds = [plot_centers[2] - max_plot_size / 2, plot_centers[2] + max_plot_size / 2] # Z axis scaled the same way as rest
        else:
            z_bounds = plot_bounds[:, 2] # separate scale for Z axis
        ax.auto_scale_xyz(x_bounds, y_bounds, z_bounds)
        ax.set_xlabel("X")
        ax.set_ylabel("Y")
        ax.set_zlabel("Z")
        show()
def __str__(self):
    """Short human-readable summary: id, size and neuron types."""
    typeNames = ", ".join(t.__name__ for t in self.neuronTypes)
    return "Population {}: {{ numNeurons: {}, neuronTypes: [{}] }}".format(self.id, self.numNeurons, typeNames)
def __repr__(self):
    """Detailed summary: adds bounds and distribution to __str__'s fields."""
    typeNames = ", ".join(t.__name__ for t in self.neuronTypes)
    return "Population {}: {{ numNeurons: {}, neuronTypes: [{}], bounds: {}, distribution: {} }}".format(self.id, self.numNeurons, typeNames, repr(self.bounds), self.distribution)
class Projection(object):
    """A set of connections from one Population to another."""

    # TODO Implement this class by pulling out connection-related methods from Population
    pass
def plotPopulations(populations, populationColors=None, showConnections=True, showInhibitoryConnections=False, connectionColors=None, inhibitoryConnectionColors=None, equalScaleZ=False):
    """Plot several populations (and optionally their connections) in one 3D figure.

    :param populations: sequence of Population objects to draw.
    :param populationColors: per-population neuron colours; None uses each
        population's own plotColor.
    :param showConnections: draw synapse lines for connected populations.
    :param showInhibitoryConnections: also draw gated (inhibitory) links.
    :param connectionColors: per-population connection colours; a None entry
        draws that population's connections in its pre-neurons' colours.
    :param inhibitoryConnectionColors: per-population inhibitory-line colours.
    :param equalScaleZ: scale the Z axis the same as X and Y.

    NOTE(review): if showConnections is False and connectionColors is None,
    the zip() below receives None and would fail -- verify callers always
    supply colour lists when disabling connections.
    """
    if populationColors is None:
        populationColors = [p.plotColor for p in populations]
    if showConnections:
        if connectionColors is None:
            connectionColors = [p.plotColor for p in populations] # same as plotColor
        if inhibitoryConnectionColors is None:
            inhibitoryConnectionColors = [p.inhibitoryConnectionColor for p in populations]
    fig = figure()
    ax = fig.gca(projection='3d') # effectively same as fig.add_subplot(111, projection='3d')
    # Aggregate bounding box, widened while each population is drawn
    plot_bounds = np.float32([np.repeat(np.inf, 3), np.repeat(-np.inf, 3)])
    for population, populationColor, connectionColor, inhibitoryConnectionColor in zip(populations, populationColors, connectionColors, inhibitoryConnectionColors):
        population.plotNeuronLocations3D(ax, showConnections=showConnections, showInhibitoryConnections=showInhibitoryConnections, populationColor=populationColor, connectionColor=connectionColor, inhibitoryConnectionColor=inhibitoryConnectionColor)
        plot_bounds[0, :] = np.minimum(plot_bounds[0], population.bounds[0])
        plot_bounds[1, :] = np.maximum(plot_bounds[1], population.bounds[1])
    # Use aggregate bounds from all populations to size up the plot
    plot_sizes = (plot_bounds[1] - plot_bounds[0])
    max_plot_size = max(plot_sizes)
    plot_centers = (plot_bounds[0] + plot_bounds[1]) / 2
    x_bounds = [plot_centers[0] - max_plot_size / 2, plot_centers[0] + max_plot_size / 2]
    y_bounds = [plot_centers[1] - max_plot_size / 2, plot_centers[1] + max_plot_size / 2]
    if equalScaleZ:
        z_bounds = [plot_centers[2] - max_plot_size / 2, plot_centers[2] + max_plot_size / 2] # Z axis scaled the same way as rest
    else:
        z_bounds = plot_bounds[:, 2] # separate scale for Z axis
    ax.auto_scale_xyz(x_bounds, y_bounds, z_bounds)
    ax.set_aspect('equal')
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
    show()
# Neuron-specific utility constructs and methods
def setup_neuron_plot(plotTitle="Neuron", xlab="Time (s)", ylab="Membrane potential (V)"):
    """Apply standard labels and reference lines to the current neuron plot.

    Pass None for plotTitle/xlab/ylab to skip that label.  Draws dashed
    horizontal reference lines: resting potential (black), threshold potential
    (red), action-potential peak (cyan) and trough (magenta).
    """
    if plotTitle is not None: title(plotTitle)
    if xlab is not None: xlabel(xlab)
    if ylab is not None: ylabel(ylab)
    axhline(y = Neuron.resting_potential.mu, color = 'k', linestyle = '--')
    axhline(y = threshold_potential, color = 'r', linestyle = '--')
    axhline(y = action_potential_peak, color = 'c', linestyle = '--')
    axhline(y = action_potential_trough.mu, color = 'm', linestyle = '--')
class NeuronMonitor(object):
    """A live plotting thread to monitor neuron output.

    Channels are registered with :meth:`addChannel`; :meth:`start` creates the
    plots and (optionally) begins a timer-driven update loop that samples each
    channel's attribute at ``sampling_rate`` Hz into a circular buffer of
    ``duration`` seconds.
    """
    # Instance parameters with default values that automatically get populated as object attributes in __init__(), overwritten by matching kwargs
    _params = dict(
        duration=5.0, # seconds of data to show (approx.)
        sampling_rate=15, # Hz
        show_axvline=True, # show a solid vertical line to indicate current time
        axvline_params=dict(linewidth=3, color='r'),
        show_axhlines=True, # show dotted horizontal lines to mark resting potential, threshold potential, etc.
        axhline_resting_potential=Neuron.resting_potential.mu,
        axhline_threshold_potential=threshold_potential,
        axhline_action_potential_peak=action_potential_peak,
        axhline_action_potential_trough=action_potential_trough.mu,
        show_legend=True)
    # Axes parameters passed to Figure.gca(), also overwritten by kwargs
    _axes_params=dict(
        title="Neuron",
        xlabel="Time (s)",
        ylabel="Membrane potential (V)",
        ylim=(action_potential_trough.mu - 0.01, action_potential_peak + 0.02))
    # Common plotting parameters passed to Axes.plot(), also overwritten by kwargs
    _plot_params = dict()

    @classmethod
    def resolve_params(cls, default_params, given_params):
        """A generator over default_params, updated with values from given_params with matching keys.

        To convert resulting (param, value) pairs into a dict, use: dict(resolve_params(..., ...))
        NOTE: Matched items from given_params are popped to enforce unique matching and the idea of *leftover* params.
        TODO: Make this a generic utility method.
        """
        # Equivalent dict comprehension: {param: given_params.pop(param, value) for param, value in default_params}
        for param, value in default_params.iteritems():
            yield param, given_params.pop(param, value) # pick from given_params, if supplied, popping it

    def __init__(self, **kwargs):
        """Configure the monitor; kwargs override _params, then _axes_params, then _plot_params."""
        self.logger = logging.getLogger(self.__class__.__name__)
        # Set instance attributes from kwargs with defaults from _params
        for param, value in self.resolve_params(self._params, kwargs):
            setattr(self, param, value)
        # Process remaining kwargs
        self.axes_params = dict(self.resolve_params(self._axes_params, kwargs)) # pops matched params from kwargs
        # TODO Modify xlabel to say "Time (s) mod {duration}"?
        self.plot_params = dict(self.resolve_params(self._plot_params, kwargs))
        # Initialize other members
        # NOTE(review): num_samples is a float here (duration defaults to 5.0);
        # np.linspace/np.repeat tolerate that on old NumPy, newer versions may not -- verify
        self.num_samples = self.duration * self.sampling_rate
        self.times = np.linspace(0.0, self.duration, self.num_samples) # pick num_samples samples in the range [0.0, duration]
        self.sample_index = 0 # common index into each channel's samples array
        self.channels = OrderedDict()

    def addChannel(self, label, obj, attr='potential', analog=True, color=None):
        """Add a new channel to this monitor for plotting obj.attr."""
        # samples buffer starts filled with the attribute's current value (or 0.0)
        channel = dict(obj=obj, attr=attr, analog=analog, color=color, samples=np.repeat(np.float32(getattr(obj, attr, 0.0)), self.num_samples))
        self.channels[label] = channel # samples will be plotted when start() is called

    def start(self, run_setup=True, run_update_loop=True):
        """Create plots, optionally run setup before and begin update loop after.

        NOTE: If setup() and update() are run externally, all these methods should be called from the same dedicated thread/process.
        """
        # Setup graphics
        if run_setup:
            self.setup()
        # Create initial plots
        i = 0 # for counting backup (auto) colors; won't be used if channel colors are specified
        for label, channel in self.channels.iteritems():
            # TODO Create different plots based on analog flag
            channel['plot'] = self.ax.plot(self.times, channel['samples'], label=label, color=(channel['color'] if channel['color'] is not None else cm.jet(1. * i / len(self.channels))), **self.plot_params)[0] # assuming first (and only) returned plot is the one we want
            i += 1
        if self.show_legend:
            self.ax.legend(loc='upper right')
        self.fig.show()
        # Begin update loop
        if run_update_loop:
            self.update_loop()

    def stop(self):
        """Cancel the update timer, if the update loop was ever started."""
        if hasattr(self, 'update_timer'):
            self.update_timer.stop()
            self.logger.debug("Update loop stopped")

    def setup(self):
        """Build the figure, axes and static reference lines."""
        self.fig = figure(figsize=(12, 9)) # figsize is in inches
        self.ax = self.fig.gca(**self.axes_params)
        if self.show_axhlines:
            # TODO This only draws known axhlines; to extend this, use:
            #   for axhline_name, params in self.vars() if axhline_name.startswith('axhline'):
            #       self.ax.axhline(dict(linestyle='--').update(**params)) # default to dashed lines
            #   Corresponding items in _params will need to be updated:
            #   axhline_resting_potential=dict(y=self.neuron_threshold_potential, color='r', linestyle='--')
            self.ax.axhline(y=self.axhline_resting_potential, color='k', linestyle='--')
            self.ax.axhline(y=self.axhline_threshold_potential, color='r', linestyle='--')
            self.ax.axhline(y=self.axhline_action_potential_peak, color='c', linestyle='--')
            self.ax.axhline(y=self.axhline_action_potential_trough, color='m', linestyle='--')
        if self.show_axvline:
            self.axvline = self.ax.axvline(**self.axvline_params)

    def update_loop(self):
        """Drive update() via a matplotlib canvas timer at the sampling rate."""
        self.update_timer = self.fig.canvas.new_timer(interval=int(1000 / self.sampling_rate))
        self.update_timer.add_callback(self.update)
        self.update_timer.start()
        self.logger.debug("Update loop started")

    def update(self):
        """Update all channel plots, copying in respective attr value from obj. Meant to be called from same thread/process as start()."""
        if self.show_axvline:
            self.axvline.set_xdata(self.times[self.sample_index])
        for channel in self.channels.itervalues():
            channel['samples'][self.sample_index] = getattr(channel['obj'], channel['attr']) # retrieve current value
            channel['plot'].set_ydata(channel['samples']) # update plot
        self.sample_index = (self.sample_index + 1) % self.num_samples # increment common index
        self.fig.canvas.draw()
def test_population():
    """Demo: build two populations, connect the first onto the second, and show a 3D plot."""
    logging.basicConfig(format="%(levelname)s | %(name)s | %(funcName)s() | %(message)s", level=logging.DEBUG) # sets up basic logging, if it's not already configured
    timeNow = 0.0
    population1 = Population(numNeurons=1000, timeNow=timeNow)
    population2 = Population(numNeurons=500, timeNow=timeNow, bounds=np.float32([[-25.0, -25.0, 7.5], [25.0, 25.0, 12.5]]), distribution=MultivariateNormal(mu=np.float32([0.0, 0.0, 10.0]), cov=(np.float32([400, 400, 4]) * np.identity(3))))
    # Aim the growth cone from population1's distribution centre toward population2's
    growthConeDirection = population2.distribution.mu - population1.distribution.mu
    growthConeDirection /= np.linalg.norm(growthConeDirection, ord=2) # need a unit vector
    population1.connectWith(population2, maxConnectionsPerNeuron=25, growthCone=GrowthCone(growthConeDirection))
    #population2.plotNeuronLocations3D(equalScaleZ=True) # e.g.: plot a single neuron population
    plotPopulations([population1, population2], populationColors=['b', 'r'], showConnections=True, connectionColors=[None, None], equalScaleZ=True)
    # NOTE: For connectionColors, pass None to draw connection lines with pre-neuron's color; or specify colors explicitly, e.g.: connectionColors=[(0.9, 0.8, 1.0, 0.5), None]
if __name__ == "__main__":
    # Run the interactive population demo when executed as a script.
    test_population()
| {
"repo_name": "napratin/nap",
"path": "nap/neuron.py",
"copies": "1",
"size": "33717",
"license": "mit",
"hash": -2421335344178411000,
"line_mean": 53.5582524272,
"line_max": 284,
"alpha_frac": 0.702553608,
"autogenerated": false,
"ratio": 3.510724698042482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4713278306042482,
"avg_score": null,
"num_lines": null
} |
""" An event dispatcher that goes right into the model storage. """
from status_app.models import EventBucket, RawEvent
from django.db.models import Count
from datetime import timedelta
def dispatch(source, event_type, timestamp, value, private_detail, host):
    """Record a raw event and roll it up into minute/hour/day buckets.

    Stores the event itself as a RawEvent, then updates six EventBucket
    rollups: one per granularity (minute, hour, day), each for both the
    event's own host and the special all-hosts bucket.

    :param source: event source (model FK/value, passed through to the ORM).
    :param event_type: RawEvent.<foo> integer value (PASS_FAIL, INTERVAL, TEXT).
    :param timestamp: datetime of the event; sub-second precision is dropped.
    :param value: pass/fail flag, interval length, or text depending on type.
    :param private_detail: opaque detail stored on the raw event only.
    :param host: originating host.
    """
    # XXX - assuming event_type is coming in as RawEvent.<foo> integer value
    RawEvent.objects.create(source=source, event_type=event_type,
                            timestamp=timestamp, value=value,
                            private_detail=private_detail, host=host)

    all_hosts = EventBucket.ALL_HOST_BUCKET
    timestamp = timestamp.replace(microsecond=0)
    minute_start = timestamp.replace(second=0)
    hour_start = timestamp.replace(second=0, minute=0)
    day_start = timestamp.replace(second=0, minute=0, hour=0)

    # One (start, length) spec per granularity; fetch/create a bucket for the
    # event's host and for the all-hosts rollup at each granularity.  This
    # replaces six copy-pasted get_or_create calls, preserving their order:
    # minute(host), minute(all), hour(host), hour(all), day(host), day(all).
    bucket_specs = ((EventBucket.MINUTE, minute_start, timedelta(minutes=1)),
                    (EventBucket.HOUR, hour_start, timedelta(hours=1)),
                    (EventBucket.DAY, day_start, timedelta(days=1)))
    spans = {}  # bucket_type -> (start_time, end_time), used for TEXT events
    buckets = []
    for bucket_type, start_time, length in bucket_specs:
        spans[bucket_type] = (start_time, start_time + length)
        for bucket_host in (host, all_hosts):
            buckets.append(EventBucket.objects.get_or_create(
                source=source, host=bucket_host, event_type=event_type,
                bucket_type=bucket_type, start_time=start_time)[0])

    for bucket in buckets:
        # I'd like this to be replaced with something like
        # UPDATE bucket set value = value + 1
        if RawEvent.PASS_FAIL == event_type:
            bucket.total_count += 1
            if value:  # truthiness instead of `== True` (value is a flag)
                bucket.total_pass += 1
        elif RawEvent.INTERVAL == event_type:
            bucket.total_count += 1
            bucket.total_time += value
        elif RawEvent.TEXT == event_type:
            # Recount distinct text values observed within this bucket's span
            start_time, end_time = spans[bucket.bucket_type]
            # XXX - is there a more efficient way to get this?
            filtered = RawEvent.objects.filter(source=bucket.source,
                                               event_type=bucket.event_type,
                                               timestamp__gte=start_time,
                                               timestamp__lt=end_time)
            if EventBucket.ALL_HOST_BUCKET != bucket.host:
                # per-host bucket: restrict the recount to that host
                filtered = filtered.filter(host=bucket.host)
            bucket.unique_values = filtered.aggregate(
                Count('value', distinct=True))["value__count"]
        bucket.save()
| {
"repo_name": "vegitron/status-app",
"path": "status_app/dispatcher/model.py",
"copies": "1",
"size": "3923",
"license": "apache-2.0",
"hash": 1121295613793619100,
"line_mean": 41.6413043478,
"line_max": 100,
"alpha_frac": 0.5602854958,
"autogenerated": false,
"ratio": 4.368596881959911,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016901599829396607,
"num_lines": 92
} |
"""An event handler, which stores events and passes Pygame events to them."""
import pygame as pg
from ..conf import conf
from . import inputs
from .evts import BaseEvent, Event
from . import conffile
class EventHandler (object):
"""Handles events.
EventHandler(scheduler)
:arg scheduler: :class:`sched.Scheduler <engine.sched.Scheduler>` instance to
use for determining the current framerate.
You probably want to call :meth:`normalise`, then call :meth:`update` every
frame to process and propagate Pygame events and call callbacks.
Some notes:
- An event may be placed in a 'domain', which is represented by a string name.
- Events are named or unnamed, and an :class:`EventHandler` acts like a
``dict`` of named events (only supports getting, setting and deleting
items).
- The ``'domain'`` name is reserved.
- The ``__contains__`` method (``event in event_handler``) works for
:class:`BaseEvent <engine.evt.evts.BaseEvent>` instances as well as names.
"""
    def __init__ (self, scheduler):
        """Initialise an empty handler; see the class docstring for usage."""
        #: As passed to the constructor.
        self.scheduler = scheduler
        #: A ``set`` of domains that will receive relevant events.
        self.active_domains = set()
        #: A ``set`` of domains that have been disabled through
        #: :meth:`disable`.
        self.inactive_domains = set()
        # {domain: set(events)}; the None key is the catch-all for events
        # registered without an explicit domain
        self._evts_by_domain = {}
        #: A ``set`` of all registered unnamed events.
        self.evts = set()
        # {name: event} for named events; wrapped by this class like a dict
        self._named_evts = {}
        #: All inputs registered with events in this handler.
        self.inputs = set()
        # inputs prefiltered by Input.filters (nested (attr, {value: ...}) tree)
        self._filtered_inputs = ('type', {inputs.UNFILTERABLE: set()})
        # identifiers for initialised devices
        self._init_data = set()
        # all registered modifiers
        self._mods = {}
        #: Whether to capture the mouse cursor by centring it on the window
        #: every frame.  You might also want to grab all input
        #: (``pygame.event.set_grab``).
        self.autocentre_mouse = False
def __contains__ (self, item):
return (item in self._named_evts or item in self.evts or
item in self._named_evts.itervalues())
    def __getitem__ (self, item):
        # dict-like access: look up a named event; raises KeyError if missing
        return self._named_evts[item]
    def __setitem__ (self, item, val):
        # dict-like assignment: register val as a named event under name item
        self.add(**{item: val})
    def __delitem__ (self, item):
        # dict-like deletion: unregister the named event; raises KeyError if missing
        self.rm(item)
    def add (self, *evts, **named_evts):
        """Register events.

add(*evts, **named_evts) -> unnamed

Arguments are any number of events.  Keyword arguments define named events
with the key as the name.  An event can be a
:class:`BaseEvent <engine.evt.evts.BaseEvent>` instance, or a sequence of
Pygame event IDs and functions to create an
:class:`Event <engine.evt.evts.BaseEvent>` that listens for the given
Pygame events and has the functions as callbacks.  For example::

    handler.add(
        (pygame.KEYDOWN, f1, f2),
        (f3, pygame.KEYDOWN, f4, pygame.KEYUP)
    )

will register callbacks ``f1`` and ``f2`` for ``keydown`` events, and ``f3``
and ``f4`` for both ``keydown`` and ``keyup`` events.

:return: a list of added unnamed events (positional arguments) (possibly
         created in this call).

        """
        new_unnamed = []
        unnamed = self.evts
        all_named = self._named_evts
        by_domain = self._evts_by_domain
        # extract domain from keyword args ('domain' is a reserved name)
        if 'domain' in named_evts:
            domain = named_evts['domain']
            if domain is not None and not isinstance(domain, basestring):
                raise ValueError('invalid domain (or, \'domain\' is an '
                                 'invalid event name)')
            del named_evts['domain']
        else:
            domain = None
        if domain not in by_domain:
            # domain doesn't exist yet
            by_domain[domain] = set()
            if domain is not None:
                self.active_domains.add(domain)
        # process positional (unnamed) then keyword (named) events uniformly
        for evts in (((None, evt) for evt in evts), named_evts.iteritems()):
            for name, evt in evts:
                if not isinstance(evt, BaseEvent):
                    # got (possibly mixed) list of pgevts/cbs: create event
                    pgevts = []
                    cbs = []
                    for item in evt:
                        (cbs if callable(item) else pgevts).append(item)
                    evt = Event(*(inputs.BasicInput(pgevt)
                                  for pgevt in pgevts)).cb(*cbs)
                if evt.eh is not None:
                    if evt.eh is self:
                        # already own this event: re-registration may move it
                        # to a different domain and/or name
                        prev_domain = evt._domain
                        if domain != prev_domain:
                            # change registered domain
                            by_domain[prev_domain].remove(evt)
                            if not by_domain[prev_domain]:
                                del by_domain[prev_domain]
                            evt._domain = domain
                            by_domain[domain].add(evt)
                        prev_name = evt._regname
                        if name != prev_name:
                            # change registered name
                            if prev_name is None:
                                unnamed.remove(evt)
                            else:
                                del all_named[prev_name]
                            evt._regname = name
                            if name is None:
                                unnamed.add(evt)
                            else:
                                all_named[name] = evt
                    else:
                        # owned by another handler
                        raise RuntimeError('an event should not be added to '
                                           'more than one EventHandler')
                else:
                    # new event
                    evt.eh = self
                    evt._changed = False
                    evt._domain = domain
                    evt._regname = name
                    by_domain[domain].add(evt)
                    if name is None:
                        unnamed.add(evt)
                        new_unnamed.append(evt)
                    else:
                        all_named[name] = evt
                self._add_inputs(*evt.inputs)
        return new_unnamed
    def rm (self, *evts):
        """Takes any number of registered event names or events to remove them.

Raises ``KeyError`` if any arguments are missing.

        """
        unnamed = self.evts
        named = self._named_evts
        by_domain = self._evts_by_domain
        active = self.active_domains
        inactive = self.inactive_domains
        for evt in evts:
            if isinstance(evt, basestring):
                # got name
                evt = named[evt] # raises KeyError
            if evt.eh is self:
                evt.eh = None
                domain = evt._domain
                by_domain[domain].remove(evt)
                if not by_domain[domain]:
                    # last event in the domain: drop the domain entirely
                    # NOTE(review): for domain None (which is never in
                    # active/inactive) this reaches inactive.remove(None) and
                    # would raise KeyError -- verify intended behaviour
                    del by_domain[domain]
                    if domain in active:
                        active.remove(domain)
                    else:
                        inactive.remove(domain)
                evt._domain = None
                if evt._regname is None:
                    unnamed.remove(evt)
                else:
                    del named[evt._regname]
                evt._regname = None
                self._rm_inputs(*evt.inputs)
            else:
                raise KeyError(evt)
def cb (self, pos_cbs={}, **kw_cbs):
"""Attach callbacks to named events.
Each dict has keys as event names and values as callback functions or sequences
of callback functions. For example::
evthandler.cb({'jump': jump}, walk=[e.walk for e in entities])
"""
for evt_cbs in (pos_cbs, kw_cbs):
for evt_name, cbs in evt_cbs.iteritems():
if callable(cbs):
cbs = [cbs]
self[evt_name].cb(*cbs)
    def _prefilter (self, filtered, filters, i):
        # Insert input i into the prefilter tree.  `filtered` is an
        # (attr, {value: child}) node; `filters` maps attr -> accepted values.
        attr, filtered = filtered
        filters = dict(filters)
        # Input guarantees that this is non-empty
        vals = filters.pop(attr, (inputs.UNFILTERABLE,))
        for val in vals:
            if val in filtered:
                child = filtered[val]
            else:
                # create new branch
                filtered[val] = child = set()
            # add input to child
            if isinstance(child, tuple):
                # internal node: recurse with the remaining filters
                self._prefilter(child, filters, i)
            else:
                # reached the end of a branch: child is a set of inputs
                if filters:
                    # create new levels for each remaining filter
                    for attr, vals in filters.iteritems():
                        child = (attr, {inputs.UNFILTERABLE: child})
                        filtered[val] = child
                    self._prefilter(child, filters, i)
                else:
                    child.add(i)
    def _unprefilter (self, filtered, filters, i):
        # Remove input i from the prefilter tree; inverse of _prefilter.
        filters = dict(filters)
        attr, filtered = filtered
        # Input guarantees that this is non-empty
        vals = filters.pop(attr, (inputs.UNFILTERABLE,))
        for val in vals:
            assert val in filtered
            child = filtered[val]
            if isinstance(child, tuple):
                # internal node: recurse, then inspect its value dict
                self._unprefilter(child, filters, i)
                child = child[1]
            else:
                # reached the end of a branch: child is a set of inputs
                assert i in child
                child.remove(i)
            if not child:
                # child is now empty
                if val is inputs.UNFILTERABLE:
                    # retain the UNFILTERABLE branch
                    filtered[val] = set()
                else:
                    del filtered[val]
        if attr != 'type' and not any(filtered.itervalues()):
            # all branches are empty (but always retain the 'type' branch)
            filtered.clear()
    def _add_inputs (self, *inps):
        # Register inputs (flattening any events into their inputs), wiring up
        # button modifiers and inserting each input into the prefilter tree.
        mods = self._mods
        inps = list(inps)
        while inps:
            i = inps.pop()
            if isinstance(i, BaseEvent):
                # an event: queue its constituent inputs
                # NOTE(review): execution appears to fall through to _init()
                # below even for events -- verify events support it, or
                # whether a `continue` is intended here
                inps.extend(i.inputs)
            if i in self.inputs:
                # already added (might happen if events share an input)
                continue
            i._init()
            if isinstance(i, inputs.ButtonInput):
                # add mods, sorted by device and device ID
                for m in i.mods:
                    added = False
                    # NOTE(review): `added` is reset for every modifier, so
                    # the flag never prevents the recursive call -- harmless
                    # since _add_inputs() skips known inputs, but verify intent
                    if m.device in inputs.mod_devices[i.device]:
                        this_mods = (mods.setdefault(m.device, {})
                                         .setdefault(i._device_id, {}))
                        if m in this_mods:
                            # already added as an input
                            this_mods[m].add(i)
                        else:
                            this_mods[m] = set((i,))
                            if not added:
                                added = True
                                self._add_inputs(m)
            self.inputs.add(i)
            self._prefilter(self._filtered_inputs, i.filters, i)
    def _rm_inputs (self, *inps):
        # Unregister inputs: detach button modifiers and remove each input
        # from the prefilter tree; inverse of _add_inputs.
        mods = self._mods
        for i in inps:
            if i not in self.inputs:
                # already removed (might happen if events share an input)
                continue
            if isinstance(i, inputs.ButtonInput):
                for m in i.mods:
                    rmd = False
                    # NOTE(review): like `added` in _add_inputs, `rmd` is
                    # reset per modifier and so never blocks the recursion
                    if m.device in inputs.mod_devices[i.device]:
                        d1 = mods[m.device]
                        d2 = d1[i._device_id]
                        d3 = d2[m]
                        assert i in d3
                        d3.remove(i)
                        if not d3:
                            # modifier no longer used by any input: drop it too
                            del d2[m]
                            if not rmd:
                                rmd = True
                                self._rm_inputs(m)
                        if not d2:
                            del d1[i._device_id]
                        if not d1:
                            del mods[m.device]
            self.inputs.remove(i)
            self._unprefilter(self._filtered_inputs, i.filters, i)
    def update (self):
        """Process Pygame events and call callbacks."""
        all_inputs = self._filtered_inputs
        mods = self._mods  # NOTE(review): unused in this method -- verify
        pgevts = pg.event.get()
        # centre mouse
        if self.autocentre_mouse:
            sfc = pg.display.get_surface()
            if sfc is not None:
                pg.mouse.set_pos(sfc.get_rect().center)
                # remove the Pygame event this sends
                pg.event.clear(pg.MOUSEMOTION)
        for pgevt in pgevts:
            # find matching inputs by walking the prefilter tree
            sources = [all_inputs]
            inps = []
            while sources:
                source = sources.pop()
                if isinstance(source, tuple):
                    attr, filtered = source
                    if hasattr(pgevt, attr):
                        val = getattr(pgevt, attr)
                        if val in filtered:
                            sources.append(filtered[val])
                    # the UNFILTERABLE branch always matches
                    sources.append(filtered[inputs.UNFILTERABLE])
                else:
                    # leaf: a set of inputs
                    inps.append(source)
            for i in set().union(*inps):
                # let the input consume the event; mark its event chain dirty
                if i.handle(pgevt) and i.evt is not None:
                    evt = i.evt
                    while evt is not None:
                        evt._changed = True
                        evt = evt.evt
        # call callbacks: catch-all (None) domain first, then active domains
        by_domain = self._evts_by_domain
        for domains in ((None,), self.active_domains):
            for domain in domains:
                if domain is not None or domain in by_domain:
                    for evt in by_domain[domain]:
                        changed = evt._changed
                        evt._changed = False
                        evt.respond(changed)
def domains (self, *domains):
"""Get a set of all events in the given domains.
domains(*domains) -> evts
"""
evts = set()
for domain in domains:
if domain is None:
raise KeyError(domain)
evts.update(self._evts_by_domain[domain]) # raises KeyError
return evts
    def _load_evts (self, evts, domain):
        # load events as parsed from config file
        # ('domain' is reserved because it would collide with the domain
        # keyword consumed by add(); the returned dict keeps the injected
        # 'domain' entry -- add() receives a copy via ** and pops its own)
        if 'domain' in evts:
            raise ValueError('\'domain\' may not be used as an event name')
        evts['domain'] = domain
        self.add(**evts)
        return evts
    def load (self, filename, domain = None):
        """Load events from a configuration file (see
:mod:`conffile <engine.evt.conffile>`).

load(filename, domain = None) -> evts

:arg filename: a filename to load as the configuration file, under
               :data:`conf.EVT_DIR`.
:arg domain: domain to place loaded events in.

:return: ``{name: event}`` for loaded events.

        """
        # parse within the with-block so the file is closed before registering
        with open(conf.EVT_DIR + filename) as f:
            evts = conffile.parse(f)
        return self._load_evts(evts, domain)
def load_s (self, s, domain = None):
"""Load events from a configuration string (see
:mod:`conffile <engine.evt.conffile>`).
load_s(s, domain = None) -> evts
:arg s: string to parse as an event configuration.
:arg domain: domain to place loaded events in.
:return: ``{name: event}`` for loaded events.
"""
return self._load_evts(conffile.parse_s(s), domain)
    def save (self, name, *domains):
        """Not implemented."""
        # planned: save everything in the domains to file
        pass
    def save_s (self, *domains):
        """Not implemented."""
        # planned: string counterpart of save()
        pass
def unload (self, *domains):
"""Remove all events in the given domains.
unload(*domains) -> evts
:return: all removed events as ``(unnamed, named)``, like :meth:`domains`.
Raises ``KeyError`` if a domain is missing.
"""
unnamed, named = self.domains(*domains) # raises KeyError
# now all domains exist so we can safely make changes
# this removes empty domains
self.rm(*unnamed)
self.rm(*named)
return (unnamed, named)
def disable (self, *domains):
"""Disable event handling in all of the given domains.
Missing or already disabled domains are ignored (a domain is missing if it is
empty).
"""
active = self.active_domains
inactive = self.inactive_domains
for domain in domains:
if domain in active:
active.remove(domain)
inactive.add(domain)
def enable (self, *domains):
"""Re-enable event handling in all of the given domains.
Missing or already active domains are ignored. Beware that state is preserved,
so buttons that were held when disabled remain held when enabled, no matter how
much time has passed, without sending a
:data:`DOWN <engine.evt.evts.bmode.DOWN>`.
"""
active = self.active_domains
inactive = self.inactive_domains
for domain in domains:
if domain in inactive:
inactive.remove(domain)
active.add(domain)
def assign_devices (self, **devices):
"""Assign device IDs to inputs by device variable.
:arg devices: keyword arguments with the argument name the variable and the
value the new device ID for each input with this device variable.
See :attr:`Input.device_var <engine.evt.inputs.Input.device_var>` and
:attr:`Input.device_id <engine.evt.inputs.Input.device_id>` for details
(including possible device ID values).
"""
for i in self.inputs:
if i.device_var is not None and i.device_var in devices:
i.device_id = devices[i.device_var]
def grab (self, cb, *types):
    """Not implemented.

    Intended to capture the next button-type input of any of the given
    types and pass it to ``cb``.
    """
    # grabs next button-type input from given devices/types and passes it to cb
    # types are device name or (device, type_name) (see inputs_by_name)
    # need to be able to track _every_ button-type input, so, eg. axes can be used
    pass
def normalise (self):
    """Determine and set states of all inputs, where possible.

This includes axis positions, button held states, etc..

You should generally call this whenever you start using this event handler,
either for the first time, or after a period of letting something else handle
events.
"""
    # ask each input to refresh its own state from the hardware
    for i in self.inputs:
        i.normalise()
    # flag every event (named and unnamed) as changed so the refreshed
    # input states are picked up on the next update pass
    for es in (self._named_evts.itervalues(), self.evts):
        for e in es:
            e._changed = True
def monitor_deadzones (self, *deadzones):
    """Not implemented.

    Intended to start recording deadzone data for the given inputs.
    """
    # takes list of (device, id, *args); do for all if none given
    pass
def stop_monitor_deadzones (self):
    """Not implemented.

    Intended to stop deadzone monitoring and return the recorded data.
    """
    # returns {(device, id, attrs): deadzone}, attrs is required attribute values on the input (pad axis: {'axis': axis_input.axis})
    # can register other deadzone events?
    pass
def set_deadzones (self, *deadzones):
    """Set deadzones for all registered inputs that support it.

:attr deadzones:
    any number of ``((device, device_id=True, attrs={}), deadzone)`` tuples to
    set the ``deadzone`` attribute of each matching input to ``deadzone``.

    ``device_id`` may be a variable
    (:attr:`Input.device_var <engine.evt.inputs.Input.device_var>`) or
    non-string ID
    (:attr:`Input.device_id <engine.evt.inputs.Input.device_id>`).  ``attrs``
    is a dict of attributes the input must have.  See also
    :attr:`Input.device_id <engine.evt.inputs.Input.device_id>`.

    An item may also be just ``(device, deadzone)``.
"""
    for ident, dz in deadzones:
        # normalise the identifier into a [device, device_id, attrs] list,
        # filling in the defaults for the short forms
        if isinstance(ident, basestring):
            # just got a device
            ident = [ident]
        else:
            ident = list(ident)
        if len(ident) == 0:
            raise ValueError('invalid input identifier: empty sequence')
        if len(ident) == 1:
            # accept any device ID
            ident.append(True)
        if len(ident) == 2:
            # no more constraints
            ident.append({})
        device, dev_id, attrs = ident
        # a string device ID is a device variable, not a concrete ID
        got_var = isinstance(dev_id, basestring)
        for i in self.inputs:
            if got_var:
                match_dev_id = i.device_var == dev_id
            else:
                # True acts as a wildcard matching any concrete device ID
                match_dev_id = (i.device_id is not None and
                                (dev_id is True or i.device_id == dev_id))
            if i.device == device and match_dev_id:
                if all(getattr(i, attr) == val
                       for attr, val in attrs.iteritems()):
                    try:
                        getattr(i, 'deadzone')
                    except AttributeError:
                        # doesn't have a deadzone
                        pass
                    else:
                        i.deadzone = dz
| {
"repo_name": "ikn/pygame-template",
"path": "game/engine/evt/handler.py",
"copies": "1",
"size": "21298",
"license": "bsd-3-clause",
"hash": 7589461671924645000,
"line_mean": 35.9116117851,
"line_max": 136,
"alpha_frac": 0.5200018781,
"autogenerated": false,
"ratio": 4.434311888403081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017020296458053843,
"num_lines": 577
} |
"""An event loop.
This event loop should handle both asynchronous App Engine RPC objects
(specifically urlfetch, memcache and datastore RPC objects) and arbitrary
callback functions with an optional time delay.
Normally, event loops are singleton objects, though there is no
enforcement of this requirement.
The API here is inspired by Monocle.
"""
import collections
import logging
import os
import threading
import time
from google.appengine.api.apiproxy_rpc import RPC
from google.appengine.datastore import datastore_rpc
from . import utils
logging_debug = utils.logging_debug
IDLE = RPC.IDLE
RUNNING = RPC.RUNNING
FINISHING = RPC.FINISHING
class EventLoop(object):
  """An event loop.

  Work is dispatched from four internal structures, tried in this order
  by run0():

  - current: callbacks ready to run now (FIFO);
  - idlers: round-robin callbacks run when nothing is immediately ready;
  - queue: time-delayed callbacks, kept sorted by due time;
  - rpcs: in-flight RPCs, blocked on when nothing else is runnable.
  """

  def __init__(self):
    """Constructor."""
    self.current = collections.deque()  # FIFO list of (callback, args, kwds)
    self.idlers = collections.deque()  # Cyclic list of (callback, args, kwds)
    self.inactive = 0  # How many idlers in a row were no-ops
    self.queue = []  # Sorted list of (time, callback, args, kwds)
    self.rpcs = {}  # Map of rpc -> (callback, args, kwds)

  def insort_event_right(self, event, lo=0, hi=None):
    """Insert event in queue, and keep it sorted assuming queue is sorted.

    If event is already in queue, insert it to the right of the rightmost
    event (to keep FIFO order).

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """
    if lo < 0:
      raise ValueError('lo must be non-negative')
    if hi is None:
      hi = len(self.queue)
    # binary search on the due time event[0], biased right so equal times
    # keep their insertion order
    while lo < hi:
      mid = (lo + hi) // 2
      if event[0] < self.queue[mid][0]: hi = mid
      else: lo = mid + 1
    self.queue.insert(lo, event)

  # TODO: Rename to queue_callback?
  def queue_call(self, delay, callback, *args, **kwds):
    """Schedule a function call at a specific time in the future."""
    if delay is None:
      # no delay: run on the next loop pass, in FIFO order
      self.current.append((callback, args, kwds))
      return
    if delay < 1e9:
      when = delay + time.time()
    else:
      # Times over a billion seconds are assumed to be absolute.
      when = delay
    self.insort_event_right((when, callback, args, kwds))

  def queue_rpc(self, rpc, callback=None, *args, **kwds):
    """Schedule an RPC with an optional callback.

    The caller must have previously sent the call to the service.
    The optional callback is called with the remaining arguments.

    NOTE: If the rpc is a MultiRpc, the callback will be called once
    for each sub-RPC. TODO: Is this a good idea?
    """
    if rpc is None:
      return
    if rpc.state not in (RUNNING, FINISHING):
      raise RuntimeError('rpc must be sent to service before queueing')
    if isinstance(rpc, datastore_rpc.MultiRpc):
      rpcs = rpc.rpcs
      if len(rpcs) > 1:
        # Don't call the callback until all sub-rpcs have completed.
        rpc.__done = False
        def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds):
          if r.state == FINISHING and not r.__done:
            r.__done = True
            c(*a, **k)
            # TODO: And again, what about exceptions?
        # each sub-RPC gets this helper; only the last one to finish
        # actually fires the user's callback
        callback = help_multi_rpc_along
        args = ()
        kwds = {}
    else:
      rpcs = [rpc]
    for rpc in rpcs:
      self.rpcs[rpc] = (callback, args, kwds)

  def add_idle(self, callback, *args, **kwds):
    """Add an idle callback.

    An idle callback can return True, False or None. These mean:

    - None: remove the callback (don't reschedule)
    - False: the callback did no work; reschedule later
    - True: the callback did some work; reschedule soon

    If the callback raises an exception, the traceback is logged and
    the callback is removed.
    """
    self.idlers.append((callback, args, kwds))

  def run_idle(self):
    """Run one of the idle callbacks.

    Returns:
      True if one was called, False if no idle callback was called.
    """
    # self.inactive counts consecutive no-op idlers; once every idler in
    # the cycle has reported no work, stop trying until something resets it
    if not self.idlers or self.inactive >= len(self.idlers):
      return False
    idler = self.idlers.popleft()
    callback, args, kwds = idler
    logging_debug('idler: %s', callback.__name__)
    res = callback(*args, **kwds)
    # See add_idle() for the meaning of the callback return value.
    if res is not None:
      if res:
        self.inactive = 0
      else:
        self.inactive += 1
      self.idlers.append(idler)
    else:
      logging_debug('idler %s removed', callback.__name__)
    return True

  def run0(self):
    """Run one item (a callback or an RPC wait_any).

    Returns:
      A time to sleep if something happened (may be 0);
      None if all queues are empty.
    """
    # priority 1: callbacks that are ready right now
    if self.current:
      self.inactive = 0
      callback, args, kwds = self.current.popleft()
      logging_debug('nowevent: %s', callback.__name__)
      callback(*args, **kwds)
      return 0
    # priority 2: idle callbacks
    if self.run_idle():
      return 0
    delay = None
    # priority 3: timed callbacks whose due time has arrived
    if self.queue:
      delay = self.queue[0][0] - time.time()
      if delay <= 0:
        self.inactive = 0
        _, callback, args, kwds = self.queue.pop(0)
        logging_debug('event: %s', callback.__name__)
        callback(*args, **kwds)
        # TODO: What if it raises an exception?
        return 0
    # priority 4: block waiting for an outstanding RPC to complete
    if self.rpcs:
      self.inactive = 0
      rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
      if rpc is not None:
        logging_debug('rpc: %s.%s', rpc.service, rpc.method)
        # Yes, wait_any() may return None even for a non-empty argument.
        # But no, it won't ever return an RPC not in its argument.
        if rpc not in self.rpcs:
          raise RuntimeError('rpc %r was not given to wait_any as a choice %r' %
                             (rpc, self.rpcs))
        callback, args, kwds = self.rpcs[rpc]
        del self.rpcs[rpc]
        if callback is not None:
          callback(*args, **kwds)
          # TODO: Again, what about exceptions?
      return 0
    return delay

  def run1(self):
    """Run one item (a callback or an RPC wait_any) or sleep.

    Returns:
      True if something happened; False if all queues are empty.
    """
    delay = self.run0()
    if delay is None:
      return False
    if delay > 0:
      time.sleep(delay)
    return True

  def run(self):
    """Run until there's nothing left to do."""
    # TODO: A way to stop running before the queue is empty.
    self.inactive = 0
    while True:
      if not self.run1():
        break
class _State(threading.local):
  # Thread-local holder: each thread gets its own event loop instance.
  event_loop = None
# os.environ key used to detect a fresh HTTP request: the App Engine
# runtime resets os.environ at the start of each request, which clears
# this marker and forces get_event_loop() to build a new loop.
_EVENT_LOOP_KEY = '__EVENT_LOOP__'

_state = _State()
def get_event_loop():
  """Return a EventLoop instance.

  A new instance is created for each new HTTP request.  We determine
  that we're in a new request by inspecting os.environ, which is reset
  at the start of each request.  Also, each thread gets its own loop.
  """
  # TODO: Make sure this works with the multithreaded Python 2.7 runtime.
  # only reuse the thread-local loop if the per-request marker survives
  loop = _state.event_loop if os.getenv(_EVENT_LOOP_KEY) else None
  if loop is None:
    loop = EventLoop()
    _state.event_loop = loop
    os.environ[_EVENT_LOOP_KEY] = '1'
  return loop
def queue_call(*args, **kwds):
  """Schedule a call on the current thread's event loop."""
  get_event_loop().queue_call(*args, **kwds)
def queue_rpc(rpc, callback=None, *args, **kwds):
  """Schedule an RPC (with optional callback) on the current loop."""
  get_event_loop().queue_rpc(rpc, callback, *args, **kwds)
def add_idle(callback, *args, **kwds):
  """Register an idle callback on the current thread's event loop."""
  get_event_loop().add_idle(callback, *args, **kwds)
def run():
  """Run the current thread's event loop until it is empty."""
  get_event_loop().run()
def run1():
  """Run one item or sleep; see EventLoop.run1()."""
  return get_event_loop().run1()
def run0():
  """Run one item without sleeping; see EventLoop.run0()."""
  return get_event_loop().run0()
| {
"repo_name": "adviti/melange",
"path": "thirdparty/google_appengine/google/appengine/ext/ndb/eventloop.py",
"copies": "1",
"size": "7479",
"license": "apache-2.0",
"hash": -6365087279443564000,
"line_mean": 27.5458015267,
"line_max": 80,
"alpha_frac": 0.6264206445,
"autogenerated": false,
"ratio": 3.6217917675544795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.969546865592565,
"avg_score": 0.010548751225766166,
"num_lines": 262
} |
#An event passed between objects
#FIXME class method to register event type IDs
#FIXME self.TypeID per type
class TEvent(object):
    """Base class for events passed between objects.

    Each event type is identified by a UUID string; ``TypeIDs`` maps
    those UUIDs to the handler classes.  The original sketch built that
    map at class-definition time (as ``self.TypeIDs = {...}`` in the
    class body), which was a SyntaxError-adjacent NameError: the
    subclasses are defined later in this module.  It is now resolved
    lazily via a property.
    """
    #FIXME class method to register event type IDs
    #FIXME self.TypeID per type

    # UUID -> handler class *name*; resolved lazily because the classes
    # are defined after this one.  NOTE(review): two entries share an
    # empty-string UUID placeholder in the original sketch (the second
    # wins) and several named classes are not defined in this module;
    # unresolvable names are simply omitted from TypeIDs.
    _TYPE_ID_NAMES = {"C6ACD4DAEE9A4030B58088D901CBB37F": "TEventSynthetic",
                      "832E5EC291334D2193A9D51D90ADAFEA": "TEVentCreateActor",
                      "": "TEventActorCreated",
                      "8DBFB5BFDD0B485AA037FFBEABC65CC3": "TEventRenameActor",
                      "": "TEventActorRenamed",
                      "DA1E0FF394494772835956FEB9EEB694": "TEventSetActorType",
                      "8DA1D94500C94441815F71CF8F28CEFE": "TEventUnsetActorType",
                      "6E5F1DCE9CA34B519649306973FC3EB6": "TEventMakeFavoredUser",
                      "2D3DC63D80564C06AF99D767401984F8": "TEventMakeNotFavoredUser",
                      "5710BEFD4F3C426C80DE44AA3E41A84B": "TEventUnsetFavoredUser",
                      "D54741C973764E7394596AC3053796CD": "TEventDeleteActor",
                      "F3544CC4C3C3441C8873D6601F14D1D8": "TEventCreateDataObject",
                      "DBB3A77DDEC64694B22583A9C4246816": "TEventRenameDataObject",
                      "0D1B455664ED41A39F46D46F44C2CFDD": "TEventMakeAsset",
                      "55F1ED222616479DA953A0E5A4603368": "TEventMakeNotAsset",
                      "1F1F9497CA454B01B765C98890FEC3A2": "TEventUnsetAsset",
                      "1000C7F55AB049969011AB1107D086F5": "TEventDeleteDataObject",
                      "0A104426A2A64F118E297CDFACD4DB01": "TEventSetCreateAllowed",
                      "1AC690A271834656AC4D460BBA5CDB7D": "TEventUnsetCreateAllowed",
                      "A08F75611E9247009B9E5DEED1F6B5FA": "TEventSetCreateRules",
                      "6BA0180D2E5F439DB649EEA2569515D5": "TEventUnsetCreateRules"}

    @property
    def TypeIDs(self):
        """Map of event type UUID -> event class, resolved lazily."""
        g = globals()
        return dict((uuid, g[name]) for uuid, name in
                    self._TYPE_ID_NAMES.items() if name in g)

    #FIXME eventTypeID shouldn't be necessary when calling with subclass
    def __init__(self, changeID, isCreation = False, targetID = None,
                 targetKey = None, targetTypeID = None, eventTypeID = None,
                 eventParams = None):
        # Bug fix: the original signature placed the required eventTypeID
        # and eventParams after defaulted parameters and omitted the colon,
        # both of which are syntax errors; they now default to None/{}.
        self._changeID = changeID
        self._isCreation = isCreation
        self._targetID = targetID
        self._targetKey = targetKey
        self._targetTypeID = targetTypeID
        self._type = eventTypeID
        # fresh dict per instance when no parameters are supplied
        self._params = {} if eventParams is None else eventParams
        self.isSynthetic = False

    def getChangeID(self):
        """Return the external change ID this event is associated with."""
        return self._changeID

    def isCreation(self):
        """Return whether this event creates its target."""
        return self._isCreation

    def getTypeID(self):
        """Return this event's type UUID."""
        return self._type

    def getTargetID(self):
        """Return the resolved target ID (None until resolved)."""
        return self._targetID

    def getTargetKey(self):
        """Return the lookup key for an unresolved target."""
        return self._targetKey

    def getTargetTypeID(self):
        """Return the target's type UUID."""
        return self._targetTypeID

    def getParams(self):
        """Return the event's parameter dict."""
        return self._params

    def resolve(self, targetID):
        """Fix the target: store its concrete ID and drop the lookup key."""
        self._targetID = targetID
        self._targetKey = None

    def clone(self, changeID):
        """Return a copy of this event attached to a different change ID."""
        # Bug fix: the original referenced nonexistent self._target /
        # self._targetType and passed too few constructor arguments.
        return self.TypeIDs[self._type](changeID, self._isCreation,
                                        self._targetID, self._targetKey,
                                        self._targetTypeID, self._type,
                                        self._params)
class TEventSynthetic(TEvent):
    """A composite event that wraps an ordered list of other events."""

    def __init__(self, changeID):
        # Bug fix: the original called the unbound ``super.__init__`` and
        # passed the type UUID positionally into the isCreation slot; use
        # explicit keywords against TEvent's signature instead.
        super(TEventSynthetic, self).__init__(
            changeID, eventTypeID = "C6ACD4DAEE9A4030B58088D901CBB37F",
            eventParams = {})
        self._events = []
        # Bug fix: the original set the misspelt ``isSynthentic``, leaving
        # the real ``isSynthetic`` flag from the base class at False.
        self.isSynthetic = True

    def addSynthetic(self, event):
        """Append a child event to this synthetic event."""
        self._events.append(event)

    def getEvents(self):
        """Return the list of wrapped child events."""
        return self._events

    def clone(self, changeID):
        """Deep-copy this event (and its children) onto a new change ID."""
        ev = self.TypeIDs[self._type](changeID)
        for event in self._events:
            # Bug fix: the original omitted the required changeID argument
            ev.addSynthetic(event.clone(changeID))
        return ev
# Concrete event types.  All behaviour lives in TEvent; the trailing
# comment on each class lists the keys expected in its eventParams dict.
# NOTE(review): TEVentCreateActor keeps its original (misspelt) name
# because the type-ID table above refers to it by that spelling.
class TEVentCreateActor(TEvent): pass #newName, newType = None, favoredUser = None
class TEventRenameActor(TEvent): pass #newName
class TEventSetActorType(TEvent): pass #newType
class TEventUnsetActorType(TEvent): pass
class TEventMakeFavoredUser(TEvent): pass
class TEventMakeNotFavoredUser(TEvent): pass
class TEventUnsetFavoredUser(TEvent): pass
class TEventDeleteActor(TEvent): pass
class TEventCreateDataObject(TEvent): pass #newName, isAsset = None
class TEventRenameDataObject(TEvent): pass #newName
class TEventMakeAsset(TEvent): pass
class TEventMakeNotAsset(TEvent): pass
class TEventUnsetAsset(TEvent): pass
class TEventDeleteDataObject(TEvent): pass
class TEventSetCreateAllowed(TEvent): pass #actorName, assetName, value
class TEventUnsetCreateAllowed(TEvent): pass #actorName, assetName
class TEventSetCreateRules(TEvent): pass #actorName, assetName, value
class TEventUnsetCreateRules(TEvent): pass #actorName, assetName
| {
"repo_name": "Dymaxion00/octotrike",
"path": "TEvent.py",
"copies": "1",
"size": "4304",
"license": "mit",
"hash": 596323240850011300,
"line_mean": 41.6138613861,
"line_max": 82,
"alpha_frac": 0.6751858736,
"autogenerated": false,
"ratio": 3.3235521235521235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9417227775447015,
"avg_score": 0.016302044341021633,
"num_lines": 101
} |
"""A New API for simplifying logical operations and comparisons of arrays.
>>> from napi import *
>>> exec(nsource)
>>> neval
<function __main__.neval>
:func:`.neval` function that behaves similar to :func:`eval` handles
chained comparisons and logical operations of arrays delicately:
>>> a = arange(8)
>>> neval('2 <= a < 3 or a > 5')
array([ True, True, True, False, False, False, True, True], dtype=bool)"""
import os
import imp
from .functions import *
from .transformers import *
from . import transformers
__all__ = ['nsource', 'nexec', 'neval'] + transformers.__all__
__version__ = '0.2.1'
class String(str):
    """Source text whose ``neval``/``nexec`` call sites can be renamed.

    Calling the instance rewrites `` neval(`` and `` nexec(`` call
    prefixes to use the given function names and returns the result.
    """

    def __call__(self, neval='neval', nexec='nexec'):
        neval = ' {}('.format(neval)
        nexec = ' {}('.format(nexec)
        # Bug fix: the original replaced the bare substring 'nexec', which
        # corrupted call sites by leaving the old '(' in place
        # (' nexec(x)' became ' name((x)'); match the full ' nexec('
        # prefix exactly as is done for neval.
        return self.replace(' neval(', neval).replace(' nexec(', nexec)
# Source text of napi.functions, exposed so users can ``exec`` it into
# their own namespace (see the module docstring).  imp.find_module
# locates the installed package directory on disk.
nsource = String(open(os.path.join(imp.find_module('napi')[1],
                                   'functions.py')).read())
def register_magic():
    """Register napi's magics with the running IPython shell, if any."""
    from .magics import NapiMagics
    shell = get_ipython()
    # get_ipython() returns None outside an interactive IPython session
    if shell is not None:
        shell.register_magics(NapiMagics(shell))
# Hook into IPython when it is available; plain Python imports of napi
# simply skip magic registration.
try:
    from IPython import get_ipython
except ImportError:
    pass
else:
    register_magic()
| {
"repo_name": "abakan/napi",
"path": "napi/__init__.py",
"copies": "1",
"size": "1200",
"license": "mit",
"hash": -5530927830338021000,
"line_mean": 22.5294117647,
"line_max": 78,
"alpha_frac": 0.6275,
"autogenerated": false,
"ratio": 3.287671232876712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44151712328767123,
"avg_score": null,
"num_lines": null
} |
import re
import logging
import sys
from procgame.game import Mode
from procgame.game.advancedmode import AdvancedMode
class RgbShowPlayer(AdvancedMode):
    """System-level mode that plays :class:`RgbShow` lamp/LED shows.

    Tracks active shows, schedules their per-frame updates with mode
    delays, and saves/restores device state around playback.
    """

    def __init__(self, game, priority=3):
        super(RgbShowPlayer, self).__init__(game, priority, mode_type=AdvancedMode.System)
        self.logger = logging.getLogger("RgbShowPlayer")
        self.shows = {}              # show key -> RgbShow
        self.active_shows = []       # keys of currently playing shows
        self.prior_lamp_states = {}  # device name -> saved state record

    def load(self, key, filename):
        """Parse an RgbShow file and register it under ``key``."""
        # load the show
        self.shows[key] = RgbShow(self.game, key, filename)

    def stop(self, key, cleanup=False):
        """Stop the active show ``key``; if ``cleanup``, rewind it first."""
        if(key not in self.active_shows):
            self.logger.info("suppressing request to stop inactive show: %s" % key)
            return
        if(cleanup):
            self.shows[key].restart()
        self.shows[key].stop()
        self.cancel_delayed(name=key)
        self.active_shows.remove(key)

    def stop_all(self):
        """Stop every active show (device state is not restored)."""
        for key in self.active_shows:
            self.shows[key].stop()
            self.cancel_delayed(name=key)
        self.active_shows = []

    def restart(self, key):
        """Rewind the active show ``key`` back to its first frame."""
        if(key not in self.active_shows):
            self.logger.info("suppressing request to restart inactive show: %s" % key)
            return
        self.shows[key].restart()

    def play_show(self, key, repeat=None, callback=None, save_state=True):
        """ plays an RgbShow -- if non-repeating, the callback function will be called on completion
            use repeat to override the behavior described in the show file
        """
        if(key not in self.shows):
            self.logger.info("suppressing request to play unknown show: %s" % key)
            return
        if(key in self.active_shows):
            self.logger.info("suppressing request to play already active show: %s" % key)
            return
        # TODO: determine which lamps are already in use and disable them...
        self.logger.info("Show '%s' is starting." % key)
        if(save_state):
            self.save_state(key)
            self.shows[key].set_callback(self.restore_state, key)
        self.active_shows.append(key)
        if(repeat is not None):
            self.shows[key].repeat = repeat
        self.shows[key].restart()
        # self.shows[key].debug_show()
        self.__update_show(key)

    def save_state(self, key):
        """ saves the current state of the devices used in the show 'key'
            so they can be restored at the conclusion of playback.  If the
            device already has a saved state, we assume it's already in use
            by another show (and the state was stored at that time), so when
            playback of this new show finishes that state should be restored.
        """
        if(key not in self.shows):
            self.logger.info("suppressing request to save_state for unknown show: %s" % key)
            return
        device_list = self.shows[key].get_device_list()
        for device in device_list:
            if(device.name not in self.prior_lamp_states):
                # device.state may be a plain attribute or a callable
                if(not callable(device.state)):
                    state = device.state
                else:
                    state = device.state()
                if state['outputDriveTime'] == 0: # only store indef schedules
                    sched = state['timeslots']
                else:
                    sched = 0x0
                r = {'device':device, 'schedule':sched}
                # colour-capable devices also get their colour remembered
                if(hasattr(device,'color')):
                    r['color']=device.color
                # self.logger.info("saving state for device '%s' (%x)" % (device.name,sched))
                self.prior_lamp_states[device.name] = r

    def restore_state(self, key):
        """ this method is used when a show (identified by key) has finished,
            so that lamps can be restored to their state prior to the playback
            of this show.
        """
        if(key not in self.shows):
            self.logger.info("suppressing request to restore_state for unknown show: %s" % key)
            return
        device_list = self.shows[key].get_device_list()
        for device in device_list:
            # make sure device isn't in use in another show!
            if(self.is_device_in_use(device.name, exclude=key)):
                self.logger.info("Not restoring state for device '%s' because it's still in use elsewhere" % device.name)
                pass
            elif(device.name in self.prior_lamp_states):
                # self.logger.info("restoring state for device '%s'" % device.name)
                r = self.prior_lamp_states[device.name]
                if('color' in r):
                    device.set_color(r['color'])
                device.schedule(r['schedule'])
                # only discard the saved record once the show is fully done
                if(key not in self.active_shows):
                    del self.prior_lamp_states[device.name]

    def is_device_in_use(self, name, exclude=None):
        """Return True if any active show other than ``exclude`` drives ``name``."""
        show_list = self.active_shows[:]
        if exclude is not None and exclude in show_list:
            show_list.remove(exclude)
        for s in show_list:
            if(self.shows[s].is_device_in_use(name)):
                return True
        return False

    def __update_show(self, key):
        """Advance show ``key`` one frame and reschedule if it is still live."""
        if(key not in self.shows):
            raise ValueError, "request to update unknown show: %s" % key
            return
        if(key not in self.active_shows):
            raise ValueError, "request to update inactive show: %s" % key
            return
        if(self.shows[key].update()):
            # if it returns true, the show is still live
            self.delay(name=key,
                       event_type=None,
                       delay=(self.shows[key].time)/1000.0, # delay is in seconds...
                       handler=self.__update_show,
                       param=key)
        else:
            self.logger.info("Show '%s' is done." % key)
            self.active_shows.remove(key)
            if(len(self.active_shows)==0):
                self.logger.info("all shows done, calling update lamps")
                self.game.update_lamps()
            # show is done
            pass

    def reset(self):
        # TODO: ???
        pass
class RgbShow(object):
    """A parsed RGB show file: header parameters, a colour map and one
    track of colour commands per device.

    File format (see __init__): lines starting with '#' are comments,
    lines starting with '!' are header data (colour definitions and the
    time/repeat/hold parameters), anything else is a track line.
    """

    def __init__(self, game, key, filename):
        self.logger = logging.getLogger("rgbShow")
        self.logger.info("loading RgbShow '%s'" % filename)
        self.game = game
        self.color_map = {}
        self.tracks = []
        self.length = 0
        self.hold = False
        self.repeat = False
        self.time = 33               # ms per frame
        self.callback_fired = False
        self.callback = None
        self.callback_param = None
        self.now = 0                 # current frame index
        self.key = key
        self.shows_over = False
        f = open(filename, 'r')
        for line in f.readlines():
            if (line.lstrip().startswith('#') or line.lstrip().rstrip()==""):
                # comment or blank line, ignore
                pass
            elif(line.lstrip().startswith('!')):
                # header data
                t = line.lstrip()[1:].lstrip()
                k = t[0:1]
                # print("t=%s;k=%s" % (t, k))
                if(t.find('~>')>=0):
                    # FADE TO
                    v = t[t.find("~>")+2:].lstrip().rstrip()
                    v=int(v,16)
                    # split the 24-bit hex value into [r, g, b]
                    c = [v >> 16, (v & 0x00ff00) >> 8 , v & 0x0000ff]
                    self.color_map[k] = {'color': c, 'fade': True}
                elif(t.find('=>')>=0):
                    # IMMEDIATE COLOR CHANGE
                    v = t[t.find("=>")+2:].lstrip().rstrip()
                    if(v=='None'):
                        self.color_map[k] = None
                    else:
                        v=int(v,16)
                        c = [v >> 16, (v & 0x00ff00) >> 8 , v & 0x0000ff]
                        self.color_map[k] = {'color': c, 'fade': False}
                elif(t.find('=')>0):
                    # RGB Show Parameter
                    k = t[:t.find("=")-1].lstrip().rstrip()
                    v = t[t.find("=")+1:].lstrip().rstrip()
                    if(k=="time"):
                        self.time = int(v)
                        pass
                    elif(k=="repeat"):
                        tmp = v.lower()
                        self.repeat = (tmp =='true' or tmp == '1')
                        pass
                    elif(k=="hold"):
                        tmp = v.lower()
                        self.hold = (tmp =='true' or tmp == '1')
                        pass
                    else:
                        raise ValueError, "Could not parse RgbShow header line: '%s'" % line
                else:
                    # bad line!
                    raise ValueError, "Could not parse RgbShow header line: '%s'" % line
                pass
            else:
                # track data
                t = RgbTrack(line, self.color_map, self)
                self.tracks.append(t)
                # all tracks share the same length; the last one wins
                self.length = t.length
        f.close()

    def debug_show(self):
        """Log the show's parameters, colour map and tracks."""
        self.logger.info("Show Parameters:")
        self.logger.info("  hold: %s" % self.hold)
        self.logger.info("  repeat: %s" % self.repeat)
        self.logger.info("  time: %s" % self.time)
        self.logger.info("Show Color Map:")
        for k,v in self.color_map.iteritems():
            self.logger.info("%s:%s" % (k, v))
        self.logger.info("Show Tracks:")
        for t in self.tracks:
            self.logger.info("%s: <%s>" % (t.name,str(t)))

    def stop(self):
        """Mark the show as over; update() will no longer be scheduled."""
        self.shows_over = True

    def restart(self):
        """Rewind to the first frame and clear the stopped flag."""
        self.now = 0
        self.shows_over = False
        for t in self.tracks:
            # t.fn([0,0,0], 0) # set this lamp's color to black
            # t.device.enable() # turn on the device (schedule-wise)
            pass

    def update(self):
        """Advance one frame.  Returns True while the show remains live."""
        # self.logger.debug("Show '%s' received update(%d/%d)" % (self.key, self.now, self.length))
        if(self.now < self.length):
            for track in self.tracks:
                track.update(self.now)
            self.now += 1
            return True
        else:
            # if(self.now >= self.length):
            # show is done playing through once, but is it *done*
            if(self.callback is not None and not self.callback_fired):
                self.logger.info("show '%s' is done; calling callback" % self.key)
                self.callback(self.callback_param)
                self.callback_fired = True
            if(self.repeat):
                self.now = 0
                self.callback_fired = False
                return True
            if(self.hold):
                # reset back to the last frame
                self.now = self.length-1
                return True
            return False

    def is_device_in_use(self, name):
        """Return True if an enabled track in this show drives ``name``."""
        for t in self.tracks:
            if(t.name == name and t.enabled):
                return True
        return False

    def get_device_list(self):
        """ returns a list of gameitems that are in use by this show """
        devices = []
        for t in self.tracks:
            # if(t.enabled):
            devices.append(t.device)
        return devices

    def set_callback(self, callback_fn, callback_param):
        """Set a completion callback; it fires once per play-through."""
        self.callback = callback_fn;
        self.callback_fired = False
        self.callback_param = callback_param
class RgbTrack(object):
    """One device's timeline within an RgbShow.

    Parses a track line of the form ``[type:] name | <frame chars>`` and
    run-length-encodes the frame characters into a list of RgbCommand
    objects (one per run start, None elsewhere).
    """

    def __str__(self):
        return "".join([str(t)+":"+str(v)+";" for t,v in enumerate(self.data)])

    def update(self, now):
        """Execute the command (if any) stored for frame ``now``."""
        # self.logger.debug("Track '%s' received update(%d) [length of the track is (%d)]" % (self.name, now, self.length))
        if(self.enabled):
            if(now >= len(self.data)):
                raise ValueError, "Track '%s' received index '%d' beyond the length of the track (%d)" % (self.name, now, self.length)
            cmd = self.data[now]
            if(cmd is not None):
                cmd.process_command()
                self.device.enable()

    def __init__(self, line, color_map, show):
        self.logger = logging.getLogger("rgbTrack")
        self.data = []
        self.device = None
        self.fn = None
        self.enabled = True # a track may be disabled if it's device is in use by another playing show
        #print line
        line_re = re.compile('\s*(?P<type>\S+\:)?\s*(?P<name>\S+)\s*\| (?P<data>.*)$')
        m = line_re.match(line)
        if m is None:
            raise ValueError("Regexp didn't match on track line: " + line)
        device_type = m.group('type')
        self.name = m.group('name')
        # build function map
        if(device_type is None):
            # auto-detect: try LEDs, then lamps, then WS281x RGB devices
            if(self.name in show.game.leds):
                device_type = "led"
                self.device = show.game.leds[self.name]
            elif(self.name in show.game.lamps):
                device_type = "lamp"
                self.device = show.game.lamps[self.name]
            elif(hasattr(show.game, 'wsRGBs') and self.name in show.game.wsRGBs):
                device_type = "rgb"
                self.device = show.game.wsRGBs[self.name]
            else:
                raise ValueError, "RGB Track created for unknown device named '%s'" % self.name
        # pick the colour-setting function appropriate to the device type
        if(device_type == "lamp"):
            fn = show.game.lamps[self.name].set_color
        elif(device_type == "led"):
            fn = show.game.leds[self.name].color_with_fade
        elif(device_type == "rgb"):
            fn = show.game.wsRGBs[self.name].set_color
        else:
            raise ValueError, "RGB Track created for unknown device named '%s'" % self.name
        self.fn = fn
        self.device_type = device_type
        data = m.group('data')
        # one slot per frame; only run starts hold a command
        self.data = [None]* len(data)
        last_color = None
        last_run_starts = 0
        last_run_length = 0
        for i in range(0,len(data),1):
            this_color = data[i]
            if(this_color!=last_color):
                # end prev run, start new run
                if(last_color is not None):
                    # save old run
                    cdata = color_map[last_color]
                    if(cdata is None):
                        c = None
                    elif(cdata['fade']):
                        # fade duration spans the whole run
                        c = RgbCommand(self.name, fn, cdata['color'], last_run_length*show.time)
                    else:
                        c = RgbCommand(self.name, fn, cdata['color'], 0)
                    self.data[last_run_starts] = c
                # start new run
                last_run_length = 0
                last_run_starts = i
            if(i==len(data)-1): # last slot
                if(last_run_length==0) or (last_color==this_color): # just started a new run; so store this run
                    cdata = color_map[this_color]
                    if(cdata is None):
                        c = None
                    elif(cdata['fade']):
                        c = RgbCommand(self.name, fn, cdata['color'], last_run_length*show.time)
                    else:
                        c = RgbCommand(self.name, fn, cdata['color'], 0)
                    self.data[last_run_starts] = c
            else:
                # continuing run
                last_run_length += 1
            last_color = this_color
        self.length = len(data)
class RgbCommand(object):
    """A single colour-change instruction for one device.

    Bundles the device's colour-setting function with the target colour
    and the transition/fade time (milliseconds).
    """

    def __init__(self, name, fn, new_color, transition_time):
        self.name = name
        self.fn = fn
        self.new_color = new_color
        self.time = transition_time

    def __str__(self):
        return "[name={0} color='{1}';time='{2}']".format(
            self.name, self.new_color, self.time)

    def process_command(self):
        # print(" doing %s" % str(self))
        # apply the colour via the stored device function
        self.fn(self.new_color, self.time)
"repo_name": "mjocean/PyProcGameHD-SkeletonGame",
"path": "procgame/modes/rgbshow.py",
"copies": "1",
"size": "15845",
"license": "mit",
"hash": -1689324641747687400,
"line_mean": 36.9090909091,
"line_max": 134,
"alpha_frac": 0.505080467,
"autogenerated": false,
"ratio": 3.9972250252270434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971826952220677,
"avg_score": 0.0060957080012732905,
"num_lines": 418
} |
'''An example application that uses ddp_asyncio's MeteorFilesUploader to upload a file to an instance of Meteor-Files' demo application (https://github.com/VeliovGroup/Meteor-Files-Demos)'''
import asyncio
import sys
import logging
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
from ddp_asyncio import DDPClient
from ddp_asyncio.extras import MeteorFilesUploader
class DemoUploader:
    """Uploads one file to a Meteor-Files demo server and reports progress."""

    def __init__(self, address):
        self.client = DDPClient(address)
        self.uploader = MeteorFilesUploader(self.client, 'uploadedFiles')

    async def go(self, filename, loop):
        """Connect, start the upload, and poll its progress once a second."""
        await self.client.connect()

        print('Starting upload...')
        metadata = {
            'blamed': 0,
            'secured': False,
            'unlisted': False
        }
        upload = self.uploader.start_upload(filename, loop = loop, meta = metadata)

        while not upload.complete:
            print('{:.1f}% uploaded'.format(upload.progress * 100))
            await asyncio.sleep(1)

        print('Upload complete. File ID: {}'.format(upload._id))
# Usage: python example_upload_file.py <server-address> <path-to-file>
du = DemoUploader(sys.argv[1])

loop = asyncio.get_event_loop()
loop.run_until_complete(du.go(sys.argv[2], loop))
| {
"repo_name": "hunternet93/ddp_asyncio",
"path": "example_upload_file.py",
"copies": "1",
"size": "1245",
"license": "mit",
"hash": -3758121675330581000,
"line_mean": 31.7631578947,
"line_max": 190,
"alpha_frac": 0.6546184739,
"autogenerated": false,
"ratio": 3.772727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9817662944279204,
"avg_score": 0.021936560469613682,
"num_lines": 38
} |
'''An example application that uses ddp_asyncio to retrieve all public to-do lists from Meteor's reference Todos application (https://github.com/meteor/todos)'''
import asyncio
import sys
from ddp_asyncio import DDPClient
class TodoRetriever:
    """Connects to a Todos server and prints every public list's tasks."""

    def __init__(self, address):
        self.client = DDPClient(address)

    async def go(self):
        """Fetch the public lists, then print each list's todos in turn."""
        await self.client.connect()

        lists = self.client.get_collection('lists')
        todos = self.client.get_collection('todos')

        subscription = await self.client.subscribe('lists.public')
        await subscription.wait()

        for todo_list in sorted(lists.values(), key = lambda l: l.name):
            subscription = await self.client.subscribe(
                'todos.inList', {'listId': todo_list._id})
            await subscription.wait()

            print(todo_list.name)
            for todo in todos.values():
                if todo.listId != todo_list._id:
                    continue
                print(' [{}] {}'.format('X' if todo.checked else ' ', todo.text))
            print()
# Usage: python example_retrieve_todos.py <server-address>
td = TodoRetriever(sys.argv[1])
asyncio.get_event_loop().run_until_complete(td.go())
| {
"repo_name": "hunternet93/ddp_asyncio",
"path": "example_retrieve_todos.py",
"copies": "1",
"size": "1144",
"license": "mit",
"hash": 7021850546298954000,
"line_mean": 31.6857142857,
"line_max": 161,
"alpha_frac": 0.5839160839,
"autogenerated": false,
"ratio": 3.7880794701986753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48719955540986754,
"avg_score": null,
"num_lines": null
} |
'''An example application that uses ddp_asyncio to watch all public to-do lists from Meteor's reference Todos application (https://github.com/meteor/todos)'''
import asyncio
import sys
from ddp_asyncio import DDPClient
class TodoWatcher:
    """Watches a Meteor Todos server and prints list/task changes live."""

    def __init__(self, address):
        self.client = DDPClient(address)
        # subscriptions to 'todos.inList', keyed by list id, so each one
        # can be cancelled when its list is removed
        self.lists_subs = {}

    async def watch_lists(self, lists, lists_q):
        """Consume change events for the 'lists' collection forever."""
        while True:
            event = await lists_q.get()

            if event.type == 'added':
                print('List created: "{}"'.format(event.fields.name))
                sub = await self.client.subscribe('todos.inList', {'listId': event._id})
                self.lists_subs[event._id] = sub

            elif event.type == 'changed':
                if event.fields.get('name'):
                    print('List renamed to "{}"'.format(event.fields.name))

            elif event.type == 'removed':
                print('List deleted: "{}"'.format(event._id))
                await self.client.unsubscribe(self.lists_subs[event._id])
                del self.lists_subs[event._id]

    async def watch_todos(self, todos, todos_q):
        """Consume change events for the 'todos' collection forever."""
        while True:
            event = await todos_q.get()

            if event.type == 'added':
                print('Task created: "{}"'.format(event.fields.text))

            elif event.type == 'changed':
                # Bug fix: the original tested fields.get('name'), but todo
                # documents carry their label in 'text', so task edits were
                # never reported.
                if event.fields.get('text'):
                    print('Task changed to "{}"'.format(event.fields.text))

                # checked is tri-state (absent / True / False), so test for
                # presence explicitly ('is not None' replaces 'not x == None')
                if event.fields.get('checked') is not None:
                    if event.fields.checked:
                        print('Task marked complete: "{}"'.format(todos[event._id].text))
                    else:
                        print('Task marked incomplete: "{}"'.format(todos[event._id].text))

            elif event.type == 'removed':
                print('Task deleted: "{}"'.format(event._id))

    async def go(self, loop):
        """Connect (and reconnect) to the server, wiring up the watchers."""
        print('Connecting to server...')

        while True:
            try:
                await self.client.connect()
            # Bug fix: 'except A or B' evaluates to A alone, so reset
            # errors were never caught; a tuple catches both.
            except (ConnectionRefusedError, ConnectionResetError):
                await asyncio.sleep(1)
                continue

            print('Connected to server.')

            lists = self.client.get_collection('lists')
            lists_q = lists.get_queue()
            lists_task = loop.create_task(self.watch_lists(lists, lists_q))

            todos = self.client.get_collection('todos')
            todos_q = todos.get_queue()
            todos_task = loop.create_task(self.watch_todos(todos, todos_q))

            sub = await self.client.subscribe('lists.public')
            await sub.wait()

            await self.client.disconnection()
            print('Lost connection to server, attempting to reestablish...')
            lists_task.cancel()
            todos_task.cancel()
# Script entry point: watch the Todos server whose address is given on the
# command line, running until interrupted.
loop = asyncio.get_event_loop()
td = TodoWatcher(sys.argv[1])
loop.run_until_complete(td.go(loop))
| {
"repo_name": "hunternet93/ddp_asyncio",
"path": "example_watch_todos.py",
"copies": "1",
"size": "3159",
"license": "mit",
"hash": -1807840557446850600,
"line_mean": 35.3103448276,
"line_max": 158,
"alpha_frac": 0.5163026274,
"autogenerated": false,
"ratio": 4.375346260387811,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018425291831005663,
"num_lines": 87
} |
"""An example application using Confuse for configuration."""
from __future__ import division, absolute_import, print_function
import confuse
import argparse
# Validation template: declares the expected type/shape of every
# configuration value, consumed by config.get(template) in main() below.
template = {
    'library': confuse.Filename(),
    'import_write': confuse.OneOf([bool, 'ask', 'skip']),
    'ignore': confuse.StrSeq(),
    'plugins': list,
    'paths': {
        'directory': confuse.Filename(),
        # Resolved relative to the 'directory' value above.
        'default': confuse.Filename(relative_to='directory'),
    },
    'servers': confuse.Sequence(
        {
            'hostname': str,
            'options': confuse.StrSeq(),
        }
    )
}

# Application-wide configuration object rooted at the 'ConfuseExample' name.
config = confuse.LazyConfig('ConfuseExample', __name__)
def main():
    """Parse command-line flags, overlay them on the configuration, and
    print the resulting (validated) values."""
    arg_parser = argparse.ArgumentParser(description='example Confuse program')
    arg_parser.add_argument('--library', '-l', dest='library', metavar='LIBPATH',
                            help='library database file')
    arg_parser.add_argument('--directory', '-d', dest='paths.directory',
                            metavar='DIRECTORY',
                            help='destination music directory')
    arg_parser.add_argument('--verbose', '-v', dest='verbose',
                            action='store_true',
                            help='print debugging messages')
    # dots=True lets 'paths.directory' address the nested config value.
    config.set_args(arg_parser.parse_args(), dots=True)

    print('configuration directory is', config.config_dir())

    # Use a boolean flag and the transient overlay.
    if config['verbose']:
        print('verbose mode')
        config['log']['level'] = 2
    else:
        config['log']['level'] = 0
    print('logging level is', config['log']['level'].get(int))

    # Validate/convert everything at once against the module-level template.
    valid = config.get(template)

    # Some validated/converted values.
    print('library is', valid.library)
    print('directory is', valid.paths.directory)
    print('paths.default is', valid.paths.default)
    print('servers are', [srv.hostname for srv in valid.servers])
| {
"repo_name": "sampsyo/confuse",
"path": "example/__init__.py",
"copies": "2",
"size": "1825",
"license": "mit",
"hash": 6392642925575807000,
"line_mean": 30.4655172414,
"line_max": 79,
"alpha_frac": 0.6076712329,
"autogenerated": false,
"ratio": 4.185779816513762,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5793451049413761,
"avg_score": null,
"num_lines": null
} |
"""An example demomstrating a derived StatusMsg class.
Created by nomuus <"".join(["adus@um.ex", "te@rn.um"]).replace(".", "").replace("@", "") + "@" + "nomuus.com">
This will print specially formatted file path status messages
using an overridden StatusMsg.format() virtual method.
"""
__version__ = "1.0"
__copyright__ = """Copyright (c) 2011, nomuus. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
from statusmsg import StatusMsg
class path_status(StatusMsg):
    """StatusMsg subclass that renders '(count): path' lines, shortening
    over-long paths in the middle ('head...<sep>tail') so each line fits
    within max_width columns."""

    def format(self, kwmsg):
        """Format one status line from kwmsg, a dict with keys 'msg'
        (the path string) and 'count' (a running counter)."""
        msg = "(%d): " % kwmsg["count"]
        dot = "..."
        sep = os.sep
        t = kwmsg["msg"]
        # Account for any fixed label prefix configured on the StatusMsg;
        # label_width() returning -1 means no label is set.
        if self.label_width() > -1:
            label_len = self.label_width()
        else:
            label_len = 0
        max_width = self.max_width
        msg_len = label_len + len(msg)
        tmp_len = msg_len + len(t)
        dot_len = len(dot)
        sep_len = len(sep)
        if tmp_len > max_width:
            # Too long: keep the basename and as much of the head as fits,
            # joined by '...<sep>'; if even that cannot fit, hard-truncate.
            head, tail = os.path.split(t)
            y = max_width - msg_len - (dot_len + sep_len) - len(tail)
            if y > 0:
                path_short = head[:y] + dot + sep + tail
            else:
                path_short = t[:max_width - msg_len]
        elif tmp_len < max_width:
            path_short = t
        else:
            # NOTE(review): a path that fits *exactly* (tmp_len == max_width)
            # is dropped entirely; 'path_short = t' looks like the intended
            # behavior -- confirm before changing.
            path_short = ""
        return "%s%s" % (msg, path_short)

    def set_max_width(self, width):
        """Set the maximum line width; non-positive widths fall back to 79."""
        if width < 1:
            self.max_width = 79
        else:
            self.max_width = width
###########################################################################
def _fake_path_generator(width, max_paths):
if width < 1:
width = 79
if max_paths < 1:
max_paths = 1000
for x in range(0, max_paths):
s = "path" * width
yield "%s%s%s%d.xyz" % (os.sep, s, os.sep, x)
###########################################################################
def main():
    """Demo: stream status lines for 6000 long fake paths, then 6000 short
    ones, through a path_status reporter writing to stdout."""
    reporter = path_status(sys.stdout)
    reporter.set_max_width(79)
    status = {"msg": "", "count": 0}

    reporter.label("Paths over 79 characters ")
    for fake_path in _fake_path_generator(width=100, max_paths=6000):
        status["msg"] = fake_path
        status["count"] += 1
        reporter.write(status)
    reporter.flush()
    reporter.label("Paths over 79 characters: Completed\n")

    reporter.label("Paths under 79 characters ")
    status["count"] = 0
    for fake_path in _fake_path_generator(width=1, max_paths=6000):
        status["msg"] = fake_path
        status["count"] += 1
        reporter.write(status)
    reporter.flush()
    reporter.label("Paths under 79 characters: Completed\n")
###########################################################################
if __name__ == "__main__":
main()
| {
"repo_name": "nomuus/statusmsg",
"path": "example_pathstatus.py",
"copies": "1",
"size": "4228",
"license": "bsd-3-clause",
"hash": 7617839338237230000,
"line_mean": 34.2416666667,
"line_max": 110,
"alpha_frac": 0.5910596026,
"autogenerated": false,
"ratio": 4.042065009560229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133124612160229,
"avg_score": null,
"num_lines": null
} |
"""An example file showing how to make a geometry.
This particular example creates a 3x3 geometry, with 8 regular pins and one
Gd-157 2 wt-percent enriched. All pins are segmented.
"""
from collections import OrderedDict
import math
import numpy as np
import openmc
def density_to_mat(dens_dict):
    """Generates an OpenMC material from a dictionary of number densities.

    Parameters
    ----------
    dens_dict : dict
        Dictionary mapping nuclide names to densities

    Returns
    -------
    openmc.Material
        The OpenMC material filled with nuclides.
    """
    material = openmc.Material()
    # Scale each density by 1e-24 -- presumably converting atom/cm^3 to
    # atom/barn-cm (1 barn = 1e-24 cm^2); confirm against callers.
    for nuclide, density in dens_dict.items():
        material.add_nuclide(nuclide, 1.0e-24 * density)
    material.set_density('sum')
    return material
def generate_initial_number_density():
    """ Generates initial number density.

    These results were from a CASMO5 run in which the gadolinium pin was
    loaded with 2 wt percent of Gd-157.

    Returns
    -------
    temperature : OrderedDict
        Material name -> temperature [K].
    sab : OrderedDict
        Material name -> S(a,b) table name (only for materials needing one).
    initial_density : OrderedDict
        Material name -> {nuclide name: number density}.
    burn : OrderedDict
        Material name -> whether the material is depletable.
    """
    # Concentration to be used for all fuel pins
    fuel_dict = OrderedDict()
    fuel_dict['U235'] = 1.05692e21
    fuel_dict['U234'] = 1.00506e19
    fuel_dict['U238'] = 2.21371e22
    fuel_dict['O16'] = 4.62954e22
    fuel_dict['O17'] = 1.127684e20
    # Trace quantities so these depletion chains exist from the start.
    fuel_dict['I135'] = 1.0e10
    fuel_dict['Xe135'] = 1.0e10
    fuel_dict['Xe136'] = 1.0e10
    fuel_dict['Cs135'] = 1.0e10
    fuel_dict['Gd156'] = 1.0e10
    fuel_dict['Gd157'] = 1.0e10
    # fuel_dict['O18'] = 9.51352e19 # Does not exist in ENDF71, merged into 17

    # Concentration to be used for the gadolinium fuel pin
    fuel_gd_dict = OrderedDict()
    fuel_gd_dict['U235'] = 1.03579e21
    fuel_gd_dict['U238'] = 2.16943e22
    fuel_gd_dict['Gd156'] = 3.95517E+10
    fuel_gd_dict['Gd157'] = 1.08156e20
    fuel_gd_dict['O16'] = 4.64035e22
    # BUG FIX: the four trace nuclides below were assigned to fuel_dict
    # (pure no-ops, since fuel_dict already carries identical values above)
    # instead of fuel_gd_dict, leaving the gadolinium pin without its trace
    # fission products.
    fuel_gd_dict['I135'] = 1.0e10
    fuel_gd_dict['Xe136'] = 1.0e10
    fuel_gd_dict['Xe135'] = 1.0e10
    fuel_gd_dict['Cs135'] = 1.0e10

    # Concentration to be used for cladding
    # (duplicate Cr re-assignments from the original removed; they set the
    # same values twice)
    clad_dict = OrderedDict()
    clad_dict['O16'] = 3.07427e20
    clad_dict['O17'] = 7.48868e17
    clad_dict['Cr50'] = 3.29620e18
    clad_dict['Cr52'] = 6.35639e19
    clad_dict['Cr53'] = 7.20763e18
    clad_dict['Cr54'] = 1.79413e18
    clad_dict['Fe54'] = 5.57350e18
    clad_dict['Fe56'] = 8.74921e19
    clad_dict['Fe57'] = 2.02057e18
    clad_dict['Fe58'] = 2.68901e17
    clad_dict['Ni58'] = 2.51631e19
    clad_dict['Ni60'] = 9.69278e18
    clad_dict['Ni61'] = 4.21338e17
    clad_dict['Ni62'] = 1.34341e18
    clad_dict['Ni64'] = 3.43127e17
    clad_dict['Zr90'] = 2.18320e22
    clad_dict['Zr91'] = 4.76104e21
    clad_dict['Zr92'] = 7.27734e21
    clad_dict['Zr94'] = 7.37494e21
    clad_dict['Zr96'] = 1.18814e21
    clad_dict['Sn112'] = 4.67352e18
    clad_dict['Sn114'] = 3.17992e18
    clad_dict['Sn115'] = 1.63814e18
    clad_dict['Sn116'] = 7.00546e19
    clad_dict['Sn117'] = 3.70027e19
    clad_dict['Sn118'] = 1.16694e20
    clad_dict['Sn119'] = 4.13872e19
    clad_dict['Sn120'] = 1.56973e20
    clad_dict['Sn122'] = 2.23076e19
    clad_dict['Sn124'] = 2.78966e19

    # Gap concentration
    # Funny enough, the example problem uses air.
    gap_dict = OrderedDict()
    gap_dict['O16'] = 7.86548e18
    gap_dict['O17'] = 2.99548e15
    gap_dict['N14'] = 3.38646e19
    gap_dict['N15'] = 1.23717e17

    # Concentration to be used for coolant
    # No boron
    cool_dict = OrderedDict()
    cool_dict['H1'] = 4.68063e22
    cool_dict['O16'] = 2.33427e22
    cool_dict['O17'] = 8.89086e18

    # Store these dictionaries in the initial conditions dictionary
    initial_density = OrderedDict()
    initial_density['fuel_gd'] = fuel_gd_dict
    initial_density['fuel'] = fuel_dict
    initial_density['gap'] = gap_dict
    initial_density['clad'] = clad_dict
    initial_density['cool'] = cool_dict

    # Set up libraries to use
    temperature = OrderedDict()
    sab = OrderedDict()

    # Toggle betweeen MCNP and NNDC data
    MCNP = False
    if MCNP:
        temperature['fuel_gd'] = 900.0
        temperature['fuel'] = 900.0
        # We approximate temperature of everything as 600K, even though it was
        # actually 580K.
        temperature['gap'] = 600.0
        temperature['clad'] = 600.0
        temperature['cool'] = 600.0
    else:
        temperature['fuel_gd'] = 293.6
        temperature['fuel'] = 293.6
        temperature['gap'] = 293.6
        temperature['clad'] = 293.6
        temperature['cool'] = 293.6

    # Thermal scattering data for hydrogen bound in water.
    sab['cool'] = 'c_H_in_H2O'

    # Set up burnable materials
    burn = OrderedDict()
    burn['fuel_gd'] = True
    burn['fuel'] = True
    burn['gap'] = False
    burn['clad'] = False
    burn['cool'] = False

    return temperature, sab, initial_density, burn
def segment_pin(n_rings, n_wedges, r_fuel, r_gap, r_clad):
    """ Calculates a segmented pin.

    Separates a pin with n_rings and n_wedges. All cells have equal volume.
    Pin is centered at origin.

    Parameters
    ----------
    n_rings : int
        Number of equal-volume radial rings in the fuel.
    n_wedges : int
        Number of azimuthal wedges per ring.
    r_fuel, r_gap, r_clad : float
        Outer radii of the fuel, gap, and cladding regions.

    Returns
    -------
    fuel_u : openmc.Universe
        Universe containing the segmented pin plus gap/clad/moderator cells.
    v_segment, v_gap, v_clad : float
        2-D volumes (areas) of one fuel segment, the gap, and the cladding.
    """
    # Calculate all the volumes of interest
    v_fuel = math.pi * r_fuel**2
    v_gap = math.pi * r_gap**2 - v_fuel
    v_clad = math.pi * r_clad**2 - v_fuel - v_gap
    v_ring = v_fuel / n_rings
    v_segment = v_ring / n_wedges

    # Compute ring radiuses: radius i encloses (i+1) equal-volume rings.
    r_rings = np.zeros(n_rings)
    for i in range(n_rings):
        r_rings[i] = math.sqrt(1.0/(math.pi) * v_ring * (i+1))

    # Compute thetas: n_wedges+1 evenly spaced angles closing the circle.
    theta = np.linspace(0, 2*math.pi, n_wedges + 1)

    # Compute surfaces: concentric cylinders plus radial planes through the
    # origin (each plane's normal is (cos t, sin t)).
    fuel_rings = [openmc.ZCylinder(x0=0, y0=0, R=r_rings[i])
                  for i in range(n_rings)]
    fuel_wedges = [openmc.Plane(A=math.cos(theta[i]), B=math.sin(theta[i]))
                   for i in range(n_wedges)]

    gap_ring = openmc.ZCylinder(x0=0, y0=0, R=r_gap)
    clad_ring = openmc.ZCylinder(x0=0, y0=0, R=r_clad)

    # Create cells
    fuel_cells = []
    if n_wedges == 1:
        # No azimuthal segmentation: one annular cell per ring.
        for i in range(n_rings):
            cell = openmc.Cell(name='fuel')
            if i == 0:
                cell.region = -fuel_rings[0]
            else:
                cell.region = +fuel_rings[i-1] & -fuel_rings[i]
            fuel_cells.append(cell)
    else:
        # Ring-by-wedge segmentation; the last wedge wraps around back to
        # plane 0, hence the special-cased region expressions.
        for i in range(n_rings):
            for j in range(n_wedges):
                cell = openmc.Cell(name='fuel')
                if i == 0:
                    if j != n_wedges-1:
                        cell.region = (-fuel_rings[0]
                                       & +fuel_wedges[j]
                                       & -fuel_wedges[j+1])
                    else:
                        cell.region = (-fuel_rings[0]
                                       & +fuel_wedges[j]
                                       & -fuel_wedges[0])
                else:
                    if j != n_wedges-1:
                        cell.region = (+fuel_rings[i-1]
                                       & -fuel_rings[i]
                                       & +fuel_wedges[j]
                                       & -fuel_wedges[j+1])
                    else:
                        cell.region = (+fuel_rings[i-1]
                                       & -fuel_rings[i]
                                       & +fuel_wedges[j]
                                       & -fuel_wedges[0])
                fuel_cells.append(cell)

    # Gap ring
    gap_cell = openmc.Cell(name='gap')
    gap_cell.region = +fuel_rings[-1] & -gap_ring
    fuel_cells.append(gap_cell)

    # Clad ring
    clad_cell = openmc.Cell(name='clad')
    clad_cell.region = +gap_ring & -clad_ring
    fuel_cells.append(clad_cell)

    # Moderator: everything outside the cladding.
    mod_cell = openmc.Cell(name='cool')
    mod_cell.region = +clad_ring
    fuel_cells.append(mod_cell)

    # Form universe
    fuel_u = openmc.Universe()
    fuel_u.add_cells(fuel_cells)

    return fuel_u, v_segment, v_gap, v_clad
def generate_geometry(n_rings, n_wedges):
    """ Generates example geometry.

    This function creates the initial geometry, a 9 pin reflective problem.
    One pin, containing gadolinium, is discretized into sectors.

    In addition to what one would do with the general OpenMC geometry code, it
    is necessary to create a dictionary, volume, that maps a cell ID to a
    volume. Further, by naming cells the same as the above materials, the code
    can automatically handle the mapping.

    Parameters
    ----------
    n_rings : int
        Number of rings to generate for the geometry
    n_wedges : int
        Number of wedges to generate for the geometry

    Returns
    -------
    geometry : openmc.Geometry
        The 3x3 reflective lattice geometry.
    volume : dict
        Cell-name -> 2-D volume (area) of each material region.
    mapping : list of str
        Material type for each of the 9 pins, row-major.
    lower_left, upper_right : list of float
        Corners of the problem's bounding box.
    """
    pitch = 1.26197
    r_fuel = 0.412275
    r_gap = 0.418987
    r_clad = 0.476121
    n_pin = 3

    # This table describes the 'fuel' to actual type mapping
    # It's not necessary to do it this way.  Just adjust the initial conditions
    # below.  The center pin (index 4) is the gadolinium pin.
    mapping = ['fuel', 'fuel', 'fuel',
               'fuel', 'fuel_gd', 'fuel',
               'fuel', 'fuel', 'fuel']

    # Form pin cell
    fuel_u, v_segment, v_gap, v_clad = segment_pin(n_rings, n_wedges, r_fuel, r_gap, r_clad)

    # Form lattice: every position holds the same segmented pin universe.
    all_water_c = openmc.Cell(name='cool')
    all_water_u = openmc.Universe(cells=(all_water_c, ))

    lattice = openmc.RectLattice()
    lattice.pitch = [pitch]*2
    lattice.lower_left = [-pitch*n_pin/2, -pitch*n_pin/2]
    lattice_array = [[fuel_u for i in range(n_pin)] for j in range(n_pin)]
    lattice.universes = lattice_array
    lattice.outer = all_water_u

    # Bound universe with reflective planes on all six sides.
    x_low = openmc.XPlane(x0=-pitch*n_pin/2, boundary_type='reflective')
    x_high = openmc.XPlane(x0=pitch*n_pin/2, boundary_type='reflective')
    y_low = openmc.YPlane(y0=-pitch*n_pin/2, boundary_type='reflective')
    y_high = openmc.YPlane(y0=pitch*n_pin/2, boundary_type='reflective')
    z_low = openmc.ZPlane(z0=-10, boundary_type='reflective')
    z_high = openmc.ZPlane(z0=10, boundary_type='reflective')

    # Compute bounding box
    lower_left = [-pitch*n_pin/2, -pitch*n_pin/2, -10]
    upper_right = [pitch*n_pin/2, pitch*n_pin/2, 10]

    root_c = openmc.Cell(fill=lattice)
    root_c.region = (+x_low & -x_high
                     & +y_low & -y_high
                     & +z_low & -z_high)
    root_u = openmc.Universe(universe_id=0, cells=(root_c, ))
    geometry = openmc.Geometry(root_u)

    # Coolant area = pin-cell area minus everything inside the cladding.
    v_cool = pitch**2 - (v_gap + v_clad + n_rings * n_wedges * v_segment)

    # Store volumes for later usage
    volume = {'fuel': v_segment, 'gap':v_gap, 'clad':v_clad, 'cool':v_cool}

    return geometry, volume, mapping, lower_left, upper_right
def generate_problem(n_rings=5, n_wedges=8):
    """ Merges geometry and materials.

    This function initializes the materials for each cell using the dictionaries
    provided by generate_initial_number_density. It is assumed a cell named
    'fuel' will have further region differentiation (see mapping).

    Parameters
    ----------
    n_rings : int, optional
        Number of rings to generate for the geometry
    n_wedges : int, optional
        Number of wedges to generate for the geometry

    Returns
    -------
    geometry : openmc.Geometry
        Geometry with every cell filled with its material.
    lower_left, upper_right : list of float
        Corners of the problem's bounding box.
    """
    # Get materials dictionary, geometry, and volumes
    temperature, sab, initial_density, burn = generate_initial_number_density()
    geometry, volume, mapping, lower_left, upper_right = generate_geometry(n_rings, n_wedges)

    # Apply distribmats, fill geometry
    cells = geometry.root_universe.get_all_cells()
    for cell_id in cells:
        cell = cells[cell_id]
        if cell.name == 'fuel':
            # Distributed fill: one material per cell instance, with the
            # type of each instance taken from the 'mapping' table.
            omc_mats = []

            for cell_type in mapping:
                omc_mat = density_to_mat(initial_density[cell_type])

                if cell_type in sab:
                    omc_mat.add_s_alpha_beta(sab[cell_type])
                omc_mat.temperature = temperature[cell_type]
                omc_mat.depletable = burn[cell_type]
                omc_mat.volume = volume['fuel']

                omc_mats.append(omc_mat)

            cell.fill = omc_mats
        elif cell.name != '':
            # Single material; the cell name doubles as the material key.
            omc_mat = density_to_mat(initial_density[cell.name])

            if cell.name in sab:
                omc_mat.add_s_alpha_beta(sab[cell.name])
            omc_mat.temperature = temperature[cell.name]
            omc_mat.depletable = burn[cell.name]
            omc_mat.volume = volume[cell.name]

            cell.fill = omc_mat

    return geometry, lower_left, upper_right
| {
"repo_name": "johnnyliu27/openmc",
"path": "tests/regression_tests/deplete/example_geometry.py",
"copies": "2",
"size": "12328",
"license": "mit",
"hash": -644931347556570400,
"line_mean": 31.6137566138,
"line_max": 93,
"alpha_frac": 0.5804672291,
"autogenerated": false,
"ratio": 3.0598163315959295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9638482089163636,
"avg_score": 0.0003602943064586623,
"num_lines": 378
} |
"""An example flask application demonstrating server-sent events."""
from hashlib import sha1
from shutil import rmtree
from stat import S_ISREG, ST_CTIME, ST_MODE
import json
import os
import time
from PIL import Image, ImageFile
from gevent.event import AsyncResult
from gevent.queue import Empty, Queue
from gevent.timeout import Timeout
import flask
DATA_DIR = 'data'  # where uploaded thumbnails are stored (served statically)
KEEP_ALIVE_DELAY = 25  # seconds between SSE keep-alive messages
MAX_IMAGE_SIZE = 800, 600  # thumbnail bounding box (width, height)
MAX_IMAGES = 10  # only the newest MAX_IMAGES uploads are kept on disk
MAX_DURATION = 300  # maximum lifetime (seconds) of one SSE connection
APP = flask.Flask(__name__, static_folder=DATA_DIR)
# AsyncResults of gthreads currently waiting for the next upload broadcast.
BROADCAST_QUEUE = Queue()


try:  # Reset saved files on each start
    rmtree(DATA_DIR, True)
    os.mkdir(DATA_DIR)
except OSError:
    pass
def broadcast(message):
    """Deliver `message` to every gthread currently waiting in receive()."""
    waiting = []
    # Drain the queue without blocking; Empty signals we've got them all.
    while True:
        try:
            waiting.append(BROADCAST_QUEUE.get(block=False))
        except Empty:
            break
    print('Broadcasting {} messages'.format(len(waiting)))
    for async_result in waiting:
        async_result.set(message)
def receive():
    """Yield broadcast messages, emitting '' at least every KEEP_ALIVE_DELAY
    seconds, for at most MAX_DURATION seconds.

    Heroku doesn't notify when clients disconnect, so a maximum connection
    duration is imposed.
    """
    deadline = time.time() + MAX_DURATION
    waiter = None
    while time.time() < deadline:
        if waiter is None:
            # Register a fresh AsyncResult for broadcast() to fulfill.
            waiter = AsyncResult()
            BROADCAST_QUEUE.put(waiter)
        try:
            message = waiter.get(timeout=KEEP_ALIVE_DELAY)
        except Timeout:
            # Nothing broadcast within the window; emit a keep-alive.
            yield ''
        else:
            yield message
            waiter = None
def safe_addr(ip_addr):
    """Strip off the trailing two octets of the IP address."""
    visible = ip_addr.split('.')[:2]
    visible.extend(['xxx', 'xxx'])
    return '.'.join(visible)
def save_normalized_image(path, data):
    """Decode `data`, shrink it to fit MAX_IMAGE_SIZE, convert to RGB, and
    save it at `path`.

    Returns True on success, False if `data` is not a valid image.
    """
    image_parser = ImageFile.Parser()
    try:
        image_parser.feed(data)
        image = image_parser.close()
    except IOError:
        return False
    # BUG FIX: Image.ANTIALIAS has been an alias of Image.LANCZOS since
    # Pillow 2.7 and was removed in Pillow 10; use the canonical name so
    # this runs on current Pillow releases (identical filter).
    image.thumbnail(MAX_IMAGE_SIZE, Image.LANCZOS)
    if image.mode != 'RGB':
        image = image.convert('RGB')
    image.save(path)
    return True
def event_stream(client):
    """Generate SSE-formatted lines for `client` until receive() runs out."""
    reached_max_duration = False
    try:
        for message in receive():
            yield 'data: {}\n\n'.format(message)
        # receive() hit MAX_DURATION: we are closing, not the client.
        print('{} force closing stream'.format(client))
        reached_max_duration = True
    finally:
        if not reached_max_duration:
            print('{} disconnected from stream'.format(client))
@APP.route('/post', methods=['POST'])
def post():
    """Handle image uploads."""
    raw = flask.request.data
    # Name the file by content hash so identical uploads dedupe.
    digest = sha1(raw).hexdigest()
    target = os.path.join(DATA_DIR, '{}.jpg'.format(digest))
    message = json.dumps({'src': target,
                          'ip_addr': safe_addr(flask.request.access_route[0])})
    try:
        if save_normalized_image(target, raw):
            broadcast(message)  # Notify subscribers of completion
    except Exception as exception:  # Output errors
        return '{}'.format(exception)
    return 'success'
@APP.route('/stream')
def stream():
    """Handle long-lived SSE streams."""
    client = flask.request.access_route[0]
    body = event_stream(client)
    return flask.Response(body, mimetype='text/event-stream')
@APP.route('/')
def home():
    """Provide the primary view along with its javascript."""
    # Code adapted from: http://stackoverflow.com/questions/168409/
    # Collect (creation_time, path) for every regular file in DATA_DIR.
    image_infos = []
    for filename in os.listdir(DATA_DIR):
        filepath = os.path.join(DATA_DIR, filename)
        file_stat = os.stat(filepath)
        if S_ISREG(file_stat[ST_MODE]):
            image_infos.append((file_stat[ST_CTIME], filepath))

    # Keep HTML for the MAX_IMAGES newest files; delete the rest from disk.
    images = []
    for i, (_, path) in enumerate(sorted(image_infos, reverse=True)):
        if i >= MAX_IMAGES:
            os.unlink(path)
            continue
        images.append('<div><img alt="User uploaded image" src="{}" /></div>'
                      .format(path))
    # The page is %-formatted, which is why the CSS percent sign is doubled
    # ('%%') inside the template.
    return """
<!doctype html>
<title>Image Uploader</title>
<meta charset="utf-8" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jqueryui/1.10.1/jquery-ui.min.js"></script>
<link rel="stylesheet" href="//ajax.googleapis.com/ajax/libs/jqueryui/1.10.1/themes/vader/jquery-ui.css" />
<style>
  body {
    max-width: 800px;
    margin: auto;
    padding: 1em;
    background: black;
    color: #fff;
    font: 16px/1.6 menlo, monospace;
    text-align:center;
  }

  a {
    color: #fff;
  }

  .notice {
    font-size: 80%%;
  }

#drop {
    font-weight: bold;
    text-align: center;
    padding: 1em 0;
    margin: 1em 0;
    color: #555;
    border: 2px dashed #555;
    border-radius: 7px;
    cursor: default;
}

#drop.hover {
    color: #f00;
    border-color: #f00;
    border-style: solid;
    box-shadow: inset 0 3px 4px #888;
}

</style>
<h3>Image Uploader</h3>
<p>Upload an image for everyone to see. Valid images are pushed to everyone
currently connected, and only the most recent %s images are saved.</p>
<p>The complete source for this Flask web service can be found at:
<a href="https://github.com/bboe/flask-image-uploader">https://github.com/bboe/flask-image-uploader</a></p>
<p class="notice">Disclaimer: The author of this application accepts no responsibility for the
images uploaded to this web service. To discourage the submission of obscene images, IP
addresses with the last two octets hidden will be visibly associated with uploaded images.</p>
<noscript>Note: You must have javascript enabled in order to upload and
dynamically view new images.</noscript>
<fieldset>
  <p id="status">Select an image</p>
  <div id="progressbar"></div>
  <input id="file" type="file" />
  <div id="drop">or drop image here</div>
</fieldset>
<h3>Uploaded Images (updated in real-time)</h3>
<div id="images">%s</div>
<script>
  function sse() {
      var source = new EventSource('/stream');
      source.onmessage = function(e) {
          if (e.data == '')
              return;
          var data = $.parseJSON(e.data);
          var upload_message = 'Image uploaded by ' + data['ip_addr'];
          var image = $('<img>', {alt: upload_message, src: data['src']});
          var container = $('<div>').hide();
          container.append($('<div>', {text: upload_message}));
          container.append(image);
          $('#images').prepend(container);
          image.load(function(){
              container.show('blind', {}, 1000);
          });
      };
  }

  function file_select_handler(to_upload) {
      var progressbar = $('#progressbar');
      var status = $('#status');
      var xhr = new XMLHttpRequest();
      xhr.upload.addEventListener('loadstart', function(e1){
          status.text('uploading image');
          progressbar.progressbar({max: e1.total});
      });
      xhr.upload.addEventListener('progress', function(e1){
          if (progressbar.progressbar('option', 'max') == 0)
              progressbar.progressbar('option', 'max', e1.total);
          progressbar.progressbar('value', e1.loaded);
      });
      xhr.onreadystatechange = function(e1) {
          if (this.readyState == 4)  {
              if (this.status == 200)
                  var text = 'upload complete: ' + this.responseText;
              else
                  var text = 'upload failed: code ' + this.status;
              status.html(text + '<br/>Select an image');
              progressbar.progressbar('destroy');
          }
      };
      xhr.open('POST', '/post', true);
      xhr.send(to_upload);
  };

  function handle_hover(e) {
      e.originalEvent.stopPropagation();
      e.originalEvent.preventDefault();
      e.target.className = (e.type == 'dragleave' || e.type == 'drop') ? '' : 'hover';
  }

  $('#drop').bind('drop', function(e) {
      handle_hover(e);
      if (e.originalEvent.dataTransfer.files.length < 1) {
          return;
      }
      file_select_handler(e.originalEvent.dataTransfer.files[0]);
  }).bind('dragenter dragleave dragover', handle_hover);
  $('#file').change(function(e){
      file_select_handler(e.target.files[0]);
      e.target.value = '';
  });
  sse();

  var _gaq = _gaq || [];
  _gaq.push(['_setAccount', 'UA-510348-17']);
  _gaq.push(['_trackPageview']);
  (function() {
    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
  })();
</script>
""" % (MAX_IMAGES, '\n'.join(images))  # noqa
if __name__ == '__main__':
    APP.debug = True
    # threaded=True so long-lived /stream connections don't block uploads.
    APP.run('0.0.0.0', threaded=True)
| {
"repo_name": "bboe/flask-image-uploader",
"path": "app.py",
"copies": "1",
"size": "8779",
"license": "bsd-2-clause",
"hash": 9099749923990132000,
"line_mean": 29.8035087719,
"line_max": 117,
"alpha_frac": 0.6122565212,
"autogenerated": false,
"ratio": 3.555690562980964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46679470841809634,
"avg_score": null,
"num_lines": null
} |
#An example for the Kaplan-Meier estimator
from __future__ import print_function
from statsmodels.compat.python import lrange
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.sandbox.survival2 import KaplanMeier
# Script walkthrough of the sandbox KaplanMeier estimator on the strikes
# dataset: basic fit, grouped curves, censoring, curve-difference tests,
# user-specified weights, and string group labels.
# NOTE(review): this is Python 2-era sandbox code -- e.g. ``.values()[-1]``
# and list-based fancy indexing below may not run under Python 3; verify
# before reuse.

#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print('basic data')
print('\n')
dta = list(dta.values()[-1])
print(dta[lrange(5),:])
print('\n')

#Create the KaplanMeier object and fit the model

km = KaplanMeier(dta,0)
km.fit()

#show the results
km.plot()
print('basic model')
print('\n')
km.summary()
print('\n')

#Mutiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print('more than one curve')
print('\n')
km2.summary()
print('\n')
km2.plot()

#with censoring: treat durations over 80 as censored observations
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print('with censoring')
print('\n')
print(dta[lrange(5),:])
print('\n')
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print('\n')
km3.plot()

#Test for difference of survival curves

log_rank = km3.test_diff([0.0645,-0.03957])
print('log rank test')
print('\n')
print(log_rank)
print('\n')

#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test

wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print('Wilcoxon')
print('\n')
print(wilcoxon)
print('\n')

#Same info as log_rank, but for Peto and Peto modification to the
#Gehan-Wilcoxon test

#User specified functions for tests

#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method

#For example, if the desire weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing

def weights(t):
    #must accept one argument, even though it is not used here
    s = KaplanMeier(dta,0,censoring=2)
    s.fit()
    s = s.results[0][0]
    s = s * (1 - s)
    return s

#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument

test = km3.test_diff([0.0645,-0.03957], weight=weights)
print('user specified weights')
print('\n')
print(test)
print('\n')

#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings

groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print('with nan group names')
print('\n')
print(dta[lrange(5),:])
print('\n')
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print('\n')
km4.plot()

#show all the plots
plt.show()
| {
"repo_name": "bsipocz/statsmodels",
"path": "statsmodels/sandbox/examples/ex_kaplan_meier.py",
"copies": "33",
"size": "2838",
"license": "bsd-3-clause",
"hash": 1557644463796147700,
"line_mean": 21.8870967742,
"line_max": 79,
"alpha_frac": 0.7043692741,
"autogenerated": false,
"ratio": 2.679886685552408,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011664979413922149,
"num_lines": 124
} |
# An example from scipy cookbook demonstrating the use of numpy arrays in vtk
import numpy as np
import vtk
def main():
colors = vtk.vtkNamedColors()
# We begin by creating the data we want to render.
# For this tutorial, we create a 3D-image containing three overlaping cubes.
# This data can of course easily be replaced by data from a medical CT-scan or anything else three dimensional.
# The only limit is that the data must be reduced to unsigned 8 bit or 16 bit integers.
data_matrix = np.zeros([75, 75, 75], dtype=np.uint8)
data_matrix[0:35, 0:35, 0:35] = 50
data_matrix[25:55, 25:55, 25:55] = 100
data_matrix[45:74, 45:74, 45:74] = 150
# For VTK to be able to use the data, it must be stored as a VTK-image.
# This can be done by the vtkImageImport-class which
# imports raw data and stores it.
dataImporter = vtk.vtkImageImport()
# The previously created array is converted to a string of chars and imported.
data_string = data_matrix.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
# The type of the newly imported data is set to unsigned char (uint8)
dataImporter.SetDataScalarTypeToUnsignedChar()
# Because the data that is imported only contains an intensity value
# (it isnt RGB-coded or someting similar), the importer must be told this is the case.
dataImporter.SetNumberOfScalarComponents(1)
# The following two functions describe how the data is stored and the dimensions of the array it is stored in.
# For this simple case, all axes are of length 75 and begins with the first element.
# For other data, this is probably not the case.
# I have to admit however, that I honestly dont know the difference between SetDataExtent()
# and SetWholeExtent() although VTK complains if not both are used.
dataImporter.SetDataExtent(0, 74, 0, 74, 0, 74)
dataImporter.SetWholeExtent(0, 74, 0, 74, 0, 74)
# The following class is used to store transparency-values for later retrival.
# In our case, we want the value 0 to be
# completely opaque whereas the three different cubes are given different transparency-values to show how it works.
alphaChannelFunc = vtk.vtkPiecewiseFunction()
alphaChannelFunc.AddPoint(0, 0.0)
alphaChannelFunc.AddPoint(50, 0.05)
alphaChannelFunc.AddPoint(100, 0.1)
alphaChannelFunc.AddPoint(150, 0.2)
# This class stores color data and can create color tables from a few color points.
# For this demo, we want the three cubes to be of the colors red green and blue.
colorFunc = vtk.vtkColorTransferFunction()
colorFunc.AddRGBPoint(50, 1.0, 0.0, 0.0)
colorFunc.AddRGBPoint(100, 0.0, 1.0, 0.0)
colorFunc.AddRGBPoint(150, 0.0, 0.0, 1.0)
# The previous two classes stored properties.
# Because we want to apply these properties to the volume we want to render,
# we have to store them in a class that stores volume properties.
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
volumeProperty.SetScalarOpacity(alphaChannelFunc)
volumeMapper = vtk.vtkFixedPointVolumeRayCastMapper()
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())
# The class vtkVolume is used to pair the previously declared volume as well as the properties
# to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# With almost everything else ready, its time to initialize the renderer and window, as well as
# creating a method for exiting the application
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
# We add the volume to the renderer ...
renderer.AddVolume(volume)
renderer.SetBackground(colors.GetColor3d("MistyRose"))
# ... and set window size.
renderWin.SetSize(400, 400)
# Observer callback: lets a pending user event interrupt a long render.
def exitCheck(obj, event):
    """Abort the in-progress render if the window reports a pending event."""
    pending = obj.GetEventPending()
    if pending != 0:
        obj.SetAbortRender(1)
# Register the abort-check observer so exitCheck runs during rendering.
renderWin.AddObserver("AbortCheckEvent", exitCheck)
renderInteractor.Initialize()
# Nothing is rendered until the first explicit render call, so trigger it
# manually before control is handed over to the interactor's event loop.
renderWin.Render()
renderInteractor.Start()
if __name__ == '__main__':
    main()
| {
"repo_name": "lorensen/VTKExamples",
"path": "src/Python/Utilities/VTKWithNumpy.py",
"copies": "1",
"size": "4643",
"license": "apache-2.0",
"hash": 3401967594837281300,
"line_mean": 44.5196078431,
"line_max": 119,
"alpha_frac": 0.722808529,
"autogenerated": false,
"ratio": 3.843543046357616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5066351575357616,
"avg_score": null,
"num_lines": null
} |
# An example from the scipy cookbook demonstrating the use of numpy arrays in VTK.
# NOTE(review): this script is written for Python 2 (print statements) and an
# old VTK API (vtkVolumeRayCastMapper); it will not run unmodified on
# Python 3 / modern VTK.
import vtk
from numpy import *

# Stag-beetle CT volume: raw 16-bit samples with the grid dimensions below.
filename = "stagbeetle832x832x494.dat"
nx = 832
ny = 832
nz = 494
data = fromfile(filename, dtype=uint16)
# We begin by creating the data we want to render.
# For this tutorial, we create a 3D-image containing three overlapping cubes.
# This data can of course easily be replaced by data from a medical CT-scan or anything else three dimensional.
# The only limit is that the data must be reduced to unsigned 8 bit or 16 bit integers.
#data_matrix = zeros([750, 750, 750], dtype=uint8)
#data_matrix[0:350, 0:350, 0:350] = 50
#data_matrix[250:550, 250:550, 250:550] = 100
#data_matrix[450:740, 450:740, 450:740] = 150
# The first three values of the file are inspected here; presumably a small
# header preceding the voxel data -- TODO confirm against the data format.
print data[0]
print data[1]
print data[2]
# Skip the 3-value header and scale the 16-bit samples down by 16.
data_matrix = data[3:]/16;
print (all(data_matrix < 256))
# For VTK to be able to use the data, it must be stored as a VTK-image. This can be done by the vtkImageImport-class which
# imports raw data and stores it.
dataImporter = vtk.vtkImageImport()
# The previously created array is converted to a string of chars and imported.
data_string = data_matrix.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
# The type of the newly imported data is set to unsigned short (uint16).
#dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetDataScalarTypeToUnsignedShort()
# Because the data that is imported only contains an intensity value (it isn't RGB-coded or something similar), the importer
# must be told this is the case.
dataImporter.SetNumberOfScalarComponents(1)
# The following two functions describe how the data is stored and the dimensions of the array it is stored in.
# For this data set the extents match the nx/ny/nz grid defined above.
# (Both SetDataExtent() and SetWholeExtent() must be called or VTK complains.)
#dataImporter.SetDataExtent(0, 749, 0, 749, 0, 749)
#dataImporter.SetWholeExtent(0, 749, 0, 749, 0, 749)
dataImporter.SetDataExtent(0, nx-1, 0, ny-1, 0, nz-1)
dataImporter.SetWholeExtent(0, nx-1, 0, ny-1, 0, nz-1)
# The following class is used to store transparency-values for later retrieval. Scalar value 0 is made
# fully transparent (opacity 0.0), with increasing opacity for higher intensity values.
alphaChannelFunc = vtk.vtkPiecewiseFunction()
alphaChannelFunc.AddPoint(0, 0.0)
alphaChannelFunc.AddPoint(50, 0.05)
alphaChannelFunc.AddPoint(100, 0.1)
alphaChannelFunc.AddPoint(150, 0.2)
# This class stores color data and can create color tables from a few color points. Values 50/100/150
# are mapped to red, green and blue respectively.
colorFunc = vtk.vtkColorTransferFunction()
colorFunc.AddRGBPoint(50, 1.0, 0.0, 0.0)
colorFunc.AddRGBPoint(100, 0.0, 1.0, 0.0)
colorFunc.AddRGBPoint(150, 0.0, 0.0, 1.0)
# The previous two classes stored properties. Because we want to apply these properties to the volume we want to render,
# we have to store them in a class that stores volume properties.
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
volumeProperty.SetScalarOpacity(alphaChannelFunc)
# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# We can finally create our volume. We also have to specify the data for it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())
# The class vtkVolume is used to pair the previously declared volume as well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# With almost everything else ready, it's time to initialize the renderer and window, as well as creating a method for exiting the application.
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set background color to black ...
renderer.SetBackground(0,0,0)
# ... and set window size.
renderWin.SetSize(1024, 768)
# Observer callback: lets a pending user event interrupt a long render.
def exitCheck(obj, event):
    """Abort the in-progress render if the window reports a pending event."""
    pending = obj.GetEventPending()
    if pending != 0:
        obj.SetAbortRender(1)
# Register the abort-check observer so exitCheck runs during rendering.
renderWin.AddObserver("AbortCheckEvent", exitCheck)
renderInteractor.Initialize()
# Nothing is rendered until the first explicit render call, so trigger it
# manually before control is handed over to the main loop.
renderWin.Render()
renderInteractor.Start()
| {
"repo_name": "egaburov/volren",
"path": "render.py",
"copies": "1",
"size": "4943",
"license": "apache-2.0",
"hash": 4156212151614553000,
"line_mean": 44.7685185185,
"line_max": 141,
"alpha_frac": 0.7776653854,
"autogenerated": false,
"ratio": 3.471207865168539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4748873250568539,
"avg_score": null,
"num_lines": null
} |
""" An example highlighting the difference between DMD and streaming DMD
Streaming DMD is a modification of the "standard" DMD procedure that
produces *APPROXIMATIONS* of the DMD modes and eigenvalues. The benefit
of this procedure is that it can be applied to data sets with large
(in theory, infinite) numbers of snapshots provided the underlying
system is effectively low-rank.
Returns
-------
Outputs a plot comparing the streaming and standard eigenvalues
"""
import sys
sys.path.append('..')
import dmdtools
import numpy as np
import matplotlib.pyplot as plt
# Module-level configuration shared by all functions below.
max_rank = 0       # maximum allowable rank of the DMD operator (0 = unlimited)
n_snaps = 501      # total number of snapshots to be processed
n_states = 4000    # number of states
noise_cov = 1.e-4  # measurement noise covariance
dt = 0.01          # timestep

# Seed the global RNG so repeated runs generate the same data.
np.random.seed(0)
def snapshots(n_states, n_snaps, noise_cov=0):
    """Yield noisy snapshots of a rank-4 oscillatory linear system.

    Generates ``n_snaps`` state vectors of length ``n_states``, sampled
    at the module-level timestep ``dt`` and corrupted by zero-mean
    Gaussian noise with covariance ``noise_cov``.
    """
    # Four random spatial modes define the example system.
    modes = [np.random.randn(n_states) for _ in range(4)]

    # Characteristic frequencies of the two oscillators.
    freq_a, freq_b = 5.2, 1.0

    for step in range(n_snaps):
        phase_a = 2 * np.pi * freq_a * dt * step
        phase_b = 2 * np.pi * freq_b * dt * step
        clean = (modes[0] * np.cos(phase_a) +
                 modes[1] * np.cos(phase_b) +
                 modes[2] * np.sin(phase_a) +
                 modes[3] * np.sin(phase_b))
        yield clean + np.sqrt(noise_cov) * np.random.randn(n_states)
def standard_dmd():
    """Run standard (batch) DMD on the example snapshot data.

    Builds the shifted snapshot matrices X (snapshots 0..m-1) and
    Y (snapshots 1..m) column by column and fits a DMD model.

    Returns
    -------
    (modes, evals)
        The DMD modes and eigenvalues computed by ``dmdtools.DMD``.
    """
    X = np.zeros((n_states, n_snaps-1))
    Y = np.zeros((n_states, n_snaps-1))
    snaps = snapshots(n_states, n_snaps, noise_cov)
    # Use the next() builtin (works on Python 2 and 3) instead of the
    # Python-2-only generator method .next().
    x = next(snaps)
    for k, y in enumerate(snaps):
        X[:, k] = x
        Y[:, k] = y
        x = y
    DMD = dmdtools.DMD()
    DMD.fit(X, Y)
    return DMD.modes, DMD.evals
def streaming_dmd():
    """Run streaming DMD, feeding snapshot pairs one at a time.

    Returns
    -------
    (modes, evals)
        Approximate DMD modes and eigenvalues from
        ``dmdtools.StreamingDMD.compute_modes``.
    """
    sdmd = dmdtools.StreamingDMD(max_rank)
    snaps = snapshots(n_states, n_snaps, noise_cov)
    # Use the next() builtin (works on Python 2 and 3) instead of the
    # Python-2-only generator method .next().
    x = next(snaps)
    for y in snaps:
        sdmd.update(x, y)
        x = y
    return sdmd.compute_modes()
def main(streaming):
    """Plot a normalized DMD amplitude spectrum.

    Parameters
    ----------
    streaming : bool
        If True use streaming DMD, otherwise standard DMD.
    """
    if streaming:
        modes, evals = streaming_dmd()
    else:
        modes, evals = standard_dmd()

    # Convert eigenvalue phase angles to frequencies (cycles per unit time).
    fdmd = np.abs(np.angle(evals)) / (2 * np.pi * dt)

    # Amplitude of each mode, scaled by its eigenvalue magnitude and
    # normalized so the largest amplitude is 1.
    n_modes = len(fdmd)
    ydmd = np.zeros(n_modes)
    for idx in range(n_modes):
        ydmd[idx] = np.linalg.norm(modes[:, idx] * np.abs(evals[idx]))
    ydmd /= max(ydmd)

    plt.stem(fdmd, ydmd)
    plt.show()
def compare_methods():
    """Compare the standard and streaming DMD spectra on identical data.

    The global RNG is re-seeded before each run so both methods see the
    same snapshots; the sorted spectra are plotted together and the
    result of ``np.allclose`` on them is printed.
    """
    np.random.seed(0)
    evals_std = standard_dmd()[1]
    np.random.seed(0)
    evals_stream = streaming_dmd()[1]

    # Sort in place so the two spectra can be compared element-wise.
    evals_std.sort()
    evals_stream.sort()

    plt.plot(evals_std.real, evals_std.imag, 'x')
    plt.plot(evals_stream.real, evals_stream.imag, '+')
    plt.legend(["DMD", "Streaming"])
    plt.title("DMD Spectrum")
    plt.xlabel(r"$\Re(\lambda)$")
    plt.ylabel(r"$\Im(\lambda)$")
    plt.show()
    print(np.allclose(evals_std, evals_stream))


if __name__ == "__main__":
    streaming = True
    #main(streaming)
    compare_methods()
| {
"repo_name": "cwrowley/dmdtools",
"path": "python/scripts/streaming_dmd_example.py",
"copies": "1",
"size": "3073",
"license": "bsd-3-clause",
"hash": -8083911567896038000,
"line_mean": 26.6846846847,
"line_max": 76,
"alpha_frac": 0.5942076147,
"autogenerated": false,
"ratio": 3.0246062992125986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.910840681194848,
"avg_score": 0.0020814203928239015,
"num_lines": 111
} |
""" An example highlighting the difference between TLS-DMD and DMD
TLS-DMD is a total least squares variant of DMD, which can produce
superior results when the data provided to the method are noisy.
This example is meant to highlight the difference between the two
methods on a simple problem where the true solution is already known.
Returns
-------
Outputs a plot comparing the true, DMD, and TLS-DMD eigenvalues
"""
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
import dmdtools
if __name__ == "__main__":
    np.random.seed(0)

    # ======== System Parameters =======
    n_rank = 2     # True rank of the system
    n = 250        # Number of states
    m = 1000       # Number of snapshots
    std = 5e-1     # standard deviation of the noise

    # The true system is 2 dimensional and oscillatory.
    Alow = np.diag(np.exp([1j, 0.65j]))
    data = np.zeros((n_rank, m+1), dtype="complex")
    data[:, 0] = np.random.randn(n_rank) + 1j*np.random.randn(n_rank)
    # range() instead of the Python-2-only xrange() so the script runs
    # on Python 3 as well.
    for ii in range(m):
        data[:, ii+1] = Alow.dot(data[:, ii])

    # Lift the rank-2 trajectory into an n-dimensional space via a random
    # orthonormal basis.
    Q = np.linalg.qr(np.random.randn(n, 2))[0]
    data = Q.dot(data)
    data = np.r_[data.real, data.imag]  # Split and stack real and imag parts

    # Add noise to the data.
    noisy_data = data + std*np.random.randn(data.shape[0], data.shape[1])

    # New figure: unit circle plus the true eigenvalues.
    fig = plt.figure(1)
    th = np.linspace(0, 2*np.pi, 101)
    plt.plot(np.cos(th), np.sin(th), '-', color='0.75', lw=4)
    plt.plot(np.diag(Alow).real, np.diag(Alow).imag, 'ko', ms=14)

    # Note: n_rank is doubled because we only deal with real numbers.
    dmd = dmdtools.DMD(n_rank*2, False, False)  # "standard" DMD
    dmd = dmd.fit(noisy_data)
    dmd_vals, dmd_modes = dmd.get_mode_pairs(sortby="LM")

    # Plot the DMD eigenvalues.
    plt.plot(dmd_vals.real, dmd_vals.imag, 'rv', ms=14)

    # Total-least-squares DMD on the same data.
    tlsdmd = dmdtools.DMD(n_rank*2, False, True)
    tlsdmd = tlsdmd.fit(noisy_data)
    tlsdmd_vals, tlsdmd_modes = tlsdmd.get_mode_pairs(sortby="LM")

    # Plot the TLS-DMD eigenvalues.
    plt.plot(tlsdmd_vals.real, tlsdmd_vals.imag, 'b^', ms=14)

    # Raw strings so the TeX escapes are not interpreted as string escapes.
    plt.xlabel(r"$\Re(\mu)$")
    plt.ylabel(r"$\Im(\mu)$")
    # loc must be passed as a keyword argument in current matplotlib.
    plt.legend(["Unit Circle", "True", "DMD", "TLS-DMD"], loc="lower left")
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.gca().set_aspect("equal")
    plt.title("DMD vs TLS-DMD")
    plt.savefig("tls_dmd_comparison.pdf")
    plt.show()
| {
"repo_name": "cwrowley/dmdtools",
"path": "python/scripts/total_dmd_example.py",
"copies": "1",
"size": "2465",
"license": "bsd-3-clause",
"hash": -7980647217084054000,
"line_mean": 32.7671232877,
"line_max": 78,
"alpha_frac": 0.6292089249,
"autogenerated": false,
"ratio": 2.980652962515115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9021895270056887,
"avg_score": 0.017593323471645574,
"num_lines": 73
} |
"""An example how to use the Jsp Evaluator
"""
from jspeval import JspEvaluator
from jspmodel import JspModel
from jspsolution import JspSolution
def example_func():
    """Demonstrates the usage of JspEvaluator, JspModel and JspSolution."""
    # Instantiate a model from the example problem file.
    model = JspModel("xml/example.xml")

    # Two solutions: one drawn at random, one hand-crafted.
    labeled_solutions = [
        ("random solution", model.get_random_solution()),
        ("custom solution", JspSolution(model, [0.0, 0.25, 0.75, 0.5])),
    ]

    # One evaluator serves both solutions.
    evaluator = JspEvaluator(model)

    # Phase 1: machine assignments for every solution.
    assignments = [evaluator.build_machine_assignment(solution)
                   for _, solution in labeled_solutions]

    # Phase 2: schedules from the assignments.
    schedules = [evaluator.execute_schedule(assignment)
                 for assignment in assignments]

    # Phase 3: metrics for each assignment/schedule pair.
    all_metrics = [evaluator.get_metrics(assignment, schedule)
                   for assignment, schedule in zip(assignments, schedules)]

    # Print the metrics, with a blank line between the two reports.
    for index, ((label, _), metrics) in enumerate(zip(labeled_solutions,
                                                      all_metrics)):
        if index:
            print()
        print("{}:".format(label))
        for metric in metrics:
            print(metric, ": ", metrics[metric], sep="")


if __name__ == "__main__":
    example_func()
| {
"repo_name": "mYstar/JSPEval",
"path": "example.py",
"copies": "1",
"size": "1477",
"license": "apache-2.0",
"hash": 900085857904064500,
"line_mean": 31.1086956522,
"line_max": 71,
"alpha_frac": 0.7000677048,
"autogenerated": false,
"ratio": 3.9177718832891246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008051529790660225,
"num_lines": 46
} |
"""An example implementation of a Gaussian noise model."""
from functools import partial
import numpy as np
import scipy.stats as ss
import elfi
def Gauss(mu, sigma, n_obs=50, batch_size=1, random_state=None):
    """Sample batches of observations from the Gaussian distribution.

    Parameters
    ----------
    mu : float, array_like
        Mean(s); one entry per batch row.
    sigma : float, array_like
        Standard deviation(s); one entry per batch row.
    n_obs : int, optional
        Number of observations per batch row.
    batch_size : int, optional
    random_state : RandomState, optional

    Returns
    -------
    np.ndarray of shape (batch_size, n_obs)
    """
    # Reshape parameters into column vectors so each batch row
    # broadcasts across the n_obs observations.
    mu_col = np.asanyarray(mu).reshape((-1, 1))
    sigma_col = np.asanyarray(sigma).reshape((-1, 1))
    return ss.norm.rvs(loc=mu_col, scale=sigma_col,
                       size=(batch_size, n_obs),
                       random_state=random_state)
def ss_mean(x):
    """Summary statistic: per-batch mean over the observation axis."""
    return np.mean(x, axis=1)
def ss_var(x):
    """Summary statistic: per-batch (population) variance over the observation axis."""
    return np.var(x, axis=1)
def get_model(n_obs=50, true_params=None, seed_obs=None):
    """Return a complete Gaussian noise model.

    Parameters
    ----------
    n_obs : int, optional
        the number of observations
    true_params : list, optional
        true_params[0] corresponds to the mean,
        true_params[1] corresponds to the standard deviation
    seed_obs : int, optional
        seed for the observed data generation

    Returns
    -------
    m : elfi.ElfiModel

    """
    if true_params is None:
        true_params = [10, 2]

    # Observed data generated from the true parameters with a fixed seed.
    y_obs = Gauss(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))
    sim_fn = partial(Gauss, n_obs=n_obs)

    # Build the inference graph: priors -> simulator -> summaries -> distance.
    m = elfi.ElfiModel()
    elfi.Prior('uniform', -10, 50, model=m, name='mu')
    # Truncated-normal prior with bounds (0.01, 5) keeps sigma away from zero.
    elfi.Prior('truncnorm', 0.01, 5, model=m, name='sigma')
    elfi.Simulator(sim_fn, m['mu'], m['sigma'], observed=y_obs, name='Gauss')
    elfi.Summary(ss_mean, m['Gauss'], name='S1')
    elfi.Summary(ss_var, m['Gauss'], name='S2')
    elfi.Distance('euclidean', m['S1'], m['S2'], name='d')
    return m
| {
"repo_name": "lintusj1/elfi",
"path": "elfi/examples/gauss.py",
"copies": "1",
"size": "2003",
"license": "bsd-3-clause",
"hash": -5291244375266691000,
"line_mean": 26.0675675676,
"line_max": 93,
"alpha_frac": 0.627558662,
"autogenerated": false,
"ratio": 3.256910569105691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4384469231105691,
"avg_score": null,
"num_lines": null
} |
"""An example implementation of the bivariate g-and-k model."""
from functools import partial
import numpy as np
import scipy.stats as ss
import elfi
EPS = np.finfo(float).eps
def GNK(a, b, g, k, c=0.8, n_obs=50, batch_size=1, random_state=None):
    """Sample the univariate g-and-k distribution.

    References
    ----------
    [1] Drovandi, Christopher C., and Anthony N. Pettitt. "Likelihood-free
    Bayesian estimation of multivariate quantile distributions."
    Computational Statistics & Data Analysis 55.9 (2011): 2541-2556.
    [2] Allingham, David, R. AR King, and Kerrie L. Mengersen. "Bayesian
    estimation of quantile distributions."Statistics and Computing 19.2
    (2009): 189-201.

    Parameters
    ----------
    a : float or array_like
        The location.
    b : float or array_like
        The scale.
    g : float or array_like
        The skewness.
    k : float or array_like
        The kurtosis.
    c : float, optional
        The overall asymmetry parameter, as a convention fixed to 0.8 [2].
    n_obs : int, optional
        The number of the observed points
    batch_size : int, optional
    random_state : np.random.RandomState, optional

    Returns
    -------
    array_like
        The yielded points, shape (batch_size, n_obs, 1).

    """
    # Reshape all parameters into column vectors for batch broadcasting.
    a_col = np.asanyarray(a).reshape((-1, 1))
    b_col = np.asanyarray(b).reshape((-1, 1))
    g_col = np.asanyarray(g).reshape((-1, 1))
    k_col = np.asanyarray(k).reshape((-1, 1))

    # Latent standard-normal term z, Equation 1 in [2].
    z = ss.norm.rvs(size=(batch_size, n_obs), random_state=random_state)

    # Quantile transform of Equation 1 in [2].
    skew_factor = (1 - np.exp(-g_col * z)) / (1 + np.exp(-g_col * z))
    y = a_col + b_col * (1 + c * (skew_factor)) * (1 + z**2)**k_col * z

    # Dedicate a trailing axis for the (one-dimensional) data dimension.
    return np.expand_dims(y, axis=2)
def get_model(n_obs=50, true_params=None, stats_summary=None, seed_obs=None):
    """Return an initialised univariate g-and-k model.

    Parameters
    ----------
    n_obs : int, optional
        The number of the observed points.
    true_params : array_like, optional
        The parameters defining the model.
    stats_summary : array_like, optional
        The chosen summary statistics, expressed as a list of strings.
        Options: ['ss_order'], ['ss_robust'], ['ss_octile'].
    seed_obs : np.random.RandomState, optional

    Returns
    -------
    elfi.ElfiModel

    """
    m = elfi.ElfiModel()

    # Initialising the default parameter settings as given in [2].
    if true_params is None:
        true_params = [3, 1, 2, .5]
    if stats_summary is None:
        stats_summary = ['ss_order']

    # Initialising the default prior settings as given in [2].
    elfi.Prior('uniform', 0, 10, model=m, name='a')
    elfi.Prior('uniform', 0, 10, model=m, name='b')
    elfi.Prior('uniform', 0, 10, model=m, name='g')
    elfi.Prior('uniform', 0, 10, model=m, name='k')

    # Generating the observations with a fixed seed.
    y_obs = GNK(*true_params, n_obs=n_obs,
                random_state=np.random.RandomState(seed_obs))

    # Defining the simulator.
    fn_sim = partial(GNK, n_obs=n_obs)
    elfi.Simulator(fn_sim, m['a'], m['b'], m['g'], m['k'], observed=y_obs,
                   name='GNK')

    # Attach only the summary statistics requested via stats_summary.
    fns_summary_all = [ss_order, ss_robust, ss_octile]
    fns_summary_chosen = []
    for fn_summary in fns_summary_all:
        if fn_summary.__name__ in stats_summary:
            summary = elfi.Summary(fn_summary, m['GNK'],
                                   name=fn_summary.__name__)
            fns_summary_chosen.append(summary)

    # Euclidean distance over the chosen summaries.
    elfi.Discrepancy(euclidean_multidim, *fns_summary_chosen, name='d')
    return m
def euclidean_multidim(*simulated, observed):
    """Calculate the multi-dimensional Euclidean distance.

    Parameters
    ----------
    *simulated: array_like
        The simulated points.
    observed : array_like
        The observed points.

    Returns
    -------
    array_like
        One distance per batch entry.
    """
    # Stack all summaries into a single array per side.
    sim_stacked = np.column_stack(simulated)
    obs_stacked = np.column_stack(observed)

    # Sum squared differences over the two trailing axes, then take
    # the square root to obtain the Euclidean distance per batch entry.
    squared_diff = (sim_stacked - obs_stacked)**2.
    per_dim = np.sum(squared_diff, axis=1)
    return np.sqrt(np.sum(per_dim, axis=1))
def ss_order(y):
    """Obtain the order summary statistic, [2].

    The statistic reaches an optimal performance upon a low number of
    observations. The observations are sorted along the last axis.

    Parameters
    ----------
    y : array_like
        The yielded points.

    Returns
    -------
    array_like
    """
    return np.sort(y)
def ss_robust(y):
    """Obtain the robust summary statistic, [1].

    The statistic reaches an optimal performance upon a high number of
    observations.

    Parameters
    ----------
    y : array_like
        The yielded points.

    Returns
    -------
    array_like
    """
    # One robust estimator per g-and-k parameter, stacked per batch entry.
    estimators = (_get_ss_a(y), _get_ss_b(y), _get_ss_g(y), _get_ss_k(y))
    return np.stack(estimators, axis=1)
def ss_octile(y):
    """Obtain the octile summary statistic.

    The statistic reaches an optimal performance upon a high number of
    observations. As reported in [1], it is more stable than ss_robust.

    Parameters
    ----------
    y : array_like
        The yielded points.

    Returns
    -------
    array_like
    """
    # Seven equally spaced octile percentiles: 12.5, 25, ..., 87.5.
    octile_points = np.linspace(12.5, 87.5, 7)
    octiles = np.percentile(y, octile_points, axis=1)
    # Move the octile axis next to the batch axis: (batch, 7, ...).
    return np.stack(octiles, axis=1)
def _get_ss_a(y):
L2 = np.percentile(y, 50, axis=1)
ss_a = L2
return ss_a
def _get_ss_b(y):
L1, L3 = np.percentile(y, [25, 75], axis=1)
ss_b = L3 - L1
# Adjusting the zero values to avoid division issues.
ss_b_ravelled = ss_b.ravel()
idxs_zero = np.where(ss_b_ravelled == 0)[0]
ss_b_ravelled[idxs_zero] += EPS
n_dim = y.shape[-1]
n_batches = y.shape[0]
ss_b = ss_b_ravelled.reshape(n_batches, n_dim)
return ss_b
def _get_ss_g(y):
    # Robust skewness estimate from quartiles, normalized by the IQR.
    q1, q2, q3 = np.percentile(y, [25, 50, 75], axis=1)
    return np.divide(q3 + q1 - 2 * q2, _get_ss_b(y))
def _get_ss_k(y):
    # Robust kurtosis estimate from octiles, normalized by the IQR.
    e1, e3, e5, e7 = np.percentile(y, [12.5, 37.5, 62.5, 87.5], axis=1)
    return np.divide(e7 - e5 + e3 - e1, _get_ss_b(y))
| {
"repo_name": "lintusj1/elfi",
"path": "elfi/examples/gnk.py",
"copies": "1",
"size": "6376",
"license": "bsd-3-clause",
"hash": -2257393796004886800,
"line_mean": 24.1023622047,
"line_max": 77,
"alpha_frac": 0.5908092848,
"autogenerated": false,
"ratio": 3.105698977106673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41965082619066735,
"avg_score": null,
"num_lines": null
} |
"""An example implementation of the univariate g-and-k model."""
from functools import partial
import numpy as np
import scipy.stats as ss
import elfi
from elfi.examples.gnk import euclidean_multidim, ss_octile, ss_order, ss_robust
EPS = np.finfo(float).eps
def BiGNK(a1, a2, b1, b2, g1, g2, k1, k2, rho, c=.8, n_obs=150, batch_size=1,
          random_state=None):
    """Sample the bi g-and-k distribution.

    References
    ----------
    [1] Drovandi, Christopher C., and Anthony N. Pettitt. "Likelihood-free
    Bayesian estimation of multivariate quantile distributions."
    Computational Statistics & Data Analysis 55.9 (2011): 2541-2556.
    [2] Allingham, David, R. AR King, and Kerrie L. Mengersen. "Bayesian
    estimation of quantile distributions."Statistics and Computing 19.2
    (2009): 189-201.

    Parameters
    ----------
    a1 : float or array_like
        The location (the 1st dimension).
    a2 : float or array_like
        The location (the 2nd dimension).
    b1 : float or array_like
        The scale (the 1st dimension).
    b2 : float or array_like
        The scale (the 2nd dimension).
    g1 : float or array_like
        The skewness (the 1st dimension).
    g2 : float or array_like
        The skewness (the 2nd dimension).
    k1 : float or array_like
        The kurtosis (the 1st dimension).
    k2 : float or array_like
        The kurtosis (the 2nd dimension).
    rho : float or array_like
        The dependence between components (dimensions), [1].
    c : float, optional
        The overall asymmetry parameter, as a convention fixed to 0.8 [2].
    n_obs : int, optional
        The number of the observed points
    batch_size : int, optional
    random_state : np.random.RandomState, optional

    Returns
    -------
    array_like
        The yielded points.

    """
    # Standardising the parameters: column vectors per batch entry.
    a1 = np.asanyarray(a1).reshape((-1, 1))
    a2 = np.asanyarray(a2).reshape((-1, 1))
    b1 = np.asanyarray(b1).reshape((-1, 1))
    b2 = np.asanyarray(b2).reshape((-1, 1))
    g1 = np.asanyarray(g1).reshape((-1, 1))
    g2 = np.asanyarray(g2).reshape((-1, 1))
    # k gets an extra trailing axis so it can be repeated per observation below.
    k1 = np.asanyarray(k1).reshape((-1, 1, 1))
    k2 = np.asanyarray(k2).reshape((-1, 1, 1))
    rho = np.asanyarray(rho).reshape((-1, 1))
    # Pair the two dimensions of each parameter side by side.
    a = np.hstack((a1, a2))
    b = np.hstack((b1, b2))
    g = np.hstack((g1, g2))
    k = np.hstack((k1, k2))

    # Sampling from the z term, Equation 3 [1]: one correlated bivariate
    # normal draw per batch entry, with correlation rho.
    z = []
    for i in range(batch_size):
        matrix_cov = np.array([[1, rho[i]], [rho[i], 1]])
        z_el = ss.multivariate_normal.rvs(cov=matrix_cov,
                                          size=(n_obs),
                                          random_state=random_state)
        z.append(z_el)
    z = np.array(z)

    # Obtaining the first bracket term, Equation 3 [1].
    gdotz = np.einsum('ik,ijk->ijk', g, z)
    term_exp = (1 - np.exp(-gdotz)) / (1 + np.exp(-gdotz))
    term_first = np.einsum('ik,ijk->ijk', b, (1 + c * (term_exp)))

    # Obtaining the second bracket term, Equation 3 [1]; k is repeated
    # and transposed so it lines up with the (batch, n_obs, dim) layout.
    term_second_unraised = 1 + np.power(z, 2)
    k = np.repeat(k, n_obs, axis=2)
    k = np.swapaxes(k, 1, 2)
    term_second = np.power(term_second_unraised, k)

    # Yielding Equation 3, [1]: add the location after swapping the batch
    # axis out of the way so broadcasting against a works, then swap back.
    term_product = term_first * term_second * z
    term_product_misaligned = np.swapaxes(term_product, 1, 0)
    y_misaligned = np.add(a, term_product_misaligned)
    y = np.swapaxes(y_misaligned, 1, 0)
    return y
def get_model(n_obs=150, true_params=None, stats_summary=None, seed_obs=None):
    """Return an initialised bivariate g-and-k model.

    Parameters
    ----------
    n_obs : int, optional
        The number of the observed points.
    true_params : array_like, optional
        The parameters defining the model.
    stats_summary : array_like, optional
        The chosen summary statistics, expressed as a list of strings.
        Options: ['ss_order'], ['ss_robust'], ['ss_octile'].
    seed_obs : np.random.RandomState, optional

    Returns
    -------
    elfi.ElfiModel

    """
    m = elfi.ElfiModel()

    # Initialising the default parameter settings as given in [1].
    if true_params is None:
        true_params = [3, 4, 1, 0.5, 1, 2, .5, .4, 0.6]
    if stats_summary is None:
        stats_summary = ['ss_robust']

    # Initialising the default prior settings as given in [1].
    # rho is kept strictly inside (-1, 1) via the EPS offsets.
    elfi.Prior('uniform', 0, 5, model=m, name='a1')
    elfi.Prior('uniform', 0, 5, model=m, name='a2')
    elfi.Prior('uniform', 0, 5, model=m, name='b1')
    elfi.Prior('uniform', 0, 5, model=m, name='b2')
    elfi.Prior('uniform', -5, 10, model=m, name='g1')
    elfi.Prior('uniform', -5, 10, model=m, name='g2')
    elfi.Prior('uniform', -.5, 5.5, model=m, name='k1')
    elfi.Prior('uniform', -.5, 5.5, model=m, name='k2')
    elfi.Prior('uniform', -1 + EPS, 2 - 2 * EPS, model=m, name='rho')

    # Generating the observations with a fixed seed.
    y_obs = BiGNK(*true_params, n_obs=n_obs,
                  random_state=np.random.RandomState(seed_obs))

    # Defining the simulator.
    fn_sim = partial(BiGNK, n_obs=n_obs)
    elfi.Simulator(fn_sim, m['a1'], m['a2'], m['b1'], m['b2'], m['g1'],
                   m['g2'], m['k1'], m['k2'], m['rho'], observed=y_obs,
                   name='BiGNK')

    # Attach only the summary statistics requested via stats_summary.
    fns_summary_all = [ss_order, ss_robust, ss_octile]
    fns_summary_chosen = []
    for fn_summary in fns_summary_all:
        if fn_summary.__name__ in stats_summary:
            summary = elfi.Summary(fn_summary, m['BiGNK'],
                                   name=fn_summary.__name__)
            fns_summary_chosen.append(summary)

    # Defining the distance metric.
    elfi.Discrepancy(euclidean_multidim, *fns_summary_chosen, name='d')
    return m
| {
"repo_name": "lintusj1/elfi",
"path": "elfi/examples/bignk.py",
"copies": "1",
"size": "5779",
"license": "bsd-3-clause",
"hash": -7229852703802814000,
"line_mean": 34.0242424242,
"line_max": 80,
"alpha_frac": 0.5957778162,
"autogenerated": false,
"ratio": 3.072301967038809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4168079783238809,
"avg_score": null,
"num_lines": null
} |
#An example input file for multimesa.py
#run command is python multimesa.py example3.py
#Location to write out folders too
# Location to write output folders to.
output_folder="output/"
# If set to 1 the output folders are numbered; otherwise the *_name values
# are used to build the folder path.
folder_num=1
# Defaults assume variables are linearly looped and that the name of each
# variable is the MESA one. Variables are assumed to live inside
# control_inlist unless specified as star.
semi_list=[0.0,0.001,0.01,0.1]
semi_name="alpha_semiconvection"
semi_section="control"
over_list=[0.0,0.001,0.016,0.2]
over_name='overshoot_f_above_nonburn'
over_section='control'
thermo_list=[0.0,0.1,1.0,10.0]
thermo_name="thermo_haline_coeff"
thermo_section='control'
am_list=[0.0,0.5,1.0,1.5]
am_name="am_nu_factor"
am_section="control"
# The callback below is called once per iteration with the current set of
# parameters, letting us set other parameters that depend on the inputs.
# For instance, with mass=8,9,10 and parameter y=1,2,3 we could set z=0.01
# when mass<9 and z=0.02 when mass>=9, unless y<2 in which case z=0.0.
# It must return 3 lists: the MESA names, the values, and the inlist sections.
# Note: if you have set folder_num=0 these names are not added to the output
# folder path.
# If you don't care about this, the function can be removed; it doesn't need
# to exist.
# Note: names are the short names, i.e. for mass_name='initial_mass' the
# entry in names is 'mass', not 'initial_mass'.
def callback(names, val):
    """Derive dependent MESA inlist options from the current parameter set.

    Parameters
    ----------
    names : list
        Short parameter names for this iteration (e.g. 'semi', 'over').
    val : list
        Corresponding values (convertible to float), parallel to names.

    Returns
    -------
    tuple of three lists
        The MESA option names, their values, and their inlist sections.
    """
    out_names = []
    out_vals = []
    out_secs = []

    # Pull the current values of the looped parameters (default 0).
    current = {"semi": 0.0, "over": 0.0, "thermo": 0.0, "am": 0.0}
    for key, value in zip(names, val):
        if key in current:
            current[key] = float(value)

    # Semiconvection is switched on only for a non-zero coefficient.
    out_names.append('allow_semiconvective_mixing')
    out_vals.append('.true.' if current["semi"] > 0.0 else '.false.')
    out_secs.append('control')

    # A non-zero overshoot value is applied to all seven remaining
    # zone boundaries (the "above_nonburn" one is the looped variable).
    if current["over"] > 0.0:
        boundaries = ('below_nonburn', 'above_burn_h', 'below_burn_h',
                      'above_burn_he', 'below_burn_he', 'above_burn_z',
                      'below_burn_z')
        for boundary in boundaries:
            out_names.append('overshoot_f_' + boundary)
        out_vals.extend([current["over"]] * 7)
        out_secs.extend(['control'] * 7)

    # Thermohaline mixing likewise toggles on a non-zero coefficient.
    out_names.append('allow_thermohaline_mixing')
    out_vals.append('.true.' if current["thermo"] > 0.0 else '.false.')
    out_secs.append('control')

    return out_names, out_vals, out_secs
| {
"repo_name": "rjfarmer/multimesa",
"path": "example3.py",
"copies": "1",
"size": "2602",
"license": "isc",
"hash": -4043533106600896500,
"line_mean": 26.1041666667,
"line_max": 93,
"alpha_frac": 0.7194465796,
"autogenerated": false,
"ratio": 2.7160751565762005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8473156851777415,
"avg_score": 0.09247297687975714,
"num_lines": 96
} |
'''An example module demonstrating the implementation of a processor'''
import ctypes
import math
class DummyProcessor:
    '''An example processor that pulses the viewport color.

    Keeps a 3-byte RGB buffer and drives it through a smooth
    black -> white -> black cycle with a 3-second period.
    '''
    def __init__(self):
        '''Construct a DummyProcessor with a buffer for drawing into'''
        # Phase accumulator (seconds into the current cycle).
        self.value = 0
        # 3-byte RGB buffer handed to the viewport by reference.
        self.buffer = (ctypes.c_ubyte * 3)(0)

    def process_data(self, data):
        '''Accept converted data; the dummy processor ignores it.'''
        pass

    def destroy(self):
        '''Cleanup hook called when the processor is no longer needed.'''
        pass

    def update(self, timestep):
        '''Advance the cycle by timestep seconds and refresh the buffer.

        Returns a ctypes reference to the RGB buffer for the viewport.
        '''
        interval = 3.0
        # Accumulate time and wrap back into [0, interval].
        self.value += timestep
        while self.value > interval:
            self.value -= interval
        # Map the phase onto a smooth 0 -> 1 -> 0 brightness curve.
        phase = self.value / interval
        brightness = 1.0 - (0.5 * math.cos(2 * math.pi * phase) + 0.5)
        # Fill all three channels with the same grey level.
        ctypes.memset(self.buffer, int(255 * brightness), 3)
        return ctypes.byref(self.buffer)
| {
"repo_name": "Kupoman/BlenderRealtimeEngineAddon",
"path": "brte/processors/dummy.py",
"copies": "1",
"size": "1033",
"license": "mit",
"hash": 3682803634215855600,
"line_mean": 27.6944444444,
"line_max": 81,
"alpha_frac": 0.6214908035,
"autogenerated": false,
"ratio": 4.233606557377049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5355097360877049,
"avg_score": null,
"num_lines": null
} |
"""An example module to show autodoc style.
Contains an example constant, :class:`Storage` class for storing objects and
helper function :func:`store_integers` for storing only integers.
"""
import datetime
#: Example integer constant.
INT_CONSTANT = 1
#: Example string constant.
STR_CONSTANT = 'string'
class Storage(object):
    """A container of objects that records when it was last modified.

    This is an example class to show autodoc style.  It keeps a list of
    arbitrary objects together with the timestamp of the most recent
    append.

    Example usage::

        >>> storage = Storage(['foo', 'bar'])
        >>> storage.items
        ['foo', 'bar']
        >>> storage.add_item('baz')
        >>> storage.items
        ['foo', 'bar', 'baz']

    :param items:
        Optional list of items to start with.
    """

    def __init__(self, items=None):
        #: List of items, add new item using :meth:`add_item`.
        self.items = items if items else []
        #: :py:class:`datetime.datetime` of last item update, set to
        #: :py:meth:`datetime.datetime.now` on instantiation.
        self.last_updated = datetime.datetime.now()

    def add_item(self, item):
        """Append *item* to :attr:`items`.

        :attr:`last_updated` is refreshed to
        :py:meth:`datetime.datetime.now`.

        :param item:
            Something to append to :attr:`items`.
        """
        self.items.append(item)
        self.last_updated = datetime.datetime.now()
def store_integers(items, allow_zero=True):
    """Store the integers from *items* in a new :class:`Storage`.

    This is an example function to show autodoc style.  Non-integer
    entries are skipped.

    Examples::

        >>> store_integers([1, 'foo', 2, 'bar', 0]).items
        [1, 2, 0]
        >>> store_integers([1, 'foo', 2, 'bar', 0], allow_zero=False).items
        [1, 2]

    :param items:
        List of objects of any type; only :class:`int` instances are kept.
    :param allow_zero:
        Boolean -- if ``False``, ``0`` integers are skipped.
        Defaults to ``True``.
    """
    def _keep(obj):
        # Keep integers, optionally dropping zero.
        return isinstance(obj, int) and (allow_zero or obj != 0)

    return Storage([obj for obj in items if _keep(obj)])
| {
"repo_name": "espdev/sphinx-fancy-theme",
"path": "docs/source/example.py",
"copies": "1",
"size": "2446",
"license": "mit",
"hash": -834757733094383200,
"line_mean": 26.4831460674,
"line_max": 79,
"alpha_frac": 0.5993458708,
"autogenerated": false,
"ratio": 3.9136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5012945870800001,
"avg_score": null,
"num_lines": null
} |
#An example of a class
#$ header class Shape(public)
#$ header method __init__(Shape, double, double)
#$ header method area(Shape) results(double)
#$ header method perimeter(Shape) results(double)
#$ header method describe(Shape,str)
#$ header method authorName(Shape,str)
#$ header method scaleSize(Shape, double)
class Shape:
    # A rectangle-like shape with width x and height y, plus free-text
    # description/author metadata.  Kept as plain statements so the
    # pyccel headers above still apply.
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.description = "This shape has not been described yet"
        self.author = "Nobody has claimed to make this shape yet"

    def area(self):
        # Area of the rectangle: x * y.
        result = self.x * self.y
        return result

    def perimeter(self):
        # Perimeter of the rectangle: 2x + 2y.
        total = 2 * self.x + 2 * self.y
        return total

    def describe(self, text):
        # Replace the free-text description.
        self.description = text

    def authorName(self, text):
        # Record who made this shape.
        self.author = text

    def scaleSize(self, scale):
        # Scale both sides by the same factor.
        self.x = self.x * scale
        self.y = self.y * scale
# Demo: exercise the Shape class with a 100 x 45 rectangle.
rectangle = Shape(100., 45.)
# Finding the area of your rectangle (100 * 45):
print(rectangle.area())
# Finding the perimeter of your rectangle (2*100 + 2*45):
print(rectangle.perimeter())
# Describing the rectangle.
rectangle.describe("A wide rectangle, more than twice as wide as it is tall")
# Making the rectangle 50% smaller.
rectangle.scaleSize(0.5)
# Re-printing the new area of the rectangle (now 50 x 22.5).
print(rectangle.area())
| {
"repo_name": "ratnania/pyccel",
"path": "tests/codegen/scripts/classes_4.py",
"copies": "2",
"size": "1292",
"license": "mit",
"hash": -6345758568791810000,
"line_mean": 24.84,
"line_max": 77,
"alpha_frac": 0.6602167183,
"autogenerated": false,
"ratio": 3.530054644808743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00784735965492306,
"num_lines": 50
} |
"""An example of Appium running on Sauce using Sauce Connect to access a local webserver.
This test assumes SAUCE_USERNAME and SAUCE_ACCESS_KEY are environment variables
set to your Sauce Labs username and access key.
You'll also need Sauce-Connect.jar in this test directory so we can start it to enable
a tunnel between Sauce Labs and your machine
This is an All-In-One bundle test that does a lot more than usual test would. It does following
things that you would normally do in a different way:
- starts Sauce Connect - which you would normally start from console with
"java -jar Sauce-Connect.jar SAUCE_USERNAME SAUCE_ACCESS_KEY"
- starts a local webserver on port 9999 that serves a sample string - normally you would
like to connect to your own webserver
"""
import unittest
from selenium import webdriver
import os
import subprocess
import sys
import select
from SimpleHTTPServer import SimpleHTTPRequestHandler
from StringIO import StringIO
from threading import Thread
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
SAUCE_USERNAME = os.environ.get('SAUCE_USERNAME')
SAUCE_ACCESS_KEY = os.environ.get('SAUCE_ACCESS_KEY')
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
    # ThreadingMixIn precedes HTTPServer in the MRO so each incoming
    # request is dispatched on its own thread instead of serially.
class MyRequestHandler(SimpleHTTPRequestHandler):
    """Serve a fixed sample HTML page so the test can verify the tunnel works."""

    def do_GET(self):
        """Answer every GET with a static HTML body and a 200 status."""
        f = StringIO()
        f.write("<html><body>Welcome to the flipside!</body></html>")
        f.seek(0)
        # Send code 200 response.
        self.send_response(200)
        # Bug fix: the MIME type must be "text/html" -- the previous value
        # "text-html" is not a valid media type.
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Send file content to client.
        self.wfile.write(f.read())
        f.close()
        return
class Selenium2OnSauce(unittest.TestCase):
    """All-in-one test: local web server + Sauce Connect tunnel + remote browser."""

    def setUpWebServer(self):
        """Start a threaded HTTP server on port 9999 in a daemon thread."""
        # Setting up a local websever in separate thread on port 9999
        httpd = ThreadedHTTPServer(("", 9999), MyRequestHandler)
        sa = httpd.socket.getsockname()
        print "[HTTP Server] Serving HTTP on", sa[0], "port", sa[1], "..."
        thread = Thread(target=httpd.serve_forever)
        thread.daemon = True  # so server gets killed when we exit
        thread.start()

    def setUpTunnel(self):
        """Launch Sauce-Connect.jar and block until the tunnel reports ready."""
        # Setting up Sauce Connect tunnel
        self.process = subprocess.Popen(["java -jar Sauce-Connect.jar %s %s" % (SAUCE_USERNAME, SAUCE_ACCESS_KEY)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p = self.process
        print "[Sauce Connect]: Waiting for tunnel setup, this make take up to 30s"
        is_ready = False
        # Poll both stdout and stderr of the tunnel process; stdout tells us
        # when the tunnel is usable, stderr reveals startup failures.
        while True:
            reads = [p.stdout.fileno(), p.stderr.fileno()]
            ret = select.select(reads, [], [])
            for fd in ret[0]:
                if fd == p.stdout.fileno():
                    read = p.stdout.readline()
                    sys.stdout.write("[Sauce Connect]: %s" % read)
                    if "Connected! You may start your tests." in read:
                        print "[Sauce Connect]: Tunnel ready, running the test"
                        is_ready = True
                        break
                if fd == p.stderr.fileno():
                    read = p.stderr.readline()
                    sys.stderr.write("[Sauce Connect]: %s" % read)
                    if "Unable to access jarfile" in read:
                        # The jar is missing -- abort instead of hanging.
                        self.process.terminate()
                        raise Exception("Sauce Connect could not start!")
            if is_ready:
                break

    def setUp(self):
        """Bring up the web server and tunnel, then open a remote iOS browser."""
        self.setUpWebServer()
        self.setUpTunnel()
        desired_capabilities={
            'platformName': 'iOS',
            'platformVersion': '7.1',
            'deviceName': 'iPhone Simulator',
            'browserName': 'safari',
            'appiumVersion': '1.2.2',
            'name': 'Appium Python iOS Test (Connect)'
        }
        self.driver = webdriver.Remote(
            desired_capabilities=desired_capabilities,
            command_executor="http://%s:%s@ondemand.saucelabs.com:80/wd/hub" % (SAUCE_USERNAME, SAUCE_ACCESS_KEY)
        )
        self.driver.implicitly_wait(30)

    def test_basic(self):
        """Load the local page through the tunnel and check its content."""
        driver = self.driver
        driver.get("http://127.0.0.1:9999/")
        body = self.driver.find_element_by_tag_name("body")
        self.assertTrue("Welcome to the flipside!" in body.text)

    def tearDown(self):
        """Report the Sauce job URL, quit the browser, and kill the tunnel."""
        print("Link to your job: https://saucelabs.com/jobs/%s" % self.driver.session_id)
        self.driver.quit()
        self.process.terminate()
if __name__ == '__main__':
    # Refuse to run without Sauce credentials in the environment.
    if not (SAUCE_USERNAME and SAUCE_ACCESS_KEY):
        print "Make sure you have SAUCE_USERNAME and SAUCE_ACCESS_KEY set as environment variables."
    else:
        unittest.main()
| {
"repo_name": "eric-stanley/sample-code",
"path": "sample-code/examples/python/sauce_connect.py",
"copies": "1",
"size": "4833",
"license": "apache-2.0",
"hash": -772900010694041000,
"line_mean": 35.3383458647,
"line_max": 175,
"alpha_frac": 0.6263190565,
"autogenerated": false,
"ratio": 3.987623762376238,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5113942818876238,
"avg_score": null,
"num_lines": null
} |
"""An example of Appium running on Sauce, with a webview.
This test assumes SAUCE_USERNAME and SAUCE_ACCESS_KEY are environment variables
set to your Sauce Labs username and access key."""
from random import randint
from appium import webdriver
from appium import SauceTestCase, on_platforms
from time import sleep
from selenium.webdriver.common.keys import Keys
# Hosted .zip of the iOS application under test.
app = 'http://appium.s3.amazonaws.com/WebViewApp6.0.app.zip'
# Sauce Labs capability sets to run the test against (consumed by the
# ``on_platforms`` decorator below).
platforms = [{
    'platformName': 'iOS',
    'platformVersion': '7.1',
    'deviceName': 'iPhone Simulator',
    'appiumVersion': '1.3.4',
    'app': app
}]
@on_platforms(platforms)
class WebViewIOSSauceTests(SauceTestCase):
    """Drive the native app's webview to perform a Google search."""

    def test_get_url(self):
        """Enter a URL in the native text field, then search inside the webview."""
        # Native side: type the URL and press Go.
        url_el = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIATextField[1]')
        url_el.send_keys('http://www.google.com')
        go_el = self.driver.find_element_by_accessibility_id('Go')
        go_el.click()
        # Switch into the webview context to interact with the page DOM.
        self.driver.switch_to.context('WEBVIEW')
        search = self.driver.find_element_by_name('q')
        search.send_keys('sauce labs')
        search.send_keys(Keys.RETURN)
        # allow the page to load
        sleep(1)
        # The page title should start with the search terms.
        self.assertEquals('sauce labs', self.driver.title[:10])
| {
"repo_name": "Dyazvinsky/sample-code",
"path": "sample-code/examples/python/ios_sauce_webview.py",
"copies": "33",
"size": "1251",
"license": "apache-2.0",
"hash": -6241592564409913000,
"line_mean": 28.7857142857,
"line_max": 102,
"alpha_frac": 0.6690647482,
"autogenerated": false,
"ratio": 3.3095238095238093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An example of a proposed NLP pipeline system. Goals are to allow for:
1. default NLP pipeline for any given language
2. users to override default pipeline
3. users to choose alternative code (classes/methods/functions) w/in the CLTK
4. users to use their own custom code (inheriting or replacing those w/in CLTK)
5. flexibility for the I/O for custom code
6. up-front checking whether I/O is possible given available processed text (e.g., a fn might depend on token str,
which must be created first)
7. specify the order in which NLP algos are run
In the following, I propose these new data types:
- ``Language``: Simple, just a place to hold attributes about a language. Can be referenced within
``Process`` or ``Pipeline`` (e.g., ``LatinPipeline.language == LatinLanguage == True``).
- ``Process``: One for each type of NLP algo we cover (e.g., tokenization, sentences splitting, pos tagging,
dependency, phonetics, prosody, etc.). Each of these is the subclassed for each language (e.g,
``TokenizationProcess`` <- ``LatinTokenizationOperation``). Here is defined the code to be used for a given
operation, plus documenting a bit more about it (I/O, description, description).
- ``Word``: This holds basic information for each token (start/end character indices, sentences index occurring
within, raw string) and more advanced info if available (e.g., NER, POS tag, dependency relations).
- ``Pipeline``: One for each language (e.g., ``Pipeline`` <- ``LatinPipeline``). A field in this is ``algo``,
which has as value a given field (e.g., ``LatinPipeline.algo == LatinTokenizationOperation == True``.
- ``Doc``: Returned by the ``NLP`` class (more specifically, by ``NLP().run_pipeline()``). Similar to what spaCy returns, only more transparent (IMHO). To the field ``Doc.words`` will be a list
of ``Word`` (``List[Word]``).
Notes:
- At the end of the module, see a dummy example of the ``cltk.NLP`` class and a use example (in ``"__main__"``),
plus op_output.
- Reqs Python 3.7
"""
import re
from dataclasses import dataclass
from typing import Callable, List
from cltk.languages.glottolog import LANGUAGES
from cltk.nlp import NLP
from cltk.utils.data_types import Word
from cltk.utils.operations import Operation
# #####################################################################################
# #######################START OPERATION TYPE##########################################
def dummy_get_token_indices(text: str) -> List[List[int]]:
    """Return ``[start, stop]`` char indices of each ``\\w+`` token in *text*.

    >>> dummy_get_token_indices("Hello there, world")
    [[0, 5], [6, 11], [13, 18]]
    """
    word_pattern = re.compile(r"\w+")
    return [list(match.span()) for match in word_pattern.finditer(text)]
@dataclass
class TokenizationOperation(Operation):
    """To be inherited for each language's tokenization declaration.

    Example: ``TokenizationProcess`` <- ``LatinTokenizationOperation``
    """

    # Discriminator identifying this family of operations.
    type = "tokenization"
@dataclass
class LatinTokenizationOperation(TokenizationOperation):
    """The default (or one of many) Latin tokenization algorithm."""

    name = "CLTK Dummy Latin Tokenizer"
    description = "This is a simple regex which divides on word spaces (``r'\w+)`` for illustrative purposes."
    # Declared I/O contract of ``algorithm``: raw text in, char-index
    # pairs out.  (NOTE: ``input`` shadows the builtin of the same name.)
    input = str
    output = List[List[int]]  # e.g., [[0, 4], [6, 11], ...]
    # The callable that actually performs the tokenization.
    algorithm = dummy_get_token_indices
    # Language metadata looked up from the Glottolog registry.
    language = LANGUAGES["lat"]
# #######################END OPERATION TYPE############################################
# #####################################################################################
# #####################################################################################
# #######################START PIPELINE TYPE###########################################
@dataclass
class Pipeline:
    """Bundle of NLP callables making up one language's processing chain.

    Each field is ``None`` until a language-specific subclass (or a user)
    assigns an implementation.
    """

    # Callable producing sentence-boundary index pairs from raw text.
    sentence_splitter: Callable[[str], List[List[int]]] = None
    # Callable producing ``Word`` objects from raw text.
    word_tokenizer: Callable[[str], List[Word]] = None
    dependency: str = None
    pos: str = None
    # Callable producing a scansion string from raw text.
    scansion: Callable[[str], str] = None
@dataclass
class LatinPipeline(Pipeline):
    """Default pipeline configuration for Latin."""

    # sentence_splitter = LatinSplitter().dummy_get_indices
    word_tokenizer = LatinTokenizationOperation
    language = LANGUAGES["lat"]
# #######################END PIPELINE TYPE#############################################
# #####################################################################################
if __name__ == "__main__":
    from cltk.languages.example_texts import LAT

    # Run the default Latin pipeline over the bundled example text and
    # show what the resulting ``Doc`` exposes.
    cltk_nlp = NLP(language="lat")
    doc_germanica = cltk_nlp.run_pipeline(LAT)
    print("")
    print("``Doc``:", doc_germanica)
    print("")
    print("``Doc.pipeline``:", doc_germanica.pipeline)
    print("")
    # Bug fix: this label previously read ``...description`` although the
    # value printed is the tokenizer's ``.name``.
    print(
        "``Doc.pipeline.word_tokenizer.name``:",
        doc_germanica.pipeline.word_tokenizer.name,
    )
    print("")
    print(
        "``Doc.pipeline.word_tokenizer.description``:",
        doc_germanica.pipeline.word_tokenizer.description,
    )
    print("")
    print("``Doc.pipeline.words[:10]``:", doc_germanica.tokens[:10])
    print("")
    print("``Doc.pipeline.indices_tokens[:10]``:", doc_germanica.indices_tokens[:10])
    print("")
| {
"repo_name": "diyclassics/cltk",
"path": "scripts/pipeline_example.py",
"copies": "4",
"size": "5512",
"license": "mit",
"hash": -5976075389355729000,
"line_mean": 35.7583892617,
"line_max": 196,
"alpha_frac": 0.6027022092,
"autogenerated": false,
"ratio": 3.9346264367816093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014704009196500043,
"num_lines": 149
} |
"""An example of a very simple space, the real numbers."""
import odl
class Reals(odl.set.LinearSpace):
    """The real numbers as a one-dimensional ODL linear space."""

    def __init__(self):
        # A linear space over the field of real numbers.
        super(Reals, self).__init__(field=odl.RealNumbers())

    def _inner(self, x1, x2):
        # Inner product of two scalars is their plain product.
        return x1.__val__ * x2.__val__

    def _lincomb(self, a, x1, b, x2, out):
        # Linear combination written in-place: out <- a*x1 + b*x2.
        out.__val__ = a * x1.__val__ + b * x2.__val__

    def _multiply(self, x1, x2, out):
        # Pointwise multiplication: out <- x1 * x2.
        out.__val__ = x1.__val__ * x2.__val__

    def __eq__(self, other):
        # All instances describe the same space, so compare by type only.
        return isinstance(other, Reals)

    def element(self, value=0):
        """Create a space element wrapping ``value`` (default 0)."""
        return RealNumber(self, value)
class RealNumber(odl.set.space.LinearSpaceElement):
    """Real vectors are floats."""

    # Wrapped numeric value of this element; set in ``__init__``.
    __val__ = None

    def __init__(self, space, v):
        super(RealNumber, self).__init__(space)
        self.__val__ = v

    def __float__(self):
        # Support ``float(x)`` conversion by delegating to the wrapped value.
        return self.__val__.__float__()

    def __str__(self):
        return str(self.__val__)
# Demo: build the space and do arithmetic on two of its elements.
R = Reals()
x = R.element(5.0)
y = R.element(10.0)
print(x)
print(y)
print(x + y)      # 15.0
print(x * y)      # 50.0
print(x - y)      # -5.0
print(3.14 * x)   # 15.7 (scalar multiplication)
| {
"repo_name": "odlgroup/odl",
"path": "examples/space/simple_r.py",
"copies": "2",
"size": "1083",
"license": "mpl-2.0",
"hash": -6421456323615906000,
"line_mean": 19.4339622642,
"line_max": 60,
"alpha_frac": 0.541089566,
"autogenerated": false,
"ratio": 3.0083333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4549422899333334,
"avg_score": null,
"num_lines": null
} |
"""An example of a very simple space, the space rn.
Including some benchmarks with an optimized version.
"""
import numpy as np
import odl
from odl.space.base_tensors import TensorSpace, Tensor
from odl.util.testutils import timer
class SimpleRn(TensorSpace):
    """The real space R^n, non-optimized implementation."""

    def __init__(self, size):
        """Create the space of real tuples with ``size`` entries."""
        super(SimpleRn, self).__init__(size, dtype=float)

    def zero(self):
        """Return the zero element of the space."""
        return self.element(np.zeros(self.size))

    def one(self):
        """Return the all-ones element of the space."""
        return self.element(np.ones(self.size))

    def _lincomb(self, a, x1, b, x2, out):
        # Linear combination evaluated in-place: out <- a*x1 + b*x2.
        out.data[:] = a * x1.data + b * x2.data

    def _inner(self, x1, x2):
        # Standard Euclidean inner product.
        return float(np.vdot(x1.data, x2.data))

    def _multiply(self, x1, x2, out):
        out.data[:] = x1.data * x2.data

    def _divide(self, x1, x2, out):
        out.data[:] = x1.data / x2.data

    def element(self, *args, **kwargs):
        """Create an element, optionally from an existing array.

        With no arguments an uninitialized element is returned; a matching
        1-d ``numpy.ndarray`` is wrapped directly; anything else is coerced
        through ``np.array`` to float64.
        """
        if not args and not kwargs:
            # No arguments: allocate uninitialized storage.
            return self.element(np.empty(self.size))
        if isinstance(args[0], np.ndarray):
            if args[0].shape == (self.size,):
                return RnVector(self, args[0])
            # Bug fix: the message previously formatted ``self.dim``, an
            # attribute this class does not define; the expected length is
            # ``self.size`` (the shape checked above).
            raise ValueError('input array {} is of shape {}, expected '
                             'shape ({},).'.format(args[0], args[0].shape,
                                                   self.size))
        # Coerce any other input to a float64 array and retry.
        # (The previous version also had an unreachable trailing ``return``
        # after this one, referencing the undefined ``self.dim``; removed.)
        return self.element(np.array(
            *args, **kwargs).astype(np.float64, copy=False))
class RnVector(Tensor):
    """Element of :class:`SimpleRn`, backed by a NumPy array."""

    def __init__(self, space, data):
        super(RnVector, self).__init__(space)
        # Raw NumPy array holding the coefficients.
        self.data = data

    def __getitem__(self, index):
        return self.data.__getitem__(index)

    def __setitem__(self, index, value):
        return self.data.__setitem__(index, value)

    def asarray(self, out=None):
        """Return the data as a ``numpy.ndarray``, optionally into ``out``.

        Bug fix: the previous implementation did ``self.data(*args)``,
        i.e. it *called* the array -- ``numpy.ndarray`` is not callable,
        so every invocation raised ``TypeError``.
        """
        if out is None:
            return self.data
        out[:] = self.data
        return out
# Smoke-construct a small instance of the custom space.
r5 = SimpleRn(5)
# odl.diagnostics.SpaceTest(r5).run_tests()

# Do some tests to compare
n = 10 ** 7
iterations = 10

# Perform some benchmarks with rn: the naive SimpleRn vs. odl's optimized
# numpy-backed space (and CUDA, when that backend is available).
opt_spc = odl.rn(n)
simple_spc = SimpleRn(n)

# Identical random input data for every implementation.
x, y, z = np.random.rand(n), np.random.rand(n), np.random.rand(n)
ox, oy, oz = (opt_spc.element(x.copy()), opt_spc.element(y.copy()),
              opt_spc.element(z.copy()))
sx, sy, sz = (simple_spc.element(x.copy()), simple_spc.element(y.copy()),
              simple_spc.element(z.copy()))
if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    cu_spc = odl.rn(n, impl='cuda')
    cx, cy, cz = (cu_spc.element(x.copy()), cu_spc.element(y.copy()),
                  cu_spc.element(z.copy()))

# --- Benchmark 1: linear combination z <- 2.13*x + 3.14*y -----------------
print(" lincomb:")
with timer("SimpleRn"):
    for _ in range(iterations):
        simple_spc.lincomb(2.13, sx, 3.14, sy, out=sz)
print("result: {}".format(sz[1:5]))

with timer("odl numpy"):
    for _ in range(iterations):
        opt_spc.lincomb(2.13, ox, 3.14, oy, out=oz)
print("result: {}".format(oz[1:5]))

if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    with timer("odl cuda"):
        for _ in range(iterations):
            cu_spc.lincomb(2.13, cx, 3.14, cy, out=cz)
    print("result: {}".format(cz[1:5]))

# --- Benchmark 2: norm ----------------------------------------------------
print("\n Norm:")
with timer("SimpleRn"):
    for _ in range(iterations):
        result = sz.norm()
print("result: {}".format(result))

with timer("odl numpy"):
    for _ in range(iterations):
        result = oz.norm()
print("result: {}".format(result))

if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    with timer("odl cuda"):
        for _ in range(iterations):
            result = cz.norm()
    print("result: {}".format(result))

# --- Benchmark 3: inner product -------------------------------------------
print("\n Inner:")
with timer("SimpleRn"):
    for _ in range(iterations):
        result = sz.inner(sx)
print("result: {}".format(result))

with timer("odl numpy"):
    for _ in range(iterations):
        result = oz.inner(ox)
print("result: {}".format(result))

if 'cuda' in odl.space.entry_points.tensor_space_impl_names():
    with timer("odl cuda"):
        for _ in range(iterations):
            result = cz.inner(cx)
    print("result: {}".format(result))
| {
"repo_name": "kohr-h/odl",
"path": "examples/space/simple_rn.py",
"copies": "2",
"size": "4104",
"license": "mpl-2.0",
"hash": 1320289686038083300,
"line_mean": 28.5251798561,
"line_max": 78,
"alpha_frac": 0.5840643275,
"autogenerated": false,
"ratio": 3.152073732718894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4736138060218894,
"avg_score": null,
"num_lines": null
} |
# An example of embedding CEF browser in wxPython on Linux.
# Important:
# On Linux importing the cefpython module must be
# the very first in your application. This is because CEF makes
# a global tcmalloc hook for memory allocation/deallocation.
# See Issue 73 that is to provide CEF builds with tcmalloc disabled:
# https://code.google.com/p/cefpython/issues/detail?id=73
import ctypes, os, sys
libcef_so = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'libcef.so')
if os.path.exists(libcef_so):
# Import local module
ctypes.CDLL(libcef_so, ctypes.RTLD_GLOBAL)
if 0x02070000 <= sys.hexversion < 0x03000000:
import cefpython_py27 as cefpython
else:
raise Exception("Unsupported python version: %s" % sys.version)
else:
# Import from package
from cefpython3 import cefpython
import wx
import time
import re
import uuid
import platform
# Which method to use for message loop processing.
# EVT_IDLE - wx application has priority (default)
# EVT_TIMER - cef browser has priority
# It seems that Flash content behaves better when using a timer.
# IMPORTANT! On Linux EVT_IDLE does not work, the events seems to
# be propagated only when you move your mouse, which is not the
# expected behavior, it is recommended to use EVT_TIMER on Linux,
# so set this value to False.
USE_EVT_IDLE = False
def GetApplicationPath(file=None):
    """Resolve *file* relative to the application directory.

    Absolute paths and anything carrying a scheme prefix (``http:`` etc.)
    are returned unchanged; relative paths are anchored at the frozen
    executable's directory, this module's directory, or the current
    working directory, in that order of preference.
    """
    import re, os, platform
    if file is None:
        # No argument: resolve the application directory itself.
        file = ""
    is_absolute = file.startswith("/") or file.startswith("\\")
    has_scheme = re.search(r"^[\w-]+:", file) is not None
    if is_absolute or has_scheme:
        # Already absolute or a URL - nothing to resolve.
        return str(file)
    if hasattr(sys, "frozen"):
        # Frozen executable (py2exe and friends).
        base = os.path.dirname(sys.executable)
    elif "__file__" in globals():
        base = os.path.dirname(os.path.realpath(__file__))
    else:
        base = os.getcwd()
    path = base + os.sep + file
    if platform.system() == "Windows":
        # Normalize any mix of slashes to the native separator.
        path = re.sub(r"[/\\]+", re.escape(os.sep), path)
    # Strip trailing separators so a bare directory comes back clean.
    path = re.sub(r"[/\\]+$", "", path)
    return path
def ExceptHook(excType, excValue, traceObject):
    """Global exception hook: log the traceback, shut down CEF, hard-exit."""
    import traceback, os, time, codecs
    # This hook does the following: in case of exception write it to
    # the "error.log" file, display it to the console, shutdown CEF
    # and exit application immediately by ignoring "finally" (os._exit()).
    errorMsg = "\n".join(traceback.format_exception(excType, excValue,
            traceObject))
    errorFile = GetApplicationPath("error.log")
    # Use the application's configured string encoding if available.
    try:
        appEncoding = cefpython.g_applicationSettings["string_encoding"]
    except:
        appEncoding = "utf-8"
    if type(errorMsg) == bytes:
        errorMsg = errorMsg.decode(encoding=appEncoding, errors="replace")
    # Best-effort append to the log file; a logging failure must not mask
    # the original exception.
    try:
        with codecs.open(errorFile, mode="a", encoding=appEncoding) as fp:
            fp.write("\n[%s] %s\n" % (
                time.strftime("%Y-%m-%d %H:%M:%S"), errorMsg))
    except:
        print("cefpython: WARNING: failed writing to error file: %s" % (
            errorFile))
    # Convert error message to ascii before printing, otherwise
    # you may get error like this:
    # | UnicodeEncodeError: 'charmap' codec can't encode characters
    errorMsg = errorMsg.encode("ascii", errors="replace")
    errorMsg = errorMsg.decode("ascii", errors="replace")
    print("\n"+errorMsg+"\n")
    cefpython.QuitMessageLoop()
    cefpython.Shutdown()
    # Hard exit: skips "finally" blocks and atexit handlers on purpose.
    os._exit(1)
class MainFrame(wx.Frame):
    """Top-level wx frame hosting an embedded CEF browser."""

    # CEF browser instance, created in __init__.
    browser = None
    # Panel the browser is attached to (not the frame itself; see below).
    mainPanel = None

    def __init__(self):
        wx.Frame.__init__(self, parent=None, id=wx.ID_ANY,
                title='wxPython CEF 3 example', size=(800,600))
        self.CreateMenu()
        # Cannot attach browser to the main frame as this will cause
        # the menu not to work.
        # --
        # You also have to set the wx.WANTS_CHARS style for
        # all parent panels/controls, if it's deeply embedded.
        self.mainPanel = wx.Panel(self, style=wx.WANTS_CHARS)
        windowInfo = cefpython.WindowInfo()
        windowInfo.SetAsChild(self.mainPanel.GetGtkWidget())
        # Linux requires adding "file://" for local files,
        # otherwise /home/some will be replaced as http://home/some
        self.browser = cefpython.CreateBrowserSync(
            windowInfo,
            # If there are problems with Flash you can disable it here,
            # by disabling all plugins.
            browserSettings={"plugins_disabled": False},
            navigateUrl="file://"+GetApplicationPath("index.html"))
        # Wire the per-browser handler plus the two global-only callbacks.
        clientHandler = ClientHandler()
        self.browser.SetClientHandler(clientHandler)
        cefpython.SetGlobalClientCallback("OnCertificateError",
                clientHandler._OnCertificateError)
        cefpython.SetGlobalClientCallback("OnBeforePluginLoad",
                clientHandler._OnBeforePluginLoad)
        # Expose Python functions/objects to page javascript.
        jsBindings = cefpython.JavascriptBindings(
            bindToFrames=False, bindToPopups=True)
        jsBindings.SetFunction("PyPrint", PyPrint)
        jsBindings.SetProperty("pyProperty", "This was set in Python")
        jsBindings.SetProperty("pyConfig", ["This was set in Python",
                {"name": "Nested dictionary", "isNested": True},
                [1,"2", None]])
        jsBindings.SetObject("external", JavascriptExternal(self.browser))
        self.browser.SetJavascriptBindings(jsBindings)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        if USE_EVT_IDLE:
            # Bind EVT_IDLE only for the main application frame.
            self.Bind(wx.EVT_IDLE, self.OnIdle)

    def CreateMenu(self):
        """Build the File/About menu bar."""
        filemenu = wx.Menu()
        filemenu.Append(1, "Open")
        exit = filemenu.Append(2, "Exit")
        self.Bind(wx.EVT_MENU, self.OnClose, exit)
        aboutmenu = wx.Menu()
        aboutmenu.Append(1, "CEF Python")
        menubar = wx.MenuBar()
        menubar.Append(filemenu,"&File")
        menubar.Append(aboutmenu, "&About")
        self.SetMenuBar(menubar)

    def OnClose(self, event):
        """Shut down the embedded browser before destroying the frame."""
        self.browser.CloseBrowser()
        self.Destroy()

    def OnIdle(self, event):
        # Pump the CEF message loop from wx idle events (USE_EVT_IDLE mode).
        cefpython.MessageLoopWork()
def PyPrint(message):
    # Exposed to page javascript via jsBindings.SetFunction("PyPrint", ...).
    # NOTE(review): this treats ``message`` as a *file path* and prints the
    # file's contents (Python 2 ``file`` builtin) -- confirm this is
    # intentional rather than a plain ``print(message)``.
    with file(message) as f:
        print f.read()
class JavascriptExternal:
    """Python object exposed to page javascript as ``window.external``."""

    # The browser this object was bound to.
    mainBrowser = None
    # Strong reference holder for in-flight string visits (see GetSource).
    stringVisitor = None

    def __init__(self, mainBrowser):
        self.mainBrowser = mainBrowser

    def GoBack(self):
        self.mainBrowser.GoBack()

    def GoForward(self):
        self.mainBrowser.GoForward()

    def CreateAnotherBrowser(self):
        # Opens a second top-level frame with its own embedded browser.
        frame = MainFrame()
        frame.Show()

    def Print(self, message):
        print(message)

    def TestAllTypes(self, *args):
        # Echo whatever argument tuple javascript passed in.
        print(args)

    def ExecuteFunction(self, *args):
        # Call a javascript function by name in the main frame.
        self.mainBrowser.GetMainFrame().ExecuteFunction(*args)

    def TestJSCallback(self, jsCallback):
        # Demonstrate invoking a javascript callback from Python.
        print("jsCallback.GetFunctionName() = %s" % jsCallback.GetFunctionName())
        print("jsCallback.GetFrame().GetIdentifier() = %s" % \
                jsCallback.GetFrame().GetIdentifier())
        jsCallback.Call("This message was sent from python using js callback")

    def TestJSCallbackComplexArguments(self, jsObject):
        # Pass deeply nested python structures through a js callback.
        jsCallback = jsObject["myCallback"];
        jsCallback.Call(1, None, 2.14, "string", ["list", ["nested list", \
                {"nested object":None}]], \
                {"nested list next":[{"deeply nested object":1}]})

    def TestPythonCallback(self, jsCallback):
        # Hand a Python callable to javascript; it calls back into PyCallback.
        jsCallback.Call(self.PyCallback)

    def PyCallback(self, *args):
        message = "PyCallback() was executed successfully! Arguments: %s" \
                % str(args)
        print(message)
        self.mainBrowser.GetMainFrame().ExecuteJavascript(
                "window.alert(\"%s\")" % message)

    def GetSource(self):
        # Must keep a strong reference to the StringVisitor object
        # during the visit.
        self.stringVisitor = StringVisitor()
        self.mainBrowser.GetMainFrame().GetSource(self.stringVisitor)

    def GetText(self):
        # Must keep a strong reference to the StringVisitor object
        # during the visit.
        self.stringVisitor = StringVisitor()
        self.mainBrowser.GetMainFrame().GetText(self.stringVisitor)

    # -------------------------------------------------------------------------
    # Cookies
    # -------------------------------------------------------------------------

    # Strong reference holder for in-flight cookie visits.
    cookieVisitor = None

    def VisitAllCookies(self):
        """Enumerate every cookie in this browser's cookie manager."""
        # Need to keep the reference alive.
        self.cookieVisitor = CookieVisitor()
        cookieManager = self.mainBrowser.GetUserData("cookieManager")
        if not cookieManager:
            # The manager is created lazily in ClientHandler.GetCookieManager.
            print("\nCookie manager not yet created! Visit http website first")
            return
        cookieManager.VisitAllCookies(self.cookieVisitor)

    def VisitUrlCookies(self):
        """Enumerate only the cookies scoped to the cookietester URL."""
        # Need to keep the reference alive.
        self.cookieVisitor = CookieVisitor()
        cookieManager = self.mainBrowser.GetUserData("cookieManager")
        if not cookieManager:
            print("\nCookie manager not yet created! Visit http website first")
            return
        cookieManager.VisitUrlCookies(
            "http://www.html-kit.com/tools/cookietester/",
            False, self.cookieVisitor)
        # .www.html-kit.com

    def SetCookie(self):
        """Create a demo cookie for the cookietester site."""
        cookieManager = self.mainBrowser.GetUserData("cookieManager")
        if not cookieManager:
            print("\nCookie manager not yet created! Visit http website first")
            return
        cookie = cefpython.Cookie()
        cookie.SetName("Created_Via_Python")
        cookie.SetValue("yeah really")
        cookieManager.SetCookie("http://www.html-kit.com/tools/cookietester/",
                cookie)
        print("\nCookie created! Visit html-kit cookietester to see it")

    def DeleteCookies(self):
        """Delete the demo cookie created by :meth:`SetCookie`."""
        cookieManager = self.mainBrowser.GetUserData("cookieManager")
        if not cookieManager:
            print("\nCookie manager not yet created! Visit http website first")
            return
        cookieManager.DeleteCookies(
            "http://www.html-kit.com/tools/cookietester/",
            "Created_Via_Python")
        print("\nCookie deleted! Visit html-kit cookietester to see the result")
class StringVisitor:
    """Receives frame source/text asynchronously from CEF and prints it."""

    def Visit(self, string):
        """Print the received string, framed by separator lines."""
        divider = "--------------------------------"
        print("\nStringVisitor.Visit(): string:")
        print(divider)
        print(string)
        print(divider)
class CookieVisitor:
    """Receives cookies one at a time from CEF's cookie manager."""

    def Visit(self, cookie, count, total, deleteCookie):
        """Print one cookie; return True to keep the enumeration going."""
        if not count:
            # First callback of the batch: report the overall cookie count.
            print("\nCookieVisitor.Visit(): total cookies: %s" % total)
        print("\nCookieVisitor.Visit(): cookie:")
        print(cookie.Get())
        # True to continue visiting cookies
        return True
class ClientHandler:
# -------------------------------------------------------------------------
# DisplayHandler
# -------------------------------------------------------------------------
def OnLoadingStateChange(self, browser, isLoading, canGoBack,
canGoForward):
print("DisplayHandler::OnLoadingStateChange()")
print("isLoading = %s, canGoBack = %s, canGoForward = %s" \
% (isLoading, canGoBack, canGoForward))
def OnAddressChange(self, browser, frame, url):
print("DisplayHandler::OnAddressChange()")
print("url = %s" % url)
def OnTitleChange(self, browser, title):
print("DisplayHandler::OnTitleChange()")
print("title = %s" % title)
def OnTooltip(self, browser, textOut):
# OnTooltip not yet implemented (both Linux and Windows),
# will be fixed in next CEF release, see Issue 783:
# https://code.google.com/p/chromiumembedded/issues/detail?id=783
print("DisplayHandler::OnTooltip()")
print("text = %s" % textOut[0])
statusMessageCount = 0
def OnStatusMessage(self, browser, value):
if not value:
# Do not notify in the console about empty statuses.
return
self.statusMessageCount += 1
if self.statusMessageCount > 3:
# Do not spam too much.
return
print("DisplayHandler::OnStatusMessage()")
print("value = %s" % value)
def OnConsoleMessage(self, browser, message, source, line):
print("DisplayHandler::OnConsoleMessage()")
print("message = %s" % message)
print("source = %s" % source)
print("line = %s" % line)
# -------------------------------------------------------------------------
# KeyboardHandler
# -------------------------------------------------------------------------
def OnPreKeyEvent(self, browser, event, eventHandle,
isKeyboardShortcutOut):
print("KeyboardHandler::OnPreKeyEvent()")
def OnKeyEvent(self, browser, event, eventHandle):
print("KeyboardHandler::OnKeyEvent()")
print("native_key_code = %s" % event["native_key_code"])
if platform.system() == "Linux":
# F5 = 71
if event["native_key_code"] == 71:
print("F5 pressed! Reloading page..")
browser.ReloadIgnoreCache()
# -------------------------------------------------------------------------
# RequestHandler
# -------------------------------------------------------------------------
def OnBeforeBrowse(self, browser, frame, request, isRedirect):
print("RequestHandler::OnBeforeBrowse()")
print("url = %s" % request.GetUrl()[:70])
return False
def OnBeforeResourceLoad(self, browser, frame, request):
print("RequestHandler::OnBeforeResourceLoad()")
print("url = %s" % request.GetUrl()[:70])
return False
def OnResourceRedirect(self, browser, frame, oldUrl, newUrlOut):
print("RequestHandler::OnResourceRedirect()")
print("old url = %s" % oldUrl[:70])
print("new url = %s" % newUrlOut[0][:70])
def GetAuthCredentials(self, browser, frame, isProxy, host, port, realm,
scheme, callback):
print("RequestHandler::GetAuthCredentials()")
print("host = %s" % host)
print("realm = %s" % realm)
callback.Continue(username="test", password="test")
return True
def OnQuotaRequest(self, browser, originUrl, newSize, callback):
print("RequestHandler::OnQuotaRequest()")
print("origin url = %s" % originUrl)
print("new size = %s" % newSize)
callback.Continue(True)
return True
def GetCookieManager(self, browser, mainUrl):
    """RequestHandler callback: one cookie manager per browser.

    Works around buggy CEF behaviour, reported here:
    https://code.google.com/p/chromiumembedded/issues/detail?id=1043
    """
    manager = browser.GetUserData("cookieManager")
    if not manager:
        # First request for this browser: create and cache a manager.
        manager = cefpython.CookieManager.CreateManager("")
        browser.SetUserData("cookieManager", manager)
    return manager
def OnProtocolExecution(self, browser, url, allowExecutionOut):
    """RequestHandler callback: allow only magnet: external protocols.

    There's no default implementation for OnProtocolExecution on Linux,
    you have to make OS system call on your own. You probably also need
    to use LoadHandler::OnLoadError() when implementing this on Linux.
    """
    print("RequestHandler::OnProtocolExecution()")
    print("url = {}".format(url))
    if url.startswith("magnet:"):
        print("Magnet link allowed!")
        allowExecutionOut[0] = True
def _OnBeforePluginLoad(self, browser, url, policyUrl, info):
# Plugins are loaded on demand, only when website requires it,
# the same plugin may be called multiple times.
print("RequestHandler::OnBeforePluginLoad()")
print("url = %s" % url)
print("policy url = %s" % policyUrl)
print("info.GetName() = %s" % info.GetName())
print("info.GetPath() = %s" % info.GetPath())
print("info.GetVersion() = %s" % info.GetVersion())
print("info.GetDescription() = %s" % info.GetDescription())
# False to allow, True to block plugin.
return False
def _OnCertificateError(self, certError, requestUrl, callback):
print("RequestHandler::OnCertificateError()")
print("certError = %s" % certError)
print("requestUrl = %s" % requestUrl)
if requestUrl.startswith(
"https://sage.math.washington.edu:8091/do-not-allow"):
print("Not allowed!")
return False
if requestUrl.startswith(
"https://sage.math.washington.edu:8091/hudson/job/"):
print("Allowed!")
callback.Continue(True)
return True
return False
# -------------------------------------------------------------------------
# LoadHandler
# -------------------------------------------------------------------------
def OnLoadStart(self, browser, frame):
print("LoadHandler::OnLoadStart()")
print("frame url = %s" % frame.GetUrl()[:70])
def OnLoadEnd(self, browser, frame, httpStatusCode):
print("LoadHandler::OnLoadEnd()")
print("frame url = %s" % frame.GetUrl()[:70])
# For file:// urls the status code = 0
print("http status code = %s" % httpStatusCode)
def OnLoadError(self, browser, frame, errorCode, errorTextList, failedUrl):
print("LoadHandler::OnLoadError()")
print("frame url = %s" % frame.GetUrl()[:70])
print("error code = %s" % errorCode)
print("error text = %s" % errorTextList[0])
print("failed url = %s" % failedUrl)
customErrorMessage = "My custom error message!"
frame.LoadUrl("data:text/html,%s" % customErrorMessage)
def OnRendererProcessTerminated(self, browser, status):
    """LoadHandler callback: log why the renderer process terminated."""
    print("LoadHandler::OnRendererProcessTerminated()")
    # Map the CEF termination-status constants to readable names.
    names = {
        cefpython.TS_ABNORMAL_TERMINATION: "TS_ABNORMAL_TERMINATION",
        cefpython.TS_PROCESS_WAS_KILLED: "TS_PROCESS_WAS_KILLED",
        cefpython.TS_PROCESS_CRASHED: "TS_PROCESS_CRASHED",
    }
    print("status = {}".format(names.get(status, "Unknown")))
def OnPluginCrashed(self, browser, pluginPath):
print("LoadHandler::OnPluginCrashed()")
print("plugin path = %s" % pluginPath)
# -------------------------------------------------------------------------
# LifespanHandler
# -------------------------------------------------------------------------
# Empty place-holders: popupFeatures, windowInfo, client, browserSettings.
def OnBeforePopup(self, browser, frame, targetUrl, targetFrameName,
popupFeatures, windowInfo, client, browserSettings, noJavascriptAccess):
print("LifespanHandler::OnBeforePopup()")
print("targetUrl = %s" % targetUrl)
allowPopups = True
return not allowPopups
class MyApp(wx.App):
    """wx application that pumps the CEF message loop from a wx.Timer.

    When the module-level USE_EVT_IDLE flag is true the timer is not
    created (an idle-event approach is used instead).
    """
    # Timer that drives cefpython.MessageLoopWork() every 10ms.
    timer = None
    timerID = 1
    timerCount = 0
    def OnInit(self):
        # Create the CEF work timer unless the idle-event approach is used.
        if not USE_EVT_IDLE:
            self.CreateTimer()
        frame = MainFrame()
        self.SetTopWindow(frame)
        frame.Show()
        return True
    def CreateTimer(self):
        # See "Making a render loop":
        # http://wiki.wxwidgets.org/Making_a_render_loop
        # Another approach is to use EVT_IDLE in MainFrame,
        # see which one fits you better.
        self.timer = wx.Timer(self, self.timerID)
        self.timer.Start(10)  # 10ms
        wx.EVT_TIMER(self, self.timerID, self.OnTimer)
    def OnTimer(self, event):
        # Give CEF a slice of the event loop on every timer tick.
        self.timerCount += 1
        # print("wxpython.py: OnTimer() %d" % self.timerCount)
        cefpython.MessageLoopWork()
    def OnExit(self):
        # When app.MainLoop() returns, MessageLoopWork() should
        # not be called anymore.
        if not USE_EVT_IDLE:
            self.timer.Stop()
if __name__ == '__main__':
    # Install the custom excepthook so fatal errors shut CEF down cleanly.
    sys.excepthook = ExceptHook
    cefpython.g_debug = False
    cefpython.g_debugFile = GetApplicationPath("debug.log")
    settings = {
        "log_severity": cefpython.LOGSEVERITY_INFO,  # LOGSEVERITY_VERBOSE
        "log_file": GetApplicationPath("debug.log"),  # Set to "" to disable.
        "release_dcheck_enabled": True,  # Enable only when debugging.
        # These directories must be set on Linux.
        "locales_dir_path": cefpython.GetModuleDirectory()+"/locales",
        "resources_dir_path": cefpython.GetModuleDirectory(),
        "browser_subprocess_path": "%s/%s" % (
            cefpython.GetModuleDirectory(), "subprocess")
    }
    # print("browser_subprocess_path="+settings["browser_subprocess_path"])
    cefpython.Initialize(settings)
    print('wx.version=%s' % wx.version())
    app = MyApp(False)
    app.MainLoop()
    # Let wx.App destructor do the cleanup before calling cefpython.Shutdown().
    del app
    cefpython.Shutdown()
| {
"repo_name": "RazorFlow/metarefresh_braces",
"path": "braces-cefpython/myapp.py",
"copies": "1",
"size": "21093",
"license": "mit",
"hash": 4363906180957723600,
"line_mean": 38.0611111111,
"line_max": 84,
"alpha_frac": 0.5969752999,
"autogenerated": false,
"ratio": 4.148898505114083,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245873805014083,
"avg_score": null,
"num_lines": null
} |
# An example of embedding CEF Python in PySide application.
import os, sys
import argparse
# Pick the cefpython module matching the interpreter. Bug fix: the
# original condition was `(3,4) <= sys.version_info < (3,4)` — an empty
# range — so the Python 3.4 branch was unreachable and 3.4 always raised
# "Unsupported python version". The upper bound is now (3,5).
libcef_dll = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'libcef.dll')
if os.path.exists(libcef_dll):
    # Import a local module
    if (2, 7) <= sys.version_info < (2, 8):
        import cefpython_py27 as cefpython
    elif (3, 4) <= sys.version_info < (3, 5):
        import cefpython_py34 as cefpython
    else:
        raise Exception("Unsupported python version: %s" % sys.version)
else:
    # Import an installed package
    from cefpython3 import cefpython
import PySide
from PySide import QtGui
from PySide import QtCore
import ctypes
from threading import Thread
import logging
from server_main import Server
def GetApplicationPath(file=None):
    """Resolve *file* against the application directory.

    Absolute paths and scheme-prefixed URLs are returned unchanged;
    relative paths are joined to the application directory, which is
    cached on the first call.
    """
    import re, os, platform
    # On Windows after downloading file and calling Browser.GoForward(),
    # current working directory is set to %UserProfile%.
    # Calling os.path.dirname(os.path.realpath(__file__))
    # returns for eg. "C:\Users\user\Downloads". A solution
    # is to cache path on first call.
    if not hasattr(GetApplicationPath, "dir"):
        if hasattr(sys, "frozen"):
            appDir = os.path.dirname(sys.executable)
        elif "__file__" in globals():
            appDir = os.path.dirname(os.path.realpath(__file__))
        else:
            appDir = os.getcwd()
        GetApplicationPath.dir = appDir
    # If file is None return current directory without trailing slash.
    file = "" if file is None else file
    isAbsolute = file.startswith("/") or file.startswith("\\")
    hasScheme = re.search(r"^[\w-]+:", file) is not None
    if isAbsolute or hasScheme:
        return str(file)
    path = GetApplicationPath.dir + os.sep + file
    if platform.system() == "Windows":
        # Collapse any mixed slash runs into the native separator.
        path = re.sub(r"[/\\]+", re.escape(os.sep), path)
    # Drop trailing separators.
    return re.sub(r"[/\\]+$", "", path)
def ExceptHook(excType, excValue, traceObject):
    """sys.excepthook replacement: log the exception and hard-exit.

    Writes the formatted traceback to "error.log", echoes it via the
    logger, shuts down CEF, then terminates with os._exit(1) —
    deliberately skipping any "finally" clauses.
    """
    import traceback, os, time, codecs
    # This hook does the following: in case of exception write it to
    # the "error.log" file, display it to the console, shutdown CEF
    # and exit application immediately by ignoring "finally" (os._exit()).
    errorMsg = "\n".join(traceback.format_exception(excType, excValue,
        traceObject))
    errorFile = GetApplicationPath("error.log")
    try:
        # Prefer the application's configured string encoding if set.
        appEncoding = cefpython.g_applicationSettings["string_encoding"]
    except:
        appEncoding = "utf-8"
    if type(errorMsg) == bytes:
        errorMsg = errorMsg.decode(encoding=appEncoding, errors="replace")
    try:
        with codecs.open(errorFile, mode="a", encoding=appEncoding) as fp:
            fp.write("\n[%s] %s\n" % (
                time.strftime("%Y-%m-%d %H:%M:%S"), errorMsg))
    except:
        # Best-effort logging only; never raise from the excepthook.
        logging.warning("cefpython: WARNING: failed writing to error file: %s" % (
            errorFile))
    # Convert error message to ascii before printing, otherwise
    # you may get error like this:
    # | UnicodeEncodeError: 'charmap' codec can't encode characters
    errorMsg = errorMsg.encode("ascii", errors="replace")
    errorMsg = errorMsg.decode("ascii", errors="replace")
    logging.error("\n"+errorMsg+"\n")
    cefpython.QuitMessageLoop()
    cefpython.Shutdown()
    os._exit(1)
class MainWindow(QtGui.QMainWindow):
    """Frameless top-level Qt window hosting the embedded CEF browser."""
    # MainFrame widget that actually embeds the browser.
    mainFrame = None
    def __init__(self, url):
        super(MainWindow, self).__init__(None)
        self.mainFrame = MainFrame(self, url)
        self.setCentralWidget(self.mainFrame)
        self.resize(1024, 768)
        self.setWindowTitle('Welcome to tornado-vice!')
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
    def focusInEvent(self, event):
        # Forward Qt focus to the native browser window handle.
        cefpython.WindowUtils.OnSetFocus(int(self.centralWidget().winIdFixed()), 0, 0, 0)
    def closeEvent(self, event):
        # Close the browser explicitly so CEF can release its resources.
        self.mainFrame.browser.CloseBrowser()
class MainFrame(QtGui.QWidget):
    """Qt widget that embeds a CEF browser as a native child window."""
    # cefpython browser instance created in __init__.
    browser = None
    def __init__(self, parent=None, url="http://www.google.fr/"):
        super(MainFrame, self).__init__(parent)
        windowInfo = cefpython.WindowInfo()
        windowInfo.SetAsChild(int(self.winIdFixed()))
        self.browser = cefpython.CreateBrowserSync(windowInfo,
            browserSettings={},
            navigateUrl=GetApplicationPath(url))
        # Key handling (fullscreen toggle) is done in OnKeyEvent below.
        self.browser.SetClientCallback('OnPreKeyEvent', self.OnKeyEvent)
        self.show()
    def OnKeyEvent(self, browser, event, eventHandle, isKeyboardShortcutOut):
        # Key code 122 (presumably F11 — TODO confirm) toggles fullscreen
        # on key-down events.
        if event['windows_key_code'] == 122:
            if event['type'] == cefpython.KEYEVENT_KEYDOWN or \
                    event['type'] == cefpython.KEYEVENT_RAWKEYDOWN:
                if self.parent().isFullScreen():
                    self.parent().showNormal()
                else:
                    self.parent().showFullScreen()
    def winIdFixed(self):
        # PySide bug: QWidget.winId() returns <PyCObject object at 0x02FD8788>,
        # there is no easy way to convert it to int.
        try:
            return int(self.winId())
        except:
            if sys.version_info[0] == 2:
                # Python 2: unwrap the PyCObject via the C API.
                ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
                ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
                return ctypes.pythonapi.PyCObject_AsVoidPtr(self.winId())
            elif sys.version_info[0] == 3:
                # Python 3: the handle is wrapped in a PyCapsule instead.
                ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
                ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object]
                return ctypes.pythonapi.PyCapsule_GetPointer(self.winId(), None)
    def moveEvent(self, event):
        # Keep the native browser window glued to the widget.
        cefpython.WindowUtils.OnSize(int(self.winIdFixed()), 0, 0, 0)
    def resizeEvent(self, event):
        cefpython.WindowUtils.OnSize(int(self.winIdFixed()), 0, 0, 0)
class CefApplication(QtGui.QApplication):
    """QApplication subclass that pumps the CEF message loop via a QTimer."""
    # QTimer driving cefpython.MessageLoopWork().
    timer = None
    def __init__(self, args):
        super(CefApplication, self).__init__(args)
        self.createTimer()
    def createTimer(self):
        # Fire every 10ms to give CEF a slice of the event loop.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.onTimer)
        self.timer.start(10)
    def onTimer(self):
        # The proper way of doing message loop should be:
        # 1. In createTimer() call self.timer.start(0)
        # 2. In onTimer() call MessageLoopWork() only when
        #    QtGui.QApplication.instance()->hasPendingEvents() returns False.
        # But... there is a bug in Qt, hasPendingEvents() returns always true.
        cefpython.MessageLoopWork()
    def stopTimer(self):
        # Stop the timer after Qt message loop ended, calls to MessageLoopWork()
        # should not happen anymore.
        self.timer.stop()
class EmbeddedBrowser(Thread):
    """
    UI-Thread allowing to run the embedded browser.
    """
    def __init__(self, url):
        super(EmbeddedBrowser, self).__init__()
        # URL loaded by the browser when run() starts.
        self.url = url
    def run(self):
        """Initialize CEF, run the Qt event loop, then tear everything down."""
        logging.info("PySide version: %s" % PySide.__version__)
        logging.info("QtCore version: %s" % QtCore.__version__)
        sys.excepthook = ExceptHook
        settings = {}
        settings["log_file"] = GetApplicationPath("debug.log")
        settings["log_severity"] = cefpython.LOGSEVERITY_INFO
        settings["release_dcheck_enabled"] = True # Enable only when debugging
        settings["browser_subprocess_path"] = "%s/%s" % (
            cefpython.GetModuleDirectory(), "subprocess")
        cefpython.Initialize(settings)
        app = CefApplication(sys.argv)
        mainWindow = MainWindow(self.url)
        mainWindow.show()
        app.exec_()
        app.stopTimer()
        # Need to destroy QApplication(), otherwise Shutdown() fails.
        # Unset main window also just to be safe.
        del mainWindow
        del app
        cefpython.Shutdown()
def parse_args():
    """Parse the command line arguments for the tornado-vice application.

    Returns an argparse.Namespace with:
      verbose -- int, count of -v flags (default 0)
      quiet   -- bool, True when -q/--quiet was given
    """
    parser = argparse.ArgumentParser(
        description="Run the tornado-vice application",
        prog="embed")
    parser.add_argument('--verbose', '-v', action="count",
        help="Set console logging verbosity level. Default \
displays only ERROR messages, -v enable WARNING messages, -vv enable INFO \
messages and -vvv enable DEBUG messages. Ignored if started using daemon.",
        default=0)
    parser.add_argument('-q', '--quiet', action="store_true",
        help="Remove ALL logging messages from the console.")
    return parser.parse_args()
import time
from conf import Conf
# Flipped to True by onReady() once the server is accepting connections;
# the __main__ block busy-waits on it before launching the browser.
ready = False
browser = None
def onReady():
    """Server callback: mark the server as ready to serve."""
    global ready
    ready = True
def onKill():
    """Server callback invoked when the server asks the client to exit.

    Fixed to use the print() function: the original used the Python 2
    print statement, which is a syntax error on Python 3 and inconsistent
    with every other print in this file.
    """
    print("Server asked to kill the client!")
if __name__ == '__main__':
    print("Starting server...")
    server = Server(parse_args(), onReady=onReady)
    server.start()
    browser = EmbeddedBrowser("http://localhost:%d" % Conf['server']['port'])
    # Busy-wait until the server signals readiness via onReady().
    while not ready:
        time.sleep(0.1)
        continue
    logging.info("Starting browser...")
    # browser can be run synchronously. When closed, the server
    # will be killed
    browser.run()
    # browser.join()
    logging.info("Browser closed, now stopping server.")
    server.stop()
    server.join()
    logging.info("Shutted down.")
| {
"repo_name": "Hiestaa/py-vid-tagger",
"path": "main.py",
"copies": "1",
"size": "9443",
"license": "mit",
"hash": -4350070762163971600,
"line_mean": 35.6215139442,
"line_max": 89,
"alpha_frac": 0.6118818172,
"autogenerated": false,
"ratio": 3.981028667790894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005674273737009506,
"num_lines": 251
} |
# An example of flipping feature polygons right side up.
import datetime
import logging
import sys
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
def signed_area(coords):
    """Return the signed area enclosed by a ring using the linear time
    algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
    indicates a counter-clockwise oriented ring.
    """
    xs = [c[0] for c in coords]
    ys = [c[1] for c in coords]
    # Re-append the element at index 1 so the shoelace terms for the
    # last vertex of a closed ring are well defined.
    xs.append(xs[1])
    ys.append(ys[1])
    area = 0.0
    for i in range(1, len(coords)):
        area += xs[i] * (ys[i + 1] - ys[i - 1])
    return area / 2.0
# Re-orient every polygon feature counter-clockwise and write the result
# to a new shapefile. Fixed for Python 3: the original used the Python 2
# only "except Exception, e" syntax (the bound name was unused anyway).
with fiona.open('docs/data/test_uk.shp', 'r') as source:
    # Copy the source schema and add two new properties.
    schema = source.schema.copy()
    schema['properties']['s_area'] = 'float'
    schema['properties']['timestamp'] = 'str'
    # Create a sink for processed features with the same format and
    # coordinate reference system as the source.
    with fiona.open(
            'oriented-ccw.shp', 'w',
            driver=source.driver,
            schema=schema,
            crs=source.crs
            ) as sink:
        for f in source:
            try:
                # If any feature's polygon is facing "down" (has rings
                # wound clockwise), its rings will be reordered to flip
                # it "up".
                g = f['geometry']
                assert g['type'] == 'Polygon'
                rings = g['coordinates']
                sa = sum(signed_area(r) for r in rings)
                if sa < 0.0:
                    rings = [r[::-1] for r in rings]
                    g['coordinates'] = rings
                    f['geometry'] = g
                # Add the signed area of the polygon and a timestamp
                # to the feature properties map.
                f['properties'].update(
                    s_area=sa,
                    timestamp=datetime.datetime.now().isoformat())
                sink.write(f)
            except Exception:
                logging.exception("Error processing feature %s:", f['id'])
| {
"repo_name": "perrygeo/Fiona",
"path": "examples/orient-ccw.py",
"copies": "7",
"size": "2082",
"license": "bsd-3-clause",
"hash": 3467933461332513300,
"line_mean": 31.0307692308,
"line_max": 74,
"alpha_frac": 0.5398655139,
"autogenerated": false,
"ratio": 4.042718446601942,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8082583960501941,
"avg_score": null,
"num_lines": null
} |
"""An example of how to generate a 2D structured points dataset
using numpy arrays. Also shown is a way to visualize this data with
the mayavi2 application.
The script can be run like so::
$ mayavi2 -x structured_points2d.py
Alternatively, it can be run as::
$ python structured_points2d.py
"""
# Author: Prabhu Ramachandran <prabhu at aero dot iitb dot ac dot in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD style.
from numpy import arange, sqrt, sin
from tvtk.api import tvtk
from mayavi.scripts import mayavi2
# Generate the scalar values.
# Generate the scalar values: a radially symmetric sin(r)/r surface
# sampled on a 50x50 grid centred on the origin.
x = (arange(0.1, 50.0)-25)/2.0
y = (arange(0.1, 50.0)-25)/2.0
r = sqrt(x[:,None]**2+y**2)
z = 5.0*sin(r)/r
# Make the tvtk dataset.
# tvtk.ImageData is identical and could also be used here.
spoints = tvtk.StructuredPoints(origin=(-12.5,-12.5,0),
                                spacing=(0.5,0.5,1),
                                dimensions=(50,50,1))
# Transpose the array data due to VTK's implicit ordering. VTK assumes
# an implicit ordering of the points: X co-ordinate increases first, Y
# next and Z last. We flatten it so the number of components is 1.
spoints.point_data.scalars = z.T.flatten()
spoints.point_data.scalars.name = 'scalar'
# Uncomment the next two lines to save the dataset to a VTK XML file.
#w = tvtk.XMLImageDataWriter(input=spoints, file_name='spoints2d.vti')
#w.write()
# Now view the data.
@mayavi2.standalone
def view():
    """Visualize `spoints` as a scalar-warped, shaded surface.

    NOTE(review): the `mayavi` name is not defined in this file; it
    appears to be injected by the mayavi2 runtime when run through the
    @mayavi2.standalone decorator — confirm against mayavi docs.
    """
    from mayavi.sources.vtk_data_source import VTKDataSource
    from mayavi.filters.warp_scalar import WarpScalar
    from mayavi.filters.poly_data_normals import PolyDataNormals
    from mayavi.modules.surface import Surface
    mayavi.new_scene()
    src = VTKDataSource(data = spoints)
    mayavi.add_source(src)
    # Warp the plane by scalar value, then recompute normals for shading.
    mayavi.add_filter(WarpScalar())
    mayavi.add_filter(PolyDataNormals())
    s = Surface()
    mayavi.add_module(s)
if __name__ == '__main__':
view()
| {
"repo_name": "liulion/mayavi",
"path": "docs/source/mayavi/auto/structured_points2d.py",
"copies": "10",
"size": "1902",
"license": "bsd-3-clause",
"hash": 8284907790192792000,
"line_mean": 30.7,
"line_max": 70,
"alpha_frac": 0.6929547844,
"autogenerated": false,
"ratio": 3.1180327868852458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8810987571285245,
"avg_score": null,
"num_lines": null
} |
"""An example of how to generate a 3D structured points dataset
using numpy arrays. Also shown is a way to visualize this data with
the mayavi2 application.
The script can be run like so::
$ mayavi2 -x structured_points3d.py
Alternatively, it can be run as::
$ python structured_points3d.py
"""
# Author: Prabhu Ramachandran <prabhu at aero dot iitb dot ac dot in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD style.
from tvtk.api import tvtk
from tvtk.array_handler import get_vtk_array_type
from numpy import array, ogrid, sin, ravel
from mayavi.scripts import mayavi2
# Make the data.
dims = array((128, 128, 128))
vol = array((-5., 5, -5, 5, -5, 5))
origin = vol[::2]
spacing = (vol[1::2] - origin)/(dims -1)
xmin, xmax, ymin, ymax, zmin, zmax = vol
x, y, z = ogrid[xmin:xmax:dims[0]*1j,
ymin:ymax:dims[1]*1j,
zmin:zmax:dims[2]*1j]
x, y, z = [t.astype('f') for t in (x, y, z)]
scalars = sin(x*y*z)/(x*y*z)
# Make the tvtk dataset.
spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing,
dimensions=dims)
# The copy makes the data contiguous and the transpose makes it
# suitable for display via tvtk. Note that it is not necessary to
# make the data contiguous since in that case the array is copied
# internally.
s = scalars.transpose().copy()
spoints.point_data.scalars = ravel(s)
spoints.point_data.scalars.name = 'scalars'
# This is needed in slightly older versions of VTK (like the 5.0.2
# release) to prevent a segfault. VTK does not detect the correct
# data type.
spoints.scalar_type = get_vtk_array_type(s.dtype)
# Uncomment the next two lines to save the dataset to a VTK XML file.
#w = tvtk.XMLImageDataWriter(input=spoints, file_name='spoints3d.vti')
#w.write()
# Now view the data.
@mayavi2.standalone
def view():
    """Show `spoints` with an outline and an interactive image plane.

    NOTE(review): `mayavi` is not defined in this file; it appears to be
    injected at runtime by the @mayavi2.standalone decorator — confirm.
    """
    from mayavi.sources.vtk_data_source import VTKDataSource
    from mayavi.modules.outline import Outline
    from mayavi.modules.image_plane_widget import ImagePlaneWidget
    mayavi.new_scene()
    src = VTKDataSource(data = spoints)
    mayavi.add_source(src)
    mayavi.add_module(Outline())
    mayavi.add_module(ImagePlaneWidget())
if __name__ == '__main__':
view()
| {
"repo_name": "liulion/mayavi",
"path": "docs/source/mayavi/auto/structured_points3d.py",
"copies": "2",
"size": "2210",
"license": "bsd-3-clause",
"hash": 2801453399149719600,
"line_mean": 31.0289855072,
"line_max": 70,
"alpha_frac": 0.6954751131,
"autogenerated": false,
"ratio": 3.082287308228731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9758456560539541,
"avg_score": 0.0038611721578379783,
"num_lines": 69
} |
# An example of how to generate a feed by hand.
import datetime
import PyRSS2Gen
# Build the feed by hand: channel metadata plus two items.
rss = PyRSS2Gen.RSS2(
    title = "Andrew's PyRSS2Gen feed",
    link = "http://www.dalkescientific.com/Python/PyRSS2Gen.html",
    description = "The latest news about PyRSS2Gen, a "
                  "Python library for generating RSS2 feeds",
    lastBuildDate = datetime.datetime.now(),
    items = [
       PyRSS2Gen.RSSItem(
         title = "PyRSS2Gen-0.0 released",
         link = "http://www.dalkescientific.com/news/030906-PyRSS2Gen.html",
         description = "Dalke Scientific today announced PyRSS2Gen-0.0, "
                       "a library for generating RSS feeds for Python. ",
         guid = PyRSS2Gen.Guid("http://www.dalkescientific.com/news/"
                               "030906-PyRSS2Gen.html"),
         pubDate = datetime.datetime(2003, 9, 6, 21, 31)),
       PyRSS2Gen.RSSItem(
         title = "Thoughts on RSS feeds for bioinformatics",
         link = "http://www.dalkescientific.com/writings/diary/"
                "archive/2003/09/06/RSS.html",
         description = "One of the reasons I wrote PyRSS2Gen was to "
                       "experiment with RSS for data collection in "
                       "bioinformatics. Last year I came across...",
         guid = PyRSS2Gen.Guid("http://www.dalkescientific.com/writings/"
                               "diary/archive/2003/09/06/RSS.html"),
         pubDate = datetime.datetime(2003, 9, 6, 21, 49)),
    ])

# Write the XML. Fixed a resource leak: the original passed a bare
# open("pyrss2gen.xml", "w") handle that was never closed; the with
# statement guarantees the file is flushed and closed.
with open("pyrss2gen.xml", "w") as outfile:
    rss.write_xml(outfile)
| {
"repo_name": "antonsotin/vkfeedtrue",
"path": "PyRSS2Gen/example.py",
"copies": "18",
"size": "1526",
"license": "bsd-2-clause",
"hash": -1672468069548763600,
"line_mean": 42.6,
"line_max": 76,
"alpha_frac": 0.6002621232,
"autogenerated": false,
"ratio": 3.436936936936937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018240420887425373,
"num_lines": 35
} |
"""An example of how to generate a structured grid dataset using
numpy arrays. Also shown is a way to visualize this data with
the mayavi2 application.
The script can be run like so::
$ mayavi2 -x structured_grid.py
Alternatively, it can be run as::
$ python structured_grid.py
"""
# Authors: Eric Jones <eric at enthought dot com>
# Prabhu Ramachandran <prabhu at aero dot iitb dot ac dot in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD style.
import numpy as np
from numpy import cos, sin, pi
from tvtk.api import tvtk
from mayavi.scripts import mayavi2
def generate_annulus(r=None, theta=None, z=None):
    """Return the points of a structured cylindrical annular mesh.

    Parameters
    ----------
    r : array, optional
        Radial coordinates of the grid points; default linspace(1, 2, 11).
    theta : array, optional
        Angular coordinates; default linspace(0, 2*pi, 11).
    z : array, optional
        Axial coordinates; default linspace(0, 1, 11).

    Returns
    -------
    points : (len(r)*len(theta)*len(z), 3) array
        Points organized plane by plane, starting with the first z value;
        within a plane the innermost ring comes first. With the default
        arguments the result is a 1331x3 array.
    """
    # Fall back to the default annular grid where arguments are omitted.
    if r is None:
        r = np.linspace(1.0, 2.0, 11)
    if theta is None:
        theta = np.linspace(0, 2 * pi, 11)
    if z is None:
        z = np.linspace(0.0, 1.0, 11)
    # Cartesian coordinates of a single z-plane, flattened ring by ring.
    x_plane = (cos(theta) * r[:, None]).ravel()
    y_plane = (sin(theta) * r[:, None]).ravel()
    plane_size = len(x_plane)
    # One copy of the plane per z value.
    points = np.empty([plane_size * len(z), 3])
    # Stamp the same x/y pattern into every plane, varying only z.
    for index, z_value in enumerate(z):
        plane = points[index * plane_size:(index + 1) * plane_size]
        plane[:, 0] = x_plane
        plane[:, 1] = y_plane
        plane[:, 2] = z_value
    return points
# Make the data.
dims = (51, 25, 25)
# Note here that the 'x' axis corresponds to 'theta'
theta = np.linspace(0, 2*np.pi, dims[0])
# 'y' corresponds to varying 'r'
r = np.linspace(1, 10, dims[1])
z = np.linspace(0, 5, dims[2])
pts = generate_annulus(r, theta, z)
# Uncomment the following if you want to add some noise to the data.
#pts += np.random.randn(dims[0]*dims[1]*dims[2], 3)*0.04
sgrid = tvtk.StructuredGrid(dimensions=dims)
sgrid.points = pts
s = np.sqrt(pts[:,0]**2 + pts[:,1]**2 + pts[:,2]**2)
sgrid.point_data.scalars = np.ravel(s.copy())
sgrid.point_data.scalars.name = 'scalars'
# Uncomment the next two lines to save the dataset to a VTK XML file.
#w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
#w.write()
# View the data.
@mayavi2.standalone
def view():
    """Show the structured grid with an outline and three grid planes.

    NOTE(review): `mayavi` is not defined in this file; it appears to be
    injected at runtime by the @mayavi2.standalone decorator — confirm.
    """
    from mayavi.sources.vtk_data_source import VTKDataSource
    from mayavi.modules.api import Outline, GridPlane
    mayavi.new_scene()
    src = VTKDataSource(data=sgrid)
    mayavi.add_source(src)
    mayavi.add_module(Outline())
    # One grid plane per axis.
    g = GridPlane()
    g.grid_plane.axis = 'x'
    mayavi.add_module(g)
    g = GridPlane()
    g.grid_plane.axis = 'y'
    mayavi.add_module(g)
    g = GridPlane()
    g.grid_plane.axis = 'z'
    mayavi.add_module(g)
if __name__ == '__main__':
view()
| {
"repo_name": "alexandreleroux/mayavi",
"path": "examples/mayavi/advanced_visualization/structured_grid.py",
"copies": "10",
"size": "4027",
"license": "bsd-3-clause",
"hash": 8676657216079601000,
"line_mean": 32.0081967213,
"line_max": 71,
"alpha_frac": 0.6342190216,
"autogenerated": false,
"ratio": 3.2739837398373983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004469527326531264,
"num_lines": 122
} |
# An example of how to publish a simple wsgi app under isapi_wsgi and serve
# from IIS root.
# Using the ISAPISimpleHandler which will create a new instance for each incoming
# request.
#
# Executing this script (or any server config script) will install the extension
# into your web server and will create a "loader" DLL _demo.dll in the
# current directory. As the server executes, the PyISAPI framework will load
# this module and create the Extension object.
# A Virtual Directory named "isapi-wsgi-demo" is setup. This dir has the ISAPI
# WSGI extension as the only application, mapped to file-extension '*'.
# Therefore, isapi_wsgi extension handles *all* requests in this directory.
#
# To launch this application from a web browser use a url similar to:
#
# http://localhost/
#
# A "Hello world!" and the WSGI environment should be displayed.
def demo_app(environ, start_response):
    """Demo app from wsgiref: greet, then dump the WSGI environment."""
    start_response("200 OK", [('Content-Type', 'text/plain')])
    # Each yielded chunk is one newline-terminated line.
    yield "Hello world!\n"
    yield "\n"
    for pair in sorted(environ.items()):
        yield ' = '.join(pair) + "\n"
import isapi_wsgi
# The entry points for the ISAPI extension.
def __ExtensionFactory__():
    """ISAPI entry point: wrap the WSGI app in a per-request handler."""
    return isapi_wsgi.ISAPISimpleHandler(demo_app)
if __name__=='__main__':
    # If run from the command-line, install ourselves.
    from isapi.install import *
    params = ISAPIParameters()
    # Setup the virtual directories - this is a list of directories our
    # extension uses - in this case only 1.
    # Each extension has a "script map" - this is the mapping of ISAPI
    # extensions.
    sm = [
        ScriptMapParams(Extension="*", Flags=0)
    ]
    # To serve from root, just set Name="/"
    vd = VirtualDirParameters(Name="/",
                              Description = "ISAPI-WSGI ISAPISimpleHandler root Demo",
                              ScriptMaps = sm,
                              ScriptMapUpdate = "replace"
                              )
    params.VirtualDirs = [vd]
    HandleCommandLine(params)
| {
"repo_name": "Coder-666/isapi-wsgi",
"path": "examples/demo_serve_from_root.py",
"copies": "5",
"size": "2117",
"license": "mit",
"hash": -7027218211287635000,
"line_mean": 38.7115384615,
"line_max": 86,
"alpha_frac": 0.6424185168,
"autogenerated": false,
"ratio": 3.9422718808193666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7084690397619366,
"avg_score": null,
"num_lines": null
} |
# An example of how to publish a simple wsgi app under isapi_wsgi using
# the ISAPISimpleHandler which will create a new instance for each incoming
# request.
#
# Executing this script (or any server config script) will install the extension
# into your web server and will create a "loader" DLL _demo.dll in the
# current directory. As the server executes, the PyISAPI framework will load
# this module and create the Extension object.
# A Virtual Directory named "isapi-wsgi-demo" is setup. This dir has the ISAPI
# WSGI extension as the only application, mapped to file-extension '*'.
# Therefore, isapi_wsgi extension handles *all* requests in this directory.
#
# To launch this application from a web browser use a url similar to:
#
# http://localhost/isapi-wsgi-demo/
#
# A "Hello world!" and the WSGI environment should be displayed.
def demo_app(environ, start_response):
    """Demo app from wsgiref: greeting plus a sorted environment dump."""
    start_response("200 OK", [('Content-Type', 'text/plain')])
    terminator = '\n'
    yield 'Hello world!' + terminator
    yield terminator
    for item in sorted(environ.items()):
        yield ' = '.join(item) + terminator
import isapi_wsgi
# The entry points for the ISAPI extension.
def __ExtensionFactory__():
    """ISAPI entry point: wrap the WSGI app in a per-request handler."""
    return isapi_wsgi.ISAPISimpleHandler(demo_app)
if __name__=='__main__':
    # If run from the command-line, install ourselves.
    from isapi.install import *
    params = ISAPIParameters()
    # Setup the virtual directories - this is a list of directories our
    # extension uses - in this case only 1.
    # Each extension has a "script map" - this is the mapping of ISAPI
    # extensions.
    sm = [
        ScriptMapParams(Extension="*", Flags=0)
    ]
    vd = VirtualDirParameters(Name="isapi-wsgi-demo",
                              Description = "ISAPI-WSGI ISAPISimpleHandler Demo",
                              ScriptMaps = sm,
                              ScriptMapUpdate = "replace"
                              )
    params.VirtualDirs = [vd]
    HandleCommandLine(params)
| {
"repo_name": "jbmohler/isapi-wsgi",
"path": "examples/demo.py",
"copies": "5",
"size": "2069",
"license": "mit",
"hash": -6094352626424980000,
"line_mean": 39.38,
"line_max": 81,
"alpha_frac": 0.6462058966,
"autogenerated": false,
"ratio": 3.925996204933586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7072202101533587,
"avg_score": null,
"num_lines": null
} |
# An example of how to publish a simple wsgi app under isapi_wsgi using
# the ISAPIThreadPoolHandler.
#
# Executing this script (or any server config script) will install the extension
# into your web server and will create a "loader" DLL _demo_use_threadpool.dll in the
# current directory. As the server executes, the PyISAPI framework will load
# this module and create the Extension object.
# A Virtual Directory named "isapi-wsgi-demo-usethreadpool" is setup. This dir has the ISAPI
# WSGI extension as the only application, mapped to file-extension '*'.
# Therefore, isapi_wsgi extension handles *all* requests in this directory.
#
# To launch this application from a web browser use a url similar to:
#
# http://localhost/isapi-wsgi-demo-use-threadpool/
#
# A "Hello world!" and the WSGI environment should be displayed.
def demo_app(environ,start_response):
    """Demo app from wsgiref"""
    # Minimal WSGI application (Python 2): responds 200 with "Hello world!"
    # followed by one "key = value" line per (sorted) WSGI environ entry.
    from StringIO import StringIO
    stdout = StringIO()
    print >>stdout, "Hello world!"
    print >>stdout
    # Sort so the environment dump is deterministic and easy to scan.
    h = environ.items(); h.sort()
    for k,v in h:
        print >>stdout, k,'=',`v`
    start_response("200 OK", [('Content-Type','text/plain')])
    # WSGI body must be an iterable of strings.
    return [stdout.getvalue()]
import isapi_wsgi
# The entry points for the ISAPI extension.
def __ExtensionFactory__():
    # Entry point called by the PyISAPI framework inside IIS; wraps demo_app
    # in the thread-pool-backed isapi_wsgi handler.
    return isapi_wsgi.ISAPIThreadPoolHandler(demo_app)
if __name__=='__main__':
    # If run from the command-line, install ourselves.
    from isapi.install import *
    params = ISAPIParameters()
    # Setup the virtual directories - this is a list of directories our
    # extension uses - in this case only 1.
    # Each extension has a "script map" - this is the mapping of ISAPI
    # extensions.
    # Extension="*" routes every request in the directory to this handler.
    sm = [
        ScriptMapParams(Extension="*", Flags=0)
    ]
    vd = VirtualDirParameters(Name="isapi-wsgi-demo-use-threadpool",
                              Description = "ISAPI-WSGI ISAPIThreadPoolHandler Demo",
                              ScriptMaps = sm,
                              ScriptMapUpdate = "replace"
                              )
    params.VirtualDirs = [vd]
    # Installs/updates/removes the extension in IIS based on sys.argv.
    HandleCommandLine(params)
| {
"repo_name": "ig0774/isapi_wsgi",
"path": "examples/demo_use_threadpool.py",
"copies": "5",
"size": "2158",
"license": "mit",
"hash": 3027736841573711000,
"line_mean": 39.5,
"line_max": 92,
"alpha_frac": 0.6515291937,
"autogenerated": false,
"ratio": 3.9094202898550723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011423886239267227,
"num_lines": 52
} |
""" An example of how to use Chaco to render a visual Traits UI editor.
This particular editor allows the user to set two endpoints of an
interval.
"""
from __future__ import with_statement
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit == 'wx':
from traitsui.wx.editor import Editor
else:
from traitsui.qt4.editor import Editor
from traitsui.editor_factory import EditorFactory
from enable.window import Window
from enable.api import ColorTrait
from chaco.api import OverlayPlotContainer, create_line_plot, \
LinePlot
from chaco.tools.api import RangeSelection, RangeSelectionOverlay
from traits.api import Int, TraitType, Instance, Float
from math import pi
class Interval(TraitType):
    """Trait whose value is a pair (low, high) with low <= high."""

    info_text = "an interval (x,y) where x < y"

    def __init__(self, low=0, high=1, **metadata):
        endpoints = (low, high)
        TraitType.__init__(self, endpoints, **metadata)
        self.value = endpoints

    def validate(self, object, name, value):
        """Accept `value` only when its endpoints are ordered."""
        lo, hi = value
        if lo > hi:
            # error() reports the invalid assignment to the traits machinery.
            self.error(object, name, value)
        else:
            return value

    def create_editor(self):
        """Return the default editor factory for this trait."""
        return IntervalEditor()
class IntervalEditorFactory(EditorFactory):
    """Factory that builds fixed-size IntervalEditorImpl editors."""

    width = Int(300)   # editor width in pixels
    height = Int(40)   # editor height in pixels

    def simple_editor(self, ui, object, name, description, parent):
        """Create the simple-style editor for the named interval trait,
        seeded with the trait's current endpoints."""
        lo, hi = object.trait(name).trait_type.value
        return IntervalEditorImpl(parent, factory=self, ui=ui,
                                  object=object, name=name,
                                  description=description,
                                  low=lo, high=hi)
class RangeKnobsOverlay(RangeSelectionOverlay):
    """Overlay that renders the selected range as a thin bar with a round
    "knob" at each endpoint."""
    # Knob radius in pixels.
    radius = Float(3)
    low_color = ColorTrait("red")
    high_color = ColorTrait("red")
    # Override the default alpha and border color, inherited from
    # RangeSelectionOverlay; these are more appropriate for our application.
    alpha = Float(0.8)
    border_color = ColorTrait("black")

    def overlay(self, component, gc, view_bounds=None, mode="normal"):
        # Draw at the vertical midpoint of the component.
        mid_y = component.position[1] + component.bounds[1]/2
        # Draw each of a possibly disjoint set of selections
        coords = self._get_selection_screencoords()
        for coord in coords:
            start, end = coord
            with gc:
                gc.set_alpha(self.alpha)
                gc.set_stroke_color(self.border_color_)
                gc.set_line_width(self.border_width)
                # Connecting bar, inset by one knob radius at each end.
                gc.rect(start + self.radius, mid_y - 1,
                        (end - start - 2*self.radius), 2)
                gc.draw_path()
                gc.set_fill_color(self.low_color_)
                self._circle(gc, start, mid_y, self.radius)
                # Have to stroke/fill the path before we change out the
                # fill color
                gc.draw_path()
                gc.set_fill_color(self.high_color_)
                self._circle(gc, end, mid_y, self.radius)
                gc.draw_path()

    def _circle(self, gc, x, y, radius):
        # Append a circle centered at (x, y) to the current path.
        # NOTE(review): gc.arc is given 2*radius, so knobs are drawn at twice
        # the nominal `radius` — confirm whether that doubling is intended.
        with gc:
            gc.translate_ctm(x, y)
            gc.arc(0, 0, 2*radius, 0, 2*pi)
class IntervalEditorImpl(Editor):
    """Traits UI editor implementation: shows the interval as a Chaco line
    plot whose range-selection endpoints the user can drag."""
    low = Int
    high = Int
    plot = Instance(LinePlot)

    def init(self, parent):
        """Build the Chaco plot, attach the range-selection tool and knob
        overlay, and install the resulting toolkit control."""
        factory = self.factory
        container = OverlayPlotContainer(bgcolor='transparent',
                                         padding=0, spacing=0)
        window = Window(parent, component=container)
        interval = self.high - self.low
        # A horizontal segment from low to high at y = 0.5.
        data = ([self.low, self.high], [0.5]*2)
        plot = create_line_plot(data, color='black', bgcolor="sys_window")
        # Pad the x-range by 10% of the interval on each side.
        plot.x_mapper.range.low = self.low - interval*0.1
        plot.x_mapper.range.high = self.high + interval*0.1
        plot.y_mapper.range.high = 1.0
        plot.y_mapper.range.low = 0.0
        range_selection = RangeSelection(plot, left_button_selects=True)
        # Do not allow the user to reset the range
        range_selection.event_state = "selected"
        range_selection.deselect = lambda x: None
        range_selection.on_trait_change(self.update_interval, 'selection')
        plot.tools.append(range_selection)
        plot.overlays.append(RangeKnobsOverlay(plot))
        self.plot = plot
        container.add(self.plot)
        # To set the low and high, we're actually going to set the
        # 'selection' metadata on the line plot to the tuple (low,high).
        plot.index.metadata["selections"] = (0, 1.0)
        # Tell the editor what to display
        self.control = window.control
        if ETSConfig.toolkit == 'wx':
            self.control.SetSize((factory.width, factory.height))
        else:
            self.control.setMaximumSize(factory.width, factory.height)

    def update_interval(self, value):
        """Clamp the dragged selection to [0, 1] and push it into both the
        plot metadata and the edited trait value."""
        low, high = value
        low = max(low, 0)
        high = min(high, 1)
        self.plot.index.metadata['selections'] = (low, high)
        self.value = (low, high)

    def update_editor(self):
        # Model -> control refresh is not supported; the control drives the
        # value after creation.
        pass
# The user normally uses the factory as if it were an editor, e.g.:
#
#     View(Item('interval', editor=IntervalEditor()))
#
IntervalEditor = IntervalEditorFactory

# --- Demonstration ---
if __name__ == "__main__":
    from traits.api import HasTraits
    from traitsui.api import View, Item

    class IntervalTest(HasTraits):
        # Edited with the custom Chaco interval editor defined above.
        interval = Interval(low=0, high=1)
        traits_view = View(Item('interval',
                                editor=IntervalEditor()
                                ),
                           resizable=True)

    it = IntervalTest()
    # Opens the Traits UI dialog (blocks until closed).
    it.configure_traits()
| {
"repo_name": "burnpanck/chaco",
"path": "examples/demo/chaco_trait_editor.py",
"copies": "2",
"size": "5689",
"license": "bsd-3-clause",
"hash": -8374991004959807000,
"line_mean": 30.6055555556,
"line_max": 76,
"alpha_frac": 0.5972930216,
"autogenerated": false,
"ratio": 3.9370242214532873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006484025234025233,
"num_lines": 180
} |
""" An example of how to use Chaco to render a visual Traits UI editor.
This particular editor allows the user to set two endpoints of an
interval.
"""
# FIXME: WX-only, and broken even there.
from __future__ import with_statement
# Force WX
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'wx'
from traitsui.editor_factory import EditorFactory
from traitsui.wx.editor import Editor
from enable.window import Window
from enable.api import ColorTrait
from chaco.api import OverlayPlotContainer, create_line_plot, \
LinePlot
from chaco.tools.api import RangeSelection, RangeSelectionOverlay
from traits.api import Int, TraitType, Instance, Float
from math import pi
class Interval(TraitType):
    """Trait that holds an ordered pair (low, high)."""

    info_text = "an interval (x,y) where x < y"

    def __init__(self, low=0, high=1, **metadata):
        TraitType.__init__(self, (low, high), **metadata)
        self.value = (low, high)

    def validate(self, object, name, value):
        """Return `value` when its two endpoints are ordered; otherwise
        report the invalid assignment."""
        first, second = value
        if not (first <= second):
            self.error(object, name, value)
        else:
            return value

    def create_editor(self):
        """Default editor factory for interval traits."""
        return IntervalEditor()
class IntervalEditorFactory(EditorFactory):
    """Editor factory for Interval traits (fixed-size wx control)."""

    width = Int(300)
    height = Int(40)

    def simple_editor(self, ui, object, name, description, parent):
        """Instantiate the editor, seeding it with the trait's endpoints."""
        trait_type = object.trait(name).trait_type
        low, high = trait_type.value
        return IntervalEditorImpl(parent,
                                  factory=self,
                                  ui=ui,
                                  object=object,
                                  name=name,
                                  description=description,
                                  low=low,
                                  high=high)
class RangeKnobsOverlay(RangeSelectionOverlay):
    """Overlay that renders the selected range as a thin bar with a round
    "knob" at each endpoint."""
    # Knob radius in pixels.
    radius = Float(3)
    low_color = ColorTrait("red")
    high_color = ColorTrait("red")
    # Override the default alpha and border color, inherited from
    # RangeSelectionOverlay; these are more appropriate for our application.
    alpha = Float(0.8)
    border_color = ColorTrait("black")

    def overlay(self, component, gc, view_bounds=None, mode="normal"):
        # Draw at the vertical midpoint of the component.
        mid_y = component.position[1] + component.bounds[1]/2
        # Draw each of a possibly disjoint set of selections
        coords = self._get_selection_screencoords()
        for coord in coords:
            start, end = coord
            with gc:
                gc.set_alpha(self.alpha)
                gc.set_stroke_color(self.border_color_)
                gc.set_line_width(self.border_width)
                # Connecting bar, inset by one knob radius at each end.
                gc.rect(start + self.radius, mid_y - 1,
                        (end - start - 2*self.radius), 2)
                gc.draw_path()
                gc.set_fill_color(self.low_color_)
                self._circle(gc, start, mid_y, self.radius)
                # Have to stroke/fill the path before we change out the
                # fill color
                gc.draw_path()
                gc.set_fill_color(self.high_color_)
                self._circle(gc, end, mid_y, self.radius)
                gc.draw_path()

    def _circle(self, gc, x, y, radius):
        # Append a circle centered at (x, y) to the current path.
        # NOTE(review): gc.arc is given 2*radius, so knobs are drawn at twice
        # the nominal `radius` — confirm whether that doubling is intended.
        with gc:
            gc.translate_ctm(x, y)
            gc.arc(0, 0, 2*radius, 0, 2*pi)
class IntervalEditorImpl(Editor):
    """Traits UI (wx) editor implementation: shows the interval as a Chaco
    line plot whose range-selection endpoints the user can drag."""
    low = Int
    high = Int
    plot = Instance(LinePlot)

    def init(self, parent):
        """Build the Chaco plot, attach the range-selection tool and knob
        overlay, and install the resulting wx control."""
        factory = self.factory
        container = OverlayPlotContainer(bgcolor='transparent',
                                         padding=0, spacing=0)
        window = Window(parent, component=container)
        interval = self.high - self.low
        # A horizontal segment from low to high at y = 0.5.
        data = ([self.low, self.high], [0.5]*2)
        plot = create_line_plot(data, color='black', bgcolor="sys_window")
        # Pad the x-range by 10% of the interval on each side.
        plot.x_mapper.range.low = self.low - interval*0.1
        plot.x_mapper.range.high = self.high + interval*0.1
        plot.y_mapper.range.high = 1.0
        plot.y_mapper.range.low = 0.0
        range_selection = RangeSelection(plot, left_button_selects=True)
        # Do not allow the user to reset the range
        range_selection.event_state = "selected"
        range_selection.deselect = lambda x: None
        range_selection.on_trait_change(self.update_interval, 'selection')
        plot.tools.append(range_selection)
        plot.overlays.append(RangeKnobsOverlay(plot))
        self.plot = plot
        container.add(self.plot)
        # To set the low and high, we're actually going to set the
        # 'selection' metadata on the line plot to the tuple (low,high).
        plot.index.metadata["selections"] = (0, 1.0)
        # Tell the editor what to display
        self.control = window.control
        self.control.SetSize((factory.width, factory.height))

    def update_interval(self, value):
        """Clamp the dragged selection to [0, 1] and push it into both the
        plot metadata and the edited trait value."""
        low, high = value
        low = max(low, 0)
        high = min(high, 1)
        self.plot.index.metadata['selections'] = (low, high)
        self.value = (low, high)

    def update_editor(self):
        # Model -> control refresh is not supported; the control drives the
        # value after creation.
        pass
# The user normally uses the factory as if it were an editor, e.g.:
#
#     View(Item('interval', editor=IntervalEditor()))
#
IntervalEditor = IntervalEditorFactory

# --- Demonstration ---
if __name__ == "__main__":
    from traits.api import HasTraits
    from traitsui.api import View, Item

    class IntervalTest(HasTraits):
        # Edited with the custom Chaco interval editor defined above.
        interval = Interval(low=0, high=1)
        traits_view = View(Item('interval',
                                editor=IntervalEditor()
                                ),
                           resizable=True)

    it = IntervalTest()
    # Opens the Traits UI dialog (blocks until closed).
    it.configure_traits()
| {
"repo_name": "ContinuumIO/chaco",
"path": "examples/demo/chaco_trait_editor.py",
"copies": "1",
"size": "5558",
"license": "bsd-3-clause",
"hash": 7176221466003894000,
"line_mean": 30.0502793296,
"line_max": 76,
"alpha_frac": 0.5975170925,
"autogenerated": false,
"ratio": 3.9168428470754053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017972143688736561,
"num_lines": 179
} |
"""An example of how to use IPython1 for plotting remote parallel data
The two files plotting_frontend.ipy and plotting_backend.py go together.
This file (plotting_backend.py) performs the actual computation. For this
example, the computation just generates a set of random numbers that
look like a distribution of particles with 2D position (x,y) and
momentum (px,py). In a real situation, this file would do some time
consuming and complicated calculation, and could possibly make calls
to MPI.
One important feature is that this script can also be run standalone without
IPython. This is nice as it allows it to be run in more traditional
settings where IPython isn't being used.
When used with IPython1, this code is run on the engines. Because this
code doesn't make any plots, the engines don't have to have any plotting
packages installed.
"""
# Imports
import numpy as N
import time
import random
# Functions
def compute_particles(number):
    """Generate `number` particles with standard-normal 2D position (x, y)
    and momentum (px, py); returns the four arrays as a tuple."""
    # Draw in the same order the caller unpacks: x, y, px, py.
    x, y, px, py = (N.random.standard_normal(number) for _ in range(4))
    return x, y, px, py
def downsample(array, k):
    """Choose k random elements of `array` (without replacement).

    Fix: use `range` instead of the Python-2-only `xrange`; the result of
    `random.sample` is identical and the helper now also runs on Python 3.
    """
    length = array.shape[0]
    # Sample row indices, then use numpy fancy indexing to pull them out.
    indices = random.sample(range(length), k)
    return array[indices]
# Parameters of the run
number = 100000
d_number = 1000
# The actual run
time.sleep(0) # Pretend it took a while
x, y, px, py = compute_particles(number)
# Now downsample the data
downx = downsample(x, d_number)
downy = downsample(x, d_number)
downpx = downsample(px, d_number)
downpy = downsample(py, d_number)
print "downx: ", downx[:10]
print "downy: ", downy[:10]
print "downpx: ", downpx[:10]
print "downpy: ", downpy[:10] | {
"repo_name": "FrankBian/kuma",
"path": "vendor/packages/ipython/docs/examples/kernel/plotting_backend.py",
"copies": "7",
"size": "1755",
"license": "mpl-2.0",
"hash": -1478196287361361000,
"line_mean": 29.8070175439,
"line_max": 76,
"alpha_frac": 0.7373219373,
"autogenerated": false,
"ratio": 3.447937131630648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00412802606618296,
"num_lines": 57
} |
"""An example of how to use IPython1 for plotting remote parallel data
The two files plotting_frontend.ipy and plotting_backend.py go together.
To run this example, first start the IPython controller and 4
engines::
ipcluster -n 4
Then start ipython in pylab mode::
ipython -pylab
Then a simple "run plotting_frontend.ipy" in IPython will run the
example. When this is done, all the variables (such as number, downx, etc.)
are available in IPython, so for example you can make additional plots.
"""
import numpy as N
from pylab import *
from IPython.kernel import client
# Get an IPython1 client
rc = client.MultiEngineClient()
rc.get_ids()

# Run the simulation on all the engines
rc.run('plotting_backend.py')

# Bring back the data
# pull() returns one value per engine; gather() concatenates the per-engine
# arrays into a single sequence.
number = rc.pull('number')
d_number = rc.pull('d_number')
downx = rc.gather('downx')
downy = rc.gather('downy')
downpx = rc.gather('downpx')
downpy = rc.gather('downpy')
# sum() over the per-engine values gives cluster-wide totals.
print "number: ", sum(number)
print "downsampled number: ", sum(d_number)

# Make a scatter plot of the gathered data
# These calls to matplotlib could be replaced by calls to pygist or
# another plotting package.
figure(1)
scatter(downx, downy)
xlabel('x')
ylabel('y')
figure(2)
scatter(downpx, downpy)
xlabel('px')
ylabel('py')
show()
"repo_name": "yongshengwang/hue",
"path": "build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/share/doc/ipython/examples/kernel/plotting_frontend.py",
"copies": "7",
"size": "1263",
"license": "apache-2.0",
"hash": -1897673326489506300,
"line_mean": 23.3076923077,
"line_max": 76,
"alpha_frac": 0.7339667458,
"autogenerated": false,
"ratio": 3.3236842105263156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007051282051282052,
"num_lines": 52
} |
"""An example of how to use IPython for plotting remote parallel data
The two files plotting_frontend.py and plotting_backend.py go together.
This file (plotting_backend.py) performs the actual computation. For this
example, the computation just generates a set of random numbers that
look like a distribution of particles with 2D position (x,y) and
momentum (px,py). In a real situation, this file would do some time
consuming and complicated calculation, and could possibly make calls
to MPI.
One important feature is that this script can also be run standalone without
IPython. This is nice as it allows it to be run in more traditional
settings where IPython isn't being used.
When used with IPython.parallel, this code is run on the engines. Because this
code doesn't make any plots, the engines don't have to have any plotting
packages installed.
"""
from __future__ import print_function
# Imports
import numpy as N
import time
import random
# Functions
def compute_particles(number):
    """Generate `number` particles with standard-normal 2D position (x, y)
    and momentum (px, py); returns the four arrays as a tuple."""
    # One draw per component, in the order x, y, px, py.
    samples = [N.random.standard_normal(number) for _ in ("x", "y", "px", "py")]
    return tuple(samples)
def downsample(array, k):
    """Choose k random elements of `array` (without replacement).

    Fix: this file already imports `print_function` for Python 3
    compatibility, but `xrange` is Python-2-only; `range` behaves
    identically for `random.sample` and runs on both versions.
    """
    length = array.shape[0]
    # Sample row indices, then use numpy fancy indexing to pull them out.
    indices = random.sample(range(length), k)
    return array[indices]
# Parameters of the run
number = 100000      # particles generated per engine
d_number = 1000      # particles kept after downsampling

# The actual run
time.sleep(0)  # Pretend it took a while
x, y, px, py = compute_particles(number)

# Now downsample the data.
# Fix: downy was previously computed as downsample(x, d_number), silently
# duplicating the x distribution instead of sampling y.
downx = downsample(x, d_number)
downy = downsample(y, d_number)
downpx = downsample(px, d_number)
downpy = downsample(py, d_number)
print("downx: ", downx[:10])
print("downy: ", downy[:10])
print("downpx: ", downpx[:10])
print("downpy: ", downpy[:10])
| {
"repo_name": "sodafree/backend",
"path": "build/ipython/docs/examples/parallel/plotting/plotting_backend.py",
"copies": "2",
"size": "1801",
"license": "bsd-3-clause",
"hash": -4032771477058994700,
"line_mean": 30.0517241379,
"line_max": 79,
"alpha_frac": 0.7373681288,
"autogenerated": false,
"ratio": 3.4970873786407766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002727924189270129,
"num_lines": 58
} |
__author__ = "richard 'ragmondo' green"
import sys
# Change this to point to where you have a copy of the bitcoinj.jar
sys.path.append(r"/path/to/bitcoinj-0.12-SNAPSHOT-bundled.jar")
# This is the address to forward all payments to. Change this (unless you want to send me some testnet coins)
my_address_text = "mzEjmna15T7DXj4HC9MBEG2UJzgFfEYtFo"
# 0 for instant send, 1 for a more realistic example
# if the wallet has no btc in it, then set to 1.
# if it has a confirmed balance in it, then you can set it to 0.
confirm_wait = 1
from org.bitcoinj.core import *
import org.bitcoinj.crypto.KeyCrypterException
import org.bitcoinj.params.MainNetParams
from org.bitcoinj.kits import WalletAppKit
from com.google.common.util.concurrent import FutureCallback
from com.google.common.util.concurrent import Futures
import java.io.File
import traceback,sys
def loud_exceptions(*args):
def _trace(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
traceback.print_exc()
print "** python exception ",e
raise
except java.lang.Exception,e:
traceback.print_exc()
print "** java exception",e
raise
return wrapper
if len(args) == 1 and callable(args[0]):
return _trace(args[0])
else:
return _trace
@loud_exceptions
def forwardCoins(tx,w,pg,addr):
    """Forward the value that transaction `tx` sent to wallet `w` on to
    `addr` via peer group `pg`, minus the reference minimum fee."""
    v = tx.getValueSentToMe(w)
    # Leave the reference fee behind so the forwarding tx can be relayed.
    amountToSend = v.subtract(Transaction.REFERENCE_DEFAULT_MIN_TX_FEE)
    # v_bigint = java.math.BigInteger(str(v))
    sr = w.sendCoins(pg, addr, amountToSend)
class SenderListener(AbstractWalletEventListener):
    """Wallet listener that forwards received coins to a fixed address once
    the receiving transaction reaches `confirm_wait` confirmations."""
    def __init__(self,pg,address):
        super(SenderListener,self). __init__()
        self.peerGroup = pg        # peer group used to broadcast the forward tx
        self.address = address     # destination for forwarded coins
    @loud_exceptions
    def onCoinsReceived(self, w, tx, pb, nb):
        print "tx received", tx
        v = tx.getValueSentToMe(w)
        class myFutureCallback(FutureCallback):
            @loud_exceptions
            def onSuccess(selfx, txn):
                # Fires once tx has reached confirm_wait confirmations.
                forwardCoins(tx,w,self.peerGroup, self.address)
        print "creating %s confirm callback..." % (confirm_wait)
        Futures.addCallback(tx.getConfidence().getDepthFuture(confirm_wait), myFutureCallback())
if __name__ == "__main__":
params = com.google.bitcoin.params.TestNet3Params.get()
my_address = Address(params,my_address_text)
filePrefix = "forwarding-service-testnet"
f = java.io.File(".")
kit = WalletAppKit(params, f, filePrefix);
print "starting and initialising (please wait).."
kit.startAsync()
kit.awaitRunning()
pg = kit.peerGroup()
wallet = kit.wallet()
sendToAddress = kit.wallet().currentReceiveKey().toAddress(params)
print "send test coins to ", sendToAddress, "qrcode - http://qrickit.com/api/qr?d=%s" % (sendToAddress) # no affiliation with qrickit..
sl = SenderListener(pg,my_address)
wallet.addEventListener(sl)
print "finished initialising .. now in main event loop" | {
"repo_name": "MintcoinCommunity/mintcoinj",
"path": "examples/src/main/python/forwarding.py",
"copies": "9",
"size": "3230",
"license": "apache-2.0",
"hash": 8755904454196364000,
"line_mean": 33.7419354839,
"line_max": 139,
"alpha_frac": 0.6671826625,
"autogenerated": false,
"ratio": 3.488120950323974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009669204423497179,
"num_lines": 93
} |
__author__ = "richard 'ragmondo' green"
import sys
# Change this to point to where you have a copy of the bitcoinj.jar
sys.path.append(r"/path/to/bitcoinj-core-0.12-bundled.jar")
# This is the address to forward all payments to. Change this (unless you want to send me some testnet coins)
my_address_text = "mzEjmna15T7DXj4HC9MBEG2UJzgFfEYtFo"
# 0 for instant send, 1 for a more realistic example
# if the wallet has no btc in it, then set to 1.
# if it has a confirmed balance in it, then you can set it to 0.
confirm_wait = 1
from org.bitcoinj.core import *
import org.bitcoinj.crypto.KeyCrypterException
import org.bitcoinj.params.MainNetParams
from org.bitcoinj.kits import WalletAppKit
from com.google.common.util.concurrent import FutureCallback
from com.google.common.util.concurrent import Futures
import java.io.File
import sys
def loud_exceptions(*args):
def _trace(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
print "** python exception ",e
raise
except java.lang.Exception,e:
print "** java exception",e
raise
return wrapper
if len(args) == 1 and callable(args[0]):
return _trace(args[0])
else:
return _trace
@loud_exceptions
def forwardCoins(tx,w,pg,addr):
    """Forward the value that transaction `tx` sent to wallet `w` on to
    `addr` via peer group `pg`, minus the reference minimum fee."""
    v = tx.getValueSentToMe(w)
    # Leave the reference fee behind so the forwarding tx can be relayed.
    amountToSend = v.subtract(Transaction.REFERENCE_DEFAULT_MIN_TX_FEE)
    sr = w.sendCoins(pg, addr, amountToSend)
class SenderListener(AbstractWalletEventListener):
    """Wallet listener that forwards received coins to a fixed address once
    the receiving transaction reaches `confirm_wait` confirmations."""
    def __init__(self,pg,address):
        super(SenderListener,self). __init__()
        self.peerGroup = pg        # peer group used to broadcast the forward tx
        self.address = address     # destination for forwarded coins
    @loud_exceptions
    def onCoinsReceived(self, w, tx, pb, nb):
        print "tx received", tx
        v = tx.getValueSentToMe(w)
        class myFutureCallback(FutureCallback):
            @loud_exceptions
            def onSuccess(selfx, txn):
                # Fires once tx has reached confirm_wait confirmations.
                forwardCoins(tx,w,self.peerGroup, self.address)
        print "creating %s confirm callback..." % (confirm_wait)
        Futures.addCallback(tx.getConfidence().getDepthFuture(confirm_wait), myFutureCallback())
if __name__ == "__main__":
    # Testnet parameters; matches the org.bitcoinj imports above.
    params = org.bitcoinj.params.TestNet3Params.get()
    my_address = Address(params,my_address_text)
    filePrefix = "forwarding-service-testnet"   # basename for wallet/chain files
    f = java.io.File(".")
    kit = WalletAppKit(params, f, filePrefix);
    print "starting and initialising (please wait).."
    kit.startAsync()
    kit.awaitRunning()
    pg = kit.peerGroup()
    wallet = kit.wallet()
    # Fresh receive address for users to pay into.
    sendToAddress = kit.wallet().currentReceiveKey().toAddress(params)
    print "send test coins to ", sendToAddress, "qrcode - http://qrickit.com/api/qr?d=%s" % (sendToAddress) # no affiliation with qrickit..
    sl = SenderListener(pg,my_address)
    wallet.addEventListener(sl)
    print "finished initialising .. now in main event loop"
| {
"repo_name": "TheBlueMatt/bitcoinj",
"path": "examples/src/main/python/forwarding.py",
"copies": "39",
"size": "3090",
"license": "apache-2.0",
"hash": -4638894622181718000,
"line_mean": 33.3333333333,
"line_max": 139,
"alpha_frac": 0.671197411,
"autogenerated": false,
"ratio": 3.460246360582307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008979073163028786,
"num_lines": 90
} |
__author__ = "richard 'ragmondo' green"
import sys
# Change this to point to where you have a copy of the nubitsj.jar
sys.path.append(r"/path/to/nubitsj-0.12-bundled.jar")
# This is the address to forward all payments to. Change this (unless you want to send me some testnet coins)
my_address_text = "mzEjmna15T7DXj4HC9MBEG2UJzgFfEYtFo"
# 0 for instant send, 1 for a more realistic example
# if the wallet has no btc in it, then set to 1.
# if it has a confirmed balance in it, then you can set it to 0.
confirm_wait = 1
from com.matthewmitchell.nubitsj.core import *
import com.matthewmitchell.nubitsj.crypto.KeyCrypterException
import com.matthewmitchell.nubitsj.params.MainNetParams
from com.matthewmitchell.nubitsj.kits import WalletAppKit
from com.google.common.util.concurrent import FutureCallback
from com.google.common.util.concurrent import Futures
import java.io.File
import sys
def loud_exceptions(*args):
def _trace(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
print "** python exception ",e
raise
except java.lang.Exception,e:
print "** java exception",e
raise
return wrapper
if len(args) == 1 and callable(args[0]):
return _trace(args[0])
else:
return _trace
@loud_exceptions
def forwardCoins(tx,w,pg,addr):
    """Forward the value that transaction `tx` sent to wallet `w` on to
    `addr` via peer group `pg`, minus the reference minimum fee."""
    v = tx.getValueSentToMe(w)
    # Leave the reference fee behind so the forwarding tx can be relayed.
    amountToSend = v.subtract(Transaction.REFERENCE_DEFAULT_MIN_TX_FEE)
    sr = w.sendCoins(pg, addr, amountToSend)
class SenderListener(AbstractWalletEventListener):
    """Wallet listener that forwards received coins to a fixed address once
    the receiving transaction reaches `confirm_wait` confirmations."""
    def __init__(self,pg,address):
        super(SenderListener,self). __init__()
        self.peerGroup = pg        # peer group used to broadcast the forward tx
        self.address = address     # destination for forwarded coins
    @loud_exceptions
    def onCoinsReceived(self, w, tx, pb, nb):
        print "tx received", tx
        v = tx.getValueSentToMe(w)
        class myFutureCallback(FutureCallback):
            @loud_exceptions
            def onSuccess(selfx, txn):
                # Fires once tx has reached confirm_wait confirmations.
                forwardCoins(tx,w,self.peerGroup, self.address)
        print "creating %s confirm callback..." % (confirm_wait)
        Futures.addCallback(tx.getConfidence().getDepthFuture(confirm_wait), myFutureCallback())
if __name__ == "__main__":
params = com.google.nubits.params.MainNetParams.get()
my_address = Address(params,my_address_text)
filePrefix = "forwarding-service-mainnet"
f = java.io.File(".")
kit = WalletAppKit(params, f, filePrefix);
print "starting and initialising (please wait).."
kit.startAsync()
kit.awaitRunning()
pg = kit.peerGroup()
wallet = kit.wallet()
sendToAddress = kit.wallet().currentReceiveKey().toAddress(params)
print "send coins to ", sendToAddress, "qrcode - http://qrickit.com/api/qr?d=%s" % (sendToAddress) # no affiliation with qrickit..
sl = SenderListener(pg,my_address)
wallet.addEventListener(sl)
print "finished initialising .. now in main event loop" | {
"repo_name": "Cybnate/NuBitsj",
"path": "examples/src/main/python/forwarding.py",
"copies": "1",
"size": "3141",
"license": "apache-2.0",
"hash": 976026814925577600,
"line_mean": 33.9111111111,
"line_max": 134,
"alpha_frac": 0.6758993951,
"autogenerated": false,
"ratio": 3.3883495145631066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9519919668867902,
"avg_score": 0.008865848159040988,
"num_lines": 90
} |
__author__ = "richard 'ragmondo' green"
import sys
# Change this to point to where you have a copy of the peercoinj.jar
sys.path.append(r"/path/to/peercoinj-0.12-SNAPSHOT-bundled.jar")
# This is the address to forward all payments to. Change this (unless you want to send me some testnet coins)
my_address_text = "mzEjmna15T7DXj4HC9MBEG2UJzgFfEYtFo"
# 0 for instant send, 1 for a more realistic example
# if the wallet has no btc in it, then set to 1.
# if it has a confirmed balance in it, then you can set it to 0.
confirm_wait = 1
from com.matthewmitchell.peercoinj.core import *
import com.matthewmitchell.peercoinj.crypto.KeyCrypterException
import com.matthewmitchell.peercoinj.params.MainNetParams
from com.matthewmitchell.peercoinj.kits import WalletAppKit
from com.google.common.util.concurrent import FutureCallback
from com.google.common.util.concurrent import Futures
import java.io.File
import traceback,sys
def loud_exceptions(*args):
def _trace(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
traceback.print_exc()
print "** python exception ",e
raise
except java.lang.Exception,e:
traceback.print_exc()
print "** java exception",e
raise
return wrapper
if len(args) == 1 and callable(args[0]):
return _trace(args[0])
else:
return _trace
@loud_exceptions
def forwardCoins(tx,w,pg,addr):
    """Forward the value that transaction `tx` sent to wallet `w` on to
    `addr` via peer group `pg`, minus the reference minimum fee."""
    v = tx.getValueSentToMe(w)
    # Leave the reference fee behind so the forwarding tx can be relayed.
    amountToSend = v.subtract(Transaction.REFERENCE_DEFAULT_MIN_TX_FEE)
    # v_bigint = java.math.BigInteger(str(v))
    sr = w.sendCoins(pg, addr, amountToSend)
class SenderListener(AbstractWalletEventListener):
    """Wallet listener that forwards received coins to a fixed address once
    the receiving transaction reaches `confirm_wait` confirmations."""
    def __init__(self,pg,address):
        super(SenderListener,self). __init__()
        self.peerGroup = pg        # peer group used to broadcast the forward tx
        self.address = address     # destination for forwarded coins
    @loud_exceptions
    def onCoinsReceived(self, w, tx, pb, nb):
        print "tx received", tx
        v = tx.getValueSentToMe(w)
        class myFutureCallback(FutureCallback):
            @loud_exceptions
            def onSuccess(selfx, txn):
                # Fires once tx has reached confirm_wait confirmations.
                forwardCoins(tx,w,self.peerGroup, self.address)
        print "creating %s confirm callback..." % (confirm_wait)
        Futures.addCallback(tx.getConfidence().getDepthFuture(confirm_wait), myFutureCallback())
if __name__ == "__main__":
params = com.google.peercoin.params.TestNet3Params.get()
my_address = Address(params,my_address_text)
filePrefix = "forwarding-service-testnet"
f = java.io.File(".")
kit = WalletAppKit(params, f, filePrefix);
print "starting and initialising (please wait).."
kit.startAsync()
kit.awaitRunning()
pg = kit.peerGroup()
wallet = kit.wallet()
sendToAddress = kit.wallet().currentReceiveKey().toAddress(params)
print "send test coins to ", sendToAddress, "qrcode - http://qrickit.com/api/qr?d=%s" % (sendToAddress) # no affiliation with qrickit..
sl = SenderListener(pg,my_address)
wallet.addEventListener(sl)
print "finished initialising .. now in main event loop" | {
"repo_name": "kris-davison/capricoinj",
"path": "examples/src/main/python/forwarding.py",
"copies": "4",
"size": "3301",
"license": "apache-2.0",
"hash": -8037531077932004000,
"line_mean": 34.5053763441,
"line_max": 139,
"alpha_frac": 0.6731293547,
"autogenerated": false,
"ratio": 3.4493207941483806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00934413512932201,
"num_lines": 93
} |
__author__ = "richard 'ragmondo' green"
import sys
# Change this to point to where you have a copy of the spreadcoinj.jar
sys.path.append(r"/path/to/spreadcoinj-0.12-SNAPSHOT-bundled.jar")
# This is the address to forward all payments to. Change this (unless you want to send me some testnet coins)
my_address_text = "mzEjmna15T7DXj4HC9MBEG2UJzgFfEYtFo"
# 0 for instant send, 1 for a more realistic example
# if the wallet has no btc in it, then set to 1.
# if it has a confirmed balance in it, then you can set it to 0.
confirm_wait = 1
from org.spreadcoinj.core import *
import org.spreadcoinj.crypto.KeyCrypterException
import org.spreadcoinj.params.MainNetParams
from org.spreadcoinj.kits import WalletAppKit
from com.google.common.util.concurrent import FutureCallback
from com.google.common.util.concurrent import Futures
import java.io.File
import traceback,sys
def loud_exceptions(*args):
def _trace(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception, e:
traceback.print_exc()
print "** python exception ",e
raise
except java.lang.Exception,e:
traceback.print_exc()
print "** java exception",e
raise
return wrapper
if len(args) == 1 and callable(args[0]):
return _trace(args[0])
else:
return _trace
@loud_exceptions
def forwardCoins(tx,w,pg,addr):
    """Forward the value that transaction `tx` sent to wallet `w` on to
    `addr` via peer group `pg`, minus the reference minimum fee."""
    v = tx.getValueSentToMe(w)
    # Leave the reference fee behind so the forwarding tx can be relayed.
    amountToSend = v.subtract(Transaction.REFERENCE_DEFAULT_MIN_TX_FEE)
    # v_bigint = java.math.BigInteger(str(v))
    sr = w.sendCoins(pg, addr, amountToSend)
class SenderListener(AbstractWalletEventListener):
    """Wallet listener that forwards received coins to a fixed address once
    the receiving transaction reaches `confirm_wait` confirmations."""
    def __init__(self,pg,address):
        super(SenderListener,self). __init__()
        self.peerGroup = pg        # peer group used to broadcast the forward tx
        self.address = address     # destination for forwarded coins
    @loud_exceptions
    def onCoinsReceived(self, w, tx, pb, nb):
        print "tx received", tx
        v = tx.getValueSentToMe(w)
        class myFutureCallback(FutureCallback):
            @loud_exceptions
            def onSuccess(selfx, txn):
                # Fires once tx has reached confirm_wait confirmations.
                forwardCoins(tx,w,self.peerGroup, self.address)
        print "creating %s confirm callback..." % (confirm_wait)
        Futures.addCallback(tx.getConfidence().getDepthFuture(confirm_wait), myFutureCallback())
if __name__ == "__main__":
    # NOTE(review): `com` is never imported, and this file's other imports use
    # org.spreadcoinj.* -- `com.google.bitcoin.params` looks like a leftover
    # from upstream bitcoinj and will raise a NameError at runtime; confirm
    # the intended class (presumably spreadcoinj's testnet params).
    params = com.google.bitcoin.params.TestNetParams.get()
    my_address = Address(params,my_address_text)
    filePrefix = "forwarding-service-testnet"
    f = java.io.File(".")
    kit = WalletAppKit(params, f, filePrefix);
    print "starting and initialising (please wait).."
    kit.startAsync()
    kit.awaitRunning()
    pg = kit.peerGroup()
    wallet = kit.wallet()
    # Fresh receive address for this wallet; coins sent here get forwarded.
    sendToAddress = kit.wallet().currentReceiveKey().toAddress(params)
    print "send test coins to ", sendToAddress, "qrcode - http://qrickit.com/api/qr?d=%s" % (sendToAddress) # no affiliation with qrickit..
    sl = SenderListener(pg,my_address)
    wallet.addEventListener(sl)
print "finished initialising .. now in main event loop" | {
"repo_name": "bitbandi/spreadcoinj",
"path": "examples/src/main/python/forwarding.py",
"copies": "1",
"size": "3247",
"license": "apache-2.0",
"hash": 2440612911168078000,
"line_mean": 33.9247311828,
"line_max": 139,
"alpha_frac": 0.6689251617,
"autogenerated": false,
"ratio": 3.4839055793991416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9604868311555157,
"avg_score": 0.009592485908796917,
"num_lines": 93
} |
""" An example of how to use the entity system. """
# Matthew Westbrook
#
# An example that demonstrates how to use the entity system.
#
# * The first task is to define your entity systems and components.
# * Then initialize the world and add the entity systems to the world.
# * Construct entities and add their components.
# * Update the world when a tick occurs.
from entsystem.entity_system import EntitySystem
from entsystem.world import World
class PrintableComponent(object):
    """Holds the message an entity should print.

    Attributes:
        printable: The message string printed for the owning entity.
    """

    def __init__(self, text):
        self.printable = text

    def set_printable(self, text):
        """Replace the stored message with *text*."""
        self.printable = text

    def get_printable(self):
        """Return the stored message."""
        return self.printable
class PrintSystem(EntitySystem):
    """EntitySystem that prints every entity carrying a PrintableComponent."""

    def __init__(self):
        super().__init__([PrintableComponent])

    def update(self, entity, delta_time):
        """Print the entity's message once for each elapsed tick.

        Called by the world on every update; user code should not call
        this directly.

        Args:
            entity: The entity whose PrintableComponent is printed.
            delta_time: Number of ticks since the last update.
        """
        message = entity.get_component(PrintableComponent).get_printable()
        for _tick in range(delta_time):
            print(message)
def main():
    """Demonstrate the entity/system lifecycle.

    Builds a world with a PrintSystem, adds two printable entities, then
    shows what happens across updates as a component and finally the
    system itself are removed.
    """
    # World plus the single system that prints entities.
    world = World()
    world.add_system(PrintSystem())

    # Two entities, each carrying its own printable message.
    first = world.create_entity()
    second = world.create_entity()
    world.add_component(first, PrintableComponent("Entity1"))
    world.add_component(second, PrintableComponent("Entity2"))

    print("-------\nLiving Entities:")
    world.update(1)

    print("-------\nDelete entity2")
    world.remove_component(second, PrintableComponent)
    print("-------\nLiving Entities:")
    world.update(1)

    print("-------\nDelete PrintSystem")
    world.remove_system(PrintSystem)
    print("------\nLiving Entities:")
    world.update(1)
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    main()
| {
"repo_name": "horrorvacui/ent-system",
"path": "examples/example.py",
"copies": "1",
"size": "2683",
"license": "mit",
"hash": 7644690501424263000,
"line_mean": 27.8494623656,
"line_max": 77,
"alpha_frac": 0.6515095043,
"autogenerated": false,
"ratio": 4.251980982567353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5403490486867353,
"avg_score": null,
"num_lines": null
} |
# An example of how you can do a wallet in Viper.
# Warning: NOT AUDITED. Do not use to store substantial quantities of funds.
# A list of the owners addresses (there are a maximum of 5 owners)
owners: address[5]
# The number of owners required to approve a transaction
threshold: num
# The number of transactions that have been approved
seq: num
# Store the (up to 5) owner addresses and the approval threshold.
def __init__(_owners: address[5], _threshold: num):
    for i in range(5):
        # Skip empty (zero-address) slots so shorter owner lists work.
        if _owners[i]:
            self.owners[i] = _owners[i]
    self.threshold = _threshold
# `@payable` allows functions to receive ether
@payable
def approve(_seq: num, to: address, value: wei_value, data: bytes <= 4096, sigdata: num256[3][5]) -> bytes <= 4096:
    # Throws if the value sent along with this call is less than the value to be forwarded
    assert msg.value >= value
    # The approval count starts at 0 on every call (multiple signatures are supplied at once through the sigdata argument)
    approvals = 0
    # Starts by combining:
    # 1) The number of transactions approved thus far.
    # 2) The address the transaction is going to be sent to (can be a contract or a user).
    # 3) The value in wei that will be sent with this transaction.
    # 4) The data to be sent with this transaction (usually data is used to deploy contracts or to call functions on contracts, but you can put whatever you want in it).
    # Takes the sha3 (keccak256) hash of the combination
    h = sha3(concat(as_bytes32(_seq), as_bytes32(to), as_bytes32(value), data))
    # Then we combine the Ethereum Signed message with our previous hash
    # Owners will have to sign the below message
    h2 = sha3(concat("\x19Ethereum Signed Message:\n32", h))
    # Verifies that the caller of approve has entered the correct transaction number
    # (self.seq acts as a nonce, giving replay protection)
    assert self.seq == _seq
    # Iterates through all the owners and verifies that their signatures,
    # given as the sigdata argument, are correct
    for i in range(5):
        if sigdata[i][0]:
            # If an invalid signature is given for an owner then the contract throws
            assert ecrecover(h2, sigdata[i][0], sigdata[i][1], sigdata[i][2]) == self.owners[i]
            # For every valid signature increase the number of approvals by 1
            approvals += 1
    # Throw if the number of approvals is less than the number of approvals required (the threshold)
    assert approvals >= self.threshold
    # The transaction has been approved
    # Increase the number of approved transactions by 1
    self.seq += 1
    # Use raw_call to send the transaction
    return raw_call(to, data, outsize=4096, gas=3000000, value=value)
| {
"repo_name": "NedYork/viper",
"path": "examples/wallet/wallet.v.py",
"copies": "1",
"size": "2617",
"license": "mit",
"hash": 2932888019952374300,
"line_mean": 51.34,
"line_max": 169,
"alpha_frac": 0.6962170424,
"autogenerated": false,
"ratio": 3.837243401759531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004611773685863158,
"num_lines": 50
} |
"""An example of implementing a centralized critic with ObservationFunction.
The advantage of this approach is that it's very simple and you don't have to
change the algorithm at all -- just use callbacks and a custom model.
However, it is a bit less principled in that you have to change the agent
observation spaces to include data that is only used at train time.
See also: centralized_critic.py for an alternative approach that instead
modifies the policy to add a centralized value function.
"""
import numpy as np
from gym.spaces import Dict, Discrete
import argparse
import os
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.examples.models.centralized_critic_models import \
YetAnotherCentralizedCriticModel, YetAnotherTorchCentralizedCriticModel
from ray.rllib.examples.env.two_step_game import TwoStepGame
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import check_learning_achieved
# Command-line options for the example: DL framework, CI test mode, and
# the three stopping criteria passed to tune.run below.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=100,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=7.99,
    help="Reward at which we stop training.")
class FillInActions(DefaultCallbacks):
    """Writes the opponent's one-hot action into each agent's observation
    after trajectory collection, making it available at train time."""

    def on_postprocess_trajectory(self, worker, episode, agent_id, policy_id,
                                  policies, postprocessed_batch,
                                  original_batches, **kwargs):
        # Rows we patch in place: this agent's current observations.
        obs_to_patch = postprocessed_batch[SampleBatch.CUR_OBS]

        # Two-agent game: the opponent is whichever agent id we are not.
        opponent_id = 1 if agent_id == 0 else 0
        _, opponent_batch = original_batches[opponent_id]

        # One-hot encode the opponent's recorded actions.
        encoder = ModelCatalog.get_preprocessor_for_space(Discrete(2))
        encoded_actions = np.array([
            encoder.transform(action)
            for action in opponent_batch[SampleBatch.ACTIONS]
        ])

        # The final two observation slots hold the opponent's one-hot action.
        obs_to_patch[:, -2:] = encoded_actions
def central_critic_observer(agent_obs, **kw):
    """Augment each agent's observation with its opponent's data.

    The "opponent_action" slot is zero-filled here; the FillInActions
    callback overwrites it during trajectory postprocessing.
    """
    return {
        agent: {
            "own_obs": agent_obs[agent],
            "opponent_obs": agent_obs[1 - agent],
            "opponent_action": 0,  # filled in by FillInActions
        }
        for agent in (0, 1)
    }
if __name__ == "__main__":
    args = parser.parse_args()

    # Register the centralized-critic model under the name used in `config`.
    ModelCatalog.register_custom_model(
        "cc_model", YetAnotherTorchCentralizedCriticModel
        if args.framework == "torch" else YetAnotherCentralizedCriticModel)

    action_space = Discrete(2)
    observer_space = Dict({
        "own_obs": Discrete(6),
        # These two fields are filled in by the CentralCriticObserver, and are
        # not used for inference, only for training.
        "opponent_obs": Discrete(6),
        "opponent_action": Discrete(2),
    })

    config = {
        "env": TwoStepGame,
        "batch_mode": "complete_episodes",
        "callbacks": FillInActions,
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "num_workers": 0,
        "multiagent": {
            # One policy per agent; both see the augmented observation space.
            "policies": {
                "pol1": (None, observer_space, action_space, {}),
                "pol2": (None, observer_space, action_space, {}),
            },
            "policy_mapping_fn": (
                lambda aid, **kwargs: "pol1" if aid == 0 else "pol2"),
            "observation_fn": central_critic_observer,
        },
        "model": {
            "custom_model": "cc_model",
        },
        "framework": args.framework,
    }

    # Training stops as soon as ANY of these criteria is reached.
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    results = tune.run("PPO", config=config, stop=stop, verbose=1)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)
| {
"repo_name": "ray-project/ray",
"path": "rllib/examples/centralized_critic_2.py",
"copies": "1",
"size": "4674",
"license": "apache-2.0",
"hash": 1351273073586770700,
"line_mean": 32.8695652174,
"line_max": 78,
"alpha_frac": 0.6315789474,
"autogenerated": false,
"ratio": 3.7243027888446214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985588173624462,
"avg_score": 0,
"num_lines": 138
} |
"""An example of multi-worker training with Keras model using Strategy API."""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import layers, models
from tensorflow.keras import optimizers
from polyaxon import tracking
from polyaxon.tracking.contrib.keras import PolyaxonKerasCallback, PolyaxonKerasModelCheckpoint
OPTIMIZERS = {
'adam': optimizers.Adam,
'rmsprop': optimizers.RMSprop,
'sgd': optimizers.SGD,
}
def make_datasets_unbatched():
    """Return the MNIST training split, pixel-normalized and shuffled."""
    shuffle_buffer = 10000

    def normalize(image, label):
        # Map pixel values from (0, 255] into (0., 1.].
        return tf.cast(image, tf.float32) / 255, label

    datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
    return datasets['train'].map(normalize).cache().shuffle(shuffle_buffer)
def get_model(args):
    """Build and compile the CNN classifier described by the CLI args.

    Args:
        args: Parsed CLI namespace (layer sizes, activations, dropout,
            optimizer name, learning rate and loss).

    Returns:
        A compiled Keras Sequential model for 28x28x1 inputs, 10 classes.
    """
    model = models.Sequential([
        layers.Conv2D(args.conv1_size, (3, 3),
                      activation=args.conv_activation,
                      input_shape=(28, 28, 1)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation=args.conv_activation),
        layers.Dropout(args.dropout),
        layers.Flatten(),
        layers.Dense(args.hidden1_size, activation=args.dense_activation),
        layers.Dense(10, activation='softmax'),
    ])
    model.summary()

    model.compile(
        optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
        loss=args.loss,
        metrics=['accuracy'])
    return model
def decay(epoch):
    """Piecewise-constant learning-rate schedule.

    Args:
        epoch: Zero-based epoch index.

    Returns:
        1e-3 for epochs 0-2, 1e-4 for epochs 3-6, and 1e-5 thereafter.
    """
    if epoch < 3:
        return 1e-3
    if epoch < 7:  # 3 <= epoch < 7; the earlier branch already returned.
        return 1e-4
    return 1e-5
def main(args):
    """Run synchronous multi-worker training of the MNIST CNN.

    Relies on the module-level TASK_INDEX (parsed from TF_CONFIG) to
    decide whether this process is the chief worker.
    """
    # MultiWorkerMirroredStrategy creates copies of all variables in the model's
    # layers on each device across all workers
    # if your GPUs don't support NCCL, replace "communication" with another
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
        communication=tf.distribute.experimental.CollectiveCommunication.NCCL)

    BATCH_SIZE_PER_REPLICA = 64
    BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync

    with strategy.scope():
        ds_train = make_datasets_unbatched().batch(BATCH_SIZE).repeat()

        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = \
            tf.data.experimental.AutoShardPolicy.DATA

        ds_train = ds_train.with_options(options)
        # Model building/compiling need to be within `strategy.scope()`.
        multi_worker_model = get_model(args)

    # Function for decaying the learning rate.
    # You can define any decay function you need.
    # Callback for printing the LR at the end of each epoch.
    class PrintLR(tf.keras.callbacks.Callback):

        def on_epoch_end(self, epoch, logs=None):
            print('\nLearning rate for epoch {} is {}'.format(
                epoch + 1, multi_worker_model.optimizer.lr.numpy()))

    callbacks = [
        PrintLR(),
        tf.keras.callbacks.LearningRateScheduler(decay),
    ]

    # Polyaxon
    # NOTE(review): on the chief (TASK_INDEX == 0) the callbacks list is
    # REPLACED below, so PrintLR and the LR scheduler do not run there --
    # confirm this is intended rather than `callbacks += [...]`.
    if TASK_INDEX == 0:
        plx_callback = PolyaxonKerasCallback()
        plx_model_callback = PolyaxonKerasModelCheckpoint(save_weights_only=True)
        log_dir = tracking.get_tensorboard_path()
        callbacks = [
            tf.keras.callbacks.TensorBoard(log_dir=log_dir),
            plx_model_callback,
            plx_callback,
        ]

    # Keras' `model.fit()` trains the model with specified number of epochs and
    # number of steps per epoch. Note that the numbers here are for demonstration
    # purposes only and may not sufficiently produce a model with good quality.
    multi_worker_model.fit(ds_train,
                           epochs=args.epochs,
                           steps_per_epoch=70,
                           callbacks=callbacks)

    # Every worker saves to the same local path; only the chief logs the
    # artifact to Polyaxon below.
    multi_worker_model.save("/tmp/model")

    if TASK_INDEX == 0:
        tracking.log_model(path="/tmp/model", framework="tensorflow")
if __name__ == '__main__':
    os.environ['NCCL_DEBUG'] = 'INFO'

    tfds.disable_progress_bar()

    # to decide if a worker is chief, get TASK_INDEX in Cluster info
    # TF_CONFIG is presumably injected by the cluster operator (e.g. a
    # Kubeflow TFJob) -- this raises KeyError when it is absent.
    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')
    TASK_INDEX = tf_config['task']['index']

    # Model/training hyperparameters, all overridable from the CLI.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--conv1_size',
        type=int,
        default=32)
    parser.add_argument(
        '--conv2_size',
        type=int,
        default=64
    )
    parser.add_argument(
        '--dropout',
        type=float,
        default=0.2
    )
    parser.add_argument(
        '--hidden1_size',
        type=int,
        default=64
    )
    parser.add_argument(
        '--conv_activation',
        type=str,
        default="relu"
    )
    parser.add_argument(
        '--dense_activation',
        type=str,
        default="relu"
    )
    parser.add_argument(
        '--optimizer',
        type=str,
        default='adam'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001
    )
    parser.add_argument(
        '--epochs',
        type=int,
        default=10
    )
    parser.add_argument(
        '--loss',
        type=str,
        default="sparse_categorical_crossentropy"
    )
    args = parser.parse_args()

    # Polyaxon
    # Only the chief worker initializes tracking, avoiding duplicate runs.
    if TASK_INDEX == 0:
        tracking.init()

    main(args)
| {
"repo_name": "polyaxon/polyaxon",
"path": "examples/in_cluster/kubeflow/tfjob/run.py",
"copies": "1",
"size": "5644",
"license": "apache-2.0",
"hash": -5029606623364384000,
"line_mean": 28.0927835052,
"line_max": 105,
"alpha_frac": 0.6286321758,
"autogenerated": false,
"ratio": 3.7229551451187337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4851587320918734,
"avg_score": null,
"num_lines": null
} |
"An example of predicting a music genre from a custom audio file"
import librosa
import logging
import sys
import numpy as np
from keras.models import model_from_json
from GenreFeatureData import (
GenreFeatureData,
) # local python class with Audio feature extraction and genre list
# set logging level
logging.getLogger("tensorflow").setLevel(logging.ERROR)
def load_model(model_path, weights_path):
    """Reconstruct the trained LSTM genre classifier from disk.

    Args:
        model_path: Path to the JSON architecture file.
        weights_path: Path to the saved weights file.

    Returns:
        A compiled Keras model ready for inference.
    """
    with open(model_path, "r") as architecture_file:
        model = model_from_json(architecture_file.read())
    model.load_weights(weights_path)
    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
    return model
def extract_audio_features(file):
    """Compute the (1, 128, 33) per-frame feature matrix for one audio file.

    Per frame: 13 MFCCs, 1 spectral centroid, 12 chroma bins and
    7 spectral-contrast bands, taken over the first 128 frames.
    """
    n_frames = 128
    hop = 512

    y, sr = librosa.load(file)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop, n_mfcc=13)
    centroid = librosa.feature.spectral_centroid(y=y, sr=sr, hop_length=hop)
    chroma = librosa.feature.chroma_stft(y=y, sr=sr, hop_length=hop)
    contrast = librosa.feature.spectral_contrast(y=y, sr=sr, hop_length=hop)

    # Pack each feature family into its own slice of the last axis.
    features = np.zeros((1, n_frames, 33), dtype=np.float64)
    features[0, :, 0:13] = mfcc.T[0:n_frames, :]
    features[0, :, 13:14] = centroid.T[0:n_frames, :]
    features[0, :, 14:26] = chroma.T[0:n_frames, :]
    features[0, :, 26:33] = contrast.T[0:n_frames, :]
    return features
def get_genre(model, music_path):
    """Return the genre name the trained model predicts for an audio file."""
    scores = model.predict(extract_audio_features(music_path))
    return GenreFeatureData().genre_list[np.argmax(scores)]
if __name__ == "__main__":
    # Optional CLI argument: path to an audio file (defaults to the
    # bundled classical-music example).
    PATH = sys.argv[1] if len(sys.argv) == 2 else "./audio/classical_music.mp3"
    MODEL = load_model("./weights/model.json", "./weights/model_weights.h5")
    GENRE = get_genre(MODEL, PATH)
    print("Model predict: {}".format(GENRE))
| {
"repo_name": "ruohoruotsi/LSTM-Music-Genre-Classification",
"path": "predict_example.py",
"copies": "1",
"size": "2122",
"license": "mit",
"hash": 8330477576746012000,
"line_mean": 36.8928571429,
"line_max": 85,
"alpha_frac": 0.7026390198,
"autogenerated": false,
"ratio": 3.300155520995334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4502794540795334,
"avg_score": null,
"num_lines": null
} |
"""An example of the ListModel."""
from pythonicqt.Qt import QtCore, QtGui
from pythonicqt import ListModel
from pythonicqt.examples import ExampleBase
class ExampleListModel(ExampleBase):
    """This Widget demonstrates the ListModel functionality.

    You can interact with a model that behaves like a Python list
    (self.list_model); at the same time the model propagates changes
    correctly to every attached view."""
    title="ListModel"

    def __init__(self, *args, **kwargs):
        super(ExampleListModel, self).__init__(*args, **kwargs)
        # The single shared model driving all three views below.
        self.list_model = ListModel([1, 'two', u'three'])

        #The Views
        self._layout = QtGui.QVBoxLayout(self)
        self.combo_one = QtGui.QComboBox()
        self._layout.addWidget(self.combo_one)
        self.combo_two = QtGui.QComboBox()
        self._layout.addWidget(self.combo_two)
        self.label = QtGui.QLabel("You can edit the items below.")
        self._layout.addWidget(self.label)
        self.list_view = QtGui.QListView()
        self._layout.addWidget(self.list_view)

        #Connect Model to Views
        self.combo_one.setModel(self.list_model)
        self.combo_two.setModel(self.list_model)
        self.list_view.setModel(self.list_model)

        #Example interaction
        # Mutating the model like a plain list updates every attached view.
        self.list_model.append("Now Last")
        self.list_model.insert(0, 'Now First')
if __name__ == "__main__":
ExampleListModel.run_example() | {
"repo_name": "Digirolamo/pythonicqt",
"path": "pythonicqt/examples/listmodel_example.py",
"copies": "1",
"size": "1396",
"license": "mit",
"hash": -2980144958761504300,
"line_mean": 36.7567567568,
"line_max": 80,
"alpha_frac": 0.6618911175,
"autogenerated": false,
"ratio": 3.742627345844504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4904518463344504,
"avg_score": null,
"num_lines": null
} |
## An example of the simple Schnorr sigma protocol
## to prove that one knows x, such that h = g^x for
## a public generator h and g.
from petlib.bn import Bn
from petlib.ec import EcGroup, EcPt
from hashlib import sha256
def challenge(elements):
    """Hash a list of proof elements into a challenge, bijectively.

    Each element (preceded by the element count) is rendered as
    "<length>||<string form>" and the pieces are joined with "|" before
    hashing with SHA-256, so distinct lists cannot collide by simple
    concatenation.
    """
    rendered = [str(e) for e in [len(elements)] + elements]
    state = "|".join("%s||%s" % (len(part), part) for part in rendered)
    digest = sha256()
    digest.update(state.encode("utf8"))
    return digest.digest()
def setup():
    """Return (group, generator, order) for the EC group with nid 713."""
    group = EcGroup(713)
    return group, group.generator(), group.order()
def prove(params, h, g, x, m=""):
    """Schnorr proof of knowledge for the statement ZK(x ; h = g^x).

    Optionally binds a message `m`, turning the proof into a signature.
    """
    assert x * g == h
    G, _, o = params

    # Commitment: fresh random witness w and its public point W = w*g.
    w = o.random()
    W = w * g

    # Fiat-Shamir challenge over the full protocol state (and message).
    c = Bn.from_binary(challenge(['schnorr', G.nid(), g, h, m, W])) % o

    # Response.
    r = (w - c * x) % o
    return (c, r)
def verify(params, h, g, proof, m=""):
    """Verify a Schnorr proof of the statement ZK(x ; h = g^x)."""
    G, _, o = params
    c, r = proof

    # Recompute the commitment from the response: W = r*g + c*h.
    W = (r * g + c * h)

    # Recompute the Fiat-Shamir challenge and compare it to the proof's.
    expected = Bn.from_binary(challenge(['schnorr', G.nid(), g, h, m, W])) % o
    return c == expected
def test_zkp():
    """Exercise the Schnorr NIZK both as a proof and as a signature."""
    params = setup()
    G, g, o = params

    # Secret x and corresponding public point h = x*g.
    x = o.random()
    h = x * g

    ## Use it as a Zk proof
    proof = prove(params, h, g, x)
    assert verify(params, h, g, proof)
    # Swapping g and h must not verify.
    assert not verify(params, g, h, proof)

    ## Use it as a signature scheme
    proofm = prove(params, h, g, x, m = "Hello World!")
    assert verify(params, h, g, proofm, m = "Hello World!")
    # A proof is bound to its message.
    assert not verify(params, h, g, proofm, m = "Other String")
| {
"repo_name": "gdanezis/petlib",
"path": "examples/zkp.py",
"copies": "1",
"size": "1694",
"license": "bsd-2-clause",
"hash": -1140757912120717600,
"line_mean": 24.6666666667,
"line_max": 63,
"alpha_frac": 0.554309327,
"autogenerated": false,
"ratio": 2.786184210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8797761020132635,
"avg_score": 0.008546503478736034,
"num_lines": 66
} |
"""An example of training A3C against OpenAI Gym Envs.
This script is an example of training a A3C agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_a3c_gym.py 8 --env CartPole-v0
To solve InvertedPendulum-v1, run:
python train_a3c_gym.py 8 --env InvertedPendulum-v1 --arch LSTMGaussian --t-max 50 # noqa
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import os
# This prevents numpy from using multiple threads
os.environ['OMP_NUM_THREADS'] = '1' # NOQA
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import numpy as np
import chainerrl
from chainerrl.agents import a3c
from chainerrl import experiments
from chainerrl import links
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
from chainerrl.optimizers import rmsprop_async
from chainerrl import policies
from chainerrl.recurrent import RecurrentChainMixin
from chainerrl import v_function
class A3CFFSoftmax(chainer.ChainList, a3c.A3CModel):
    """An example of A3C feedforward softmax policy."""

    def __init__(self, ndim_obs, n_actions, hidden_sizes=(200, 200)):
        # Separate MLPs for the policy head and the state-value head.
        self.pi = policies.SoftmaxPolicy(
            model=links.MLP(ndim_obs, n_actions, hidden_sizes))
        self.v = links.MLP(ndim_obs, 1, hidden_sizes=hidden_sizes)
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state):
        # Return (policy distribution, state value) for a batch of states.
        return self.pi(state), self.v(state)
class A3CFFMellowmax(chainer.ChainList, a3c.A3CModel):
    """An example of A3C feedforward mellowmax policy."""

    def __init__(self, ndim_obs, n_actions, hidden_sizes=(200, 200)):
        # Same layout as A3CFFSoftmax, but with a mellowmax policy head.
        self.pi = policies.MellowmaxPolicy(
            model=links.MLP(ndim_obs, n_actions, hidden_sizes))
        self.v = links.MLP(ndim_obs, 1, hidden_sizes=hidden_sizes)
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state):
        # Return (policy distribution, state value) for a batch of states.
        return self.pi(state), self.v(state)
class A3CLSTMGaussian(chainer.ChainList, a3c.A3CModel, RecurrentChainMixin):
    """An example of A3C recurrent Gaussian policy."""

    def __init__(self, obs_size, action_size, hidden_size=200, lstm_size=128):
        # Independent linear + LSTM stacks for the policy and value heads.
        self.pi_head = L.Linear(obs_size, hidden_size)
        self.v_head = L.Linear(obs_size, hidden_size)
        self.pi_lstm = L.LSTM(hidden_size, lstm_size)
        self.v_lstm = L.LSTM(hidden_size, lstm_size)
        self.pi = policies.FCGaussianPolicy(lstm_size, action_size)
        self.v = v_function.FCVFunction(lstm_size)
        super().__init__(self.pi_head, self.v_head,
                         self.pi_lstm, self.v_lstm, self.pi, self.v)

    def pi_and_v(self, state):
        # Shared forward shape for both heads: linear -> ReLU -> LSTM -> head.
        def forward(head, lstm, tail):
            h = F.relu(head(state))
            h = lstm(h)
            return tail(h)

        pout = forward(self.pi_head, self.pi_lstm, self.pi)
        vout = forward(self.v_head, self.v_lstm, self.v)
        return pout, vout
def main():
    """Parse CLI arguments and run asynchronous A3C training (or a demo
    evaluation when --demo is given) on the chosen Gym environment."""
    import logging

    parser = argparse.ArgumentParser()
    parser.add_argument('processes', type=int)
    parser.add_argument('--env', type=str, default='CartPole-v0')
    parser.add_argument('--arch', type=str, default='FFSoftmax',
                        choices=('FFSoftmax', 'FFMellowmax', 'LSTMGaussian'))
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--t-max', type=int, default=5)
    parser.add_argument('--beta', type=float, default=1e-2)
    parser.add_argument('--profile', action='store_true')
    parser.add_argument('--steps', type=int, default=8 * 10 ** 7)
    parser.add_argument('--eval-interval', type=int, default=10 ** 5)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--rmsprop-epsilon', type=float, default=1e-1)
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=7e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(level=args.logger_level)

    # Set a random seed used in ChainerRL.
    # If you use more than one processes, the results will be no longer
    # deterministic even with the same random seed.
    misc.set_random_seed(args.seed)

    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.processes) + args.seed * args.processes
    assert process_seeds.max() < 2 ** 32

    args.outdir = experiments.prepare_output_dir(args, args.outdir)

    def make_env(process_idx, test):
        # Build one (possibly wrapped) env instance per worker process.
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor and process_idx == 0:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if args.render and process_idx == 0 and not test:
            env = chainerrl.wrappers.Render(env)
        return env

    # NOTE(review): `spec.tags` is an old gym API that was removed in later
    # gym releases -- this script expects a matching old gym version.
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space

    # Switch policy types accordingly to action space types
    if args.arch == 'LSTMGaussian':
        model = A3CLSTMGaussian(obs_space.low.size, action_space.low.size)
    elif args.arch == 'FFSoftmax':
        model = A3CFFSoftmax(obs_space.low.size, action_space.n)
    elif args.arch == 'FFMellowmax':
        model = A3CFFMellowmax(obs_space.low.size, action_space.n)

    opt = rmsprop_async.RMSpropAsync(
        lr=args.lr, eps=args.rmsprop_epsilon, alpha=0.99)
    opt.setup(model)
    opt.add_hook(chainer.optimizer.GradientClipping(40))
    if args.weight_decay > 0:
        opt.add_hook(NonbiasWeightDecay(args.weight_decay))

    agent = a3c.A3C(model, opt, t_max=args.t_max, gamma=0.99,
                    beta=args.beta)
    if args.load:
        agent.load(args.load)

    if args.demo:
        # Evaluation-only mode: run eval episodes and print the statistics.
        env = make_env(0, True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        # Asynchronous training across `processes` workers.
        experiments.train_agent_async(
            agent=agent,
            outdir=args.outdir,
            processes=args.processes,
            make_env=make_env,
            profile=args.profile,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            max_episode_len=timestep_limit)
if __name__ == '__main__':
    # Entry point when executed as a script.
    main()
| {
"repo_name": "toslunar/chainerrl",
"path": "examples/gym/train_a3c_gym.py",
"copies": "1",
"size": "8242",
"license": "mit",
"hash": -7026018327212045000,
"line_mean": 38.2476190476,
"line_max": 94,
"alpha_frac": 0.6493569522,
"autogenerated": false,
"ratio": 3.4100124120810924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4559369364281093,
"avg_score": null,
"num_lines": null
} |
"""An example of training A3C against OpenAI Gym Envs.
This script is an example of training a PCL agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_a3c_gym.py 8 --env CartPole-v0
To solve InvertedPendulum-v1, run:
python train_a3c_gym.py 8 --env InvertedPendulum-v1 --arch LSTMGaussian --t-max 50 # noqa
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases()
import argparse
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.wrappers
import numpy as np
from chainerrl.agents import a3c
from chainerrl import experiments
from chainerrl import links
from chainerrl import misc
from chainerrl import policy
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
from chainerrl.optimizers import rmsprop_async
from chainerrl import policies
from chainerrl.recurrent import RecurrentChainMixin
from chainerrl import v_function
from saliency_a3c import SaliencyA3C
# My imports
from iclr_acer_link import ICLRACERHead, ICLRACERHeadMini
from guided_relu import guided_relu
import env_gym_chainer
import sys
X_SHAPE = 84
Y_SHAPE = 84
def phi(obs):
    """Convert a stack of 4 frames to float32 scaled into [0, 1].

    Args:
        obs: Sequence of exactly 4 frames with raw values in [0, 255].

    Returns:
        float32 ndarray with every value divided by 255.
    """
    assert len(obs) == 4
    # Use np.array (not np.asarray): asarray returns the input unchanged
    # when it is already a float32 ndarray, and the in-place /= below would
    # then silently scale the caller's buffer. np.array forces a copy.
    scaled = np.array(obs, dtype=np.float32)
    scaled /= 255.0
    return scaled
class A3CFF(chainer.ChainList, a3c.A3CModel):
    """Feedforward A3C model: a shared conv head (using guided ReLU so
    saliency maps can be computed), plus softmax-policy and value heads."""

    def __init__(self, n_actions):
        self.head = ICLRACERHead(activation=guided_relu)
        self.pi = policy.FCSoftmaxPolicy(
            self.head.n_output_channels, n_actions)
        self.v = v_function.FCVFunction(self.head.n_output_channels)
        super().__init__(self.head, self.pi, self.v)

    def pi_and_v(self, state):
        # Run the shared head once, then both output heads on its features.
        out = self.head(state)
        return self.pi(out), self.v(out)
def main():
    """Parse CLI args and train (or demo) a saliency-enabled A3C agent
    asynchronously across worker processes."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('processes', type=int)
    parser.add_argument('--env', type=str, default='CartPole-v0')
    # NOTE(review): --arch is parsed but never used below; only the
    # feed-forward discrete model (A3CFF) is ever built.
    parser.add_argument('--arch', type=str, default='FFSoftmax',
                        choices=('FFSoftmax', 'FFMellowmax', 'LSTMGaussian'))
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--outdir', type=str, default=None)
    parser.add_argument('--t-max', type=int, default=5)
    parser.add_argument('--beta', type=float, default=1e-2)
    parser.add_argument('--profile', action='store_true')
    parser.add_argument('--steps', type=int, default=8 * 10 ** 7)
    parser.add_argument('--eval-interval', type=int, default=10 ** 5)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--rmsprop-epsilon', type=float, default=1e-1)
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=7e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--frame-buffer-length', type=int, default=4)
    parser.add_argument('--render-b2w', action='store_true', default=False)
    # Additional params
    # NOTE(review): sys.float_info.min is the smallest *positive* float, so
    # with the default threshold every zero/negative reward is zeroed by
    # f_trim_reward below -- confirm this is intended.
    parser.add_argument('--min_reward', type=float, default=sys.float_info.min)
    args = parser.parse_args()
    def f_trim_reward(x, min_reward=args.min_reward):
        # Zero out rewards below the threshold; any surviving non-zero reward
        # is printed loudly (looks like a debugging leftover).
        if x < min_reward:
            x = 0
        else:
            if x != 0:
                print ("XXXXXXXXXXXXX ", x)
        return x
    logging.getLogger().setLevel(args.logger_level)
    if args.seed is not None:
        misc.set_random_seed(args.seed)
    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    def make_env(process_idx, test):
        # Build one (possibly monitored/rendered) env for a worker process.
        env = gym.make(args.env)
        if args.monitor and process_idx == 0:
            env = gym.wrappers.Monitor(env, args.outdir)
        # Scale rewards observed by agents
        if not test:
            misc.env_modifiers.make_reward_filtered(
                env, lambda x: f_trim_reward(x) * args.reward_scale_factor)
            misc.env_modifiers.make_reward_clipped(env, -1, 1)
        if args.render and process_idx == 0 and not test:
            misc.env_modifiers.make_rendered(env)
        # Wrap with the preprocessing env: resize to X_SHAPE x Y_SHAPE and
        # stack args.frame_buffer_length frames.
        env = env_gym_chainer.GymEnvironment(
            env,
            res_width=X_SHAPE,
            res_height=Y_SHAPE,
            agent_history_length=args.frame_buffer_length,
            render=args.render_b2w)
        return env
    # A throwaway env, only used to inspect spec and spaces.
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Switch policy types accordingly to action space types
    model = A3CFF(action_space.n)
    opt = rmsprop_async.RMSpropAsync(
        lr=args.lr, eps=args.rmsprop_epsilon, alpha=0.99)
    opt.setup(model)
    # Clipping by gradient norm (changed from 40 to 10)
    # opt.add_hook(chainer.optimizer.GradientClipping(10))
    if args.weight_decay > 0:
        opt.add_hook(NonbiasWeightDecay(args.weight_decay))
    agent = SaliencyA3C(model, opt, t_max=args.t_max, gamma=0.99,
                        beta=args.beta, phi=phi)
    if args.load:
        agent.load(args.load)
    if args.demo:
        # Evaluation-only mode: run eval episodes and report statistics.
        with chainer.using_config("train", False):
            env = make_env(0, True)
            eval_stats = experiments.eval_performance(
                env=env,
                agent=agent,
                n_runs=args.eval_n_runs,
                max_episode_len=timestep_limit)
            print('n_runs: {} mean: {} median: {} stdev {}'.format(
                args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
                eval_stats['stdev']))
    else:
        # Asynchronous training with args.processes worker processes.
        experiments.train_agent_async(
            agent=agent,
            outdir=args.outdir,
            processes=args.processes,
            make_env=make_env,
            profile=args.profile,
            steps=args.steps,
            eval_n_runs=args.eval_n_runs,
            eval_interval=args.eval_interval,
            max_episode_len=timestep_limit)
# Run the example only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| {
"repo_name": "hmightypirate/guided-backprop-chainerrl",
"path": "examples/mygym/train_a3c_gym.py",
"copies": "1",
"size": "6636",
"license": "apache-2.0",
"hash": -8408779957788209000,
"line_mean": 34.6774193548,
"line_max": 94,
"alpha_frac": 0.6511452682,
"autogenerated": false,
"ratio": 3.4383419689119172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4589487237111917,
"avg_score": null,
"num_lines": null
} |
"""An example of training Categorical DQN against OpenAI Gym Envs.
This script is an example of training an IQN agent against OpenAI
Gym envs. Only discrete spaces are supported.
To solve CartPole-v0, run:
python train_categorical_dqn_gym.py --env CartPole-v0
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import sys
import chainer.functions as F
import chainer.links as L
from chainer import optimizers
import gym
import chainerrl
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import misc
from chainerrl import replay_buffer
def main():
    """Parse CLI args and train (or demo) an IQN agent on a discrete-action
    Gym env."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--env', type=str, default='CartPole-v1')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--final-exploration-steps',
                        type=int, default=1000)
    parser.add_argument('--start-epsilon', type=float, default=1.0)
    parser.add_argument('--end-epsilon', type=float, default=0.1)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default=None)
    parser.add_argument('--steps', type=int, default=10 ** 8)
    parser.add_argument('--replay-start-size', type=int, default=50)
    parser.add_argument('--target-update-interval', type=int, default=100)
    parser.add_argument('--target-update-method', type=str, default='hard')
    parser.add_argument('--update-interval', type=int, default=1)
    parser.add_argument('--eval-n-runs', type=int, default=100)
    parser.add_argument('--eval-interval', type=int, default=1000)
    # NOTE(review): --n-hidden-channels and --n-hidden-layers are parsed but
    # never used below; the network sizes are hard-coded (hidden_size = 64).
    parser.add_argument('--n-hidden-channels', type=int, default=12)
    parser.add_argument('--n-hidden-layers', type=int, default=3)
    parser.add_argument('--gamma', type=float, default=0.95)
    parser.add_argument('--minibatch-size', type=int, default=32)
    parser.add_argument('--render-train', action='store_true')
    parser.add_argument('--render-eval', action='store_true')
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--reward-scale-factor',
                        type=float, default=1.0)
    args = parser.parse_args()
    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    args.outdir = experiments.prepare_output_dir(
        args, args.outdir, argv=sys.argv)
    print('Output files are saved in {}'.format(args.outdir))
    def make_env(test):
        # Build a (possibly monitored/rendered) train or eval env.
        env = gym.make(args.env)
        # Use a different random seed for the test env.
        env_seed = 2 ** 32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale training rewards only.
            misc.env_modifiers.make_reward_filtered(
                env, lambda x: x * args.reward_scale_factor)
        if ((args.render_eval and test) or
                (args.render_train and not test)):
            env = chainerrl.wrappers.Render(env)
        return env
    env = make_env(test=False)
    timestep_limit = env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_size = env.observation_space.low.size
    action_space = env.action_space
    hidden_size = 64
    # Implicit quantile network: psi embeds observations, phi embeds sampled
    # quantile fractions through a cosine basis, and f maps their combination
    # to per-action quantile values.
    q_func = chainerrl.agents.iqn.ImplicitQuantileQFunction(
        psi=chainerrl.links.Sequence(
            L.Linear(obs_size, hidden_size),
            F.relu,
        ),
        phi=chainerrl.links.Sequence(
            chainerrl.agents.iqn.CosineBasisLinear(64, hidden_size),
            F.relu,
        ),
        f=L.Linear(hidden_size, env.action_space.n),
    )
    # Use epsilon-greedy for exploration
    explorer = explorers.LinearDecayEpsilonGreedy(
        args.start_epsilon, args.end_epsilon, args.final_exploration_steps,
        action_space.sample)
    opt = optimizers.Adam(1e-3)
    opt.setup(q_func)
    rbuf_capacity = 50000  # 5 * 10 ** 5
    rbuf = replay_buffer.ReplayBuffer(rbuf_capacity)
    agent = chainerrl.agents.IQN(
        q_func, opt, rbuf, gpu=args.gpu, gamma=args.gamma,
        explorer=explorer, replay_start_size=args.replay_start_size,
        target_update_interval=args.target_update_interval,
        update_interval=args.update_interval,
        minibatch_size=args.minibatch_size,
    )
    if args.load:
        agent.load(args.load)
    eval_env = make_env(test=True)
    if args.demo:
        # Evaluation-only mode: run eval episodes and report statistics.
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            eval_env=eval_env,
            train_max_episode_len=timestep_limit,
        )
# Run the example only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| {
"repo_name": "toslunar/chainerrl",
"path": "examples/gym/train_iqn_gym.py",
"copies": "1",
"size": "5827",
"license": "mit",
"hash": -5829898203047046000,
"line_mean": 35.8797468354,
"line_max": 75,
"alpha_frac": 0.6414964819,
"autogenerated": false,
"ratio": 3.5902649414664203,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9731761423366421,
"avg_score": 0,
"num_lines": 158
} |
"""An example of training DQN against OpenAI Gym Envs.
This script is an example of training a DQN agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported. For continuous action
spaces, A NAF (Normalized Advantage Function) is used to approximate Q-values.
To solve CartPole-v0, run:
python train_dqn_gym.py --env CartPole-v0
To solve Pendulum-v0, run:
python train_dqn_gym.py --env Pendulum-v0
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import os
import sys
from chainer import optimizers
import gym
from gym import spaces
import numpy as np
import chainerrl
from chainerrl.agents.dqn import DQN
from chainerrl import experiments
from chainerrl import explorers
from chainerrl import links
from chainerrl import misc
from chainerrl import q_functions
from chainerrl import replay_buffer
def main():
    """Parse CLI args and train (or demo) a DQN agent on a Gym env.

    Continuous action spaces are handled by a NAF-style quadratic
    Q-function with Ornstein-Uhlenbeck exploration; discrete action spaces
    use a plain Q-function with epsilon-greedy exploration.
    """
    import logging
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--env', type=str, default='Pendulum-v0')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--final-exploration-steps',
                        type=int, default=10 ** 4)
    parser.add_argument('--start-epsilon', type=float, default=1.0)
    parser.add_argument('--end-epsilon', type=float, default=0.1)
    parser.add_argument('--noisy-net-sigma', type=float, default=None)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default=None)
    parser.add_argument('--steps', type=int, default=10 ** 5)
    parser.add_argument('--prioritized-replay', action='store_true')
    parser.add_argument('--replay-start-size', type=int, default=1000)
    parser.add_argument('--target-update-interval', type=int, default=10 ** 2)
    parser.add_argument('--target-update-method', type=str, default='hard')
    parser.add_argument('--soft-update-tau', type=float, default=1e-2)
    parser.add_argument('--update-interval', type=int, default=1)
    parser.add_argument('--eval-n-runs', type=int, default=100)
    parser.add_argument('--eval-interval', type=int, default=10 ** 4)
    parser.add_argument('--n-hidden-channels', type=int, default=100)
    parser.add_argument('--n-hidden-layers', type=int, default=2)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--minibatch-size', type=int, default=None)
    parser.add_argument('--render-train', action='store_true')
    parser.add_argument('--render-eval', action='store_true')
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--reward-scale-factor', type=float, default=1e-3)
    args = parser.parse_args()
    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    args.outdir = experiments.prepare_output_dir(
        args, args.outdir, argv=sys.argv)
    print('Output files are saved in {}'.format(args.outdir))
    def clip_action_filter(a):
        # Closure over `action_space`, which is assigned further below; this
        # works because the filter is only invoked at action time.
        return np.clip(a, action_space.low, action_space.high)
    def make_env(test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        env_seed = 2 ** 32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if isinstance(env.action_space, spaces.Box):
            misc.env_modifiers.make_action_filtered(env, clip_action_filter)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if ((args.render_eval and test) or
                (args.render_train and not test)):
            env = chainerrl.wrappers.Render(env)
        return env
    env = make_env(test=False)
    timestep_limit = env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = env.observation_space
    obs_size = obs_space.low.size
    action_space = env.action_space
    if isinstance(action_space, spaces.Box):
        action_size = action_space.low.size
        # Use NAF to apply DQN to continuous action spaces
        q_func = q_functions.FCQuadraticStateQFunction(
            obs_size, action_size,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers,
            action_space=action_space)
        # Use the Ornstein-Uhlenbeck process for exploration
        ou_sigma = (action_space.high - action_space.low) * 0.2
        explorer = explorers.AdditiveOU(sigma=ou_sigma)
    else:
        n_actions = action_space.n
        q_func = q_functions.FCStateQFunctionWithDiscreteAction(
            obs_size, n_actions,
            n_hidden_channels=args.n_hidden_channels,
            n_hidden_layers=args.n_hidden_layers)
        # Use epsilon-greedy for exploration
        explorer = explorers.LinearDecayEpsilonGreedy(
            args.start_epsilon, args.end_epsilon, args.final_exploration_steps,
            action_space.sample)
    if args.noisy_net_sigma is not None:
        # NoisyNet replaces explicit exploration, so switch to greedy actions.
        links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
        # Turn off explorer
        explorer = explorers.Greedy()
    # Draw the computational graph and save it in the output directory.
    chainerrl.misc.draw_computational_graph(
        [q_func(np.zeros_like(obs_space.low, dtype=np.float32)[None])],
        os.path.join(args.outdir, 'model'))
    opt = optimizers.Adam()
    opt.setup(q_func)
    rbuf_capacity = 5 * 10 ** 5
    if args.minibatch_size is None:
        args.minibatch_size = 32
    if args.prioritized_replay:
        # Anneal beta over the number of update steps remaining after warmup.
        betasteps = (args.steps - args.replay_start_size) \
            // args.update_interval
        rbuf = replay_buffer.PrioritizedReplayBuffer(
            rbuf_capacity, betasteps=betasteps)
    else:
        rbuf = replay_buffer.ReplayBuffer(rbuf_capacity)
    agent = DQN(q_func, opt, rbuf, gpu=args.gpu, gamma=args.gamma,
                explorer=explorer, replay_start_size=args.replay_start_size,
                target_update_interval=args.target_update_interval,
                update_interval=args.update_interval,
                minibatch_size=args.minibatch_size,
                target_update_method=args.target_update_method,
                soft_update_tau=args.soft_update_tau,
                )
    if args.load:
        agent.load(args.load)
    eval_env = make_env(test=True)
    if args.demo:
        # Evaluation-only mode: run eval episodes and report statistics.
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        experiments.train_agent_with_evaluation(
            agent=agent, env=env, steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs, eval_interval=args.eval_interval,
            outdir=args.outdir, eval_env=eval_env,
            train_max_episode_len=timestep_limit)
# Run the example only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| {
"repo_name": "toslunar/chainerrl",
"path": "examples/gym/train_dqn_gym.py",
"copies": "1",
"size": "7961",
"license": "mit",
"hash": 1923436568514541600,
"line_mean": 39.8256410256,
"line_max": 79,
"alpha_frac": 0.6524305992,
"autogenerated": false,
"ratio": 3.5779775280898876,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47304081272898874,
"avg_score": null,
"num_lines": null
} |
"""An example of training PPO against OpenAI Gym Envs.
This script is an example of training a PPO agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_ppo_gym.py --env CartPole-v0
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import functools
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
def main():
    """Parse CLI args and train (or demo) a PPO agent on batched
    (multi-process) Gym envs. Supports both discrete and Box action spaces."""
    import logging
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--env', type=str, default='Hopper-v2')
    parser.add_argument('--num-envs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--steps', type=int, default=10 ** 6)
    parser.add_argument('--eval-interval', type=int, default=10000)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--standardize-advantages', action='store_true')
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--window-size', type=int, default=100)
    parser.add_argument('--update-interval', type=int, default=2048)
    parser.add_argument('--log-interval', type=int, default=1000)
    parser.add_argument('--batchsize', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--entropy-coef', type=float, default=0.0)
    args = parser.parse_args()
    logging.basicConfig(level=args.logger_level)
    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32
    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    def make_env(process_idx, test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if args.render:
            env = chainerrl.wrappers.Render(env)
        return env
    def make_batch_env(test):
        # One subprocess env per index.
        # NOTE(review): the loop variable `env` is unused; only `idx` matters.
        return chainerrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test)
             for idx, env in enumerate(range(args.num_envs))])
    # Only for getting timesteps, and obs-action spaces
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.tags.get(
        'wrapper_config.TimeLimit.max_episode_steps')
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Normalize observations based on their empirical mean and variance
    obs_normalizer = chainerrl.links.EmpiricalNormalization(
        obs_space.low.size, clip_threshold=5)
    # Small-scale init for the last layer keeps initial policies near-uniform.
    winit_last = chainer.initializers.LeCunNormal(1e-2)
    # Switch policy types accordingly to action space types
    if isinstance(action_space, gym.spaces.Discrete):
        n_actions = action_space.n
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, n_actions, initialW=winit_last),
            chainerrl.distribution.SoftmaxDistribution,
        )
    elif isinstance(action_space, gym.spaces.Box):
        action_size = action_space.low.size
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, action_size, initialW=winit_last),
            chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
                action_size=action_size,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # Parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            ),
        )
    else:
        print("""\
This example only supports gym.spaces.Box or gym.spaces.Discrete action spaces.""")  # NOQA
        return
    vf = chainer.Sequential(
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 1),
    )
    # Combine a policy and a value function into a single model
    model = chainerrl.links.Branched(policy, vf)
    opt = chainer.optimizers.Adam(alpha=args.lr, eps=1e-5)
    opt.setup(model)
    if args.weight_decay > 0:
        opt.add_hook(NonbiasWeightDecay(args.weight_decay))
    agent = PPO(model, opt,
                obs_normalizer=obs_normalizer,
                gpu=args.gpu,
                update_interval=args.update_interval,
                minibatch_size=args.batchsize, epochs=args.epochs,
                clip_eps_vf=None, entropy_coef=args.entropy_coef,
                standardize_advantages=args.standardize_advantages,
                )
    if args.load:
        agent.load(args.load)
    if args.demo:
        # Evaluation-only mode: run eval episodes and report statistics.
        env = make_batch_env(True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            agent.optimizer.alpha = value
        lr_decay_hook = experiments.LinearInterpolationHook(
            args.steps, args.lr, 0, lr_setter)
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            return_window_size=args.window_size,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=False,
            step_hooks=[
                lr_decay_hook,
            ],
        )
# Run the example only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| {
"repo_name": "toslunar/chainerrl",
"path": "examples/mujoco/train_ppo_batch_gym.py",
"copies": "1",
"size": "8073",
"license": "mit",
"hash": 7375016043823864000,
"line_mean": 37.0801886792,
"line_max": 91,
"alpha_frac": 0.6288864115,
"autogenerated": false,
"ratio": 3.684618895481515,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4813505306981515,
"avg_score": null,
"num_lines": null
} |
'''An example of using :class:`AreaDetector`'''
import time
import config
from ophyd import SimDetector
from ophyd import (ImagePlugin, TIFFPlugin, ProcessPlugin, OverlayPlugin,
Component as Cpt)
logger = config.logger
class MyDetector(SimDetector):
    """A SimDetector extended with image, TIFF, process, and overlay plugins.

    Each component's second argument is the EPICS PV suffix appended to the
    detector prefix.
    """

    image1 = Cpt(ImagePlugin, 'image1:')  # live image array access
    tiff1 = Cpt(TIFFPlugin, 'TIFF1:')  # TIFF file writing
    proc1 = Cpt(ProcessPlugin, 'Proc1:')  # processing (filter coefficients etc.)
    over1 = Cpt(OverlayPlugin, 'Over1:')  # graphical overlays
# EPICS PV prefix of the demo detector IOC.
det1_prefix = 'XF:31IDA-BI{Cam:Tbl}'
det = MyDetector(det1_prefix)
# Configure single-frame acquisition with array callbacks enabled so that
# plugins receive frames.
det.cam.image_mode.put('Single', wait=True)
det.image1.enable.put('Enable', wait=True)
det.cam.array_callbacks.put('Enable', wait=True)
# ensure EPICS_CA_MAX_ARRAY_BYTES set properly...
img = det.image1.image
print('Image: {}'.format(img))
# File name template for the TIFF plugin (path, base name, frame number).
det.tiff1.file_template.put('%s%s_%3.3d.tif', wait=True)
logger.debug('template value=%s', det.tiff1.file_template.get())
logger.debug('full filename=%s', det.tiff1.full_file_name.get())
logger.debug('acquire = %d', det.cam.acquire.get())
img1 = det.image1
logger.debug('nd_array_port = %s', img1.nd_array_port.get())
# Signal group allows setting value as a list:
proc1 = det.proc1
logger.debug('fc=%s', proc1.fc.get())
# Filter coefficients can be written as a named tuple in one call...
FcTuple = proc1.fc.get_device_tuple()
proc1.fc.put(FcTuple(fc1=1, fc2=2, fc3=3, fc4=4),
             wait=True)
# Brief pause to let the IOC process the writes before reading back.
time.sleep(0.1)
logger.debug('fc=%s', proc1.fc.get())
# But they can be accessed individually as well
logger.debug('(fc1=%s, fc2=%s, fc3=%s, fc4=%s)', proc1.fc.fc1.get(),
             proc1.fc.fc2.get(), proc1.fc.fc3.get(), proc1.fc.fc4.get())
# Reset them to the default values
proc1.fc.put(FcTuple(1, -1, 0, 1), wait=True)
time.sleep(0.1)
logger.debug('reset to fc=%s', proc1.fc.get())
# if using IPython, try the following:
# In [0]: run areadetector.py
#
# In [1]: help(proc1)
logger.debug('Overlay1:1 blue=%s', det.over1.overlay_1.blue.get())
| {
"repo_name": "dchabot/ophyd",
"path": "examples/areadetector.py",
"copies": "3",
"size": "1836",
"license": "bsd-3-clause",
"hash": 7953465430392761000,
"line_mean": 27.6875,
"line_max": 73,
"alpha_frac": 0.6797385621,
"autogenerated": false,
"ratio": 2.6608695652173915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840608127317392,
"avg_score": 0,
"num_lines": 64
} |
"""An example of using sum-product loopy belief propagation to reason about
faces and their parts in an image."""
import sys
sys.path.append("..")
import numpy as np
import pylab
from graph import BpGraph
from nodesLib import VarNodes
from nodesLib import NoisyOrNodes
from nodesLib import CatNodes
import matplotlib.pyplot as plt
def imshow_gray(image, rescale=False):
    """Render a 2D array as a grayscale image.

    Args:
        image (ndarray): 2D image data.
        rescale (bool, optional): if True, let matplotlib autoscale the
            displayed range; if False, entries must already lie in [0, 1].
    """
    gray = plt.get_cmap('gray')
    if rescale:
        plt.imshow(image, cmap=gray, interpolation='None')
    else:
        assert np.max(image) <= 1, 'max value must be <= 1'
        assert np.min(image) >= 0, 'max value must be >= 0'
        plt.imshow(image, cmap=gray, vmin=0, vmax=1, interpolation='None')
    plt.draw()
    plt.pause(0.001)
# Image size (rows, cols).
IM_SZ = [35, 35]
# Condition on the presence of a symbol(s) at a particular place(s) in the
# image. Uncomment one of the 3 lines below to see different scenarios.
COND_POINTS = [['face', 18, 18]]
#COND_POINTS = [['eye', 18, 18]]
#COND_POINTS = [['face', 18, 18], ['eye', 12, 16]]
# Symbols in our model. Note: we do not distinguish between left and right
# eyes.
SYMBOLS = ('face', 'eye', 'nose', 'mouth')
# Parts of the face. The model encodes the production rule
#   face --> eye, eye, nose, mouth
FACE_PARTS = ('eye', 'eye', 'nose', 'mouth')
# For each of the FACE_PARTS, specify where they are located relative to the
# face and how large the regions of uncertainty are.
OFFSETS = [[-7, -3], [-7, 3], [0, 0], [7, 0]]
REGION_SIZES = [[3, 3], [3, 3], [3, 3], [7, 3]]
# Used to keep track of the nodes (variable nodes, etc.) associated with
# a particular symbol (e.g., face).
sym_super_nodes = {}
bpg = BpGraph()
# Create variable node containers and noisy-or factor containers. We will add
# variable and noisy-or factor nodes to these containers later.
for i in range(0, len(SYMBOLS)):
    sym = SYMBOLS[i]
    tmp_dict = {}
    tmp_dict['vars'] = VarNodes(sym+'_vars', {'num_states': 2})
    tmp_dict['noisy'] = NoisyOrNodes(name=sym + '_noisy',
                                     nodes_params={'leak_prob': 0.01, \
                                                   'prob_success': 0.99, \
                                                   'bp_algo': 'sum'})
    # Per-pixel node id grids for this symbol.
    tmp_dict['var_ids'] = np.zeros(IM_SZ, dtype='int')
    tmp_dict['fact_ids'] = np.zeros(IM_SZ, dtype='int')
    sym_super_nodes[sym] = tmp_dict
# Create variable nodes for presence/absence of each object and its connecting
# noisy-or factor. Connect these entities together.
for sym in SYMBOLS:
    chunk_dict = sym_super_nodes[sym]
    for i in range(0, IM_SZ[0]):
        for j in range(0, IM_SZ[1]):
            chunk_dict['var_ids'][i, j] = chunk_dict['vars'].create_nodes(1)[0]
            chunk_dict['fact_ids'][i, j] = chunk_dict['noisy'].create_nodes(1)[0]
            bpg.add_edge(chunk_dict['vars'], chunk_dict['var_ids'][i, j], \
                         chunk_dict['noisy'], chunk_dict['fact_ids'][i, j], \
                         'output')
face_super_nodes = sym_super_nodes['face']
# Express, for a face at each location in the image, where its parts (eye,
# eye, nose, mouth) could be.
for ch_ind in range(0, len(FACE_PARTS)):
    face_part = FACE_PARTS[ch_ind]
    offset = OFFSETS[ch_ind]
    # Parameters for a CatNode factor (representing a categorical
    # distribution) to represent the spatial relationship between a face and
    # this part. The last choice means "part absent".
    num_choices = REGION_SIZES[ch_ind][0]*REGION_SIZES[ch_ind][1]+1
    probs_use = np.ones((2, num_choices))/(REGION_SIZES[ch_ind][0]*REGION_SIZES[ch_ind][1])
    probs_use[0, :] = 0.0
    probs_use[0, -1] = 1.0
    probs_use[1, -1] = 0.0
    # Create a CatNodes container for storing categorical factors. Create
    # variable nodes representing the possible outcomes of the categorical
    # factors. Encoding uses a 1-hot encoding for the output choice of a
    # categorical factor.
    tmp_cat_nodes = CatNodes(name=face_part + '_cat' + str(ch_ind),
                             nodes_params={'probs': probs_use, \
                                           'bp_algo': 'sum'})
    tmp_cat_vars = VarNodes(face_part + '_vars_cat' + str(ch_ind), {'num_states': 2})
    ch_chunk = sym_super_nodes[face_part]
    for i in range(0, IM_SZ[0]):
        for j in range(0, IM_SZ[1]):
            # Create categorical node driven by face presence at (i, j).
            cat_id = tmp_cat_nodes.create_nodes(1)[0]
            bpg.add_edge(face_super_nodes['vars'], \
                         face_super_nodes['var_ids'][i, j], \
                         tmp_cat_nodes, \
                         cat_id, \
                         'input')
            # Create categorical variable nodes (one per outcome).
            cat_var_ids = tmp_cat_vars.create_nodes(num_choices)
            for c_id in cat_var_ids:
                bpg.add_edge(tmp_cat_vars, \
                             c_id, \
                             tmp_cat_nodes, \
                             cat_id, \
                             'output')
            # Hook up categorical variables to the part's noisy-or factors at
            # the in-bounds offsets within the uncertainty region.
            ct = 0
            for ii in range(0, REGION_SIZES[ch_ind][0]):
                ii_use = ii+i+offset[0]
                if ii_use < 0 or ii_use >= IM_SZ[0]:
                    continue
                for jj in range(0, REGION_SIZES[ch_ind][1]):
                    jj_use = jj+j+offset[1]
                    if jj_use < 0 or jj_use >= IM_SZ[1]:
                        continue
                    bpg.add_edge(tmp_cat_vars, \
                                 cat_var_ids[ct], \
                                 ch_chunk['noisy'], \
                                 ch_chunk['fact_ids'][ii_use, jj_use], \
                                 'input')
                    ct += 1
    bpg.add_nodes_to_schedule(tmp_cat_nodes)
    bpg.add_nodes_to_schedule(tmp_cat_vars)
# Condition on presence/absence of certain symbols in the scene at certain
# locations.
for cond_pt in COND_POINTS:
    [sym, i, j] = cond_pt
    sym_super = sym_super_nodes[sym]
    sym_super['vars'].condition_on([sym_super['var_ids'][i, j]], 1)
# Schedule nodes for message-passing in the graph object.
for i in SYMBOLS:
    bpg.add_nodes_to_schedule(sym_super_nodes[i]['vars'])
    bpg.add_nodes_to_schedule(sym_super_nodes[i]['noisy'])
# Prepare graph object for inference.
bpg.finalize()
# Do inference.
bpg.do_message_passing()
# Get resulting beliefs: column 1 is P(symbol present), reshaped to the image.
for i in SYMBOLS:
    sym_super_nodes[i]['bel'] = sym_super_nodes[i]['vars'].get_beliefs()
    sym_super_nodes[i]['bel'] = np.reshape(sym_super_nodes[i]['bel'][:, 1], IM_SZ)
# Visualize resulting beliefs on a log-scale, and normalize to be in
# range [0,1].
for i in range(0, len(SYMBOLS)):
    pylab.subplot(1, len(SYMBOLS), i+1)
    imshow_gray(np.log(sym_super_nodes[SYMBOLS[i]]['bel']), True)
| {
"repo_name": "jeroen-chua/factorflow",
"path": "examples/example_face.py",
"copies": "1",
"size": "6929",
"license": "mit",
"hash": 2305337049538094600,
"line_mean": 35.277486911,
"line_max": 91,
"alpha_frac": 0.5732428922,
"autogenerated": false,
"ratio": 3.31055900621118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.438380189841118,
"avg_score": null,
"num_lines": null
} |
"""An example of using the internal DB-API module without any Django."""
# Adds the relative path for the MS SQL Server backend to Python's import path.
# We do this so we can run this module from a checkout for demo purposes
# without having to install it.
def _hack_backend_path():
import os, sys
backend_path = os.path.join(os.path.abspath(os.path.dirname(".")), "../source")
sys.path.append(backend_path)
# Import the dbapi module, after hacking the import path so the checkout's
# ../source directory is importable.
_hack_backend_path()
import sqlserver_ado.dbapi as db
def _print_names(results):
for item in results:
print(item[1])
def sproc_1(connection):
    """Run uspAppUser_GetAll via execute() with an explicit %s marker."""
    cursor = connection.cursor()
    cursor.execute('uspAppUser_GetAll %s', ['current_user'])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def sproc_1b(connection):
    """Run uspAppUser_GetAll via execute(), passing a NULL parameter."""
    cursor = connection.cursor()
    cursor.execute('uspAppUser_GetAll %s', [None])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def sproc_2(connection):
    """Run uspAppUser_GetAll via the DB-API callproc() interface."""
    cursor = connection.cursor()
    cursor.callproc('uspAppUser_GetAll', ['current_user'])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def sproc_2b(connection):
    """Run uspAppUser_GetAll via callproc() with an integer argument."""
    cursor = connection.cursor()
    cursor.callproc('uspAppUser_GetAll', [0])
    rows = cursor.fetchall()
    _print_names(rows)
    cursor.close()
def main():
    """Connect to the local demo database, run one sproc example, clean up."""
    connection = db.connect("PROVIDER=SQLOLEDB;DATA SOURCE=localhost\\ss2005;Initial Catalog=Ted;Integrated Security=SSPI")
    try:
        sproc_2b(connection)
    finally:
        # Release the ADO connection even if the sproc call raises.
        connection.close()


if __name__ == '__main__':
    # Guard so importing this module does not immediately hit the database.
    main()
| {
"repo_name": "theoriginalgri/django-mssql",
"path": "extras/samples/nodjango.py",
"copies": "1",
"size": "1654",
"license": "mit",
"hash": -4848877497085948000,
"line_mean": 29.2075471698,
"line_max": 123,
"alpha_frac": 0.6650544135,
"autogenerated": false,
"ratio": 3.4244306418219463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45894850553219463,
"avg_score": null,
"num_lines": null
} |
""" An example of using this library to calculate related artists
from the last.fm dataset. More details can be found
at http://www.benfrederickson.com/matrix-factorization/
This code will automically download a HDF5 version of the dataset from
GitHub when it is first run. The original dataset can also be found at
http://www.dtic.upf.edu/~ocelma/MusicRecommendationDataset/lastfm-360K.html.
"""
import argparse
import codecs
import logging
import time
import numpy as np
import tqdm
from implicit.als import AlternatingLeastSquares
from implicit.approximate_als import (
AnnoyAlternatingLeastSquares,
FaissAlternatingLeastSquares,
NMSLibAlternatingLeastSquares,
)
from implicit.bpr import BayesianPersonalizedRanking
from implicit.datasets.lastfm import get_lastfm
from implicit.lmf import LogisticMatrixFactorization
from implicit.nearest_neighbours import (
BM25Recommender,
CosineRecommender,
TFIDFRecommender,
bm25_weight,
)
# maps command line model argument to class name
# (keys are the accepted values of the --model CLI flag below)
MODELS = {
    "als": AlternatingLeastSquares,
    "nmslib_als": NMSLibAlternatingLeastSquares,
    "annoy_als": AnnoyAlternatingLeastSquares,
    "faiss_als": FaissAlternatingLeastSquares,
    "tfidf": TFIDFRecommender,
    "cosine": CosineRecommender,
    "bpr": BayesianPersonalizedRanking,
    "lmf": LogisticMatrixFactorization,
    "bm25": BM25Recommender,
}
def get_model(model_name):
    """Look up and instantiate the recommender registered as *model_name*.

    Raises ValueError for names not present in MODELS; each model family
    gets a small set of default hyper-parameters.
    """
    print("getting model %s" % model_name)
    model_class = MODELS.get(model_name)
    if model_class is None:
        raise ValueError("Unknown Model '%s'" % model_name)

    # default hyper-parameters per model family
    if model_name.endswith("als"):
        params = {"factors": 64, "dtype": np.float32}
    else:
        params = {
            "bm25": {"K1": 100, "B": 0.5},
            "bpr": {"factors": 63},
            "lmf": {"factors": 30, "iterations": 40, "regularization": 1.5},
        }.get(model_name, {})

    return model_class(**params)
def calculate_similar_artists(output_filename, model_name="als"):
    """Write a TSV of (artist, similar artist, score) rows for every artist
    in the last.fm dataset, using the model's ``similar_items`` API."""
    artists, users, plays = get_lastfm()

    # build the requested model
    model = get_model(model_name)

    if model_name.endswith("als"):
        # ALS-family models are trained on bm25-weighted play counts
        logging.debug("weighting matrix by bm25_weight")
        plays = bm25_weight(plays, K1=100, B=0.8)
        # only similar_items is used here, so skip the recommend index
        model.approximate_recommend = False

    # this is actually disturbingly expensive:
    plays = plays.tocsr()

    logging.debug("training model %s", model_name)
    started = time.time()
    model.fit(plays)
    logging.debug("trained model '%s' in %0.2fs", model_name, time.time() - started)

    # rank artists by how many users played them (CSR row lengths)
    started = time.time()
    logging.debug("calculating top artists")
    listener_counts = np.ediff1d(plays.indptr)
    ordered_ids = sorted(np.arange(len(artists)), key=lambda a: -listener_counts[a])

    # write out as a TSV of artistid, otherartistid, score
    logging.debug("writing similar items")
    with tqdm.tqdm(total=len(ordered_ids)) as progress, \
            codecs.open(output_filename, "w", "utf8") as out:
        for artist_id in ordered_ids:
            name = artists[artist_id]
            for other_id, score in model.similar_items(artist_id, 11):
                out.write("%s\t%s\t%s\n" % (name, artists[other_id], score))
            progress.update(1)

    logging.debug("generated similar artists in %0.2fs", time.time() - started)
def calculate_recommendations(output_filename, model_name="als"):
    """Write a TSV of (user, recommended artist, score) rows for every user
    in the last.fm dataset, using the model's ``recommend`` API."""
    artists, users, plays = get_lastfm()

    # build the requested model
    model = get_model(model_name)

    if model_name.endswith("als"):
        # ALS-family models are trained on bm25-weighted play counts
        logging.debug("weighting matrix by bm25_weight")
        plays = bm25_weight(plays, K1=100, B=0.8)
        # only recommend() is used here, so skip the similar-items index
        model.approximate_similar_items = False

    # this is actually disturbingly expensive:
    plays = plays.tocsr()

    logging.debug("training model %s", model_name)
    started = time.time()
    model.fit(plays)
    logging.debug("trained model '%s' in %0.2fs", model_name, time.time() - started)

    # transpose so rows index users, then recommend per user
    started = time.time()
    user_plays = plays.T.tocsr()
    with tqdm.tqdm(total=len(users)) as progress, \
            codecs.open(output_filename, "w", "utf8") as out:
        for user_id, username in enumerate(users):
            for artist_id, score in model.recommend(user_id, user_plays):
                out.write("%s\t%s\t%s\n" % (username, artists[artist_id], score))
            progress.update(1)

    logging.debug("generated recommendations in %0.2fs", time.time() - started)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generates similar artists on the last.fm dataset"
        " or generates personalized recommendations for each user",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--output",
        type=str,
        default="similar-artists.tsv",
        dest="outputfile",
        help="output file name",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="als",
        dest="model",
        help="model to calculate (%s)" % "/".join(MODELS.keys()),
    )
    parser.add_argument(
        "--recommend",
        help="Recommend items for each user rather than calculate similar_items",
        action="store_true",
    )
    # NOTE(review): --param is parsed but never applied to the model; either
    # wire args.param through get_model() or remove the flag.
    # (Fixed the unbalanced quote in the help text below.)
    parser.add_argument(
        "--param", action="append", help="Parameters to pass to the model, formatted as 'KEY=VALUE'"
    )
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG)

    if args.recommend:
        calculate_recommendations(args.outputfile, model_name=args.model)
    else:
        calculate_similar_artists(args.outputfile, model_name=args.model)
| {
"repo_name": "benfred/implicit",
"path": "examples/lastfm.py",
"copies": "1",
"size": "6412",
"license": "mit",
"hash": -2531186803664295400,
"line_mean": 33.1063829787,
"line_max": 99,
"alpha_frac": 0.664691204,
"autogenerated": false,
"ratio": 3.586129753914989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9748960117296872,
"avg_score": 0.0003721681236234931,
"num_lines": 188
} |
"""An example of wrapping manual tqdm updates for urllib reporthook.
# urllib.urlretrieve documentation
> If present, the hook function will be called once
> on establishment of the network connection and once after each block read
> thereafter. The hook will be passed three arguments; a count of blocks
> transferred so far, a block size in bytes, and the total size of the file.
Usage:
tqdm_wget.py [options]
Options:
-h, --help
Print this help message and exit
-u URL, --url URL : string, optional
The url to fetch.
[default: http://www.doc.ic.ac.uk/~cod11/matryoshka.zip]
-o FILE, --output FILE : string, optional
The local file path in which to save the url [default: /dev/null].
"""
import urllib
from tqdm import tqdm
from docopt import docopt
def my_hook(t):
    """
    Build a urlretrieve reporthook that drives the tqdm instance *t*.

    Remember to close() or __exit__() the tqdm instance when finished
    (easiest with a `with` block):

    >>> with tqdm(...) as t:
    ...     urllib.urlretrieve(..., reporthook=my_hook(t))
    """
    state = {'last_b': 0}

    def inner(b=1, bsize=1, tsize=None):
        """
        b : int, optional
            Count of blocks transferred so far [default: 1].
        bsize : int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize : int, optional
            Total size (in tqdm units); None leaves t.total unchanged.
        """
        if tsize is not None:
            t.total = tsize
        # advance the bar by the bytes transferred since the previous call
        t.update((b - state['last_b']) * bsize)
        state['last_b'] = b

    return inner
opts = docopt(__doc__)
eg_link = opts['--url']
# use the last path segment of the URL as the progress-bar description
eg_file = eg_link.replace('/', ' ').split()[-1]
with tqdm(unit='B', unit_scale=True, leave=True, miniters=1,
          desc=eg_file) as t:  # all optional kwargs
    # NOTE(review): urllib.urlretrieve is the Python 2 API; on Python 3 this
    # would be urllib.request.urlretrieve.
    urllib.urlretrieve(eg_link, filename=opts['--output'],
                       reporthook=my_hook(t), data=None)
| {
"repo_name": "lrq3000/tqdm",
"path": "examples/tqdm_wget.py",
"copies": "2",
"size": "1942",
"license": "mit",
"hash": 5515203176879914000,
"line_mean": 28.8769230769,
"line_max": 77,
"alpha_frac": 0.6168898043,
"autogenerated": false,
"ratio": 3.3891797556719023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 65
} |
"""An example program that uses the elsapy module"""
from elsapy.elsclient import ElsClient
from elsapy.elsprofile import ElsAuthor, ElsAffil
from elsapy.elsdoc import FullDoc, AbsDoc
from elsapy.elssearch import ElsSearch
import json
## Load configuration
con_file = open("config.json")
config = json.load(con_file)
con_file.close()
## Initialize client
client = ElsClient(config['apikey'])
client.inst_token = config['insttoken']
## Author example
# Initialize author with uri
my_auth = ElsAuthor(
uri = 'https://api.elsevier.com/content/author/author_id/7004367821')
# Read author data, then write to disk
if my_auth.read(client):
print ("my_auth.full_name: ", my_auth.full_name)
my_auth.write()
else:
print ("Read author failed.")
## Affiliation example
# Initialize affiliation with ID as string
my_aff = ElsAffil(affil_id = '60101411')
if my_aff.read(client):
print ("my_aff.name: ", my_aff.name)
my_aff.write()
else:
print ("Read affiliation failed.")
## Scopus (Abtract) document example
# Initialize document with ID as integer
scp_doc = AbsDoc(scp_id = 84872135457)
if scp_doc.read(client):
print ("scp_doc.title: ", scp_doc.title)
scp_doc.write()
else:
print ("Read document failed.")
## ScienceDirect (full-text) document example using PII
pii_doc = FullDoc(sd_pii = 'S1674927814000082')
if pii_doc.read(client):
print ("pii_doc.title: ", pii_doc.title)
pii_doc.write()
else:
print ("Read document failed.")
## ScienceDirect (full-text) document example using DOI
doi_doc = FullDoc(doi = '10.1016/S1525-1578(10)60571-5')
if doi_doc.read(client):
print ("doi_doc.title: ", doi_doc.title)
doi_doc.write()
else:
print ("Read document failed.")
## Load list of documents from the API into affilation and author objects.
# Since a document list is retrieved for 25 entries at a time, this is
# a potentially lenghty operation - hence the prompt.
print ("Load documents (Y/N)?")
s = input('--> ')
if (s == "y" or s == "Y"):
## Read all documents for example author, then write to disk
if my_auth.read_docs(client):
print ("my_auth.doc_list has " + str(len(my_auth.doc_list)) + " items.")
my_auth.write_docs()
else:
print ("Read docs for author failed.")
## Read all documents for example affiliation, then write to disk
if my_aff.read_docs(client):
print ("my_aff.doc_list has " + str(len(my_aff.doc_list)) + " items.")
my_aff.write_docs()
else:
print ("Read docs for affiliation failed.")
## Initialize author search object and execute search
auth_srch = ElsSearch('authlast(keuskamp)','author')
auth_srch.execute(client)
print ("auth_srch has", len(auth_srch.results), "results.")
## Initialize affiliation search object and execute search
aff_srch = ElsSearch('affil(amsterdam)','affiliation')
aff_srch.execute(client)
print ("aff_srch has", len(aff_srch.results), "results.")
## Initialize doc search object using Scopus and execute search, retrieving
# all results
doc_srch = ElsSearch("AFFIL(dartmouth) AND AUTHOR-NAME(lewis) AND PUBYEAR > 2011",'scopus')
doc_srch.execute(client, get_all = True)
print ("doc_srch has", len(doc_srch.results), "results.")
## Initialize doc search object using ScienceDirect and execute search,
# retrieving all results
doc_srch = ElsSearch("star trek vs star wars",'sciencedirect')
doc_srch.execute(client, get_all = False)
print ("doc_srch has", len(doc_srch.results), "results.") | {
"repo_name": "ElsevierDev/elsapy",
"path": "exampleProg.py",
"copies": "1",
"size": "3608",
"license": "bsd-3-clause",
"hash": 1416001645934588700,
"line_mean": 32.0566037736,
"line_max": 91,
"alpha_frac": 0.6751662971,
"autogenerated": false,
"ratio": 3.0969957081545063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4272162005254506,
"avg_score": null,
"num_lines": null
} |
# An example script showing the functionality of the TinCanPython Library
# NOTE: this file uses Python 2 print statements; run it under Python 2.
import uuid

from resources import lrs_properties
from tincan import (
    RemoteLRS,
    Statement,
    Agent,
    Verb,
    Activity,
    Context,
    LanguageMap,
    ActivityDefinition,
    StateDocument,
)

# construct an LRS (Learning Record Store endpoint + credentials)
print "constructing the LRS..."
lrs = RemoteLRS(
    version=lrs_properties.version,
    endpoint=lrs_properties.endpoint,
    username=lrs_properties.username,
    password=lrs_properties.password,
)
print "...done"

# construct the actor of the statement
print "constructing the Actor..."
actor = Agent(
    name='UserMan',
    mbox='mailto:tincanpython@tincanapi.com',
)
print "...done"

# construct the verb of the statement
print "constructing the Verb..."
verb = Verb(
    id='http://adlnet.gov/expapi/verbs/experienced',
    display=LanguageMap({'en-US': 'experienced'}),
)
print "...done"

# construct the object of the statement
print "constructing the Object..."
object = Activity(
    id='http://tincanapi.com/TinCanPython/Example/0',
    definition=ActivityDefinition(
        name=LanguageMap({'en-US': 'TinCanPython Library'}),
        description=LanguageMap({'en-US': 'Use of, or interaction with, the TinCanPython Library'}),
    ),
)
print "...done"

# construct a context for the statement
print "constructing the Context..."
context = Context(
    registration=uuid.uuid4(),
    instructor=Agent(
        name='Lord TinCan',
        mbox='mailto:lordtincan@tincanapi.com',
    ),
    # language='en-US',
)
print "...done"

# construct the actual statement
print "constructing the Statement..."
statement = Statement(
    actor=actor,
    verb=verb,
    object=object,
    context=context,
)
print "...done"

# save our statement to the remote_lrs and store the response in 'response'
print "saving the Statement..."
response = lrs.save_statement(statement)
if not response:
    raise ValueError("statement failed to save")
print "...done"

# retrieve our statement from the remote_lrs using the id returned in the response
print "Now, retrieving statement..."
response = lrs.retrieve_statement(response.content.id)
if not response.success:
    raise ValueError("statement could not be retrieved")
print "...done"

print "constructing new Statement from retrieved statement data..."
ret_statement = response.content
print "...done"

# now, using our old statement and our returned statement, we can send multiple statements
# note: these statements are logically identical, but are 2 separate objects
print "saving both Statements"
response = lrs.save_statements([statement, ret_statement])
if not response:
    raise ValueError("statements failed to save")
print "...done"

# we can query our statements using an object
# constructing the query object with common fields
# note: more information about queries can be found in the API documentation:
# docs/build/html/tincan.html#module-tincan.remote_lrs
query = {
    "agent": actor,
    "verb": verb,
    "activity": object,
    "related_activities": True,
    "related_agents": True,
    "limit": 2,
}
print "querying statements..."
response = lrs.query_statements(query)
if not response:
    raise ValueError("statements could not be queried")
print "...done"

# now we will explore saving a document, e.g. a state document
print "constructing a state document..."
state_document = StateDocument(
    activity=object,
    agent=actor,
    id='stateDoc',
    content=bytearray('stateDocValue', encoding='utf-8'),
)
print "...done"

print "saving state document..."
response = lrs.save_state(state_document)
if not response.success:
    raise ValueError("could not save state document")
print "...done"
| {
"repo_name": "jpablo128/TinCanPython",
"path": "examples/example_script.py",
"copies": "2",
"size": "3687",
"license": "apache-2.0",
"hash": -8697472152876557000,
"line_mean": 24.9647887324,
"line_max": 100,
"alpha_frac": 0.7138595064,
"autogenerated": false,
"ratio": 3.716733870967742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023195911168383965,
"num_lines": 142
} |
# An example script showing the functionality of the TinCanPython Library
# (Python 3 variant of the example: print() functions, test.resources import)
import uuid

from test.resources import lrs_properties
from tincan import (
    RemoteLRS,
    Statement,
    Agent,
    Verb,
    Activity,
    Context,
    LanguageMap,
    ActivityDefinition,
    StateDocument,
)

# construct an LRS (Learning Record Store endpoint + credentials)
print("constructing the LRS...")
lrs = RemoteLRS(
    version=lrs_properties.version,
    endpoint=lrs_properties.endpoint,
    username=lrs_properties.username,
    password=lrs_properties.password,
)
print("...done")

# construct the actor of the statement
print("constructing the Actor...")
actor = Agent(
    name='UserMan',
    mbox='mailto:tincanpython@tincanapi.com',
)
print("...done")

# construct the verb of the statement
print("constructing the Verb...")
verb = Verb(
    id='http://adlnet.gov/expapi/verbs/experienced',
    display=LanguageMap({'en-US': 'experienced'}),
)
print("...done")

# construct the object of the statement
print("constructing the Object...")
object = Activity(
    id='http://tincanapi.com/TinCanPython/Example/0',
    definition=ActivityDefinition(
        name=LanguageMap({'en-US': 'TinCanPython Library'}),
        description=LanguageMap({'en-US': 'Use of, or interaction with, the TinCanPython Library'}),
    ),
)
print("...done")

# construct a context for the statement
print("constructing the Context...")
context = Context(
    registration=uuid.uuid4(),
    instructor=Agent(
        name='Lord TinCan',
        mbox='mailto:lordtincan@tincanapi.com',
    ),
    # language='en-US',
)
print("...done")

# construct the actual statement
print("constructing the Statement...")
statement = Statement(
    actor=actor,
    verb=verb,
    object=object,
    context=context,
)
print("...done")

# save our statement to the remote_lrs and store the response in 'response'
print("saving the Statement...")
response = lrs.save_statement(statement)
if not response:
    raise ValueError("statement failed to save")
print("...done")

# retrieve our statement from the remote_lrs using the id returned in the response
print("Now, retrieving statement...")
response = lrs.retrieve_statement(response.content.id)
if not response.success:
    raise ValueError("statement could not be retrieved")
print("...done")

print("constructing new Statement from retrieved statement data...")
ret_statement = response.content
print("...done")

# now, using our old statement and our returned statement, we can send multiple statements
# note: these statements are logically identical, but are 2 separate objects
print("saving both Statements")
response = lrs.save_statements([statement, ret_statement])
if not response:
    raise ValueError("statements failed to save")
print("...done")

# we can query our statements using an object
# constructing the query object with common fields
# note: more information about queries can be found in the API documentation:
# docs/build/html/tincan.html#module-tincan.remote_lrs
query = {
    "agent": actor,
    "verb": verb,
    "activity": object,
    "related_activities": True,
    "related_agents": True,
    "limit": 2,
}
print("querying statements...")
response = lrs.query_statements(query)
if not response:
    raise ValueError("statements could not be queried")
print("...done")

# now we will explore saving a document, e.g. a state document
print("constructing a state document...")
state_document = StateDocument(
    activity=object,
    agent=actor,
    id='stateDoc',
    content=bytearray('stateDocValue', encoding='utf-8'),
)
print("...done")

print("saving state document...")
response = lrs.save_state(state_document)
if not response.success:
    raise ValueError("could not save state document")
print("...done")
| {
"repo_name": "RusticiSoftware/TinCanPython",
"path": "examples/example_script.py",
"copies": "1",
"size": "3718",
"license": "apache-2.0",
"hash": 5229279536557394000,
"line_mean": 25.1830985915,
"line_max": 100,
"alpha_frac": 0.7089833244,
"autogenerated": false,
"ratio": 3.7404426559356136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9948266184777195,
"avg_score": 0.00023195911168383965,
"num_lines": 142
} |
# An example script to generate calfits from pointing/cable calibration txt files,
# modified from the example script on the pyuvdata github.
from pyuvdata import UVCal
import numpy as np
# Time array is in JD and calculated at the center of the time sample, and the corresponding pointing number.
# Best to reference pointing JDs from Aug 23, 2013 (though not required)
time_array = [2456528.21023,2456528.23208,2456528.25329,2456528.27444,2456528.29565]
poi_array = ['-2','-1','0','1','2']
#
# large reference time array = [2456528.18694,2456528.21023,2456528.23208,2456528.25329,2456528.27444,2456528.29565,2456528.31894,2456528.34194]
# for pointings ['-3','-2','-1','0','1','2','3','4']
#
Ntimes = len(time_array)
# Frequency array of the cal in Hz
freq_array = np.linspace(1.67075e8, 1.97715e8, 384) # highband
# freq_array for lowband is np.linspace(1.38915e8, 1.69555e8, 384)
Nfreqs = len(freq_array)
jones_array = np.array([-5, -6]) # only 2 jones parameters, jxx and jyy.
Njones = len(jones_array)
ant_array = np.arange(128)
Nants_data = len(ant_array)
# BUG FIX: the .format() call belongs outside the string literal; the old
# 'ant{0}.format(ant)' produced 128 copies of that literal text.
antenna_names = np.array(['ant{0}'.format(ant) for ant in ant_array])
Nspws = 1 # only 1 spw is supported
# Generate data arrays: (ant, freq, time/pointing, jones), complex gains
gains = np.zeros((Nants_data,Nfreqs,Ntimes,Njones)) + 0j
# use the builtin bool: the np.bool alias was removed in NumPy 1.24
flags = np.ones_like(gains, dtype=bool)
chisq = np.ones_like(gains, dtype=np.float32)
### Get cable lengths to sort calibration solutions to the right tiles
datafile_name='/nfs/eor-00/h1/nbarry/MWA/IDL_code/FHD/instrument_config/mwa_cable_length.txt'
with open(datafile_name, "r") as datafile:
    next(datafile)  # skip the header line
    cols = (col.strip().split() for col in datafile)
    # list() makes the transposed columns subscriptable (zip() returns an
    # iterator on Python 3) and consumes the file before it is closed.
    rows = list(zip(*cols))
tile_index = np.asarray(rows[0], dtype=np.int16)
cable_length = np.asarray(rows[2], dtype=np.float32)
# index groups of tiles by their cable length
index90 = np.where(cable_length == 90)
index150 = np.where(cable_length == 150)
index230 = np.where(cable_length == 230)
index320 = np.where(cable_length == 320)
index400 = np.where(cable_length == 400)
index524 = np.where(cable_length == 524)
###
# BUG FIX: the original line had a stray ')' after enumerate(poi_array),
# which was a SyntaxError.
for poi_i, poi in enumerate(poi_array):
    ### Get cable fits
    #
    # Change this datafile name to match the txt tile format
    #
    datafile_name='/nfs/eor-00/h1/nbarry/MWA/IDL_code/FHD/instrument_config/'+poi+'_bandpass.txt'
    with open(datafile_name, "r") as datafile:
        cols = (col.strip().split() for col in datafile)
        rows = list(zip(*cols))  # list() so the columns are subscriptable
    # Columns alternate x/y per cable group:
    # rows[1]=x90, rows[2]=y90, rows[3]=x150, ..., rows[12]=y524.
    cable_groups = [index90, index150, index230, index320, index400, index524]
    for grp, tile_idx in enumerate(cable_groups):
        x_gain = np.asarray(rows[1 + 2 * grp], dtype=np.float32) + 0j
        y_gain = np.asarray(rows[2 + 2 * grp], dtype=np.float32) + 0j
        gains[tile_idx, :, poi_i, 0] = x_gain
        gains[tile_idx, :, poi_i, 1] = y_gain
    ###
###
cal = UVCal()
cal.cal_type = 'gain'
cal.set_gain()
cal.Nfreqs = Nfreqs
cal.Njones = Njones
cal.Ntimes = Ntimes
#
# Change the history comment to list field, freq range name, instrument, averaging sample set, pointing JD reference,
# calibration catalogs, and whatever else is important.
#
cal.history = 'EXAMPLE HISTORY, PLEASE CHANGE: EoR0 highband per frequency, per pointing, per polarization bandpass for MWA, averaged per cable over Season 1 using an early version of KGS. Pointing JD is referenced from Aug 23,2013.'
#
cal.Nspws = 1
cal.freq_array = freq_array.reshape(cal.Nspws, -1)
cal.freq_range = [freq_array[0], freq_array[-1]] # valid frequencies for solutions.
cal.channel_width = np.diff(freq_array)[0]
cal.jones_array = jones_array
cal.time_array = time_array
#
# Pointing integration time
#
cal.integration_time = 1800.
#
cal.gain_convention = 'divide' # Use this operation to apply gain solution.
cal.x_orientation = 'east' # orientation of 1st jones parameter.
#
# JD's this can applied to. Below is Season 1
#
cal.time_range = [2456528., 2456626.]
#
cal.telescope_name = 'MWA'
cal.Nants_data = Nants_data
cal.Nants_telescope = Nants_data # have solutions for all antennas in array.
cal.ant_array = ant_array
cal.antenna_names = antenna_names
cal.antenna_numbers = ant_array
cal.flag_array = flags
cal.gain_array = gains
cal.quality_array = chisq
#
# Put your name in as creator
#
cal.observer = '<YOUR NAME HERE>'
#
# Put in the git url of the code that generated the cals
#
cal.git_origin_cal = 'https://github.com/EoRImaging/FHD'
#
# And if you know the git hash, put that in as well
#
#cal.git_hash_cal =
# Finally, generate an output!
cal.write_calfits('<put in a filepath>/<put in an instrument>_<put in a field>_<put in a frequency range name>_<put in a season/day>_cable_bandpass.fits')
| {
"repo_name": "miguelfmorales/FHD",
"path": "instrument_config/obsolete/pyuvdata_calfits_from_txt.py",
"copies": "2",
"size": "5245",
"license": "bsd-2-clause",
"hash": 9022491926787726000,
"line_mean": 34.6802721088,
"line_max": 233,
"alpha_frac": 0.7044804576,
"autogenerated": false,
"ratio": 2.613353263577479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4317833721177479,
"avg_score": null,
"num_lines": null
} |
"""An example server using a WSGI application with the CherryPy web server."""
import os
import optparse
import logging
import sys
import cherrypy
import amfast
from amfast.remoting.wsgi_channel import WsgiChannelSet, WsgiChannel
class App(object):
    """Base web app."""
    @cherrypy.expose
    def index(self):
        # Redirect the site root to the Flex messaging demo page.
        raise cherrypy.HTTPRedirect('/messaging.html')
if __name__ == '__main__':
    usage = """usage: %s [options]""" % __file__
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-p", default=8000,
        dest="port", help="port number to serve")
    parser.add_option("-d", default="localhost",
        dest="domain", help="domain to serve")
    parser.add_option("-l", action="store_true",
        dest="log_debug", help="log debugging output")
    (options, args) = parser.parse_args()

    # route AmFast debug logging to stdout when requested
    amfast.log_debug = options.log_debug
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    amfast.logger.addHandler(handler)

    # CherryPy server/static-file configuration
    cp_options = {
        'global':
        {
            'server.socket_port': int(options.port),
            'server.socket_host': str(options.domain),
        },
        '/':
        {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
        }
    }

    channel_set = WsgiChannelSet(notify_connections=False)
    # Clients connect every x seconds
    # to polling channels to check for messages.
    # If messages are available, they are
    # returned to the client.
    polling_channel = WsgiChannel('amf')
    channel_set.mapChannel(polling_channel)
    # Long-poll channels do not return
    # a response to the client until
    # a message is available, or channel.max_interval
    # is reached.
    long_poll_channel = WsgiChannel('longPoll', wait_interval=90)
    channel_set.mapChannel(long_poll_channel)

    app = App()
    cherrypy.tree.graft(polling_channel, '/amf')
    cherrypy.tree.graft(long_poll_channel, '/longPoll')
    cherrypy.quickstart(app, '/', config=cp_options)
    # NOTE(review): these prints only execute after the server stops, since
    # quickstart() presumably blocks -- confirm against the CherryPy docs.
    print "Serving on %s:%s" % (options.domain, options.port)
    print "Press ctrl-c to halt."
| {
"repo_name": "limscoder/amfast",
"path": "examples/messaging/python/cp_wsgi_server.py",
"copies": "1",
"size": "2159",
"license": "mit",
"hash": 3496894416358253600,
"line_mean": 30.2898550725,
"line_max": 78,
"alpha_frac": 0.6452061139,
"autogenerated": false,
"ratio": 3.754782608695652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9889182416080369,
"avg_score": 0.0021612613030565866,
"num_lines": 69
} |
"""An example server using the CherryPy web framework."""
import os
import optparse
from wsgiref import simple_server
import amfast
from amfast.remoting.channel import ChannelSet
from amfast.remoting.wsgi_channel import WsgiChannel
from amfast.remoting.pyamf_endpoint import PyAmfEndpoint
import utils
class App(object):
    """WSGI application that routes '/amf' requests to an AmFast channel
    encoded/decoded with a PyAmfEndpoint; all other paths get an error page."""
    def __init__(self):
        self.channel_set = ChannelSet()
        rpc_channel = WsgiChannel('amf-channel', endpoint=PyAmfEndpoint())
        self.channel_set.mapChannel(rpc_channel)
        utils.setup_channel_set(self.channel_set)

    def __call__(self, environ, start_response):
        # Collapse the request path to a single token (e.g. '/amf' -> 'amf').
        path = environ['PATH_INFO'].replace('/', '')
        if path == 'amf':
            channel = self.channel_set.getChannel('amf-channel')
            return channel(environ, start_response)
        else:
            # Reuse the channel only for its error-page helper.
            channel = self.channel_set.getChannel('amf-channel')
            return channel.badPage(start_response, 'Page does not exist.')
if __name__ == '__main__':
    usage = """usage: %s [options]""" % __file__
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-p", default=8000,
        dest="port", help="port number to serve")
    parser.add_option("-d", default="localhost",
        dest="domain", help="domain to serve")
    parser.add_option("-l", action="store_true",
        dest="log_debug", help="log debugging output")
    (options, args) = parser.parse_args()

    amfast.log_debug = options.log_debug

    # serve the App via the stdlib reference WSGI server
    server = simple_server.WSGIServer((options.domain, int(options.port)),
        simple_server.WSGIRequestHandler)
    server.set_app(App())

    try:
        print "Serving on %s:%s" % (options.domain, options.port)
        print "Press ctrl-c to halt."
        server.serve_forever()
    except KeyboardInterrupt:
        pass
| {
"repo_name": "limscoder/amfast",
"path": "examples/pyamf/python/wsgi_server.py",
"copies": "1",
"size": "1797",
"license": "mit",
"hash": 3336666866539049500,
"line_mean": 32.9056603774,
"line_max": 74,
"alpha_frac": 0.6505286589,
"autogenerated": false,
"ratio": 3.7752100840336134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9909423973714705,
"avg_score": 0.0032629538437816664,
"num_lines": 53
} |
"""An example server using the CherryPy web framework."""
import os
import optparse
from wsgiref import simple_server
import amfast
from amfast.remoting.wsgi_channel import WsgiChannelSet, WsgiChannel
import utils
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
channel_set = WsgiChannelSet()
rpc_channel = WsgiChannel('amf')
channel_set.mapChannel(rpc_channel)
utils.setup_channel_set(channel_set)
server = simple_server.WSGIServer((options.domain, int(options.port)),
simple_server.WSGIRequestHandler)
server.set_app(channel_set)
try:
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
server.serve_forever()
except KeyboardInterrupt:
pass
| {
"repo_name": "limscoder/amfast",
"path": "examples/hello_world/python/wsgi_server.py",
"copies": "1",
"size": "1209",
"license": "mit",
"hash": 1566378933520295000,
"line_mean": 30.8157894737,
"line_max": 74,
"alpha_frac": 0.6650124069,
"autogenerated": false,
"ratio": 3.6859756097560976,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9840031709059253,
"avg_score": 0.002191261519369022,
"num_lines": 38
} |
"""An example server using the CherryPy web framework."""
import os
import optparse
import logging
import sys
import cherrypy
import amfast
from amfast.remoting.cherrypy_channel import CherryPyChannel, CherryPyChannelSet
class App(CherryPyChannelSet):
    """Root application: the site root just redirects to the client page."""

    @cherrypy.expose
    def index(self):
        # '/' has no content of its own; bounce browsers to the static client.
        raise cherrypy.HTTPRedirect('/messaging.html')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
amfast.logger.addHandler(handler)
cp_options = {
'global':
{
'server.socket_port': int(options.port),
'server.socket_host': str(options.domain),
},
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
}
}
# Create ChannelSet
channel_set = App(notify_connections=True)
# Clients connect every x seconds
# to polling channels to check for messages.
# If messages are available, they are
# returned to the client.
polling_channel = CherryPyChannel('amf')
channel_set.mapChannel(polling_channel)
# Long-poll channels do not return
# a response to the client until
# a message is available, or channel.max_interval
# is reached.
long_poll_channel = CherryPyChannel('longPoll', wait_interval=90)
channel_set.mapChannel(long_poll_channel)
# Start serving
cherrypy.quickstart(channel_set, '/', config=cp_options)
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
| {
"repo_name": "limscoder/amfast",
"path": "examples/messaging/python/cp_server.py",
"copies": "1",
"size": "2090",
"license": "mit",
"hash": -6907731681666073000,
"line_mean": 29.2898550725,
"line_max": 80,
"alpha_frac": 0.6464114833,
"autogenerated": false,
"ratio": 3.8419117647058822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4988323248005882,
"avg_score": null,
"num_lines": null
} |
"""An example server using the CherryPy web framework."""
import os
import optparse
import cherrypy
import amfast
from amfast.remoting.channel import ChannelSet
from amfast.remoting.wsgi_channel import WsgiChannel
from amfast.remoting.pyamf_endpoint import PyAmfEndpoint
import utils
class App(object):
    """Root application: the site root just redirects to the client page."""

    @cherrypy.expose
    def index(self):
        # '/' has no content of its own; bounce browsers to the static client.
        raise cherrypy.HTTPRedirect('/hello_world.html')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
cp_options = {
'global':
{
'server.socket_port': int(options.port),
'server.socket_host': str(options.domain),
},
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
}
}
channel_set = ChannelSet()
rpc_channel = WsgiChannel('amf-channel', endpoint=PyAmfEndpoint())
channel_set.mapChannel(rpc_channel)
utils.setup_channel_set(channel_set)
app = App()
cherrypy.tree.graft(rpc_channel, '/amf')
cherrypy.quickstart(app, '/', config=cp_options)
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
| {
"repo_name": "limscoder/amfast",
"path": "examples/pyamf/python/cp_server.py",
"copies": "1",
"size": "1652",
"license": "mit",
"hash": 2637910705783713000,
"line_mean": 28.5,
"line_max": 78,
"alpha_frac": 0.6283292978,
"autogenerated": false,
"ratio": 3.599128540305011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4727457838105011,
"avg_score": null,
"num_lines": null
} |
"""An example server using the CherryPy web framework."""
import os
import optparse
import cherrypy
import amfast
from amfast.remoting.cherrypy_channel import CherryPyChannelSet, CherryPyChannel
import utils
class App(CherryPyChannelSet):
    """Root application: the site root just redirects to the client page."""

    @cherrypy.expose
    def index(self):
        # '/' has no content of its own; bounce browsers to the static client.
        raise cherrypy.HTTPRedirect('/addressbook.html')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
cp_options = {
'global':
{
'server.socket_port': int(options.port),
'server.socket_host': str(options.domain),
},
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
}
}
channel_set = App()
rpc_channel = CherryPyChannel('amf')
channel_set.mapChannel(rpc_channel)
utils.setup_channel_set(channel_set)
cherrypy.quickstart(channel_set, '/', config=cp_options)
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
| {
"repo_name": "limscoder/amfast",
"path": "examples/addressbook/python/cp_server.py",
"copies": "1",
"size": "1498",
"license": "mit",
"hash": 4141970166072879600,
"line_mean": 27.8076923077,
"line_max": 80,
"alpha_frac": 0.6181575434,
"autogenerated": false,
"ratio": 3.6805896805896805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.479874722398968,
"avg_score": null,
"num_lines": null
} |
"""An example server using the CherryPy web framework.
This example uses a WsgiChannel, grafted onto the CherryPy tree.
To run the example execute the command:
python cp_wsgi_server.py
"""
import os
import optparse
import logging
import sys
import cherrypy
import amfast
from amfast.remoting.channel import ChannelSet
from amfast.remoting.wsgi_channel import StreamingWsgiChannel
class App(object):
    """Root application: the site root just redirects to the client page."""

    @cherrypy.expose
    def index(self):
        # '/' has no content of its own; bounce browsers to the static client.
        raise cherrypy.HTTPRedirect('/streaming.html')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
# Send log messages to STDOUT
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
amfast.logger.addHandler(handler)
cp_options = {
'global':
{
'server.socket_port': int(options.port),
'server.socket_host': str(options.domain),
},
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
}
}
channel_set = ChannelSet(notify_connections=True)
stream_channel = StreamingWsgiChannel('stream-channel')
channel_set.mapChannel(stream_channel)
app = App()
cherrypy.tree.graft(stream_channel, '/amf')
cherrypy.quickstart(app, '/', config=cp_options)
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
| {
"repo_name": "limscoder/amfast",
"path": "examples/streaming/python/cp_wsgi_server.py",
"copies": "1",
"size": "1888",
"license": "mit",
"hash": -1332228876717665800,
"line_mean": 28.0461538462,
"line_max": 78,
"alpha_frac": 0.6451271186,
"autogenerated": false,
"ratio": 3.7238658777120315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838290917088115,
"avg_score": 0.006140415844783146,
"num_lines": 65
} |
"""An example server using the CherryPy web framework.
To run the example execute the command:
python cp_server.py
"""
import os
import optparse
import logging
import sys
import cherrypy
import amfast
from amfast.remoting.cherrypy_channel import CherryPyChannelSet, StreamingCherryPyChannel
class App(CherryPyChannelSet):
    """Root application: the site root just redirects to the client page."""

    @cherrypy.expose
    def index(self):
        # '/' has no content of its own; bounce browsers to the static client.
        raise cherrypy.HTTPRedirect('/streaming.html')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
# Send log messages to STDOUT
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
amfast.logger.addHandler(handler)
cp_options = {
'global':
{
'server.socket_port': int(options.port),
'server.socket_host': str(options.domain),
},
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
}
}
channel_set = App(notify_connections=True)
stream_channel = StreamingCherryPyChannel('amf')
channel_set.mapChannel(stream_channel)
cherrypy.quickstart(channel_set, '/', config=cp_options)
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
| {
"repo_name": "limscoder/amfast",
"path": "examples/streaming/python/cp_server.py",
"copies": "1",
"size": "1741",
"license": "mit",
"hash": 9037741573736894000,
"line_mean": 27.5409836066,
"line_max": 89,
"alpha_frac": 0.6392877657,
"autogenerated": false,
"ratio": 3.752155172413793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9859486821180123,
"avg_score": 0.006391223386734037,
"num_lines": 61
} |
"""An example server using WSGI."""
import os
import optparse
from wsgiref import simple_server
import amfast
from amfast.remoting.channel import ChannelSet
from amfast.remoting.wsgi_channel import WsgiChannel
import utils
class App(object):
    """WSGI application that serves AMF requests at '/amf'.

    All other paths get the channel's error page.
    """

    def __init__(self):
        # One ChannelSet with a single WSGI channel.
        self.channel_set = ChannelSet()
        rpc_channel = WsgiChannel('amf-channel')
        self.channel_set.mapChannel(rpc_channel)
        utils.setup_channel_set(self.channel_set)

    def __call__(self, environ, start_response):
        channel = self.channel_set.getChannel('amf-channel')
        if environ['PATH_INFO'].replace('/', '') == 'amf':
            return channel(environ, start_response)
        return channel.badPage(start_response, 'Page does not exist.')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
server = simple_server.WSGIServer((options.domain, int(options.port)),
simple_server.WSGIRequestHandler)
server.set_app(App())
try:
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
server.serve_forever()
except KeyboardInterrupt:
pass
| {
"repo_name": "limscoder/amfast",
"path": "examples/addressbook/python/wsgi_server.py",
"copies": "1",
"size": "1691",
"license": "mit",
"hash": 3523165894783595500,
"line_mean": 32.1568627451,
"line_max": 74,
"alpha_frac": 0.638083974,
"autogenerated": false,
"ratio": 3.791479820627803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49295637946278026,
"avg_score": null,
"num_lines": null
} |
""" An example showing how to use DEAP optimization (http://pythonhosted.org/deap/).
DEAP can be combined with *pypet* to keep track of all the data and the full trajectory
of points created by a genetic algorithm.
Note that *pypet* adds quite some overhead to the optimization algorithm.
Using *pypet* in combination with DEAP is only suitable in case the
evaluation of an individual (i.e. a single run) takes a considerable amount of time
(i.e. 1 second or longer) and, thus, pypet's overhead is only marginal.
This *OneMax* problem serves only as an example and is not a well suited problem.
Suitable would be the genetic optimization of neural networks where running and evaluating
the network may take a few seconds.
Here we avoid using an Environment and *manually* execute runs using multiprocessing.
"""
__author__ = 'Robert Meyer'
import random
import os
import multiprocessing as multip
try:
from itertools import izip
except ImportError:
# For Python 3
izip = zip
from deap import base
from deap import creator
from deap import tools
from pypet import Trajectory, cartesian_product, manual_run, MultiprocContext
@manual_run(store_meta_data=True)  # Important decorator for manual execution of runs
def eval_one_max(traj, individual):
    """OneMax fitness: count of ones in ``individual``.

    Records the individual and its fitness under the trajectory's
    current run group and persists them before returning.
    """
    score = sum(individual)
    traj.f_add_result('$set.$.individual', list(individual))
    traj.f_add_result('$set.$.fitness', score)
    traj.f_store()
    return (score,)  # DEAP expects a tuple of fitness values here
def eval_wrapper(packed_args):
    """Unpack one tuple into positional arguments for the fitness function.

    The pool's ``map`` accepts only a single iterable, so (trajectory,
    individual) pairs are zipped together and unpacked again here.
    """
    return eval_one_max(*packed_args)
def main():
    """Run the OneMax GA, storing every evaluation in a pypet trajectory.

    Builds the trajectory and DEAP toolbox, then evolves the population
    for ``NGEN`` generations, evaluating individuals in parallel via a
    multiprocessing pool wrapped in pypet's queue-based storage context.
    """
    # No environment here ;-)
    filename = os.path.join('experiments', 'example_20.hdf5')
    traj = Trajectory('onemax', filename=filename, overwrite_file=True)
    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100)
    traj.f_add_parameter('CXPB', 0.5)
    traj.f_add_parameter('MUTPB', 0.2)
    traj.f_add_parameter('NGEN', 20)
    traj.f_add_parameter('generation', 0)
    traj.f_add_parameter('ind_idx', 0)
    traj.f_add_parameter('ind_len', 50)
    traj.f_add_parameter('indpb', 0.005)
    traj.f_add_parameter('tournsize', 3)
    traj.f_add_parameter('seed', 42)
    traj.f_store(only_init=True)
    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
        toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)
    toolbox.register("evaluate", eval_wrapper)
    pool = multip.Pool(4)
    toolbox.register("map", pool.map) # We use the pool's map function!
    # ------- Initialize Population -------- #
    random.seed(traj.seed)
    pop = toolbox.population(n=traj.popsize)
    CXPB, MUTPB, NGEN = traj.CXPB, traj.MUTPB, traj.NGEN
    start_idx = 0 # We need to count executed runs
    print("Start of evolution")
    for g in range(traj.NGEN):
        print("-- Generation %i --" % g)
        # Determine individuals that need to be evaluated
        eval_pop = [ind for ind in pop if not ind.fitness.valid]
        # Add as many explored runs as individuals that need to be evaluated
        traj.f_expand(cartesian_product({'generation': [g], 'ind_idx': range(len(eval_pop))}))
        # We need to make the storage service multiprocessing safe
        mc = MultiprocContext(traj, wrap_mode='QUEUE')
        mc.f_start()
        # Create a single iterable to be passed to our fitness function (wrapper).
        # `yields='copy'` is important, the pool's `map` function will
        # go over the whole iterator at once and store it in memory.
        # So for every run we need a copy of the trajectory.
        # Alternatively, you could use `yields='self'` and use the pool's `imap` function.
        zip_iterable = izip(traj.f_iter_runs(start_idx, yields='copy'), eval_pop)
        fitnesses = toolbox.map(eval_wrapper, zip_iterable)
        # fitnesses is just a list of tuples [(fitness,), ...]
        for idx, fitness in enumerate(fitnesses):
            # Update fitnesses
            eval_pop[idx].fitness.values = fitness
        # Finalize the multiproc wrapper
        mc.f_finalize()
        # Update start index
        start_idx += len(eval_pop)
        print(" Evaluated %i individuals" % len(eval_pop))
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        # Population standard deviation via E[x^2] - mean^2
        std = abs(sum2 / length - mean**2)**0.5
        print(" Min %s" % min(fits))
        print(" Max %s" % max(fits))
        print(" Avg %s" % mean)
        print(" Std %s" % std)
        # ------- Create the next generation by crossover and mutation -------- #
        if g < traj.NGEN -1: # not necessary for the last generation
            # Select the next generation individuals
            offspring = toolbox.select(pop, len(pop))
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, offspring))
            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    toolbox.mate(child1, child2)
                    # Invalidate fitnesses of modified children so they
                    # are re-evaluated next generation
                    del child1.fitness.values
                    del child2.fitness.values
            for mutant in offspring:
                if random.random() < MUTPB:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values
            # The population is entirely replaced by the offspring
            pop[:] = offspring
    # Stop the multiprocessing pool
    pool.close()
    pool.join()
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
    traj.f_store() # And store all the rest of the data
if __name__ == "__main__":
main() | {
"repo_name": "nigroup/pypet",
"path": "examples/example_20_using_deap_manual_runs.py",
"copies": "2",
"size": "6723",
"license": "bsd-3-clause",
"hash": -6079670687043612000,
"line_mean": 33.306122449,
"line_max": 94,
"alpha_frac": 0.6450989142,
"autogenerated": false,
"ratio": 3.637987012987013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5283085927187012,
"avg_score": null,
"num_lines": null
} |
# An example showing how to use sensor data, here with the SenseHAT (or SenseHAT emulator)
# Refer to RaspberryPi forums Teaching and learning resources : https://www.raspberrypi.org/forums/viewtopic.php?f=49&t=202386 for some talk using it.
# compass added for show, but commented as it will not work if just enabled
from guizero import *
from sense_hat import SenseHat
# from sense_emu import SenseHat #use this instead of sense_hat if you don't have a SenseHAT, but are using Raspbian/x86 Desktop
# Shared SenseHAT handle (swap the import above for sense_emu to use the emulator).
sense=SenseHat()
# Refresh periods for the two guizero repeat timers, in milliseconds.
SENSOR_ENVIRONMENT_UPDATE_FREQUENCY = 1000 #time in milliseconds (ms)
SENSOR_IMU_UPDATE_FREQUENCY = 100  # IMU readouts refresh faster than environment ones
def read_temp_sensor():
    """Return the SenseHAT temperature reading rounded to one decimal."""
    reading = sense.get_temperature()
    return round(reading, 1)
def read_humidity_sensor():
    """Return the SenseHAT humidity reading rounded to one decimal."""
    reading = sense.get_humidity()
    return round(reading, 1)
def read_pressure_sensor():
    """Return the SenseHAT pressure reading converted to kPa, one decimal."""
    kilopascals = sense.get_pressure() * 0.1
    return round(kilopascals, 1)
def update_environment_sensors():
    """Refresh the temperature, humidity and pressure readouts.

    Called by the guizero repeat timer every
    SENSOR_ENVIRONMENT_UPDATE_FREQUENCY milliseconds.
    """
    count_text.value = int(count_text.value)+1 #used to debug, i.e. check it is actually incrementing
    # Fix: format the reading and unit into one string. The old code
    # assigned a (value, "unit") tuple to .value, which was rendered
    # as a tuple repr like "(23.4, '°C')".
    temperature_text.value = "{} °C".format(read_temp_sensor())
    humidity_text.value = "{} %".format(read_humidity_sensor())
    pressure_text.value = "{} kPa".format(read_pressure_sensor())
def update_IMU_sensors():
    """Refresh orientation, magnetometer, accelerometer and gyroscope readouts.

    Called by the guizero repeat timer every SENSOR_IMU_UPDATE_FREQUENCY
    milliseconds.
    """
    # Fix: index the sensor dicts by key instead of unpacking .values().
    # Dict value order is not a documented contract, and the old unpack
    # (yaw, pitch, roll) did not match the library's pitch/roll/yaw keys,
    # so the orientation fields could show each other's values.
    orientation = sense.get_orientation()
    mag = sense.get_compass_raw()
    acc = sense.get_accelerometer_raw()
    gyro = sense.get_gyroscope_raw()
    IMU_orient_yaw_text.value = round(orientation['yaw'], 1)
    IMU_orient_pitch_text.value = round(orientation['pitch'], 1)
    IMU_orient_roll_text.value = round(orientation['roll'], 1)
    IMU_mag_x_text.value = round(mag['x'], 1)
    IMU_mag_y_text.value = round(mag['y'], 1)
    IMU_mag_z_text.value = round(mag['z'], 1)
    IMU_acc_x_text.value = round(acc['x'], 1)
    IMU_acc_y_text.value = round(acc['y'], 1)
    IMU_acc_z_text.value = round(acc['z'], 1)
    IMU_gyro_x_text.value = round(gyro['x'], 1)
    IMU_gyro_y_text.value = round(gyro['y'], 1)
    IMU_gyro_z_text.value = round(gyro['z'], 1)
if __name__ == '__main__':
    # Build the window: labels in column 0, live values in columns 1-3.
    # The Text widgets below are module globals read by the update_* callbacks.
    app = App(title="Sensor Display!",
              height=230,
              width=420,
              layout='grid')
    # Debug heartbeat row: incremented by update_environment_sensors().
    title_count = Text(app, "count 'debug':", grid=[0, 0])
    count_text = Text(app, "1", grid=[1, 0])
    # Environment sensor rows (values start as "xx" until first update).
    title = Text(app, "Temperature Sensor value:", grid=[0, 1])
    temperature_text = Text(app, "xx", grid=[1, 1])
    title2 = Text(app, "Humidity Sensor value:", grid=[0, 2])
    humidity_text = Text(app, "xx", grid=[1, 2])
    title3 = Text(app, "Pressure Sensor value:", grid=[0, 3])
    pressure_text = Text(app, "xx", grid=[1, 3])
    #IMU box
    IMU_title_orient_yaw = Text(app, "Yaw", grid=[1, 5])
    IMU_title_orient_pitch = Text(app, "Pitch", grid=[2, 5])
    IMU_title_orient_roll = Text(app, "Roll", grid=[3, 5])
    IMU_title_orient = Text(app, "Orientation:", grid=[0, 6])
    IMU_orient_yaw_text = Text(app, "xx", grid=[1, 6])
    IMU_orient_pitch_text = Text(app, "xx", grid=[2, 6])
    IMU_orient_roll_text = Text(app, "xx", grid=[3, 6])
    # X/Y/Z column headers shared by the three raw-sensor rows below.
    IMU_title_x = Text(app, "X", grid=[1, 8])
    IMU_title_y = Text(app, "Y", grid=[2, 8])
    IMU_title_z = Text(app, "Z", grid=[3, 8])
    IMU_title_mag = Text(app, "Magnetometer µT", grid=[0, 9])
    IMU_title_acc = Text(app, "Accelerometer Gs", grid=[0, 10])
    IMU_title_gyro = Text(app, "Gyroscope rad/s", grid=[0, 11])
    IMU_mag_x_text = Text(app, "xx", grid=[1, 9])
    IMU_mag_y_text = Text(app, "xx", grid=[2, 9])
    IMU_mag_z_text = Text(app, "xx", grid=[3, 9])
    IMU_acc_x_text = Text(app, "xx", grid=[1, 10])
    IMU_acc_y_text = Text(app, "xx", grid=[2, 10])
    IMU_acc_z_text = Text(app, "xx", grid=[3, 10])
    IMU_gyro_x_text = Text(app, "xx", grid=[1, 11])
    IMU_gyro_y_text = Text(app, "xx", grid=[2, 11])
    IMU_gyro_z_text = Text(app, "xx", grid=[3, 11])
    # Compass row is created for show but never updated by a callback here.
    IMU_title_compass = Text(app, "Compass North Bearing", grid=[0, 13])
    IMU_compass_text = Text(app, "xx", grid=[1, 13])
    # Schedule the two polling callbacks, then enter the GUI event loop.
    app.repeat(SENSOR_ENVIRONMENT_UPDATE_FREQUENCY, update_environment_sensors)
    app.repeat(SENSOR_IMU_UPDATE_FREQUENCY, update_IMU_sensors)
    app.display()
| {
"repo_name": "lawsie/guizero",
"path": "examples/sensehat_read_data.py",
"copies": "1",
"size": "4080",
"license": "bsd-3-clause",
"hash": -3316168253024331000,
"line_mean": 35.4107142857,
"line_max": 150,
"alpha_frac": 0.6343795978,
"autogenerated": false,
"ratio": 2.667102681491171,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8718866923918003,
"avg_score": 0.01652307107463347,
"num_lines": 112
} |
""" An example showing moveable shapes. """
# Enthought library imports.
from enable.api import Container, Window
from enable.example_support import DemoFrame, demo_main
# Local imports
from box import Box
from circle import Circle
class MyFrame(DemoFrame):
    """Top-level demo frame holding four moveable shapes."""

    # 'DemoFrame' interface.
    #--------------------------------------------------------------------------

    def _create_window(self):
        """Return an enable window with all shapes in one black container."""
        shapes = self._create_shapes()
        container = Container(auto_size=False, bgcolor='black', *shapes)
        return Window(self, component=container)

    # Private interface.
    #--------------------------------------------------------------------------

    def _create_shapes(self):
        """Return two boxes and two circles laid out along the diagonal."""
        pink_box = Box(bounds=[100, 100], position=[50, 50],
                       fill_color='lightpink', text='Box 1')
        green_box = Box(bounds=[100, 100], position=[150, 150],
                        fill_color='greenyellow', text='Box 2')
        blue_circle = Circle(radius=50, position=[250, 250],
                             fill_color='cornflowerblue', text='Circle 1')
        khaki_circle = Circle(radius=50, position=[350, 350],
                              fill_color='khaki', text='Circle 2')
        return pink_box, green_box, blue_circle, khaki_circle
if __name__ == "__main__":
    # Keep a module-level reference so the demo isn't garbage collected
    # when run within an existing event loop (i.e. from ipython).
    demo = demo_main(
        MyFrame, size=(500, 500), title="Click and drag the shapes")
| {
"repo_name": "tommy-u/enable",
"path": "examples/enable/shapes/run.py",
"copies": "1",
"size": "1812",
"license": "bsd-3-clause",
"hash": 5092299413806449000,
"line_mean": 26.0447761194,
"line_max": 79,
"alpha_frac": 0.4668874172,
"autogenerated": false,
"ratio": 4.408759124087592,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019992953547087272,
"num_lines": 67
} |
""" An example simulation for a Hawk-Dove game using two-population discrete time replicator dynamics.
"""
## This is hackery to make this run from where it is. Do not do this.
import os
import sys
path = os.path.realpath(__file__)
sys.path.insert(0, os.path.abspath(os.sep.join([path, "..", "..", "src"])))
## End hackery
from simulations.simulation_runner import SimulationRunner
from simulations.dynamics.npop_discrete_replicator import NPopDiscreteReplicatorDynamics
from simulations.base import listener
from simulations.base import once
def firstgen(this, gennum, thisgen, lastgen):
    """Report that the first generation step has finished."""
    this.out.write('Working...\n')

def initialset(this, firstpop):
    """Report that the initial population has been chosen."""
    this.out.write('Initial population selected.\n')

def stablestate(this, genct, thisgen, lastgen, firstgen):
    """Report that the dynamics reached a stable state."""
    this.out.write('Stable state reached!\n')

def forcestop(this, genct, thisgen, lastgen, firstgen):
    """Report that the simulation was force-stopped."""
    this.out.write('Force stopped!\n')

def generation(this, genct, thisgen, lastgen):
    """Report completion of generation ``genct``."""
    this.out.write('Generation {0} complete.\n'.format(genct))

def simdone(this, result):
    """Report that one simulation in the batch has finished."""
    sys.stdout.write("Simulation {0} complete.\n".format(this.finished_count))

def alldone(this):
    """Report that every simulation in the batch has finished."""
    sys.stdout.write("All done.\n")
@listener('generation', generation)
@listener('force stop', forcestop)
@listener('stable state', stablestate)
@listener('initial set', initialset)
@once('generation', firstgen)
class HawkDoveSim(NPopDiscreteReplicatorDynamics):
    """Two-population Hawk-Dove game under discrete replicator dynamics."""

    # _payoffs[my_strategy][opponent_strategy]; index 0 = 'H', 1 = 'D'.
    _payoffs = [[0, 4], [1, 2]]

    def __init__(self, *args, **kwdargs):
        """Forward all construction arguments to the dynamics base class."""
        super(HawkDoveSim, self).__init__(*args, **kwdargs)

    def _default_types(self):
        """Return the strategy labels for each of the two populations."""
        return [['H', 'D'], ['H', 'D']]

    def _interaction(self, me, profile):
        """Return the payoff for player ``me`` (0 or 1) under ``profile``."""
        if me != 0 and me != 1:
            raise ValueError("Profile index out of bounds")
        return self._payoffs[profile[me]][profile[1 - me]]
@listener('result', simdone)
@listener('done', alldone)
class HDSimRunner(SimulationRunner):
    """Runner that reports per-simulation and overall batch completion."""
    pass
if __name__ == '__main__':
    # Construct the runner for the Hawk-Dove simulation and start the batch.
    runner = HDSimRunner(HawkDoveSim)
    runner.go()
| {
"repo_name": "gsmcwhirter/simulations",
"path": "examples/two_population_hd.py",
"copies": "1",
"size": "2497",
"license": "mit",
"hash": -9192509709340824000,
"line_mean": 22.780952381,
"line_max": 102,
"alpha_frac": 0.663195835,
"autogenerated": false,
"ratio": 3.536827195467422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4700023030467422,
"avg_score": null,
"num_lines": null
} |
""" An example simulation for a Prisoner's Dilemma using one-population discrete time replicator dynamics.
"""
## This is hackery to make this run from where it is. Do not do this.
import os
import sys
path = os.path.realpath(__file__)
sys.path.insert(0, os.path.abspath(os.sep.join([path, "..", "..", "src"])))
## End hackery
from simulations.simulation_runner import SimulationRunner
from simulations.dynamics.onepop_discrete_replicator import OnePopDiscreteReplicatorDynamics
from simulations.base import listener
from simulations.base import once
def firstgen(this, gennum, thisgen, lastgen):
    """Report that the first generation step has finished."""
    this.out.write('Working...\n')

def initialset(this, firstpop):
    """Report that the initial population has been chosen."""
    this.out.write('Initial population selected.\n')

def stablestate(this, genct, thisgen, lastgen, firstgen):
    """Report that the dynamics reached a stable state."""
    this.out.write('Stable state reached!\n')

def forcestop(this, genct, thisgen, lastgen, firstgen):
    """Report that the simulation was force-stopped."""
    this.out.write('Force stopped!\n')

def generation(this, genct, thisgen, lastgen):
    """Report completion of generation ``genct``."""
    this.out.write('Generation {0} complete.\n'.format(genct))

def simdone(this, result):
    """Report that one simulation in the batch has finished."""
    sys.stdout.write("Simulation {0} complete.\n".format(this.finished_count))

def alldone(this):
    """Report that every simulation in the batch has finished."""
    sys.stdout.write("All done.\n")
@listener('generation', generation)
@listener('force stop', forcestop)
@listener('stable state', stablestate)
@listener('initial set', initialset)
@once('generation', firstgen)
class PrisonersDilemmaSim(OnePopDiscreteReplicatorDynamics):
    """One-population Prisoner's Dilemma under discrete replicator dynamics."""

    # _payoffs[my_strategy][opponent_strategy]; index 0 = 'C', 1 = 'D'.
    _payoffs = [[3, 0], [4, 1]]

    def __init__(self, *args, **kwdargs):
        """Forward construction to the base class, then label the strategies."""
        super(PrisonersDilemmaSim, self).__init__(*args, **kwdargs)
        self.types = ['C', 'D']

    def _interaction(self, me, profile):
        """Return the payoff for player ``me`` (0 or 1) under ``profile``."""
        if me != 0 and me != 1:
            raise ValueError("Profile index out of bounds")
        return self._payoffs[profile[me]][profile[1 - me]]
@listener('result', simdone)
@listener('done', alldone)
class PDSimRunner(SimulationRunner):
    """Runner that reports per-simulation and overall batch completion."""
    pass
if __name__ == '__main__':
    # Construct the runner for the Prisoner's Dilemma simulation and start it.
    runner = PDSimRunner(PrisonersDilemmaSim)
    runner.go()
| {
"repo_name": "gsmcwhirter/simulations",
"path": "examples/one_population_pd.py",
"copies": "1",
"size": "2494",
"license": "mit",
"hash": 7719338379207879000,
"line_mean": 22.7523809524,
"line_max": 106,
"alpha_frac": 0.6704089816,
"autogenerated": false,
"ratio": 3.5577746077032812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9719398815093236,
"avg_score": 0.0017569548420088552,
"num_lines": 105
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.