| code (string, lengths 2k–1.04M) | repo_path (string, lengths 5–517) | parsed_code (string, lengths 0–1.04M) | quality_prob (float64, 0.02–0.95) | learning_prob (float64, 0.02–0.93) |
|---|---|---|---|---|
"""Punctuator input generator."""
import string
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import datasource
from lingvo.core import py_utils
from lingvo.core import tokenizers
class TextLines(datasource.TFDatasetSource):
  """Returns a tf.data.Dataset of individual lines read from text files."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('file_pattern', None, 'A file pattern string.')
    p.Define('shuffle_buffer_size', 10000,
             'Number of records buffered for random shuffling.')
    return p

  def GetDataset(self):
    p = self.params
    if not p.file_pattern:
      raise ValueError('A file pattern must be provided.')
    # Sequential order (no shuffling, no repeat) is demanded by the cluster
    # e.g. for deterministic evaluation passes.
    deterministic = self.cluster.require_sequential_input_order
    glob_pattern = py_utils.ShardedFilePatternToGlob(p.file_pattern)
    files = tf.data.Dataset.list_files(glob_pattern, shuffle=not deterministic)
    # Unit tests read with a single reader for reproducibility.
    reader_threads = (
        1 if self.cluster.in_unit_test else tf.data.experimental.AUTOTUNE)
    lines = tf.data.TextLineDataset(files, num_parallel_reads=reader_threads)
    if not deterministic:
      lines = lines.shuffle(
          p.shuffle_buffer_size, reshuffle_each_iteration=True)
      lines = lines.repeat()
    return lines
class PunctuatorInput(base_input_generator.BaseInputGenerator):
"""Reads text line by line and processes them for the punctuator task.
Input batches are NestedMaps containing:
- src.ids: int32 source word-piece ids of shape [batch, p.source_max_length].
- src.paddings: float32 paddings of shape [batch, p.source_max_length] where
paddings == 0.0 if the position is part of the input and 1.0 if the position
is padding.
- tgt.ids: int32 target word-piece ids of shape [batch, p.target_max_length].
- tgt.labels = int32 target label word-piece ids of shape
[batch, p.target_max_length]. The difference between labels and ids is that
ids include the sos (start of sequence) token in the front and labels
include the eos (end of sequence) token in the end.
- tgt.paddings: float32 paddings of shape [batch, p.target_max_length].
- tgt.weights: float32 weights of shape [batch, p.target_max_length]. Weights
are generally 1.0 - paddings and are used for loss computation.
- bucket_keys: int32 value used for bucketing. This is set automatically by
TFDatasetBatchBySequenceLength, and is for debugging purposes.
"""
@classmethod
def Params(cls):
  """Defaults params for PunctuatorInput."""
  p = super().Params()
  p.file_datasource = TextLines.Params()
  p.Define('tokenizer', tokenizers.WpmTokenizer.Params(), 'Tokenizer params.')
  p.Define('source_max_length', None,
           'The maximum length of the source sequence.')
  p.Define('target_max_length', None,
           'The maximum length of the target sequence.')
  # NOTE: the fragments below are adjacent string literals; each must end with
  # a space or the rendered help text runs words together.
  p.Define(
      'bucket_upper_bound', [], 'Bucketing scheme. Required to be '
      'a sorted list of integers. Examples that are longer than all bucket '
      'upper bounds are skipped.')
  p.Define(
      'bucket_batch_limit', [], 'Desired per-split batch size per bucket. '
      'Must be the same length as bucket_upper_bound.')
  return p
def __init__(self, params):
  """Constructs the input generator and its tokenizer child."""
  super().__init__(params)
  self.CreateChild('tokenizer', self.params.tokenizer)
def CreateDatasource(self):
  """Wraps the file datasource with line processing and length bucketing."""
  p = self.params
  # First transform raw text lines into example NestedMaps via _Process.
  processed = datasource.CustomTFDatasetTransform.Params().Set(
      sub=p.file_datasource, fn='_Process')
  # See documentation for TFDatasetBatchBySequenceLength for the specifics of
  # how the bucketing process works.
  p.file_datasource = datasource.TFDatasetBatchBySequenceLength.Params().Set(
      sub=processed,
      seqlen_fn='_GetSequenceLength',
      input_shape_fn='_InputShape',
      input_padding_fn='_InputPaddingValue',
      bucket_upper_bound=p.bucket_upper_bound,
      bucket_batch_limit=p.bucket_batch_limit)
  super().CreateDatasource()
def _Process(self, dataset):
  """Processes the dataset containing individual lines.

  Args:
    dataset: a tf.data.Dataset of raw text lines (from TextLines).

  Returns:
    A dataset of example NestedMaps produced by `_ProcessLine`.
  """
  # _map_args is presumably supplied by the base input generator (parallelism
  # options for Dataset.map) -- confirm against base_input_generator.
  return dataset.map(self._ProcessLine, **self._map_args)
def _ProcessLine(self, line):
  """A single-text-line processor.

  Gets a string tensor representing a line of text that has been read from
  the input file and tokenizes it with `self.tokenizer`. The original text
  is the target; the lowercased, punctuation-stripped text is the source.

  Args:
    line: a scalar (0-D) string tensor holding one line of text. (It is
      wrapped in a single-element vector below because StringsToIds expects
      a vector.)

  Returns:
    A NestedMap containing the processed example, with `src` and `tgt`
    sub-maps as described in the class docstring.
  """
  p = self.params
  # Tokenize the input into integer ids.
  # tgt_ids has the start-of-sentence token prepended, and tgt_labels has the
  # end-of-sentence token appended.
  tgt_ids, tgt_labels, tgt_paddings = self.tokenizer.StringsToIds(
      tf.convert_to_tensor([line]), p.target_max_length)
  # Because StringsToIds requires a vector but _ProcessLine is called for
  # individual lines, we need to manually remove the batch dimension.
  tgt_ids = tgt_ids[0]
  tgt_labels = tgt_labels[0]
  tgt_paddings = tgt_paddings[0]

  # This normalization function produces the "source" text from which the
  # Punctuator task is trained to reproduce the original "target" text.
  def Normalize(line):
    # Lowercase and remove punctuation. `line` is bytes here (note the
    # encode() and b' ' below), and bytes.translate(None, delete) removes
    # the given bytes.
    line = line.lower().translate(None, string.punctuation.encode('utf-8'))
    # Convert multiple consecutive spaces to a single one.
    line = b' '.join(line.split())
    return line

  # stateful=False declares the py_func side-effect free so TF may freely
  # cache or re-execute it.
  normalized_line = tf.py_func(Normalize, [line], tf.string, stateful=False)
  _, src_labels, src_paddings = self.tokenizer.StringsToIds(
      tf.convert_to_tensor([normalized_line]), p.source_max_length)
  # Because StringsToIds requires a vector but _ProcessLine is called for
  # individual lines, we need to manually remove the batch dimension.
  src_labels = src_labels[0]
  src_paddings = src_paddings[0]
  # The model expects the source without a start-of-sentence token.
  src_ids = src_labels

  # Weights are 1.0 on real tokens, 0.0 on padding; used for loss masking.
  tgt_weights = 1.0 - tgt_paddings

  ret = py_utils.NestedMap()
  ret.src = py_utils.NestedMap()
  ret.src.ids = tf.cast(src_ids, dtype=tf.int32)
  ret.src.paddings = src_paddings
  ret.tgt = py_utils.NestedMap()
  ret.tgt.ids = tgt_ids
  ret.tgt.labels = tf.cast(tgt_labels, dtype=tf.int32)
  ret.tgt.weights = tgt_weights
  ret.tgt.paddings = tgt_paddings
  return ret
def _GetSequenceLength(self, example):
  """Returns the bucketing sequence length for a single example.

  This function is used by the TFDatasetBatchBySequenceLength DataSource as
  the bucketing key: examples are grouped by length before batching so that
  each batch only contains examples within a certain length.

  Args:
    example: A NestedMap containing an input example. Tensors in the example
      do not have a leading batch dimension.

  Returns:
    An int32 scalar: the larger of the source and target token counts.
  """
  # Real positions have paddings == 0.0, so summing (1 - paddings) counts
  # the non-padding tokens on each side.
  src_len = tf.reduce_sum(1.0 - example.src.paddings)
  tgt_len = tf.reduce_sum(1.0 - example.tgt.paddings)
  return tf.cast(tf.round(tf.maximum(src_len, tgt_len)), tf.int32)
def _InputShape(self, key):
  """Returns the per-example (batch-less) shape for the given key.

  This function is used by the TFDatasetBatchBySequenceLength DataSource to
  pad variable-length examples to a common shape before batching.

  Args:
    key: The NestedMap key to return the shape for.

  Raises:
    ValueError: if `key` is not a recognized example field.
  """
  p = self.params
  if key == 'bucket_keys':
    # The bucketing key is a scalar.
    return ()
  for prefix, length in (('src.', p.source_max_length),
                         ('tgt.', p.target_max_length)):
    if key.startswith(prefix):
      return [length]
  raise ValueError('Unexpected key %s' % key)
def _InputPaddingValue(self, key, tensorspec):
"""Returns a scalar value to pad the tensor corresponding to key with.
This function is used by the TFDatasetBatchBySequenceLength DataSource to
specify the value used for padding.
Args:
key: The NestedMap key to return padding value for.
tensorspec: a tf.TensorSpec describing the tensor to be padded.
"""
if key.endswith('_paddings'):
return tf.ones([], dtype=tensorspec.dtype)
else:
return tf.zeros([], dtype=tensorspec.dtype) | lingvo/tasks/punctuator/input_generator.py | """Punctuator input generator."""
import string
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import datasource
from lingvo.core import py_utils
from lingvo.core import tokenizers
class TextLines(datasource.TFDatasetSource):
"""Returns a tf.data.Dataset containing lines from a text file."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('file_pattern', None, 'A file pattern string.')
p.Define('shuffle_buffer_size', 10000,
'Number of records buffered for random shuffling.')
return p
def GetDataset(self):
p = self.params
if not p.file_pattern:
raise ValueError('A file pattern must be provided.')
file_pattern_glob = py_utils.ShardedFilePatternToGlob(p.file_pattern)
dataset = tf.data.Dataset.list_files(
file_pattern_glob,
shuffle=not self.cluster.require_sequential_input_order)
dataset = tf.data.TextLineDataset(
dataset,
num_parallel_reads=(1 if self.cluster.in_unit_test else
tf.data.experimental.AUTOTUNE))
if not self.cluster.require_sequential_input_order:
dataset = dataset.shuffle(
p.shuffle_buffer_size, reshuffle_each_iteration=True)
dataset = dataset.repeat()
return dataset
class PunctuatorInput(base_input_generator.BaseInputGenerator):
"""Reads text line by line and processes them for the punctuator task.
Input batches are NestedMaps containing:
- src.ids: int32 source word-piece ids of shape [batch, p.source_max_length].
- src.paddings: float32 paddings of shape [batch, p.source_max_length] where
paddings == 0.0 if the position is part of the input and 1.0 if the position
is padding.
- tgt.ids: int32 target word-piece ids of shape [batch, p.target_max_length].
- tgt.labels = int32 target label word-piece ids of shape
[batch, p.target_max_length]. The difference between labels and ids is that
ids include the sos (start of sequence) token in the front and labels
include the eos (end of sequence) token in the end.
- tgt.paddings: float32 paddings of shape [batch, p.target_max_length].
- tgt.weights: float32 weights of shape [batch, p.target_max_length]. Weights
are generally 1.0 - paddings and are used for loss computation.
- bucket_keys: int32 value used for bucketing. This is set automatically by
TFDatasetBatchBySequenceLength, and is for debugging purposes.
"""
@classmethod
def Params(cls):
"""Defaults params for PunctuatorInput."""
p = super().Params()
p.file_datasource = TextLines.Params()
p.Define('tokenizer', tokenizers.WpmTokenizer.Params(), 'Tokenizer params.')
p.Define('source_max_length', None,
'The maximum length of the source sequence.')
p.Define('target_max_length', None,
'The maximum length of the target sequence.')
p.Define(
'bucket_upper_bound', [], 'Bucketing scheme. Required to be'
'a sorted list of integers. Examples that are longer than all bucket'
'upper bounds are skipped.')
p.Define(
'bucket_batch_limit', [], 'Desired per-split batch size per bucket. '
'Must be the same length as bucket_upper_bound.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self.CreateChild('tokenizer', p.tokenizer)
def CreateDatasource(self):
p = self.params
ds = p.file_datasource
ds = datasource.CustomTFDatasetTransform.Params().Set(sub=ds, fn='_Process')
# See documentation for TFDatasetBatchBySequenceLength for the specifics of
# how the bucketing process works.
ds = datasource.TFDatasetBatchBySequenceLength.Params().Set(
sub=ds,
seqlen_fn='_GetSequenceLength',
input_shape_fn='_InputShape',
input_padding_fn='_InputPaddingValue',
bucket_upper_bound=p.bucket_upper_bound,
bucket_batch_limit=p.bucket_batch_limit)
p.file_datasource = ds
super().CreateDatasource()
def _Process(self, dataset):
"""Processes the dataset containing individual lines."""
return dataset.map(self._ProcessLine, **self._map_args)
def _ProcessLine(self, line):
"""A single-text-line processor.
Gets a string tensor representing a line of text that have been read from
the input file, and splits it to graphemes (characters).
We use original characters as the target labels, and the lowercased and
punctuation-removed characters as the source labels.
Args:
line: a 1D string tensor.
Returns:
A NestedMap containing the processed example.
"""
p = self.params
# Tokenize the input into integer ids.
# tgt_ids has the start-of-sentence token prepended, and tgt_labels has the
# end-of-sentence token appended.
tgt_ids, tgt_labels, tgt_paddings = self.tokenizer.StringsToIds(
tf.convert_to_tensor([line]), p.target_max_length)
# Because StringsToIds requires a vector but _ProcessLine is called for
# individual lines, we need to manually remove the batch dimension.
tgt_ids = tgt_ids[0]
tgt_labels = tgt_labels[0]
tgt_paddings = tgt_paddings[0]
# This normalization function produces the "source" text from which the
# Punctuator task is trained to reproduce the original "target" text.
def Normalize(line):
# Lowercase and remove punctuation.
line = line.lower().translate(None, string.punctuation.encode('utf-8'))
# Convert multiple consecutive spaces to a single one.
line = b' '.join(line.split())
return line
normalized_line = tf.py_func(Normalize, [line], tf.string, stateful=False)
_, src_labels, src_paddings = self.tokenizer.StringsToIds(
tf.convert_to_tensor([normalized_line]), p.source_max_length)
# Because StringsToIds requires a vector but _ProcessLine is called for
# individual lines, we need to manually remove the batch dimension.
src_labels = src_labels[0]
src_paddings = src_paddings[0]
# The model expects the source without a start-of-sentence token.
src_ids = src_labels
tgt_weights = 1.0 - tgt_paddings
ret = py_utils.NestedMap()
ret.src = py_utils.NestedMap()
ret.src.ids = tf.cast(src_ids, dtype=tf.int32)
ret.src.paddings = src_paddings
ret.tgt = py_utils.NestedMap()
ret.tgt.ids = tgt_ids
ret.tgt.labels = tf.cast(tgt_labels, dtype=tf.int32)
ret.tgt.weights = tgt_weights
ret.tgt.paddings = tgt_paddings
return ret
def _GetSequenceLength(self, example):
"""Returns sequence length for the example NestedMap from the dataset.
This function is used by the TFDatasetBatchBySequenceLength DataSource to
obtain the key used for bucketing. Bucketing separates examples into
groups before batching, such that each batch contains only examples within a
certain length.
Args:
example: A NestedMap containing an input example. Tensors in the example
do not have a leading batch dimension.
Returns:
An integer sequence length for the example.
"""
return tf.cast(
tf.round(
tf.maximum(
tf.reduce_sum(1.0 - example.src.paddings),
tf.reduce_sum(1.0 - example.tgt.paddings))), tf.int32)
def _InputShape(self, key):
"""Returns the final shape of the tensor corresponding to key as a tuple.
The shape should not include a leading batch dimension.
This function is used by the TFDatasetBatchBySequenceLength DataSource to
specify the shape for each key in an example. Because sequence examples are
of different lengths, they need to be padded to a common shape for batching.
Args:
key: The NestedMap key to return shape for.
"""
p = self.params
if key == 'bucket_keys':
return ()
if key.startswith('src.'):
return [p.source_max_length]
if key.startswith('tgt.'):
return [p.target_max_length]
raise ValueError('Unexpected key %s' % key)
def _InputPaddingValue(self, key, tensorspec):
"""Returns a scalar value to pad the tensor corresponding to key with.
This function is used by the TFDatasetBatchBySequenceLength DataSource to
specify the value used for padding.
Args:
key: The NestedMap key to return padding value for.
tensorspec: a tf.TensorSpec describing the tensor to be padded.
"""
if key.endswith('_paddings'):
return tf.ones([], dtype=tensorspec.dtype)
else:
return tf.zeros([], dtype=tensorspec.dtype) | 0.863089 | 0.436202 |
from __future__ import absolute_import
from argparse import Action, ArgumentTypeError, Namespace, _ActionsContainer
from pex import pex_warnings
from pex.argparse import HandleBoolAction
from pex.network_configuration import NetworkConfiguration
from pex.orderedset import OrderedSet
from pex.resolve.lockfile import json_codec
from pex.resolve.lockfile.model import Lockfile
from pex.resolve.path_mappings import PathMapping, PathMappings
from pex.resolve.resolver_configuration import (
PYPI,
LockRepositoryConfiguration,
PexRepositoryConfiguration,
PipConfiguration,
ReposConfiguration,
ResolverVersion,
)
from pex.result import Error
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import Optional, Union
class _ManylinuxAction(Action):
    """Argparse action for `--manylinux[=<standard>]` / `--no-manylinux*` flags.

    Stores the given manylinux standard (a value starting with "manylinux") in
    the destination, or clears it to None for `--no-*` option forms.
    """

    def __init__(self, *args, **kwargs):
        # The value is optional so the negative form can be given bare.
        kwargs["nargs"] = "?"
        super(_ManylinuxAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, value, option_str=None):
        if option_str.startswith("--no"):
            setattr(namespace, self.dest, None)
        elif value and value.startswith("manylinux"):
            setattr(namespace, self.dest, value)
        else:
            # Also reached when the positive option is passed with no value
            # (nargs="?" yields None), which previously crashed with an
            # AttributeError on `None.startswith`.
            raise ArgumentTypeError(
                "Please specify a manylinux standard; ie: --manylinux=manylinux1. "
                "Given {}".format(value)
            )
class _HandleTransitiveAction(Action):
    """Argparse action that records whether resolution is transitive.

    Sets its destination to True when invoked as `--transitive` and to False
    for any of the negating option forms.
    """

    def __init__(self, *args, **kwargs):
        # Pure flag: consumes no value from the command line.
        kwargs["nargs"] = 0
        super(_HandleTransitiveAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, value, option_str=None):
        is_transitive = option_str == "--transitive"
        setattr(namespace, self.dest, is_transitive)
def register(
    parser,  # type: _ActionsContainer
    include_pex_repository=False,  # type: bool
    include_lock=False,  # type: bool
):
    # type: (...) -> None
    """Register resolver configuration options with the given parser.

    :param parser: The parser to register resolver configuration options with.
    :param include_pex_repository: Whether to include the `--pex-repository` option.
    :param include_lock: Whether to include the `--lock` option.
    """
    default_resolver_configuration = PipConfiguration()
    parser.add_argument(
        "--resolver-version",
        dest="resolver_version",
        default=default_resolver_configuration.resolver_version,
        choices=ResolverVersion.values(),
        type=ResolverVersion.for_value,
        help=(
            "The dependency resolver version to use. Read more at "
            "https://pip.pypa.io/en/stable/user_guide/#resolver-changes-2020"
        ),
    )
    register_repos_options(parser)
    register_network_options(parser)
    # The next two options are retained only for command line backwards
    # compatibility; `create_pip_configuration` warns when they are used.
    parser.add_argument(
        "--cache-ttl",
        metavar="DEPRECATED",
        default=None,
        type=int,
        help="Deprecated: No longer used.",
    )
    parser.add_argument(
        "-H",
        "--header",
        dest="headers",
        metavar="DEPRECATED",
        default=None,
        type=str,
        action="append",
        help="Deprecated: No longer used.",
    )
    # `--pex-repository` and `--lock` are alternative requirement sources; when
    # both are registered they must be mutually exclusive.
    repository_choice = (
        parser.add_mutually_exclusive_group() if include_pex_repository and include_lock else parser
    )
    if include_pex_repository:
        repository_choice.add_argument(
            "--pex-repository",
            dest="pex_repository",
            metavar="FILE",
            default=None,
            type=str,
            help=(
                "Resolve requirements from the given PEX file instead of from --index servers, "
                "--find-links repos or a --lock file."
            ),
        )
    if include_lock:
        repository_choice.add_argument(
            "--lock",
            dest="lock",
            metavar="FILE",
            default=None,
            type=str,
            help=(
                "Resolve requirements from the given lock file created by Pex instead of from "
                "--index servers, --find-links repos or a --pex-repository. If no requirements are "
                "specified, will install the entire lock."
            ),
        )
        register_lock_options(parser)
    parser.add_argument(
        "--pre",
        "--no-pre",
        dest="allow_prereleases",
        default=default_resolver_configuration.allow_prereleases,
        action=HandleBoolAction,
        help="Whether to include pre-release and development versions of requirements.",
    )
    parser.add_argument(
        "--wheel",
        "--binary",
        "--no-wheel",
        "--no-use-wheel",
        "--no-binary",
        "--no-use-binary",
        dest="allow_wheels",
        default=default_resolver_configuration.allow_wheels,
        action=HandleBoolAction,
        help="Whether to allow binary distributions.",
    )
    parser.add_argument(
        "--build",
        "--no-build",
        dest="allow_builds",
        default=default_resolver_configuration.allow_builds,
        action=HandleBoolAction,
        help="Whether to allow building of distributions from source.",
    )
    parser.add_argument(
        "--prefer-wheel",
        "--prefer-binary",
        "--no-prefer-wheel",
        "--no-prefer-binary",
        dest="prefer_older_binary",
        default=default_resolver_configuration.prefer_older_binary,
        action=HandleBoolAction,
        help=(
            "Whether to prefer older binary distributions to newer source distributions (prefer "
            "not building wheels)."
        ),
    )
    parser.add_argument(
        "--force-pep517",
        "--use-pep517",
        "--no-use-pep517",
        dest="use_pep517",
        default=default_resolver_configuration.use_pep517,
        action=HandleBoolAction,
        help=(
            "Whether to force use of PEP 517 for building source distributions into wheels ("
            "https://www.python.org/dev/peps/pep-0518) or force direct invocation of "
            "`setup.py bdist_wheel` (which requires all source distributions have a `setup.py` "
            "based build). Defaults to using PEP-517 only when a `pyproject.toml` file is present "
            "with a `build-system` section. If PEP-517 is forced (--use-pep517 is passed) and no "
            "`pyproject.toml` file is present or one is but does not have a `build-system` section "
            "defined, then the build is executed as if a `pyproject.toml` was present with a "
            '`build-system` section comprised of `requires = ["setuptools>=40.8.0", "wheel"]` and '
            '`build-backend = "setuptools.build_meta:__legacy__"`.'
        ),
    )
    parser.add_argument(
        "--build-isolation",
        "--no-build-isolation",
        dest="build_isolation",
        default=default_resolver_configuration.build_isolation,
        action=HandleBoolAction,
        help=(
            "Disable `sys.path` isolation when building a modern source distribution. Build "
            "dependencies specified by PEP 518 (https://www.python.org/dev/peps/pep-0518) must "
            "already be installed on the `sys.path` if this option is used."
        ),
    )
    parser.add_argument(
        "--transitive",
        "--no-transitive",
        "--intransitive",
        dest="transitive",
        default=default_resolver_configuration.transitive,
        action=_HandleTransitiveAction,
        help="Whether to transitively resolve requirements.",
    )
    register_max_jobs_option(parser)
def register_lock_options(parser):
    # type: (_ActionsContainer) -> None
    """Register lock options with the given parser.

    :param parser: The parser to register lock configuration options with.
    """
    path_mapping_help = (
        "A mapping of the form `NAME|PATH|DESCRIPTION` of a logical name to a concrete local "
        "absolute path with an optional description. Can be specified multiple times. The "
        "mapping must include the pipe (`|`) separated name and absolute path components, but "
        "the trailing pipe-separated description is optional. The mapping is used when "
        "creating, and later reading, lock files to ensure the lock file created on one "
        "machine can be used on another with a potentially different realization of various "
        "paths used in the resolve. A typical example is a find-links repo. This might be "
        "provided on the file-system via a network mount instead of via an HTTP(S) server and "
        "that network mount may be at different absolute paths on different machines. "
        "Classically, it may be in a user's home directory; whose path will vary from user to "
        "user."
    )
    parser.add_argument(
        "--path-mapping",
        action="append",
        dest="path_mappings",
        default=[],
        type=str,
        help=path_mapping_help,
    )
def register_repos_options(parser):
    # type: (_ActionsContainer) -> None
    """Register repos configuration options with the given parser.

    :param parser: The parser to register repos configuration options with.
    """
    # PyPI is used by default; `--no-pypi` / `--no-index` turn it off.
    parser.add_argument(
        "--pypi",
        "--no-pypi",
        "--no-index",
        action=HandleBoolAction,
        dest="pypi",
        default=True,
        help="Whether to use PyPI to resolve dependencies.",
    )
    # Extra find-links repos: local directories or URLs.
    parser.add_argument(
        "-f",
        "--find-links",
        "--repo",
        action="append",
        dest="find_links",
        metavar="PATH/URL",
        type=str,
        help="Additional repository path (directory or URL) to look for requirements.",
    )
    # Extra package indexes beyond (or instead of) PyPI.
    parser.add_argument(
        "-i",
        "--index",
        "--index-url",
        action="append",
        dest="indexes",
        metavar="URL",
        type=str,
        help="Additional cheeseshop indices to use to satisfy requirements.",
    )
def register_network_options(parser):
    # type: (_ActionsContainer) -> None
    """Register network configuration options with the given parser.

    :param parser: The parser to register network configuration options with.
    """
    # Defaults are drawn from the stock Pip configuration's network settings.
    defaults = PipConfiguration().network_configuration
    parser.add_argument(
        "--retries",
        type=int,
        default=defaults.retries,
        help="Maximum number of retries each connection should attempt.",
    )
    parser.add_argument(
        "--timeout",
        metavar="SECS",
        type=int,
        default=defaults.timeout,
        help="Set the socket timeout in seconds.",
    )
    parser.add_argument(
        "--proxy",
        type=str,
        default=defaults.proxy,
        help="Specify a proxy in the form http(s)://[user:passwd@]proxy.server:port.",
    )
    parser.add_argument(
        "--cert",
        metavar="PATH",
        type=str,
        default=defaults.cert,
        help="Path to alternate CA bundle.",
    )
    parser.add_argument(
        "--client-cert",
        metavar="PATH",
        type=str,
        default=defaults.client_cert,
        help=(
            "Path to an SSL client certificate which should be a single file containing the "
            "private key and the certificate in PEM format."
        ),
    )
def register_max_jobs_option(parser):
    # type: (_ActionsContainer) -> None
    """Register the max jobs configuration option with the given parser.

    :param parser: The parser to register the max job option with.
    """
    default_max_jobs = PipConfiguration().max_jobs
    parser.add_argument(
        "-j",
        "--jobs",
        dest="max_jobs",
        metavar="JOBS",
        type=int,
        default=default_max_jobs,
        help=(
            "The maximum number of parallel jobs to use when resolving, building and "
            "installing distributions. You might want to increase the maximum number of "
            "parallel jobs to potentially improve the latency of the pex creation process at "
            "the expense of other processes on your system."
        ),
    )
# Raised by `configure` below when mutually incompatible resolver options are
# combined (e.g. --pex-repository with --index/--find-links).
class InvalidConfigurationError(Exception):
    """Indicates an invalid resolver configuration."""
if TYPE_CHECKING:
    # Type-checking-only alias: `configure` returns one of these three
    # resolver configuration flavors.
    ResolverConfiguration = Union[
        LockRepositoryConfiguration, PexRepositoryConfiguration, PipConfiguration
    ]
def configure(options):
    # type: (Namespace) -> ResolverConfiguration
    """Creates a resolver configuration from options registered by `register`.

    :param options: The resolver configuration options.
    :raise: :class:`InvalidConfigurationError` if the resolver configuration is invalid.
    """
    pex_repository = getattr(options, "pex_repository", None)
    lock = getattr(options, "lock", None)

    if pex_repository:
        # A PEX repository is exclusive with explicit indexes / find-links.
        if options.indexes or options.find_links:
            raise InvalidConfigurationError(
                'The "--pex-repository" option cannot be used together with the "--index" or '
                '"--find-links" options.'
            )
        return PexRepositoryConfiguration(
            pex_repository=pex_repository,
            network_configuration=create_network_configuration(options),
            transitive=options.transitive,
        )

    pip_configuration = create_pip_configuration(options)
    if not lock:
        return pip_configuration
    # Parsing the lock is deferred; it only happens if the lock is consulted.
    return LockRepositoryConfiguration(
        parse_lock=lambda: parse_lockfile(options),
        pip_configuration=pip_configuration,
    )
def create_pip_configuration(options):
    # type: (Namespace) -> PipConfiguration
    """Creates a Pip configuration from options registered by `register`.

    :param options: The Pip resolver configuration options.
    """
    # These two flags are accepted only for backwards compatibility; warn when
    # they are supplied since they no longer do anything.
    if options.cache_ttl:
        pex_warnings.warn("The --cache-ttl option is deprecated and no longer has any effect.")
    if options.headers:
        pex_warnings.warn("The --header option is deprecated and no longer has any effect.")

    return PipConfiguration(
        resolver_version=options.resolver_version,
        repos_configuration=create_repos_configuration(options),
        network_configuration=create_network_configuration(options),
        allow_prereleases=options.allow_prereleases,
        allow_wheels=options.allow_wheels,
        allow_builds=options.allow_builds,
        prefer_older_binary=options.prefer_older_binary,
        use_pep517=options.use_pep517,
        build_isolation=options.build_isolation,
        transitive=options.transitive,
        max_jobs=get_max_jobs_value(options),
    )
def create_repos_configuration(options):
    # type: (Namespace) -> ReposConfiguration
    """Creates a repos configuration from options registered by `register_repos_options`.

    :param options: The Pip resolver configuration options.
    """
    # PyPI (when enabled) always comes first; extra indexes follow in the
    # order given. OrderedSet de-duplicates while preserving order.
    index_urls = [PYPI] if options.pypi else []
    index_urls.extend(options.indexes or [])
    indexes = OrderedSet(index_urls)  # type: OrderedSet[str]
    find_links = OrderedSet(options.find_links or ())  # type: OrderedSet[str]
    return ReposConfiguration.create(indexes=tuple(indexes), find_links=tuple(find_links))
def create_network_configuration(options):
    # type: (Namespace) -> NetworkConfiguration
    """Creates a network configuration from options registered by `register_network_options`.

    :param options: The Pip resolver configuration options.
    """
    # Each NetworkConfiguration field maps 1:1 to a same-named option.
    kwargs = {
        name: getattr(options, name)
        for name in ("retries", "timeout", "proxy", "cert", "client_cert")
    }
    return NetworkConfiguration(**kwargs)
def get_max_jobs_value(options):
    # type: (Namespace) -> int
    """Retrieves the max jobs value from the option registered by `register_max_jobs_option`.

    :param options: The max jobs configuration option.
    """
    # `cast` is a runtime no-op; it only narrows the type for the checker.
    return cast(int, options.max_jobs)
def _parse_path_mapping(path_mapping):
    # type: (str) -> PathMapping
    """Parses a `NAME|PATH[|DESCRIPTION]` command line value into a PathMapping.

    :raise: :class:`argparse.ArgumentTypeError` if no `|` separator is present.
    """
    if "|" not in path_mapping:
        raise ArgumentTypeError(
            "A path mapping must be of the form `NAME|PATH` with an optional trailing "
            "`|DESCRIPTION`, given: {path_mapping}.\n"
            "For example: `FL|/path/to/local/find-links/repo/directory` indicates that find-links "
            "requirements or URLs starting with `/path/to/local/find-links/repo/directory` should "
            "have that absolute root path replaced with the `${{FL}}` placeholder name.\n"
            "Alternatively, you could use the form with a trailing description to make it more "
            "clear what value should be substituted for `${{FL}}` when the mapping is later read, "
            "e.g.: `FL|/local/path|The local find-links repo path`."
            "".format(path_mapping=path_mapping)
        )
    name, _, rest = path_mapping.partition("|")
    path, sep, description = rest.partition("|")
    # A missing (as opposed to empty) description is represented as None.
    return PathMapping(path=path, name=name, description=description if sep else None)
def get_path_mappings(options):
    # type: (Namespace) -> PathMappings
    """Retrieves the PathMappings value from the options registered by `register_lock_options`.

    :param options: The lock configuration options.
    """
    parsed = [_parse_path_mapping(raw) for raw in options.path_mappings]
    return PathMappings(mappings=tuple(parsed))
def parse_lockfile(
options, # type: Namespace
lock_file_path=None, # type: Optional[str]
):
# type: (...) -> Union[Lockfile, Error]
path = lock_file_path or options.lock
path_mappings = get_path_mappings(options)
with TRACER.timed("Parsing lock {lockfile}".format(lockfile=path)):
try:
return json_codec.load(path, path_mappings=path_mappings)
except json_codec.PathMappingError as e:
return Error(
"The lockfile at {path} requires specifying {prefix}"
"'--path-mapping' {values} for: {required_paths}\n"
"Given {given_mappings_verbiage}\n"
"{maybe_path_mappings}"
"Which left the following path mappings unspecified:\n"
"{unspecified_paths}\n"
"\n"
"To fix, add command line options for:\n{examples}".format(
path=path,
prefix="" if len(e.required_path_mappings) > 1 else "a ",
values="values" if len(e.required_path_mappings) > 1 else "value",
required_paths=", ".join(sorted(e.required_path_mappings)),
given_mappings_verbiage="the following path mappings:"
if path_mappings.mappings
else "no path mappings.",
maybe_path_mappings="{path_mappings}\n".format(
path_mappings="\n".join(
sorted(
"--path-mapping '{mapping}'".format(
mapping="|".join((mapping.name, mapping.path))
)
for mapping in path_mappings.mappings
)
)
)
if path_mappings.mappings
else "",
unspecified_paths="\n".join(
sorted(
(
"{path}: {description}".format(path=path, description=description)
if description
else path
)
for path, description in e.required_path_mappings.items()
if path in e.unspecified_paths
)
),
examples="\n".join(
sorted(
"--path-mapping '{path}|<path of {path}>'".format(path=path)
for path in e.required_path_mappings
if path in e.unspecified_paths
)
),
)
) | pex/resolve/resolver_options.py |
from __future__ import absolute_import
from argparse import Action, ArgumentTypeError, Namespace, _ActionsContainer
from pex import pex_warnings
from pex.argparse import HandleBoolAction
from pex.network_configuration import NetworkConfiguration
from pex.orderedset import OrderedSet
from pex.resolve.lockfile import json_codec
from pex.resolve.lockfile.model import Lockfile
from pex.resolve.path_mappings import PathMapping, PathMappings
from pex.resolve.resolver_configuration import (
PYPI,
LockRepositoryConfiguration,
PexRepositoryConfiguration,
PipConfiguration,
ReposConfiguration,
ResolverVersion,
)
from pex.result import Error
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from typing import Optional, Union
class _ManylinuxAction(Action):
def __init__(self, *args, **kwargs):
kwargs["nargs"] = "?"
super(_ManylinuxAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_str=None):
if option_str.startswith("--no"):
setattr(namespace, self.dest, None)
elif value.startswith("manylinux"):
setattr(namespace, self.dest, value)
else:
raise ArgumentTypeError(
"Please specify a manylinux standard; ie: --manylinux=manylinux1. "
"Given {}".format(value)
)
class _HandleTransitiveAction(Action):
def __init__(self, *args, **kwargs):
kwargs["nargs"] = 0
super(_HandleTransitiveAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_str=None):
setattr(namespace, self.dest, option_str == "--transitive")
def register(
    parser,  # type: _ActionsContainer
    include_pex_repository=False,  # type: bool
    include_lock=False,  # type: bool
):
    # type: (...) -> None
    """Register resolver configuration options with the given parser.

    Registers the resolver-version, repos, network, deprecated, repository
    choice (`--pex-repository` / `--lock`), build-behavior and max-jobs
    options. Defaults come from a fresh `PipConfiguration`.

    :param parser: The parser to register resolver configuration options with.
    :param include_pex_repository: Whether to include the `--pex-repository` option.
    :param include_lock: Whether to include the `--lock` option.
    """
    default_resolver_configuration = PipConfiguration()
    parser.add_argument(
        "--resolver-version",
        dest="resolver_version",
        default=default_resolver_configuration.resolver_version,
        choices=ResolverVersion.values(),
        type=ResolverVersion.for_value,
        help=(
            "The dependency resolver version to use. Read more at "
            "https://pip.pypa.io/en/stable/user_guide/#resolver-changes-2020"
        ),
    )
    register_repos_options(parser)
    register_network_options(parser)
    # Deprecated no-op options, kept so existing command lines still parse.
    parser.add_argument(
        "--cache-ttl",
        metavar="DEPRECATED",
        default=None,
        type=int,
        help="Deprecated: No longer used.",
    )
    parser.add_argument(
        "-H",
        "--header",
        dest="headers",
        metavar="DEPRECATED",
        default=None,
        type=str,
        action="append",
        help="Deprecated: No longer used.",
    )
    # `--pex-repository` and `--lock` are mutually exclusive when both are offered.
    repository_choice = (
        parser.add_mutually_exclusive_group() if include_pex_repository and include_lock else parser
    )
    if include_pex_repository:
        repository_choice.add_argument(
            "--pex-repository",
            dest="pex_repository",
            metavar="FILE",
            default=None,
            type=str,
            help=(
                "Resolve requirements from the given PEX file instead of from --index servers, "
                "--find-links repos or a --lock file."
            ),
        )
    if include_lock:
        repository_choice.add_argument(
            "--lock",
            dest="lock",
            metavar="FILE",
            default=None,
            type=str,
            help=(
                "Resolve requirements from the given lock file created by Pex instead of from "
                "--index servers, --find-links repos or a --pex-repository. If no requirements are "
                "specified, will install the entire lock."
            ),
        )
        register_lock_options(parser)
    parser.add_argument(
        "--pre",
        "--no-pre",
        dest="allow_prereleases",
        default=default_resolver_configuration.allow_prereleases,
        action=HandleBoolAction,
        help="Whether to include pre-release and development versions of requirements.",
    )
    parser.add_argument(
        "--wheel",
        "--binary",
        "--no-wheel",
        "--no-use-wheel",
        "--no-binary",
        "--no-use-binary",
        dest="allow_wheels",
        default=default_resolver_configuration.allow_wheels,
        action=HandleBoolAction,
        help="Whether to allow binary distributions.",
    )
    parser.add_argument(
        "--build",
        "--no-build",
        dest="allow_builds",
        default=default_resolver_configuration.allow_builds,
        action=HandleBoolAction,
        help="Whether to allow building of distributions from source.",
    )
    parser.add_argument(
        "--prefer-wheel",
        "--prefer-binary",
        "--no-prefer-wheel",
        "--no-prefer-binary",
        dest="prefer_older_binary",
        default=default_resolver_configuration.prefer_older_binary,
        action=HandleBoolAction,
        help=(
            "Whether to prefer older binary distributions to newer source distributions (prefer "
            "not building wheels)."
        ),
    )
    parser.add_argument(
        "--force-pep517",
        "--use-pep517",
        "--no-use-pep517",
        dest="use_pep517",
        default=default_resolver_configuration.use_pep517,
        action=HandleBoolAction,
        help=(
            "Whether to force use of PEP 517 for building source distributions into wheels ("
            "https://www.python.org/dev/peps/pep-0518) or force direct invocation of"
            "`setup.py bdist_wheel` (which requires all source distributions have a `setup.py` "
            "based build). Defaults to using PEP-517 only when a `pyproject.toml` file is present "
            "with a `build-system` section. If PEP-517 is forced (--use-pep517 is passed) and no "
            "`pyproject.toml` file is present or one is but does not have a `build-system` section "
            "defined, then the build is executed as if a `pyproject.toml` was present with a "
            '`build-system` section comprised of `requires = ["setuptools>=40.8.0", "wheel"]` and '
            '`build-backend = "setuptools.build_meta:__legacy__"`.'
        ),
    )
    parser.add_argument(
        "--build-isolation",
        "--no-build-isolation",
        dest="build_isolation",
        default=default_resolver_configuration.build_isolation,
        action=HandleBoolAction,
        help=(
            "Disable `sys.path` isolation when building a modern source distribution. Build "
            "dependencies specified by PEP 518 (https://www.python.org/dev/peps/pep-0518) must "
            "already be installed on the `sys.path` if this option is used."
        ),
    )
    parser.add_argument(
        "--transitive",
        "--no-transitive",
        "--intransitive",
        dest="transitive",
        default=default_resolver_configuration.transitive,
        action=_HandleTransitiveAction,
        help="Whether to transitively resolve requirements.",
    )
    register_max_jobs_option(parser)
def register_lock_options(parser):
    # type: (_ActionsContainer) -> None
    """Register lock options with the given parser.

    Currently this is only the repeatable `--path-mapping` option, whose raw
    string values are later parsed by `get_path_mappings`.

    :param parser: The parser to register lock configuration options with.
    """
    parser.add_argument(
        "--path-mapping",
        dest="path_mappings",
        action="append",
        default=[],
        type=str,
        help=(
            "A mapping of the form `NAME|PATH|DESCRIPTION` of a logical name to a concrete local "
            "absolute path with an optional description. Can be specified multiple times. The "
            "mapping must include the pipe (`|`) separated name and absolute path components, but "
            "the trailing pipe-separated description is optional. The mapping is used when "
            "creating, and later reading, lock files to ensure the lock file created on one "
            "machine can be used on another with a potentially different realization of various "
            "paths used in the resolve. A typical example is a find-links repo. This might be "
            "provided on the file-system via a network mount instead of via an HTTP(S) server and "
            "that network mount may be at different absolute paths on different machines. "
            "Classically, it may be in a user's home directory; whose path will vary from user to "
            "user."
        ),
    )
def register_repos_options(parser):
    # type: (_ActionsContainer) -> None
    """Register repos configuration options with the given parser.

    Registers `--pypi`, `--find-links` and `--index`; their values are later
    consumed by `create_repos_configuration`.

    :param parser: The parser to register repos configuration options with.
    """
    parser.add_argument(
        "--pypi",
        "--no-pypi",
        "--no-index",
        dest="pypi",
        action=HandleBoolAction,
        default=True,
        help="Whether to use PyPI to resolve dependencies.",
    )
    parser.add_argument(
        "-f",
        "--find-links",
        "--repo",
        metavar="PATH/URL",
        action="append",
        dest="find_links",
        type=str,
        help="Additional repository path (directory or URL) to look for requirements.",
    )
    parser.add_argument(
        "-i",
        "--index",
        "--index-url",
        metavar="URL",
        action="append",
        dest="indexes",
        type=str,
        help="Additional cheeseshop indices to use to satisfy requirements.",
    )
def register_network_options(parser):
    # type: (_ActionsContainer) -> None
    """Register network configuration options with the given parser.

    Defaults are taken from a fresh `PipConfiguration`'s network configuration;
    values are later consumed by `create_network_configuration`.

    :param parser: The parser to register network configuration options with.
    """
    default_resolver_configuration = PipConfiguration()
    default_network_configuration = default_resolver_configuration.network_configuration
    parser.add_argument(
        "--retries",
        default=default_network_configuration.retries,
        type=int,
        help="Maximum number of retries each connection should attempt.",
    )
    parser.add_argument(
        "--timeout",
        metavar="SECS",
        default=default_network_configuration.timeout,
        type=int,
        help="Set the socket timeout in seconds.",
    )
    parser.add_argument(
        "--proxy",
        type=str,
        default=default_network_configuration.proxy,
        help="Specify a proxy in the form http(s)://[user:passwd@]proxy.server:port.",
    )
    parser.add_argument(
        "--cert",
        metavar="PATH",
        type=str,
        default=default_network_configuration.cert,
        help="Path to alternate CA bundle.",
    )
    parser.add_argument(
        "--client-cert",
        metavar="PATH",
        type=str,
        default=default_network_configuration.client_cert,
        help=(
            "Path to an SSL client certificate which should be a single file containing the "
            "private key and the certificate in PEM format."
        ),
    )
def register_max_jobs_option(parser):
    # type: (_ActionsContainer) -> None
    """Register the max jobs configuration option with the given parser.

    The registered `-j` / `--jobs` value is later read via `get_max_jobs_value`.

    :param parser: The parser to register the max job option with.
    """
    default_resolver_configuration = PipConfiguration()
    parser.add_argument(
        "-j",
        "--jobs",
        metavar="JOBS",
        dest="max_jobs",
        type=int,
        default=default_resolver_configuration.max_jobs,
        help=(
            "The maximum number of parallel jobs to use when resolving, building and "
            "installing distributions. You might want to increase the maximum number of "
            "parallel jobs to potentially improve the latency of the pex creation process at "
            "the expense of other processes on your system."
        ),
    )
# Raised by `configure` when mutually incompatible resolver options are given.
class InvalidConfigurationError(Exception):
    """Indicates an invalid resolver configuration."""
if TYPE_CHECKING:
    # Union of every configuration type `configure` can return.
    ResolverConfiguration = Union[
        LockRepositoryConfiguration, PexRepositoryConfiguration, PipConfiguration
    ]
def configure(options):
    # type: (Namespace) -> ResolverConfiguration
    """Creates a resolver configuration from options registered by `register`.

    :param options: The resolver configuration options.
    :raise: :class:`InvalidConfigurationError` if the resolver configuration is invalid.
    """
    pex_repository = getattr(options, "pex_repository", None)
    lock = getattr(options, "lock", None)

    if pex_repository:
        # A PEX repository replaces index/find-links resolution entirely, so
        # combining them is a configuration error.
        if options.indexes or options.find_links:
            raise InvalidConfigurationError(
                'The "--pex-repository" option cannot be used together with the "--index" or '
                '"--find-links" options.'
            )
        return PexRepositoryConfiguration(
            pex_repository=pex_repository,
            network_configuration=create_network_configuration(options),
            transitive=options.transitive,
        )

    pip_configuration = create_pip_configuration(options)
    if not lock:
        return pip_configuration
    # Defer actually parsing the lock until the resolve needs it.
    return LockRepositoryConfiguration(
        parse_lock=lambda: parse_lockfile(options),
        pip_configuration=pip_configuration,
    )
def create_pip_configuration(options):
    # type: (Namespace) -> PipConfiguration
    """Creates a Pip configuration from options registered by `register`.

    :param options: The Pip resolver configuration options.
    """
    # Warn (but proceed) when deprecated no-op options were supplied.
    if options.cache_ttl:
        pex_warnings.warn("The --cache-ttl option is deprecated and no longer has any effect.")
    if options.headers:
        pex_warnings.warn("The --header option is deprecated and no longer has any effect.")
    repos_configuration = create_repos_configuration(options)
    return PipConfiguration(
        resolver_version=options.resolver_version,
        repos_configuration=repos_configuration,
        network_configuration=create_network_configuration(options),
        allow_prereleases=options.allow_prereleases,
        allow_wheels=options.allow_wheels,
        allow_builds=options.allow_builds,
        prefer_older_binary=options.prefer_older_binary,
        use_pep517=options.use_pep517,
        build_isolation=options.build_isolation,
        transitive=options.transitive,
        max_jobs=get_max_jobs_value(options),
    )
def create_repos_configuration(options):
    # type: (Namespace) -> ReposConfiguration
    """Creates a repos configuration from options registered by `register_repos_options`.

    :param options: The Pip resolver configuration options.
    """
    # PyPI (when enabled) comes first, then any extra indexes, de-duplicated
    # while preserving order.
    extra_indexes = options.indexes or []
    indexes = OrderedSet(
        [PYPI] + extra_indexes if options.pypi else extra_indexes
    )  # type: OrderedSet[str]
    find_links = OrderedSet(options.find_links or ())  # type: OrderedSet[str]
    return ReposConfiguration.create(indexes=tuple(indexes), find_links=tuple(find_links))
def create_network_configuration(options):
    # type: (Namespace) -> NetworkConfiguration
    """Creates a network configuration from options registered by `register_network_options`.

    :param options: The Pip resolver configuration options.
    """
    return NetworkConfiguration(
        retries=options.retries,
        timeout=options.timeout,
        proxy=options.proxy,
        cert=options.cert,
        client_cert=options.client_cert,
    )
def get_max_jobs_value(options):
    # type: (Namespace) -> int
    """Retrieves the max jobs value from the option registered by `register_max_jobs_option`.

    :param options: The max jobs configuration option.
    """
    # The option is registered with type=int; the cast just informs the type checker.
    return cast(int, options.max_jobs)
def _parse_path_mapping(path_mapping):
# type: (str) -> PathMapping
components = path_mapping.split("|", 2)
if len(components) < 2:
raise ArgumentTypeError(
"A path mapping must be of the form `NAME|PATH` with an optional trailing "
"`|DESCRIPTION`, given: {path_mapping}.\n"
"For example: `FL|/path/to/local/find-links/repo/directory` indicates that find-links "
"requirements or URLs starting with `/path/to/local/find-links/repo/directory` should "
"have that absolute root path replaced with the `${{FL}}` placeholder name.\n"
"Alternatively, you could use the form with a trailing description to make it more "
"clear what value should be substituted for `${{FL}}` when the mapping is later read, "
"e.g.: `FL|/local/path|The local find-links repo path`."
"".format(path_mapping=path_mapping)
)
name, path = components[:2]
description = components[2] if len(components) == 3 else None
return PathMapping(path=path, name=name, description=description)
def get_path_mappings(options):
    # type: (Namespace) -> PathMappings
    """Retrieves the PathMappings value from the options registered by `register_lock_options`.

    :param options: The lock configuration options.
    """
    parsed = tuple(
        _parse_path_mapping(raw_mapping) for raw_mapping in options.path_mappings
    )
    return PathMappings(mappings=parsed)
def parse_lockfile(
    options,  # type: Namespace
    lock_file_path=None,  # type: Optional[str]
):
    # type: (...) -> Union[Lockfile, Error]
    """Parse the lock file at `lock_file_path` (falling back to `options.lock`).

    Returns the parsed Lockfile on success, or an Error enumerating the
    `--path-mapping` values the lock requires but which were not supplied.
    """
    path = lock_file_path or options.lock
    path_mappings = get_path_mappings(options)
    with TRACER.timed("Parsing lock {lockfile}".format(lockfile=path)):
        try:
            return json_codec.load(path, path_mappings=path_mappings)
        except json_codec.PathMappingError as e:
            # Build an actionable error: what was required, what was given,
            # what is still missing, and example fix-up command lines.
            return Error(
                "The lockfile at {path} requires specifying {prefix}"
                "'--path-mapping' {values} for: {required_paths}\n"
                "Given {given_mappings_verbiage}\n"
                "{maybe_path_mappings}"
                "Which left the following path mappings unspecified:\n"
                "{unspecified_paths}\n"
                "\n"
                "To fix, add command line options for:\n{examples}".format(
                    path=path,
                    prefix="" if len(e.required_path_mappings) > 1 else "a ",
                    values="values" if len(e.required_path_mappings) > 1 else "value",
                    required_paths=", ".join(sorted(e.required_path_mappings)),
                    given_mappings_verbiage="the following path mappings:"
                    if path_mappings.mappings
                    else "no path mappings.",
                    maybe_path_mappings="{path_mappings}\n".format(
                        path_mappings="\n".join(
                            sorted(
                                "--path-mapping '{mapping}'".format(
                                    mapping="|".join((mapping.name, mapping.path))
                                )
                                for mapping in path_mappings.mappings
                            )
                        )
                    )
                    if path_mappings.mappings
                    else "",
                    unspecified_paths="\n".join(
                        sorted(
                            (
                                "{path}: {description}".format(path=path, description=description)
                                if description
                                else path
                            )
                            for path, description in e.required_path_mappings.items()
                            if path in e.unspecified_paths
                        )
                    ),
                    examples="\n".join(
                        sorted(
                            "--path-mapping '{path}|<path of {path}>'".format(path=path)
                            for path in e.required_path_mappings
                            if path in e.unspecified_paths
                        )
                    ),
                )
            )
import inspect
import os
import pkgutil
try:
from importlib import reload
except ImportError:
pass
class StepmaniaPlugin(object):
    """Base class for Stepmania server plugins.

    Each `on_*` method is a hook invoked for the matching network packet
    command; the defaults are no-ops so a subclass overrides only the events
    it cares about.
    """

    def __init__(self, server):
        # Keep a handle to the owning server so hooks can interact with it.
        self.server = server

    def on_packet(self, session, serv, packet):
        pass

    def on_nscping(self, session, serv, packet):
        pass

    def on_nscpingr(self, session, serv, packet):
        pass

    def on_nschello(self, session, serv, packet):
        pass

    def on_nscgsr(self, session, serv, packet):
        pass

    def on_nscgon(self, session, serv, packet):
        pass

    def on_nscgsu(self, session, serv, packet):
        pass

    def on_nscsu(self, session, serv, packet):
        pass

    def on_nsccm(self, session, serv, packet):
        pass

    def on_nscrsg(self, session, serv, packet):
        pass

    def on_nsccuul(self, session, serv, packet):
        pass

    def on_nsscsms(self, session, serv, packet):
        pass

    def on_nscuopts(self, session, serv, packet):
        pass

    def on_nssmonl(self, session, serv, packet):
        # Unwrap the SMOnline envelope and dispatch to the handler named after
        # the inner command (e.g. `on_login`); no-op when no handler exists.
        func = getattr(self, "on_%s" % packet["packet"].command.name.lower(), None)
        if not func:
            return None
        return func(session, serv, packet["packet"])

    def on_nscformatted(self, session, serv, packet):
        pass

    def on_nscattack(self, session, serv, packet):
        pass

    def on_xmlpacket(self, session, serv, packet):
        pass

    def on_login(self, session, serv, packet):
        pass

    def on_enterroom(self, session, serv, packet):
        pass

    def on_createroom(self, session, serv, packet):
        pass

    def on_roominfo(self, session, serv, packet):
        pass
class PluginError(Exception):
    """Raised for plugin discovery/loading failures."""
    pass
class PluginManager(list):
    """A list of plugin classes (or, after `init`, instances) discovered by
    dotted module path.

    Modules are located under `directory`, imported, and every class whose
    MRO contains one of the requested plugin class *names* is collected.
    """

    # Directory containing this module; used to resolve plugin packages on disk.
    __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

    def __init__(self, plugin_class, paths=None, directory=None, plugin_file=None, force_reload=False):
        # plugin_class: a class name or list of class names to match against.
        super().__init__()
        if not isinstance(plugin_class, list):
            plugin_class = [plugin_class]
        self.plugin_class = plugin_class
        self.plugin_file = plugin_file
        self.directory = directory
        if paths is None:
            # Discover every module under `directory` when not given explicitly.
            paths = self.all_paths(directory)
        if not paths:
            paths = []
        self.paths = paths
        self.load(force_reload)

    def all_paths(self, directory):
        """Yield the module names found inside the given dotted `directory`."""
        # Drop the leading package component to get an on-disk relative path.
        directory_path = os.path.join(self.__location__, "/".join(directory.split(".")[1:]))
        for _, name, _ in pkgutil.iter_modules([directory_path]):
            yield name

    def load(self, force_reload=False):
        """Clear and repopulate this list with plugin classes from `self.paths`."""
        del self[:]
        for path in self.paths:
            # Build the full dotted import path, skipping unset components.
            fullpath = '.'.join([p for p in (self.directory, path, self.plugin_file) if p is not None])
            self.extend(self.import_plugin(fullpath,
                                           plugin_classes=self.plugin_class,
                                           force_reload=force_reload))

    def init(self, *opt):
        """Replace each stored plugin class with an instance constructed from *opt."""
        for idx, app in enumerate(self):
            self[idx] = app(*opt)

    @staticmethod
    def import_plugin(path, plugin_classes, force_reload=False):
        """Import `path` and return the classes whose ancestry matches `plugin_classes`."""
        if not isinstance(plugin_classes, list):
            plugin_classes = [plugin_classes]
        module = __import__(path, fromlist=plugin_classes)
        if force_reload:
            reload(module)
        apps = []
        for cls in inspect.getmembers(module, inspect.isclass):
            app = getattr(module, cls[0], None)
            if not app:
                continue
            for plugin_class in plugin_classes:
                # Match by class *name* anywhere in the MRO so subclasses qualify.
                if plugin_class in (x.__name__ for x in inspect.getmro(app)):
                    apps.append(app)
        return apps

    @classmethod
    def get_plugin(cls, path, plugin_classes, default=None, force_reload=False):
        """Return the first matching plugin class found in `path`, or `default`."""
        apps = cls.import_plugin(path, plugin_classes, force_reload)
        if not apps:
            return default
        return apps[0]
if __name__ == "__main__":
    # Ad-hoc smoke test: discover, print and instantiate plugins from the
    # example package and the smserver controller package.
    plugins = PluginManager("StepmaniaPlugin", ["example"], "plugins", "plugin")
    plugins.load()
    print(plugins)
    plugins.init("a")
    print(plugins)
    print(PluginManager("StepmaniaController", None, "smserver.controllers"))
print(PluginManager("ChatPlugin", None, "smserver.chat_commands")) | smserver/pluginmanager.py | import inspect
import os
import pkgutil
try:
from importlib import reload
except ImportError:
pass
class StepmaniaPlugin(object):
    """Base class for Stepmania server plugins.

    Each `on_*` method is a hook invoked for the matching network packet
    command; the defaults are no-ops so a subclass overrides only the events
    it cares about.
    """

    def __init__(self, server):
        # Keep a handle to the owning server so hooks can interact with it.
        self.server = server

    def on_packet(self, session, serv, packet):
        pass

    def on_nscping(self, session, serv, packet):
        pass

    def on_nscpingr(self, session, serv, packet):
        pass

    def on_nschello(self, session, serv, packet):
        pass

    def on_nscgsr(self, session, serv, packet):
        pass

    def on_nscgon(self, session, serv, packet):
        pass

    def on_nscgsu(self, session, serv, packet):
        pass

    def on_nscsu(self, session, serv, packet):
        pass

    def on_nsccm(self, session, serv, packet):
        pass

    def on_nscrsg(self, session, serv, packet):
        pass

    def on_nsccuul(self, session, serv, packet):
        pass

    def on_nsscsms(self, session, serv, packet):
        pass

    def on_nscuopts(self, session, serv, packet):
        pass

    def on_nssmonl(self, session, serv, packet):
        # Unwrap the SMOnline envelope and dispatch to the handler named after
        # the inner command (e.g. `on_login`); no-op when no handler exists.
        func = getattr(self, "on_%s" % packet["packet"].command.name.lower(), None)
        if not func:
            return None
        return func(session, serv, packet["packet"])

    def on_nscformatted(self, session, serv, packet):
        pass

    def on_nscattack(self, session, serv, packet):
        pass

    def on_xmlpacket(self, session, serv, packet):
        pass

    def on_login(self, session, serv, packet):
        pass

    def on_enterroom(self, session, serv, packet):
        pass

    def on_createroom(self, session, serv, packet):
        pass

    def on_roominfo(self, session, serv, packet):
        pass
class PluginError(Exception):
    """Raised for plugin discovery/loading failures."""
    pass
class PluginManager(list):
    """A list of plugin classes (or, after `init`, instances) discovered by
    dotted module path.

    Modules are located under `directory`, imported, and every class whose
    MRO contains one of the requested plugin class *names* is collected.
    """

    # Directory containing this module; used to resolve plugin packages on disk.
    __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

    def __init__(self, plugin_class, paths=None, directory=None, plugin_file=None, force_reload=False):
        # plugin_class: a class name or list of class names to match against.
        super().__init__()
        if not isinstance(plugin_class, list):
            plugin_class = [plugin_class]
        self.plugin_class = plugin_class
        self.plugin_file = plugin_file
        self.directory = directory
        if paths is None:
            # Discover every module under `directory` when not given explicitly.
            paths = self.all_paths(directory)
        if not paths:
            paths = []
        self.paths = paths
        self.load(force_reload)

    def all_paths(self, directory):
        """Yield the module names found inside the given dotted `directory`."""
        # Drop the leading package component to get an on-disk relative path.
        directory_path = os.path.join(self.__location__, "/".join(directory.split(".")[1:]))
        for _, name, _ in pkgutil.iter_modules([directory_path]):
            yield name

    def load(self, force_reload=False):
        """Clear and repopulate this list with plugin classes from `self.paths`."""
        del self[:]
        for path in self.paths:
            # Build the full dotted import path, skipping unset components.
            fullpath = '.'.join([p for p in (self.directory, path, self.plugin_file) if p is not None])
            self.extend(self.import_plugin(fullpath,
                                           plugin_classes=self.plugin_class,
                                           force_reload=force_reload))

    def init(self, *opt):
        """Replace each stored plugin class with an instance constructed from *opt."""
        for idx, app in enumerate(self):
            self[idx] = app(*opt)

    @staticmethod
    def import_plugin(path, plugin_classes, force_reload=False):
        """Import `path` and return the classes whose ancestry matches `plugin_classes`."""
        if not isinstance(plugin_classes, list):
            plugin_classes = [plugin_classes]
        module = __import__(path, fromlist=plugin_classes)
        if force_reload:
            reload(module)
        apps = []
        for cls in inspect.getmembers(module, inspect.isclass):
            app = getattr(module, cls[0], None)
            if not app:
                continue
            for plugin_class in plugin_classes:
                # Match by class *name* anywhere in the MRO so subclasses qualify.
                if plugin_class in (x.__name__ for x in inspect.getmro(app)):
                    apps.append(app)
        return apps

    @classmethod
    def get_plugin(cls, path, plugin_classes, default=None, force_reload=False):
        """Return the first matching plugin class found in `path`, or `default`."""
        apps = cls.import_plugin(path, plugin_classes, force_reload)
        if not apps:
            return default
        return apps[0]
if __name__ == "__main__":
    # Ad-hoc smoke test: discover, print and instantiate plugins from the
    # example package and the smserver controller package.
    plugins = PluginManager("StepmaniaPlugin", ["example"], "plugins", "plugin")
    plugins.load()
    print(plugins)
    plugins.init("a")
    print(plugins)
    print(PluginManager("StepmaniaController", None, "smserver.controllers"))
print(PluginManager("ChatPlugin", None, "smserver.chat_commands")) | 0.326916 | 0.095307 |
import os # Used to validate filepaths
import sys # Used to fix arglengths of 0 for CLI
import logging # Used to log (obviously)
from copy import deepcopy
from typing import Generator # Used to typehint generator returns
from secrets import token_hex # Used to produce reliably random hex values
# External Dependencies
from docopt import docopt # Used to handle argument parsing from the entrypoint
usage = """Used to generate one-time pads 🤐, by default in emojis.
Usage:
otp_emoji [-h] [-v]
otp_emoji encrypt <text> [-s] [-o OUTPUT_PATH] [-p PAD_PATH]
otp_emoji decrypt <ciphertext> <pad> [-s] [-o OUTPUT_PATH]
Options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-o OUTPUT_PATH, --output OUTPUT_PATH
a directory of where to write pad/plaintext/ciphertext
output
-p PAD_PATH, --pad PAD_PATH
allows you to specify a pre-created one time pad
-s, --stream print result to output stream (stdout)
"""
cipher_chars = [
"🤗", "🙄", "🤮", "🤧", "🥵", "🙏", "👅", "🍒", "🍆", "🍇", "🍌", "🍋", "🌵", "🍑", "👀",
"👨💻", "👨🎤", "🧛", "🧜♀️", "🧝♂️", "🧞", "👨🦼", "🧗", "⛷", "🐶", "🦊", "🦄", "🐊", "🐢", "🦜", "🦉",
"🐙", "🐳", "🐉", "🦖", "🦂", "🥐", "🥨", "🥯", "🥞", "🍔", "🍕", "🧈", "🍜", "🦀", "🦞", "🦑",
"🏺", "🚄", "🚔", "🦼", "🚀", "🛸", "🌚", "❄", "🌊", "🥌", "♟", "🦺", "🎩", "🎷", "💻", "💾",
"🤏", "🤘", "🤞", "🤙", "🖕", "👊", "🤛", "🙌", "👏", "🤳", "💪", "👂", "👁", "👨🦰", "👨🦱", "🧔", "👩🦳",
"👩", "👩🦲", "👴", "🙅", "🙆", "💁♂️", "🙋♀️", "🧏♂️", "🙇", "🤦", "🤦♂️", "🤦♀️", "🤷", "🤷♂️", "🤷♀️", "👨🎓", "👨🏫",
"👨🌾", "👨🔧", "👩🏭", "👩💼", "👨🔬", "👩💻", "👨🎨", "👩✈️", "👮", "🕵", "💂", "👷", "🎅", "🦸", "🧙", "🧚", "💇", "👨🦯",
"👯", "🤺", "🏇", "🏌", "⛹", "🏋", "🚴", "🤸", "🤽", "🤼", "🤹", "🧘", "🛌", "👨👩👦👦", "👨👩👧👧", "👨👨👧👦", "👩👩👧👦", "👩👩👧👧",
"🤎", "🖤", "💜", "💙", "💚", "💛", "🧡", "💯", "💥", "💦", "💣", "💨", "💤", "👋", "🖐", "🖖", "🏄", "🚣",
"🏊", "🐿", "🐹", "🐀", "🦇", "🦥", "🦦", "🦨", "🦘", "🦃", "🐔", "🐥", "🐧", "🕊", "🦅", "🦆", "🦢", "🐌",
"🦋", "🐛", "🐝", "🐜", "🦗", "🐞", "🕷", "💮", "🏵", "🌷", "🌱", "🌿", "🍂", "🥑", "🌶", "🥙", "🍳", "🥘", "🍿",
"🍺", "🍻", "🥃", "🍽", "🏔", "🏛", "🏗", "🏰", "🗽", "🗼", "⛩", "🕋", "🛕", "⛲", "🌁", "♨", "🌉", "🎡", "🛤", "⛽",
"⛵", "🚤", "✈", "🚁", "🛎", "🧳", "🌑", "🌒", "🌓", "🌔", "🌕", "🌛", "🌜", "🪐", "⭐", "🌟", "🌌", "🌪", "🌀", "⛱",
"⚡", "☃", "🔥", "💧", "🌊", "🎎", "🎍", "🧧", "🥊", "🥅", "🎣", "🤿", "🎿", "🥌", "🎱", "🎮", "🎰", "🎲", "♠", "♟",
"🎴", "🧵", "🥼", "👔", "🧥", "🥾", "🖨", "🆘"
]
emoji_map = {
"🤗" : 0,"🙄" : 1,"🤮" : 2,"🤧" : 3,"🥵" : 4,"🙏" : 5,"👅" : 6,"🍒" : 7,"🍆" : 8,"🍇" : 9,"🍌" : 10,
"🍋" : 11,"🌵" : 12,"🍑" : 13,"👀" : 14,"👨💻" : 15,"👨🎤" : 16,"🧛" : 17,"🧜♀️" : 18,"🧝♂️" : 19,"🧞" : 20,
"👨🦼" : 21,"🧗" : 22,"⛷" : 23,"🐶" : 24,"🦊" : 25,"🦄" : 26,"🐊" : 27,"🐢" : 28,"🦜" : 29,"🦉" : 30,
"🐙" : 31,"🐳" : 32,"🐉" : 33,"🦖" : 34,"🦂" : 35,"🥐" : 36,"🥨" : 37,"🥯" : 38,"🥞" : 39,"🍔" : 40,
"🍕" : 41,"🧈" : 42,"🍜" : 43,"🦀" : 44,"🦞" : 45,"🦑" : 46,"🏺" : 47,"🚄" : 48,"🚔" : 49,"🦼" : 50,
"🚀" : 51,"🛸" : 52,"🌚" : 53,"❄" : 54,"🌊" : 232,"🥌" : 241,"♟" : 247,"🦺" : 58,"🎩" : 59,"🎷" : 60,
"💻" : 61,"💾" : 62,"🤏" : 63,"🤘" : 64,"🤞" : 65,"🤙" : 66,"🖕" : 67,"👊" : 68,"🤛" : 69,"🙌" : 70,
"👏" : 71,"🤳" : 72,"💪" : 73,"👂" : 74,"👁" : 75,"👨🦰" : 76,"👨🦱" : 77,"🧔" : 78,"👩🦳" : 79,"👩" : 80,
"👩🦲" : 81,"👴" : 82,"🙅" : 83,"🙆" : 84,"💁♂️" : 85,"🙋♀️" : 86,"🧏♂️" : 87,"🙇" : 88,"🤦" : 89,"🤦♂️" : 90,
"🤦♀️" : 91,"🤷" : 92,"🤷♂️" : 93,"🤷♀️" : 94,"👨🎓" : 95,"👨🏫" : 96,"👨🌾" : 97,"👨🔧" : 98,"👩🏭" : 99,"👩💼" : 100,
"👨🔬" : 101,"👩💻" : 102,"👨🎨" : 103,"👩✈️" : 104,"👮" : 105,"🕵" : 106,"💂" : 107,"👷" : 108,"🎅" : 109,"🦸" : 110,
"🧙" : 111,"🧚" : 112,"💇" : 113,"👨🦯" : 114,"👯" : 115,"🤺" : 116,"🏇" : 117,"🏌" : 118,"⛹" : 119,"🏋" : 120,
"🚴" : 121,"🤸" : 122,"🤽" : 123,"🤼" : 124,"🤹" : 125,"🧘" : 126,"🛌" : 127,"👨👩👦👦" : 128,"👨👩👧👧" : 129,"👨👨👧👦" : 130,
"👩👩👧👦" : 131,"👩👩👧👧" : 132,"🤎" : 133,"🖤" : 134,"💜" : 135,"💙" : 136,"💚" : 137,"💛" : 138,"🧡" : 139,"💯" : 140,
"💥" : 141,"💦" : 142,"💣" : 143,"💨" : 144,"💤" : 145,"👋" : 146,"🖐" : 147,"🖖" : 148,"🏄" : 149,"🚣" : 150,
"🏊" : 151,"🐿" : 152,"🐹" : 153,"🐀" : 154,"🦇" : 155,"🦥" : 156,"🦦" : 157,"🦨" : 158,"🦘" : 159,"🦃" : 160,
"🐔" : 161,"🐥" : 162,"🐧" : 163,"🕊" : 164,"🦅" : 165,"🦆" : 166,"🦢" : 167,"🐌" : 168,"🦋" : 169,"🐛" : 170,
"🐝" : 171,"🐜" : 172,"🦗" : 173,"🐞" : 174,"🕷" : 175,"💮" : 176,"🏵" : 177,"🌷" : 178,"🌱" : 179,"🌿" : 180,
"🍂" : 181,"🥑" : 182,"🌶" : 183,"🥙" : 184,"🍳" : 185,"🥘" : 186,"🍿" : 187,"🍺" : 188,"🍻" : 189,"🥃" : 190,
"🍽" : 191,"🏔" : 192,"🏛" : 193,"🏗" : 194,"🏰" : 195,"🗽" : 196,"🗼" : 197,"⛩" : 198,"🕋" : 199,"🛕" : 200,
"⛲" : 201,"🌁" : 202,"♨" : 203,"🌉" : 204,"🎡" : 205,"🛤" : 206,"⛽" : 207,"⛵" : 208,"🚤" : 209,"✈" : 210,
"🚁" : 211,"🛎" : 212,"🧳" : 213,"🌑" : 214,"🌒" : 215,"🌓" : 216,"🌔" : 217,"🌕" : 218,"🌛" : 219,"🌜" : 220,
"🪐" : 221,"⭐" : 222,"🌟" : 223,"🌌" : 224,"🌪" : 225,"🌀" : 226,"⛱" : 227,"⚡" : 228,"☃" : 229,"🔥" : 230,
"💧" : 231,"🎎" : 233,"🎍" : 234,"🧧" : 235,"🥊" : 236,"🥅" : 237,"🎣" : 238,"🤿" : 239,"🎿" : 240,"🎱" : 242,
"🎮" : 243,"🎰" : 244,"🎲" : 245,"♠" : 246,"🎴" : 248,"🧵" : 249,"🥼" : 250,"👔" : 251,"🧥" : 252,"🥾" : 253,
"🖨" : 254,"🆘" : 255
}
def generate_otp(length:int) -> Generator:
    """Generates a one time pad of emojis based on input length.

    Each yielded item is one randomly chosen pad emoji followed by the `|`
    delimiter used throughout this module to separate pad characters.

    Parameters
    ----------
    length:(int)
        The amount of random emoji's to generate.

    Yields
    ------
    str:
        The next pipe-terminated pad character.

    Examples
    --------
    Generating a 10 character otp

    ```
    from otp_emoji import generate_otp

    for character in generate_otp(10):  # Iterate through resulting generator
        print(character)
    ```
    """
    for _ in range(length):
        # One random byte (two hex digits) selects a pad emoji from the table.
        index = int(token_hex(1), 16)
        yield cipher_chars[index] + "|"
def encrypt(input_text:str, pad:bool=False, pad_path:str = False, ciphertext_path:str = False) -> tuple:
    """Encrypts 🔒 text using provided pad, or generates one of the same length.

    Parameters
    ----------
    input_text:(str)
        The text you would like to encrypt.

    pad:(bool|str)
        If pad is specified (a pipe-delimited emoji string) it will be used to
        encrypt; if left False it will be generated for you.

    pad_path:(bool|str)
        If specified then it will be the path the pad is written to.

    ciphertext_path:(bool|str)
        If specified then it will be the path the ciphertext is written to.

    Returns
    ------
    tuple[str,str]:
        The ciphertext, and the onetime pad

    Examples
    --------
    Encrypting a 1984 (George Orwell) quote and saving
    the resulting ciphertext and path to files.

    ```
    from otp_emoji import encrypt

    text = 'Who controls the past controls the future. Who controls the present controls the past.'

    ciphertext, pad = encrypt(text, pad_path='./pad.txt', ciphertext_path='./ciphertext.txt')
    ```
    """
    print("🔒 Encrypting Text 🔒")
    logging.debug(f"input_text = {input_text}")
    logging.debug(f"pad={pad}")
    logging.debug(f"pad_path={pad_path}")
    logging.debug(f"ciphertext_path={ciphertext_path}")
    ciphertext = ""
    if not pad:
        # No pad supplied: generate one while XOR-ing each plaintext character
        # with the pad character's mapped value.
        pad = ""
        for count, character in enumerate(generate_otp(len(input_text))):
            logging.debug(character)
            pad += character
            character = character[0:-1]  # remove | delimiter from pad character
            logging.debug(f"{input_text[count]} ^ {character}({emoji_map[character]})")
            # Delimit ciphertext by pipes and append
            ciphertext += cipher_chars[(ord(input_text[count]) ^ emoji_map[character])] + "|"
        logging.debug(f"pad={pad}")
    else:  # If custom pad is provided
        # Fix: this branch previously referenced an undefined loop counter
        # (`count`), indexed emoji_map with a (char, pad_char) tuple, printed
        # debug output to stdout, and rebound `pad` to a list — which broke the
        # `pad.encode("utf-8")` write below. Keep `pad` as the original string
        # and iterate plaintext/pad characters in lockstep instead.
        pad_characters = pad.split("|")
        for plain_character, pad_character in zip(input_text, pad_characters):
            logging.debug(f"{plain_character} ^ {pad_character}({emoji_map[pad_character]})")
            # Delimit ciphertext by pipes and append
            ciphertext += cipher_chars[(ord(plain_character) ^ emoji_map[pad_character])] + "|"
    ciphertext = ciphertext[0:-1]  # Strip the trailing pipe delimiter

    if pad_path:
        with open(pad_path, "wb") as otp_file:
            otp_file.write(pad.encode("utf-8"))
        logging.info(f"One-time-pad text written to: {pad_path}")

    if ciphertext_path:
        with open(ciphertext_path, "wb") as encrypted_message:
            encrypted_message.write(ciphertext.encode("utf-8"))
        logging.info(f"Encrypted text written to: {ciphertext_path}")

    return ciphertext, pad
def decrypt(cipher_text:str, pad:str, text_path:str = False) -> str:
    """Decrypts 🔓 text using provided pad.

    Parameters
    ----------
    cipher_text:(str)
        The pipe-delimited emoji ciphertext you would like to decrypt.

    pad:(str)
        The pipe-delimited pad that corresponds with the ciphertext.

    text_path:(bool|str)
        If specified then it will be the path the decrypted text is written to.

    Returns
    ------
    str:
        The decrypted text

    Examples
    --------
    Decrypting text from files produced by encrypt().

    ```
    from otp_emoji import decrypt

    with open('pad.txt') as pad_file:
        pad = pad_file.read()

    with open('ciphertext.txt') as ciphertext_file:
        ciphertext = ciphertext_file.read()

    print( decrypt(ciphertext, pad) )
    ```
    """
    # Both inputs are pipe-delimited sequences of pad/cipher emojis.
    cipher_text = cipher_text.split("|")
    pad = pad.split("|")
    print("👀 Decrypting text 👀")
    logging.debug(f"cipher_text={cipher_text}")
    logging.debug(f"pad={pad}")

    plaintext = ""
    for character in zip(cipher_text, pad):  # Walk the two streams in lockstep
        logging.debug(f"Character= {character[0]} {character[1]}")
        logging.debug(f"{character[0]} ^ {character[1]}")
        # XOR the mapped values back into the original code point.
        plaintext += chr(emoji_map[character[0]] ^ emoji_map[character[1]])

    if text_path:
        with open(os.path.abspath(text_path), "wb") as encrypted_message:
            encrypted_message.write(plaintext.encode("utf-8"))
        logging.info(f"Decrypted text written to: {text_path}")

    return plaintext
def main() -> None:
    """otp_emoji script entrypoint; handles logic for the otp_emoji command"""
    if len(sys.argv) == 1: # If no arguments are provided
        print(usage) # Print helptext
        exit() # Exit program
    args = docopt(usage, version="otp_emoji V 1.3.0")
    # ================== Encrypt Argument Parsing ==================
    if args["encrypt"]:
        # <text> may be either a literal string or a file path;
        # paths are replaced in-place with the file's contents.
        if os.path.isfile(args["<text>"]):
            with open(args["<text>"], encoding="utf-8") as text_file:
                args["<text>"] = text_file.read()
        # Fall back to the current directory when --output is absent or invalid.
        if args["--output"]:
            if not os.path.isdir(args["--output"]): # If no valid output directory specified
                args["--output"] = os.curdir
        else:
            args["--output"] = os.curdir
        # Always writes pad.txt / ciphertext.txt into the output directory.
        ciphertext, pad = encrypt(args["<text>"], args["--pad"], pad_path=f"{args['--output']}{os.sep}pad.txt", ciphertext_path=f"{args['--output']}{os.sep}ciphertext.txt")
        if args["--stream"]: # Echo results to stdout on request
            print(f"Ciphertext: {ciphertext}")
            print(f"Pad: {pad}")
    # ================== Decrypt Argument Parsing ==================
    if args["decrypt"]:
        # For decryption both positional arguments are file paths.
        with open(args["<ciphertext>"], encoding="utf-8") as ciphertext_file:
            args["<ciphertext>"] = ciphertext_file.read()
        with open(args["<pad>"], encoding="utf-8") as pad_file:
            args["<pad>"] = pad_file.read()
        if args["--output"]:
            if not os.path.isdir(args["--output"]): # If no valid output directory specified
                args["--output"] = os.curdir
                print(f"Provided output path was not valid using {os.curdir} instead")
        else:
            args["--output"] = os.curdir
        plaintext = decrypt(args["<ciphertext>"], args["<pad>"], text_path=f"{args['--output']}{os.sep}plaintext.txt")
        if args["--stream"]:
            print(plaintext)
if __name__ == "__main__":
main() # Runs the otp_emoji command | otp_emoji.py | import os # Used to validate filepaths
import sys # Used to fix arglengths of 0 for CLI
import logging # Used to log (obviously)
from copy import deepcopy
from typing import Generator # Used to typehint generator returns
from secrets import token_hex # Used to produce reliably random hex values
# External Dependencies
from docopt import docopt # Used to handle argument parsing from the entrypoint
usage = """Used to generate one-time pads 🤐, by default in emojis.
Usage:
otp_emoji [-h] [-v]
otp_emoji encrypt <text> [-s] [-o OUTPUT_PATH] [-p PAD_PATH]
otp_emoji decrypt <ciphertext> <pad> [-s] [-o OUTPUT_PATH]
Options:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-o OUTPUT_PATH, --output OUTPUT_PATH
a directory of where to write pad/plaintext/ciphertext
output
-p PAD_PATH, --pad PAD_PATH
allows you to specify a pre-created one time pad
-s, --stream print result to output stream (stdout)
"""
cipher_chars = [
"🤗", "🙄", "🤮", "🤧", "🥵", "🙏", "👅", "🍒", "🍆", "🍇", "🍌", "🍋", "🌵", "🍑", "👀",
"👨💻", "👨🎤", "🧛", "🧜♀️", "🧝♂️", "🧞", "👨🦼", "🧗", "⛷", "🐶", "🦊", "🦄", "🐊", "🐢", "🦜", "🦉",
"🐙", "🐳", "🐉", "🦖", "🦂", "🥐", "🥨", "🥯", "🥞", "🍔", "🍕", "🧈", "🍜", "🦀", "🦞", "🦑",
"🏺", "🚄", "🚔", "🦼", "🚀", "🛸", "🌚", "❄", "🌊", "🥌", "♟", "🦺", "🎩", "🎷", "💻", "💾",
"🤏", "🤘", "🤞", "🤙", "🖕", "👊", "🤛", "🙌", "👏", "🤳", "💪", "👂", "👁", "👨🦰", "👨🦱", "🧔", "👩🦳",
"👩", "👩🦲", "👴", "🙅", "🙆", "💁♂️", "🙋♀️", "🧏♂️", "🙇", "🤦", "🤦♂️", "🤦♀️", "🤷", "🤷♂️", "🤷♀️", "👨🎓", "👨🏫",
"👨🌾", "👨🔧", "👩🏭", "👩💼", "👨🔬", "👩💻", "👨🎨", "👩✈️", "👮", "🕵", "💂", "👷", "🎅", "🦸", "🧙", "🧚", "💇", "👨🦯",
"👯", "🤺", "🏇", "🏌", "⛹", "🏋", "🚴", "🤸", "🤽", "🤼", "🤹", "🧘", "🛌", "👨👩👦👦", "👨👩👧👧", "👨👨👧👦", "👩👩👧👦", "👩👩👧👧",
"🤎", "🖤", "💜", "💙", "💚", "💛", "🧡", "💯", "💥", "💦", "💣", "💨", "💤", "👋", "🖐", "🖖", "🏄", "🚣",
"🏊", "🐿", "🐹", "🐀", "🦇", "🦥", "🦦", "🦨", "🦘", "🦃", "🐔", "🐥", "🐧", "🕊", "🦅", "🦆", "🦢", "🐌",
"🦋", "🐛", "🐝", "🐜", "🦗", "🐞", "🕷", "💮", "🏵", "🌷", "🌱", "🌿", "🍂", "🥑", "🌶", "🥙", "🍳", "🥘", "🍿",
"🍺", "🍻", "🥃", "🍽", "🏔", "🏛", "🏗", "🏰", "🗽", "🗼", "⛩", "🕋", "🛕", "⛲", "🌁", "♨", "🌉", "🎡", "🛤", "⛽",
"⛵", "🚤", "✈", "🚁", "🛎", "🧳", "🌑", "🌒", "🌓", "🌔", "🌕", "🌛", "🌜", "🪐", "⭐", "🌟", "🌌", "🌪", "🌀", "⛱",
"⚡", "☃", "🔥", "💧", "🌊", "🎎", "🎍", "🧧", "🥊", "🥅", "🎣", "🤿", "🎿", "🥌", "🎱", "🎮", "🎰", "🎲", "♠", "♟",
"🎴", "🧵", "🥼", "👔", "🧥", "🥾", "🖨", "🆘"
]
emoji_map = {
"🤗" : 0,"🙄" : 1,"🤮" : 2,"🤧" : 3,"🥵" : 4,"🙏" : 5,"👅" : 6,"🍒" : 7,"🍆" : 8,"🍇" : 9,"🍌" : 10,
"🍋" : 11,"🌵" : 12,"🍑" : 13,"👀" : 14,"👨💻" : 15,"👨🎤" : 16,"🧛" : 17,"🧜♀️" : 18,"🧝♂️" : 19,"🧞" : 20,
"👨🦼" : 21,"🧗" : 22,"⛷" : 23,"🐶" : 24,"🦊" : 25,"🦄" : 26,"🐊" : 27,"🐢" : 28,"🦜" : 29,"🦉" : 30,
"🐙" : 31,"🐳" : 32,"🐉" : 33,"🦖" : 34,"🦂" : 35,"🥐" : 36,"🥨" : 37,"🥯" : 38,"🥞" : 39,"🍔" : 40,
"🍕" : 41,"🧈" : 42,"🍜" : 43,"🦀" : 44,"🦞" : 45,"🦑" : 46,"🏺" : 47,"🚄" : 48,"🚔" : 49,"🦼" : 50,
"🚀" : 51,"🛸" : 52,"🌚" : 53,"❄" : 54,"🌊" : 232,"🥌" : 241,"♟" : 247,"🦺" : 58,"🎩" : 59,"🎷" : 60,
"💻" : 61,"💾" : 62,"🤏" : 63,"🤘" : 64,"🤞" : 65,"🤙" : 66,"🖕" : 67,"👊" : 68,"🤛" : 69,"🙌" : 70,
"👏" : 71,"🤳" : 72,"💪" : 73,"👂" : 74,"👁" : 75,"👨🦰" : 76,"👨🦱" : 77,"🧔" : 78,"👩🦳" : 79,"👩" : 80,
"👩🦲" : 81,"👴" : 82,"🙅" : 83,"🙆" : 84,"💁♂️" : 85,"🙋♀️" : 86,"🧏♂️" : 87,"🙇" : 88,"🤦" : 89,"🤦♂️" : 90,
"🤦♀️" : 91,"🤷" : 92,"🤷♂️" : 93,"🤷♀️" : 94,"👨🎓" : 95,"👨🏫" : 96,"👨🌾" : 97,"👨🔧" : 98,"👩🏭" : 99,"👩💼" : 100,
"👨🔬" : 101,"👩💻" : 102,"👨🎨" : 103,"👩✈️" : 104,"👮" : 105,"🕵" : 106,"💂" : 107,"👷" : 108,"🎅" : 109,"🦸" : 110,
"🧙" : 111,"🧚" : 112,"💇" : 113,"👨🦯" : 114,"👯" : 115,"🤺" : 116,"🏇" : 117,"🏌" : 118,"⛹" : 119,"🏋" : 120,
"🚴" : 121,"🤸" : 122,"🤽" : 123,"🤼" : 124,"🤹" : 125,"🧘" : 126,"🛌" : 127,"👨👩👦👦" : 128,"👨👩👧👧" : 129,"👨👨👧👦" : 130,
"👩👩👧👦" : 131,"👩👩👧👧" : 132,"🤎" : 133,"🖤" : 134,"💜" : 135,"💙" : 136,"💚" : 137,"💛" : 138,"🧡" : 139,"💯" : 140,
"💥" : 141,"💦" : 142,"💣" : 143,"💨" : 144,"💤" : 145,"👋" : 146,"🖐" : 147,"🖖" : 148,"🏄" : 149,"🚣" : 150,
"🏊" : 151,"🐿" : 152,"🐹" : 153,"🐀" : 154,"🦇" : 155,"🦥" : 156,"🦦" : 157,"🦨" : 158,"🦘" : 159,"🦃" : 160,
"🐔" : 161,"🐥" : 162,"🐧" : 163,"🕊" : 164,"🦅" : 165,"🦆" : 166,"🦢" : 167,"🐌" : 168,"🦋" : 169,"🐛" : 170,
"🐝" : 171,"🐜" : 172,"🦗" : 173,"🐞" : 174,"🕷" : 175,"💮" : 176,"🏵" : 177,"🌷" : 178,"🌱" : 179,"🌿" : 180,
"🍂" : 181,"🥑" : 182,"🌶" : 183,"🥙" : 184,"🍳" : 185,"🥘" : 186,"🍿" : 187,"🍺" : 188,"🍻" : 189,"🥃" : 190,
"🍽" : 191,"🏔" : 192,"🏛" : 193,"🏗" : 194,"🏰" : 195,"🗽" : 196,"🗼" : 197,"⛩" : 198,"🕋" : 199,"🛕" : 200,
"⛲" : 201,"🌁" : 202,"♨" : 203,"🌉" : 204,"🎡" : 205,"🛤" : 206,"⛽" : 207,"⛵" : 208,"🚤" : 209,"✈" : 210,
"🚁" : 211,"🛎" : 212,"🧳" : 213,"🌑" : 214,"🌒" : 215,"🌓" : 216,"🌔" : 217,"🌕" : 218,"🌛" : 219,"🌜" : 220,
"🪐" : 221,"⭐" : 222,"🌟" : 223,"🌌" : 224,"🌪" : 225,"🌀" : 226,"⛱" : 227,"⚡" : 228,"☃" : 229,"🔥" : 230,
"💧" : 231,"🎎" : 233,"🎍" : 234,"🧧" : 235,"🥊" : 236,"🥅" : 237,"🎣" : 238,"🤿" : 239,"🎿" : 240,"🎱" : 242,
"🎮" : 243,"🎰" : 244,"🎲" : 245,"♠" : 246,"🎴" : 248,"🧵" : 249,"🥼" : 250,"👔" : 251,"🧥" : 252,"🥾" : 253,
"🖨" : 254,"🆘" : 255
}
def generate_otp(length:int) -> Generator:
    """Generates a one time pad of emojis based on input length.

    Parameters
    ----------
    length:(int)
        The amount of random emojis to generate.

    Yields
    ------
    str:
        The next pad character, suffixed with the "|" delimiter.

    Examples
    --------
    Generating a 10 character otp
    ```
    from otp_emoji import generate_otp

    otp = generate_otp(10)

    for character in otp: # Iterate through resulting generator
        print(character) # Prints: 🙏🧗🧛👨🎤🎩🥯🧛🙄🏺🧞
    ```
    """
    # Idiom fix: the loop index was named `digit` but never used.
    for _ in range(length):
        # token_hex(1) is a cryptographically random byte (0-255), which
        # indexes directly into the 256-entry emoji alphabet.
        hex_value = int(token_hex(1), 16)
        yield cipher_chars[hex_value] + "|"
def encrypt(input_text:str, pad:bool=False, pad_path:str = False, ciphertext_path:str = False) -> tuple:
    """Encrypts 🔒 text using provided pad, or generates one of the same length.

    Parameters
    ----------
    input_text:(str)
        The text you would like to encrypt.
    pad:(bool|str)
        If pad is specified it will be used to encrypt
        if left False it will be generated for you.
    pad_path:(bool|str)
        If specified then it will be the path the pad is
        written to.
    ciphertext_path:(bool|str)
        If specified then it will be the path the ciphertext
        is written to.

    Returns
    ------
    tuple[str,str]:
        The ciphertext, and the onetime pad

    Examples
    --------
    Encrypting a 1984 (George Orwell) quote and saving
    the resulting ciphertext and path to files.
    ```
    from otp_emoji import encrypt

    text = 'Who controls the past controls the future. Who controls the present controls the past.'

    # Creates ciphertext and pad and saves them in current directory as pad.txt and ciphertext.txt respectively
    ciphertext, pad = encrypt(text, pad_path='./pad.txt', ciphertext_path='./ciphertext.txt')
    ```
    """
    print("🔒 Encrypting Text 🔒")
    logging.debug(f"input_text = {input_text}")
    logging.debug(f"pad={pad}")
    logging.debug(f"pad_path={pad_path}")
    logging.debug(f"ciphertext_path={ciphertext_path}")
    ciphertext = ""
    if not pad:  # No pad supplied: generate one while encrypting
        pad = ""
        for count, character in enumerate(generate_otp(len(input_text))):
            logging.debug(character)
            pad += character  # pad characters arrive with their "|" delimiter
            character = character[0:-1] # remove | delimiter from pad character
            logging.debug(f"{input_text[count]} ^ {character}({emoji_map[character]})")
            # XOR the plaintext byte with the pad byte, map back to an emoji
            ciphertext += cipher_chars[ord(input_text[count]) ^ emoji_map[character]] + "|"
        logging.debug(f"pad={pad}")
    else: # If custom pad is provided
        # BUG FIX: this branch previously indexed input_text with an undefined
        # loop variable `count` (NameError) and looked a (plaintext, pad)
        # *tuple* up in emoji_map (KeyError), so any call with a custom pad
        # crashed.  It also rebound `pad` to a list, which would have broken
        # pad.encode() below.  The original pad string is now kept intact and
        # iterated via a separate token list; the stray debug print() is
        # routed through logging.debug for consistency with the other branch.
        pad_characters = pad.split("|")
        for plain_char, pad_char in zip(input_text, pad_characters):
            logging.debug(f"Character= {plain_char} {pad_char}")
            logging.debug(f"{plain_char} ^ {pad_char}({emoji_map[pad_char]})")
            ciphertext += cipher_chars[ord(plain_char) ^ emoji_map[pad_char]] + "|"
    ciphertext = ciphertext[0:-1]  # drop the trailing "|" delimiter
    if pad_path:
        with open(pad_path, "wb") as otp_file:
            otp_file.write(pad.encode("utf-8"))
        logging.info(f"One-time-pad text written to: {pad_path}")
    if ciphertext_path:
        with open(ciphertext_path, "wb") as encrypted_message:
            encrypted_message.write(ciphertext.encode("utf-8"))
        logging.info(f"Encrypted text written to: {ciphertext_path}")
    return ciphertext, pad
def decrypt(cipher_text:str, pad:str, text_path:str = False) -> str:
    """Decrypts 🔓 text using provided pad.

    Parameters
    ----------
    cipher_text:(str)
        The text you would like to decrypt.
    pad:(str)
        The pad that corresponds with the ciphertext.
    text_path:(bool|str)
        If specified then it will be the path the decrypted
        text is written to.

    Returns
    ------
    str:
        The decrypted text

    Examples
    --------
    Encrypting some text from files found in the encrypt() example.
    ```
    from otp_emoji import decrypt

    pad = ''
    ciphertext = ''

    with open('pad.txt') as pad_file:
        pad = pad_file.read()

    with open('ciphertext.txt') as ciphertext_file:
        ciphertext = ciphertext_file.read()

    print( decrypt(ciphertext, pad) ) # Prints: 'Who controls the past controls the future. Who controls the present controls the past.'
    ```
    """
    cipher_text = cipher_text.split("|") # Split ciphertext by pipes
    pad = pad.split("|") # Split pad by pipes
    print("👀 Decrypting text 👀")
    plaintext = ""
    logging.debug(f"cipher_text={cipher_text}")
    logging.debug(f"pad={pad}")
    # NOTE(review): zip() stops at the shorter sequence, so a pad shorter than
    # the ciphertext silently truncates the output — confirm this is intended.
    for character in zip(cipher_text, pad): # Use pad to decrypt each character
        logging.debug(f"Character= {character[0]} {character[1]}")
        decrypted_value = ""
        logging.debug(f"{character[0]} ^ {character[1]}")
        # XOR the byte values of the two emojis and map back to a character.
        decrypted_value += chr(emoji_map[character[0]] ^ emoji_map[character[1]])
        plaintext += decrypted_value
    if text_path:
        # Binary mode + explicit utf-8 encode keeps the output emoji-safe.
        with open(os.path.abspath(text_path), "wb") as encrypted_message:
            encrypted_message.write(plaintext.encode("utf-8"))
        logging.info(f"Decrypted text written to: {text_path}")
    return plaintext
def main() -> None:
    """otp_emoji script entrypoint; handles logic for the otp_emoji command"""
    # With no arguments docopt would error out, so show the help text instead.
    if len(sys.argv) == 1:
        print(usage)
        exit()
    arguments = docopt(usage, version="otp_emoji V 1.3.0")
    # ================== Encrypt Argument Parsing ==================
    if arguments["encrypt"]:
        text = arguments["<text>"]
        # <text> may be a path; if so, substitute the file's contents.
        if os.path.isfile(text):
            with open(text, encoding="utf-8") as text_file:
                text = text_file.read()
        out_dir = arguments["--output"]
        if not out_dir:
            out_dir = os.curdir
        elif not os.path.isdir(out_dir):  # invalid directory: fall back
            out_dir = os.curdir
        ciphertext, pad = encrypt(text, arguments["--pad"], pad_path=f"{out_dir}{os.sep}pad.txt", ciphertext_path=f"{out_dir}{os.sep}ciphertext.txt")
        if arguments["--stream"]:
            print(f"Ciphertext: {ciphertext}")
            print(f"Pad: {pad}")
    # ================== Decrypt Argument Parsing ==================
    if arguments["decrypt"]:
        # Both positional arguments are file paths for decryption.
        with open(arguments["<ciphertext>"], encoding="utf-8") as ciphertext_file:
            ciphertext = ciphertext_file.read()
        with open(arguments["<pad>"], encoding="utf-8") as pad_file:
            pad = pad_file.read()
        out_dir = arguments["--output"]
        if not out_dir:
            out_dir = os.curdir
        elif not os.path.isdir(out_dir):  # invalid directory: fall back, warn
            out_dir = os.curdir
            print(f"Provided output path was not valid using {os.curdir} instead")
        plaintext = decrypt(ciphertext, pad, text_path=f"{out_dir}{os.sep}plaintext.txt")
        if arguments["--stream"]:
            print(plaintext)
if __name__ == "__main__":
main() # Runs the otp_emoji command | 0.32896 | 0.337395 |
from typing import List
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import torch.nn.functional as F
from ..utils.generic_utils import to_cuda
from ..utils.constants import VERY_SMALL_NUMBER
def dropout(x, drop_prob, shared_axes=None, training=False):
    """
    Apply inverted dropout to ``x``, optionally sharing the mask along axes.

    Parameters
    ----------
    x: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)``
    drop_prob: float or None
        Probability of zeroing each element; ``0`` or ``None`` disables dropout.
    shared_axes: list of int, optional
        Axes along which one mask value is broadcast (mask size 1 there).
        Defaults to no shared axes.  (Was a mutable ``[]`` default — fixed.)
    training: bool
        Dropout is only applied while training; otherwise ``x`` is returned.

    Returns
    -------
    output: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, ..., num_timesteps, embedding_dim)`` with dropout applied.
    """
    # No-op outside training or when dropout is disabled.  (`is None` replaces
    # the previous `== None` comparison.)
    if drop_prob == 0 or drop_prob is None or (not training):
        return x
    sz = list(x.size())
    for i in (shared_axes or []):
        sz[i] = 1  # broadcast a single mask value across this axis
    # Inverted dropout: scale kept activations by 1/(1-p) so E[output] == x.
    mask = x.new(*sz).bernoulli_(1. - drop_prob).div_(1. - drop_prob)
    mask = mask.expand_as(x)
    return x * mask
class EncoderRNN(nn.Module):
    """Length-aware (bi)directional LSTM/GRU encoder.

    Packs the batch by true sequence length, runs the RNN, and restores the
    original batch order before returning.  In bidirectional mode the
    requested ``hidden_size`` is split evenly between the two directions.
    """
    def __init__(self, input_size, hidden_size, \
        bidirectional=False, num_layers=1, rnn_type='lstm', rnn_dropout=None, device=None):
        super(EncoderRNN, self).__init__()
        if not rnn_type in ('lstm', 'gru'):
            raise RuntimeError('rnn_type is expected to be lstm or gru, got {}'.format(rnn_type))
        if bidirectional:
            print('[ Using {}-layer bidirectional {} encoder ]'.format(num_layers, rnn_type))
        else:
            print('[ Using {}-layer {} encoder ]'.format(num_layers, rnn_type))
        # Each direction gets hidden_size // 2 units, so the total must be even.
        if bidirectional and hidden_size % 2 != 0:
            raise RuntimeError('hidden_size is expected to be even in the bidirectional mode!')
        self.rnn_type = rnn_type
        self.num_layers = num_layers
        self.rnn_dropout = rnn_dropout  # applied manually in forward(), not inside the RNN module
        self.device = device
        self.hidden_size = hidden_size // 2 if bidirectional else hidden_size
        self.num_directions = 2 if bidirectional else 1
        model = nn.LSTM if rnn_type == 'lstm' else nn.GRU
        self.model = model(input_size, self.hidden_size, self.num_layers, batch_first=True, bidirectional=bidirectional)
    def forward(self, x, x_len):
        """Encode a padded batch.

        x: [batch_size * max_length * emb_dim]
        x_len: [batch_size]

        Returns the per-timestep outputs (transposed to
        [max_length, batch_size, hidden]) and the final state
        ((h, c) for LSTM, h for GRU), each [1, batch_size, hidden].
        """
        # pack_padded_sequence requires sequences sorted by decreasing length.
        sorted_x_len, indx = torch.sort(x_len, 0, descending=True)
        x = pack_padded_sequence(x[indx], sorted_x_len.data.tolist(), batch_first=True)
        h0 = to_cuda(torch.zeros(self.num_directions * self.num_layers, x_len.size(0), self.hidden_size), self.device)
        if self.rnn_type == 'lstm':
            c0 = to_cuda(torch.zeros(self.num_directions * self.num_layers, x_len.size(0), self.hidden_size), self.device)
            packed_h, (packed_h_t, packed_c_t) = self.model(x, (h0, c0))
        else:
            packed_h, packed_h_t = self.model(x, h0)
        # Merge the last layer's forward/backward final states (bidirectional),
        # or simply take the last layer's state (unidirectional).
        if self.num_directions == 2:
            packed_h_t = torch.cat((packed_h_t[-1], packed_h_t[-2]), 1)
            if self.rnn_type == 'lstm':
                packed_c_t = torch.cat((packed_c_t[-1], packed_c_t[-2]), 1)
        else:
            packed_h_t = packed_h_t[-1]
            if self.rnn_type == 'lstm':
                packed_c_t = packed_c_t[-1]
        # restore the sorting
        _, inverse_indx = torch.sort(indx, 0)
        hh, _ = pad_packed_sequence(packed_h, batch_first=True)
        restore_hh = hh[inverse_indx]
        # Share the dropout mask across timesteps (axis -2).
        restore_hh = dropout(restore_hh, self.rnn_dropout, shared_axes=[-2], training=self.training)
        restore_hh = restore_hh.transpose(0, 1) # [max_length, batch_size, emb_dim]
        restore_packed_h_t = packed_h_t[inverse_indx]
        restore_packed_h_t = dropout(restore_packed_h_t, self.rnn_dropout, training=self.training)
        restore_packed_h_t = restore_packed_h_t.unsqueeze(0) # [1, batch_size, emb_dim]
        if self.rnn_type == 'lstm':
            restore_packed_c_t = packed_c_t[inverse_indx]
            restore_packed_c_t = dropout(restore_packed_c_t, self.rnn_dropout, training=self.training)
            restore_packed_c_t = restore_packed_c_t.unsqueeze(0) # [1, batch_size, emb_dim]
            rnn_state_t = (restore_packed_h_t, restore_packed_c_t)
        else:
            rnn_state_t = restore_packed_h_t
return restore_hh, rnn_state_t | src/core/layers/common.py | from typing import List
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import torch.nn.functional as F
from ..utils.generic_utils import to_cuda
from ..utils.constants import VERY_SMALL_NUMBER
def dropout(x, drop_prob, shared_axes=[], training=False):
    """
    Randomly zero elements of ``x`` (inverted dropout), optionally reusing the
    same mask along ``shared_axes``.

    Returns ``x`` unchanged when not training or when ``drop_prob`` is 0/None;
    otherwise returns a same-shaped tensor scaled by ``1/(1-drop_prob)``.
    """
    disabled = (not training) or drop_prob == 0 or drop_prob is None
    if disabled:
        return x
    mask_shape = list(x.size())
    for axis in shared_axes:
        mask_shape[axis] = 1  # one mask value broadcast along this axis
    keep_prob = 1. - drop_prob
    # Inverted dropout keeps E[output] == x during training.
    mask = x.new(*mask_shape).bernoulli_(keep_prob).div_(keep_prob)
    return x * mask.expand_as(x)
class EncoderRNN(nn.Module):
    """Length-aware (bi)directional LSTM/GRU encoder.

    Packs the batch by true sequence length, runs the RNN, and restores the
    original batch order before returning.  In bidirectional mode the
    requested ``hidden_size`` is split evenly between the two directions.
    """
    def __init__(self, input_size, hidden_size, \
        bidirectional=False, num_layers=1, rnn_type='lstm', rnn_dropout=None, device=None):
        super(EncoderRNN, self).__init__()
        if not rnn_type in ('lstm', 'gru'):
            raise RuntimeError('rnn_type is expected to be lstm or gru, got {}'.format(rnn_type))
        if bidirectional:
            print('[ Using {}-layer bidirectional {} encoder ]'.format(num_layers, rnn_type))
        else:
            print('[ Using {}-layer {} encoder ]'.format(num_layers, rnn_type))
        # Each direction gets hidden_size // 2 units, so the total must be even.
        if bidirectional and hidden_size % 2 != 0:
            raise RuntimeError('hidden_size is expected to be even in the bidirectional mode!')
        self.rnn_type = rnn_type
        self.num_layers = num_layers
        self.rnn_dropout = rnn_dropout  # applied manually in forward(), not inside the RNN module
        self.device = device
        self.hidden_size = hidden_size // 2 if bidirectional else hidden_size
        self.num_directions = 2 if bidirectional else 1
        model = nn.LSTM if rnn_type == 'lstm' else nn.GRU
        self.model = model(input_size, self.hidden_size, self.num_layers, batch_first=True, bidirectional=bidirectional)
    def forward(self, x, x_len):
        """Encode a padded batch.

        x: [batch_size * max_length * emb_dim]
        x_len: [batch_size]

        Returns the per-timestep outputs (transposed to
        [max_length, batch_size, hidden]) and the final state
        ((h, c) for LSTM, h for GRU), each [1, batch_size, hidden].
        """
        # pack_padded_sequence requires sequences sorted by decreasing length.
        sorted_x_len, indx = torch.sort(x_len, 0, descending=True)
        x = pack_padded_sequence(x[indx], sorted_x_len.data.tolist(), batch_first=True)
        h0 = to_cuda(torch.zeros(self.num_directions * self.num_layers, x_len.size(0), self.hidden_size), self.device)
        if self.rnn_type == 'lstm':
            c0 = to_cuda(torch.zeros(self.num_directions * self.num_layers, x_len.size(0), self.hidden_size), self.device)
            packed_h, (packed_h_t, packed_c_t) = self.model(x, (h0, c0))
        else:
            packed_h, packed_h_t = self.model(x, h0)
        # Merge the last layer's forward/backward final states (bidirectional),
        # or simply take the last layer's state (unidirectional).
        if self.num_directions == 2:
            packed_h_t = torch.cat((packed_h_t[-1], packed_h_t[-2]), 1)
            if self.rnn_type == 'lstm':
                packed_c_t = torch.cat((packed_c_t[-1], packed_c_t[-2]), 1)
        else:
            packed_h_t = packed_h_t[-1]
            if self.rnn_type == 'lstm':
                packed_c_t = packed_c_t[-1]
        # restore the sorting
        _, inverse_indx = torch.sort(indx, 0)
        hh, _ = pad_packed_sequence(packed_h, batch_first=True)
        restore_hh = hh[inverse_indx]
        # Share the dropout mask across timesteps (axis -2).
        restore_hh = dropout(restore_hh, self.rnn_dropout, shared_axes=[-2], training=self.training)
        restore_hh = restore_hh.transpose(0, 1) # [max_length, batch_size, emb_dim]
        restore_packed_h_t = packed_h_t[inverse_indx]
        restore_packed_h_t = dropout(restore_packed_h_t, self.rnn_dropout, training=self.training)
        restore_packed_h_t = restore_packed_h_t.unsqueeze(0) # [1, batch_size, emb_dim]
        if self.rnn_type == 'lstm':
            restore_packed_c_t = packed_c_t[inverse_indx]
            restore_packed_c_t = dropout(restore_packed_c_t, self.rnn_dropout, training=self.training)
            restore_packed_c_t = restore_packed_c_t.unsqueeze(0) # [1, batch_size, emb_dim]
            rnn_state_t = (restore_packed_h_t, restore_packed_c_t)
        else:
            rnn_state_t = restore_packed_h_t
return restore_hh, rnn_state_t | 0.927831 | 0.560854 |
import os
import re
import datetime
import configparser
import numpy as np
import astropy.io.fits as fits
from astropy.table import Table
from ...utils.misc import extract_date
from ..common import load_config
from .common import get_region_lst, get_std_setup, print_wrapper
from .reduce import reduce_rawdata
def make_config():
    """Generate a config file for reducing the data taken with Subaru/HDS
    spectrograph.

    Interactively asks for the observation date (pre-filled with a guess
    extracted from the current directory name) and writes ``HDS.<date>.cfg``
    into the current directory.
    """
    # find date of data obtained: guess from the working directory name,
    # then let the user confirm or override it.
    current_pathname = os.path.basename(os.getcwd())
    guess_date = extract_date(current_pathname)
    while(True):
        if guess_date is None:
            prompt = 'YYYYMMDD'
        else:
            prompt = guess_date
        string = input('Date of observation [{}]: '.format(prompt))
        input_date = extract_date(string)
        if input_date is None:
            if guess_date is None:
                continue   # no usable date at all: ask again
            else:
                input_date = guess_date  # empty/invalid input: accept the guess
                break
        else:
            break
    # Validate that the accepted date really is YYYY-MM-DD (raises otherwise).
    # (The parsed value itself was never used; the unused `dbpath` local that
    # used to sit here was removed.)
    datetime.datetime.strptime(input_date, '%Y-%m-%d')
    # create config object
    config = configparser.ConfigParser()
    config.add_section('data')
    config.set('data', 'telescope',   'Subaru')
    config.set('data', 'instrument',  'HDS')
    config.set('data', 'rawpath',     'rawdata')
    config.add_section('reduce')
    config.set('reduce', 'midpath',     'midproc')
    config.set('reduce', 'figpath',     'images')
    config.set('reduce', 'odspath',     'onedspec')
    config.set('reduce', 'mode',        'normal')
    config.set('reduce', 'oned_suffix', 'ods')
    config.set('reduce', 'fig_format',  'png')
    config.set('reduce', 'ncores',      'max')
    # section of bias correction
    sectname = 'reduce.bias'
    config.add_section(sectname)
    config.set(sectname, 'bias_file', '${reduce:midpath}/bias.fits')
    config.set(sectname, 'cosmic_clip', str(10))
    config.set(sectname, 'maxiter',     str(5))
    # write to config file with "key = value" aligned per section
    filename = 'HDS.{}.cfg'.format(input_date)
    with open(filename, 'w') as outfile:
        for section in config.sections():
            maxkeylen = max(len(key) for key in config[section].keys())
            outfile.write('[{}]'.format(section)+os.linesep)
            fmt = '{{:{}s}} = {{}}'.format(maxkeylen)
            for key, value in config[section].items():
                outfile.write(fmt.format(key, value)+os.linesep)
            outfile.write(os.linesep)
    print('Config file written to {}'.format(filename))
def _read_ccd_frame(filename):
    """Read one HDS CCD frame and stitch its two data regions side-by-side.

    Returns the trimmed data array and the FITS header.
    """
    data, head = fits.getdata(filename, header=True)
    region1, region2 = get_region_lst(head)
    x11, x12, y11, y12 = region1[0]
    x21, x22, y21, y22 = region2[0]
    data = np.concatenate(
            (data[y11:y12, x11:x12], data[y21:y22, x21:x22]),
            axis=1)
    return data, head

def make_obslog():
    """Scan the raw data, and generate a log file containing the detail
    information for each frame.

    HDS exposures come in pairs (CCD1 = odd frame number, CCD2 = even); each
    pair produces one row of the output ``.obslog`` table.
    """
    # load config file (raw string: avoids invalid-escape warnings)
    config = load_config(r'HDS\S*\.cfg$')
    rawpath = config['data'].get('rawpath')
    # prepare logtable
    logtable = Table(dtype=[
        ('frameid', 'i2'),
        ('fileid1', 'S12'),
        ('fileid2', 'S12'),
        ('objtype', 'S10'),
        ('object',  'S20'),
        ('i2',      'S1'),
        ('exptime', 'f4'),
        ('obsdate', 'S19'),
        ('setup',   'S7'),
        ('binning', 'S7'),
        ('slitsize','S8'),
        ('nsat_1',  'i4'),
        ('nsat_2',  'i4'),
        ('q95_1',   'i4'),
        ('q95_2',   'i4'),
        ])
    fmt_str = ('  - {:>5s} {:12s} {:12s} {:<10s} {:<20s} {:1s}I2 {:>7} {:^23s}'
                ' {:<7s} {:5} {:>8s}' # setup, binning, slitsize
                ' \033[31m{:>7}\033[0m' # nsat_1
                ' \033[34m{:>7}\033[0m' # nsat_2
                ' \033[31m{:>5}\033[0m' # q95_1
                ' \033[34m{:>5}\033[0m' # q95_2
                )
    head_str = fmt_str.format('FID', 'fileid1', 'fileid2', 'objtype', 'object',
                    '', 'exptime', 'obsdate',
                    'setup', 'binning', 'slitsize',
                    'nsat_1', 'nsat_2', 'q95_1', 'q95_2')
    print(head_str)
    frameid = 0
    # start scanning the raw files
    for fname in sorted(os.listdir(rawpath)):
        if not re.match(r'HDSA\d{8}\.fits$', fname):
            continue
        # check the both CCD frames are exist
        framenum = int(fname[4:12])
        if framenum % 2 == 1:
            other_fname = 'HDSA{:08d}.fits'.format(framenum+1)
        else:
            other_fname = 'HDSA{:08d}.fits'.format(framenum-1)
        if not os.path.exists(os.path.join(rawpath, other_fname)):
            print('Warning: missing file: {}'.format(other_fname))
        # only process each pair once, from its odd (CCD1) member
        if framenum % 2 == 0:
            continue
        frameid1 = int(fname[4:12])
        frameid2 = frameid1 + 1
        fileid1 = 'HDSA{:08d}'.format(frameid1)
        fileid2 = 'HDSA{:08d}'.format(frameid2)
        fname1 = '{}.fits'.format(fileid1)
        fname2 = '{}.fits'.format(fileid2)
        # read and trim both CCDs (deduplicated via _read_ccd_frame)
        data1, head1 = _read_ccd_frame(os.path.join(rawpath, fname1))
        data2, head2 = _read_ccd_frame(os.path.join(rawpath, fname2))
        # the two CCDs of one exposure must agree on these keywords
        for key in ['DATA-TYP', 'OBJECT', 'EXPTIME', 'DATE-OBS', 'UT',
                    'SLIT', 'SLT-WID', 'SLT-LEN', 'FILTER01', 'FILTER02',
                    'H_I2CELL', 'H_COLLIM', 'H_CROSSD',
                    'BIN-FCT1', 'BIN-FCT2']:
            if head1[key] != head2[key]:
                print('Warning: {} of {} ({}) and {} ({}) does not match.'.format(
                    key, frameid1, head1[key], frameid2, head2[key]))
        frameid = frameid + 1
        objtype    = head1['DATA-TYP']
        objectname = head1['OBJECT']
        exptime    = head1['EXPTIME']
        i2 = {'USE': '+', 'NOUSE': '-'}[head1['H_I2CELL']]
        obsdate = '{}T{}'.format(head1['DATE-OBS'], head1['UT'])
        # get setup and check the consistency of CCD1 and CCD2
        setup1 = get_std_setup(head1)
        setup2 = get_std_setup(head2)
        if setup1 != setup2:
            print('Warning: setup of CCD1 ({}) and CCD2 ({})'
                    'does not match'.format(setup1, setup2))
        setup = setup1
        # get binning and check the consistency of CCD1 and CCD2
        bin_1 = (head1['BIN-FCT1'], head1['BIN-FCT2'])
        bin_2 = (head2['BIN-FCT1'], head2['BIN-FCT2'])
        if bin_1 != bin_2:
            print('Warning: Binning of CCD1 ({}) and CCD2 ({})'
                    ' do not match'.format(bin_1, bin_2))
        binning = '({},{})'.format(bin_1[0], bin_1[1])
        slitsize = '{:4.2f}x{:3.1f}'.format(head1['SLT-WID'], head1['SLT-LEN'])
        # saturated pixels are NaN in the raw frames; count them, then fill
        # with the 16-bit saturation level before computing percentiles.
        sat_mask1 = np.isnan(data1)
        sat_mask2 = np.isnan(data2)
        nsat_1 = sat_mask1.sum()
        nsat_2 = sat_mask2.sum()
        # BUG FIX: the fill value was 66535, a typo for 65535 (2**16 - 1).
        data1[sat_mask1] = 65535
        data2[sat_mask2] = 65535
        q95_1 = int(np.round(np.percentile(data1, 95)))
        q95_2 = int(np.round(np.percentile(data2, 95)))
        item = [frameid, fileid1, fileid2, objtype, objectname, i2, exptime,
                obsdate, setup, binning, slitsize,
                nsat_1, nsat_2, q95_1, q95_2]
        logtable.add_row(item)
        item = logtable[-1]
        # print log item with colors
        string = fmt_str.format('[{:d}]'.format(frameid),
                    fileid1, fileid2, objtype, objectname, i2, exptime,
                    obsdate, setup, binning, slitsize,
                    nsat_1, nsat_2, q95_1, q95_2,
                    )
        print(print_wrapper(string, item))
    # determine filename of logtable.
    # use the obsdate of the first frame; never overwrite an existing log,
    # append a running .N suffix instead.
    obsdate = logtable[0]['obsdate'][0:10]
    outname = '{}.obslog'.format(obsdate)
    if os.path.exists(outname):
        i = 0
        while(True):
            i += 1
            outname = '{}.{}.obslog'.format(obsdate, i)
            if not os.path.exists(outname):
                outfilename = outname
                break
    else:
        outfilename = outname
    # set display formats
    logtable['objtype'].info.format = '<s'
    logtable['object'].info.format = '<s'
    logtable['i2'].info.format = '^s'
    logtable['exptime'].info.format = 'g'
    logtable['binning'].info.format = '^s'
    # save the logtable (context manager guarantees the file is closed)
    with open(outfilename, 'w') as outfile:
        for row in logtable.pformat_all():
            outfile.write(row+os.linesep)
outfile.close() | gamse/pipelines/hds/__init__.py | import os
import re
import datetime
import configparser
import numpy as np
import astropy.io.fits as fits
from astropy.table import Table
from ...utils.misc import extract_date
from ..common import load_config
from .common import get_region_lst, get_std_setup, print_wrapper
from .reduce import reduce_rawdata
def make_config():
    """Generate a config file for reducing the data taken with Subaru/HDS
    spectrograph.
    """
    # Guess the observation date from the current directory name, then let
    # the user confirm or override it interactively.
    current_pathname = os.path.basename(os.getcwd())
    guess_date = extract_date(current_pathname)
    while True:
        prompt = 'YYYYMMDD' if guess_date is None else guess_date
        string = input('Date of observation [{}]: '.format(prompt))
        input_date = extract_date(string)
        if input_date is not None:
            break
        if guess_date is not None:
            input_date = guess_date
            break
    input_datetime = datetime.datetime.strptime(input_date, '%Y-%m-%d')
    # general database path for this instrument
    dbpath = '~/.gamse/Subaru.HDS'
    # Build the configuration section by section from a literal table.
    config = configparser.ConfigParser()
    section_items = [
        ('data', [
            ('telescope',  'Subaru'),
            ('instrument', 'HDS'),
            ('rawpath',    'rawdata'),
            ]),
        ('reduce', [
            ('midpath',     'midproc'),
            ('figpath',     'images'),
            ('odspath',     'onedspec'),
            ('mode',        'normal'),
            ('oned_suffix', 'ods'),
            ('fig_format',  'png'),
            ('ncores',      'max'),
            ]),
        # section of bias correction
        ('reduce.bias', [
            ('bias_file',   '${reduce:midpath}/bias.fits'),
            ('cosmic_clip', str(10)),
            ('maxiter',     str(5)),
            ]),
        ]
    for sectname, items in section_items:
        config.add_section(sectname)
        for key, value in items:
            config.set(sectname, key, value)
    # Write the file with "key = value" aligned per section.
    filename = 'HDS.{}.cfg'.format(input_date)
    with open(filename, 'w') as outfile:
        for section in config.sections():
            width = max(len(key) for key in config[section].keys())
            outfile.write('[{}]'.format(section)+os.linesep)
            line_fmt = '{{:{}s}} = {{}}'.format(width)
            for key, value in config[section].items():
                outfile.write(line_fmt.format(key, value)+os.linesep)
            outfile.write(os.linesep)
    print('Config file written to {}'.format(filename))
def make_obslog():
    """Scan the raw data, and generate a log file containing the detail
    information for each frame.

    HDS exposures come in pairs (CCD1 = odd frame number, CCD2 = even); each
    pair produces one row of the output ``.obslog`` table.
    """
    # load config file
    config = load_config('HDS\S*\.cfg$')
    rawpath = config['data'].get('rawpath')
    # prepare logtable
    logtable = Table(dtype=[
        ('frameid', 'i2'),
        ('fileid1', 'S12'),
        ('fileid2', 'S12'),
        ('objtype', 'S10'),
        ('object',  'S20'),
        ('i2',      'S1'),
        ('exptime', 'f4'),
        ('obsdate', 'S19'),
        ('setup',   'S7'),
        ('binning', 'S7'),
        ('slitsize','S8'),
        ('nsat_1',  'i4'),
        ('nsat_2',  'i4'),
        ('q95_1',   'i4'),
        ('q95_2',   'i4'),
        ])
    # Row template with ANSI colors (red = CCD1 stats, blue = CCD2 stats).
    fmt_str = ('  - {:>5s} {:12s} {:12s} {:<10s} {:<20s} {:1s}I2 {:>7} {:^23s}'
                ' {:<7s} {:5} {:>8s}' # setup, binning, slitsize
                ' \033[31m{:>7}\033[0m' # nsat_1
                ' \033[34m{:>7}\033[0m' # nsat_2
                ' \033[31m{:>5}\033[0m' # q95_1
                ' \033[34m{:>5}\033[0m' # q95_2
                )
    head_str = fmt_str.format('FID', 'fileid1', 'fileid2', 'objtype', 'object',
                    '', 'exptime', 'obsdate',
                    'setup', 'binning', 'slitsize',
                    'nsat_1', 'nsat_2', 'q95_1', 'q95_2')
    print(head_str)
    frameid = 0
    # start scanning the raw files
    for fname in sorted(os.listdir(rawpath)):
        if not re.match('HDSA\d{8}\.fits$', fname):
            continue
        # check the both CCD frames are exist
        framenum = int(fname[4:12])
        if framenum % 2 == 1:
            other_fname = 'HDSA{:08d}.fits'.format(framenum+1)
        else:
            other_fname = 'HDSA{:08d}.fits'.format(framenum-1)
        if not os.path.exists(os.path.join(rawpath, other_fname)):
            print('Warning: missing file: {}'.format(other_fname))
        # process each pair only once, from its odd (CCD1) member
        if framenum % 2 == 0:
            continue
        frameid1 = int(fname[4:12])
        frameid2 = frameid1 + 1
        fileid1 = 'HDSA{:08d}'.format(frameid1)
        fileid2 = 'HDSA{:08d}'.format(frameid2)
        fname1 = '{}.fits'.format(fileid1)
        fname2 = '{}.fits'.format(fileid2)
        # Read CCD1 and stitch its two data regions side-by-side.
        filename1 = os.path.join(rawpath, fname1)
        data1, head1 = fits.getdata(filename1, header=True)
        region1, region2 = get_region_lst(head1)
        x11, x12, y11, y12 = region1[0]
        x21, x22, y21, y22 = region2[0]
        data1 = np.concatenate(
                (data1[y11:y12, x11:x12], data1[y21:y22, x21:x22]),
                axis=1)
        # Same for CCD2.
        filename2 = os.path.join(rawpath, fname2)
        data2, head2 = fits.getdata(filename2, header=True)
        region1, region2 = get_region_lst(head2)
        x11, x12, y11, y12 = region1[0]
        x21, x22, y21, y22 = region2[0]
        data2 = np.concatenate(
                (data2[y11:y12, x11:x12], data2[y21:y22, x21:x22]),
                axis=1)
        # The two CCDs of one exposure must agree on these keywords.
        for key in ['DATA-TYP', 'OBJECT', 'EXPTIME', 'DATE-OBS', 'UT',
                    'SLIT', 'SLT-WID', 'SLT-LEN', 'FILTER01', 'FILTER02',
                    'H_I2CELL', 'H_COLLIM', 'H_CROSSD',
                    'BIN-FCT1', 'BIN-FCT2']:
            if head1[key] != head2[key]:
                print('Warning: {} of {} ({}) and {} ({}) does not match.'.format(
                    key, frameid1, head1[key], frameid2, head2[key]))
        frameid = frameid + 1
        objtype    = head1['DATA-TYP']
        objectname = head1['OBJECT']
        exptime    = head1['EXPTIME']
        i2 = {'USE': '+', 'NOUSE': '-'}[head1['H_I2CELL']]
        obsdate = '{}T{}'.format(head1['DATE-OBS'], head1['UT'])
        # get setup and check the consistency of CCD1 and CCD2
        setup1 = get_std_setup(head1)
        setup2 = get_std_setup(head2)
        if setup1 != setup2:
            print('Warning: setup of CCD1 ({}) and CCD2 ({})'
                    'does not match'.format(setup1, setup2))
        setup = setup1
        # get binning and check the consistency of CCD1 and CCD2
        bin_1 = (head1['BIN-FCT1'], head1['BIN-FCT2'])
        bin_2 = (head2['BIN-FCT1'], head2['BIN-FCT2'])
        if bin_1 != bin_2:
            print('Warning: Binning of CCD1 ({}) and CCD2 ({})'
                    ' do not match'.format(bin_1, bin_2))
        binning = '({},{})'.format(bin_1[0], bin_1[1])
        slitsize = '{:4.2f}x{:3.1f}'.format(head1['SLT-WID'], head1['SLT-LEN'])
        # Saturated pixels are NaN; count them, then fill before percentiles.
        sat_mask1 = np.isnan(data1)
        sat_mask2 = np.isnan(data2)
        nsat_1 = sat_mask1.sum()
        nsat_2 = sat_mask2.sum()
        # NOTE(review): 66535 looks like a typo for 65535 (2**16 - 1) — confirm.
        data1[sat_mask1] = 66535
        data2[sat_mask2] = 66535
        q95_1 = int(np.round(np.percentile(data1, 95)))
        q95_2 = int(np.round(np.percentile(data2, 95)))
        item = [frameid, fileid1, fileid2, objtype, objectname, i2, exptime,
                obsdate, setup, binning, slitsize,
                nsat_1, nsat_2, q95_1, q95_2]
        logtable.add_row(item)
        item = logtable[-1]
        # print log item with colors
        string = fmt_str.format('[{:d}]'.format(frameid),
                    fileid1, fileid2, objtype, objectname, i2, exptime,
                    obsdate, setup, binning, slitsize,
                    nsat_1, nsat_2, q95_1, q95_2,
                    )
        print(print_wrapper(string, item))
    # determine filename of logtable.
    # use the obsdate of the first frame; never overwrite an existing log,
    # append a running .N suffix instead.
    obsdate = logtable[0]['obsdate'][0:10]
    outname = '{}.obslog'.format(obsdate)
    if os.path.exists(outname):
        i = 0
        while(True):
            i += 1
            outname = '{}.{}.obslog'.format(obsdate, i)
            if not os.path.exists(outname):
                outfilename = outname
                break
    else:
        outfilename = outname
    # set display formats
    logtable['objtype'].info.format = '<s'
    logtable['object'].info.format = '<s'
    logtable['i2'].info.format = '^s'
    logtable['exptime'].info.format = 'g'
    logtable['binning'].info.format = '^s'
    # save the logtable
    outfile = open(outfilename, 'w')
    for row in logtable.pformat_all():
        outfile.write(row+os.linesep)
outfile.close() | 0.339937 | 0.137446 |
import discord
import asyncio
import aiohttp
import random
#custom libs
import config
import db
from event import Emitter
#Discord client PhiBot
class PhiBot(discord.Client):
def __init__(self, lock):
super().__init__()
self.running = True
self.lock = lock
self.eb_quotes = [
'It is certain',
'It is decidedly so',
'Without a doubt',
'Yes, definitely',
'You may rely on it',
'As I see it, yes',
'Most likely',
'Outlook good',
'Yes',
'Signs point to yes',
'Reply hazy, please try again',
'Ask again later',
'Better not tell you now',
'Cannot predict now',
'Concentrate and ask again',
'Don\'t count on it',
'My reply is no',
'My sources say no',
'Outlook not so good',
'Very doubtful'
]
self.shortener_endpoint = 'https://www.googleapis.com/urlshortener/v1/url'
def set_running(self, running):
self.running = running
#on startup
async def on_ready(self):
if config.DEV_MODE:
await self.change_presence(game=discord.Game(name='phi-bot DEV_MODE'))
else:
await self.change_presence(game=discord.Game(name='with all of my fellow users'))
await Emitter.emit('Successfully-logged-in', 'Logged in as:{} with a user ID of:{}'.format(self.user.name, self.user.id))
#Parse the id from a string
def parse_id_from_string(self, id_string):
return id_string.lstrip('<@!').rstrip('>')
#Generate a user object given the current server members and id
def make_user_object(self, server_members, id_string):
user_id = self.parse_id_from_string(id_string)
return discord.utils.get(server_members, id=user_id)
#Helper functions for processing commands
async def thats_me(self, message):
await self.send_message(message.channel, 'Hey, that\'s me!')
#The magical 8ball
async def eight_ball(self, message):
await self.send_message(message.channel, random.choice(self.eb_quotes))
#link shortener
async def shorten_url(self, message):
params = {'key':config.SHORTENER_KEY}
headers = {'Content-Type': 'application/json'}
url = message.content.split(' ')[1]
async with aiohttp.ClientSession() as session:
async with session.post(self.shortener_endpoint, params=params, json={'longUrl':url}, headers=headers) as response:
if response.status == 200:
json = await response.json()
reply = 'Your shortened url: {}'.format(json['id'])
await self.send_message(message.channel, reply)
await Emitter.emit('Shorten-Success', 'URL:{} shortened to URL:{}'.format(url, json['id']))
else:
await self.send_message(message.channel, 'You did not provide a correct URL')
await Emitter.emit('Shorten-Failure', 'URL:{} was attempted'.format(url))
#Start a users bank account if they don't already ahve one
async def start_bank_account(self, message):
user_not_in_bank = None
discord_id = message.author.id
#Makes a bank account for the user if they don't already have one
with await self.lock:
user_not_in_bank = db.create_new_bank_account(discord_id)
if user_not_in_bank:
message_to_send = '<@{}>, your bank account has just been created and you have been granted 200 credits! Yay!'.format(discord_id)
await self.send_message(message.channel, message_to_send)
else:
message_to_send = '<@{}>, you already have an account within our bank!'.format(discord_id)
await self.send_message(message.channel, message_to_send)
#Retreive bank account funds for a specific user
async def get_bank_funds(self, message):
discord_id = message.author.id
funds = 0
with await self.lock:
funds = db.get_funds(discord_id)
if funds == -1:
message_to_send = '<@{}>, you do not have a bank account, but you can make one with the `$bank start` command :)'.format(discord_id)
await self.send_message(message.channel, message_to_send)
else:
message_to_send = '<@{}>, you have ${} in your bank account, have a nice day!'.format(discord_id, funds)
await self.send_message(message.channel, message_to_send)
#Transfer bank funds to the user, called from the bank account routing
async def transfer_bank_funds(self, message):
discord_id = message.author.id
command_input = message.content.split()
if len(command_input) == 4:
amount = int(command_input[2])
receiving_user = self.parse_id_from_string(command_input[3])
#Prevent negative funds through
if amount < 0:
await self.send_message(message.channel, '<@{}> you can\'t transfer a negative amount of money!'.format(discord_id))
return
#Validate that the user isn't trying to transfer funds to themselves
if discord_id == receiving_user:
await self.send_message(message.channel, '<@{}> you can\'t transfer a funds to yourself!'.format(discord_id))
return
#attempt to subtract funds from the user who initiated the transfers
#bank account
with await self.lock:
funds_subtracted = db.subtract_funds(discord_id, amount)
if not funds_subtracted:
await self.send_message(message.channel, '<@{}> You either tried transferring more than you have or do not have a bank account, this is embarrasing!'.format(discord_id))
return
#Attempt to add the new funds to the receiving users bank account,
#refund the initial user if the transaction fails
with await self.lock:
funds_added = db.add_funds(receiving_user, amount)
if not funds_added:
await self.send_message(message.channel, '<@{}> You either didn\'t provide a valid user or the user simply doesn\'t have a bank account. I am transferring the money back now.'.format(discord_id))
db.add_funds(discord_id, amount)
return
#Retrieve the balance of the user who just made the transfer
with await self.lock:
new_balance = db.get_funds(discord_id)
await self.send_message(message.channel, '<@{}> Successfully transferred funds, Your account balance is now: {} beans'.format(discord_id, new_balance))
else:
await self.send_message(message.channel, '```Sorry, this is an invalid use of transfer, please try $help bank for more information```')
#Route bank account transaction request and handle input processing
async def process_bank_account(self, message):
discord_id = message.author.id
command_input = message.content.split()
try:
if command_input[1] == 'start':
await self.start_bank_account(message)
elif command_input[1] == 'funds':
await self.get_bank_funds(message)
elif command_input[1] == 'transfer':
await self.transfer_bank_funds(message)
except Exception as e:
await self.send_message(message.channel, '```Sorry, this is an invalid use transfer, please try $help bank for more information```')
print(e)
#Commmand processor
async def process_command(self, message):
content = message.content
if not content.startswith('$'):
return False
if content.startswith('$phi'):
await self.thats_me(message)
elif content.startswith('$goodboy'):
await self.send_message(message.channel, 'Woof!')
elif content.startswith('$bank'):
await self.process_bank_account(message)
elif content.startswith('$8ball'):
await self.eight_ball(message)
elif content.startswith('$shorten'):
await self.shorten_url(message)
else:
await self.send_message(message.channel, '```Sorry, you didn\'t enter a valid command, please try $help for more information```')
return False
return True
#on message event handler. Sends command through custom command
#handler
async def on_message(self, message):
valid_command = await self.process_command(message)
#Log command to database
if valid_command:
await Emitter.emit('Processed-Command', 'Just finished processing a valid command')
user_input = message.content.split()
with await self.lock:
db.add_command_to_history(user_input[0], " ".join(user_input[1:]), message.author.name, message.author.id)
#Shutdown bp
async def shutdown():
await Emitter.emit('Bot shutdown phase', 'Bot is now turning off')
await Emitter.shutdown()
def main(loop):
#Shared lock for keeping database information safe
lock = asyncio.Lock()
phi = PhiBot(lock)
#Manage the asyncio event loop
try:
loop.run_until_complete(phi.start(config.DISCORD_TOKEN))
except KeyboardInterrupt:
phi.set_running(False)
loop.run_until_complete(phi.logout())
loop.run_until_complete(shutdown())
finally:
loop.close()
if __name__ == '__main__':
#Create database connection and then start all asynchronous actions
db.create_db_connection()
event_loop = asyncio.get_event_loop()
main(event_loop) | phi.py | import discord
import asyncio
import aiohttp
import random
#custom libs
import config
import db
from event import Emitter
#Discord client PhiBot
class PhiBot(discord.Client):
def __init__(self, lock):
super().__init__()
self.running = True
self.lock = lock
self.eb_quotes = [
'It is certain',
'It is decidedly so',
'Without a doubt',
'Yes, definitely',
'You may rely on it',
'As I see it, yes',
'Most likely',
'Outlook good',
'Yes',
'Signs point to yes',
'Reply hazy, please try again',
'Ask again later',
'Better not tell you now',
'Cannot predict now',
'Concentrate and ask again',
'Don\'t count on it',
'My reply is no',
'My sources say no',
'Outlook not so good',
'Very doubtful'
]
self.shortener_endpoint = 'https://www.googleapis.com/urlshortener/v1/url'
def set_running(self, running):
self.running = running
#on startup
async def on_ready(self):
if config.DEV_MODE:
await self.change_presence(game=discord.Game(name='phi-bot DEV_MODE'))
else:
await self.change_presence(game=discord.Game(name='with all of my fellow users'))
await Emitter.emit('Successfully-logged-in', 'Logged in as:{} with a user ID of:{}'.format(self.user.name, self.user.id))
#Parse the id from a string
def parse_id_from_string(self, id_string):
return id_string.lstrip('<@!').rstrip('>')
#Generate a user object given the current server members and id
def make_user_object(self, server_members, id_string):
user_id = self.parse_id_from_string(id_string)
return discord.utils.get(server_members, id=user_id)
#Helper functions for processing commands
async def thats_me(self, message):
await self.send_message(message.channel, 'Hey, that\'s me!')
#The magical 8ball
async def eight_ball(self, message):
await self.send_message(message.channel, random.choice(self.eb_quotes))
#link shortener
async def shorten_url(self, message):
params = {'key':config.SHORTENER_KEY}
headers = {'Content-Type': 'application/json'}
url = message.content.split(' ')[1]
async with aiohttp.ClientSession() as session:
async with session.post(self.shortener_endpoint, params=params, json={'longUrl':url}, headers=headers) as response:
if response.status == 200:
json = await response.json()
reply = 'Your shortened url: {}'.format(json['id'])
await self.send_message(message.channel, reply)
await Emitter.emit('Shorten-Success', 'URL:{} shortened to URL:{}'.format(url, json['id']))
else:
await self.send_message(message.channel, 'You did not provide a correct URL')
await Emitter.emit('Shorten-Failure', 'URL:{} was attempted'.format(url))
#Start a users bank account if they don't already ahve one
async def start_bank_account(self, message):
user_not_in_bank = None
discord_id = message.author.id
#Makes a bank account for the user if they don't already have one
with await self.lock:
user_not_in_bank = db.create_new_bank_account(discord_id)
if user_not_in_bank:
message_to_send = '<@{}>, your bank account has just been created and you have been granted 200 credits! Yay!'.format(discord_id)
await self.send_message(message.channel, message_to_send)
else:
message_to_send = '<@{}>, you already have an account within our bank!'.format(discord_id)
await self.send_message(message.channel, message_to_send)
#Retreive bank account funds for a specific user
async def get_bank_funds(self, message):
discord_id = message.author.id
funds = 0
with await self.lock:
funds = db.get_funds(discord_id)
if funds == -1:
message_to_send = '<@{}>, you do not have a bank account, but you can make one with the `$bank start` command :)'.format(discord_id)
await self.send_message(message.channel, message_to_send)
else:
message_to_send = '<@{}>, you have ${} in your bank account, have a nice day!'.format(discord_id, funds)
await self.send_message(message.channel, message_to_send)
#Transfer bank funds to the user, called from the bank account routing
async def transfer_bank_funds(self, message):
discord_id = message.author.id
command_input = message.content.split()
if len(command_input) == 4:
amount = int(command_input[2])
receiving_user = self.parse_id_from_string(command_input[3])
#Prevent negative funds through
if amount < 0:
await self.send_message(message.channel, '<@{}> you can\'t transfer a negative amount of money!'.format(discord_id))
return
#Validate that the user isn't trying to transfer funds to themselves
if discord_id == receiving_user:
await self.send_message(message.channel, '<@{}> you can\'t transfer a funds to yourself!'.format(discord_id))
return
#attempt to subtract funds from the user who initiated the transfers
#bank account
with await self.lock:
funds_subtracted = db.subtract_funds(discord_id, amount)
if not funds_subtracted:
await self.send_message(message.channel, '<@{}> You either tried transferring more than you have or do not have a bank account, this is embarrasing!'.format(discord_id))
return
#Attempt to add the new funds to the receiving users bank account,
#refund the initial user if the transaction fails
with await self.lock:
funds_added = db.add_funds(receiving_user, amount)
if not funds_added:
await self.send_message(message.channel, '<@{}> You either didn\'t provide a valid user or the user simply doesn\'t have a bank account. I am transferring the money back now.'.format(discord_id))
db.add_funds(discord_id, amount)
return
#Retrieve the balance of the user who just made the transfer
with await self.lock:
new_balance = db.get_funds(discord_id)
await self.send_message(message.channel, '<@{}> Successfully transferred funds, Your account balance is now: {} beans'.format(discord_id, new_balance))
else:
await self.send_message(message.channel, '```Sorry, this is an invalid use of transfer, please try $help bank for more information```')
#Route bank account transaction request and handle input processing
async def process_bank_account(self, message):
discord_id = message.author.id
command_input = message.content.split()
try:
if command_input[1] == 'start':
await self.start_bank_account(message)
elif command_input[1] == 'funds':
await self.get_bank_funds(message)
elif command_input[1] == 'transfer':
await self.transfer_bank_funds(message)
except Exception as e:
await self.send_message(message.channel, '```Sorry, this is an invalid use transfer, please try $help bank for more information```')
print(e)
#Commmand processor
async def process_command(self, message):
content = message.content
if not content.startswith('$'):
return False
if content.startswith('$phi'):
await self.thats_me(message)
elif content.startswith('$goodboy'):
await self.send_message(message.channel, 'Woof!')
elif content.startswith('$bank'):
await self.process_bank_account(message)
elif content.startswith('$8ball'):
await self.eight_ball(message)
elif content.startswith('$shorten'):
await self.shorten_url(message)
else:
await self.send_message(message.channel, '```Sorry, you didn\'t enter a valid command, please try $help for more information```')
return False
return True
#on message event handler. Sends command through custom command
#handler
async def on_message(self, message):
valid_command = await self.process_command(message)
#Log command to database
if valid_command:
await Emitter.emit('Processed-Command', 'Just finished processing a valid command')
user_input = message.content.split()
with await self.lock:
db.add_command_to_history(user_input[0], " ".join(user_input[1:]), message.author.name, message.author.id)
#Shutdown bp
async def shutdown():
await Emitter.emit('Bot shutdown phase', 'Bot is now turning off')
await Emitter.shutdown()
def main(loop):
#Shared lock for keeping database information safe
lock = asyncio.Lock()
phi = PhiBot(lock)
#Manage the asyncio event loop
try:
loop.run_until_complete(phi.start(config.DISCORD_TOKEN))
except KeyboardInterrupt:
phi.set_running(False)
loop.run_until_complete(phi.logout())
loop.run_until_complete(shutdown())
finally:
loop.close()
if __name__ == '__main__':
#Create database connection and then start all asynchronous actions
db.create_db_connection()
event_loop = asyncio.get_event_loop()
main(event_loop) | 0.266644 | 0.103612 |
from zone_api import platform_encapsulator as pe
from zone_api.core.actions.turn_off_adjacent_zones import TurnOffAdjacentZones
from zone_api.core.event_info import EventInfo
from zone_api.core.zone import Zone
from zone_api.core.zone_event import ZoneEvent
from zone_api.core.neighbor import Neighbor, NeighborType
from zone_api.core.devices.switch import Fan, Light
from zone_api_test.core.device_test import DeviceTest, create_zone_manager
ILLUMINANCE_THRESHOLD_IN_LUX = 10
class TurnOffAdjacentZonesTest(DeviceTest):
""" Unit tests for turn_off_adjacent_zones.py """
def setUp(self):
self.items = [pe.create_switch_item('TestLightName1'),
pe.create_switch_item('TestLightName2'),
pe.create_switch_item('TestLightName3'),
pe.create_switch_item('TestFanName'),
]
self.set_items(self.items)
super(TurnOffAdjacentZonesTest, self).setUp()
[self.washroom_light_item, self.lobby_light_item, self.foyer_light_item, self.fan_item] = self.items
self.washroom_light = Light(self.washroom_light_item, 5)
self.lobby_light = Light(self.lobby_light_item, 5)
self.foyer_light = Light(self.foyer_light_item, 5,
ILLUMINANCE_THRESHOLD_IN_LUX, "0-23:59") # always stay on
self.show_fan = Fan(self.fan_item, 5)
self.action = TurnOffAdjacentZones()
self.washroom = Zone('washroom', [self.washroom_light]).add_action(self.action)
self.shower = Zone('shower', [self.show_fan]).add_action(self.action)
self.lobby = Zone('lobby', [self.lobby_light]).add_action(self.action)
self.foyer = Zone('foyer', [self.foyer_light]).add_action(self.action)
self.lobby = self.lobby.add_neighbor(
Neighbor(self.foyer.get_id(), NeighborType.OPEN_SPACE))
self.foyer = self.foyer.add_neighbor(
Neighbor(self.lobby.get_id(), NeighborType.OPEN_SPACE))
self.washroom = self.washroom.add_neighbor(
Neighbor(self.lobby.get_id(), NeighborType.OPEN_SPACE))
self.washroom = self.washroom.add_neighbor(
Neighbor(self.shower.get_id(), NeighborType.OPEN_SPACE))
self.zone_manager = create_zone_manager(
[self.washroom, self.shower, self.lobby, self.foyer])
def testOnAction_normalOpenSpaceNeighbor_turnsOffLight(self):
pe.set_switch_state(self.lobby_light_item, True)
self.assertTrue(self.trigger_action_from_zone(self.washroom))
self.assertFalse(self.lobby.is_light_on())
def testOnAction_openSpaceButDisableTurnOffByNeighbor_mustNotTurnsOffLight(self):
pe.set_switch_state(self.foyer_light_item, True)
self.assertTrue(self.foyer.is_light_on())
self.assertTrue(self.trigger_action_from_zone(self.lobby))
self.assertTrue(self.foyer.is_light_on())
def testOnAction_fanZone_returnsFalse(self):
pe.set_switch_state(self.fan_item, True)
self.assertFalse(self.trigger_action_from_zone(self.shower))
def testOnAction_neighborWithFan_mustNotTurnOffNeighborFan(self):
pe.set_switch_state(self.fan_item, True)
pe.set_switch_state(self.lobby_light_item, True)
self.assertTrue(self.trigger_action_from_zone(self.washroom))
self.assertFalse(self.lobby.is_light_on())
self.assertTrue(pe.is_in_on_state(self.fan_item))
def trigger_action_from_zone(self, zone):
"""
Creates a turn-on event on a light in the given zone, and invokes the turn-off-adjacent-zones action.
:param zone: the zone with the light just turned on.
:return: Boolean
"""
event_info = EventInfo(ZoneEvent.SWITCH_TURNED_ON, zone.get_devices()[0].get_item(), zone,
self.zone_manager, pe.get_event_dispatcher())
return self.action.on_action(event_info) | tests/zone_api_test/core/actions/turn_off_adjacent_zones_test.py | from zone_api import platform_encapsulator as pe
from zone_api.core.actions.turn_off_adjacent_zones import TurnOffAdjacentZones
from zone_api.core.event_info import EventInfo
from zone_api.core.zone import Zone
from zone_api.core.zone_event import ZoneEvent
from zone_api.core.neighbor import Neighbor, NeighborType
from zone_api.core.devices.switch import Fan, Light
from zone_api_test.core.device_test import DeviceTest, create_zone_manager
ILLUMINANCE_THRESHOLD_IN_LUX = 10
class TurnOffAdjacentZonesTest(DeviceTest):
""" Unit tests for turn_off_adjacent_zones.py """
def setUp(self):
self.items = [pe.create_switch_item('TestLightName1'),
pe.create_switch_item('TestLightName2'),
pe.create_switch_item('TestLightName3'),
pe.create_switch_item('TestFanName'),
]
self.set_items(self.items)
super(TurnOffAdjacentZonesTest, self).setUp()
[self.washroom_light_item, self.lobby_light_item, self.foyer_light_item, self.fan_item] = self.items
self.washroom_light = Light(self.washroom_light_item, 5)
self.lobby_light = Light(self.lobby_light_item, 5)
self.foyer_light = Light(self.foyer_light_item, 5,
ILLUMINANCE_THRESHOLD_IN_LUX, "0-23:59") # always stay on
self.show_fan = Fan(self.fan_item, 5)
self.action = TurnOffAdjacentZones()
self.washroom = Zone('washroom', [self.washroom_light]).add_action(self.action)
self.shower = Zone('shower', [self.show_fan]).add_action(self.action)
self.lobby = Zone('lobby', [self.lobby_light]).add_action(self.action)
self.foyer = Zone('foyer', [self.foyer_light]).add_action(self.action)
self.lobby = self.lobby.add_neighbor(
Neighbor(self.foyer.get_id(), NeighborType.OPEN_SPACE))
self.foyer = self.foyer.add_neighbor(
Neighbor(self.lobby.get_id(), NeighborType.OPEN_SPACE))
self.washroom = self.washroom.add_neighbor(
Neighbor(self.lobby.get_id(), NeighborType.OPEN_SPACE))
self.washroom = self.washroom.add_neighbor(
Neighbor(self.shower.get_id(), NeighborType.OPEN_SPACE))
self.zone_manager = create_zone_manager(
[self.washroom, self.shower, self.lobby, self.foyer])
def testOnAction_normalOpenSpaceNeighbor_turnsOffLight(self):
pe.set_switch_state(self.lobby_light_item, True)
self.assertTrue(self.trigger_action_from_zone(self.washroom))
self.assertFalse(self.lobby.is_light_on())
def testOnAction_openSpaceButDisableTurnOffByNeighbor_mustNotTurnsOffLight(self):
pe.set_switch_state(self.foyer_light_item, True)
self.assertTrue(self.foyer.is_light_on())
self.assertTrue(self.trigger_action_from_zone(self.lobby))
self.assertTrue(self.foyer.is_light_on())
def testOnAction_fanZone_returnsFalse(self):
pe.set_switch_state(self.fan_item, True)
self.assertFalse(self.trigger_action_from_zone(self.shower))
def testOnAction_neighborWithFan_mustNotTurnOffNeighborFan(self):
pe.set_switch_state(self.fan_item, True)
pe.set_switch_state(self.lobby_light_item, True)
self.assertTrue(self.trigger_action_from_zone(self.washroom))
self.assertFalse(self.lobby.is_light_on())
self.assertTrue(pe.is_in_on_state(self.fan_item))
def trigger_action_from_zone(self, zone):
"""
Creates a turn-on event on a light in the given zone, and invokes the turn-off-adjacent-zones action.
:param zone: the zone with the light just turned on.
:return: Boolean
"""
event_info = EventInfo(ZoneEvent.SWITCH_TURNED_ON, zone.get_devices()[0].get_item(), zone,
self.zone_manager, pe.get_event_dispatcher())
return self.action.on_action(event_info) | 0.629319 | 0.29044 |
import json
import os
import unittest
import pytest
from mock import patch
import requests
import requests_mock
from gitdl import gitdl
class TestGitdl(unittest.TestCase):
def test_params_invalid_api_token(self):
old_env = os.environ
os.environ = {}
with pytest.raises(Exception) as exc_info:
gitdl.get_params()
os.environ = old_env
assert str(exc_info.value) == "GITHUB_API_TOKEN not found"
def test_params_valid_api_token(self):
old_env = os.environ
os.environ = {"GITHUB_API_TOKEN": "key<PASSWORD>"}
assert gitdl.get_params().get('API_TOKEN') == \
os.environ.get("GITHUB_API_TOKEN")
os.environ = old_env
def test_get_first_search_result_invalid(self):
fake_json = json.dumps({'items': []})
url = "https://api.github.com/search/repositories?q=aksejake"
with requests_mock.mock() as mocker:
mocker.get(url, json=fake_json)
response = json.loads(requests.get(url).json())
with pytest.raises(Exception) as exc_info:
gitdl.get_first_search_result(response)
assert str(exc_info.value) == "Repository Not Found."
def test_download_exact_repo_invalid_repo(self):
# does not contain the slash required for owner/repo format
repo = "example"
with requests_mock.mock() as mocker:
mocker.get("https://api.github.com/repos/{}".format(repo),
status_code=404)
with pytest.raises(Exception) as exc_info:
with patch.dict('os.environ', {"GITHUB_API_TOKEN": "key123"}):
gitdl.download_exact_repo(repo)
assert str(exc_info.value) == "Repository Not Found."
def test_get_size(self):
with requests_mock.mock() as mocker:
mocker.get("mock://google.com",
headers={'Content-Length': '42'})
r = requests.get("mock://google.com")
self.assertEqual(gitdl.get_size(r), 0.04)
def test_get_search_results(self):
with requests_mock.mock() as mocker:
mocker.get("https://api.github.com/search/repositories?"
"q=gitdl&sort=&order=desc&per_page=30",
json="Found 3 repos!")
with patch.dict('os.environ', {"GITHUB_API_TOKEN": "<PASSWORD>"}):
resp = gitdl.get_search_results("gitdl")
self.assertEqual(resp, "Found 3 repos!")
def test_get_first_search_result_valid(self):
fake_json = json.dumps({'items': [{"id": 1, "name": "gitdl"}]})
url = "https://api.github.com/search/repositories?q=aksejake"
with requests_mock.mock() as mocker:
mocker.get(url, json=fake_json)
response = json.loads(requests.get(url).json())
res = gitdl.get_first_search_result(response)
self.assertEqual(res, {'id': 1, 'name': 'gitdl'})
def test_get_repo_names(self):
url = "https://api.github.com/search/repositories?q=anything"
fake_json = json.dumps(
{'items': [{'id': 1, 'full_name': 'SanketDG/gitdl'},
{'id': 2, 'full_name': 'SanketDG/djurl'}]})
with requests_mock.mock() as mocker:
mocker.get(url, json=fake_json)
response = json.loads(requests.get(url).json())
res = gitdl.get_repo_names(response)
self.assertEqual(res, ['SanketDG/gitdl', 'SanketDG/djurl'])
def test_get_search_results_first_only(self):
fake_json = {'items': [{"id": 1, "name": "gitdl"}]}
with requests_mock.mock() as mocker:
mocker.get("https://api.github.com/search/repositories?"
"q=gitdl&sort=&order=desc&per_page=30",
json=fake_json)
with patch.dict('os.environ', {"GITHUB_API_TOKEN": "<PASSWORD>"}):
resp = gitdl.get_search_results("gitdl", only_first=True)
self.assertEqual(resp, {'id': 1, 'name': 'gitdl'}) | tests/test_gitdl.py | import json
import os
import unittest
import pytest
from mock import patch
import requests
import requests_mock
from gitdl import gitdl
class TestGitdl(unittest.TestCase):
def test_params_invalid_api_token(self):
old_env = os.environ
os.environ = {}
with pytest.raises(Exception) as exc_info:
gitdl.get_params()
os.environ = old_env
assert str(exc_info.value) == "GITHUB_API_TOKEN not found"
def test_params_valid_api_token(self):
old_env = os.environ
os.environ = {"GITHUB_API_TOKEN": "key<PASSWORD>"}
assert gitdl.get_params().get('API_TOKEN') == \
os.environ.get("GITHUB_API_TOKEN")
os.environ = old_env
def test_get_first_search_result_invalid(self):
fake_json = json.dumps({'items': []})
url = "https://api.github.com/search/repositories?q=aksejake"
with requests_mock.mock() as mocker:
mocker.get(url, json=fake_json)
response = json.loads(requests.get(url).json())
with pytest.raises(Exception) as exc_info:
gitdl.get_first_search_result(response)
assert str(exc_info.value) == "Repository Not Found."
def test_download_exact_repo_invalid_repo(self):
# does not contain the slash required for owner/repo format
repo = "example"
with requests_mock.mock() as mocker:
mocker.get("https://api.github.com/repos/{}".format(repo),
status_code=404)
with pytest.raises(Exception) as exc_info:
with patch.dict('os.environ', {"GITHUB_API_TOKEN": "key123"}):
gitdl.download_exact_repo(repo)
assert str(exc_info.value) == "Repository Not Found."
def test_get_size(self):
with requests_mock.mock() as mocker:
mocker.get("mock://google.com",
headers={'Content-Length': '42'})
r = requests.get("mock://google.com")
self.assertEqual(gitdl.get_size(r), 0.04)
def test_get_search_results(self):
with requests_mock.mock() as mocker:
mocker.get("https://api.github.com/search/repositories?"
"q=gitdl&sort=&order=desc&per_page=30",
json="Found 3 repos!")
with patch.dict('os.environ', {"GITHUB_API_TOKEN": "<PASSWORD>"}):
resp = gitdl.get_search_results("gitdl")
self.assertEqual(resp, "Found 3 repos!")
def test_get_first_search_result_valid(self):
fake_json = json.dumps({'items': [{"id": 1, "name": "gitdl"}]})
url = "https://api.github.com/search/repositories?q=aksejake"
with requests_mock.mock() as mocker:
mocker.get(url, json=fake_json)
response = json.loads(requests.get(url).json())
res = gitdl.get_first_search_result(response)
self.assertEqual(res, {'id': 1, 'name': 'gitdl'})
def test_get_repo_names(self):
url = "https://api.github.com/search/repositories?q=anything"
fake_json = json.dumps(
{'items': [{'id': 1, 'full_name': 'SanketDG/gitdl'},
{'id': 2, 'full_name': 'SanketDG/djurl'}]})
with requests_mock.mock() as mocker:
mocker.get(url, json=fake_json)
response = json.loads(requests.get(url).json())
res = gitdl.get_repo_names(response)
self.assertEqual(res, ['SanketDG/gitdl', 'SanketDG/djurl'])
def test_get_search_results_first_only(self):
fake_json = {'items': [{"id": 1, "name": "gitdl"}]}
with requests_mock.mock() as mocker:
mocker.get("https://api.github.com/search/repositories?"
"q=gitdl&sort=&order=desc&per_page=30",
json=fake_json)
with patch.dict('os.environ', {"GITHUB_API_TOKEN": "<PASSWORD>"}):
resp = gitdl.get_search_results("gitdl", only_first=True)
self.assertEqual(resp, {'id': 1, 'name': 'gitdl'}) | 0.390708 | 0.225278 |
from CNC import CNC
from rgv import RGV
def sea_info(cnc_list):
"""找出当前发出信号的所有CNC,并记录故障机器"""
cnc_loc_list = []
error_cnc_list = []
for cnc_call in cnc_list:
cnc_stat = cnc_call.call_rgv()
if cnc_stat[1] in ['off', 'empty']:
cnc_loc_list.append(cnc_stat)
elif cnc_stat[1] == 'error':
error_cnc_list.append(cnc_stat[0])
return cnc_loc_list, error_cnc_list
delta_time = 1
duration = 8 * 60 * 60
repairing_time = 15 * 60
usedCase3 = False
# usedCase3 = True
# 第1组
t1 = 20
t2 = 33
t3 = 46
t4 = 700
t5 = 400
t6 = 378
t7 = 28
t8 = 31
t9 = 25
# 第2组
# t1 = 23
# t2 = 41
# t3 = 59
# t4 = 580
# t5 = 280
# t6 = 500
# t7 = 30
# t8 = 35
# t9 = 30
# 第3组
# t1 = 18
# t2 = 32
# t3 = 46
# t4 = 545
# t5 = 455
# t6 = 182
# t7 = 27
# t8 = 32
# t9 = 25
cncList = [CNC('CNC' + str(i)) for i in range(1, 9)]
rgv = RGV(t1, t2, t3, t7, t8, t9)
t = 0
n = 0
while t < duration:
cncLocList, errorCNC = sea_info(cncList)
if cncLocList:
calling_cnc = [cnc_calling[0] for cnc_calling in cncLocList]
calling_cnc = [cncList[int(name[-1])-1] for name in calling_cnc]
errorCncList = [cncList[int(name[-1])-1] for name in errorCNC] # 对象
rgv_delta, rgvMovingDelta, cncName = rgv.run(cncLocList)
cncProcessed = cncList[int(cncName[-1])-1] # RGV本次处理的CNC
if usedCase3:
errorOccur = cncProcessed.processing(0, t4, case3=True)
if errorOccur:
cncProcessed.error_repairing(0, repairing_time)
print(n, cncName, t + rgvMovingDelta, 'Error')
t += rgv_delta
else:
n += 1
print(n, cncName, t + rgvMovingDelta)
t += rgv_delta
else:
cncProcessed.processing(0, t4)
n += 1
print(n, cncName, t + rgvMovingDelta)
t += rgv_delta
for cncProcessing in cncList:
if (cncProcessing not in calling_cnc) and (cncProcessing not in errorCncList): # 正在运行的CNC
cncProcessing.processing(rgv_delta, t4)
elif cncProcessing in errorCncList: # 故障中的CNC
cncProcessing.error_repairing(rgv_delta, repairing_time)
else: # 所有未故障CNC都在加工
t += delta_time
for cnc in cncList:
if cnc not in errorCNC:
cnc.processing(delta_time, t4)
else:
cnc.error_repairing(delta_time, repairing_time) | CUMCM_2018/RGV_CNC/simulate.py | from CNC import CNC
from rgv import RGV
def sea_info(cnc_list):
    """Collect every CNC currently signalling the RGV and record faulted ones.

    Each CNC reports a (name, status) tuple via ``call_rgv()``.  Machines
    whose status is 'off' or 'empty' are requesting service and are returned
    as full (name, status) tuples; machines whose status is 'error' are
    returned by name only.  Any other status is ignored.
    """
    requesting = []
    faulted = []
    for machine in cnc_list:
        report = machine.call_rgv()
        state = report[1]
        if state == 'error':
            faulted.append(report[0])
        elif state in ('off', 'empty'):
            requesting.append(report)
    return requesting, faulted
delta_time = 1
duration = 8 * 60 * 60
repairing_time = 15 * 60
usedCase3 = False
# usedCase3 = True
# 第1组
t1 = 20
t2 = 33
t3 = 46
t4 = 700
t5 = 400
t6 = 378
t7 = 28
t8 = 31
t9 = 25
# 第2组
# t1 = 23
# t2 = 41
# t3 = 59
# t4 = 580
# t5 = 280
# t6 = 500
# t7 = 30
# t8 = 35
# t9 = 30
# 第3组
# t1 = 18
# t2 = 32
# t3 = 46
# t4 = 545
# t5 = 455
# t6 = 182
# t7 = 27
# t8 = 32
# t9 = 25
cncList = [CNC('CNC' + str(i)) for i in range(1, 9)]
rgv = RGV(t1, t2, t3, t7, t8, t9)
t = 0
n = 0
while t < duration:
cncLocList, errorCNC = sea_info(cncList)
if cncLocList:
calling_cnc = [cnc_calling[0] for cnc_calling in cncLocList]
calling_cnc = [cncList[int(name[-1])-1] for name in calling_cnc]
errorCncList = [cncList[int(name[-1])-1] for name in errorCNC] # 对象
rgv_delta, rgvMovingDelta, cncName = rgv.run(cncLocList)
cncProcessed = cncList[int(cncName[-1])-1] # RGV本次处理的CNC
if usedCase3:
errorOccur = cncProcessed.processing(0, t4, case3=True)
if errorOccur:
cncProcessed.error_repairing(0, repairing_time)
print(n, cncName, t + rgvMovingDelta, 'Error')
t += rgv_delta
else:
n += 1
print(n, cncName, t + rgvMovingDelta)
t += rgv_delta
else:
cncProcessed.processing(0, t4)
n += 1
print(n, cncName, t + rgvMovingDelta)
t += rgv_delta
for cncProcessing in cncList:
if (cncProcessing not in calling_cnc) and (cncProcessing not in errorCncList): # 正在运行的CNC
cncProcessing.processing(rgv_delta, t4)
elif cncProcessing in errorCncList: # 故障中的CNC
cncProcessing.error_repairing(rgv_delta, repairing_time)
else: # 所有未故障CNC都在加工
t += delta_time
for cnc in cncList:
if cnc not in errorCNC:
cnc.processing(delta_time, t4)
else:
cnc.error_repairing(delta_time, repairing_time) | 0.144511 | 0.141786 |
import pytest
from mocks import MockDAO
# RAMSTK Package Imports
from ramstk.models import RAMSTKEnvironmentRecord
@pytest.fixture
def mock_program_dao(monkeypatch):
    """Yield a MockDAO pre-loaded with three environment records.

    The three RAMSTKEnvironmentRecord rows are identical except for their
    environment_id (1, 2, 3).  The previous version built each record with
    an identical 14-line assignment block; a local factory removes the
    triplication while producing attribute-for-attribute identical records.
    """
    def _make_environment(environment_id):
        # Build one record with the shared default test values.
        _environment = RAMSTKEnvironmentRecord()
        _environment.revision_id = 1
        _environment.mission_id = 1
        _environment.phase_id = 1
        _environment.environment_id = environment_id
        _environment.name = "Condition Name"
        _environment.units = "Units"
        _environment.minimum = 0.0
        _environment.maximum = 0.0
        _environment.mean = 0.0
        _environment.variance = 0.0
        _environment.ramp_rate = 0.0
        _environment.low_dwell_time = 0.0
        _environment.high_dwell_time = 0.0
        return _environment

    DAO = MockDAO()
    DAO.table = [_make_environment(_id) for _id in (1, 2, 3)]

    yield DAO
@pytest.fixture(scope="function")
def test_attributes():
    """Yield the default attribute dictionary for one environment record."""
    _attributes = {
        "revision_id": 1,
        "mission_id": 1,
        "phase_id": 1,
        "environment_id": 1,
        "name": "Condition Name",
        "units": "Units",
    }
    # All measurement fields default to zero.
    _attributes.update(
        dict.fromkeys(
            [
                "minimum",
                "maximum",
                "mean",
                "variance",
                "ramp_rate",
                "low_dwell_time",
                "high_dwell_time",
            ],
            0.0,
        )
    )
    yield _attributes
@pytest.fixture(scope="function")
def test_recordmodel(mock_program_dao):
    """Get a record model instance for each test function."""
    # _all=False selects a single record from the mock DAO rather than
    # the whole table.
    dut = mock_program_dao.do_select_all(RAMSTKEnvironmentRecord, _all=False)
    yield dut
    # Delete the device under test.
del dut | tests/models/programdb/environment/conftest.py | import pytest
from mocks import MockDAO
# RAMSTK Package Imports
from ramstk.models import RAMSTKEnvironmentRecord
@pytest.fixture
def mock_program_dao(monkeypatch):
_environment_1 = RAMSTKEnvironmentRecord()
_environment_1.revision_id = 1
_environment_1.mission_id = 1
_environment_1.phase_id = 1
_environment_1.environment_id = 1
_environment_1.name = "Condition Name"
_environment_1.units = "Units"
_environment_1.minimum = 0.0
_environment_1.maximum = 0.0
_environment_1.mean = 0.0
_environment_1.variance = 0.0
_environment_1.ramp_rate = 0.0
_environment_1.low_dwell_time = 0.0
_environment_1.high_dwell_time = 0.0
_environment_2 = RAMSTKEnvironmentRecord()
_environment_2.revision_id = 1
_environment_2.mission_id = 1
_environment_2.phase_id = 1
_environment_2.environment_id = 2
_environment_2.name = "Condition Name"
_environment_2.units = "Units"
_environment_2.minimum = 0.0
_environment_2.maximum = 0.0
_environment_2.mean = 0.0
_environment_2.variance = 0.0
_environment_2.ramp_rate = 0.0
_environment_2.low_dwell_time = 0.0
_environment_2.high_dwell_time = 0.0
_environment_3 = RAMSTKEnvironmentRecord()
_environment_3.revision_id = 1
_environment_3.mission_id = 1
_environment_3.phase_id = 1
_environment_3.environment_id = 3
_environment_3.name = "Condition Name"
_environment_3.units = "Units"
_environment_3.minimum = 0.0
_environment_3.maximum = 0.0
_environment_3.mean = 0.0
_environment_3.variance = 0.0
_environment_3.ramp_rate = 0.0
_environment_3.low_dwell_time = 0.0
_environment_3.high_dwell_time = 0.0
DAO = MockDAO()
DAO.table = [
_environment_1,
_environment_2,
_environment_3,
]
yield DAO
@pytest.fixture(scope="function")
def test_attributes():
yield {
"revision_id": 1,
"mission_id": 1,
"phase_id": 1,
"environment_id": 1,
"name": "Condition Name",
"units": "Units",
"minimum": 0.0,
"maximum": 0.0,
"mean": 0.0,
"variance": 0.0,
"ramp_rate": 0.0,
"low_dwell_time": 0.0,
"high_dwell_time": 0.0,
}
@pytest.fixture(scope="function")
def test_recordmodel(mock_program_dao):
"""Get a record model instance for each test function."""
dut = mock_program_dao.do_select_all(RAMSTKEnvironmentRecord, _all=False)
yield dut
# Delete the device under test.
del dut | 0.59408 | 0.510985 |
import md5
import os
import random
import time
from tashi.aws.wsdl.AmazonEC2_services_server import *
from tashi.rpycservices.rpyctypes import *
from tashi.aws.util import *
import tashi.aws.util
import tashi
def getImages():
    """Map synthetic EC2 image ids to image file names.

    Scans the Tashi image directory (subdirectories are skipped); the id for
    each file is "ami-" followed by the first 8 hex digits of the md5 of its
    file name.
    """
    image_dir = "/mnt/merkabah/tashi/images/"
    images = {}
    for entry in os.listdir(image_dir):
        if os.path.isdir(image_dir + entry):
            continue
        images["ami-%8.8s" % (md5.md5(entry).hexdigest())] = entry
    return images
def makeTashiInstanceEC2Instance(inst):
    """Translate a Tashi Instance into an EC2 instancesSet item element.

    EC2-style identifiers are synthesized from Tashi fields: instanceId from
    the numeric VM id, imageId from the md5 of the first disk's URI (matching
    getImages()), and keyName from the owner's numeric userId.
    """
    res = RunInstancesResponseMsg()
    res = res.new_instancesSet()
    instanceItem = res.new_item()
    instanceItem.instanceId = "i-%8.8d" % (inst.id)
    instanceItem.imageId = "ami-%8.8s" % (md5.md5(inst.disks[0].uri).hexdigest())
    instanceItem.instanceState = instanceItem.new_instanceState()
    instanceItem.instanceState.code = inst.state
    instanceItem.instanceState.name = "%10.10s" % (tashi.vmStates[inst.state])
    instanceItem.privateDnsName = inst.name
    instanceItem.dnsName = str(inst.nics[0].ip)
    #instanceItem.reason = 'None'
    instanceItem.keyName = "%12.12d" % (inst.userId)
    instanceItem.amiLaunchIndex = str(inst.id)
    #instanceItem.productCodes = instanceItem.new_productCodes()
    #productItem = instanceItem.productCodes.new_item()
    #productItem.productCode = '774F4FF8'
    #instanceItem.productCodes.item = [productItem]
    # Sort the (type, (memory, cores)) entries from largest to smallest so
    # the loop below leaves mySize at the *smallest* type that still fits
    # the instance's memory and core requirements.
    sizeList = sizes.items()
    sizeList.sort(cmp=lambda x, y: cmp(cmp(x[1][0], y[1][0]) + cmp(x[1][1], y[1][1]), 0))
    sizeList.reverse()
    mySize = 'undef'
    for size in sizeList:
        if (inst.memory <= size[1][0] and inst.cores <= size[1][1]):
            mySize = size[0]
    instanceItem.instanceType = mySize
    # NOTE(review): launchTime is reported as "now", not the real launch time.
    instanceItem.launchTime = time.time()
    instanceItem.placement = instanceItem.new_placement()
    instanceItem.placement.availabilityZone = 'tashi'
    #instanceItem.kernelId = 'aki-ba3adfd3'
    #instanceItem.ramdiskId = 'ari-badbad00'
    #instanceItem.platform = 'Linux'
    instanceItem.monitoring = instanceItem.new_monitoring()
    instanceItem.monitoring.state = 'OFF'
    return instanceItem
def RunInstances(imageId, minCount, maxCount, instanceType='m1.small', groupSet=None, keyName=None, additionalInfo="", userData={'data':None}, addressingType="", placement={'availabilityZone':None}, kernelId="", ramdiskId="", blockDeviceMapping={'virtualName':None,'deviceName':None}, monitoring={'enabled':False}):
    """EC2 RunInstances endpoint: create one Tashi VM for the calling user.

    Only imageId, instanceType and additionalInfo (used as the VM name) are
    honoured; minCount/maxCount, keys, groups and the remaining EC2
    parameters are accepted for API compatibility but ignored.
    NOTE(review): the dict defaults are mutable default arguments; they are
    never mutated here, but None sentinels would be the safer idiom.
    """
    inst = Instance()
    inst.userId = userNameToId(tashi.aws.util.authorizedUser)
    res = RunInstancesResponseMsg()
    res.requestId = genRequestId()
    # VM name: caller-supplied additionalInfo, else user name + request id.
    if (additionalInfo == ""):
        inst.name = tashi.aws.util.authorizedUser + res.requestId
    else:
        inst.name = additionalInfo
    # Unknown instance types fall back to (0, 0) memory/cores.
    (inst.memory, inst.cores) = sizes.get(instanceType, (0, 0))
    dc = DiskConfiguration()
    images = getImages()
    # Accept either a known synthetic "ami-" id or a raw image URI.
    if (imageId in images):
        dc.uri = images[imageId]
    else:
        dc.uri = imageId
    dc.persistent = False
    inst.disks = [dc]
    nc = NetworkConfiguration()
    nc.network = 999
    # Random locally-administered MAC with the QEMU/KVM 52:54:00 prefix.
    nc.mac = '52:54:00:%2.2x:%2.2x:%2.2x' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    inst.nics = [nc]
    inst.hints = {}
    oldInst = inst
    inst = client.createVm(oldInst)
    # Static placeholder reservation/owner/group identifiers.
    res.reservationId = 'r-12345678'
    res.ownerId = 'UYY3TLBUXIEON5NQVUUX6OMPWBZIQNFM'
    res.groupSet = res.new_groupSet()
    item = res.groupSet.new_item()
    item.groupId = '1234'
    res.groupSet.item = [item]
    res.instancesSet = res.new_instancesSet()
    instanceItem = makeTashiInstanceEC2Instance(inst)
    res.instancesSet.item = [instanceItem]
    return res
def GetConsoleOutput():
    """EC2 GetConsoleOutput endpoint -- not supported by this backend."""
    raise NotImplementedError
def TerminateInstances(instancesSet={'item':{}}):
    """EC2 TerminateInstances: destroy each listed VM, reporting state changes.

    NOTE(review): mutable default argument; it is only read, never mutated.
    """
    res = TerminateInstancesResponseMsg()
    res.requestId = genRequestId()
    res.instancesSet = res.new_instancesSet()
    items = []
    if (instancesSet):
        for instanceId in instancesSet['item'].values():
            # Strip the "i-" prefix (all non-digits) to recover the Tashi VM id.
            thisInstanceId = int(filter(lambda x: x in "0123456789", instanceId))
            item = res.instancesSet.new_item()
            item.instanceId = str(instanceId)
            item.shutdownState = item.new_shutdownState()
            item.shutdownState.code = InstanceState.Exited
            item.shutdownState.name = tashi.vmStates[InstanceState.Exited]
            item.previousState = item.new_previousState()
            # previousState is reported as Running regardless of the actual
            # prior state of the VM.
            item.previousState.code = InstanceState.Running
            item.previousState.name = tashi.vmStates[InstanceState.Running]
            client.destroyVm(int(thisInstanceId))
            items.append(item)
    res.instancesSet.item = items
    return res
def RebootInstances():
    """EC2 RebootInstances endpoint -- not supported by this backend."""
    raise NotImplementedError
def DescribeInstances(instancesSet={}):
    """EC2 DescribeInstances: list the calling user's VMs as one reservation.

    The instancesSet filter argument is accepted but ignored; every instance
    owned by the authorized user is returned, sorted by VM id.
    """
    instances = client.getInstances()
    res = DescribeInstancesResponseMsg()
    res.requestId = genRequestId()
    res.reservationSet = res.new_reservationSet()
    item = res.reservationSet.new_item()
    # Static placeholder reservation/owner/group identifiers.
    item.reservationId = 'r-12345678'
    item.ownerId = 'UYY3TLBUXIEON5NQVUUX6OMPWBZIQNFM'
    item.groupSet = item.new_groupSet()
    groupItem = item.groupSet.new_item()
    groupItem.groupId = 'default'
    item.groupSet.item = [groupItem]
    item.instancesSet = item.new_instancesSet()
    item.instancesSet.item = []
    instances.sort(cmp=lambda x, y: cmp(x.id, y.id))
    # Only instances owned by the authenticated caller are reported.
    for inst in instances:
        userName = userIdToName(inst.userId)
        if (userName == tashi.aws.util.authorizedUser):
            instanceItem = makeTashiInstanceEC2Instance(inst)
            item.instancesSet.item.append(instanceItem)
    # For some reason, if item.instancesSet is empty,
    # "Server: Processing Failure is printed out on the command line.
    item.requesterId = '1234'
    res.reservationSet.item = [item]
    return res
functions = ['RunInstances', 'GetConsoleOutput', 'TerminateInstances', 'RebootInstances', 'DescribeInstances'] | src/tashi/aws/impl/instances.py |
import md5
import os
import random
import time
from tashi.aws.wsdl.AmazonEC2_services_server import *
from tashi.rpycservices.rpyctypes import *
from tashi.aws.util import *
import tashi.aws.util
import tashi
def getImages():
IMGDIR="/mnt/merkabah/tashi/images/"
imgList = os.listdir(IMGDIR)
images = {}
for img in imgList:
fullImg = IMGDIR + img
if (os.path.isdir(fullImg)):
continue
imageId = "ami-%8.8s" % (md5.md5(img).hexdigest())
images[imageId] = img
return images
def makeTashiInstanceEC2Instance(inst):
res = RunInstancesResponseMsg()
res = res.new_instancesSet()
instanceItem = res.new_item()
instanceItem.instanceId = "i-%8.8d" % (inst.id)
instanceItem.imageId = "ami-%8.8s" % (md5.md5(inst.disks[0].uri).hexdigest())
instanceItem.instanceState = instanceItem.new_instanceState()
instanceItem.instanceState.code = inst.state
instanceItem.instanceState.name = "%10.10s" % (tashi.vmStates[inst.state])
instanceItem.privateDnsName = inst.name
instanceItem.dnsName = str(inst.nics[0].ip)
#instanceItem.reason = 'None'
instanceItem.keyName = "%12.12d" % (inst.userId)
instanceItem.amiLaunchIndex = str(inst.id)
#instanceItem.productCodes = instanceItem.new_productCodes()
#productItem = instanceItem.productCodes.new_item()
#productItem.productCode = '774F4FF8'
#instanceItem.productCodes.item = [productItem]
sizeList = sizes.items()
sizeList.sort(cmp=lambda x, y: cmp(cmp(x[1][0], y[1][0]) + cmp(x[1][1], y[1][1]), 0))
sizeList.reverse()
mySize = 'undef'
for size in sizeList:
if (inst.memory <= size[1][0] and inst.cores <= size[1][1]):
mySize = size[0]
instanceItem.instanceType = mySize
instanceItem.launchTime = time.time()
instanceItem.placement = instanceItem.new_placement()
instanceItem.placement.availabilityZone = 'tashi'
#instanceItem.kernelId = 'aki-ba3adfd3'
#instanceItem.ramdiskId = 'ari-badbad00'
#instanceItem.platform = 'Linux'
instanceItem.monitoring = instanceItem.new_monitoring()
instanceItem.monitoring.state = 'OFF'
return instanceItem
def RunInstances(imageId, minCount, maxCount, instanceType='m1.small', groupSet=None, keyName=None, additionalInfo="", userData={'data':None}, addressingType="", placement={'availabilityZone':None}, kernelId="", ramdiskId="", blockDeviceMapping={'virtualName':None,'deviceName':None}, monitoring={'enabled':False}):
inst = Instance()
inst.userId = userNameToId(tashi.aws.util.authorizedUser)
res = RunInstancesResponseMsg()
res.requestId = genRequestId()
if (additionalInfo == ""):
inst.name = tashi.aws.util.authorizedUser + res.requestId
else:
inst.name = additionalInfo
(inst.memory, inst.cores) = sizes.get(instanceType, (0, 0))
dc = DiskConfiguration()
images = getImages()
if (imageId in images):
dc.uri = images[imageId]
else:
dc.uri = imageId
dc.persistent = False
inst.disks = [dc]
nc = NetworkConfiguration()
nc.network = 999
nc.mac = '52:54:00:%2.2x:%2.2x:%2.2x' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
inst.nics = [nc]
inst.hints = {}
oldInst = inst
inst = client.createVm(oldInst)
res.reservationId = 'r-12345678'
res.ownerId = 'UYY3TLBUXIEON5NQVUUX6OMPWBZIQNFM'
res.groupSet = res.new_groupSet()
item = res.groupSet.new_item()
item.groupId = '1234'
res.groupSet.item = [item]
res.instancesSet = res.new_instancesSet()
instanceItem = makeTashiInstanceEC2Instance(inst)
res.instancesSet.item = [instanceItem]
return res
def GetConsoleOutput():
raise NotImplementedError
def TerminateInstances(instancesSet={'item':{}}):
res = TerminateInstancesResponseMsg()
res.requestId = genRequestId()
res.instancesSet = res.new_instancesSet()
items = []
if (instancesSet):
for instanceId in instancesSet['item'].values():
thisInstanceId = int(filter(lambda x: x in "0123456789", instanceId))
item = res.instancesSet.new_item()
item.instanceId = str(instanceId)
item.shutdownState = item.new_shutdownState()
item.shutdownState.code = InstanceState.Exited
item.shutdownState.name = tashi.vmStates[InstanceState.Exited]
item.previousState = item.new_previousState()
item.previousState.code = InstanceState.Running
item.previousState.name = tashi.vmStates[InstanceState.Running]
client.destroyVm(int(thisInstanceId))
items.append(item)
res.instancesSet.item = items
return res
def RebootInstances():
raise NotImplementedError
def DescribeInstances(instancesSet={}):
instances = client.getInstances()
res = DescribeInstancesResponseMsg()
res.requestId = genRequestId()
res.reservationSet = res.new_reservationSet()
item = res.reservationSet.new_item()
item.reservationId = 'r-12345678'
item.ownerId = 'UYY3TLBUXIEON5NQVUUX6OMPWBZIQNFM'
item.groupSet = item.new_groupSet()
groupItem = item.groupSet.new_item()
groupItem.groupId = 'default'
item.groupSet.item = [groupItem]
item.instancesSet = item.new_instancesSet()
item.instancesSet.item = []
instances.sort(cmp=lambda x, y: cmp(x.id, y.id))
for inst in instances:
userName = userIdToName(inst.userId)
if (userName == tashi.aws.util.authorizedUser):
instanceItem = makeTashiInstanceEC2Instance(inst)
item.instancesSet.item.append(instanceItem)
# For some reason, if item.instancesSet is empty,
# "Server: Processing Failure is printed out on the command line.
item.requesterId = '1234'
res.reservationSet.item = [item]
return res
functions = ['RunInstances', 'GetConsoleOutput', 'TerminateInstances', 'RebootInstances', 'DescribeInstances'] | 0.140838 | 0.071106 |
from __future__ import absolute_import, division, print_function
import numpy as np
import time
import argparse
import sys
import os
import json
from make_data import sample_IHDP
from utils.metrics import compute_PEHE, mean_confidence_interval
from models.causal_models import CMGP
import initpath_alg
initpath_alg.init_sys_path()
import utilmlab
def run_experiment(fn_data, mode="CMGP", test_frac=0.1):
    """Run one CMGP fit on a fresh IHDP split and return (train, test) PEHE.

    A new train/test split is sampled from the IHDP data at *fn_data* on
    every call, so repeated calls give independent experiment runs.
    """
    train_data, test_data = sample_IHDP(fn_data, test_frac=test_frac)
    # Split columns: 0 = covariates, 1 = treatment, 2 = outcome,
    # 6 = true treatment effect (assumed from the indexing -- TODO confirm
    # against sample_IHDP).
    X_train, W_train, Y_train = train_data[0], train_data[1], train_data[2]
    T_true_train = train_data[6]
    X_test, T_true_test = test_data[0], test_data[6]
    model = CMGP(dim=25, mode=mode)
    model.fit(X_train, Y_train, W_train)
    te_est_test = model.predict(X_test)[0]
    te_est_train = model.predict(X_train)[0]
    return (compute_PEHE(te_est_train, T_true_train),
            compute_PEHE(te_est_test, T_true_test))
def main(args, fn_data):
    """Run args.num_exp CMGP experiments and aggregate PEHE statistics.

    Returns a dict holding the per-run PEHE lists ('train'/'test'),
    mean/confidence-interval summaries ('PEHE_train'/'PEHE_test') and the
    elapsed wall-clock time.  When args.o is set, the partial results dict
    is re-dumped to that JSON file on every iteration.
    """
    PEHE_train_ = []
    PEHE_test_ = []
    results_d = {}
    time_start = time.time()
    for _ in range(args.num_exp):
        pehe_train_curr, pehe_test_curr = run_experiment(fn_data, mode=args.mode, test_frac=args.test_frac)
        PEHE_train_.append(pehe_train_curr)
        PEHE_test_.append(pehe_test_curr)
        print("Experiment: %d (train) \tPEHE: %.3f \t--- (test) \tPEHE: %.3f \t---" % (_, pehe_train_curr, pehe_test_curr))
        # on purpose results are calculated within the loop so intermediate results become available while the experiment is ongoing
        results_d['train'] = PEHE_train_
        results_d['test'] = PEHE_test_
        # Drop NaN runs before computing the summary statistics.
        PEHE_train_np = np.array(PEHE_train_)[~np.isnan(np.array(PEHE_train_))]
        PEHE_test_np = np.array(PEHE_test_)[~np.isnan(np.array(PEHE_test_))]
        time_exe = time.time() - time_start
        results_d['PEHE_train'] = mean_confidence_interval(PEHE_train_np)
        results_d['PEHE_test'] = mean_confidence_interval(PEHE_test_np)
        results_d['time_exe'] = time_exe
        if args.o is not None:
            with open(args.o, 'w') as fp:
                json.dump(results_d, fp)
    print('exe time {:0.0f}s'.format(time_exe))
    print("Final results|| Train PEHE = %.3f +/- %.3f --- Test PEHE = %.3f +/- %.3f" % (mean_confidence_interval(PEHE_train_np)[0],
                                                                                        mean_confidence_interval(PEHE_train_np)[1],
                                                                                        mean_confidence_interval(PEHE_test_np)[0],
                                                                                        mean_confidence_interval(PEHE_test_np)[1]))
    return results_d
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Causal Multi-task Gaussian Processes")
    parser.add_argument("-n", "--num-exp", default=10, type=int)
    parser.add_argument("-m", "--mode", default="CMGP", type=str)
    parser.add_argument("-t", "--test-frac", default=0.1, type=float)
    parser.add_argument("-o")  # optional output JSON path
    args = parser.parse_args()
    # The IHDP covariates file must already be present under the data dir.
    fn_data = '{}/ihdp/ihdp_covariates.csv'.format(utilmlab.get_data_dir())
    if not os.path.isfile(fn_data):
        print('Error: this implementation requires the IHDP dataset'
              ', please refer to the README.md for more details.')
        sys.exit(0)
    results_d = main(args, fn_data)
    # NOTE(review): main() already wrote results_d to args.o inside its
    # loop; this second dump rewrites the same content.
    if args.o is not None:
        with open(args.o, 'w') as fp:
json.dump(results_d, fp) | alg/causal_multitask_gaussian_processes_ite/test_models.py |
from __future__ import absolute_import, division, print_function
import numpy as np
import time
import argparse
import sys
import os
import json
from make_data import sample_IHDP
from utils.metrics import compute_PEHE, mean_confidence_interval
from models.causal_models import CMGP
import initpath_alg
initpath_alg.init_sys_path()
import utilmlab
def run_experiment(fn_data, mode="CMGP", test_frac=0.1):
train_data, test_data = sample_IHDP(fn_data, test_frac=test_frac)
X_train, W_train, Y_train, T_true_train = train_data[0], train_data[1], train_data[2], train_data[6]
X_test, T_true_test = test_data[0], test_data[6]
model = CMGP(dim=25, mode=mode)
model.fit(X_train, Y_train, W_train)
TE_est_test = model.predict(X_test)[0]
TE_est_train = model.predict(X_train)[0]
PEHE_train = compute_PEHE(TE_est_train, T_true_train)
PEHE_test = compute_PEHE(TE_est_test, T_true_test)
return PEHE_train, PEHE_test
def main(args, fn_data):
PEHE_train_ = []
PEHE_test_ = []
results_d = {}
time_start = time.time()
for _ in range(args.num_exp):
pehe_train_curr, pehe_test_curr = run_experiment(fn_data, mode=args.mode, test_frac=args.test_frac)
PEHE_train_.append(pehe_train_curr)
PEHE_test_.append(pehe_test_curr)
print("Experiment: %d (train) \tPEHE: %.3f \t--- (test) \tPEHE: %.3f \t---" % (_, pehe_train_curr, pehe_test_curr))
# on purpose results are calculated within the loop so intermediate results become available while the experiment is ongoing
results_d['train'] = PEHE_train_
results_d['test'] = PEHE_test_
PEHE_train_np = np.array(PEHE_train_)[~np.isnan(np.array(PEHE_train_))]
PEHE_test_np = np.array(PEHE_test_)[~np.isnan(np.array(PEHE_test_))]
time_exe = time.time() - time_start
results_d['PEHE_train'] = mean_confidence_interval(PEHE_train_np)
results_d['PEHE_test'] = mean_confidence_interval(PEHE_test_np)
results_d['time_exe'] = time_exe
if args.o is not None:
with open(args.o, 'w') as fp:
json.dump(results_d, fp)
print('exe time {:0.0f}s'.format(time_exe))
print("Final results|| Train PEHE = %.3f +/- %.3f --- Test PEHE = %.3f +/- %.3f" % (mean_confidence_interval(PEHE_train_np)[0],
mean_confidence_interval(PEHE_train_np)[1],
mean_confidence_interval(PEHE_test_np)[0],
mean_confidence_interval(PEHE_test_np)[1]))
return results_d
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Causal Multi-task Gaussian Processes")
parser.add_argument("-n", "--num-exp", default=10, type=int)
parser.add_argument("-m", "--mode", default="CMGP", type=str)
parser.add_argument("-t", "--test-frac", default=0.1, type=float)
parser.add_argument("-o")
args = parser.parse_args()
fn_data = '{}/ihdp/ihdp_covariates.csv'.format(utilmlab.get_data_dir())
if not os.path.isfile(fn_data):
print('Error: this implementation requires the IHDP dataset'
', please refer to the README.md for more details.')
sys.exit(0)
results_d = main(args, fn_data)
if args.o is not None:
with open(args.o, 'w') as fp:
json.dump(results_d, fp) | 0.419291 | 0.231571 |
from keras.layers import Input, Conv2D, Conv2DTranspose, Concatenate, MaxPooling2D, UpSampling2D, Add
from keras.applications.vgg19 import VGG19
from keras.models import Model
from DWT import DWT_Pooling, IWT_UpSampling
def down_block(input_layer, filters, kernel_size=(3, 3), activation="relu"):
    """Encoder block: four stacked convolutions followed by DWT pooling.

    Returns (pre-pool features, pooled features); the pre-pool tensor serves
    as the skip connection for the matching up_block.
    """
    x = Conv2D(filters, (3, 3), padding="same", activation=activation,
               data_format='channels_last')(input_layer)
    # Three "valid" convolutions widen the channels: 1x, 2x, 4x filters.
    for width in (filters, filters * 2, filters * 4):
        x = Conv2D(width, kernel_size, padding="valid", activation=activation,
                   data_format='channels_last')(x)
    return x, DWT_Pooling()(x)
def up_block(input_layer, residual_layer, filters, kernel_size=(3,3), activation="relu"):
    """Decoder block: inverse-DWT upsampling, residual addition, then three
    transposed convolutions narrowing to 3 output channels (RGB).

    Fix: the *activation* parameter was accepted but ignored -- every
    transposed convolution was hard-wired to 'relu'.  It is now honoured;
    behavior is unchanged for all existing callers, which rely on the
    default activation="relu".
    """
    output = IWT_UpSampling()(input_layer)
    output = Add()([residual_layer, output])
    output = Conv2DTranspose(filters*2, kernel_size, activation=activation, padding='valid', data_format='channels_last')(output)
    output = Conv2DTranspose(filters, kernel_size, activation=activation, padding='valid', data_format='channels_last')(output)
    output = Conv2DTranspose(3, kernel_size, activation=activation, padding='valid', data_format='channels_last')(output)
    return output
def build_vgg():
    """Return a frozen VGG19 feature extractor truncated at block3_conv4."""
    backbone = VGG19(include_top=False, weights='imagenet')
    backbone.trainable = False
    features = backbone.get_layer('block3_conv4').output
    return Model(inputs=backbone.input, outputs=features)
def build_mbllen(input_shape):
    """Build the MBLLEN-style enhancement network.

    A feature-extraction Conv2D stack feeds a sequence of enhancement
    modules (EM); every EM output is concatenated channel-wise and fused by
    a final 1x1 convolution down to 3 channels.
    """
    def EM(input, kernel_size, channel):
        # Enhancement module: one encoder block plus its matching decoder.
        down1, pool1 = down_block(input, channel, (kernel_size, kernel_size))
        res = up_block(pool1, down1, channel, (kernel_size, kernel_size))
        return res
    inputs = Input(shape=input_shape)
    FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(inputs)
    EM_com = EM(FEM, 5, 8)
    # NOTE(review): the nested 3x3 loops append 9 further enhancement
    # modules; confirm the double loop (rather than a single range(9)) is
    # intentional.
    for j in range(3):
        for i in range(0, 3):
            FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(FEM)
            EM1 = EM(FEM, 5, 8)
            EM_com = Concatenate(axis=3)([EM_com, EM1])
    # Fuse all concatenated EM outputs back to a 3-channel image.
    outputs = Conv2D(3, (1, 1), activation='relu', padding='same', data_format='channels_last')(EM_com)
return Model(inputs, outputs) | main/Network_2.py | from keras.layers import Input, Conv2D, Conv2DTranspose, Concatenate, MaxPooling2D, UpSampling2D, Add
from keras.applications.vgg19 import VGG19
from keras.models import Model
from DWT import DWT_Pooling, IWT_UpSampling
def down_block(input_layer, filters, kernel_size=(3,3), activation="relu"):
output = Conv2D(filters, (3,3), padding="same", activation=activation, data_format='channels_last')(input_layer)
output = Conv2D(filters, kernel_size, padding="valid", activation=activation, data_format='channels_last')(output)
output = Conv2D(filters*2, kernel_size, padding="valid", activation=activation, data_format='channels_last')(output)
output = Conv2D(filters*4, kernel_size, padding="valid", activation=activation, data_format='channels_last')(output)
return output, DWT_Pooling()(output)
def up_block(input_layer, residual_layer, filters, kernel_size=(3,3),activation="relu"):
output = IWT_UpSampling()(input_layer)
output = Add()([residual_layer,output])
output = Conv2DTranspose(filters*2, kernel_size, activation='relu', padding='valid', data_format='channels_last')(output)
output = Conv2DTranspose(filters, kernel_size, activation='relu', padding='valid', data_format='channels_last')(output)
output = Conv2DTranspose(3, kernel_size, activation='relu', padding='valid', data_format='channels_last')(output)
return output
def build_vgg():
vgg_model = VGG19(include_top=False, weights='imagenet')
vgg_model.trainable = False
return Model(inputs=vgg_model.input, outputs=vgg_model.get_layer('block3_conv4').output)
def build_mbllen(input_shape):
def EM(input, kernel_size, channel):
down1, pool1 = down_block(input, channel, (kernel_size, kernel_size))
res = up_block(pool1, down1, channel, (kernel_size, kernel_size))
return res
inputs = Input(shape=input_shape)
FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(inputs)
EM_com = EM(FEM, 5, 8)
for j in range(3):
for i in range(0, 3):
FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(FEM)
EM1 = EM(FEM, 5, 8)
EM_com = Concatenate(axis=3)([EM_com, EM1])
outputs = Conv2D(3, (1, 1), activation='relu', padding='same', data_format='channels_last')(EM_com)
return Model(inputs, outputs) | 0.924511 | 0.598987 |
from create import random_image
import glob
import random
import numpy as np
from PIL import Image
from keras.models import Sequential, load_model
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
# Template image every captcha is drawn onto; its size fixes the CNN input shape.
background = Image.open('background.png')
# Class alphabet: 36 classes = 26 lowercase letters + 10 digits.
LETTERS = 'abcdefghijklmnopqrstuvwxyz0123456789'
def string_to_onehot(string):
    """One-hot encode a single captcha character over LETTERS (36 classes).

    NOTE(review): the active line encodes string[1] (the *second* character),
    the same index as the commented-out arr1 below; the 4-character variant
    this was trimmed from presumably used string[0] for arr0 -- confirm the
    index is intentional.
    """
    arr0 = np.array([int(string[1] == c) for c in LETTERS])
    # arr1 = np.array([int(string[1] == c) for c in LETTERS])
    # arr2 = np.array([int(string[2] == c) for c in LETTERS])
    # arr3 = np.array([int(string[3] == c) for c in LETTERS])
    return arr0 #np.append(arr0, [arr1, arr2, arr3])
def onehot_to_string(vector):
    """Decode a 36-way score vector back to its character in LETTERS.

    Takes the argmax over the first 36 entries.  (The commented-out slices
    in the original handled the 4-character captcha variant; only one
    character is decoded here.)
    """
    scores = list(vector[0:36])
    best = scores.index(max(scores))
    return LETTERS[best]
def image_training_generator(per):
    """Yield endless batches of *per* (image array, one-hot label) pairs.

    Every batch is freshly generated by random_image(background), so the
    generator never repeats a fixed dataset.
    """
    while True:
        images_and_answers = [random_image(background) for i in range(per)]
        images, answers = zip(*images_and_answers)
        yield (np.array([np.array(image) for image in images]),
               np.array([string_to_onehot(answer) for answer in answers]))
from keras import backend as K
def reset_weights(model):
    """Re-initialize the weights of every Dense layer in *model*, in place.

    Non-Dense layers (conv, pooling, ...) are left untouched and reported.
    NOTE(review): relies on the old TF1-style layer.W / layer.b attributes
    and K.get_session(); this only works with legacy Keras backends.
    """
    session = K.get_session()
    for layer in model.layers:
        if isinstance(layer, Dense):
            old = layer.get_weights()
            # Re-run the initializer ops so the layer gets fresh random weights.
            layer.W.initializer.run(session=session)
            layer.b.initializer.run(session=session)
            # Prints False when the weights actually changed.
            print(np.array_equal(old, layer.get_weights())," after initializer run")
        else:
            print(layer, "not reinitialized")
def create_model():
    """Build and compile the captcha CNN: three conv/pool stages plus a
    36-way softmax classifier (one class per character in LETTERS)."""
    print "Creating Model..."
    model = Sequential()
    # Input is a full background-sized RGB image (PIL size is (w, h), so
    # it is reversed to (h, w) for the input shape).
    model.add(Convolution2D(32, 3, 3, input_shape=(background.size[::-1] + (3,))))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="tf"))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="tf"))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="tf"))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(36))
    model.add(Activation('softmax'))
    print "Compiling Model..."
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
def train(model):
    """Train *model* on generated captchas, save it, and show one prediction."""
    model.fit_generator(
        image_training_generator(1000),
        samples_per_epoch=100000,
        nb_epoch=20,
        validation_data=image_training_generator(1),
        nb_val_samples=100)
    print "Saving model..."
    model.save('model.h5')
    # Sanity check: predict one random held-out test image.
    test = Image.open(random.choice(glob.glob('tests/*.jpeg')))
    test.show()
    result = model.predict(np.array([np.array(test)]))[0]
    print onehot_to_string(result)
def main():
    """Reload the saved model, reset its Dense layers, retrain, then run an
    interactive prediction loop over random test images."""
    model = load_model('Definitely_good_model.h5')
    reset_weights(model)
    train(model)
    while True:
        test = Image.open(random.choice(glob.glob('tests/*.jpeg')))
        test.show()
        result = model.predict(np.array([np.array(test)]))[0]
        print onehot_to_string(result)
        raw_input("Press enter to continue")
if __name__ == '__main__':
main() | train.py | from create import random_image
import glob
import random
import numpy as np
from PIL import Image
from keras.models import Sequential, load_model
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
background = Image.open('background.png')
LETTERS = 'abcdefghijklmnopqrstuvwxyz0123456789'
def string_to_onehot(string):
arr0 = np.array([int(string[1] == c) for c in LETTERS])
# arr1 = np.array([int(string[1] == c) for c in LETTERS])
# arr2 = np.array([int(string[2] == c) for c in LETTERS])
# arr3 = np.array([int(string[3] == c) for c in LETTERS])
return arr0 #np.append(arr0, [arr1, arr2, arr3])
def onehot_to_string(vector):
_1 = vector[0:36]
# _2 = vector[36:72]
# _3 = vector[72:108]
# _4 = vector[108:144]
return LETTERS[list(_1).index(max(_1))] #+ LETTERS[list(_2).index(max(_2))] + LETTERS[list(_3).index(max(_3))] + LETTERS[list(_4).index(max(_4))]
def image_training_generator(per):
while True:
images_and_answers = [random_image(background) for i in range(per)]
images, answers = zip(*images_and_answers)
yield (np.array([np.array(image) for image in images]),
np.array([string_to_onehot(answer) for answer in answers]))
from keras import backend as K
def reset_weights(model):
session = K.get_session()
for layer in model.layers:
if isinstance(layer, Dense):
old = layer.get_weights()
layer.W.initializer.run(session=session)
layer.b.initializer.run(session=session)
print(np.array_equal(old, layer.get_weights())," after initializer run")
else:
print(layer, "not reinitialized")
def create_model():
print "Creating Model..."
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(background.size[::-1] + (3,))))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="tf"))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="tf"))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="tf"))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(36))
model.add(Activation('softmax'))
print "Compiling Model..."
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
return model
def train(model):
model.fit_generator(
image_training_generator(1000),
samples_per_epoch=100000,
nb_epoch=20,
validation_data=image_training_generator(1),
nb_val_samples=100)
print "Saving model..."
model.save('model.h5')
test = Image.open(random.choice(glob.glob('tests/*.jpeg')))
test.show()
result = model.predict(np.array([np.array(test)]))[0]
print onehot_to_string(result)
def main():
model = load_model('Definitely_good_model.h5')
reset_weights(model)
train(model)
while True:
test = Image.open(random.choice(glob.glob('tests/*.jpeg')))
test.show()
result = model.predict(np.array([np.array(test)]))[0]
print onehot_to_string(result)
raw_input("Press enter to continue")
if __name__ == '__main__':
main() | 0.668447 | 0.455804 |
import py_cui
class StatusBar:
"""Very simple class representing a status bar
Attributes
----------
text : str
status bar text
color : py_cui.COLOR
color to display the statusbar
root : py_cui.PyCUI
Main PyCUI object reference
is_title_bar : bool
Is the StatusBar displayed on the top of the grid
"""
def __init__(self, text: str, color: int, root: 'py_cui.PyCUI', is_title_bar: bool=False):
"""Initializer for statusbar
"""
self.__text = text
self.__color = color
self.__height = 1
self.__root = root
self.__is_title_bar = is_title_bar
def get_color(self) -> int:
"""Getter for status bar color
Returns
-------
color : int
statusbar color
"""
return self.__color
def get_text(self) -> str:
"""Getter for status bar text
Returns
-------
text : str
The statusbar text
"""
return self.__text
def set_color(self, color) -> None:
"""Setter for statusbar color
Parameters
----------
color : int
new statusbar color code
"""
self.__color = color
def set_text(self, text: str) -> None :
"""Sets the statusbar text
Parameters
----------
text : str
New statusbar text
"""
self.__text = text
def get_height(self) -> int :
"""Getter for status bar height in row
Returns
-------
height : int
The statusbar height in row
"""
return self.__height
def show(self) -> None:
"""Sets the status bar height to 1"""
self.__height = 1
self._refresh_root_size()
def hide(self) -> None:
"""Sets the status bar height to 0"""
self.__height = 0
self._refresh_root_size()
def _refresh_root_size(self) -> None:
"""Resets the grid's title bar offset if needed and calls a UI size update."""
if self.__is_title_bar:
self.__root._grid._title_bar_offset = self.__height
self.__root._refresh_height_width() | py_cui/statusbar.py | import py_cui
class StatusBar:
"""Very simple class representing a status bar
Attributes
----------
text : str
status bar text
color : py_cui.COLOR
color to display the statusbar
root : py_cui.PyCUI
Main PyCUI object reference
is_title_bar : bool
Is the StatusBar displayed on the top of the grid
"""
def __init__(self, text: str, color: int, root: 'py_cui.PyCUI', is_title_bar: bool=False):
"""Initializer for statusbar
"""
self.__text = text
self.__color = color
self.__height = 1
self.__root = root
self.__is_title_bar = is_title_bar
def get_color(self) -> int:
"""Getter for status bar color
Returns
-------
color : int
statusbar color
"""
return self.__color
def get_text(self) -> str:
"""Getter for status bar text
Returns
-------
text : str
The statusbar text
"""
return self.__text
def set_color(self, color) -> None:
"""Setter for statusbar color
Parameters
----------
color : int
new statusbar color code
"""
self.__color = color
def set_text(self, text: str) -> None :
"""Sets the statusbar text
Parameters
----------
text : str
New statusbar text
"""
self.__text = text
def get_height(self) -> int :
"""Getter for status bar height in row
Returns
-------
height : int
The statusbar height in row
"""
return self.__height
def show(self) -> None:
"""Sets the status bar height to 1"""
self.__height = 1
self._refresh_root_size()
def hide(self) -> None:
"""Sets the status bar height to 0"""
self.__height = 0
self._refresh_root_size()
def _refresh_root_size(self) -> None:
"""Resets the grid's title bar offset if needed and calls a UI size update."""
if self.__is_title_bar:
self.__root._grid._title_bar_offset = self.__height
self.__root._refresh_height_width() | 0.811825 | 0.361616 |
<<<<<<< HEAD
#### Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health
### Cedric subproject: Developing better ways to measure equity in health using the Gini coefficient
## Florida Charts Census Tract Mortality Data: Pyhton Computing Language Code Script by DrewC!
### Step 1: Import Libraries and Import Dataset
## Import Standard Libraries
import os # Inlcuded in every script DC!
import pandas as pd # Incldued in every code script for DC!
import numpy as np # Inclduded in every code script DC!
import scipy as sp # Incldued in every code script for DC!
## Import Datasets
os.chdir("C:/Users/drewc/GitHub/HNB") # Set wd to project repository
df_1 = pd.read_csv("_data/flcharts_50_stage.csv") # Import dataset from _data folder
df_2 = pd.read_csv("_data/acs_tract_stage.csv", low_memory = False) # Import dataset from _data folder
## Tidy Data Types, Missing Values, and Variable Names
## Verify
df_1.info() # Get class, memory, and column info: names, data types, obs.
df_2.info() # Get class, memory, and column info: names, data types, obs.
### Step 2: Join Datasets
## Join by Tract
df_join = pd.merge(df_1, df_2, on = "Tract", how = "inner")
## Verify
df_join.info() # Get class, memory, and column info: names, data types, obs.
## Export to CSV
df_join.to_csv(r"_data/flcharts_50_acs.csv") # Clean in excel and select variable
=======
#### Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health
### Cedric subproject: Developing better ways to measure equity in health using the Gini coefficient
## Florida Charts Census Tract Mortality Data: Pyhton Computing Language Code Script by DrewC!
### Step 1: Import Libraries and Import Dataset
## Import Standard Libraries
import os # Inlcuded in every script DC!
import pandas as pd # Incldued in every code script for DC!
import numpy as np # Inclduded in every code script DC!
import scipy as sp # Incldued in every code script for DC!
## Import Datasets
os.chdir("C:/Users/drewc/GitHub/HNB") # Set wd to project repository
df_1 = pd.read_csv("_data/flcharts_50_stage.csv") # Import dataset from _data folder
df_2 = pd.read_csv("_data/acs_tract_stage.csv", low_memory = False) # Import dataset from _data folder
## Tidy Data Types, Missing Values, and Variable Names
## Verify
df_1.info() # Get class, memory, and column info: names, data types, obs.
df_2.info() # Get class, memory, and column info: names, data types, obs.
### Step 2: Join Datasets
## Join by Tract
df_join = pd.merge(df_1, df_2, on = "Tract", how = "inner")
## Verify
df_join.info() # Get class, memory, and column info: names, data types, obs.
## Export to CSV
df_join.to_csv(r"_data/flcharts_50_acs.csv") # Clean in excel and select variable
>>>>>>> 321eaea894d29a3f212ee3e111b86695f46cf6db | _archive/_archive/neville_acs_flcharts_join.py | <<<<<<< HEAD
#### Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health
### Cedric subproject: Developing better ways to measure equity in health using the Gini coefficient
## Florida Charts Census Tract Mortality Data: Pyhton Computing Language Code Script by DrewC!
### Step 1: Import Libraries and Import Dataset
## Import Standard Libraries
import os # Inlcuded in every script DC!
import pandas as pd # Incldued in every code script for DC!
import numpy as np # Inclduded in every code script DC!
import scipy as sp # Incldued in every code script for DC!
## Import Datasets
os.chdir("C:/Users/drewc/GitHub/HNB") # Set wd to project repository
df_1 = pd.read_csv("_data/flcharts_50_stage.csv") # Import dataset from _data folder
df_2 = pd.read_csv("_data/acs_tract_stage.csv", low_memory = False) # Import dataset from _data folder
## Tidy Data Types, Missing Values, and Variable Names
## Verify
df_1.info() # Get class, memory, and column info: names, data types, obs.
df_2.info() # Get class, memory, and column info: names, data types, obs.
### Step 2: Join Datasets
## Join by Tract
df_join = pd.merge(df_1, df_2, on = "Tract", how = "inner")
## Verify
df_join.info() # Get class, memory, and column info: names, data types, obs.
## Export to CSV
df_join.to_csv(r"_data/flcharts_50_acs.csv") # Clean in excel and select variable
=======
#### Healthy Neighborhoods Project: Using Ecological Data to Improve Community Health
### Cedric subproject: Developing better ways to measure equity in health using the Gini coefficient
## Florida Charts Census Tract Mortality Data: Pyhton Computing Language Code Script by DrewC!
### Step 1: Import Libraries and Import Dataset
## Import Standard Libraries
import os # Inlcuded in every script DC!
import pandas as pd # Incldued in every code script for DC!
import numpy as np # Inclduded in every code script DC!
import scipy as sp # Incldued in every code script for DC!
## Import Datasets
os.chdir("C:/Users/drewc/GitHub/HNB") # Set wd to project repository
df_1 = pd.read_csv("_data/flcharts_50_stage.csv") # Import dataset from _data folder
df_2 = pd.read_csv("_data/acs_tract_stage.csv", low_memory = False) # Import dataset from _data folder
## Tidy Data Types, Missing Values, and Variable Names
## Verify
df_1.info() # Get class, memory, and column info: names, data types, obs.
df_2.info() # Get class, memory, and column info: names, data types, obs.
### Step 2: Join Datasets
## Join by Tract
df_join = pd.merge(df_1, df_2, on = "Tract", how = "inner")
## Verify
df_join.info() # Get class, memory, and column info: names, data types, obs.
## Export to CSV
df_join.to_csv(r"_data/flcharts_50_acs.csv") # Clean in excel and select variable
>>>>>>> 321eaea894d29a3f212ee3e111b86695f46cf6db | 0.281406 | 0.557845 |
class Element():
"""
Represents <name>body</name>.
(Where `body' is either a string or a list of sub-elements.)
"""
@property
def tag(self): return self.name
def __init__(self, name, body):
"""Initialize with the tag's name and the body (i.e. content)."""
if body == []:
# Empty element.
self.text = None
elif type(body) is not list:
# String element: decode body.
body = decode_entities(body)
self.text = body
self.name = name
self.body = body
def find(self, name):
"""Get first matching child element by name."""
for x in self.findall(name):
return x
def findall(self, name):
"""Get matching child elements by name."""
return list(self.findall_(name))
def findall_(self, name):
"""Get matching child elements by name (generator variant)."""
return (el for el in self.body if el.name == name)
# For debugging convenience:
def __str__(self):
if type(self.body) is list:
return '<{}>{}</{}>'.format(self.name, ''.join(map(str, self.body)), self.name)
else:
return '<{}>{}</{}>'.format(self.name, encode_entities(self.body), self.name)
def __repr__(self):
return '{}({})'.format(self.name, repr(self.body))
class Token(object):
"""A utility class for parsing XML."""
def __init__(self, s):
"""Create a `Token' object from `s', the text comprising the parsed token."""
self.text = s
def __repr__(self):
return str(type(self).__name__) + '(' + self.text.decode('utf-8') + ')'
def __str__(self):
return repr(self)
class TokenTagOpen(Token):
"""An opening tag (<foo>)"""
class TokenTagClose(Token):
"""An closing tag (</foo>)"""
class TokenCData(Token):
"""Textual element body"""
class QuasiXmlParseError(Exception):
"""Indicates parse failure of XML protocol data."""
def tokenize(s):
"""Parse an XML-ish string into a list of tokens."""
tokens = []
# Consume input until empty.
while True:
nextclose = s.find(b'</')
nextopen = s.find(b'<')
if nextopen < nextclose or nextopen == -1:
# Either we have no tags left, or we are in a non-cdata element body: strip whitespace.
s = s.lstrip()
if len(s) == 0:
return tokens
# Closing tag?
elif s.startswith(b'</'):
try:
name, s = s[2:].split(b'>', 1)
except Exception:
raise QuasiXmlParseError('protocol error: unterminated close tag')
tokens.append(TokenTagClose(name))
s = s.lstrip() # consume space after closing tag
# Opening tag?
elif s.startswith(b'<'):
try:
name, s = s[1:].split(b'>', 1)
except Exception:
raise QuasiXmlParseError('protocol error: unterminated open tag')
tokens.append(TokenTagOpen(name))
else:
# capture cdata till next tag.
try:
cdata, s = s.split(b'<', 1)
except Exception:
raise QuasiXmlParseError('protocol error: unterminated cdata')
s = b'<' + s
tokens.append(TokenCData(cdata))
def fromtokens(tokens):
"""Parse XML-ish tokens into an Element."""
def parse_elem(tokens):
"""Parse some tokens into one Element, and return unconsumed tokens."""
topen, tokens = tokens[0], tokens[1:]
if type(topen) is not TokenTagOpen:
raise QuasiXmlParseError('protocol error: data does not start with open tag')
children = []
cdata = None
while len(tokens) > 0:
t, tokens = tokens[0], tokens[1:]
if type(t) is TokenTagOpen:
# Slurp a sub-element.
el, tokens = parse_elem([t] + tokens)
children.append(el)
# Continue with non-consumed tokens.
elif type(t) == TokenTagClose:
if t.text != topen.text:
raise QuasiXmlParseError('protocol error: close tag <{}> does not match opening tag <{}>'.format(t.text, topen.text))
elif cdata is not None and len(children):
raise QuasiXmlParseError('protocol error: mixed cdata and child elements')
return Element(topen.text.decode('utf-8'), cdata.decode('utf-8') if cdata is not None else children), tokens
else:
cdata = t.text
elem, rest = parse_elem(tokens)
if rest != []:
raise QuasiXmlParseError('protocol error: trailing data')
return elem
try:
unicode # Python 2
except NameError:
unicode = str
def fromstring(s):
if type(s) is unicode:
s = s.encode('utf-8')
if type(s) is not bytes:
raise TypeError('expected a bytes-object, got {}'.format(type(s).__name__))
return fromtokens(tokenize(s))
def encode_entities(s):
from . import XML_entities_active
for k, v in XML_entities_active():
s = s.replace(k, v)
return s
def decode_entities(s):
from . import XML_entities_active
rev = list(XML_entities_active())
rev.reverse() # (make sure & is decoded last)
for k, v in rev:
s = s.replace(v, k)
return s | irods/message/quasixml.py |
class Element():
"""
Represents <name>body</name>.
(Where `body' is either a string or a list of sub-elements.)
"""
@property
def tag(self): return self.name
def __init__(self, name, body):
"""Initialize with the tag's name and the body (i.e. content)."""
if body == []:
# Empty element.
self.text = None
elif type(body) is not list:
# String element: decode body.
body = decode_entities(body)
self.text = body
self.name = name
self.body = body
def find(self, name):
"""Get first matching child element by name."""
for x in self.findall(name):
return x
def findall(self, name):
"""Get matching child elements by name."""
return list(self.findall_(name))
def findall_(self, name):
"""Get matching child elements by name (generator variant)."""
return (el for el in self.body if el.name == name)
# For debugging convenience:
def __str__(self):
if type(self.body) is list:
return '<{}>{}</{}>'.format(self.name, ''.join(map(str, self.body)), self.name)
else:
return '<{}>{}</{}>'.format(self.name, encode_entities(self.body), self.name)
def __repr__(self):
return '{}({})'.format(self.name, repr(self.body))
class Token(object):
"""A utility class for parsing XML."""
def __init__(self, s):
"""Create a `Token' object from `s', the text comprising the parsed token."""
self.text = s
def __repr__(self):
return str(type(self).__name__) + '(' + self.text.decode('utf-8') + ')'
def __str__(self):
return repr(self)
class TokenTagOpen(Token):
"""An opening tag (<foo>)"""
class TokenTagClose(Token):
"""An closing tag (</foo>)"""
class TokenCData(Token):
"""Textual element body"""
class QuasiXmlParseError(Exception):
"""Indicates parse failure of XML protocol data."""
def tokenize(s):
"""Parse an XML-ish string into a list of tokens."""
tokens = []
# Consume input until empty.
while True:
nextclose = s.find(b'</')
nextopen = s.find(b'<')
if nextopen < nextclose or nextopen == -1:
# Either we have no tags left, or we are in a non-cdata element body: strip whitespace.
s = s.lstrip()
if len(s) == 0:
return tokens
# Closing tag?
elif s.startswith(b'</'):
try:
name, s = s[2:].split(b'>', 1)
except Exception:
raise QuasiXmlParseError('protocol error: unterminated close tag')
tokens.append(TokenTagClose(name))
s = s.lstrip() # consume space after closing tag
# Opening tag?
elif s.startswith(b'<'):
try:
name, s = s[1:].split(b'>', 1)
except Exception:
raise QuasiXmlParseError('protocol error: unterminated open tag')
tokens.append(TokenTagOpen(name))
else:
# capture cdata till next tag.
try:
cdata, s = s.split(b'<', 1)
except Exception:
raise QuasiXmlParseError('protocol error: unterminated cdata')
s = b'<' + s
tokens.append(TokenCData(cdata))
def fromtokens(tokens):
"""Parse XML-ish tokens into an Element."""
def parse_elem(tokens):
"""Parse some tokens into one Element, and return unconsumed tokens."""
topen, tokens = tokens[0], tokens[1:]
if type(topen) is not TokenTagOpen:
raise QuasiXmlParseError('protocol error: data does not start with open tag')
children = []
cdata = None
while len(tokens) > 0:
t, tokens = tokens[0], tokens[1:]
if type(t) is TokenTagOpen:
# Slurp a sub-element.
el, tokens = parse_elem([t] + tokens)
children.append(el)
# Continue with non-consumed tokens.
elif type(t) == TokenTagClose:
if t.text != topen.text:
raise QuasiXmlParseError('protocol error: close tag <{}> does not match opening tag <{}>'.format(t.text, topen.text))
elif cdata is not None and len(children):
raise QuasiXmlParseError('protocol error: mixed cdata and child elements')
return Element(topen.text.decode('utf-8'), cdata.decode('utf-8') if cdata is not None else children), tokens
else:
cdata = t.text
elem, rest = parse_elem(tokens)
if rest != []:
raise QuasiXmlParseError('protocol error: trailing data')
return elem
try:
unicode # Python 2
except NameError:
unicode = str
def fromstring(s):
if type(s) is unicode:
s = s.encode('utf-8')
if type(s) is not bytes:
raise TypeError('expected a bytes-object, got {}'.format(type(s).__name__))
return fromtokens(tokenize(s))
def encode_entities(s):
from . import XML_entities_active
for k, v in XML_entities_active():
s = s.replace(k, v)
return s
def decode_entities(s):
from . import XML_entities_active
rev = list(XML_entities_active())
rev.reverse() # (make sure & is decoded last)
for k, v in rev:
s = s.replace(v, k)
return s | 0.874265 | 0.202364 |
# Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
from dnslib import DNSLabel
# SCION
from infrastructure.dns_server.main import SCIONDnsServer
from lib.defines import (
BEACON_SERVICE,
CERTIFICATE_SERVICE,
DNS_SERVICE,
PATH_SERVICE,
SCION_DNS_PORT,
SIBRA_SERVICE,
)
from lib.zk.errors import ZkNoConnection
from test.testcommon import create_mock
class BaseDNSServer(object):
DOMAIN = DNSLabel("testdomainpleaseignore")
NAME = "notaninstance"
FQDN = DOMAIN.add(NAME)
class TestSCIONDnsServerSetup(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer.setup
"""
@patch("infrastructure.dns_server.main.Zookeeper", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsLogger", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsTcpServer", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsUdpServer", autospec=True)
@patch("infrastructure.dns_server.main.DNSServer", autospec=True)
@patch("infrastructure.dns_server.main.ZoneResolver", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, init, zone_resolver, dns_server, udp_server, tcp_server,
dns_logger, zookeeper):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server.lock = "lock"
server.domain = "domain"
server.addr = create_mock(["host"])
server.addr.host = "127.0.0.1"
server.id = "srvid"
server.topology = create_mock(["isd_as", "zookeepers"])
server.topology.isd_as = "isd as"
server.topology.zookeepers = ["zk0", "zk1"]
server._setup_parties = create_mock()
# Call
server.setup()
# Tests
zone_resolver.assert_called_once_with("lock", "domain")
dns_server.assert_any_call(
zone_resolver.return_value, port=SCION_DNS_PORT,
address="127.0.0.1", server=udp_server,
logger=dns_logger.return_value)
dns_server.assert_any_call(
zone_resolver.return_value, port=SCION_DNS_PORT,
address="127.0.0.1", server=tcp_server,
logger=dns_logger.return_value)
ntools.eq_(dns_server.call_count, 2)
zookeeper.assert_called_once_with(
"isd as", DNS_SERVICE, "srvid\0%d\000127.0.0.1" % SCION_DNS_PORT,
["zk0", "zk1"])
ntools.eq_(server._parties, {})
server._setup_parties.assert_called_once_with()
class TestSCIONDnsSetupParties(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer._setup_parties
"""
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, _):
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(["retry", "party_setup"])
server.addr = create_mock(["isd_as"])
server.addr.isd_as = "30-10"
server._parties = {}
# Call
server._setup_parties()
# Tests
for srv in server.SRV_TYPES:
autojoin = False
if srv == DNS_SERVICE:
autojoin = True
server.zk.retry.assert_any_call(
"Joining %s party" % srv, server.zk.party_setup,
prefix="/30-10/%s" % srv, autojoin=autojoin)
ntools.eq_(server.zk.retry.call_count, len(server.SRV_TYPES))
class TestSCIONDnsSyncZkState(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer._sync_zk_state
"""
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test_success(self, init):
# Setup
services = {
BEACON_SERVICE: ["bs1", "bs2", "bs3"],
CERTIFICATE_SERVICE: ["cs1"],
DNS_SERVICE: ["ds1", "ds2"],
PATH_SERVICE: [],
SIBRA_SERVICE: ["sb1"],
}
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(['wait_connected'])
server.domain = self.DOMAIN
server._parties = {}
for i in SCIONDnsServer.SRV_TYPES:
party = create_mock(["list"])
party.list.return_value = services[i]
server._parties[i] = party
server._parse_srv_inst = create_mock()
server.lock = create_mock(['__enter__', '__exit__'])
server.resolver = create_mock(["services"])
domain_set = set([self.DOMAIN.add(srv) for srv in
SCIONDnsServer.SRV_TYPES])
# Call
server._sync_zk_state()
# Tests
server.zk.wait_connected.assert_called_once_with(timeout=10.0)
ntools.eq_(domain_set, set(server.services))
for type_, insts in services.items():
for inst in insts:
server._parse_srv_inst.assert_any_call(
inst, self.DOMAIN.add(type_))
ntools.ok_(server.lock.mock_calls)
ntools.eq_(server.resolver.services, server.services)
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test_no_conn(self, init):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(['wait_connected'])
server.zk.wait_connected.side_effect = ZkNoConnection
# Call
server._sync_zk_state()
# Tests
server.zk.wait_connected.assert_called_once_with(timeout=10.0)
ntools.eq_(server.services, {})
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test_connloss(self, init):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(['wait_connected'])
server.domain = self.DOMAIN
party = create_mock(["list"])
party.list.side_effect = ZkNoConnection
server._parties = {
SCIONDnsServer.SRV_TYPES[0]: party
}
# Call
server._sync_zk_state()
class TestSCIONDnsParseSrvInst(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer._parse_srv_inst
"""
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, init):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
srv_domain = self.DOMAIN.add(BEACON_SERVICE)
server.services = {srv_domain: ["addr0"]}
# Call
server._parse_srv_inst("name\0port\0addr1\0addr2", srv_domain)
# Tests
ntools.eq_(server.services[srv_domain], ["addr0", "addr1", "addr2"])
class TestSCIONDnsRun(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer.run
"""
@patch("infrastructure.dns_server.main.sleep")
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, init, sleep):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server._sync_zk_state = create_mock()
server.udp_server = create_mock(["start_thread", "isAlive"])
server.tcp_server = create_mock(["start_thread", "isAlive"])
sleep.side_effect = []
# Call
ntools.assert_raises(StopIteration, server.run)
# Tests
ntools.eq_(server._sync_zk_state.call_count, 2)
server.udp_server.start_thread.assert_called_once_with()
server.tcp_server.start_thread.assert_called_once_with()
server.udp_server.isAlive.assert_called_once_with()
server.tcp_server.isAlive.assert_called_once_with()
sleep.assert_called_once_with(server.SYNC_TIME)
if __name__ == "__main__":
nose.run(defaultTest=__name__) | test/infrastructure/dns_server/main_test.py | # Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
from dnslib import DNSLabel
# SCION
from infrastructure.dns_server.main import SCIONDnsServer
from lib.defines import (
BEACON_SERVICE,
CERTIFICATE_SERVICE,
DNS_SERVICE,
PATH_SERVICE,
SCION_DNS_PORT,
SIBRA_SERVICE,
)
from lib.zk.errors import ZkNoConnection
from test.testcommon import create_mock
class BaseDNSServer(object):
DOMAIN = DNSLabel("testdomainpleaseignore")
NAME = "notaninstance"
FQDN = DOMAIN.add(NAME)
class TestSCIONDnsServerSetup(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer.setup
"""
@patch("infrastructure.dns_server.main.Zookeeper", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsLogger", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsTcpServer", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsUdpServer", autospec=True)
@patch("infrastructure.dns_server.main.DNSServer", autospec=True)
@patch("infrastructure.dns_server.main.ZoneResolver", autospec=True)
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, init, zone_resolver, dns_server, udp_server, tcp_server,
dns_logger, zookeeper):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server.lock = "lock"
server.domain = "domain"
server.addr = create_mock(["host"])
server.addr.host = "127.0.0.1"
server.id = "srvid"
server.topology = create_mock(["isd_as", "zookeepers"])
server.topology.isd_as = "isd as"
server.topology.zookeepers = ["zk0", "zk1"]
server._setup_parties = create_mock()
# Call
server.setup()
# Tests
zone_resolver.assert_called_once_with("lock", "domain")
dns_server.assert_any_call(
zone_resolver.return_value, port=SCION_DNS_PORT,
address="127.0.0.1", server=udp_server,
logger=dns_logger.return_value)
dns_server.assert_any_call(
zone_resolver.return_value, port=SCION_DNS_PORT,
address="127.0.0.1", server=tcp_server,
logger=dns_logger.return_value)
ntools.eq_(dns_server.call_count, 2)
zookeeper.assert_called_once_with(
"isd as", DNS_SERVICE, "srvid\0%d\000127.0.0.1" % SCION_DNS_PORT,
["zk0", "zk1"])
ntools.eq_(server._parties, {})
server._setup_parties.assert_called_once_with()
class TestSCIONDnsSetupParties(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer._setup_parties
"""
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, _):
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(["retry", "party_setup"])
server.addr = create_mock(["isd_as"])
server.addr.isd_as = "30-10"
server._parties = {}
# Call
server._setup_parties()
# Tests
for srv in server.SRV_TYPES:
autojoin = False
if srv == DNS_SERVICE:
autojoin = True
server.zk.retry.assert_any_call(
"Joining %s party" % srv, server.zk.party_setup,
prefix="/30-10/%s" % srv, autojoin=autojoin)
ntools.eq_(server.zk.retry.call_count, len(server.SRV_TYPES))
class TestSCIONDnsSyncZkState(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer._sync_zk_state
"""
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test_success(self, init):
# Setup
services = {
BEACON_SERVICE: ["bs1", "bs2", "bs3"],
CERTIFICATE_SERVICE: ["cs1"],
DNS_SERVICE: ["ds1", "ds2"],
PATH_SERVICE: [],
SIBRA_SERVICE: ["sb1"],
}
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(['wait_connected'])
server.domain = self.DOMAIN
server._parties = {}
for i in SCIONDnsServer.SRV_TYPES:
party = create_mock(["list"])
party.list.return_value = services[i]
server._parties[i] = party
server._parse_srv_inst = create_mock()
server.lock = create_mock(['__enter__', '__exit__'])
server.resolver = create_mock(["services"])
domain_set = set([self.DOMAIN.add(srv) for srv in
SCIONDnsServer.SRV_TYPES])
# Call
server._sync_zk_state()
# Tests
server.zk.wait_connected.assert_called_once_with(timeout=10.0)
ntools.eq_(domain_set, set(server.services))
for type_, insts in services.items():
for inst in insts:
server._parse_srv_inst.assert_any_call(
inst, self.DOMAIN.add(type_))
ntools.ok_(server.lock.mock_calls)
ntools.eq_(server.resolver.services, server.services)
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test_no_conn(self, init):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(['wait_connected'])
server.zk.wait_connected.side_effect = ZkNoConnection
# Call
server._sync_zk_state()
# Tests
server.zk.wait_connected.assert_called_once_with(timeout=10.0)
ntools.eq_(server.services, {})
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test_connloss(self, init):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server.zk = create_mock(['wait_connected'])
server.domain = self.DOMAIN
party = create_mock(["list"])
party.list.side_effect = ZkNoConnection
server._parties = {
SCIONDnsServer.SRV_TYPES[0]: party
}
# Call
server._sync_zk_state()
class TestSCIONDnsParseSrvInst(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer._parse_srv_inst
"""
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, init):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
srv_domain = self.DOMAIN.add(BEACON_SERVICE)
server.services = {srv_domain: ["addr0"]}
# Call
server._parse_srv_inst("name\0port\0addr1\0addr2", srv_domain)
# Tests
ntools.eq_(server.services[srv_domain], ["addr0", "addr1", "addr2"])
class TestSCIONDnsRun(BaseDNSServer):
"""
Unit tests for infrastructure.dns_server.main.SCIONDnsServer.run
"""
@patch("infrastructure.dns_server.main.sleep")
@patch("infrastructure.dns_server.main.SCIONDnsServer.__init__",
autospec=True, return_value=None)
def test(self, init, sleep):
# Setup
server = SCIONDnsServer("srvid", "conf_dir")
server._sync_zk_state = create_mock()
server.udp_server = create_mock(["start_thread", "isAlive"])
server.tcp_server = create_mock(["start_thread", "isAlive"])
sleep.side_effect = []
# Call
ntools.assert_raises(StopIteration, server.run)
# Tests
ntools.eq_(server._sync_zk_state.call_count, 2)
server.udp_server.start_thread.assert_called_once_with()
server.tcp_server.start_thread.assert_called_once_with()
server.udp_server.isAlive.assert_called_once_with()
server.tcp_server.isAlive.assert_called_once_with()
sleep.assert_called_once_with(server.SYNC_TIME)
if __name__ == "__main__":
nose.run(defaultTest=__name__) | 0.449151 | 0.160825 |
import unittest
from faker import Faker
from src.logica.coleccion import Coleccion
from src.modelo.album import Album
from src.modelo.cancion import Cancion
from src.modelo.declarative_base import Session
from src.modelo.interprete import Interprete
class CancionTestCase(unittest.TestCase):
def setUp(self):
self.session = Session()
self.coleccion = Coleccion()
# Generación de datos con libreria Faker
self.data_factory = Faker()
def test_cancion_sin_interpretes(self):
# Nombre aleatorio
titulo_cancion = self.data_factory.name()
# Número aleatorio entre 0 y 60
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
cancion = self.coleccion.agregar_cancion(titulo_cancion, minutos_cancion, segundos_cancion, compositor_cancion, -1, [])
self.assertEqual(cancion, False)
def test_cancion_varios_interpretes(self):
nombre_interprete1 = self.data_factory.name()
# Frase aleatoria
texto_curiosidades1 = self.data_factory.sentence()
self.coleccion.agregar_interprete(nombre_interprete1, texto_curiosidades1, -1)
nombre_interprete2 = self.data_factory.name()
texto_curiosidades2 = self.data_factory.sentence()
self.coleccion.agregar_interprete(nombre_interprete2, texto_curiosidades2, -1)
titulo_cancion = self.data_factory.name()
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
self.coleccion.agregar_cancion(titulo_cancion, minutos_cancion, segundos_cancion, compositor_cancion, -1,
[{'nombre': nombre_interprete1, 'texto_curiosidades': texto_curiosidades1},
{'nombre': nombre_interprete2,
'texto_curiosidades': texto_curiosidades2}])
consulta = self.session.query(Cancion).filter(Cancion.titulo == titulo_cancion).first()
self.assertIsNotNone(consulta)
def test_cancion_con_album(self):
titulo_album = self.data_factory.name()
# Año aleatorio
anio_album = self.data_factory.year()
descripcion_album = self.data_factory.sentence()
self.coleccion.agregar_album(titulo_album, anio_album, descripcion_album, "CD")
consulta1 = self.session.query(Album).filter(Album.titulo == titulo_album).first().id
self.coleccion.agregar_interprete("<NAME>", "Canción dedicada a su ...", -1)
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
self.coleccion.agregar_cancion("Bye mamá", minutos_cancion, segundos_cancion, compositor_cancion, consulta1,
[{'nombre': '<NAME>',
'texto_curiosidades': 'Canción dedicada a su ...'}])
consulta2 = self.session.query(Cancion).filter(Cancion.titulo == "Bye mamá").first()
self.assertNotEqual(len(consulta2.albumes), 0)
def test_cancion_repetida_album(self):
titulo_album = self.data_factory.name()
anio_album = self.data_factory.year()
descripcion_album = self.data_factory.sentence()
self.coleccion.agregar_album(titulo_album, anio_album, descripcion_album, "CD")
consulta1 = self.session.query(Album).filter(Album.titulo == titulo_album).first().id
nombre_interprete1 = self.data_factory.name()
# Texto aleatorio
texto_curiosidades1 = self.data_factory.text()
self.coleccion.agregar_interprete(nombre_interprete1, texto_curiosidades1, -1)
titulo_cancion1 = self.data_factory.name()
minutos_cancion1 = self.data_factory.pyint(0, 60)
segundos_cancion1 = self.data_factory.pyint(0, 60)
compositor_cancion1 = self.data_factory.name()
self.coleccion.agregar_cancion(titulo_cancion1, minutos_cancion1, segundos_cancion1, compositor_cancion1, consulta1,
[{'nombre': nombre_interprete1,
'texto_curiosidades': texto_curiosidades1}])
nombre_interprete2 = self.data_factory.name()
texto_curiosidades2 = self.data_factory.text()
self.coleccion.agregar_interprete(nombre_interprete2, texto_curiosidades2, -1)
minutos_cancion2 = self.data_factory.pyint(0, 60)
segundos_cancion2 = self.data_factory.pyint(0, 60)
compositor_cancion2 = self.data_factory.name()
cancion = self.coleccion.agregar_cancion(titulo_cancion1, minutos_cancion2, segundos_cancion2, compositor_cancion2, consulta1,
[{'nombre': nombre_interprete2,
'texto_curiosidades': texto_curiosidades2}])
self.assertEqual(cancion, False)
def test_editar_cancion(self):
consulta1 = self.session.query(Cancion).filter(Cancion.id == 2).first().compositor
consulta2 = self.session.query(Interprete).filter(Interprete.nombre == "Franco de Vita").first()
texto_curiosidades = self.data_factory.text()
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
if consulta2 is None:
self.coleccion.agregar_interprete("Franco de Vita", texto_curiosidades, 1)
self.coleccion.editar_cancion(2, "Bye mamá", minutos_cancion, segundos_cancion, "J.R.Florez y Difelisatti",
[{'id': '2', 'nombre': '<NAME>',
'texto_curiosidades': 'Canción dedicada a su ...'},
{'id': 'n', 'nombre': 'Franco de Vita',
'texto_curiosidades': texto_curiosidades}])
else:
self.coleccion.editar_cancion(2, "Bye bye", minutos_cancion, segundos_cancion, "J.R.Florez y Difelisatti",
[{'id': '2', 'nombre': '<NAME>',
'texto_curiosidades': 'Canción dedicada a su ...'},
{'id': '9', 'nombre': 'Franco de Vita',
'texto_curiosidades': texto_curiosidades}])
consulta3 = self.session.query(Cancion).filter(Cancion.id == 2).first()
self.assertEqual(consulta3.compositor, "J.R.Florez y Difelisatti")
def test_eliminar_cancion(self):
self.coleccion.eliminar_cancion(3)
consulta = self.session.query(Cancion).filter(Cancion.id == 3).first()
self.assertIsNone(consulta)
def test_buscar_canciones_por_titulo(self):
titulo_cancion = self.data_factory.name()
anio_cancion = self.data_factory.year()
descripcion_cancion = self.data_factory.sentence()
self.coleccion.agregar_album(titulo_cancion, anio_cancion, descripcion_cancion, "CD")
consulta1 = self.session.query(Album).filter(Album.titulo == titulo_cancion).first().id
nombre_interprete = self.data_factory.name()
texto_curiosidades = self.data_factory.text()
self.coleccion.agregar_interprete(nombre_interprete, texto_curiosidades, -1)
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
self.coleccion.agregar_cancion("Baby blues", minutos_cancion, segundos_cancion, compositor_cancion, consulta1,
[{'nombre': nombre_interprete,
'texto_curiosidades': texto_curiosidades}])
consulta = self.coleccion.buscar_canciones_por_titulo("Baby")
self.assertGreater(len(consulta), 0)
def test_dar_cancion_por_id(self):
consulta = self.coleccion.dar_cancion_por_id(1)
self.assertEqual(consulta["titulo"], "Baby blues") | tests/test_cancion.py | import unittest
from faker import Faker
from src.logica.coleccion import Coleccion
from src.modelo.album import Album
from src.modelo.cancion import Cancion
from src.modelo.declarative_base import Session
from src.modelo.interprete import Interprete
class CancionTestCase(unittest.TestCase):
def setUp(self):
self.session = Session()
self.coleccion = Coleccion()
# Generación de datos con libreria Faker
self.data_factory = Faker()
def test_cancion_sin_interpretes(self):
# Nombre aleatorio
titulo_cancion = self.data_factory.name()
# Número aleatorio entre 0 y 60
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
cancion = self.coleccion.agregar_cancion(titulo_cancion, minutos_cancion, segundos_cancion, compositor_cancion, -1, [])
self.assertEqual(cancion, False)
def test_cancion_varios_interpretes(self):
nombre_interprete1 = self.data_factory.name()
# Frase aleatoria
texto_curiosidades1 = self.data_factory.sentence()
self.coleccion.agregar_interprete(nombre_interprete1, texto_curiosidades1, -1)
nombre_interprete2 = self.data_factory.name()
texto_curiosidades2 = self.data_factory.sentence()
self.coleccion.agregar_interprete(nombre_interprete2, texto_curiosidades2, -1)
titulo_cancion = self.data_factory.name()
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
self.coleccion.agregar_cancion(titulo_cancion, minutos_cancion, segundos_cancion, compositor_cancion, -1,
[{'nombre': nombre_interprete1, 'texto_curiosidades': texto_curiosidades1},
{'nombre': nombre_interprete2,
'texto_curiosidades': texto_curiosidades2}])
consulta = self.session.query(Cancion).filter(Cancion.titulo == titulo_cancion).first()
self.assertIsNotNone(consulta)
def test_cancion_con_album(self):
titulo_album = self.data_factory.name()
# Año aleatorio
anio_album = self.data_factory.year()
descripcion_album = self.data_factory.sentence()
self.coleccion.agregar_album(titulo_album, anio_album, descripcion_album, "CD")
consulta1 = self.session.query(Album).filter(Album.titulo == titulo_album).first().id
self.coleccion.agregar_interprete("<NAME>", "Canción dedicada a su ...", -1)
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
self.coleccion.agregar_cancion("Bye mamá", minutos_cancion, segundos_cancion, compositor_cancion, consulta1,
[{'nombre': '<NAME>',
'texto_curiosidades': 'Canción dedicada a su ...'}])
consulta2 = self.session.query(Cancion).filter(Cancion.titulo == "Bye mamá").first()
self.assertNotEqual(len(consulta2.albumes), 0)
def test_cancion_repetida_album(self):
titulo_album = self.data_factory.name()
anio_album = self.data_factory.year()
descripcion_album = self.data_factory.sentence()
self.coleccion.agregar_album(titulo_album, anio_album, descripcion_album, "CD")
consulta1 = self.session.query(Album).filter(Album.titulo == titulo_album).first().id
nombre_interprete1 = self.data_factory.name()
# Texto aleatorio
texto_curiosidades1 = self.data_factory.text()
self.coleccion.agregar_interprete(nombre_interprete1, texto_curiosidades1, -1)
titulo_cancion1 = self.data_factory.name()
minutos_cancion1 = self.data_factory.pyint(0, 60)
segundos_cancion1 = self.data_factory.pyint(0, 60)
compositor_cancion1 = self.data_factory.name()
self.coleccion.agregar_cancion(titulo_cancion1, minutos_cancion1, segundos_cancion1, compositor_cancion1, consulta1,
[{'nombre': nombre_interprete1,
'texto_curiosidades': texto_curiosidades1}])
nombre_interprete2 = self.data_factory.name()
texto_curiosidades2 = self.data_factory.text()
self.coleccion.agregar_interprete(nombre_interprete2, texto_curiosidades2, -1)
minutos_cancion2 = self.data_factory.pyint(0, 60)
segundos_cancion2 = self.data_factory.pyint(0, 60)
compositor_cancion2 = self.data_factory.name()
cancion = self.coleccion.agregar_cancion(titulo_cancion1, minutos_cancion2, segundos_cancion2, compositor_cancion2, consulta1,
[{'nombre': nombre_interprete2,
'texto_curiosidades': texto_curiosidades2}])
self.assertEqual(cancion, False)
def test_editar_cancion(self):
consulta1 = self.session.query(Cancion).filter(Cancion.id == 2).first().compositor
consulta2 = self.session.query(Interprete).filter(Interprete.nombre == "Franco de Vita").first()
texto_curiosidades = self.data_factory.text()
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
if consulta2 is None:
self.coleccion.agregar_interprete("Franco de Vita", texto_curiosidades, 1)
self.coleccion.editar_cancion(2, "Bye mamá", minutos_cancion, segundos_cancion, "J.R.Florez y Difelisatti",
[{'id': '2', 'nombre': '<NAME>',
'texto_curiosidades': 'Canción dedicada a su ...'},
{'id': 'n', 'nombre': 'Franco de Vita',
'texto_curiosidades': texto_curiosidades}])
else:
self.coleccion.editar_cancion(2, "Bye bye", minutos_cancion, segundos_cancion, "J.R.Florez y Difelisatti",
[{'id': '2', 'nombre': '<NAME>',
'texto_curiosidades': 'Canción dedicada a su ...'},
{'id': '9', 'nombre': 'Franco de Vita',
'texto_curiosidades': texto_curiosidades}])
consulta3 = self.session.query(Cancion).filter(Cancion.id == 2).first()
self.assertEqual(consulta3.compositor, "J.R.Florez y Difelisatti")
def test_eliminar_cancion(self):
self.coleccion.eliminar_cancion(3)
consulta = self.session.query(Cancion).filter(Cancion.id == 3).first()
self.assertIsNone(consulta)
def test_buscar_canciones_por_titulo(self):
titulo_cancion = self.data_factory.name()
anio_cancion = self.data_factory.year()
descripcion_cancion = self.data_factory.sentence()
self.coleccion.agregar_album(titulo_cancion, anio_cancion, descripcion_cancion, "CD")
consulta1 = self.session.query(Album).filter(Album.titulo == titulo_cancion).first().id
nombre_interprete = self.data_factory.name()
texto_curiosidades = self.data_factory.text()
self.coleccion.agregar_interprete(nombre_interprete, texto_curiosidades, -1)
minutos_cancion = self.data_factory.pyint(0, 60)
segundos_cancion = self.data_factory.pyint(0, 60)
compositor_cancion = self.data_factory.name()
self.coleccion.agregar_cancion("Baby blues", minutos_cancion, segundos_cancion, compositor_cancion, consulta1,
[{'nombre': nombre_interprete,
'texto_curiosidades': texto_curiosidades}])
consulta = self.coleccion.buscar_canciones_por_titulo("Baby")
self.assertGreater(len(consulta), 0)
def test_dar_cancion_por_id(self):
consulta = self.coleccion.dar_cancion_por_id(1)
self.assertEqual(consulta["titulo"], "Baby blues") | 0.325628 | 0.276239 |
# Stdlib
import json
try:
import cStringIO as StringIO
except ImportError:
import StringIO as StringIO
# Local
import doekbase.workspace.client
from . import thrift_service, ttypes
from doekbase.data_api.util import get_logger, log_start, log_end
from doekbase.data_api.rpc_util import thrift_validate
_log = get_logger('baseobj.impl')
class ObjectImpl(thrift_service.Iface):
def __init__(self, services=None):
print("IN ObjectImpl")
if services is None or type(services) != type({}):
raise TypeError("You must provide a service configuration "
"dictionary! Found {0}".format(type(services)))
elif not services.has_key("workspace_service_url"):
raise KeyError("Expecting workspace_service_url key!")
self.services = services
self.ws_client = None
self.ref = None
def init(self, auth):
token = auth.token
self.ws_client = doekbase.workspace.client.Workspace(
self.services["workspace_service_url"], token=token)
def get_info(self, ref):
self.ref = ref
try:
info_values = self.ws_client.get_object_info_new({
"objects": [{"ref": ref}],
"includeMetadata": 0,
"ignoreErrors": 0})[0]
except Exception as err:
raise # XXX
md5_typestr = self.ws_client.translate_to_MD5_types([info_values[2]]).values()[0]
info = ttypes.Metadata(
object_id=str(info_values[0]),
object_name=info_values[1],
object_reference="{0}/{1}".format(info_values[6],
info_values[0]),
object_reference_versioned="{0}/{1}/{2}".format(
info_values[6], info_values[0],info_values[4]),
type_string=md5_typestr,
save_date=info_values[3],
version=str(info_values[4]),
saved_by=info_values[5],
workspace_id=info_values[6],
workspace_name=info_values[7],
object_checksum=info_values[8],
object_size=info_values[9],
object_metadata=str(info_values[10]))
thrift_validate(info)
return info
def get_schema(self, ref):
return self.ws_client.get_type_info(self.get_info(ref)["type_string"])
def get_history(self):
return self.ws_client.get_object_history({"ref": self.ref})
def get_provenance(self):
return self.ws_client.get_object_provenance([{"ref": self.ref}])
def get_data(self):
t0 = log_start(_log, 'get_data')
s = ''
try:
t1 = log_start(_log, 'get_data.query')
data_dict = self.ws_client.get_objects([
{"ref": self.ref}])[0]["data"]
log_end(_log, t1, 'get_data.query')
t1 = log_start(_log, 'get_data.dump')
s = json.dumps(data_dict)
log_end(_log, t1, 'get_data.dump')
except Exception as err:
print("@@ died in .dumps: {}".format(err))
log_end(_log, t0, 'get_data')
return s
def get_data_subset(self, path_list=None):
return self.ws_client.get_object_subset([{"ref": self.ref,
"included": path_list}])[0]["data"]
def get_referrers(self):
referrers = self.ws_client.list_referencing_objects(
[{"ref": self.ref}])[0]
object_refs_by_type = dict()
for x in referrers:
typestring = self.ws_client.translate_to_MD5_types(
[x[2]]).values()[0]
if typestring not in object_refs_by_type:
object_refs_by_type[typestring] = list()
object_refs_by_type[typestring].append(str(x[6]) + "/" +
str(x[0]) + "/" +
str(x[4]))
return object_refs_by_type | lib/doekbase/data_api/baseobj/impl.py |
# Stdlib
import json
try:
import cStringIO as StringIO
except ImportError:
import StringIO as StringIO
# Local
import doekbase.workspace.client
from . import thrift_service, ttypes
from doekbase.data_api.util import get_logger, log_start, log_end
from doekbase.data_api.rpc_util import thrift_validate
_log = get_logger('baseobj.impl')
class ObjectImpl(thrift_service.Iface):
def __init__(self, services=None):
print("IN ObjectImpl")
if services is None or type(services) != type({}):
raise TypeError("You must provide a service configuration "
"dictionary! Found {0}".format(type(services)))
elif not services.has_key("workspace_service_url"):
raise KeyError("Expecting workspace_service_url key!")
self.services = services
self.ws_client = None
self.ref = None
def init(self, auth):
token = auth.token
self.ws_client = doekbase.workspace.client.Workspace(
self.services["workspace_service_url"], token=token)
def get_info(self, ref):
self.ref = ref
try:
info_values = self.ws_client.get_object_info_new({
"objects": [{"ref": ref}],
"includeMetadata": 0,
"ignoreErrors": 0})[0]
except Exception as err:
raise # XXX
md5_typestr = self.ws_client.translate_to_MD5_types([info_values[2]]).values()[0]
info = ttypes.Metadata(
object_id=str(info_values[0]),
object_name=info_values[1],
object_reference="{0}/{1}".format(info_values[6],
info_values[0]),
object_reference_versioned="{0}/{1}/{2}".format(
info_values[6], info_values[0],info_values[4]),
type_string=md5_typestr,
save_date=info_values[3],
version=str(info_values[4]),
saved_by=info_values[5],
workspace_id=info_values[6],
workspace_name=info_values[7],
object_checksum=info_values[8],
object_size=info_values[9],
object_metadata=str(info_values[10]))
thrift_validate(info)
return info
def get_schema(self, ref):
return self.ws_client.get_type_info(self.get_info(ref)["type_string"])
def get_history(self):
return self.ws_client.get_object_history({"ref": self.ref})
def get_provenance(self):
return self.ws_client.get_object_provenance([{"ref": self.ref}])
def get_data(self):
t0 = log_start(_log, 'get_data')
s = ''
try:
t1 = log_start(_log, 'get_data.query')
data_dict = self.ws_client.get_objects([
{"ref": self.ref}])[0]["data"]
log_end(_log, t1, 'get_data.query')
t1 = log_start(_log, 'get_data.dump')
s = json.dumps(data_dict)
log_end(_log, t1, 'get_data.dump')
except Exception as err:
print("@@ died in .dumps: {}".format(err))
log_end(_log, t0, 'get_data')
return s
def get_data_subset(self, path_list=None):
return self.ws_client.get_object_subset([{"ref": self.ref,
"included": path_list}])[0]["data"]
def get_referrers(self):
referrers = self.ws_client.list_referencing_objects(
[{"ref": self.ref}])[0]
object_refs_by_type = dict()
for x in referrers:
typestring = self.ws_client.translate_to_MD5_types(
[x[2]]).values()[0]
if typestring not in object_refs_by_type:
object_refs_by_type[typestring] = list()
object_refs_by_type[typestring].append(str(x[6]) + "/" +
str(x[0]) + "/" +
str(x[4]))
return object_refs_by_type | 0.246624 | 0.16975 |
from typing import Counter
import scrapy
from tutorial.items import QuoteItem
from scrapy.http.request import Request
class QuotesSpider(scrapy.Spider):
# 爬虫名称, 唯一的
name = 'quotes'
# 请求url非该域名则过滤
# allowed_domains = ['quotes.toscrape.com']
# is_open_count = True
# count = 0
# MAX = 5
custom_settings = {
<<<<<<< HEAD
"CONCURRENT_REQUESTS": 6,
"DOWNLOAD_DELAY": 0,
'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
'tutorial.middlewares.TutorialRetryMiddleware': 550,
}
start_urls = [
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
]
# start_urls = [
# # 'http://quotes.toscrape.com/page/1/',
# # 'http://quotes.toscrape.com/page/2/',
# # 'http://quotes.toscrape.com/page/3/',
# # 'http://quotes.toscrape.com/page/4/',
# # 'http://quotes.toscrape.com/page/5/',
# # 'http://quotes.toscrape.com/page/6/',
# # 'http://quotes.toscrape.com/page/7/',
# # 'http://quotes.toscrape.com/page/8/',
# # 'http://quotes.toscrape.com/page/9/',
# # 'http://quotes.toscrape.com/page/10/',
# # 'http://quotes.toscrape.com/page/11/',
# # 'http://quotes.toscrape.com/page/12/',
# # 'http://quotes.toscrape.com/page/13/',
# # 'http://quotes.toscrape.com/page/14/',
# # 'http://quotes.toscrape.com/page/15/',
# # 'http://quotes.toscrape.com/page/16/',
# # 'http://quotes.toscrape.com/page/17/',
# # 'https://www.correos.cl/',
# # 'https://www.correos.cl/',
# # 'https://www.correos.cl/',
=======
"CONCURRENT_REQUESTS": 4,
"DOWNLOAD_DELAY":0.5,
}
start_urls = [
'http://httpbin.org/ip#1/',
'http://httpbin.org/ip#2/',
'http://httpbin.org/ip#3/',
'http://httpbin.org/ip#4/',
'http://httpbin.org/ip#5/',
'http://httpbin.org/ip#6/',
'http://httpbin.org/ip#7/',
'http://httpbin.org/ip#8/',
'http://httpbin.org/ip#9/',
'http://httpbin.org/ip#10/',
'http://httpbin.org/ip#11/',
'http://httpbin.org/ip#12/',
'http://httpbin.org/ip#13/',
'http://httpbin.org/ip#14/',
'http://httpbin.org/ip#15/',
'http://httpbin.org/ip#16/',
'http://httpbin.org/ip#17/',
'http://httpbin.org/ip#17/',
'http://httpbin.org/ip#18/',
'http://httpbin.org/ip#19/',
'http://httpbin.org/ip#20/',
'http://httpbin.org/ip#21/',
]
# start_urls = [
# 'http://quotes.toscrape.com/page/1/',
# 'http://quotes.toscrape.com/page/2/',
# 'http://quotes.toscrape.com/page/3/',
# 'http://quotes.toscrape.com/page/4/',
# 'http://quotes.toscrape.com/page/5/',
# 'http://quotes.toscrape.com/page/6/',
# 'http://quotes.toscrape.com/page/7/',
# 'http://quotes.toscrape.com/page/8/',
# 'http://quotes.toscrape.com/page/9/',
# 'http://quotes.toscrape.com/page/10/',
# 'http://quotes.toscrape.com/page/11/',
# 'http://quotes.toscrape.com/page/12/',
# 'http://quotes.toscrape.com/page/13/',
# 'http://quotes.toscrape.com/page/14/',
# 'http://quotes.toscrape.com/page/15/',
# 'http://quotes.toscrape.com/page/16/',
# 'http://quotes.toscrape.com/page/17/',
# 'https://www.correos.cl/',
# 'https://www.correos.cl/',
# 'https://www.correos.cl/',
>>>>>>> 0b240f3f443ce7cf1346e781b65bff5ca72101fb
# ]
def parse(self, response):
item = QuoteItem()
item['url'] = response.url
item['data'] = response.body.decode()
# print(response.body.decode())
return item | quotes.toscrape.com/tutorial/tutorial/spiders/quotes.py | from typing import Counter
import scrapy
from tutorial.items import QuoteItem
from scrapy.http.request import Request
class QuotesSpider(scrapy.Spider):
# 爬虫名称, 唯一的
name = 'quotes'
# 请求url非该域名则过滤
# allowed_domains = ['quotes.toscrape.com']
# is_open_count = True
# count = 0
# MAX = 5
custom_settings = {
<<<<<<< HEAD
"CONCURRENT_REQUESTS": 6,
"DOWNLOAD_DELAY": 0,
'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
'tutorial.middlewares.TutorialRetryMiddleware': 550,
}
start_urls = [
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
'http://httpbin.org/ip',
]
# start_urls = [
# # 'http://quotes.toscrape.com/page/1/',
# # 'http://quotes.toscrape.com/page/2/',
# # 'http://quotes.toscrape.com/page/3/',
# # 'http://quotes.toscrape.com/page/4/',
# # 'http://quotes.toscrape.com/page/5/',
# # 'http://quotes.toscrape.com/page/6/',
# # 'http://quotes.toscrape.com/page/7/',
# # 'http://quotes.toscrape.com/page/8/',
# # 'http://quotes.toscrape.com/page/9/',
# # 'http://quotes.toscrape.com/page/10/',
# # 'http://quotes.toscrape.com/page/11/',
# # 'http://quotes.toscrape.com/page/12/',
# # 'http://quotes.toscrape.com/page/13/',
# # 'http://quotes.toscrape.com/page/14/',
# # 'http://quotes.toscrape.com/page/15/',
# # 'http://quotes.toscrape.com/page/16/',
# # 'http://quotes.toscrape.com/page/17/',
# # 'https://www.correos.cl/',
# # 'https://www.correos.cl/',
# # 'https://www.correos.cl/',
=======
"CONCURRENT_REQUESTS": 4,
"DOWNLOAD_DELAY":0.5,
}
start_urls = [
'http://httpbin.org/ip#1/',
'http://httpbin.org/ip#2/',
'http://httpbin.org/ip#3/',
'http://httpbin.org/ip#4/',
'http://httpbin.org/ip#5/',
'http://httpbin.org/ip#6/',
'http://httpbin.org/ip#7/',
'http://httpbin.org/ip#8/',
'http://httpbin.org/ip#9/',
'http://httpbin.org/ip#10/',
'http://httpbin.org/ip#11/',
'http://httpbin.org/ip#12/',
'http://httpbin.org/ip#13/',
'http://httpbin.org/ip#14/',
'http://httpbin.org/ip#15/',
'http://httpbin.org/ip#16/',
'http://httpbin.org/ip#17/',
'http://httpbin.org/ip#17/',
'http://httpbin.org/ip#18/',
'http://httpbin.org/ip#19/',
'http://httpbin.org/ip#20/',
'http://httpbin.org/ip#21/',
]
# start_urls = [
# 'http://quotes.toscrape.com/page/1/',
# 'http://quotes.toscrape.com/page/2/',
# 'http://quotes.toscrape.com/page/3/',
# 'http://quotes.toscrape.com/page/4/',
# 'http://quotes.toscrape.com/page/5/',
# 'http://quotes.toscrape.com/page/6/',
# 'http://quotes.toscrape.com/page/7/',
# 'http://quotes.toscrape.com/page/8/',
# 'http://quotes.toscrape.com/page/9/',
# 'http://quotes.toscrape.com/page/10/',
# 'http://quotes.toscrape.com/page/11/',
# 'http://quotes.toscrape.com/page/12/',
# 'http://quotes.toscrape.com/page/13/',
# 'http://quotes.toscrape.com/page/14/',
# 'http://quotes.toscrape.com/page/15/',
# 'http://quotes.toscrape.com/page/16/',
# 'http://quotes.toscrape.com/page/17/',
# 'https://www.correos.cl/',
# 'https://www.correos.cl/',
# 'https://www.correos.cl/',
>>>>>>> 0b240f3f443ce7cf1346e781b65bff5ca72101fb
# ]
def parse(self, response):
item = QuoteItem()
item['url'] = response.url
item['data'] = response.body.decode()
# print(response.body.decode())
return item | 0.270866 | 0.119948 |
import re
import itertools
from django import forms
from django.utils.text import slugify
from freenodejobs.jobs.enums import StateEnum
from ..models import Job
from ..jobs_tags.models import Tag
class AddEditForm(forms.ModelForm):
class Meta:
model = Job
fields = (
'title',
'job_type',
'location',
'apply_url',
'apply_email',
'description',
'tags',
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['tags'].choices = [
(x.pk, x.title) for x in Tag.objects.all()
]
# Remove empty label
self.fields['job_type'].choices.pop(0)
def clean(self):
if not self.cleaned_data.get('apply_url') and \
not self.cleaned_data.get('apply_email'):
self.add_error(
'apply_url',
"You must specify at least one application method.",
)
self.add_error('apply_email', '')
return self.cleaned_data
def save(self, user):
instance = super().save(commit=False)
instance.user = user
instance.save()
# Ensure newly-unselected tags are removed
for x in instance.job_tags.all():
if x.tag not in self.cleaned_data['tags']:
x.delete()
# Ensure selected tags are selected
for x in self.cleaned_data['tags']:
instance.job_tags.get_or_create(tag=x, defaults={'user': user})
if instance.state == StateEnum.LIVE and self.changed_data:
txt = "Edited whilst live."
instance.set_state(StateEnum.WAITING_FOR_APPROVAL, user, txt)
instance.save()
return instance
class SubmitForApprovalForm(forms.Form):
def __init__(self, job, *args, **kwargs):
self.job = job
super().__init__(*args, **kwargs)
def clean(self):
if self.job.state != StateEnum.NEW:
raise forms.ValidationError("Job is not currently new.")
return self.cleaned_data
def save(self, user):
txt = "Submitted for approval."
self.job.set_state(StateEnum.WAITING_FOR_APPROVAL, user, txt)
self.job.save()
return self.job
class RemoveForm(forms.Form):
reason = forms.CharField()
def __init__(self, job, *args, **kwargs):
self.job = job
super().__init__(*args, **kwargs)
def save(self, user):
self.job.set_state(
StateEnum.REMOVED,
user,
self.cleaned_data['reason'],
)
self.job.save()
return self.job
class AddTagForm(forms.Form):
title = forms.CharField(max_length=255)
def clean_title(self):
val = self.cleaned_data['title'].strip()
if re.match(r'^[A-Za-z0-9-\+ ]+$', val) is None:
raise forms.ValidationError("Please enter a valid tag title.")
# Canonicalise titles by replacing multiple spaces with a single one
val = re.sub(r'\s+', ' ', val)
return val
def save(self, user):
title = self.cleaned_data['title']
# Return the canonical version of this Tag, ignoring casing.
try:
return Tag.objects.get(title__iexact=title)
except Tag.DoesNotExist:
pass
# Ensure we have a unique slug
slug = slugify(title)
for x in itertools.count(1):
if not Tag.objects.filter(slug=slug).exists():
break
slug = '{}-{}'.format(slugify(title), x)
return Tag.objects.create(slug=slug, user=user, title=title) | freenodejobs/jobs/jobs_add_edit/forms.py | import re
import itertools
from django import forms
from django.utils.text import slugify
from freenodejobs.jobs.enums import StateEnum
from ..models import Job
from ..jobs_tags.models import Tag
class AddEditForm(forms.ModelForm):
class Meta:
model = Job
fields = (
'title',
'job_type',
'location',
'apply_url',
'apply_email',
'description',
'tags',
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['tags'].choices = [
(x.pk, x.title) for x in Tag.objects.all()
]
# Remove empty label
self.fields['job_type'].choices.pop(0)
def clean(self):
if not self.cleaned_data.get('apply_url') and \
not self.cleaned_data.get('apply_email'):
self.add_error(
'apply_url',
"You must specify at least one application method.",
)
self.add_error('apply_email', '')
return self.cleaned_data
def save(self, user):
instance = super().save(commit=False)
instance.user = user
instance.save()
# Ensure newly-unselected tags are removed
for x in instance.job_tags.all():
if x.tag not in self.cleaned_data['tags']:
x.delete()
# Ensure selected tags are selected
for x in self.cleaned_data['tags']:
instance.job_tags.get_or_create(tag=x, defaults={'user': user})
if instance.state == StateEnum.LIVE and self.changed_data:
txt = "Edited whilst live."
instance.set_state(StateEnum.WAITING_FOR_APPROVAL, user, txt)
instance.save()
return instance
class SubmitForApprovalForm(forms.Form):
def __init__(self, job, *args, **kwargs):
self.job = job
super().__init__(*args, **kwargs)
def clean(self):
if self.job.state != StateEnum.NEW:
raise forms.ValidationError("Job is not currently new.")
return self.cleaned_data
def save(self, user):
txt = "Submitted for approval."
self.job.set_state(StateEnum.WAITING_FOR_APPROVAL, user, txt)
self.job.save()
return self.job
class RemoveForm(forms.Form):
reason = forms.CharField()
def __init__(self, job, *args, **kwargs):
self.job = job
super().__init__(*args, **kwargs)
def save(self, user):
self.job.set_state(
StateEnum.REMOVED,
user,
self.cleaned_data['reason'],
)
self.job.save()
return self.job
class AddTagForm(forms.Form):
title = forms.CharField(max_length=255)
def clean_title(self):
val = self.cleaned_data['title'].strip()
if re.match(r'^[A-Za-z0-9-\+ ]+$', val) is None:
raise forms.ValidationError("Please enter a valid tag title.")
# Canonicalise titles by replacing multiple spaces with a single one
val = re.sub(r'\s+', ' ', val)
return val
def save(self, user):
title = self.cleaned_data['title']
# Return the canonical version of this Tag, ignoring casing.
try:
return Tag.objects.get(title__iexact=title)
except Tag.DoesNotExist:
pass
# Ensure we have a unique slug
slug = slugify(title)
for x in itertools.count(1):
if not Tag.objects.filter(slug=slug).exists():
break
slug = '{}-{}'.format(slugify(title), x)
return Tag.objects.create(slug=slug, user=user, title=title) | 0.485356 | 0.104614 |
import requests
import configparser
# NOTE(review): `global` is a no-op at module level — these lines only document
# that `token`, `instance_` and `server_` are shared across the dispatch
# branches below (set by "loginNOC", read by the other modules).
global token
global instance_
global server_
"""
Obtengo el modulo que fue invocado
"""
# Which command branch to execute; GetParams is injected by the host
# RPA runtime (Rocketbot-style) — it is not defined in this file.
module = GetParams("module")
if module == "loginNOC":
ruta_ = GetParams("ruta_")
config = configparser.ConfigParser()
config.read(ruta_)
email_ = config.get('USER', 'user')
pass_ = config.get('USER', 'password')
instance_ = config.get('USER', 'key')
try:
apikey_ = config.get('USER', 'apiKey')
except:
apikey_ = ""
server_ = config.get('NOC', 'server')
try:
if apikey_ != "":
token = apikey_
else:
data = {'email': email_, 'password': <PASSWORD>_}
res = requests.post(server_ + '/api/auth/login', data,
headers={'content-type': 'application/x-www-form-urlencoded'})
if res.status_code == 200:
res = res.json()
if res['success']:
token = res['data']
else:
raise Exception(res['message'])
else:
raise Exception(res.json()['message'])
except Exception as e:
PrintException()
raise (e)
if module == "getData":
name_ = GetParams("name_")
var_ = GetParams("var_")
process_ = GetParams("process_")
try:
data = {'name': name_, 'instance': instance_}
if process_:
data['process'] = process_
headers = {'content-type': 'application/x-www-form-urlencoded','Authorization': 'Bearer {token}'.format(token=token)}
res = requests.post(server_ + '/api/assets/get', data,
headers=headers)
print('RES',res)
if res.status_code == 200:
res = res.json()
if res['success']:
if 'data' in res:
print(res)
tmp = res['data']['value']
if var_:
SetVar(var_,tmp)
else:
raise Exception(res['message'])
else:
raise Exception(res.json()['message'])
except Exception as e:
PrintException()
raise (e)
if module == "getAllData":
name_ = GetParams("name_")
var_ = GetParams("var_")
try:
headers = {'content-type': 'application/x-www-form-urlencoded','Authorization': 'Bearer {token}'.format(token=token)}
res = requests.post(server_ + '/api/assets/list',
headers=headers)
if res.status_code == 200:
res = res.json()
if res['success']:
#print('RES',[a['name'] for a in res['data']])
tmp = [{'name':a['name'],'value':a['value']} for a in res['data']]
for b in tmp:
SetVar(b['name'],b['value'])
else:
raise Exception(res['message'])
else:
raise Exception(res.json()['message'])
except Exception as e:
PrintException()
raise (e) | __init__.py | import requests
import configparser
# RPA command module: GetParams / SetVar / PrintException are presumably
# injected by the hosting robot framework — confirm before running standalone.
# (`requests` is imported on the truncated row-boundary line just above.)
global token
global instance_
global server_
"""
Obtengo el modulo que fue invocado
"""
# (Spanish, above: "obtain the module that was invoked".)
module = GetParams("module")
if module == "loginNOC":
# Read server address and credentials from the .ini file at `ruta_`.
ruta_ = GetParams("ruta_")
config = configparser.ConfigParser()
config.read(ruta_)
email_ = config.get('USER', 'user')
pass_ = config.get('USER', 'password')
instance_ = config.get('USER', 'key')
try:
apikey_ = config.get('USER', 'apiKey')
except:
# apiKey is optional; fall back to email/password login.
apikey_ = ""
server_ = config.get('NOC', 'server')
try:
if apikey_ != "":
# A pre-issued API key doubles as the bearer token.
token = apikey_
else:
# NOTE(review): `<PASSWORD>_` is a redaction placeholder (invalid
# Python); the value sent was presumably `pass_` — confirm.
data = {'email': email_, 'password': <PASSWORD>_}
res = requests.post(server_ + '/api/auth/login', data,
headers={'content-type': 'application/x-www-form-urlencoded'})
if res.status_code == 200:
res = res.json()
if res['success']:
token = res['data']
else:
raise Exception(res['message'])
else:
raise Exception(res.json()['message'])
except Exception as e:
PrintException()
raise (e)
if module == "getData":
# Fetch one asset value and store it in robot variable `var_`.
name_ = GetParams("name_")
var_ = GetParams("var_")
process_ = GetParams("process_")
try:
data = {'name': name_, 'instance': instance_}
if process_:
data['process'] = process_
headers = {'content-type': 'application/x-www-form-urlencoded','Authorization': 'Bearer {token}'.format(token=token)}
res = requests.post(server_ + '/api/assets/get', data,
headers=headers)
print('RES',res)
if res.status_code == 200:
res = res.json()
if res['success']:
if 'data' in res:
print(res)
tmp = res['data']['value']
if var_:
SetVar(var_,tmp)
else:
raise Exception(res['message'])
else:
raise Exception(res.json()['message'])
except Exception as e:
PrintException()
raise (e)
if module == "getAllData":
# Mirror every asset of the instance into robot variables by name.
name_ = GetParams("name_")
var_ = GetParams("var_")
try:
headers = {'content-type': 'application/x-www-form-urlencoded','Authorization': 'Bearer {token}'.format(token=token)}
res = requests.post(server_ + '/api/assets/list',
headers=headers)
if res.status_code == 200:
res = res.json()
if res['success']:
#print('RES',[a['name'] for a in res['data']])
tmp = [{'name':a['name'],'value':a['value']} for a in res['data']]
for b in tmp:
SetVar(b['name'],b['value'])
else:
raise Exception(res['message'])
else:
raise Exception(res.json()['message'])
except Exception as e:
PrintException()
raise (e) | 0.105458 | 0.072178 |
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib as tc
from utils import tf_utils
from module import Model
from actor_critic import ActorCritic
from replaybuffer import ReplayBuffer
class DDPG(Model):
"""DDPG agent (TF1 graph mode): actor-critic with target networks, a
replay buffer and decaying parameter-space exploration noise.

Public interface: act(), step(), main_variables.
"""
def __init__(self, name, args, sess=None, reuse=False, log_tensorboard=True, save=True):
# Count of completed learning updates; used as the summary step.
self.learn_steps = 0
# hyperparameters
self.gamma = args[name]['gamma']
self.tau = args[name]['tau']
self.init_noise_sigma = args[name]['init_noise_sigma']
self.noise_decay = args[name]['noise_decay']
# replay buffer
self.buffer = ReplayBuffer(sample_size=args['batch_size'], max_len=args[name]['buffer_size'])
# Model.__init__ builds the graph (build_graph=True), so all ops exist
# before the hard target-net copy below.
super(DDPG, self).__init__(name, args, sess=sess, reuse=reuse, build_graph=True, log_tensorboard=log_tensorboard, save=save)
self._initialize_target_net()
@property
def main_variables(self):
# Trainable variables of the online (main) actor-critic.
return self.actor_critic.trainable_variables
@property
def _target_variables(self):
return self._target_actor_critic.trainable_variables
def act(self, state):
# Perturb actor parameters with (decaying) exploration noise, then act.
self.sess.run(self.noise_op)
state = state.reshape((-1, self.state_size))
action = self.sess.run(self.actor_critic.actor_action, feed_dict={self.actor_critic.state: state})
return np.squeeze(action)
def step(self, state, action, reward, next_state, done):
self.buffer.add(state, action, reward, next_state, done)
# Warm-up margin: only learn once the buffer comfortably exceeds a batch.
if len(self.buffer) > self.buffer.sample_size + 100:
self._learn()
""" Implementation """
def _build_graph(self):
# env info
self._setup_env()
# main actor-critic
self.actor_critic = self._create_actor_critic()
# target actor-critic
self._target_actor_critic = self._create_actor_critic(is_target=True)
# losses
self.actor_loss, self.critic_loss = self._loss()
# optimizating operation
self.opt_op = self._optimize([self.actor_loss, self.critic_loss])
# target net update operations
self.init_target_op, self.update_target_op = self._targetnet_ops()
# operations that add/remove noise from parameters
self.noise_op = self._noise_params()
def _setup_env(self):
self.state_size = self._args[self.name]['state_size']
self.action_size = self._args[self.name]['action_size']
self.env_info = {}
with tf.name_scope('placeholders'):
self.env_info['state'] = tf.placeholder(tf.float32, shape=(None, self.state_size), name='state')
self.env_info['action'] = tf.placeholder(tf.float32, shape=(None, self.action_size), name='action')
self.env_info['next_state'] = tf.placeholder(tf.float32, shape=(None, self.state_size), name='next_state')
self.env_info['reward'] = tf.placeholder(tf.float32, shape=(None, 1), name='reward')
self.env_info['done'] = tf.placeholder(tf.uint8, shape=(None, 1), name='done')
def _create_actor_critic(self, is_target=False):
name = 'target_actor_critic' if is_target else 'actor_critic'
# Only the online network writes TensorBoard summaries.
log_tensorboard = False if is_target else True
actor_critic = ActorCritic(name, self._args, self.env_info, self.action_size, reuse=self.reuse, log_tensorboard=log_tensorboard, is_target=is_target)
return actor_critic
def _loss(self):
with tf.name_scope('loss'):
with tf.name_scope('l2_loss'):
encoder_l2_loss = tf.losses.get_regularization_loss(scope='ddpg/actor_critic/encoder', name='encoder_l2_loss')
actor_l2_loss = tf.losses.get_regularization_loss(scope='ddpg/actor_critic/actor', name='actor_l2_loss')
critic_l2_loss = tf.losses.get_regularization_loss(scope='ddpg/actor_critic/critic', name='critic_l2_loss')
with tf.name_scope('actor_loss'):
# Maximize Q(s, pi(s)) by minimizing its negative mean.
actor_loss = tf.negative(tf.reduce_mean(self.actor_critic.Q_with_actor), name='actor_loss') + encoder_l2_loss + actor_l2_loss
with tf.name_scope('critic_loss'):
# One-step TD target from the frozen target network.
target_Q = tf.stop_gradient(self.env_info['reward']
+ self.gamma * tf.cast(1 - self.env_info['done'], tf.float32) * self._target_actor_critic.Q_with_actor, name='target_Q')
critic_loss = tf.losses.mean_squared_error(target_Q, self.actor_critic.Q) + encoder_l2_loss + critic_l2_loss
if self.log_tensorboard:
tf.summary.scalar('actor_l2_loss_', actor_l2_loss)
tf.summary.scalar('critic_l2_loss_', critic_l2_loss)
tf.summary.scalar('encoder_l2_loss_', encoder_l2_loss)
tf.summary.scalar('actor_loss_', actor_loss)
tf.summary.scalar('critic_loss_', critic_loss)
return actor_loss, critic_loss
def _optimize(self, losses):
with tf.variable_scope('optimizer'):
actor_loss, critic_loss = losses
actor_opt_op = self._optimize_objective(actor_loss, 'actor')
critic_opt_op = self._optimize_objective(critic_loss, 'critic')
opt_op = tf.group(actor_opt_op, critic_opt_op)
return opt_op
def _optimize_objective(self, loss, name):
# params for optimizer
learning_rate = self._args['actor_critic'][name]['learning_rate'] if 'learning_rate' in self._args['actor_critic'][name] else 1e-3
beta1 = self._args['actor_critic'][name]['beta1'] if 'beta1' in self._args['actor_critic'][name] else 0.9
beta2 = self._args['actor_critic'][name]['beta2'] if 'beta2' in self._args['actor_critic'][name] else 0.999
# NOTE(review): `self._args[name]['actor_critic']` flips the index order
# used by the three lookups above, and the `in` test guards a different
# key path — looks like a bug; confirm against the args schema.
clip_norm = self._args[name]['actor_critic']['clip_norm'] if 'clip_norm' in self._args['actor_critic'] else 5.
with tf.variable_scope(name+'_opt', reuse=self.reuse):
# setup optimizer
self._optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2)
tvars = self.actor_critic.actor_trainable_variables if name == 'actor' else self.actor_critic.critic_trainable_variables
grads, tvars = list(zip(*self._optimizer.compute_gradients(loss, var_list=tvars)))
grads, _ = tf.clip_by_global_norm(grads, clip_norm)
opt_op = self._optimizer.apply_gradients(zip(grads, tvars))
if self.log_tensorboard:
with tf.name_scope(name):
with tf.name_scope('gradients_'):
for grad, var in zip(grads, tvars):
if grad is not None:
tf.summary.histogram(var.name.replace(':0', ''), grad)
with tf.name_scope('params_'):
for var in tvars:
tf.summary.histogram(var.name.replace(':0', ''), var)
return opt_op
def _targetnet_ops(self):
with tf.name_scope('target_net_op'):
target_main_var_pairs = list(zip(self._target_variables, self.main_variables))
# Hard copy at init; Polyak (tau) soft update during learning.
init_target_op = list(map(lambda v: tf.assign(v[0], v[1], name='init_target_op'), target_main_var_pairs))
update_target_op = list(map(lambda v: tf.assign(v[0], self.tau * v[1] + (1. - self.tau) * v[0], name='update_target_op'), target_main_var_pairs))
return init_target_op, update_target_op
def _learn(self):
states, actions, rewards, next_states, dones = self.buffer.sample()
feed_dict = {
self.env_info['state']: states,
self.env_info['action']: actions,
self.env_info['reward']: rewards,
self.env_info['next_state']: next_states,
self.env_info['done']: dones,
}
# update the main networks
if self.log_tensorboard:
_, summary = self.sess.run([self.opt_op, self.merged_op], feed_dict=feed_dict)
self.writer.add_summary(summary, self.learn_steps)
else:
_ = self.sess.run(self.opt_op, feed_dict=feed_dict)
# update the target networks
self.sess.run(self.update_target_op)
self.learn_steps += 1
def _noise_params(self):
# NOTE(review): despite the "add/remove" comment in _build_graph, these
# ops only ever *add* noise (and decay sigma); nothing restores the
# unperturbed weights — confirm that is intended.
with tf.variable_scope('noise'):
noise_sigma = tf.get_variable('noise_sigma', initializer=self.init_noise_sigma,
trainable=False)
noise_decay_op = tf.assign(noise_sigma, self.noise_decay * noise_sigma, name='noise_decay_op')
noises = []
for var in self.actor_critic.actor_perturbable_variables:
noise = tf.truncated_normal(tf.shape(var), stddev=noise_sigma)
noises.append(noise)
if self.log_tensorboard:
tf.summary.scalar('noise_sigma_', noise_sigma)
param_noise_pairs = zip(self.actor_critic.actor_perturbable_variables, noises)
# Sigma decays once before each application of the noise ops.
with tf.control_dependencies([noise_decay_op]):
noise_op = list(map(lambda v: tf.assign(v[0], v[0] + v[1], name='noise_op'), param_noise_pairs))
return noise_op
def _initialize_target_net(self):
self.sess.run(self.init_target_op) | ddpg-bipedal/tensorflow-imp/ddpg_tf.py | import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib as tc
from utils import tf_utils
from module import Model
from actor_critic import ActorCritic
from replaybuffer import ReplayBuffer
class DDPG(Model):
"""DDPG agent (TF1 graph mode): actor-critic with target networks,
replay buffer and decaying parameter-space exploration noise.

Public interface: act(), step(), main_variables.
"""
def __init__(self, name, args, sess=None, reuse=False, log_tensorboard=True, save=True):
self.learn_steps = 0
# hyperparameters
self.gamma = args[name]['gamma']
self.tau = args[name]['tau']
self.init_noise_sigma = args[name]['init_noise_sigma']
self.noise_decay = args[name]['noise_decay']
# replay buffer
self.buffer = ReplayBuffer(sample_size=args['batch_size'], max_len=args[name]['buffer_size'])
# build_graph=True: the graph exists before the target-net copy below.
super(DDPG, self).__init__(name, args, sess=sess, reuse=reuse, build_graph=True, log_tensorboard=log_tensorboard, save=save)
self._initialize_target_net()
@property
def main_variables(self):
return self.actor_critic.trainable_variables
@property
def _target_variables(self):
return self._target_actor_critic.trainable_variables
def act(self, state):
# Apply (decaying) parameter noise before selecting an action.
self.sess.run(self.noise_op)
state = state.reshape((-1, self.state_size))
action = self.sess.run(self.actor_critic.actor_action, feed_dict={self.actor_critic.state: state})
return np.squeeze(action)
def step(self, state, action, reward, next_state, done):
self.buffer.add(state, action, reward, next_state, done)
# Warm-up margin before the first update.
if len(self.buffer) > self.buffer.sample_size + 100:
self._learn()
""" Implementation """
def _build_graph(self):
# env info
self._setup_env()
# main actor-critic
self.actor_critic = self._create_actor_critic()
# target actor-critic
self._target_actor_critic = self._create_actor_critic(is_target=True)
# losses
self.actor_loss, self.critic_loss = self._loss()
# optimizating operation
self.opt_op = self._optimize([self.actor_loss, self.critic_loss])
# target net update operations
self.init_target_op, self.update_target_op = self._targetnet_ops()
# operations that add/remove noise from parameters
self.noise_op = self._noise_params()
def _setup_env(self):
self.state_size = self._args[self.name]['state_size']
self.action_size = self._args[self.name]['action_size']
self.env_info = {}
with tf.name_scope('placeholders'):
self.env_info['state'] = tf.placeholder(tf.float32, shape=(None, self.state_size), name='state')
self.env_info['action'] = tf.placeholder(tf.float32, shape=(None, self.action_size), name='action')
self.env_info['next_state'] = tf.placeholder(tf.float32, shape=(None, self.state_size), name='next_state')
self.env_info['reward'] = tf.placeholder(tf.float32, shape=(None, 1), name='reward')
self.env_info['done'] = tf.placeholder(tf.uint8, shape=(None, 1), name='done')
def _create_actor_critic(self, is_target=False):
name = 'target_actor_critic' if is_target else 'actor_critic'
# Only the online network writes summaries.
log_tensorboard = False if is_target else True
actor_critic = ActorCritic(name, self._args, self.env_info, self.action_size, reuse=self.reuse, log_tensorboard=log_tensorboard, is_target=is_target)
return actor_critic
def _loss(self):
with tf.name_scope('loss'):
with tf.name_scope('l2_loss'):
encoder_l2_loss = tf.losses.get_regularization_loss(scope='ddpg/actor_critic/encoder', name='encoder_l2_loss')
actor_l2_loss = tf.losses.get_regularization_loss(scope='ddpg/actor_critic/actor', name='actor_l2_loss')
critic_l2_loss = tf.losses.get_regularization_loss(scope='ddpg/actor_critic/critic', name='critic_l2_loss')
with tf.name_scope('actor_loss'):
actor_loss = tf.negative(tf.reduce_mean(self.actor_critic.Q_with_actor), name='actor_loss') + encoder_l2_loss + actor_l2_loss
with tf.name_scope('critic_loss'):
# One-step TD target from the frozen target network.
target_Q = tf.stop_gradient(self.env_info['reward']
+ self.gamma * tf.cast(1 - self.env_info['done'], tf.float32) * self._target_actor_critic.Q_with_actor, name='target_Q')
critic_loss = tf.losses.mean_squared_error(target_Q, self.actor_critic.Q) + encoder_l2_loss + critic_l2_loss
if self.log_tensorboard:
tf.summary.scalar('actor_l2_loss_', actor_l2_loss)
tf.summary.scalar('critic_l2_loss_', critic_l2_loss)
tf.summary.scalar('encoder_l2_loss_', encoder_l2_loss)
tf.summary.scalar('actor_loss_', actor_loss)
tf.summary.scalar('critic_loss_', critic_loss)
return actor_loss, critic_loss
def _optimize(self, losses):
with tf.variable_scope('optimizer'):
actor_loss, critic_loss = losses
actor_opt_op = self._optimize_objective(actor_loss, 'actor')
critic_opt_op = self._optimize_objective(critic_loss, 'critic')
opt_op = tf.group(actor_opt_op, critic_opt_op)
return opt_op
def _optimize_objective(self, loss, name):
# params for optimizer
learning_rate = self._args['actor_critic'][name]['learning_rate'] if 'learning_rate' in self._args['actor_critic'][name] else 1e-3
beta1 = self._args['actor_critic'][name]['beta1'] if 'beta1' in self._args['actor_critic'][name] else 0.9
beta2 = self._args['actor_critic'][name]['beta2'] if 'beta2' in self._args['actor_critic'][name] else 0.999
# NOTE(review): index order is flipped vs. the lookups above — likely bug.
clip_norm = self._args[name]['actor_critic']['clip_norm'] if 'clip_norm' in self._args['actor_critic'] else 5.
with tf.variable_scope(name+'_opt', reuse=self.reuse):
# setup optimizer
self._optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2)
tvars = self.actor_critic.actor_trainable_variables if name == 'actor' else self.actor_critic.critic_trainable_variables
grads, tvars = list(zip(*self._optimizer.compute_gradients(loss, var_list=tvars)))
grads, _ = tf.clip_by_global_norm(grads, clip_norm)
opt_op = self._optimizer.apply_gradients(zip(grads, tvars))
if self.log_tensorboard:
with tf.name_scope(name):
with tf.name_scope('gradients_'):
for grad, var in zip(grads, tvars):
if grad is not None:
tf.summary.histogram(var.name.replace(':0', ''), grad)
with tf.name_scope('params_'):
for var in tvars:
tf.summary.histogram(var.name.replace(':0', ''), var)
return opt_op
def _targetnet_ops(self):
with tf.name_scope('target_net_op'):
target_main_var_pairs = list(zip(self._target_variables, self.main_variables))
# Hard copy at init; Polyak (tau) soft update during learning.
init_target_op = list(map(lambda v: tf.assign(v[0], v[1], name='init_target_op'), target_main_var_pairs))
update_target_op = list(map(lambda v: tf.assign(v[0], self.tau * v[1] + (1. - self.tau) * v[0], name='update_target_op'), target_main_var_pairs))
return init_target_op, update_target_op
def _learn(self):
states, actions, rewards, next_states, dones = self.buffer.sample()
feed_dict = {
self.env_info['state']: states,
self.env_info['action']: actions,
self.env_info['reward']: rewards,
self.env_info['next_state']: next_states,
self.env_info['done']: dones,
}
# update the main networks
if self.log_tensorboard:
_, summary = self.sess.run([self.opt_op, self.merged_op], feed_dict=feed_dict)
self.writer.add_summary(summary, self.learn_steps)
else:
_ = self.sess.run(self.opt_op, feed_dict=feed_dict)
# update the target networks
self.sess.run(self.update_target_op)
self.learn_steps += 1
def _noise_params(self):
# NOTE(review): only *adds* noise and decays sigma; nothing restores
# the unperturbed weights — confirm intended.
with tf.variable_scope('noise'):
noise_sigma = tf.get_variable('noise_sigma', initializer=self.init_noise_sigma,
trainable=False)
noise_decay_op = tf.assign(noise_sigma, self.noise_decay * noise_sigma, name='noise_decay_op')
noises = []
for var in self.actor_critic.actor_perturbable_variables:
noise = tf.truncated_normal(tf.shape(var), stddev=noise_sigma)
noises.append(noise)
if self.log_tensorboard:
tf.summary.scalar('noise_sigma_', noise_sigma)
param_noise_pairs = zip(self.actor_critic.actor_perturbable_variables, noises)
with tf.control_dependencies([noise_decay_op]):
noise_op = list(map(lambda v: tf.assign(v[0], v[0] + v[1], name='noise_op'), param_noise_pairs))
return noise_op
def _initialize_target_net(self):
self.sess.run(self.init_target_op) | 0.841011 | 0.12326 |
import os
import json
import tensorflow as tf
import numpy as np
from pycocotools.cocoeval import COCOeval
from detection.datasets import coco, data_generator
from detection.models.detectors import faster_rcnn
# Evaluation script: run a trained Faster R-CNN over the COCO-2017
# validation split and report COCO bbox mAP.
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
# build dataset
img_mean = (123.675, 116.28, 103.53)
# img_std = (58.395, 57.12, 57.375)
img_std = (1., 1., 1.)
val_dataset = coco.CocoDataSet('./COCO2017/', 'val',
flip_ratio=0,
pad_mode='fixed',
mean=img_mean,
std=img_std,
scale=(800, 1344))
print('len(val_dataset) >>>>>>>>>>>>>> ', len(val_dataset))
# load faster-rcnn model
model = faster_rcnn.FasterRCNN(num_classes=len(val_dataset.get_categories()))
img, img_meta, bboxes, labels = val_dataset[0]
batch_imgs = tf.Variable(np.expand_dims(img, 0))
batch_metas = tf.Variable(np.expand_dims(img_meta, 0))
# One forward pass builds the model's variables so the weights can load.
_ = model((batch_imgs, batch_metas), training=False)
model.load_weights('weights/faster_rcnn.h5', by_name=True)
# test on the validation dataset
batch_size = 1
dataset_results = []
imgIds = []
for idx in range(len(val_dataset)):
if idx % 10 == 0:
print(idx)
img, img_meta, _, _ = val_dataset[idx]
# generate proposals
proposals = model.simple_test_rpn(img, img_meta)
# detect on pictures with proposal
res = model.simple_test_bboxes(img, img_meta, proposals)
image_id = val_dataset.img_ids[idx]
imgIds.append(image_id)
for pos in range(res['class_ids'].shape[0]):
results = dict()
results['score'] = float(res['scores'][pos])
results['category_id'] = val_dataset.label2cat[int(res['class_ids'][pos])]
# rois unpack as (y1, x1, y2, x2); COCO expects [x, y, w, h].
y1, x1, y2, x2 = [float(num) for num in list(res['rois'][pos])]
results['bbox'] = [x1, y1, x2 - x1 + 1, y2 - y1 + 1]
results['image_id'] = image_id
dataset_results.append(results)
# write result to json
with open('coco_val2017_detection_result.json', 'w') as f:
f.write(json.dumps(dataset_results))
coco_dt = val_dataset.coco.loadRes('coco_val2017_detection_result.json')
# evaluate mAP
cocoEval = COCOeval(val_dataset.coco, coco_dt, 'bbox')
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize() | ObjectDetection/Faster R-CNN/evaluate.py | import os
import json
import tensorflow as tf
import numpy as np
from pycocotools.cocoeval import COCOeval
from detection.datasets import coco, data_generator
from detection.models.detectors import faster_rcnn
# COCO-2017 validation-set evaluation of a trained Faster R-CNN (bbox mAP).
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
# build dataset
img_mean = (123.675, 116.28, 103.53)
# img_std = (58.395, 57.12, 57.375)
img_std = (1., 1., 1.)
val_dataset = coco.CocoDataSet('./COCO2017/', 'val',
flip_ratio=0,
pad_mode='fixed',
mean=img_mean,
std=img_std,
scale=(800, 1344))
print('len(val_dataset) >>>>>>>>>>>>>> ', len(val_dataset))
# load faster-rcnn model
model = faster_rcnn.FasterRCNN(num_classes=len(val_dataset.get_categories()))
img, img_meta, bboxes, labels = val_dataset[0]
batch_imgs = tf.Variable(np.expand_dims(img, 0))
batch_metas = tf.Variable(np.expand_dims(img_meta, 0))
# Forward pass builds variables before loading the checkpoint.
_ = model((batch_imgs, batch_metas), training=False)
model.load_weights('weights/faster_rcnn.h5', by_name=True)
# test on the validation dataset
batch_size = 1
dataset_results = []
imgIds = []
for idx in range(len(val_dataset)):
if idx % 10 == 0:
print(idx)
img, img_meta, _, _ = val_dataset[idx]
# generate proposals
proposals = model.simple_test_rpn(img, img_meta)
# detect on pictures with proposal
res = model.simple_test_bboxes(img, img_meta, proposals)
image_id = val_dataset.img_ids[idx]
imgIds.append(image_id)
for pos in range(res['class_ids'].shape[0]):
results = dict()
results['score'] = float(res['scores'][pos])
results['category_id'] = val_dataset.label2cat[int(res['class_ids'][pos])]
# (y1, x1, y2, x2) converted to COCO [x, y, w, h].
y1, x1, y2, x2 = [float(num) for num in list(res['rois'][pos])]
results['bbox'] = [x1, y1, x2 - x1 + 1, y2 - y1 + 1]
results['image_id'] = image_id
dataset_results.append(results)
# write result to json
with open('coco_val2017_detection_result.json', 'w') as f:
f.write(json.dumps(dataset_results))
coco_dt = val_dataset.coco.loadRes('coco_val2017_detection_result.json')
# evaluate mAP
cocoEval = COCOeval(val_dataset.coco, coco_dt, 'bbox')
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize() | 0.340814 | 0.251605 |
from Tkinter import *
class Dialog(Toplevel):
"""Class to open dialogs.
This class is intended as a base class for custom dialogs
"""
def __init__(self, parent, title=None):
"""Initialize a dialog.
Arguments:
parent -- a parent window (the application window)
title -- the dialog title
"""
Toplevel.__init__(self, parent)
# Stay invisible while the dialog is built; shown again via deiconify().
self.withdraw()
if parent.winfo_viewable():
self.transient(parent)
if title:
self.title(title)
self.parent = parent
# Subclasses set self.result in apply(); None means "cancelled".
self.result = None
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
if not self.initial_focus:
self.initial_focus = self
# Closing via the window manager behaves like pressing Cancel.
self.protocol('WM_DELETE_WINDOW', self.cancel)
if self.parent is not None:
# Place the dialog slightly offset from the parent's corner.
self.geometry('+%d+%d' % (parent.winfo_rootx() + 50,
parent.winfo_rooty() + 50))
self.deiconify()
self.initial_focus.focus_set()
# Make the dialog modal: grab events once the window is visible,
# then block here until it is destroyed.
self.wait_visibility()
self.grab_set()
self.wait_window(self)
return
def destroy(self):
"""Destroy the window"""
# Drop the widget reference so it is not kept alive after teardown.
self.initial_focus = None
Toplevel.destroy(self)
return
def body(self, master):
"""create dialog body.
return widget that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
"""
pass
def buttonbox(self):
"""add standard button box.
override if you do not want the standard buttons
"""
box = Frame(self)
w = Button(box, text='OK', width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text='Cancel', width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind('<Return>', self.ok)
self.bind('<Escape>', self.cancel)
box.pack()
def ok(self, event=None):
# Keep the dialog open (and refocus the entry) while input is invalid.
if not self.validate():
self.initial_focus.focus_set()
return
self.withdraw()
self.update_idletasks()
try:
self.apply()
finally:
# Always tear the window down, even if apply() raised.
self.cancel()
def cancel(self, event=None):
# Put focus back on the parent window before destroying ourselves.
if self.parent is not None:
self.parent.focus_set()
self.destroy()
return
def validate(self):
"""validate the data
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
"""
return 1
def apply(self):
"""process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
"""
pass
class _QueryDialog(Dialog):
"""Shared base for the ask* helpers: one prompt label plus one Entry,
with optional min/max validation of the converted value.

Subclasses provide `errormessage` and `getresult()`.
"""
def __init__(self, title, prompt, initialvalue=None, minvalue=None, maxvalue=None, parent=None):
if not parent:
# Fall back to the default root window when no parent is given.
import Tkinter
parent = Tkinter._default_root
self.prompt = prompt
self.minvalue = minvalue
self.maxvalue = maxvalue
self.initialvalue = initialvalue
Dialog.__init__(self, parent, title)
def destroy(self):
self.entry = None
Dialog.destroy(self)
return
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name='entry')
self.entry.grid(row=1, padx=5, sticky=W + E)
if self.initialvalue:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
# The entry receives initial keyboard focus.
return self.entry
def validate(self):
# Imported lazily so the warning dialogs are only loaded when needed.
import tkMessageBox
try:
# getresult() converts the raw text; ValueError means bad input.
result = self.getresult()
except ValueError:
tkMessageBox.showwarning('Illegal value', self.errormessage + '\nPlease try again', parent=self)
return 0
if self.minvalue is not None and result < self.minvalue:
tkMessageBox.showwarning('Too small', 'The allowed minimum value is %s. Please try again.' % self.minvalue, parent=self)
return 0
else:
if self.maxvalue is not None and result > self.maxvalue:
tkMessageBox.showwarning('Too large', 'The allowed maximum value is %s. Please try again.' % self.maxvalue, parent=self)
return 0
self.result = result
return 1
class _QueryInteger(_QueryDialog):
    """Integer flavour of the query dialog."""

    errormessage = 'Not an integer.'

    def getresult(self):
        # A ValueError from int() is handled by _QueryDialog.validate().
        raw = self.entry.get()
        return int(raw)
def askinteger(title, prompt, **kw):
    """Prompt the user for an integer.

    Arguments:
    title -- the dialog title
    prompt -- the label text
    **kw -- see SimpleDialog class

    Return value is an integer.
    """
    return _QueryInteger(title, prompt, **kw).result
class _QueryFloat(_QueryDialog):
    """Float flavour of the query dialog."""

    errormessage = 'Not a floating point value.'

    def getresult(self):
        # A ValueError from float() is handled by _QueryDialog.validate().
        raw = self.entry.get()
        return float(raw)
def askfloat(title, prompt, **kw):
    """Prompt the user for a float.

    Arguments:
    title -- the dialog title
    prompt -- the label text
    **kw -- see SimpleDialog class

    Return value is a float.
    """
    return _QueryFloat(title, prompt, **kw).result
class _QueryString(_QueryDialog):
    """String flavour of the query dialog; accepts an extra 'show'
    option that masks the entry text (e.g. for passwords)."""

    def __init__(self, *args, **kw):
        # Pop the extra 'show' option before delegating to the base
        # class; None leaves the entry unmasked.
        self.__show = kw.pop('show', None)
        _QueryDialog.__init__(self, *args, **kw)

    def body(self, master):
        entry = _QueryDialog.body(self, master)
        if self.__show is not None:
            entry.configure(show=self.__show)
        return entry

    def getresult(self):
        return self.entry.get()
def askstring(title, prompt, **kw):
    """Prompt the user for a string.

    Arguments:
    title -- the dialog title
    prompt -- the label text
    **kw -- see SimpleDialog class

    Return value is a string.
    """
    return _QueryString(title, prompt, **kw).result
if __name__ == '__main__':
# Manual smoke test (Python 2 print-statement syntax): pops up each
# dialog flavour in turn and prints the result.
root = Tk()
root.update()
print askinteger('Spam', 'Egg count', initialvalue=144)
print askfloat('Spam', 'Egg weight\n(in tons)', minvalue=1, maxvalue=100)
print askstring('Spam', 'Egg label') | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib-tk/tkSimpleDialog.py | from Tkinter import *
class Dialog(Toplevel):
"""Class to open dialogs.
This class is intended as a base class for custom dialogs
"""
def __init__(self, parent, title=None):
"""Initialize a dialog.
Arguments:
parent -- a parent window (the application window)
title -- the dialog title
"""
Toplevel.__init__(self, parent)
# Hidden while being built; re-shown via deiconify() below.
self.withdraw()
if parent.winfo_viewable():
self.transient(parent)
if title:
self.title(title)
self.parent = parent
# apply() in subclasses fills this in; None means "cancelled".
self.result = None
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
if not self.initial_focus:
self.initial_focus = self
# Window-manager close acts like Cancel.
self.protocol('WM_DELETE_WINDOW', self.cancel)
if self.parent is not None:
self.geometry('+%d+%d' % (parent.winfo_rootx() + 50,
parent.winfo_rooty() + 50))
self.deiconify()
self.initial_focus.focus_set()
# Modal behaviour: grab after the window is visible, then block
# until the dialog is destroyed.
self.wait_visibility()
self.grab_set()
self.wait_window(self)
return
def destroy(self):
"""Destroy the window"""
self.initial_focus = None
Toplevel.destroy(self)
return
def body(self, master):
"""create dialog body.
return widget that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
"""
pass
def buttonbox(self):
"""add standard button box.
override if you do not want the standard buttons
"""
box = Frame(self)
w = Button(box, text='OK', width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text='Cancel', width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind('<Return>', self.ok)
self.bind('<Escape>', self.cancel)
box.pack()
def ok(self, event=None):
# Invalid input keeps the dialog open with focus restored.
if not self.validate():
self.initial_focus.focus_set()
return
self.withdraw()
self.update_idletasks()
try:
self.apply()
finally:
# Tear down even if apply() raised.
self.cancel()
def cancel(self, event=None):
# Return focus to the parent window before destroying ourselves.
if self.parent is not None:
self.parent.focus_set()
self.destroy()
return
def validate(self):
"""validate the data
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
"""
return 1
def apply(self):
"""process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
"""
pass
class _QueryDialog(Dialog):
"""Base dialog for the ask* helpers: prompt label, single Entry, and
optional min/max checking of the value returned by getresult()."""
def __init__(self, title, prompt, initialvalue=None, minvalue=None, maxvalue=None, parent=None):
if not parent:
# Default to the application's root window.
import Tkinter
parent = Tkinter._default_root
self.prompt = prompt
self.minvalue = minvalue
self.maxvalue = maxvalue
self.initialvalue = initialvalue
Dialog.__init__(self, parent, title)
def destroy(self):
self.entry = None
Dialog.destroy(self)
return
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name='entry')
self.entry.grid(row=1, padx=5, sticky=W + E)
if self.initialvalue:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
return self.entry
def validate(self):
# Lazy import: warning popups are only needed on bad input.
import tkMessageBox
try:
result = self.getresult()
except ValueError:
tkMessageBox.showwarning('Illegal value', self.errormessage + '\nPlease try again', parent=self)
return 0
if self.minvalue is not None and result < self.minvalue:
tkMessageBox.showwarning('Too small', 'The allowed minimum value is %s. Please try again.' % self.minvalue, parent=self)
return 0
else:
if self.maxvalue is not None and result > self.maxvalue:
tkMessageBox.showwarning('Too large', 'The allowed maximum value is %s. Please try again.' % self.maxvalue, parent=self)
return 0
self.result = result
return 1
class _QueryInteger(_QueryDialog):
    """Query dialog whose entry must parse as an integer."""

    errormessage = 'Not an integer.'

    def getresult(self):
        entry_text = self.entry.get()
        return int(entry_text)
def askinteger(title, prompt, **kw):
    """Prompt the user for an integer.

    Arguments:
    title -- the dialog title
    prompt -- the label text
    **kw -- see SimpleDialog class

    Return value is an integer (or None if the dialog was cancelled).
    """
    dialog = _QueryInteger(title, prompt, **kw)
    return dialog.result
class _QueryFloat(_QueryDialog):
    """Query dialog whose entry must parse as a float."""

    errormessage = 'Not a floating point value.'

    def getresult(self):
        entry_text = self.entry.get()
        return float(entry_text)
def askfloat(title, prompt, **kw):
    """Prompt the user for a floating point value.

    Arguments:
    title -- the dialog title
    prompt -- the label text
    **kw -- see SimpleDialog class

    Return value is a float (or None if the dialog was cancelled).
    """
    dialog = _QueryFloat(title, prompt, **kw)
    return dialog.result
class _QueryString(_QueryDialog):
    """Query dialog returning the raw entry text, optionally masked."""

    def __init__(self, *args, **kw):
        # 'show' (e.g. '*' for passwords) is consumed here because the base
        # dialog does not understand it.
        self.__show = kw.pop('show', None)
        _QueryDialog.__init__(self, *args, **kw)

    def body(self, master):
        entry = _QueryDialog.body(self, master)
        if self.__show is not None:
            entry.configure(show=self.__show)
        return entry

    def getresult(self):
        return self.entry.get()
def askstring(title, prompt, **kw):
    """Prompt the user for a string.

    Arguments:
    title -- the dialog title
    prompt -- the label text
    **kw -- see SimpleDialog class

    Return value is a string (or None if the dialog was cancelled).
    """
    dialog = _QueryString(title, prompt, **kw)
    return dialog.result
if __name__ == '__main__':
    # Manual smoke test of the three ask* helpers.
    # Python 2 code: note the print statements.
    root = Tk()
    root.update()
    print askinteger('Spam', 'Egg count', initialvalue=144)
    print askfloat('Spam', 'Egg weight\n(in tons)', minvalue=1, maxvalue=100)
    print askstring('Spam', 'Egg label')
import csv
import numpy as np
import cv2
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv2D, Lambda, Dropout
from keras.layers.convolutional import Cropping2D
from keras.optimizers import Adam
# Load Data: each row of driving_log.csv describes one captured frame
# (center/left/right image paths, steering angle, ...).
samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip the CSV header row
    for line in reader:
        samples.append(line)
# Generator
def generator(samples, batch_size=32, correction=0.4):
    """Yield shuffled (images, steering angles) batches indefinitely.

    For every CSV row the center, left and right camera frames are loaded;
    the left/right angles are offset by +/- ``correction`` to simulate the
    car being off-center.

    :param samples: list of driving_log.csv rows
    :param batch_size: number of CSV rows per batch (3 images per row)
    :param correction: steering offset for the side cameras
        (was a hard-coded 0.4; parameterized backward-compatibly)
    """
    num_samples = len(samples)
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # Hoisted out of the camera loop: the angle is per-row.
                center_angle = float(batch_sample[3])
                for i in range(3):  # center, left and right images
                    name = 'data/IMG/' + batch_sample[i].split('/')[-1]
                    # cv2 decodes to BGR; convert so the model sees RGB.
                    current_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
                    images.append(current_image)
                    if i == 0:
                        angles.append(center_angle)
                    elif i == 1:
                        angles.append(center_angle + correction)  # left camera
                    elif i == 2:
                        angles.append(center_angle - correction)  # right camera
            X_train = np.array(images)
            y_train = np.array(angles)
            yield tuple(sklearn.utils.shuffle(X_train, y_train))
# nVidia Model
# NOTE(review): no layers are added -- presumably the nVidia architecture
# (Lambda/Cropping2D/Conv2D/Dense, already imported above) was intended; TODO.
model = Sequential()
# Train and Save
# Fix: train/validation split and the two generators were never defined,
# so this script crashed with NameError before training started.
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
train_generator = generator(train_samples)
validation_generator = generator(validation_samples)
model.fit_generator(train_generator, steps_per_epoch=len(train_samples),
                    validation_data=validation_generator,
                    validation_steps=len(validation_samples), epochs=5, verbose=1)
model.save('model.h5')
import numpy as np
import cv2
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv2D, Lambda, Dropout
from keras.layers.convolutional import Cropping2D
from keras.optimizers import Adam
# Load Data: each row of driving_log.csv describes one captured frame
# (center/left/right image paths, steering angle, ...).
samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip the CSV header row
    for line in reader:
        samples.append(line)
# Generator
def generator(samples, batch_size=32, correction=0.4):
    """Yield shuffled (images, steering angles) batches indefinitely.

    For every CSV row the center, left and right camera frames are loaded;
    the left/right angles are offset by +/- ``correction`` to simulate the
    car being off-center.

    :param samples: list of driving_log.csv rows
    :param batch_size: number of CSV rows per batch (3 images per row)
    :param correction: steering offset for the side cameras
        (was a hard-coded 0.4; parameterized backward-compatibly)
    """
    num_samples = len(samples)
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # Hoisted out of the camera loop: the angle is per-row.
                center_angle = float(batch_sample[3])
                for i in range(3):  # center, left and right images
                    name = 'data/IMG/' + batch_sample[i].split('/')[-1]
                    # cv2 decodes to BGR; convert so the model sees RGB.
                    current_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
                    images.append(current_image)
                    if i == 0:
                        angles.append(center_angle)
                    elif i == 1:
                        angles.append(center_angle + correction)  # left camera
                    elif i == 2:
                        angles.append(center_angle - correction)  # right camera
            X_train = np.array(images)
            y_train = np.array(angles)
            yield tuple(sklearn.utils.shuffle(X_train, y_train))
# nVidia Model
# NOTE(review): no layers are added -- presumably the nVidia architecture
# (Lambda/Cropping2D/Conv2D/Dense, already imported above) was intended; TODO.
model = Sequential()
# Train and Save
# Fix: train/validation split and the two generators were never defined,
# so this script crashed with NameError before training started.
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
train_generator = generator(train_samples)
validation_generator = generator(validation_samples)
model.fit_generator(train_generator, steps_per_epoch=len(train_samples),
                    validation_data=validation_generator,
                    validation_steps=len(validation_samples), epochs=5, verbose=1)
model.save('model.h5')
import os
import yaml
import logging
import collections
from ConfigParser import NoSectionError, NoOptionError, DuplicateSectionError
from ratatosk import backend
from ratatosk.utils import update, config_to_dict
from ratatosk.log import get_logger
logger = get_logger()
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
class RatatoskConfigParser(object):
    """Ratatosk configuration parser. Works on yaml files.

    Implemented as a process-wide singleton (see :meth:`instance`).
    Configuration is merged from every path in ``_config_paths`` in
    registration order via :func:`ratatosk.utils.update`.
    """
    NO_DEFAULT = object()
    _instance = None
    _config_paths = []
    _cls_dict = collections.OrderedDict

    @classmethod
    def add_config_path(cls, path):
        # Register a new config file and reload.  os.path.samefile guards
        # against adding the same file twice under different spellings.
        if path and not any(os.path.samefile(path, x) for x in cls._instance._config_paths):
            logger.debug("adding config path {}".format(path))
            cls._instance._config_paths.append(path)
        else:
            return
        cls._instance.reload()

    @classmethod
    def del_config_path(cls, path):
        # Unregister a config file (matched via samefile) and rebuild the
        # merged sections from the remaining paths.
        if path and any(os.path.samefile(path, x) for x in cls._instance._config_paths):
            logger.debug("removing config path {}".format(path))
            try:
                i = [os.path.samefile(path, x) for x in cls._instance._config_paths].index(True)
                del cls._instance._config_paths[i]
            except ValueError:
                logger.warn("No such path {} in _config_paths".format(path))
        else:
            return
        # Need to clear sections before reloading
        cls._instance._sections = cls._cls_dict()
        cls._instance.reload()

    @classmethod
    def instance(cls, *args, **kwargs):
        """Singleton getter"""
        if cls._instance is None:
            cls._instance = cls(*args, **kwargs)
            loaded = cls._instance.reload()
            logger.info("Loaded %r" % loaded)
        return cls._instance

    @classmethod
    def clear(cls):
        # Reset the singleton's paths and parsed sections (used by tests).
        cls._instance._config_paths = []
        cls._instance._sections = cls._cls_dict()

    def reload(self):
        # NOTE(review): goes through the class-level _instance rather than
        # self; equivalent only because reload is called on the singleton.
        return self._instance.read(self._instance._config_paths)

    def __init__(self, defaults=None, dict_type=_default_dict, *args, **kw):
        # ``defaults`` mirrors the ConfigParser signature but is not stored;
        # self._defaults always starts empty.
        self._dict = dict_type
        self._sections = self._dict()
        self._defaults = self._dict()
        # NOTE(review): this binds a *local* name only -- it does NOT update
        # the class attribute _cls_dict and therefore has no effect.
        _cls_dict = self._dict

    def read(self, file_paths):
        """
        Read config files.
        :param file_path: The file system path to the configuration file.
        :returns: boolean
        """
        for path in file_paths:
            try:
                with open(path) as fp:
                    # NOTE(review): yaml.load without an explicit Loader is
                    # unsafe on untrusted input; config files are presumably
                    # trusted here -- confirm before exposing externally.
                    _sections = yaml.load(fp)
                    if _sections is None:
                        _sections = {}
                    self._sections = update(self._sections, _sections)
            except IOError:
                # NOTE(review): uses the root ``logging`` module (not the
                # module-level ``logger``) and aborts on the first missing
                # file, skipping any remaining paths.
                logging.warn("No such file {}".format(path))
                return False
        return True

    def parse_file(self, file_path):
        """
        Parse config file settings from file_path, overwriting existing
        config settings. If the file does not exist, returns False.
        :param file_path: The file system path to the configuration file.
        :returns: boolean
        """
        if file_path is None:
            return None
        file_path = os.path.abspath(os.path.expanduser(file_path))
        if os.path.exists(file_path):
            # NOTE(review): read() iterates its argument as a *list* of
            # paths; passing a single string iterates its characters --
            # looks like a latent bug, verify callers.
            self.read(file_path)
            return True
        else:
            logger.debug("config file '{}' does not exist, skipping...".format(file_path))
            return False

    def keys(self, section, subsection=None):
        """
        Return a list of keys within 'section'.
        :param section: The config section.
        :param subsection: The config subsection.
        :returns: List of keys in the `section` or `subsection`.
        :rtype: list
        """
        return self.options(section, subsection)

    def options(self, section, subsection=None):
        # Option names for a (sub)section, overlaid with self._defaults.
        try:
            opts = self._sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        if subsection:
            try:
                opts = opts[subsection].copy()
            except KeyError:
                raise NoSectionError(subsection)
        opts = update(opts, self._defaults)
        if '__name__' in opts:
            del opts['__name__']
        return opts.keys()

    def has_key(self, section, key, subsection=None):
        """
        Return whether or not a 'section' has the given 'key'.
        :param section: The section of the configuration. I.e. [block_section].
        :param key: The key within 'section'.
        :returns: True if the config `section` has `key`.
        :rtype: boolean
        """
        if key in self.options(section, subsection):
            return True
        else:
            return False

    def sections(self):
        """Return a list of section names"""
        return self._sections.keys()

    def get(self, section, option, subsection=None):
        """Get an option"""
        if not section in self.sections():
            raise NoSectionError(section)
        if subsection:
            if not subsection in self._sections[section]:
                raise NoSectionError(subsection)
            if not option in self._sections[section][subsection]:
                raise NoOptionError(option, subsection)
            return self._sections[section][subsection][option]
        if not option in self._sections[section]:
            raise NoOptionError(option, section)
        return self._sections[section][option]

    def set(self, section, option, value=None, subsection=None):
        """Set an option"""
        if not section:
            # An empty/falsy section name writes into the defaults mapping.
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        if subsection:
            try:
                sectdict = sectdict[subsection]
            except KeyError:
                raise NoSectionError(subsection)
        sectdict[self.optionxform(option)] = value

    def optionxform(self, optionstr):
        # Option names are case-insensitive, normalized to lowercase on set.
        return optionstr.lower()

    def has_section(self, section, subsection=None):
        """Indicate whether the named section is present in the configuration"""
        if subsection:
            return subsection in self._sections.get(section, {})
        return section in self._sections

    def get_sections(self):
        """
        Return a list of configuration sections or [blocks].
        :returns: List of sections.
        :rtype: list
        """
        return self.sections()

    def get_section_dict(self, section, subsection=None):
        """
        Return a dict representation of a section.
        :param section: The section of the configuration.
        :param subsection: The subsection of the configuration.
        :returns: Dictionary reprisentation of the config section.
        :rtype: dict
        """
        dict_obj = dict()
        for key in self.keys(section, subsection):
            dict_obj[key] = self.get(section, key, subsection=subsection)
        return dict_obj

    def add_section(self, section, subsection=None):
        """
        Adds a block section to the config.
        :param section: The section to add.
        """
        if subsection:
            if not self.has_section(section):
                raise NoSectionError(section)
            if subsection in self._sections[section]:
                raise DuplicateSectionError(section)
            self._sections[section][subsection] = self._dict()
        else:
            if section in self._sections:
                raise DuplicateSectionError(section)
            self._sections[section] = self._dict()

    def del_section(self, section, subsection=None):
        """
        Deletes a block section to the config.
        :param section: The section to delete.
        :param subsection: The section to delete.
        """
        if subsection:
            if not self.has_section(section):
                raise NoSectionError(section)
            if not subsection in self._sections[section]:
                raise NoSectionError(subsection)
            del self._sections[section][subsection]
        else:
            if not self.has_section(section):
                raise NoSectionError(section)
            del self._sections[section]

    def save(self, config, filename):
        """Save configuration to file"""
        config_d = config_to_dict(config)
        with open(filename, "w") as fh:
            fh.write(yaml.safe_dump(config_d, default_flow_style=False, allow_unicode=True, width=1000))
# This is hackish; we can't use one instance to hold both config file
# and custom-config file since parent tasks settings in the latter
# will override those in the former, violating rules about pipeline
# immutability
class RatatoskCustomConfigParser(RatatoskConfigParser):
    """Ratatosk configuration parser. Works on yaml files.

    Separate singleton for the *custom* config file so its settings do not
    pollute the main parser (see the module comment above this class).
    Shadows ``_instance`` and tracks paths in ``_custom_config_paths``.
    """
    _instance = None
    _custom_config_paths = []

    @classmethod
    def add_config_path(cls, path):
        if path and not any(os.path.samefile(path, x) for x in cls._instance._custom_config_paths):
            logger.debug("adding config path {}".format(path))
            cls._instance._custom_config_paths.append(path)
        else:
            return
        cls._instance.reload()

    @classmethod
    def del_config_path(cls, path):
        # NOTE(review): matches by plain equality here, unlike the parent
        # class which uses os.path.samefile -- differing spellings of the
        # same file will not be removed.
        if path and path in cls._instance._custom_config_paths:
            logger.debug("removing config path {}".format(path))
            try:
                i = cls._instance._custom_config_paths.index(path)
                del cls._instance._custom_config_paths[i]
            except ValueError:
                logger.warn("No such path {} in _custom_config_paths".format(path))
        else:
            return
        # Need to clear sections before reloading
        cls._instance._sections = cls._cls_dict()
        cls._instance.reload()

    @classmethod
    def clear(cls):
        cls._instance._custom_config_paths = []
        cls._instance._sections = cls._cls_dict()

    def reload(self):
        return self._instance.read(self._instance._custom_config_paths)
def get_config():
    """Return the singleton parser for the main configuration file(s)."""
    return RatatoskConfigParser.instance()
def get_custom_config():
    """Get separate parser for custom config; else custom config
    parent_task setting will override config file settings"""
    return RatatoskCustomConfigParser.instance()
def setup_config(config_file=None, custom_config_file=None, **kwargs):
    """Helper function to setup config at startup.

    Registers each provided file with its parser singleton and merges the
    parsed sections into the global backend configuration.
    """
    if config_file:
        parser = get_config()
        parser.add_config_path(config_file)
        backend.__global_config__ = update(backend.__global_config__, vars(parser)["_sections"])
    if custom_config_file:
        custom_parser = get_custom_config()
        custom_parser.add_config_path(custom_config_file)
        backend.__global_config__ = update(backend.__global_config__, vars(custom_parser)["_sections"])
import yaml
import logging
import collections
from ConfigParser import NoSectionError, NoOptionError, DuplicateSectionError
from ratatosk import backend
from ratatosk.utils import update, config_to_dict
from ratatosk.log import get_logger
logger = get_logger()
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
class RatatoskConfigParser(object):
"""Ratatosk configuration parser. Works on yaml files.
"""
NO_DEFAULT = object()
_instance = None
_config_paths = []
_cls_dict = collections.OrderedDict
@classmethod
def add_config_path(cls, path):
if path and not any(os.path.samefile(path, x) for x in cls._instance._config_paths):
logger.debug("adding config path {}".format(path))
cls._instance._config_paths.append(path)
else:
return
cls._instance.reload()
@classmethod
def del_config_path(cls, path):
if path and any(os.path.samefile(path, x) for x in cls._instance._config_paths):
logger.debug("removing config path {}".format(path))
try:
i = [os.path.samefile(path, x) for x in cls._instance._config_paths].index(True)
del cls._instance._config_paths[i]
except ValueError:
logger.warn("No such path {} in _config_paths".format(path))
else:
return
# Need to clear sections before reloading
cls._instance._sections = cls._cls_dict()
cls._instance.reload()
@classmethod
def instance(cls, *args, **kwargs):
"""Singleton getter"""
if cls._instance is None:
cls._instance = cls(*args, **kwargs)
loaded = cls._instance.reload()
logger.info("Loaded %r" % loaded)
return cls._instance
@classmethod
def clear(cls):
cls._instance._config_paths = []
cls._instance._sections = cls._cls_dict()
def reload(self):
return self._instance.read(self._instance._config_paths)
def __init__(self, defaults=None, dict_type=_default_dict, *args, **kw):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
_cls_dict = self._dict
def read(self, file_paths):
"""
Read config files.
:param file_path: The file system path to the configuration file.
:returns: boolean
"""
for path in file_paths:
try:
with open(path) as fp:
_sections = yaml.load(fp)
if _sections is None:
_sections = {}
self._sections = update(self._sections, _sections)
except IOError:
logging.warn("No such file {}".format(path))
return False
return True
def parse_file(self, file_path):
"""
Parse config file settings from file_path, overwriting existing
config settings. If the file does not exist, returns False.
:param file_path: The file system path to the configuration file.
:returns: boolean
"""
if file_path is None:
return None
file_path = os.path.abspath(os.path.expanduser(file_path))
if os.path.exists(file_path):
self.read(file_path)
return True
else:
logger.debug("config file '{}' does not exist, skipping...".format(file_path))
return False
def keys(self, section, subsection=None):
"""
Return a list of keys within 'section'.
:param section: The config section.
:param subsection: The config subsection.
:returns: List of keys in the `section` or `subsection`.
:rtype: list
"""
return self.options(section, subsection)
def options(self, section, subsection=None):
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
if subsection:
try:
opts = opts[subsection].copy()
except KeyError:
raise NoSectionError(subsection)
opts = update(opts, self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def has_key(self, section, key, subsection=None):
"""
Return whether or not a 'section' has the given 'key'.
:param section: The section of the configuration. I.e. [block_section].
:param key: The key within 'section'.
:returns: True if the config `section` has `key`.
:rtype: boolean
"""
if key in self.options(section, subsection):
return True
else:
return False
def sections(self):
"""Return a list of section names"""
return self._sections.keys()
def get(self, section, option, subsection=None):
"""Get an option"""
if not section in self.sections():
raise NoSectionError(section)
if subsection:
if not subsection in self._sections[section]:
raise NoSectionError(subsection)
if not option in self._sections[section][subsection]:
raise NoOptionError(option, subsection)
return self._sections[section][subsection][option]
if not option in self._sections[section]:
raise NoOptionError(option, section)
return self._sections[section][option]
def set(self, section, option, value=None, subsection=None):
"""Set an option"""
if not section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
if subsection:
try:
sectdict = sectdict[subsection]
except KeyError:
raise NoSectionError(subsection)
sectdict[self.optionxform(option)] = value
def optionxform(self, optionstr):
return optionstr.lower()
def has_section(self, section, subsection=None):
"""Indicate whether the named section is present in the configuration"""
if subsection:
return subsection in self._sections.get(section, {})
return section in self._sections
def get_sections(self):
"""
Return a list of configuration sections or [blocks].
:returns: List of sections.
:rtype: list
"""
return self.sections()
def get_section_dict(self, section, subsection=None):
"""
Return a dict representation of a section.
:param section: The section of the configuration.
:param subsection: The subsection of the configuration.
:returns: Dictionary reprisentation of the config section.
:rtype: dict
"""
dict_obj = dict()
for key in self.keys(section, subsection):
dict_obj[key] = self.get(section, key, subsection=subsection)
return dict_obj
def add_section(self, section, subsection=None):
"""
Adds a block section to the config.
:param section: The section to add.
"""
if subsection:
if not self.has_section(section):
raise NoSectionError(section)
if subsection in self._sections[section]:
raise DuplicateSectionError(section)
self._sections[section][subsection] = self._dict()
else:
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def del_section(self, section, subsection=None):
"""
Deletes a block section to the config.
:param section: The section to delete.
:param subsection: The section to delete.
"""
if subsection:
if not self.has_section(section):
raise NoSectionError(section)
if not subsection in self._sections[section]:
raise NoSectionError(subsection)
del self._sections[section][subsection]
else:
if not self.has_section(section):
raise NoSectionError(section)
del self._sections[section]
def save(self, config, filename):
"""Save configuration to file"""
config_d = config_to_dict(config)
with open(filename, "w") as fh:
fh.write(yaml.safe_dump(config_d, default_flow_style=False, allow_unicode=True, width=1000))
# This is hackish; we can't use one instance to hold both config file
# and custom-config file since parent tasks settings in the latter
# will override those in the former, violating rules about pipeline
# immutability
class RatatoskCustomConfigParser(RatatoskConfigParser):
"""Ratatosk configuration parser. Works on yaml files.
"""
_instance = None
_custom_config_paths = []
@classmethod
def add_config_path(cls, path):
if path and not any(os.path.samefile(path, x) for x in cls._instance._custom_config_paths):
logger.debug("adding config path {}".format(path))
cls._instance._custom_config_paths.append(path)
else:
return
cls._instance.reload()
@classmethod
def del_config_path(cls, path):
if path and path in cls._instance._custom_config_paths:
logger.debug("removing config path {}".format(path))
try:
i = cls._instance._custom_config_paths.index(path)
del cls._instance._custom_config_paths[i]
except ValueError:
logger.warn("No such path {} in _custom_config_paths".format(path))
else:
return
# Need to clear sections before reloading
cls._instance._sections = cls._cls_dict()
cls._instance.reload()
@classmethod
def clear(cls):
cls._instance._custom_config_paths = []
cls._instance._sections = cls._cls_dict()
def reload(self):
return self._instance.read(self._instance._custom_config_paths)
def get_config():
return RatatoskConfigParser.instance()
def get_custom_config():
"""Get separate parser for custom config; else custom config
parent_task setting will override config file settings"""
return RatatoskCustomConfigParser.instance()
def setup_config(config_file=None, custom_config_file=None, **kwargs):
"""Helper function to setup config at startup"""
if config_file:
config = get_config()
config.add_config_path(config_file)
backend.__global_config__ = update(backend.__global_config__, vars(config)["_sections"])
if custom_config_file:
custom_config = get_custom_config()
custom_config.add_config_path(custom_config_file)
backend.__global_config__ = update(backend.__global_config__, vars(custom_config)["_sections"]) | 0.539226 | 0.072834 |
from gevent.monkey import patch_all; patch_all()
import pytest
from utils import sleep_try
@pytest.fixture(scope='module')
def pool(app):
    """Module-scoped accessor for the application's 'test-pool'."""
    test_pool = app.pools['test-pool']
    return test_pool
# ============================================================================
@pytest.mark.usefixtures('client_class', 'docker_client')
class TestBasicApi:
    """Flock API lifecycle tests.

    NOTE: these tests are deliberately order-dependent: test_flock_request
    stores the request id on the class (TestBasicApi.reqid) and the
    start/get/stop/remove tests below consume it.
    """

    def test_api(self):
        res = self.client.get('/api')
        assert 'GenericResponse' in res.data.decode('utf-8')

    def test_request_invalid_flock(self):
        res = self.client.post('/api/flock/request/foo', json={'user_params': {'a': 'b'}})
        assert res.json == {'error': 'invalid_flock', 'flock': 'foo'}
        assert res.status_code == 404

    def test_request_invalid_req_params(self):
        res = self.client.post('/api/flock/request/test_1', json={'blah': 'foo', 'user_params': {'a': 'b'}})
        assert res.json == {'details': "{'blah': ['Unknown field.']}", 'error': 'invalid_options'}
        assert res.status_code == 400

    def test_request_invalid_overrides(self):
        res = self.client.post('/api/flock/request/test_b', json={'overrides': {'box': 'test-shepherd/alpine'}})
        assert res.json == {'error': 'invalid_image_param',
                            'image_passed': 'test-shepherd/alpine',
                            'label_expected': 'test.isbox=box'}

    def test_request_environ_allow_bool(self):
        res = self.client.post('/api/flock/request/test_b', json={'user_params': {'a': 'b'},
                                                                  'environ': {'FOO': True}})
        assert res.json['reqid']

    def test_flock_request(self):
        res = self.client.post('/api/flock/request/test_b', json={'user_params': {'a': 'b'},
                                                                  'environ': {'FOO': 'BAR'}})
        assert res.json['reqid']
        # Shared state consumed by the start/stop/remove tests below.
        TestBasicApi.reqid = res.json['reqid']

    def test_invalid_pool(self, redis):
        res = self.client.post('/api/flock/request/test_b?pool=bad-pool')
        assert res.json == {'error': 'no_such_pool', 'pool': 'bad-pool'}

    def test_start_invalid_flock(self, redis):
        res = self.client.post('/api/flock/start/x-invalid')
        assert res.json == {'error': 'invalid_reqid'}
        assert not redis.hget('p:test-pool:i', 'size')

    def test_flock_start(self, pool, redis):
        res = self.client.post('/api/flock/start/' + self.reqid,
                               json={'environ': {'NEW': 'VALUE'}})
        assert res.json['containers']['box']
        assert res.json['containers']['box']['environ']['NEW'] == 'VALUE'
        assert res.json['network']

        # Docker events arrive asynchronously; poll until both containers
        # have reported 'start'.
        def assert_done():
            assert len(pool.start_events) == 2
        sleep_try(0.2, 6.0, assert_done)

        for event in pool.start_events:
            assert event['Action'] == 'start'
            assert event['Actor']['Attributes'][pool.shepherd.reqid_label] == self.reqid

        assert redis.exists('p:test-pool:rq:' + self.reqid)
        assert redis.scard('p:test-pool:f') == 1

    def test_get_flock(self, pool, redis):
        res = self.client.get('/api/flock/' + self.reqid)
        assert res.json['user_params'] == {'a': 'b'}
        assert res.json['environ']
        assert res.json['image_list']
        assert res.json['id']

    def test_flock_stop(self, pool, redis):
        res = self.client.post('/api/flock/stop/' + self.reqid)
        # Fix: identity check instead of the '== True' comparison (E712).
        assert res.json['success'] is True

        def assert_done():
            assert len(pool.stop_events) == 2
        sleep_try(0.2, 6.0, assert_done)

        for event in pool.stop_events:
            assert event['Action'] == 'die'
            assert event['Actor']['Attributes'][pool.shepherd.reqid_label] == self.reqid

        assert not redis.exists('p:test-pool:rq:' + self.reqid)
        assert redis.scard('p:test-pool:f') == 0

    def test_flock_remove(self, pool, redis):
        res = self.client.post('/api/flock/remove/' + self.reqid)
        assert res.json['success'] is True
import pytest
from utils import sleep_try
@pytest.fixture(scope='module')
def pool(app):
return app.pools['test-pool']
# ============================================================================
@pytest.mark.usefixtures('client_class', 'docker_client')
class TestBasicApi:
def test_api(self):
res = self.client.get('/api')
assert 'GenericResponse' in res.data.decode('utf-8')
def test_request_invalid_flock(self):
res = self.client.post('/api/flock/request/foo', json={'user_params': {'a': 'b'}})
assert res.json == {'error': 'invalid_flock', 'flock': 'foo'}
assert res.status_code == 404
def test_request_invalid_req_params(self):
res = self.client.post('/api/flock/request/test_1', json={'blah': 'foo', 'user_params': {'a': 'b'}})
assert res.json == {'details': "{'blah': ['Unknown field.']}", 'error': 'invalid_options'}
assert res.status_code == 400
def test_request_invalid_overrides(self):
res = self.client.post('/api/flock/request/test_b', json={'overrides': {'box': 'test-shepherd/alpine'}})
assert res.json == {'error': 'invalid_image_param',
'image_passed': 'test-shepherd/alpine',
'label_expected': 'test.isbox=box'}
def test_request_environ_allow_bool(self):
res = self.client.post('/api/flock/request/test_b', json={'user_params': {'a': 'b'},
'environ': {'FOO': True}})
assert res.json['reqid']
def test_flock_request(self):
res = self.client.post('/api/flock/request/test_b', json={'user_params': {'a': 'b'},
'environ': {'FOO': 'BAR'}})
assert res.json['reqid']
TestBasicApi.reqid = res.json['reqid']
def test_invalid_pool(self, redis):
res = self.client.post('/api/flock/request/test_b?pool=bad-pool')
assert res.json == {'error': 'no_such_pool', 'pool': 'bad-pool'}
def test_start_invalid_flock(self, redis):
res = self.client.post('/api/flock/start/x-invalid')
assert res.json == {'error': 'invalid_reqid'}
assert not redis.hget('p:test-pool:i', 'size')
def test_flock_start(self, pool, redis):
res = self.client.post('/api/flock/start/' + self.reqid,
json={'environ': {'NEW': 'VALUE'}})
assert res.json['containers']['box']
assert res.json['containers']['box']['environ']['NEW'] == 'VALUE'
assert res.json['network']
def assert_done():
assert len(pool.start_events) == 2
sleep_try(0.2, 6.0, assert_done)
for event in pool.start_events:
assert event['Action'] == 'start'
assert event['Actor']['Attributes'][pool.shepherd.reqid_label] == self.reqid
assert redis.exists('p:test-pool:rq:' + self.reqid)
assert redis.scard('p:test-pool:f') == 1
def test_get_flock(self, pool, redis):
res = self.client.get('/api/flock/' + self.reqid)
assert res.json['user_params'] == {'a': 'b'}
assert res.json['environ']
assert res.json['image_list']
assert res.json['id']
def test_flock_stop(self, pool, redis):
res = self.client.post('/api/flock/stop/' + self.reqid)
assert res.json['success'] == True
def assert_done():
assert len(pool.stop_events) == 2
sleep_try(0.2, 6.0, assert_done)
for event in pool.stop_events:
assert event['Action'] == 'die'
assert event['Actor']['Attributes'][pool.shepherd.reqid_label] == self.reqid
assert not redis.exists('p:test-pool:rq:' + self.reqid)
assert redis.scard('p:test-pool:f') == 0
def test_flock_remove(self, pool, redis):
res = self.client.post('/api/flock/remove/' + self.reqid)
assert res.json['success'] == True | 0.441673 | 0.272666 |
from unittest.mock import MagicMock
from graphql_relay import to_global_id
from creator.studies.models import Study
from creator.projects.models import Project
from creator.events.models import Event
from creator.studies.factories import StudyFactory
from creator.organizations.factories import OrganizationFactory
from django.contrib.auth import get_user_model
User = get_user_model()
CREATE_STUDY = """
mutation ($input: CreateStudyInput!) {
createStudy(input: $input) {
study { id kfId externalId name }
}
}
"""
UPDATE_STUDY = """
mutation ($id: ID! $input: StudyInput!) {
updateStudy(id: $id, input: $input) {
study { id kfId externalId name }
}
}
"""
def test_new_study_event(
    permission_client, db, mocker, settings, mock_cavatica_api
):
    """
    Test that new studies creates an event
    """
    user, client = permission_client(["add_study"])
    # Enable Cavatica project creation so the PR_* events are emitted too.
    settings.FEAT_CAVATICA_CREATE_PROJECTS = True
    settings.CAVATICA_HARMONIZATION_TOKEN = "abc"
    settings.CAVATICA_DELIVERY_TOKEN = "abc"
    organization = OrganizationFactory(members=[user])

    # Stub the dataservice study-creation POST call.
    post = mocker.patch("requests.post")
    MockResp = MagicMock()
    MockResp.status_code = 201
    MockResp.json.return_value = {"results": {"kf_id": "ABCABCBA"}}
    post.return_value = MockResp

    variables = {
        "input": {
            "externalId": "TEST",
            "organization": to_global_id("OrganizationNode", organization.pk),
        }
    }
    # NOTE(review): resp is never asserted on -- consider checking the
    # GraphQL response for errors as well.
    resp = client.post(
        "/graphql",
        content_type="application/json",
        data={"query": CREATE_STUDY, "variables": variables},
    )
    # Expected events: study created (SD_CRE) plus three project events
    # (PR_STR started, PR_SUC succeeded, PR_CRE created).
    assert Event.objects.count() == 4
    assert Event.objects.filter(event_type="SD_CRE").count() == 1
    sd_cre = Event.objects.filter(event_type="SD_CRE").first()
    assert sd_cre.user == user
    assert sd_cre.file is None
    assert sd_cre.study == Study.objects.first()
    assert Event.objects.filter(event_type="PR_STR").count() == 1
    assert Event.objects.filter(event_type="PR_SUC").count() == 1
    assert Event.objects.filter(event_type="PR_CRE").count() == 1
    pr_cre = Event.objects.filter(event_type="PR_CRE").first()
    assert pr_cre.file is None
    assert pr_cre.version is None
    assert pr_cre.study == Study.objects.first()
    assert pr_cre.project in Project.objects.all()
def test_update_study_event(permission_client, db, mocker):
"""
Test that updating studies creates an event
"""
user, client = permission_client(["change_study"])
patch = mocker.patch("requests.patch")
MockResp = MagicMock()
MockResp.status_code = 200
MockResp.json.return_value = {"results": {"kf_id": "ABCABCBA"}}
patch.return_value = MockResp
study = StudyFactory(kf_id="SD_ABCABCBA", external_id="TEST")
study.save()
variables = {
"id": to_global_id("StudyNode", study.kf_id),
"input": {"externalId": "TESTING"},
}
resp = client.post(
"/graphql",
content_type="application/json",
data={"query": UPDATE_STUDY, "variables": variables},
)
assert Event.objects.count() == 1
assert Event.objects.filter(event_type="SD_UPD").count() == 1
sd_upd = Event.objects.filter(event_type="SD_UPD").first()
assert sd_upd.description.find("external id") != -1
assert sd_upd.description.find("name") == -1
assert sd_upd.user == user
assert sd_upd.file is None
assert sd_upd.study == Study.objects.first() | tests/events/test_studies.py | from unittest.mock import MagicMock
from graphql_relay import to_global_id
from creator.studies.models import Study
from creator.projects.models import Project
from creator.events.models import Event
from creator.studies.factories import StudyFactory
from creator.organizations.factories import OrganizationFactory
from django.contrib.auth import get_user_model
User = get_user_model()
CREATE_STUDY = """
mutation ($input: CreateStudyInput!) {
createStudy(input: $input) {
study { id kfId externalId name }
}
}
"""
UPDATE_STUDY = """
mutation ($id: ID! $input: StudyInput!) {
updateStudy(id: $id, input: $input) {
study { id kfId externalId name }
}
}
"""
def test_new_study_event(
permission_client, db, mocker, settings, mock_cavatica_api
):
"""
Test that new studies creates an event
"""
user, client = permission_client(["add_study"])
settings.FEAT_CAVATICA_CREATE_PROJECTS = True
settings.CAVATICA_HARMONIZATION_TOKEN = "abc"
settings.CAVATICA_DELIVERY_TOKEN = "abc"
organization = OrganizationFactory(members=[user])
post = mocker.patch("requests.post")
MockResp = MagicMock()
MockResp.status_code = 201
MockResp.json.return_value = {"results": {"kf_id": "ABCABCBA"}}
post.return_value = MockResp
variables = {
"input": {
"externalId": "TEST",
"organization": to_global_id("OrganizationNode", organization.pk),
}
}
resp = client.post(
"/graphql",
content_type="application/json",
data={"query": CREATE_STUDY, "variables": variables},
)
assert Event.objects.count() == 4
assert Event.objects.filter(event_type="SD_CRE").count() == 1
sd_cre = Event.objects.filter(event_type="SD_CRE").first()
assert sd_cre.user == user
assert sd_cre.file is None
assert sd_cre.study == Study.objects.first()
assert Event.objects.filter(event_type="PR_STR").count() == 1
assert Event.objects.filter(event_type="PR_SUC").count() == 1
assert Event.objects.filter(event_type="PR_CRE").count() == 1
pr_cre = Event.objects.filter(event_type="PR_CRE").first()
assert pr_cre.file is None
assert pr_cre.version is None
assert pr_cre.study == Study.objects.first()
assert pr_cre.project in Project.objects.all()
def test_update_study_event(permission_client, db, mocker):
"""
Test that updating studies creates an event
"""
user, client = permission_client(["change_study"])
patch = mocker.patch("requests.patch")
MockResp = MagicMock()
MockResp.status_code = 200
MockResp.json.return_value = {"results": {"kf_id": "ABCABCBA"}}
patch.return_value = MockResp
study = StudyFactory(kf_id="SD_ABCABCBA", external_id="TEST")
study.save()
variables = {
"id": to_global_id("StudyNode", study.kf_id),
"input": {"externalId": "TESTING"},
}
resp = client.post(
"/graphql",
content_type="application/json",
data={"query": UPDATE_STUDY, "variables": variables},
)
assert Event.objects.count() == 1
assert Event.objects.filter(event_type="SD_UPD").count() == 1
sd_upd = Event.objects.filter(event_type="SD_UPD").first()
assert sd_upd.description.find("external id") != -1
assert sd_upd.description.find("name") == -1
assert sd_upd.user == user
assert sd_upd.file is None
assert sd_upd.study == Study.objects.first() | 0.722331 | 0.395718 |
from knack.help_files import helps
helps['ad ds'] = """
type: group
short-summary: Manage domain service with ad
"""
helps['ad ds list'] = """
type: command
short-summary: "The List Domain Services in Resource Group operation lists all the domain services available under \
the given resource group. And The List Domain Services in Subscription operation lists all the domain services \
available under the given subscription (and across all resource groups within that subscription)."
examples:
- name: List Domain Service By Group
text: |-
az ad ds list --resource-group "TestResourceGroup"
- name: List Domain Service By Sub
text: |-
az ad ds list
"""
helps['ad ds show'] = """
type: command
short-summary: "The Get Domain Service operation retrieves a json representation of the Domain Service."
examples:
- name: Get Domain Service
text: |-
az ad ds show --name "TestDomainService.com" --resource-group "TestResourceGroup"
"""
helps['ad ds create'] = """
type: command
short-summary: "The Create Domain Service operation creates a new domain service with the specified parameters. If \
the specific service already exists, then any patchable properties will be updated and any immutable properties will \
remain unchanged."
parameters:
- name: --replica-sets
short-summary: "List of ReplicaSets"
long-summary: |
Usage: --replica-sets location=XX subnet-id=XX
location: Virtual network location
subnet-id: The name of the virtual network that Domain Services will be deployed on. The id of the subnet \
that Domain Services will be deployed on. /virtualNetwork/vnetName/subnets/subnetName.
Multiple actions can be specified by using more than one --replica-sets argument.
- name: --settings
short-summary: "List of settings for Resource Forest"
long-summary: |
Usage: --settings trusted-domain-fqdn=XX trust-direction=XX friendly-name=XX remote-dns-ips=XX \
trust-password=XX
trusted-domain-fqdn: Trusted Domain FQDN
trust-direction: Trust Direction
friendly-name: Friendly Name
remote-dns-ips: Remote Dns ips
trust-password: <PASSWORD>
Multiple actions can be specified by using more than one --settings argument.
examples:
- name: Create Domain Service
text: |-
az ad ds create --domain "TestDomainService.com" --ntlm-v1 "Enabled" --sync-ntlm-pwd "Enabled" --tls-v1 \
"Disabled" --filtered-sync "Enabled" --external-access "Enabled" --ldaps "Enabled" --pfx-cert \
"MIIDPDCCAiSgAwIBAgIQQUI9P6tq2p9OFIJa7DLNvTANBgkqhkiG9w0BAQsFADAgMR4w..." --pfx-cert-pwd "<p<PASSWORD>>" \
--notify-others "<EMAIL>" "<EMAIL>" --notify-dc-admins "Enabled" --notify-global-admins \
"Enabled" --replica-sets location="West US" subnet-id="/subscriptions/1639790a-76a2-4ac4-98d9-8562f5dfcb4d/resourceGrou\
ps/TestNetworkResourceGroup/providers/Microsoft.Network/virtualNetworks/TestVnetWUS/subnets/TestSubnetWUS" --name \
"TestDomainService.com" --resource-group "TestResourceGroup"
"""
helps['ad ds update'] = """
type: command
short-summary: "The Update Domain Service operation can be used to update the existing deployment. The update call \
only supports the properties listed in the PATCH body."
parameters:
- name: --replica-sets
short-summary: "List of ReplicaSets"
long-summary: |
Usage: --replica-sets location=XX subnet-id=XX
location: Virtual network location
subnet-id: The name of the virtual network that Domain Services will be deployed on. The id of the subnet \
that Domain Services will be deployed on. /virtualNetwork/vnetName/subnets/subnetName.
Multiple actions can be specified by using more than one --replica-sets argument.
- name: --settings
short-summary: "List of settings for Resource Forest"
long-summary: |
Usage: --settings trusted-domain-fqdn=XX trust-direction=XX friendly-name=XX remote-dns-ips=XX \
trust-password=XX
trusted-domain-fqdn: Trusted Domain FQDN
trust-direction: Trust Direction
friendly-name: Friendly Name
remote-dns-ips: Remote Dns ips
trust-password: <PASSWORD>
Multiple actions can be specified by using more than one --settings argument.
examples:
- name: Update Domain Service
text: |-
az ad ds update --ntlm-v1 "Enabled" --sync-ntlm-pwd "Enabled" --tls-v1 "Disabled" --filtered-sync \
"Enabled" --external-access "Enabled" --ldaps "Enabled" --pfx-cert "MIIDPDCCAiSgAwIBAgIQQUI9P6tq2p9OFIJa7DLNvTANBgkqhki\
G9w0BAQsFADAgMR4w..." --pfx-cert-pwd "<<PASSWORD>>" --notify-others "<EMAIL>" \
"<EMAIL>" --notify-dc-admins "Enabled" --notify-global-admins "Enabled" --replica-sets location="West \
US" subnet-id="/subscriptions/1639790a-76a2-4ac4-98d9-8562f5dfcb4d/resourceGroups/TestNetworkResourceGroup/providers/Mi\
crosoft.Network/virtualNetworks/TestVnetWUS/subnets/TestSubnetWUS" --replica-sets location="East US" \
subnet-id="/subscriptions/1639790a-76a2-4ac4-98d9-8562f5dfcb4d/resourceGroups/TestNetworkResourceGroup/providers/Micros\
oft.Network/virtualNetworks/TestVnetEUS/subnets/TestSubnetEUS" --name "TestDomainService.com" --resource-group \
"TestResourceGroup"
"""
helps['ad ds delete'] = """
type: command
short-summary: "The Delete Domain Service operation deletes an existing Domain Service."
examples:
- name: Delete Domain Service
text: |-
az ad ds delete --name "TestDomainService.com" --resource-group "TestResourceGroup"
"""
helps['ad ds wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the ad ds is met.
examples:
- name: Pause executing next line of CLI script until the ad ds is successfully created.
text: |-
az ad ds wait --name "TestDomainService.com" --resource-group "TestResourceGroup" --created
- name: Pause executing next line of CLI script until the ad ds is successfully updated.
text: |-
az ad ds wait --name "TestDomainService.com" --resource-group "TestResourceGroup" --updated
- name: Pause executing next line of CLI script until the ad ds is successfully deleted.
text: |-
az ad ds wait --name "TestDomainService.com" --resource-group "TestResourceGroup" --deleted
""" | src/ad/azext_ad/generated/_help.py |
from knack.help_files import helps
helps['ad ds'] = """
type: group
short-summary: Manage domain service with ad
"""
helps['ad ds list'] = """
type: command
short-summary: "The List Domain Services in Resource Group operation lists all the domain services available under \
the given resource group. And The List Domain Services in Subscription operation lists all the domain services \
available under the given subscription (and across all resource groups within that subscription)."
examples:
- name: List Domain Service By Group
text: |-
az ad ds list --resource-group "TestResourceGroup"
- name: List Domain Service By Sub
text: |-
az ad ds list
"""
helps['ad ds show'] = """
type: command
short-summary: "The Get Domain Service operation retrieves a json representation of the Domain Service."
examples:
- name: Get Domain Service
text: |-
az ad ds show --name "TestDomainService.com" --resource-group "TestResourceGroup"
"""
helps['ad ds create'] = """
type: command
short-summary: "The Create Domain Service operation creates a new domain service with the specified parameters. If \
the specific service already exists, then any patchable properties will be updated and any immutable properties will \
remain unchanged."
parameters:
- name: --replica-sets
short-summary: "List of ReplicaSets"
long-summary: |
Usage: --replica-sets location=XX subnet-id=XX
location: Virtual network location
subnet-id: The name of the virtual network that Domain Services will be deployed on. The id of the subnet \
that Domain Services will be deployed on. /virtualNetwork/vnetName/subnets/subnetName.
Multiple actions can be specified by using more than one --replica-sets argument.
- name: --settings
short-summary: "List of settings for Resource Forest"
long-summary: |
Usage: --settings trusted-domain-fqdn=XX trust-direction=XX friendly-name=XX remote-dns-ips=XX \
trust-password=XX
trusted-domain-fqdn: Trusted Domain FQDN
trust-direction: Trust Direction
friendly-name: Friendly Name
remote-dns-ips: Remote Dns ips
trust-password: <PASSWORD>
Multiple actions can be specified by using more than one --settings argument.
examples:
- name: Create Domain Service
text: |-
az ad ds create --domain "TestDomainService.com" --ntlm-v1 "Enabled" --sync-ntlm-pwd "Enabled" --tls-v1 \
"Disabled" --filtered-sync "Enabled" --external-access "Enabled" --ldaps "Enabled" --pfx-cert \
"MIIDPDCCAiSgAwIBAgIQQUI9P6tq2p9OFIJa7DLNvTANBgkqhkiG9w0BAQsFADAgMR4w..." --pfx-cert-pwd "<p<PASSWORD>>" \
--notify-others "<EMAIL>" "<EMAIL>" --notify-dc-admins "Enabled" --notify-global-admins \
"Enabled" --replica-sets location="West US" subnet-id="/subscriptions/1639790a-76a2-4ac4-98d9-8562f5dfcb4d/resourceGrou\
ps/TestNetworkResourceGroup/providers/Microsoft.Network/virtualNetworks/TestVnetWUS/subnets/TestSubnetWUS" --name \
"TestDomainService.com" --resource-group "TestResourceGroup"
"""
helps['ad ds update'] = """
type: command
short-summary: "The Update Domain Service operation can be used to update the existing deployment. The update call \
only supports the properties listed in the PATCH body."
parameters:
- name: --replica-sets
short-summary: "List of ReplicaSets"
long-summary: |
Usage: --replica-sets location=XX subnet-id=XX
location: Virtual network location
subnet-id: The name of the virtual network that Domain Services will be deployed on. The id of the subnet \
that Domain Services will be deployed on. /virtualNetwork/vnetName/subnets/subnetName.
Multiple actions can be specified by using more than one --replica-sets argument.
- name: --settings
short-summary: "List of settings for Resource Forest"
long-summary: |
Usage: --settings trusted-domain-fqdn=XX trust-direction=XX friendly-name=XX remote-dns-ips=XX \
trust-password=XX
trusted-domain-fqdn: Trusted Domain FQDN
trust-direction: Trust Direction
friendly-name: Friendly Name
remote-dns-ips: Remote Dns ips
trust-password: <PASSWORD>
Multiple actions can be specified by using more than one --settings argument.
examples:
- name: Update Domain Service
text: |-
az ad ds update --ntlm-v1 "Enabled" --sync-ntlm-pwd "Enabled" --tls-v1 "Disabled" --filtered-sync \
"Enabled" --external-access "Enabled" --ldaps "Enabled" --pfx-cert "MIIDPDCCAiSgAwIBAgIQQUI9P6tq2p9OFIJa7DLNvTANBgkqhki\
G9w0BAQsFADAgMR4w..." --pfx-cert-pwd "<<PASSWORD>>" --notify-others "<EMAIL>" \
"<EMAIL>" --notify-dc-admins "Enabled" --notify-global-admins "Enabled" --replica-sets location="West \
US" subnet-id="/subscriptions/1639790a-76a2-4ac4-98d9-8562f5dfcb4d/resourceGroups/TestNetworkResourceGroup/providers/Mi\
crosoft.Network/virtualNetworks/TestVnetWUS/subnets/TestSubnetWUS" --replica-sets location="East US" \
subnet-id="/subscriptions/1639790a-76a2-4ac4-98d9-8562f5dfcb4d/resourceGroups/TestNetworkResourceGroup/providers/Micros\
oft.Network/virtualNetworks/TestVnetEUS/subnets/TestSubnetEUS" --name "TestDomainService.com" --resource-group \
"TestResourceGroup"
"""
helps['ad ds delete'] = """
type: command
short-summary: "The Delete Domain Service operation deletes an existing Domain Service."
examples:
- name: Delete Domain Service
text: |-
az ad ds delete --name "TestDomainService.com" --resource-group "TestResourceGroup"
"""
helps['ad ds wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the ad ds is met.
examples:
- name: Pause executing next line of CLI script until the ad ds is successfully created.
text: |-
az ad ds wait --name "TestDomainService.com" --resource-group "TestResourceGroup" --created
- name: Pause executing next line of CLI script until the ad ds is successfully updated.
text: |-
az ad ds wait --name "TestDomainService.com" --resource-group "TestResourceGroup" --updated
- name: Pause executing next line of CLI script until the ad ds is successfully deleted.
text: |-
az ad ds wait --name "TestDomainService.com" --resource-group "TestResourceGroup" --deleted
""" | 0.752377 | 0.233859 |
import numpy as np
import os
os.chdir('C:/Users/DELL/Desktop/Quant_macro/Pset4')
import rep_VFI_shock_labor as ra
import matplotlib.pyplot as plt
###parameters
para = {}
para['theta'] = 0.679
para['beta'] = 0.988
para['delta'] = 0.013
para['kappa'] = 5.24
para['nu'] = 2
para['h'] = 1
kss = (((1-para['theta'])*para['beta'])/(1-para['beta']*(1-para['delta'])))**(1/para['theta'])
n = 50
kmax = kss
kmin = 0.1*kss
hmin = 0
hmax = 1
###steady state
rep_age = ra.rep_agent(para['theta'], para['beta'], para['delta'], para['kappa'], para['nu'], kmin, kmax, hmin, hmax, n=n)
V, gk, gc, gl = rep_age.problem()
f, (ax1, ax) = plt.subplots(1,2)
f.set_figheight(5)
f.set_figwidth(10)
ax1.plot(rep_age.gridk,V[:,0], 'r', label='Bad Shock')
ax1.plot(rep_age.gridk,V[:,1], 'b', label='Good Shock')
ax1.legend(loc = 'upper right')
ax1.set_xlabel('k')
ax1.set_ylabel('V')
ax1.set_title('Value Function')
K, C, Y, I, L, W, LS = rep_age.simulation(T=200)
ax.plot(K, 'b', label='Capital')
ax.legend(loc = 'upper right')
ax.set_xlabel('Time')
ax.set_ylabel('Level')
ax.set_title('Variables')
fb, (ax1b, ax2b, ax3b) = plt.subplots(1,3)
fb.set_figheight(5)
fb.set_figwidth(10)
ax1b.plot(I, 'b', label='Investment')
ax1b.legend(loc = 'upper right')
ax1b.set_xlabel('Time')
ax1b.set_ylabel('Level')
ax1b.set_title('Variables')
ax2b.plot(Y, 'b', label='Output')
ax2b.legend(loc = 'upper right')
ax2b.set_xlabel('Time')
ax2b.set_ylabel('Level')
ax2b.set_title('Variables')
ax3b.plot(C, 'b', label='Consumption')
ax3b.legend(loc = 'upper right')
ax3b.set_xlabel('Time')
ax3b.set_ylabel('Level')
ax3b.set_title('Variables')
f2, (ax3, ax4, ax5) = plt.subplots(1,3)
f2.set_figheight(5)
f2.set_figwidth(10)
ax3.plot(L, 'b', label='Labor supply')
ax3.legend(loc = 'upper right')
ax3.set_xlabel('Time')
ax3.set_ylabel('Level')
ax3.set_title('Labor Supply')
ax4.plot(W, 'b', label='Wages')
ax4.legend(loc = 'upper right')
ax4.set_xlabel('Time')
ax4.set_ylabel('Level')
ax4.set_title('Wages')
ax5.plot(LS, 'b', label='Labor Share')
ax5.legend(loc = 'upper right')
ax5.set_xlabel('Time')
ax5.set_ylabel('Level')
ax5.set_title('Labor Share')
K_ir, L_ir, C_ir, Y_ir, I_ir, W_ir = rep_age.Impulse_resp(kss)
g, (gx1, gx2, gx3) = plt.subplots(1,3)
g.set_figheight(5)
g.set_figwidth(10)
gx1.plot(K_ir, 'b', label='Impulse Capital')
gx1.legend(loc = 'upper right')
gx1.set_xlabel('Time')
gx1.set_ylabel('Level')
gx1.set_title('Impulse Capital')
gx2.plot(L_ir, 'b', label='Impulse Labor')
gx2.legend(loc = 'upper right')
gx2.set_xlabel('Time')
gx2.set_ylabel('Level')
gx2.set_title('Impulse Labor')
gx3.plot(C_ir, 'b', label='Impulse Consumption')
gx3.legend(loc = 'upper right')
gx3.set_xlabel('Time')
gx3.set_ylabel('Level')
gx3.set_title('Impulse Consumption')
g2, (gx4, gx5, gx6) = plt.subplots(1,3)
g2.set_figheight(5)
g2.set_figwidth(10)
gx4.plot(Y_ir, 'b', label='Impulse Output')
gx4.legend(loc = 'upper right')
gx4.set_xlabel('Time')
gx4.set_ylabel('Level')
gx4.set_title('Impulse Output')
gx5.plot(I_ir, 'b', label='Impulse Investment')
gx5.legend(loc = 'upper right')
gx5.set_xlabel('Time')
gx5.set_ylabel('Level')
gx5.set_title('Impulse Investment')
gx6.plot(W_ir, 'b', label='Impulse Wage')
gx6.legend(loc = 'upper right')
gx6.set_xlabel('Time')
gx6.set_ylabel('Level')
gx6.set_title('Impulse Wage') | Pset4_hand_in/Pset_shock_labor_VFI.py | import numpy as np
import os
os.chdir('C:/Users/DELL/Desktop/Quant_macro/Pset4')
import rep_VFI_shock_labor as ra
import matplotlib.pyplot as plt
###parameters
para = {}
para['theta'] = 0.679
para['beta'] = 0.988
para['delta'] = 0.013
para['kappa'] = 5.24
para['nu'] = 2
para['h'] = 1
kss = (((1-para['theta'])*para['beta'])/(1-para['beta']*(1-para['delta'])))**(1/para['theta'])
n = 50
kmax = kss
kmin = 0.1*kss
hmin = 0
hmax = 1
###steady state
rep_age = ra.rep_agent(para['theta'], para['beta'], para['delta'], para['kappa'], para['nu'], kmin, kmax, hmin, hmax, n=n)
V, gk, gc, gl = rep_age.problem()
f, (ax1, ax) = plt.subplots(1,2)
f.set_figheight(5)
f.set_figwidth(10)
ax1.plot(rep_age.gridk,V[:,0], 'r', label='Bad Shock')
ax1.plot(rep_age.gridk,V[:,1], 'b', label='Good Shock')
ax1.legend(loc = 'upper right')
ax1.set_xlabel('k')
ax1.set_ylabel('V')
ax1.set_title('Value Function')
K, C, Y, I, L, W, LS = rep_age.simulation(T=200)
ax.plot(K, 'b', label='Capital')
ax.legend(loc = 'upper right')
ax.set_xlabel('Time')
ax.set_ylabel('Level')
ax.set_title('Variables')
fb, (ax1b, ax2b, ax3b) = plt.subplots(1,3)
fb.set_figheight(5)
fb.set_figwidth(10)
ax1b.plot(I, 'b', label='Investment')
ax1b.legend(loc = 'upper right')
ax1b.set_xlabel('Time')
ax1b.set_ylabel('Level')
ax1b.set_title('Variables')
ax2b.plot(Y, 'b', label='Output')
ax2b.legend(loc = 'upper right')
ax2b.set_xlabel('Time')
ax2b.set_ylabel('Level')
ax2b.set_title('Variables')
ax3b.plot(C, 'b', label='Consumption')
ax3b.legend(loc = 'upper right')
ax3b.set_xlabel('Time')
ax3b.set_ylabel('Level')
ax3b.set_title('Variables')
f2, (ax3, ax4, ax5) = plt.subplots(1,3)
f2.set_figheight(5)
f2.set_figwidth(10)
ax3.plot(L, 'b', label='Labor supply')
ax3.legend(loc = 'upper right')
ax3.set_xlabel('Time')
ax3.set_ylabel('Level')
ax3.set_title('Labor Supply')
ax4.plot(W, 'b', label='Wages')
ax4.legend(loc = 'upper right')
ax4.set_xlabel('Time')
ax4.set_ylabel('Level')
ax4.set_title('Wages')
ax5.plot(LS, 'b', label='Labor Share')
ax5.legend(loc = 'upper right')
ax5.set_xlabel('Time')
ax5.set_ylabel('Level')
ax5.set_title('Labor Share')
K_ir, L_ir, C_ir, Y_ir, I_ir, W_ir = rep_age.Impulse_resp(kss)
g, (gx1, gx2, gx3) = plt.subplots(1,3)
g.set_figheight(5)
g.set_figwidth(10)
gx1.plot(K_ir, 'b', label='Impulse Capital')
gx1.legend(loc = 'upper right')
gx1.set_xlabel('Time')
gx1.set_ylabel('Level')
gx1.set_title('Impulse Capital')
gx2.plot(L_ir, 'b', label='Impulse Labor')
gx2.legend(loc = 'upper right')
gx2.set_xlabel('Time')
gx2.set_ylabel('Level')
gx2.set_title('Impulse Labor')
gx3.plot(C_ir, 'b', label='Impulse Consumption')
gx3.legend(loc = 'upper right')
gx3.set_xlabel('Time')
gx3.set_ylabel('Level')
gx3.set_title('Impulse Consumption')
g2, (gx4, gx5, gx6) = plt.subplots(1,3)
g2.set_figheight(5)
g2.set_figwidth(10)
gx4.plot(Y_ir, 'b', label='Impulse Output')
gx4.legend(loc = 'upper right')
gx4.set_xlabel('Time')
gx4.set_ylabel('Level')
gx4.set_title('Impulse Output')
gx5.plot(I_ir, 'b', label='Impulse Investment')
gx5.legend(loc = 'upper right')
gx5.set_xlabel('Time')
gx5.set_ylabel('Level')
gx5.set_title('Impulse Investment')
gx6.plot(W_ir, 'b', label='Impulse Wage')
gx6.legend(loc = 'upper right')
gx6.set_xlabel('Time')
gx6.set_ylabel('Level')
gx6.set_title('Impulse Wage') | 0.31059 | 0.189802 |
from rest_framework import status
from rest_framework.test import APITestCase
from django.test import override_settings
from django.urls import reverse
from oems.settings import PAGINATION_SIZE, TEST_MEDIA_ROOT
from api import models
from api.tests import utils
@override_settings(MEDIA_ROOT=TEST_MEDIA_ROOT)
class NameListTests(APITestCase):
def test_get_paginated_names(self):
utils.log_as(self, utils.UserType.STAFF)
number_of_names_to_create = PAGINATION_SIZE + 5
self.__create_multiple_objects_and_names(number_of_names_to_create)
utils.log_as(self, utils.UserType.USER)
response = self.client.get(reverse('api:names'), format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(models.Name.objects.count(), number_of_names_to_create)
self.assertEqual(len(response.data['results']), PAGINATION_SIZE)
def test_get_names_as_visitor(self):
utils.log_as(self, utils.UserType.STAFF)
number_of_names_to_create = 5
self.__create_multiple_objects_and_names(number_of_names_to_create)
utils.log_as(self, utils.UserType.VISITOR)
response = self.client.get(reverse('api:names'), format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_name_as_staff(self):
utils.log_as(self, utils.UserType.STAFF)
name_content = 'test_post_name_as_staff'
data = {
'name': name_content
}
response = self.client.post(reverse('api:names'), data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(models.Name.objects.all()), 1)
name = models.Name.objects.all().first()
self.assertEqual(name.name, name_content)
def test_post_name_as_user_or_visitor(self):
utils.log_as(self, utils.UserType.USER)
name_content = 'test_post_name_as_user_or_visitor'
data = {
'name': name_content
}
response = self.client.post(reverse('api:names'), data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
utils.log_as(self, utils.UserType.VISITOR)
response = self.client.post(reverse('api:names'), data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def __create_multiple_objects_and_names(self, number_of_names):
for i in range(number_of_names):
utils.add_name(self) | api/tests/name_list.py | from rest_framework import status
from rest_framework.test import APITestCase
from django.test import override_settings
from django.urls import reverse
from oems.settings import PAGINATION_SIZE, TEST_MEDIA_ROOT
from api import models
from api.tests import utils
@override_settings(MEDIA_ROOT=TEST_MEDIA_ROOT)
class NameListTests(APITestCase):
def test_get_paginated_names(self):
utils.log_as(self, utils.UserType.STAFF)
number_of_names_to_create = PAGINATION_SIZE + 5
self.__create_multiple_objects_and_names(number_of_names_to_create)
utils.log_as(self, utils.UserType.USER)
response = self.client.get(reverse('api:names'), format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(models.Name.objects.count(), number_of_names_to_create)
self.assertEqual(len(response.data['results']), PAGINATION_SIZE)
def test_get_names_as_visitor(self):
utils.log_as(self, utils.UserType.STAFF)
number_of_names_to_create = 5
self.__create_multiple_objects_and_names(number_of_names_to_create)
utils.log_as(self, utils.UserType.VISITOR)
response = self.client.get(reverse('api:names'), format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_name_as_staff(self):
utils.log_as(self, utils.UserType.STAFF)
name_content = 'test_post_name_as_staff'
data = {
'name': name_content
}
response = self.client.post(reverse('api:names'), data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(models.Name.objects.all()), 1)
name = models.Name.objects.all().first()
self.assertEqual(name.name, name_content)
def test_post_name_as_user_or_visitor(self):
utils.log_as(self, utils.UserType.USER)
name_content = 'test_post_name_as_user_or_visitor'
data = {
'name': name_content
}
response = self.client.post(reverse('api:names'), data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
utils.log_as(self, utils.UserType.VISITOR)
response = self.client.post(reverse('api:names'), data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def __create_multiple_objects_and_names(self, number_of_names):
for i in range(number_of_names):
utils.add_name(self) | 0.438064 | 0.217608 |
u"""
combine_shapfiles.py
<NAME> (08/2020)
Combine all individual shapefiles for all tracks into 1 file
"""
import os
import sys
import getopt
import pandas as pd
import geopandas as gpd
from rasterio.crs import CRS
#-- main function
def main():
#-- Read the system arguments listed after the program
long_options=['DIR=','FILTER=','MODEL=','ERROR']
optlist,arglist = getopt.getopt(sys.argv[1:],'D:F:M:E',long_options)
#-- Set default settings
ddir = '/DFS-L/DATA/gl_ml/SENTINEL1_2018/'
model_str = 'atrous_32init_drop0.2_customLossR727'
FILTER = 8000
error = False
for opt, arg in optlist:
if opt in ("-D","--DIR"):
ddir = os.path.expanduser(arg)
elif opt in ("-F","--FILTER"):
if arg not in ['NONE','none','None','N','n',0]:
FILTER = float(arg)
elif opt in ("-M","--MODEL"):
model_str = arg
elif opt in ("-E","--ERROR"):
error = True
flt_str = '_%.1fkm'%(FILTER/1000)
#-- get list of all folders (tracks)
folderList = os.listdir(ddir)
folder_list = [f for f in folderList if os.path.isdir(os.path.join(ddir,f))]
print(folder_list)
#-- initialize list to be converted to geopandas dataframe
gdf = []
for d in folder_list:
#-- get list of files
fileList = os.listdir(os.path.join(ddir,d,'%s.dir'%model_str,'stitched.dir','shapefiles.dir'))
if error:
file_list = [f for f in fileList if (f.endswith('%s_ERR.shp'%flt_str))]
else:
file_list = [f for f in fileList if (f.endswith('%s.shp'%flt_str))]
print(d,len(file_list))
for f in file_list:
#-- read file
g = gpd.read_file(os.path.join(ddir,d,'%s.dir'%model_str,'stitched.dir','shapefiles.dir',f))
#-- remove rows corresponding to noise
ind_remove = []
for i in range(len(g)):
if g['ID'][i] == None:
ind_remove.append(i)
g = g.drop(ind_remove)
#-- also add file name to attribute table
g['FILENAME'] = [f]*len(g['ID'])
#-- add to main dataframe list
gdf.append(g)
#print(g.crs)
#-- get projection to save file (same for all files, so just read last one)
#crs_wkt = CRS.from_dict(g.crs).to_wkt()
#crs_wkt = CRS.from_dict(init='epsg:3031').to_wkt()
#print(crs_wkt)
#-- concatenate dataframes
combined = gpd.GeoDataFrame(pd.concat(gdf))
#-- save to file
if error:
suffix = ''
else:
suffix = '_centerLines'
combined.to_file(os.path.join(ddir,'combined_AllTracks%s.shp'%suffix),driver='ESRI Shapefile')#,crs_wkt=crs_wkt)
#-- run main program
if __name__ == '__main__':
main() | combine_shapefiles_allTracks.py | u"""
combine_shapfiles.py
<NAME> (08/2020)
Combine all individual shapefiles for all tracks into 1 file
"""
import os
import sys
import getopt
import pandas as pd
import geopandas as gpd
from rasterio.crs import CRS
#-- main function
def main():
#-- Read the system arguments listed after the program
long_options=['DIR=','FILTER=','MODEL=','ERROR']
optlist,arglist = getopt.getopt(sys.argv[1:],'D:F:M:E',long_options)
#-- Set default settings
ddir = '/DFS-L/DATA/gl_ml/SENTINEL1_2018/'
model_str = 'atrous_32init_drop0.2_customLossR727'
FILTER = 8000
error = False
for opt, arg in optlist:
if opt in ("-D","--DIR"):
ddir = os.path.expanduser(arg)
elif opt in ("-F","--FILTER"):
if arg not in ['NONE','none','None','N','n',0]:
FILTER = float(arg)
elif opt in ("-M","--MODEL"):
model_str = arg
elif opt in ("-E","--ERROR"):
error = True
flt_str = '_%.1fkm'%(FILTER/1000)
#-- get list of all folders (tracks)
folderList = os.listdir(ddir)
folder_list = [f for f in folderList if os.path.isdir(os.path.join(ddir,f))]
print(folder_list)
#-- initialize list to be converted to geopandas dataframe
gdf = []
for d in folder_list:
#-- get list of files
fileList = os.listdir(os.path.join(ddir,d,'%s.dir'%model_str,'stitched.dir','shapefiles.dir'))
if error:
file_list = [f for f in fileList if (f.endswith('%s_ERR.shp'%flt_str))]
else:
file_list = [f for f in fileList if (f.endswith('%s.shp'%flt_str))]
print(d,len(file_list))
for f in file_list:
#-- read file
g = gpd.read_file(os.path.join(ddir,d,'%s.dir'%model_str,'stitched.dir','shapefiles.dir',f))
#-- remove rows corresponding to noise
ind_remove = []
for i in range(len(g)):
if g['ID'][i] == None:
ind_remove.append(i)
g = g.drop(ind_remove)
#-- also add file name to attribute table
g['FILENAME'] = [f]*len(g['ID'])
#-- add to main dataframe list
gdf.append(g)
#print(g.crs)
#-- get projection to save file (same for all files, so just read last one)
#crs_wkt = CRS.from_dict(g.crs).to_wkt()
#crs_wkt = CRS.from_dict(init='epsg:3031').to_wkt()
#print(crs_wkt)
#-- concatenate dataframes
combined = gpd.GeoDataFrame(pd.concat(gdf))
#-- save to file
if error:
suffix = ''
else:
suffix = '_centerLines'
combined.to_file(os.path.join(ddir,'combined_AllTracks%s.shp'%suffix),driver='ESRI Shapefile')#,crs_wkt=crs_wkt)
#-- run main program
if __name__ == '__main__':
main() | 0.054563 | 0.194865 |
import argparse
import random
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
from models.fewshot_anom import FewShotSeg
from dataloading.datasets import TestDataset
from dataloading.dataset_specifics import *
from utils import *
def _str2bool(value):
    """Parse a command-line boolean ('True'/'False', case-insensitive).

    argparse's ``type=bool`` is a classic trap: ``bool('False')`` is True
    because any non-empty string is truthy.  This converter keeps the
    original ``--flag True/False`` CLI syntax but parses it correctly.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', 't', 'yes', 'y', '1'):
        return True
    if value.lower() in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_arguments(argv=None):
    """Parse command-line arguments for the inference script.

    Args:
        argv: Optional list of argument strings.  Defaults to None, in which
            case argparse reads sys.argv[1:] — backward-compatible with the
            original zero-argument call.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_root', type=str, required=True)
    parser.add_argument('--save_root', type=str, required=True)
    parser.add_argument('--pretrained_root', type=str, required=True)
    parser.add_argument('--fold', type=int, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--n_shot', default=1, type=int)
    # BUG FIX: type=bool made '--all_slices False' / '--EP1 False' parse as
    # True (non-empty string is truthy); use an explicit converter instead.
    parser.add_argument('--all_slices', default=False, type=_str2bool)
    parser.add_argument('--EP1', default=False, type=_str2bool)
    parser.add_argument('--seed', default=None, type=int)
    parser.add_argument('--workers', default=0, type=int)
    return parser.parse_args(argv)
def main():
    """Run few-shot segmentation inference over all foreground classes.

    Loads a pretrained FewShotSeg model, builds the query data loader, then
    for every non-background class fetches a support sample and calls
    infer(), logging per-class and overall mean Dice/IoU.
    """
    args = parse_arguments()

    # Deterministic setting for reproducibility.
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True

    # Set up logging.
    logger = set_logger(args.save_root, 'train.log')
    logger.info(args)

    # Setup the path to save predictions.
    # NOTE(review): os.path.join with a single argument is a no-op; kept for
    # interface compatibility (infer() reads args.save).
    args.save = os.path.join(args.save_root)

    # Init model and load state_dict.
    model = FewShotSeg(use_coco_init=False)
    model = nn.DataParallel(model.cuda())
    model.load_state_dict(torch.load(args.pretrained_root, map_location="cpu"))

    # Data loader: one query volume per batch.
    test_dataset = TestDataset(args)
    query_loader = DataLoader(test_dataset,
                              batch_size=1,
                              shuffle=False,
                              num_workers=args.workers,
                              pin_memory=True,
                              drop_last=True)

    # Inference.
    logger.info(' Start inference ... Note: EP1 is ' + str(args.EP1))
    logger.info(' Support: ' + str(test_dataset.support_dir[len(args.data_root):]))
    logger.info(' Query: ' +
                str([elem[len(args.data_root):] for elem in test_dataset.image_dirs]))

    # Get unique labels (classes).
    labels = get_label_names(args.dataset)

    # Loop over classes, accumulating per-class mean metrics.
    class_dice = {}
    class_iou = {}
    for label_val, label_name in labels.items():
        # Skip BG class.  BUG FIX: the original used `label_name is 'BG'`
        # (identity comparison with a string literal), which is not
        # guaranteed by the language and raises SyntaxWarning on >= 3.8.
        if label_name == 'BG':
            continue
        logger.info(' *------------------Class: {}--------------------*'.format(label_name))
        logger.info(' *--------------------------------------------------*')

        # Get support sample + mask for current class.
        support_sample = test_dataset.getSupport(label=label_val, all_slices=args.all_slices, N=args.n_shot)
        test_dataset.label = label_val

        # Infer.
        with torch.no_grad():
            scores = infer(model, query_loader, support_sample, args, logger, label_name)

        # Log class-wise results.
        class_dice[label_name] = torch.tensor(scores.patient_dice).mean().item()
        class_iou[label_name] = torch.tensor(scores.patient_iou).mean().item()
        logger.info(' Mean class IoU: {}'.format(class_iou[label_name]))
        logger.info(' Mean class Dice: {}'.format(class_dice[label_name]))
        logger.info(' *--------------------------------------------------*')

    # Log final results.
    logger.info(' *-----------------Final results--------------------*')
    logger.info(' *--------------------------------------------------*')
    logger.info(' Mean IoU: {}'.format(class_iou))
    logger.info(' Mean Dice: {}'.format(class_dice))
    logger.info(' *--------------------------------------------------*')
def infer(model, query_loader, support_sample, args, logger, label_name):
    """Evaluate one class: predict every query volume and record Dice/IoU.

    Args:
        model: DataParallel-wrapped FewShotSeg, used in eval mode.
        query_loader: DataLoader yielding one query volume per batch.
        support_sample: Dict with 'image' and 'label' tensors; first
            dimension is the number of shots.
        args: Parsed CLI namespace (uses EP1, n_shot, save, data_root).
        logger: Logger for per-volume results.
        label_name: Class name, used in the saved prediction filename.

    Returns:
        Scores accumulator with per-patient Dice/IoU lists.
    """
    # Test mode.
    model.eval()
    # Unpack support data into per-shot lists of single-slice GPU tensors.
    support_image = [support_sample['image'][[i]].float().cuda() for i in range(support_sample['image'].shape[0])]  # n_shot x 3 x H x W
    support_fg_mask = [support_sample['label'][[i]].float().cuda() for i in range(support_sample['image'].shape[0])]  # n_shot x H x W
    # Loop through query volumes.
    scores = Scores()
    for i, sample in enumerate(query_loader):
        # Unpack query data.  NOTE(review): the comprehension variable `i`
        # shadows the outer loop index only inside the comprehension's own
        # scope (Python 3), so the outer `i` is unaffected.
        query_image = [sample['image'][i].float().cuda() for i in range(sample['image'].shape[0])]  # [C x 3 x H x W]
        query_label = sample['label'].long()  # C x H x W
        # Volume id extracted from the filename between 'image_' and '.nii.gz'.
        query_id = sample['id'][0].split('image_')[1][:-len('.nii.gz')]
        # Compute output.
        if args.EP1 is True:
            # EP1: split the query volume into n_shot sub-chunks and match
            # each sub-chunk with its own support slice.
            query_pred = torch.zeros(query_label.shape[-3:])
            C_q = sample['image'].shape[1]
            idx_ = np.linspace(0, C_q, args.n_shot+1).astype('int')
            for sub_chunck in range(args.n_shot):
                support_image_s = [support_image[sub_chunck]]  # 1 x 3 x H x W
                support_fg_mask_s = [support_fg_mask[sub_chunck]]  # 1 x H x W
                query_image_s = query_image[0][idx_[sub_chunck]:idx_[sub_chunck+1]]  # C' x 3 x H x W
                query_pred_s, _, _ = model([support_image_s], [support_fg_mask_s], [query_image_s], train=False)  # C x 2 x H x W
                query_pred_s = query_pred_s.argmax(dim=1).cpu()  # C x H x W
                query_pred[idx_[sub_chunck]:idx_[sub_chunck+1]] = query_pred_s
        else:  # EP 2: all support slices against the whole volume at once.
            query_pred, _, _ = model([support_image], [support_fg_mask], query_image, train=False)  # C x 2 x H x W
            query_pred = query_pred.argmax(dim=1).cpu()  # C x H x W
        # Record scores.
        scores.record(query_pred, query_label)
        # Log.
        logger.info(' Tested query volume: ' + sample['id'][0][len(args.data_root):]
                    + '. Dice score: ' + str(scores.patient_dice[-1].item()))
        # Save predictions for this volume/class.
        file_name = 'image_' + query_id + '_' + label_name + '.pt'
        torch.save(query_pred, os.path.join(args.save, file_name))
    return scores
# Script entry point.
if __name__ == '__main__':
    main()
import argparse
import random
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
from models.fewshot_anom import FewShotSeg
from dataloading.datasets import TestDataset
from dataloading.dataset_specifics import *
from utils import *
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, required=True)
parser.add_argument('--save_root', type=str, required=True)
parser.add_argument('--pretrained_root', type=str, required=True)
parser.add_argument('--fold', type=int, required=True)
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--n_shot', default=1, type=int)
parser.add_argument('--all_slices', default=False, type=bool)
parser.add_argument('--EP1', default=False, type=bool)
parser.add_argument('--seed', default=None, type=int)
parser.add_argument('--workers', default=0, type=int)
return parser.parse_args()
def main():
    """Run few-shot segmentation inference over all foreground classes.

    Loads a pretrained FewShotSeg model, builds the query data loader, then
    for every non-background class fetches a support sample and calls
    infer(), logging per-class and overall mean Dice/IoU.
    """
    args = parse_arguments()

    # Deterministic setting for reproducibility.
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True

    # Set up logging.
    logger = set_logger(args.save_root, 'train.log')
    logger.info(args)

    # Setup the path to save predictions.
    # NOTE(review): os.path.join with a single argument is a no-op; kept for
    # interface compatibility (infer() reads args.save).
    args.save = os.path.join(args.save_root)

    # Init model and load state_dict.
    model = FewShotSeg(use_coco_init=False)
    model = nn.DataParallel(model.cuda())
    model.load_state_dict(torch.load(args.pretrained_root, map_location="cpu"))

    # Data loader: one query volume per batch.
    test_dataset = TestDataset(args)
    query_loader = DataLoader(test_dataset,
                              batch_size=1,
                              shuffle=False,
                              num_workers=args.workers,
                              pin_memory=True,
                              drop_last=True)

    # Inference.
    logger.info(' Start inference ... Note: EP1 is ' + str(args.EP1))
    logger.info(' Support: ' + str(test_dataset.support_dir[len(args.data_root):]))
    logger.info(' Query: ' +
                str([elem[len(args.data_root):] for elem in test_dataset.image_dirs]))

    # Get unique labels (classes).
    labels = get_label_names(args.dataset)

    # Loop over classes, accumulating per-class mean metrics.
    class_dice = {}
    class_iou = {}
    for label_val, label_name in labels.items():
        # Skip BG class.  BUG FIX: the original used `label_name is 'BG'`
        # (identity comparison with a string literal), which is not
        # guaranteed by the language and raises SyntaxWarning on >= 3.8.
        if label_name == 'BG':
            continue
        logger.info(' *------------------Class: {}--------------------*'.format(label_name))
        logger.info(' *--------------------------------------------------*')

        # Get support sample + mask for current class.
        support_sample = test_dataset.getSupport(label=label_val, all_slices=args.all_slices, N=args.n_shot)
        test_dataset.label = label_val

        # Infer.
        with torch.no_grad():
            scores = infer(model, query_loader, support_sample, args, logger, label_name)

        # Log class-wise results.
        class_dice[label_name] = torch.tensor(scores.patient_dice).mean().item()
        class_iou[label_name] = torch.tensor(scores.patient_iou).mean().item()
        logger.info(' Mean class IoU: {}'.format(class_iou[label_name]))
        logger.info(' Mean class Dice: {}'.format(class_dice[label_name]))
        logger.info(' *--------------------------------------------------*')

    # Log final results.
    logger.info(' *-----------------Final results--------------------*')
    logger.info(' *--------------------------------------------------*')
    logger.info(' Mean IoU: {}'.format(class_iou))
    logger.info(' Mean Dice: {}'.format(class_dice))
    logger.info(' *--------------------------------------------------*')
def infer(model, query_loader, support_sample, args, logger, label_name):
# Test mode.
model.eval()
# Unpack support data.
support_image = [support_sample['image'][[i]].float().cuda() for i in range(support_sample['image'].shape[0])] # n_shot x 3 x H x W
support_fg_mask = [support_sample['label'][[i]].float().cuda() for i in range(support_sample['image'].shape[0])] # n_shot x H x W
# Loop through query volumes.
scores = Scores()
for i, sample in enumerate(query_loader):
# Unpack query data.
query_image = [sample['image'][i].float().cuda() for i in range(sample['image'].shape[0])] # [C x 3 x H x W]
query_label = sample['label'].long() # C x H x W
query_id = sample['id'][0].split('image_')[1][:-len('.nii.gz')]
# Compute output.
if args.EP1 is True:
# Match support slice and query sub-chunck.
query_pred = torch.zeros(query_label.shape[-3:])
C_q = sample['image'].shape[1]
idx_ = np.linspace(0, C_q, args.n_shot+1).astype('int')
for sub_chunck in range(args.n_shot):
support_image_s = [support_image[sub_chunck]] # 1 x 3 x H x W
support_fg_mask_s = [support_fg_mask[sub_chunck]] # 1 x H x W
query_image_s = query_image[0][idx_[sub_chunck]:idx_[sub_chunck+1]] # C' x 3 x H x W
query_pred_s, _, _ = model([support_image_s], [support_fg_mask_s], [query_image_s], train=False) # C x 2 x H x W
query_pred_s = query_pred_s.argmax(dim=1).cpu() # C x H x W
query_pred[idx_[sub_chunck]:idx_[sub_chunck+1]] = query_pred_s
else: # EP 2
query_pred, _, _ = model([support_image], [support_fg_mask], query_image, train=False) # C x 2 x H x W
query_pred = query_pred.argmax(dim=1).cpu() # C x H x W
# Record scores.
scores.record(query_pred, query_label)
# Log.
logger.info(' Tested query volume: ' + sample['id'][0][len(args.data_root):]
+ '. Dice score: ' + str(scores.patient_dice[-1].item()))
# Save predictions.
file_name = 'image_' + query_id + '_' + label_name + '.pt'
torch.save(query_pred, os.path.join(args.save, file_name))
return scores
# Script entry point.
if __name__ == '__main__':
    main()
# Module-level defaults applied to every generated ListWidget:
# horizontally centre-align the widget contents.
widget = WidgetDefault()
widget.halign = "Center"
commonDefaults["ListWidget"] = widget
def generateListWidget(file, screen, list, parentName):
    """Emit the C code that creates and configures one leListWidget.

    Writes a leListWidget_New() call plus setters for properties that differ
    from their defaults, appends the widget's items (string + optional icon),
    hooks up the selection-changed event, and finally parents the widget.

    Args:
        file: Output writer exposing write()/writeNewLine().
        screen: Screen being generated (forwarded to generateBaseWidget).
        list: List-widget model object.  NOTE(review): shadows the builtin
            ``list``; left as-is to keep the call interface unchanged.
        parentName: C identifier of the parent widget variable.
    """
    name = list.getName()
    file.write(" %s = leListWidget_New();" % (name))
    generateBaseWidget(file, screen, list)
    mode = list.getSelectionMode().toString()
    # Non-default modes map onto the C enum LE_LIST_WIDGET_SELECTION_MODE_<MODE>.
    # NOTE(review): when the mode is "Single" the raw string is passed through;
    # presumably writeSetLiteralString skips emitting the default — confirm.
    if mode != "Single":
        mode = "LE_LIST_WIDGET_SELECTION_MODE_" + mode.upper()
    writeSetLiteralString(file, name, "SelectionMode", mode, "LE_LIST_WIDGET_SELECTION_MODE_SINGLE")
    writeSetBoolean(file, name, "AllowEmptySelection", list.getAllowEmpty(), True)
    pos = getHorzRelativePosition(list.getIconPosition().toString())
    writeSetLiteralString(file, name, "IconPosition", pos, "LE_RELATIVE_POSITION_LEFTOF")
    writeSetInt(file, name, "IconMargin", list.getIconMargin(), 10)
    items = list.getItems()
    if len(items) > 0:
        for idx, item in enumerate(items):
            file.write(" %s->fn->appendItem(%s);" % (name, name))
            text = craftStringAssetName(item.text)
            # The asset-name helpers return the literal "NULL" when no asset
            # exists; skip the corresponding setter in that case.
            if text != "NULL":
                file.write(" %s->fn->setItemString(%s, %d, (leString*)%s);" % (name, name, idx, text))
            icon = craftAssetName(item.icon)
            if icon != "NULL":
                file.write(" %s->fn->setItemIcon(%s, %d, %s);" % (name, name, idx, icon))
    writeEvent(file, name, list, "SelectionChangedEvent", "SelectedItemChangedEventCallback", "OnSelectionChanged")
    file.write(" %s->fn->addChild(%s, (leWidget*)%s);" % (parentName, parentName, name))
    file.writeNewLine()
def generateListEvent(screen, widget, event, genActions):
    """Generate the C callback body for a list-widget event.

    Only "SelectionChangedEvent" is handled; any other event yields an
    empty string.
    """
    if event.name != "SelectionChangedEvent":
        return ""
    header = "void %s_OnSelectionChanged(%s, uint32_t idx, leBool selected)\n" % (
        widget.getName(), getWidgetVariableName(widget))
    return header + generateActions(widget, event, genActions, None, None)
def generateListAction(text, variables, owner, event, action):
    """Append the C call(s) implementing one list-widget action.

    Dispatches on action.actionID, forwarding the action's arguments to the
    matching leListWidget member function via writeActionFunc; unknown IDs
    fall through to the generic widget action generator.

    Args:
        text: Output accumulator passed through to writeActionFunc.
        variables: Dict of generated local variables (name -> C type);
            only used by the generic fallback here.
        owner: Widget owning the event (used by the generic fallback).
        event: Event being generated (used by the generic fallback).
        action: Action descriptor with actionID, targetName and arguments.
    """
    name = action.targetName
    if action.actionID == "SetSelectionMode":
        val = getActionArgumentValue(action, "Mode")
        # Map the designer-facing mode name onto the C enum constant;
        # anything other than Single/Multiple is treated as Contiguous.
        if val == "Single":
            val = "LE_LIST_WIDGET_SELECTION_MODE_SINGLE"
        elif val == "Multiple":
            val = "LE_LIST_WIDGET_SELECTION_MODE_MULTIPLE"
        else:
            val = "LE_LIST_WIDGET_SELECTION_MODE_CONTIGUOUS"
        writeActionFunc(text, action, "setSelectionMode", [val])
    elif action.actionID == "SetAllowEmptySelection":
        val = getActionArgumentValue(action, "AllowEmpty")
        writeActionFunc(text, action, "setAllowEmptySelection", [val])
    elif action.actionID == "SetIconPosition":
        val = getRelativePosition(getActionArgumentValue(action, "IconPosition"))
        writeActionFunc(text, action, "setIconPosition", [val])
    elif action.actionID == "SetIconMargin":
        val = getActionArgumentValue(action, "IconMargin")
        writeActionFunc(text, action, "setIconMargin", [val])
    elif action.actionID == "AppendItem":
        # String/icon arguments are currently ignored for append.
        #str = getActionArgumentValue(action, "String")
        #icon = getActionArgumentValue(action, "Image")
        writeActionFunc(text, action, "appendItem", [])
    elif action.actionID == "InsertItem":
        # String/icon arguments are currently ignored for insert.
        #var = "%s_listInsertIndex" % name
        #variables[var] = "int32_t"
        #str = getActionArgumentValue(action, "String")
        #icon = getActionArgumentValue(action, "Image")
        idx = getActionArgumentValue(action, "Index")
        writeActionFunc(text, action, "insertItem", [idx])
        #writeActionFunc(text, action, "setItemString", [var, str])
        #writeActionFunc(text, action, "setItemIcon", [var, icon])
    elif action.actionID == "RemoveItem":
        idx = getActionArgumentValue(action, "Index")
        writeActionFunc(text, action, "removeItem", [idx])
    elif action.actionID == "RemoveAllItems":
        writeActionFunc(text, action, "removeAllItems", [])
    elif action.actionID == "SetItemSelected":
        idx = getActionArgumentValue(action, "Index")
        sel = getActionArgumentValue(action, "Selected")
        writeActionFunc(text, action, "setItemSelected", [idx, sel])
    elif action.actionID == "ToggleItemSelect":
        idx = getActionArgumentValue(action, "Index")
        writeActionFunc(text, action, "toggleItemSelected", [idx])
    elif action.actionID == "SelectAll":
        writeActionFunc(text, action, "selectAll", [])
    elif action.actionID == "DeselectAll":
        writeActionFunc(text, action, "deselectAll", [])
    elif action.actionID == "SetItemString":
        # NOTE(review): `str` shadows the builtin; kept byte-identical here.
        str = getActionArgumentValue(action, "String")
        idx = getActionArgumentValue(action, "Index")
        writeActionFunc(text, action, "setItemString", [idx, str])
    elif action.actionID == "SetItemIcon":
        icon = getActionArgumentValue(action, "Image")
        idx = getActionArgumentValue(action, "Index")
        writeActionFunc(text, action, "setItemIcon", [idx, icon])
    else:
        # Not a list-specific action: defer to the generic widget handler.
        generateWidgetAction(text, variables, owner, event, action)
widget.halign = "Center"
commonDefaults["ListWidget"] = widget
def generateListWidget(file, screen, list, parentName):
name = list.getName()
file.write(" %s = leListWidget_New();" % (name))
generateBaseWidget(file, screen, list)
mode = list.getSelectionMode().toString()
if mode != "Single":
mode = "LE_LIST_WIDGET_SELECTION_MODE_" + mode.upper()
writeSetLiteralString(file, name, "SelectionMode", mode, "LE_LIST_WIDGET_SELECTION_MODE_SINGLE")
writeSetBoolean(file, name, "AllowEmptySelection", list.getAllowEmpty(), True)
pos = getHorzRelativePosition(list.getIconPosition().toString())
writeSetLiteralString(file, name, "IconPosition", pos, "LE_RELATIVE_POSITION_LEFTOF")
writeSetInt(file, name, "IconMargin", list.getIconMargin(), 10)
items = list.getItems()
if len(items) > 0:
for idx, item in enumerate(items):
file.write(" %s->fn->appendItem(%s);" % (name, name))
text = craftStringAssetName(item.text)
if text != "NULL":
file.write(" %s->fn->setItemString(%s, %d, (leString*)%s);" % (name, name, idx, text))
icon = craftAssetName(item.icon)
if icon != "NULL":
file.write(" %s->fn->setItemIcon(%s, %d, %s);" % (name, name, idx, icon))
writeEvent(file, name, list, "SelectionChangedEvent", "SelectedItemChangedEventCallback", "OnSelectionChanged")
file.write(" %s->fn->addChild(%s, (leWidget*)%s);" % (parentName, parentName, name))
file.writeNewLine()
def generateListEvent(screen, widget, event, genActions):
text = ""
if event.name == "SelectionChangedEvent":
text += "void %s_OnSelectionChanged(%s, uint32_t idx, leBool selected)\n" % (widget.getName(), getWidgetVariableName(widget))
text += generateActions(widget, event, genActions, None, None)
return text
def generateListAction(text, variables, owner, event, action):
name = action.targetName
if action.actionID == "SetSelectionMode":
val = getActionArgumentValue(action, "Mode")
if val == "Single":
val = "LE_LIST_WIDGET_SELECTION_MODE_SINGLE"
elif val == "Multiple":
val = "LE_LIST_WIDGET_SELECTION_MODE_MULTIPLE"
else:
val = "LE_LIST_WIDGET_SELECTION_MODE_CONTIGUOUS"
writeActionFunc(text, action, "setSelectionMode", [val])
elif action.actionID == "SetAllowEmptySelection":
val = getActionArgumentValue(action, "AllowEmpty")
writeActionFunc(text, action, "setAllowEmptySelection", [val])
elif action.actionID == "SetIconPosition":
val = getRelativePosition(getActionArgumentValue(action, "IconPosition"))
writeActionFunc(text, action, "setIconPosition", [val])
elif action.actionID == "SetIconMargin":
val = getActionArgumentValue(action, "IconMargin")
writeActionFunc(text, action, "setIconMargin", [val])
elif action.actionID == "AppendItem":
#str = getActionArgumentValue(action, "String")
#icon = getActionArgumentValue(action, "Image")
writeActionFunc(text, action, "appendItem", [])
elif action.actionID == "InsertItem":
#var = "%s_listInsertIndex" % name
#variables[var] = "int32_t"
#str = getActionArgumentValue(action, "String")
#icon = getActionArgumentValue(action, "Image")
idx = getActionArgumentValue(action, "Index")
writeActionFunc(text, action, "insertItem", [idx])
#writeActionFunc(text, action, "setItemString", [var, str])
#writeActionFunc(text, action, "setItemIcon", [var, icon])
elif action.actionID == "RemoveItem":
idx = getActionArgumentValue(action, "Index")
writeActionFunc(text, action, "removeItem", [idx])
elif action.actionID == "RemoveAllItems":
writeActionFunc(text, action, "removeAllItems", [])
elif action.actionID == "SetItemSelected":
idx = getActionArgumentValue(action, "Index")
sel = getActionArgumentValue(action, "Selected")
writeActionFunc(text, action, "setItemSelected", [idx, sel])
elif action.actionID == "ToggleItemSelect":
idx = getActionArgumentValue(action, "Index")
writeActionFunc(text, action, "toggleItemSelected", [idx])
elif action.actionID == "SelectAll":
writeActionFunc(text, action, "selectAll", [])
elif action.actionID == "DeselectAll":
writeActionFunc(text, action, "deselectAll", [])
elif action.actionID == "SetItemString":
str = getActionArgumentValue(action, "String")
idx = getActionArgumentValue(action, "Index")
writeActionFunc(text, action, "setItemString", [idx, str])
elif action.actionID == "SetItemIcon":
icon = getActionArgumentValue(action, "Image")
idx = getActionArgumentValue(action, "Index")
writeActionFunc(text, action, "setItemIcon", [idx, icon])
else:
generateWidgetAction(text, variables, owner, event, action) | 0.202601 | 0.119305 |
import os
from tensorflow.python.platform import gfile
import numpy as np
from csp.utils import wav_utils
import tensorflow as tf
from tensorflow.python.ops import io_ops
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
import numpy as np
from struct import unpack, pack
from ..utils import utils
import random
from .base import BaseInputData
DCT_COEFFICIENT_COUNT = 40
class BatchedInput(BaseInputData):
    """Bucketed, padded tf.data input pipeline for the VIVOS speech corpus.

    Features are precomputed 40-dim frames stored as ``<wav path>.npy``
    files; targets are space-separated integer token ids (characters or
    words depending on ``hparams.input_unit``).
    """

    # Width of each feature frame (number of DCT coefficients).
    num_features = DCT_COEFFICIENT_COUNT

    def __init__(self, hparams, mode):
        """Load the vocabulary and the file/target manifest for `mode`.

        Args:
            hparams: Hyper-parameter object; uses input_unit ('char'/'word'),
                batch_size, num_buckets and max_train.
            mode: One of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).
        """
        self.mode = mode
        self.hparams = hparams

        # Vocabulary file: one "<id> <token>" entry per line.
        # (with-statement fixes the original's leaked file handles.)
        vocab_path = ('data/vivos/chars.txt' if hparams.input_unit == 'char'
                      else 'data/vivos/words.txt')
        with open(vocab_path, encoding='utf-8') as f:
            entries = [s.strip().split(' ', 1) for s in f]
        self.vocab = {int(entry[0]): entry[1] for entry in entries}
        self.vocab_size = len(entries)

        BaseInputData.__init__(self, hparams, mode)

        # Manifest: "<file> <target ids>" per line for train/eval,
        # one "<file>" per line for predict.
        filenames, targets = [], []
        if self.mode == tf.estimator.ModeKeys.TRAIN:
            data_filename = "data/vivos/train/data_chars.txt" if self.hparams.input_unit == 'char' \
                else "data/vivos/train/data.txt"
        elif self.mode == tf.estimator.ModeKeys.EVAL:
            data_filename = "data/vivos/test/data_chars.txt" if self.hparams.input_unit == "char" \
                else "data/vivos/test/data.txt"
        else:
            data_filename = "data/vivos/infer/test.txt"

        with open(data_filename) as f:
            for line in f:
                if self.mode != tf.estimator.ModeKeys.PREDICT:
                    if line.strip() == "":
                        continue
                    filename, target = line.strip().split(' ', 1)
                    targets.append(target)
                else:
                    filename = line.strip()
                filenames.append(filename)

        self.size = len(filenames)
        self.input_filenames = filenames
        self.input_targets = targets

    def init_dataset(self):
        """Build the bucketed/padded tf.data pipeline and its iterator."""
        self.filenames = tf.placeholder(dtype=tf.string)
        self.targets = tf.placeholder(dtype=tf.string)

        # Source features: load each .npy file via py_func and cast to f32.
        src_dataset = tf.data.Dataset.from_tensor_slices(self.filenames)
        src_dataset = src_dataset.map(lambda filename:
                                      tf.cast(tf.py_func(self.load_input, [filename], tf.float64),
                                              tf.float32))
        src_dataset = src_dataset.map(lambda feat: (feat, tf.shape(feat)[0]))

        if self.mode == tf.estimator.ModeKeys.PREDICT:
            src_tgt_dataset = src_dataset
        else:
            tgt_dataset = tf.data.Dataset.from_tensor_slices(self.targets)
            tgt_dataset = tgt_dataset.map(
                lambda target_str: tf.cast(tf.py_func(self.extract_target_features, [target_str], tf.int64), tf.int32))
            tgt_dataset = tgt_dataset.map(lambda feat: (tf.cast(feat, tf.int32), tf.shape(feat)[0]))
            src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))

        # BUG FIX: Dataset.take() returns a new dataset; the original called
        # it without assigning the result, so max_train was silently ignored.
        if self.mode == tf.estimator.ModeKeys.TRAIN and self.hparams.max_train > 0:
            src_tgt_dataset = src_tgt_dataset.take(self.hparams.max_train)
        # NOTE(review): the original also had an unassigned take(10) for
        # PREDICT; it looks like leftover debug code that would cap inference
        # at 10 examples, so it is intentionally left without effect here —
        # confirm intent before enabling.
        if self.mode == tf.estimator.ModeKeys.PREDICT:
            src_tgt_dataset.take(10)

        self.batched_dataset = utils.get_batched_dataset(
            src_tgt_dataset,
            self.hparams.batch_size,
            DCT_COEFFICIENT_COUNT,
            self.hparams.num_buckets, self.mode,
            padding_values=0 if self.hparams.input_unit == "char" else 1
        )
        self.iterator = self.batched_dataset.make_initializable_iterator()

    def init_from_wav_files(self, wav_filenames):
        """Build a feature pipeline directly from raw wav files (no targets)."""
        src_dataset = tf.data.Dataset.from_tensor_slices(wav_filenames)
        src_dataset = wav_utils.wav_to_features(src_dataset, self.hparams, 40)
        src_dataset = src_dataset.map(lambda feat: (feat, tf.shape(feat)[0]))
        self.batched_dataset = utils.get_batched_dataset(
            src_dataset,
            self.hparams.batch_size,
            DCT_COEFFICIENT_COUNT,
            self.hparams.num_buckets, self.mode
        )
        self.iterator = self.batched_dataset.make_initializable_iterator()

    def load_input(self, filename):
        """Load the precomputed feature matrix stored next to `filename`."""
        return np.load(filename.decode('utf-8') + '.npy').astype(float)

    def extract_target_features(self, target_str):
        """Parse a space-separated id string into a nested int list."""
        return [[int(x) for x in target_str.decode('utf-8').split(' ')]]

    def reset_iterator(self, sess, skip=0, shuffle=False):
        """(Re)initialize the dataset iterator.

        Args:
            sess: TF session used to run the iterator initializer.
            skip: Number of leading examples to drop (resume support).
            shuffle: If True, shuffle within consecutive buckets of 100
                examples so similarly-positioned inputs stay together.
        """
        filenames = self.input_filenames
        targets = self.input_targets
        if shuffle:
            bucket = 100
            shuffled_filenames = []
            shuffled_targets = []
            # BUG FIX: the original iterated range(len // bucket), silently
            # dropping the final partial bucket (up to 99 examples) from the
            # epoch.  Stepping by `bucket` keeps the tail.
            for start in range(0, len(filenames), bucket):
                end = min(start + bucket, len(filenames))
                ls = list(zip(filenames[start:end], targets[start:end]))
                random.shuffle(ls)
                fs, ts = zip(*ls)
                shuffled_filenames += fs
                shuffled_targets += ts
            filenames = shuffled_filenames
            targets = shuffled_targets
        filenames = filenames[skip:]
        targets = targets[skip:]
        sess.run(self.iterator.initializer, feed_dict={
            self.filenames: filenames,
            self.targets: targets
        })

    def decode(self, d):
        """Decode a sequence of token ids to text.

        NOTE(review): decoding is currently short-circuited — the method
        returns its input unchanged, and everything below the first return
        is dead code (it also references self.decode_map, which is not
        defined in this class).  Confirm intent before re-enabling.
        """
        return d
        ret = ''
        for c in d:
            if c <= 0: continue
            if self.hparams.input_unit == "word":
                if c == 1: return ret  # sos
            # ret += str(c) + " "
            blank = ' ' if self.hparams.input_unit == "word" else ''
            ret += self.decode_map[c] + " " + blank if c in self.vocab else '?'
        return ret
from tensorflow.python.platform import gfile
import numpy as np
from csp.utils import wav_utils
import tensorflow as tf
from tensorflow.python.ops import io_ops
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
import numpy as np
from struct import unpack, pack
from ..utils import utils
import random
from .base import BaseInputData
DCT_COEFFICIENT_COUNT = 40
class BatchedInput(BaseInputData):
num_features = DCT_COEFFICIENT_COUNT
def __init__(self, hparams, mode):
self.mode = mode
self.hparams = hparams
if hparams.input_unit == 'char':
chars = [s.strip().split(' ', 1) for s in open('data/vivos/chars.txt', encoding='utf-8')]
self.vocab = {int(char[0]): char[1] for char in chars}
self.vocab_size = len(chars)
else:
words = [s.strip().split(' ', 1) for s in open('data/vivos/words.txt', encoding='utf-8')]
self.vocab = {int(word[0]): word[1] for word in words}
self.vocab_size = len(words)
BaseInputData.__init__(self, hparams, mode)
filenames, targets = [], []
if self.mode == tf.estimator.ModeKeys.TRAIN:
data_filename = "data/vivos/train/data_chars.txt" if self.hparams.input_unit == 'char' \
else "data/vivos/train/data.txt"
elif self.mode == tf.estimator.ModeKeys.EVAL:
data_filename = "data/vivos/test/data_chars.txt" if self.hparams.input_unit == "char" \
else "data/vivos/test/data.txt"
else:
data_filename = "data/vivos/infer/test.txt"
for line in open(data_filename):
if self.mode != tf.estimator.ModeKeys.PREDICT:
if line.strip() == "": continue
filename, target = line.strip().split(' ', 1)
targets.append(target)
else:
filename = line.strip()
filenames.append(filename)
self.size = len(filenames)
self.input_filenames = filenames
self.input_targets = targets
def init_dataset(self):
self.filenames = tf.placeholder(dtype=tf.string)
self.targets = tf.placeholder(dtype=tf.string)
src_dataset = tf.data.Dataset.from_tensor_slices(self.filenames)
src_dataset = src_dataset.map(lambda filename:
tf.cast(tf.py_func(self.load_input, [filename], tf.float64),
tf.float32))
src_dataset = src_dataset.map(lambda feat: (feat, tf.shape(feat)[0]))
if self.mode == tf.estimator.ModeKeys.PREDICT:
src_tgt_dataset = src_dataset
else:
tgt_dataset = tf.data.Dataset.from_tensor_slices(self.targets)
tgt_dataset = tgt_dataset.map(
lambda str: tf.cast(tf.py_func(self.extract_target_features, [str], tf.int64), tf.int32))
tgt_dataset = tgt_dataset.map(lambda feat: (tf.cast(feat, tf.int32), tf.shape(feat)[0]))
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
if self.mode == tf.estimator.ModeKeys.TRAIN and self.hparams.max_train > 0:
src_tgt_dataset.take(self.hparams.max_train)
if self.mode == tf.estimator.ModeKeys.PREDICT:
src_tgt_dataset.take(10)
self.batched_dataset = utils.get_batched_dataset(
src_tgt_dataset,
self.hparams.batch_size,
DCT_COEFFICIENT_COUNT,
self.hparams.num_buckets, self.mode,
padding_values=0 if self.hparams.input_unit == "char" else 1
)
self.iterator = self.batched_dataset.make_initializable_iterator()
def init_from_wav_files(self, wav_filenames):
src_dataset = tf.data.Dataset.from_tensor_slices(wav_filenames)
src_dataset = wav_utils.wav_to_features(src_dataset, self.hparams, 40)
src_dataset = src_dataset.map(lambda feat: (feat, tf.shape(feat)[0]))
self.batched_dataset = utils.get_batched_dataset(
src_dataset,
self.hparams.batch_size,
DCT_COEFFICIENT_COUNT,
self.hparams.num_buckets, self.mode
)
self.iterator = self.batched_dataset.make_initializable_iterator()
def load_input(self, filename):
return np.load(filename.decode('utf-8') + '.npy').astype(float)
def extract_target_features(self, str):
return [[int(x) for x in str.decode('utf-8').split(' ')]]
def reset_iterator(self, sess, skip=0, shuffle=False):
filenames = self.input_filenames
targets = self.input_targets
if shuffle:
bucket = 100
shuffled_filenames = []
shuffled_targets = []
start, end = 0, 0
for i in range(0, len(filenames) // bucket):
start, end = i * bucket, min((i + 1) * bucket, len(filenames))
ls = list(zip(filenames[start:end], targets[start:end]))
random.shuffle(ls)
fs, ts = zip(*ls)
shuffled_filenames += fs
shuffled_targets += ts
filenames = shuffled_filenames
targets = shuffled_targets
filenames = filenames[skip:]
targets = targets[skip:]
sess.run(self.iterator.initializer, feed_dict={
self.filenames: filenames,
self.targets: targets
})
def decode(self, d):
return d
ret = ''
for c in d:
if c <= 0: continue
if self.hparams.input_unit == "word":
if c == 1: return ret # sos
# ret += str(c) + " "
blank = ' ' if self.hparams.input_unit == "word" else ''
ret += self.decode_map[c] + " " + blank if c in self.vocab else '?'
return ret | 0.486819 | 0.159283 |
import datetime
import dateutil.tz
import logging
import random
import time
from xmlrpclib import ServerProxy, Transport, Error, Fault, ProtocolError
# Timestamps reused in the offer payloads below: "now" and
# "now + 20 minutes" (the offer's expiry window).
dt = datetime.datetime.now()
dt_now = dt
delta = datetime.timedelta(minutes=20)
d = (dt + delta)
dt_20min = d
class SpecialTransport(Transport):
    """xmlrpclib Transport variant sending SDP-specific HTTP headers.

    Python 2 only (xmlrpclib / httplib ``getresponse(buffering=...)``).
    Overrides single_request to drive the HTTP exchange itself, and
    send_content to emit the DTS User-Agent / Accept headers the server
    expects instead of the stock xmlrpclib ones.
    """

    # Override
    def single_request(self, host, handler, request_body, verbose=0):
        # issue XML-RPC request
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)
        try:
            self.send_request(h, handler, request_body)
            self.send_host(h, host)
            # send_user_agent deliberately skipped: send_content sets a
            # custom User-Agent header instead.
            # self.send_user_agent(h)
            self.send_content(h, request_body)
            response = h.getresponse(buffering=True)
            if response.status == 200:
                self.verbose = verbose
                return self.parse_response(response)
        except Fault:
            raise
        except Exception:
            # All unexpected errors leave connection in
            # a strange state, so we clear it.
            self.close()
            raise
        # Non-200 status: discard any response data and raise exception.
        if (response.getheader("content-length", 0)):
            response.read()
        raise ProtocolError(
            host + handler,
            response.status, response.reason,
            response.msg,
            )

    def _strftime(self, value):
        # Serialize a datetime with a hard-coded +0200 UTC offset.
        # NOTE(review): presumably the SDP expects SAST/CEST times — confirm;
        # returns None for non-datetime values.
        if datetime:
            if isinstance(value, datetime.datetime):
                return "%04d%02d%02dT%02d:%02d:%02d+0200" % (
                    value.year, value.month, value.day,
                    value.hour, value.minute, value.second)

    def send_content(self, connection, request_body):
        # Emit the custom header set, then the body.
        # print dir(connection), request_body
        connection.putheader('User-Agent', 'DTS/5.0/4.2')
        connection.putheader("Content-Type", "text/xml")
        # , 'Content-Type': 'text/xml;charset=utf-8',\
        connection.putheader("Content-Length", str(len(request_body)))
        connection.putheader('Accept', 'application/xml')
        # connection.addheaders('Authorization', 'Basic %s' % base64.encodestring('%s:%s' % login_details)[:-1])
        connection.endheaders()
        if request_body:
            connection.send(request_body)
class SDPOffer(object):
    """XML-RPC client for SDP offer operations (get / update / delete).

    Python 2 code.  Each instance carries a random 5-digit transaction id
    used as originTransactionID in every request payload.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): forwarding *args/**kwargs to object.__init__ only
        # works on Python 2 (deprecated behavior); harmless when called
        # without arguments.
        object.__init__(self, *args, **kwargs)
        self.trans_id = random.randint(10000, 99999)

    def do_offer(self, host, otype , port=10010, dap_login_details=('user', 'user')):
        """Send a single offer request of type `otype` to the SDP at host:port.

        otype is one of 'get', 'update' or 'delete'; credentials are
        embedded in the endpoint URL (dap_login_details is currently
        unused).  Returns the ServerProxy; on failure the exception is
        logged and swallowed.
        """
        client = ServerProxy("http://user:user@%s:%s/Air" % (host, port), transport=SpecialTransport())
        try:
            if otype == 'get':
                offer = { 'offerID' : 10L,
                          'offerType' : 2L,
                          'expiryDateTime' :dt_20min,
                          'originHostName' : 'DTSSVR1',
                          'originNodeType' : 'EXT',
                          'originTimeStamp' : dt_now,
                          'originTransactionID' : str(self.trans_id),
                          'subscriberNumber' : '47112288',
                          'subscriberNumberNAI' : 2L}
                print 'offer', offer
                print(client.GetOffers(offer))
            elif otype == 'update':
                offer = { 'offerID' : 10L ,
                          'offerType' : 2L,
                          'originHostName' : 'DTSSVR1',
                          'originNodeType' : 'EXT',
                          'originTimeStamp' : dt_now,
                          'startDateTime' : dt_now,
                          'expiryDateTime' : dt_20min.replace(tzinfo=dateutil.tz.gettz('SAST')) ,
                          'originTransactionID' : str(self.trans_id),
                          'subscriberNumber' : '47112288',
                          'originOperatorID' : '11',
                          'subscriberNumberNAI' : 2L}
                print 'offer', offer
                print(client.UpdateOffer(offer))
            elif otype == 'delete':
                offer = {'originNodeType' : 'EXT',
                         'originHostName' : 'DTSSVR1',
                         'originTransactionID' : str(self.trans_id),
                         'originTimeStamp' : dt_now,
                         'subscriberNumberNAIstruct' : 2L,
                         'subscriberNumber' : '47112288',
                         'offerID' : 10L ,
                         'productID' : 1L }
                print offer
                print(client.DeleteOffer(offer))
        except Exception as e:
            # Best-effort: log and fall through; `response` is a local that
            # is never read afterwards.
            logging.error('Exception raised opening a POST : {0}'.format(e))
            response = e
        return client
# Manual smoke test: send an UpdateOffer to a local SDP endpoint.
if __name__ == '__main__':
    sdpoffer = SDPOffer()
    print sdpoffer.do_offer('localhost', 'update')
    # print sdpoffer.do_offer('172.28.200.102', 'update')
    # print sdpoffer.do_offer('172.28.200.102', 'update')
    # time.sleep(1)
    # print sdpoffer.do_offer('172.28.200.102', 'get')
    # time.sleep(1)
    # sdpoffer.do_offer('172.28.200.102', 'delete')
import dateutil.tz
import logging
import random
import time
from xmlrpclib import ServerProxy, Transport, Error, Fault, ProtocolError
dt = datetime.datetime.now()
dt_now = dt
delta = datetime.timedelta(minutes=20)
d = (dt + delta)
dt_20min = d
class SpecialTransport(Transport):
# Override
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
# self.send_user_agent(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except Fault:
raise
except Exception:
# All unexpected errors leave connection in
# a strange state, so we clear it.
self.close()
raise
# discard any response data and raise exception
if (response.getheader("content-length", 0)):
response.read()
raise ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
def _strftime(self, value):
if datetime:
if isinstance(value, datetime.datetime):
return "%04d%02d%02dT%02d:%02d:%02d+0200" % (
value.year, value.month, value.day,
value.hour, value.minute, value.second)
def send_content(self, connection, request_body):
# print dir(connection), request_body
connection.putheader('User-Agent', 'DTS/5.0/4.2')
connection.putheader("Content-Type", "text/xml")
# , 'Content-Type': 'text/xml;charset=utf-8',\
connection.putheader("Content-Length", str(len(request_body)))
connection.putheader('Accept', 'application/xml')
# connection.addheaders('Authorization', 'Basic %s' % base64.encodestring('%s:%s' % login_details)[:-1])
connection.endheaders()
if request_body:
connection.send(request_body)
class SDPOffer(object):
def __init__(self, *args, **kwargs):
object.__init__(self, *args, **kwargs)
self.trans_id = random.randint(10000, 99999)
def do_offer(self, host, otype , port=10010, dap_login_details=('user', 'user')):
'Offers get, update and delete'
client = ServerProxy("http://user:user@%s:%s/Air" % (host, port), transport=SpecialTransport())
try:
if otype == 'get':
offer = { 'offerID' : 10L,
'offerType' : 2L,
'expiryDateTime' :dt_20min,
'originHostName' : 'DTSSVR1',
'originNodeType' : 'EXT',
'originTimeStamp' : dt_now,
'originTransactionID' : str(self.trans_id),
'subscriberNumber' : '47112288',
'subscriberNumberNAI' : 2L}
print 'offer', offer
print(client.GetOffers(offer))
elif otype == 'update':
offer = { 'offerID' : 10L ,
'offerType' : 2L,
'originHostName' : 'DTSSVR1',
'originNodeType' : 'EXT',
'originTimeStamp' : dt_now,
'startDateTime' : dt_now,
'expiryDateTime' : dt_20min.replace(tzinfo=dateutil.tz.gettz('SAST')) ,
'originTransactionID' : str(self.trans_id),
'subscriberNumber' : '47112288',
'originOperatorID' : '11',
'subscriberNumberNAI' : 2L}
print 'offer', offer
print(client.UpdateOffer(offer))
elif otype == 'delete':
offer = {'originNodeType' : 'EXT',
'originHostName' : 'DTSSVR1',
'originTransactionID' : str(self.trans_id),
'originTimeStamp' : dt_now,
'subscriberNumberNAIstruct' : 2L,
'subscriberNumber' : '47112288',
'offerID' : 10L ,
'productID' : 1L }
print offer
print(client.DeleteOffer(offer))
except Exception as e:
logging.error('Exception raised opening a POST : {0}'.format(e))
response = e
return client
if __name__ == '__main__':
sdpoffer = SDPOffer()
print sdpoffer.do_offer('localhost', 'update')
# print sdpoffer.do_offer('172.28.200.102', 'update')
# print sdpoffer.do_offer('172.28.200.102', 'update')
# time.sleep(1)
# print sdpoffer.do_offer('172.28.200.102', 'get')
# time.sleep(1)
# sdpoffer.do_offer('172.28.200.102', 'delete') | 0.049854 | 0.069573 |
__import__("pkg_resources").declare_namespace(__name__)
import os
import sys
import time
import psutil
import logging
logger = logging.getLogger(__name__)
EXTENSION = '.exe' if os.name == 'nt' else ''
def is_in_bindir(pathname, cwd, bin_abspaths):
if os.path.isabs(pathname) and os.path.dirname(pathname) in bin_abspaths:
return True
if not os.path.isabs(pathname) and os.path.join(cwd, os.path.dirname(pathname)) in bin_abspaths:
return True
def log_process(process):
logger.debug("found {!r}".format(process))
logger.debug("exe {!r}".format(process.exe()))
logger.debug("cmdline {!r}".format(process.cmdline()))
logger.debug("cwd() {!r}".format(process.cwd()))
def need_to_kill_process(bin_abspaths, ignore_list, process):
log_process(process)
if process.pid == os.getpid():
logger.debug("this is me")
return False
if os.name != "nt" and process.pid == os.getppid():
logger.debug("this is my father")
return False
if os.name == "nt" and process.exe().endswith("buildout.exe"):
logger.debug("assuming is my child buildout, there's no getppid() on Windows")
return False
else:
for pathname in [[process.exe()], process.cmdline()[:1], process.cmdline()[1:2]]:
if pathname and os.path.basename(pathname[0]).replace(EXTENSION, '') in ignore_list:
logger.debug("ignoring this one")
return False
if pathname and process.cwd() and is_in_bindir(pathname[0], process.cwd(), bin_abspaths):
return True
return False
def get_processes_to_kill(bin_dirpaths, ignore_list):
bin_abspaths = [os.path.abspath(bin_dirpath) for bin_dirpath in bin_dirpaths]
logger.debug("looking for processes in {!r}".format(bin_abspaths))
for process in psutil.process_iter():
try:
if need_to_kill_process(bin_abspaths, ignore_list, process):
yield process
except (psutil.AccessDenied, psutil.NoSuchProcess):
logger.debug("skipping {!r}".format(process))
def kill_process(process):
try:
logger.info("killing {!r}".format(process))
process.kill()
except psutil.NoSuchProcess:
logger.info("process already dead")
except:
logger.exception("kill process failed")
def close_application(bin_dirpaths, ignore_list=()):
logger.debug("sys.executable: {!r}".format(sys.executable))
logger.debug("sys.argv: {!r}".format(sys.argv))
for process in get_processes_to_kill(bin_dirpaths, ignore_list):
kill_process(process)
time.sleep(1)
class CloseApplication(object):
def __init__(self, buildout, name, options):
super(CloseApplication, self).__init__()
self.buildout = buildout
self.name = name
self.options = options
def close_application(self):
bin_dirpath = os.path.join(self.buildout.get("buildout").get("directory"), "bin")
parts_bin_dirpath = os.path.join(self.buildout.get("buildout").get("directory"), "parts", "python", "bin")
ignore_list = self.options.get("ignore-list", '').split()
close_application([bin_dirpath, parts_bin_dirpath], ignore_list)
return []
def update(self):
return self.close_application()
def install(self):
return self.close_application() | src/infi/recipe/close_application/__init__.py | __import__("pkg_resources").declare_namespace(__name__)
import os
import sys
import time
import psutil
import logging
logger = logging.getLogger(__name__)
EXTENSION = '.exe' if os.name == 'nt' else ''
def is_in_bindir(pathname, cwd, bin_abspaths):
if os.path.isabs(pathname) and os.path.dirname(pathname) in bin_abspaths:
return True
if not os.path.isabs(pathname) and os.path.join(cwd, os.path.dirname(pathname)) in bin_abspaths:
return True
def log_process(process):
logger.debug("found {!r}".format(process))
logger.debug("exe {!r}".format(process.exe()))
logger.debug("cmdline {!r}".format(process.cmdline()))
logger.debug("cwd() {!r}".format(process.cwd()))
def need_to_kill_process(bin_abspaths, ignore_list, process):
log_process(process)
if process.pid == os.getpid():
logger.debug("this is me")
return False
if os.name != "nt" and process.pid == os.getppid():
logger.debug("this is my father")
return False
if os.name == "nt" and process.exe().endswith("buildout.exe"):
logger.debug("assuming is my child buildout, there's no getppid() on Windows")
return False
else:
for pathname in [[process.exe()], process.cmdline()[:1], process.cmdline()[1:2]]:
if pathname and os.path.basename(pathname[0]).replace(EXTENSION, '') in ignore_list:
logger.debug("ignoring this one")
return False
if pathname and process.cwd() and is_in_bindir(pathname[0], process.cwd(), bin_abspaths):
return True
return False
def get_processes_to_kill(bin_dirpaths, ignore_list):
bin_abspaths = [os.path.abspath(bin_dirpath) for bin_dirpath in bin_dirpaths]
logger.debug("looking for processes in {!r}".format(bin_abspaths))
for process in psutil.process_iter():
try:
if need_to_kill_process(bin_abspaths, ignore_list, process):
yield process
except (psutil.AccessDenied, psutil.NoSuchProcess):
logger.debug("skipping {!r}".format(process))
def kill_process(process):
try:
logger.info("killing {!r}".format(process))
process.kill()
except psutil.NoSuchProcess:
logger.info("process already dead")
except:
logger.exception("kill process failed")
def close_application(bin_dirpaths, ignore_list=()):
logger.debug("sys.executable: {!r}".format(sys.executable))
logger.debug("sys.argv: {!r}".format(sys.argv))
for process in get_processes_to_kill(bin_dirpaths, ignore_list):
kill_process(process)
time.sleep(1)
class CloseApplication(object):
def __init__(self, buildout, name, options):
super(CloseApplication, self).__init__()
self.buildout = buildout
self.name = name
self.options = options
def close_application(self):
bin_dirpath = os.path.join(self.buildout.get("buildout").get("directory"), "bin")
parts_bin_dirpath = os.path.join(self.buildout.get("buildout").get("directory"), "parts", "python", "bin")
ignore_list = self.options.get("ignore-list", '').split()
close_application([bin_dirpath, parts_bin_dirpath], ignore_list)
return []
def update(self):
return self.close_application()
def install(self):
return self.close_application() | 0.169337 | 0.057652 |
from mocasin.gui.utils import platformOperations, listOperations
class mappingInformation:
"""Data object that holds the necessary information to draw a mapping.
:ivar Mapping __mMappingObject: The actual mocasin Mapping object.
:ivar {str, list[str]} __mappingDescription: A dictionary with core names as key and a list of applied process names as value.
:ivar int __mappingId: The id given to the mapping by the user.
:ivar string __color: The Tkinter color value in which the mapping dots should be drawn.
:ivar list[int] __circleHandles: A list of handles of the drawn mapping dots given by a Tkinter Canvas.
:ivar dict{int, str} __nameHandles: A dictionary with handles of displayed process names as key and the name of the process as value.
"""
def __init__(self, mappingObject, identifier, color):
"""Initializes a mappingInformation object.
:param Mapping mappingObject: The mocasin Mapping object which should be visualized.
:param int identifier: The id of the mapping given by the user.
:param str color: The color in which the mapping should be drawn. Can be set by user or uses a color of the default color vector.
"""
self.__mMappingObject = mappingObject
self.__mappingDescription = mappingObject.to_coreDict()
self.__mappingId = identifier
self.__color = color
self.__circleHandles = []
self.__nameHandles = {}
def addCircleHandle(self, handle):
"""Adds a ne handle to the list of circle handles.
:param int handle: The handle that should be added.
"""
self.__circleHandles.append(handle)
return
def getCircleHandles(self):
"""Returns the list of circle handles.
:returns: The list of circle handles.
:rtype list[int]:
"""
return self.__circleHandles
def addNameHandle(self, handle, name):
"""Adds an entry to the __nameHandles dict.
:param int handle: The handle that should be added as key.
:param str name: The process name that should be added as value.
"""
self.__nameHandles.update({handle: name})
return
def getNameHandles(self):
"""Returns the __nameHandle dict.
:returns: The __nameHandle dictionary.
:rtype {int, str}:
"""
return self.__nameHandles
def clearHandles(self):
"""Resets the __circleHandles list and the __nameHandles dictionary."""
self.__circleHandles.clear()
self.__nameHandles.clear()
def getColor(self):
"""Returns the color value of the mappingInformation object.
:returns: The Tkinter color value __color.
:rtype str:
"""
return self.__color
def getMappingDescription(self):
"""Returns the mapping description dictionary of the mappingInformation object.
:returns: The __mappingDescription dictionary.
:rtype dict{int, str}:
"""
return self.__mappingDescription
def getMappingId(self):
"""Returns the ID of the mappingInformation object.
:returns: The __mappingId value.
:rtype int:
"""
return self.__mappingId
def changeAffinity(self, circleHandle, peName):
"""Wrapper method to the changeAffinity method of the mocasin Mapping object.
:param int circleHandle: The handle of the mapping dot for which the affinity should be changed.
:param str peName: The name of the processing element for which the new affinity should be set.
"""
processName = self.__nameHandles[circleHandle + 1]
self.__mMappingObject.change_affinity(processName, peName)
self.__mappingDescription = self.__mMappingObject.to_coreDict()
return
class TRMVplatformInformation:
"""Data object that holds all necessary information to draw a platform.
:ivar Platform platformObject: The platform object which should be drawn.
:ivar list[(str, list[str])] __platformDescription: The description of the platform that can be interpreted by the drawManager class.
:ivar dict{str, list[int]} __coreDictionary: A dictionary with the name o f the core as key and the start x and y value and the end x and y value and
the handle given by the Canvas as values.
:ivar list[int] __coreClasses: A list that has an entry for every core size that appears in the platform structure.
"""
def __init__(self, platformObject):
"""Initializes a platformInformation object.
:param Platform platformObject: The mocasin Platform object that should be drawn.
"""
self.__mPlatformObject = platformObject
self.__createPlatformDescription(platformObject)
self.__coreDictionary = {}
self.__coreClasses = []
def __createPlatformDescription(self, platformObject):
"""Creates the hierarchic platform description that can be interpreted by the drawManager.
:param Platform platformObject: The mocasin Platform object for which the description should be created.
"""
description = platformOperations.getPlatformDescription(
platformObject.processors(), platformObject.primitives()
)
self.__mEqualList = platformOperations.findEqualPrimitives(
platformObject
)
description = platformOperations.mergeEqualPrimitives(
description, self.__mEqualList
)
networkOnChip = False
for equalSheet in self.__mEqualList:
if len(equalSheet) > 2:
networkOnChip = True
if networkOnChip:
description = platformOperations.createNocMatrix(
description, platformObject
)
self.__platformDescription = description
def getPlatformDescription(self):
"""Returns the platformDescription.
:returns: The __platformDescription value.
:rtype list[(str, [list])]:
"""
return self.__platformDescription
def addCoreClass(self, length):
"""Adds a size of processing elements to the list of existing sizes of processing elements.
:param int length: The size of the processing element that should be appended.
"""
if not length in self.__coreClasses:
self.__coreClasses.append(length)
def getCoreClasses(self):
"""Returns the list of existing sizes of processing elements.
:returns: The __coreClasses value.
:rtype list[int]:
"""
return self.__coreClasses
def updateCoreDict(self, key, value):
"""Adds an entry to the coreDictionary.
:param str key: The name of the processing element.
:param list[int] value: A list of integers containing start x and y value, end x and y value, the handle and the color of the processing element in this order.
"""
self.__coreDictionary.update({key: value})
def getCoreDict(self):
"""Returns the dictionary of existing processing elements.
:returns: The __coreDictionary value.
:rtype dict{str, list[int]}
"""
return self.__coreDictionary
def clearHandles(self):
"""Clears the dictionary containing all information about the drawn cores in case the platform has to be redrawn"""
self.__coreDictionary.clear()
class PlatformInformation:
def __init__(self, platform):
self.__platformObject = platform
self.__clusterDict = {}
self.__primitiveDict = {}
self.__coreDict = {}
self.__nextClusterId = 0
self.__L1Prims = []
self.__coreClasses = []
self.__extractPlatformInformation(platform)
def getPlatformObject(self):
return self.__platformObject
def getClusterDict(self):
return self.__clusterDict
def getPrimitiveDict(self):
return self.__primitiveDict
def updateCoreDict(self, key, value):
self.__coreDict.update({key: value})
def getCoreDict(self):
return self.__coreDict
def addCoreClass(self, length):
"""Adds a size of processing elements to the list of existing sizes of processing elements.
:param int length: The size of the processing element that should be appended.
"""
if not length in self.__coreClasses:
self.__coreClasses.append(length)
def getCoreClasses(self):
return self.__coreClasses
def __extractPlatformInformation(self, platform):
primitives = list(platform.primitives())
self.__findClusters(primitives)
for prim in self.__L1Prims:
pe = prim.producers[0].name
for key in self.__clusterDict:
if pe in self.__clusterDict[key][0]:
self.__clusterDict[key][3] = True
for key in self.__clusterDict:
if len(self.__clusterDict[key][1]) == len(
self.__clusterDict[key][0]
):
self.__clusterDict[key][2] = True
self.__clusterDict[key][1] = ["network_on_chip"]
elif len(self.__clusterDict[key][1]) < len(
self.__clusterDict[key][0]
):
self.__clusterDict[key][4] = True
self.__clusterDict[key][1] = ["L2_Cache"]
elif len(self.__clusterDict[key][1]) > len(
self.__clusterDict[key][0]
):
self.__clusterDict[key][2] = True
self.__clusterDict[key][4] = True
self.__clusterDict[key][1] = ["network_on_chip"]
toRemove = []
for key in self.__primitiveDict:
if key in toRemove:
continue
clusterList = self.__primitiveDict[key]
for innerKey in self.__primitiveDict:
if (
not key == innerKey
and clusterList == self.__primitiveDict[innerKey]
):
toRemove.append(innerKey)
for key in toRemove:
self.__primitiveDict.pop(key)
def __findClusters(self, primitives):
smallestPrim = None
for prim in primitives:
if smallestPrim == None:
smallestPrim = prim
continue
else:
if len(prim.producers) < len(smallestPrim.producers):
smallestPrim = prim
if len(smallestPrim.producers) > 1:
toAdd = True
belongingClusters = []
for key in self.__clusterDict:
peList = self.__clusterDict[key][0]
for processor in smallestPrim.producers:
if (
processor.name in peList
and not key in belongingClusters
):
toAdd = False
belongingClusters.append(key)
if toAdd:
peNames = []
for processor in smallestPrim.producers:
peNames.append(processor.name)
self.__clusterDict.update(
{
self.__nextClusterId: [
peNames,
[smallestPrim.name],
False,
False,
False,
]
}
)
self.__nextClusterId += 1
else:
if len(belongingClusters) == 1:
self.__clusterDict[belongingClusters[0]][1].append(
smallestPrim.name
)
else:
self.__primitiveDict.update(
{smallestPrim.name: belongingClusters}
)
else:
self.__L1Prims.append(smallestPrim)
primitives.remove(smallestPrim)
if len(primitives) > 0:
self.__findClusters(primitives)
class platformLayout:
def __init__(self, dimension, slotSizeX, slotSizeY):
"""Inner layout: [Free, startX, EndX, startY, endY, clusterID]"""
self.__dimension = dimension
self.__slotSizeX = slotSizeX
self.__slotSizeY = slotSizeY
self.__layout = []
self.__primList = []
self.__blankLayout()
self.__primDict = {}
self.__nameStack = []
self.__currentName = None
# Bad design, change later
self.nextId = 0
def __blankLayout(self):
"""Initialize blank layout"""
for i in range(0, self.__dimension):
self.__layout.append([])
for j in range(0, self.__dimension):
self.__layout[i].append(
[
True,
None,
j * self.__slotSizeX,
i * self.__slotSizeY,
(j + 1) * self.__slotSizeX,
(i + 1) * self.__slotSizeY,
]
)
def addPrimitives(self, primDescription):
"""Add new Informations to the adjacency List"""
tmpPrimDescription = dict(primDescription)
while not tmpPrimDescription == {}:
longestKey = None
for key in tmpPrimDescription:
if longestKey == None:
longestKey = key
else:
if len(tmpPrimDescription[key]) > len(
tmpPrimDescription[longestKey]
):
longestKey = key
if self.__primList == []:
self.__primList = tmpPrimDescription[longestKey]
else:
self.__primList = self.sortIn(
tmpPrimDescription[longestKey], self.__primList
)
tmpPrimDescription.pop(longestKey, None)
self.__nameStack.append(longestKey)
self.assignSlots(self.__primList)
def sortIn(self, smallerList, biggerList):
innerList = None
atThisLevel = False
toRemove = []
for item in biggerList:
if isinstance(item, list):
if listOperations.containsItem(item, smallerList[0]):
innerList = item
else:
if item in smallerList:
toRemove.append(item)
atThisLevel = True
if atThisLevel:
for item in toRemove:
biggerList.remove(item)
biggerList.append(smallerList)
if not innerList == None:
biggerList.remove(innerList)
biggerList.append(self.sortIn(smallerList, innerList))
return biggerList
def assignSlots(self, clusterList):
toAssign = []
lowerPrimitives = []
for element in clusterList:
if isinstance(element, list):
for idx in self.assignSlots(element):
lowerPrimitives.append(idx)
else:
toAssign.append(element)
if len(toAssign) == 0:
if not lowerPrimitives == []:
self.__primDict.update({self.nextId: []})
for idx in lowerPrimitives:
for value in self.__primDict[idx]:
self.__primDict[self.nextId].append(value)
self.nextId += 1
return [self.nextId - 1]
else:
for i in range(0, self.__dimension):
if i % 2 == 0:
for j in range(0, self.__dimension):
if self.__layout[i][j][0]:
remaining = len(toAssign) - 1
path = [(i, j)]
posX = j
posY = i
while remaining > 0:
if (
posX < self.__dimension - 1
and self.__layout[posY][posX + 1][0]
):
posX += 1
path.append((posY, posX))
remaining -= 1
elif (
posY < self.__dimension - 1
and self.__layout[posY + 1][posX][0]
):
posY += 1
path.append((posY, posX))
remaining -= 1
else:
break
if remaining == 0:
if not self.nextId in self.__primDict:
self.__primDict.update({self.nextId: []})
for idx in lowerPrimitives:
for entry in self.__primDict[idx]:
self.__primDict[self.nextId].append(
entry
)
if len(toAssign) != len(path):
raise RuntimeError("Something went wrong!")
else:
for idx in range(0, len(toAssign)):
self.__layout[path[idx][0]][
path[idx][1]
][0] = False
self.__layout[path[idx][0]][
path[idx][1]
][1] = toAssign[idx]
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][2]
)
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][3]
)
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][4]
)
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][5]
)
lowerPrimitives.append(self.nextId)
self.nextId += 1
return lowerPrimitives
else:
for j in range(self.__dimension - 1, -1, -1):
if self.__layout[i][j][0]:
remaining = len(toAssign) - 1
path = [(i, j)]
posX = j
posY = i
while remaining > 0:
if (
posX > 0
and self.__layout[posY][posX - 1][0]
):
posX -= 1
path.append((posY, posX))
remaining -= 1
elif (
posY < self.__dimension - 1
and self.__layout[posY + 1][posX][0]
):
posY += 1
path.append((posY, posX))
remaining -= 1
else:
break
if remaining == 0:
if not self.nextId in self.__primDict:
self.__primDict.update({self.nextId: []})
for idx in lowerPrimitives:
for entry in self.__primDict[idx]:
self.__primDict[self.nextId].append(
entry
)
if len(toAssign) != len(path):
raise RuntimeError("Something went wrong!")
else:
for idx in range(0, len(toAssign)):
self.__layout[path[idx][0]][
path[idx][1]
][0] = False
self.__layout[path[idx][0]][
path[idx][1]
][1] = toAssign[idx]
"""Insert x and y values in swapped order and revert the whole list
in the end. So it can be read from left to right later on.
"""
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][5]
)
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][4]
)
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][3]
)
self.__primDict[self.nextId].append(
self.__layout[path[idx][0]][
path[idx][1]
][2]
)
self.__primDict[self.nextId].reverse()
lowerPrimitives.append(self.nextId)
self.nextId += 1
return lowerPrimitives
return lowerPrimitives
def getLayout(self):
for i in range(0, self.__dimension):
for j in range(0, self.__dimension):
try:
yield self.__layout[i][j]
except:
print("Failure!")
def getPrimitives(self):
# just an unrealistic large number, so dict entries are smaller
n = 0
usedKeys = []
while n < len(self.__primDict):
actualKey = None
for key in self.__primDict:
if actualKey == None and not key in usedKeys:
actualKey = key
else:
if actualKey == None:
continue
if (
len(self.__primDict[key])
> len(self.__primDict[actualKey])
and not key in usedKeys
):
actualKey = key
n += 1
usedKeys.append(actualKey)
yield (actualKey, self.__primDict[actualKey]) | mocasin/gui/dataTemplates.py |
from mocasin.gui.utils import platformOperations, listOperations
class mappingInformation:
    """Bundles a mocasin Mapping with the state needed to draw it.

    :ivar Mapping __mMappingObject: The wrapped mocasin Mapping object.
    :ivar dict __mappingDescription: Maps core names to the list of process
        names placed on that core.
    :ivar int __mappingId: User-chosen identifier of the mapping.
    :ivar str __color: Tkinter color value used for the mapping dots.
    :ivar list[int] __circleHandles: Canvas handles of the drawn mapping dots.
    :ivar dict{int, str} __nameHandles: Canvas text handle -> process name.
    """

    def __init__(self, mappingObject, identifier, color):
        """Initializes a mappingInformation object.

        :param Mapping mappingObject: The mocasin Mapping to visualize.
        :param int identifier: The user-chosen id of the mapping.
        :param str color: Tkinter color for the mapping dots (user-set or
            taken from the default color vector).
        """
        self.__mMappingObject = mappingObject
        self.__mappingDescription = mappingObject.to_coreDict()
        self.__mappingId = identifier
        self.__color = color
        self.__circleHandles = []
        self.__nameHandles = {}

    def addCircleHandle(self, handle):
        """Records the canvas handle of a newly drawn mapping dot.

        :param int handle: The canvas handle to record.
        """
        self.__circleHandles.append(handle)

    def getCircleHandles(self):
        """Returns the list of canvas handles of the mapping dots.

        :rtype list[int]:
        """
        return self.__circleHandles

    def addNameHandle(self, handle, name):
        """Associates a canvas text handle with its process name.

        :param int handle: The canvas handle of the displayed name.
        :param str name: The process name shown by that handle.
        """
        self.__nameHandles[handle] = name

    def getNameHandles(self):
        """Returns the canvas-handle-to-process-name dictionary.

        :rtype dict{int, str}:
        """
        return self.__nameHandles

    def clearHandles(self):
        """Forgets all recorded canvas handles (dots and displayed names)."""
        self.__circleHandles.clear()
        self.__nameHandles.clear()

    def getColor(self):
        """Returns the Tkinter color assigned to this mapping.

        :rtype str:
        """
        return self.__color

    def getMappingDescription(self):
        """Returns the core-name-to-process-list description of the mapping.

        :rtype dict:
        """
        return self.__mappingDescription

    def getMappingId(self):
        """Returns the user-chosen id of this mapping.

        :rtype int:
        """
        return self.__mappingId

    def changeAffinity(self, circleHandle, peName):
        """Moves one process to another processing element.

        Wraps change_affinity of the underlying mocasin Mapping and refreshes
        the cached core description afterwards.

        :param int circleHandle: Canvas handle of the moved mapping dot.
        :param str peName: Name of the target processing element.
        """
        # The name handle is looked up at circleHandle + 1 -- presumably the
        # text item is created right after the dot item, so their canvas ids
        # are consecutive; confirm against the drawing code.
        processName = self.__nameHandles[circleHandle + 1]
        self.__mMappingObject.change_affinity(processName, peName)
        self.__mappingDescription = self.__mMappingObject.to_coreDict()
class TRMVplatformInformation:
    """Holds all information required to draw a platform.

    :ivar Platform __mPlatformObject: The platform to be drawn.
    :ivar __platformDescription: Hierarchic description of the platform that
        the drawManager class can interpret.
    :ivar dict __coreDictionary: PE name -> drawing info (start x/y, end x/y,
        canvas handle, ...).
    :ivar list[int] __coreClasses: Every PE size occurring in the platform.
    """

    def __init__(self, platformObject):
        """Initializes a TRMVplatformInformation object.

        :param Platform platformObject: The mocasin Platform to be drawn.
        """
        self.__mPlatformObject = platformObject
        self.__createPlatformDescription(platformObject)
        self.__coreDictionary = {}
        self.__coreClasses = []

    def __createPlatformDescription(self, platformObject):
        """Builds the hierarchic description consumed by the drawManager.

        :param Platform platformObject: The mocasin Platform to describe.
        """
        description = platformOperations.getPlatformDescription(
            platformObject.processors(), platformObject.primitives()
        )
        self.__mEqualList = platformOperations.findEqualPrimitives(
            platformObject
        )
        description = platformOperations.mergeEqualPrimitives(
            description, self.__mEqualList
        )
        # A group of more than two equal primitives is taken to indicate a
        # network-on-chip topology, which needs the NoC matrix layout.
        if any(len(equalSheet) > 2 for equalSheet in self.__mEqualList):
            description = platformOperations.createNocMatrix(
                description, platformObject
            )
        self.__platformDescription = description

    def getPlatformDescription(self):
        """Returns the hierarchic platform description.

        :rtype list[(str, [list])]:
        """
        return self.__platformDescription

    def addCoreClass(self, length):
        """Registers a processing-element size if it has not been seen yet.

        :param int length: The PE size to record.
        """
        if length not in self.__coreClasses:
            self.__coreClasses.append(length)

    def getCoreClasses(self):
        """Returns the list of known processing-element sizes.

        :rtype list[int]:
        """
        return self.__coreClasses

    def updateCoreDict(self, key, value):
        """Stores drawing information for one processing element.

        :param str key: The name of the processing element.
        :param list[int] value: Start x and y, end x and y, canvas handle and
            color of the processing element, in this order.
        """
        self.__coreDictionary[key] = value

    def getCoreDict(self):
        """Returns the PE-name-to-drawing-info dictionary.

        :rtype dict{str, list[int]}:
        """
        return self.__coreDictionary

    def clearHandles(self):
        """Drops all stored core drawing info so the platform can be redrawn."""
        self.__coreDictionary.clear()
class PlatformInformation:
    def __init__(self, platform):
        """Initializes a PlatformInformation object.

        :param Platform platform: The mocasin Platform whose cluster and
            primitive structure should be extracted for drawing.
        """
        self.__platformObject = platform
        # clusterId -> [peNames, primitiveNames, flag, flag, flag]; the three
        # boolean flags are set during extraction (their exact meaning --
        # NoC / L1 / L2 -- is inferred from that code, TODO confirm).
        self.__clusterDict = {}
        # primitiveName -> list of cluster ids sharing that primitive.
        self.__primitiveDict = {}
        # PE name -> drawing information, filled via updateCoreDict().
        self.__coreDict = {}
        self.__nextClusterId = 0
        # Primitives with a single producer (treated as L1-level primitives).
        self.__L1Prims = []
        # Distinct processing-element sizes encountered while drawing.
        self.__coreClasses = []
        self.__extractPlatformInformation(platform)
    def getPlatformObject(self):
        """Returns the wrapped mocasin Platform object."""
        return self.__platformObject
    def getClusterDict(self):
        """Returns the dict mapping each cluster id to its information list
        [peNames, primitiveNames, flag, flag, flag] (flags set during
        platform extraction)."""
        return self.__clusterDict
    def getPrimitiveDict(self):
        """Returns the dict mapping a shared primitive's name to the ids of
        the clusters it connects."""
        return self.__primitiveDict
def updateCoreDict(self, key, value):
self.__coreDict.update({key: value})
    def getCoreDict(self):
        """Returns the dict mapping PE names to their drawing information."""
        return self.__coreDict
def addCoreClass(self, length):
"""Adds a size of processing elements to the list of existing sizes of processing elements.
:param int length: The size of the processing element that should be appended.
"""
if not length in self.__coreClasses:
self.__coreClasses.append(length)
    def getCoreClasses(self):
        """Returns the list of distinct processing-element sizes seen so far."""
        return self.__coreClasses
def __extractPlatformInformation(self, platform):
primitives = list(platform.primitives())
self.__findClusters(primitives)
for prim in self.__L1Prims:
pe = prim.producers[0].name
for key in self.__clusterDict:
if pe in self.__clusterDict[key][0]:
self.__clusterDict[key][3] = True
for key in self.__clusterDict:
if len(self.__clusterDict[key][1]) == len(
self.__clusterDict[key][0]
):
self.__clusterDict[key][2] = True
self.__clusterDict[key][1] = ["network_on_chip"]
elif len(self.__clusterDict[key][1]) < len(
self.__clusterDict[key][0]
):
self.__clusterDict[key][4] = True
self.__clusterDict[key][1] = ["L2_Cache"]
elif len(self.__clusterDict[key][1]) > len(
self.__clusterDict[key][0]
):
self.__clusterDict[key][2] = True
self.__clusterDict[key][4] = True
self.__clusterDict[key][1] = ["network_on_chip"]
toRemove = []
for key in self.__primitiveDict:
if key in toRemove:
continue
clusterList = self.__primitiveDict[key]
for innerKey in self.__primitiveDict:
if (
not key == innerKey
and clusterList == self.__primitiveDict[innerKey]
):
toRemove.append(innerKey)
for key in toRemove:
self.__primitiveDict.pop(key)
def __findClusters(self, primitives):
smallestPrim = None
for prim in primitives:
if smallestPrim == None:
smallestPrim = prim
continue
else:
if len(prim.producers) < len(smallestPrim.producers):
smallestPrim = prim
if len(smallestPrim.producers) > 1:
toAdd = True
belongingClusters = []
for key in self.__clusterDict:
peList = self.__clusterDict[key][0]
for processor in smallestPrim.producers:
if (
processor.name in peList
and not key in belongingClusters
):
toAdd = False
belongingClusters.append(key)
if toAdd:
peNames = []
for processor in smallestPrim.producers:
peNames.append(processor.name)
self.__clusterDict.update(
{
self.__nextClusterId: [
peNames,
[smallestPrim.name],
False,
False,
False,
]
}
)
self.__nextClusterId += 1
else:
if len(belongingClusters) == 1:
self.__clusterDict[belongingClusters[0]][1].append(
smallestPrim.name
)
else:
self.__primitiveDict.update(
{smallestPrim.name: belongingClusters}
)
else:
self.__L1Prims.append(smallestPrim)
primitives.remove(smallestPrim)
if len(primitives) > 0:
self.__findClusters(primitives)
class platformLayout:
    """Assigns primitives / processing elements to slots of a square grid.

    Every slot of the ``dimension`` x ``dimension`` grid is a list
    [free, assignedElement, startX, startY, endX, endY].
    """

    def __init__(self, dimension, slotSizeX, slotSizeY):
        """Inner layout: [Free, startX, EndX, startY, endY, clusterID]"""
        self.__dimension = dimension
        self.__slotSizeX = slotSizeX
        self.__slotSizeY = slotSizeY
        self.__layout = []
        self.__primList = []
        self.__blankLayout()
        # id -> flat list of slot corner coordinates
        self.__primDict = {}
        self.__nameStack = []
        self.__currentName = None
        # Bad design, change later
        self.nextId = 0

    def __blankLayout(self):
        """Initialize blank layout"""
        for i in range(0, self.__dimension):
            self.__layout.append([])
            for j in range(0, self.__dimension):
                self.__layout[i].append(
                    [
                        True,
                        None,
                        j * self.__slotSizeX,
                        i * self.__slotSizeY,
                        (j + 1) * self.__slotSizeX,
                        (i + 1) * self.__slotSizeY,
                    ]
                )

    def addPrimitives(self, primDescription):
        """Add new Informations to the adjacency List"""
        tmpPrimDescription = dict(primDescription)
        while tmpPrimDescription:
            # Process the primitive covering the most processing elements first.
            longestKey = None
            for key in tmpPrimDescription:
                if longestKey is None or len(tmpPrimDescription[key]) > len(
                    tmpPrimDescription[longestKey]
                ):
                    longestKey = key
            if not self.__primList:
                self.__primList = tmpPrimDescription[longestKey]
            else:
                self.__primList = self.sortIn(
                    tmpPrimDescription[longestKey], self.__primList
                )
            tmpPrimDescription.pop(longestKey, None)
            self.__nameStack.append(longestKey)
        self.assignSlots(self.__primList)

    def sortIn(self, smallerList, biggerList):
        """Nests ``smallerList`` into ``biggerList`` at the correct level."""
        innerList = None
        atThisLevel = False
        toRemove = []
        for item in biggerList:
            if isinstance(item, list):
                # NOTE(review): relies on the helper ``listOperations``
                # defined elsewhere in this module.
                if listOperations.containsItem(item, smallerList[0]):
                    innerList = item
            else:
                if item in smallerList:
                    toRemove.append(item)
                    atThisLevel = True
        if atThisLevel:
            for item in toRemove:
                biggerList.remove(item)
            biggerList.append(smallerList)
        if innerList is not None:
            biggerList.remove(innerList)
            biggerList.append(self.sortIn(smallerList, innerList))
        return biggerList

    def assignSlots(self, clusterList):
        """Places the elements of ``clusterList`` on free, adjacent slots.

        Nested lists are assigned recursively first; their coordinates are
        merged into the entry created for this level.

        :returns: List of ids whose coordinates belong to this sub-tree.
        :raises RuntimeError: If a found path does not match the element count.
        """
        toAssign = []
        lowerPrimitives = []
        for element in clusterList:
            if isinstance(element, list):
                for idx in self.assignSlots(element):
                    lowerPrimitives.append(idx)
            else:
                toAssign.append(element)
        if len(toAssign) == 0:
            # Only nested lists at this level: merge the children's coordinates.
            if lowerPrimitives:
                self.__primDict.update({self.nextId: []})
                for idx in lowerPrimitives:
                    for value in self.__primDict[idx]:
                        self.__primDict[self.nextId].append(value)
                self.nextId += 1
                return [self.nextId - 1]
        else:
            for i in range(0, self.__dimension):
                if i % 2 == 0:
                    # Even rows are scanned left to right.
                    for j in range(0, self.__dimension):
                        if self.__layout[i][j][0]:
                            remaining = len(toAssign) - 1
                            path = [(i, j)]
                            posX = j
                            posY = i
                            # Greedily extend the path right, then down.
                            while remaining > 0:
                                if (
                                    posX < self.__dimension - 1
                                    and self.__layout[posY][posX + 1][0]
                                ):
                                    posX += 1
                                    path.append((posY, posX))
                                    remaining -= 1
                                elif (
                                    posY < self.__dimension - 1
                                    and self.__layout[posY + 1][posX][0]
                                ):
                                    posY += 1
                                    path.append((posY, posX))
                                    remaining -= 1
                                else:
                                    break
                            if remaining == 0:
                                if self.nextId not in self.__primDict:
                                    self.__primDict.update({self.nextId: []})
                                    for idx in lowerPrimitives:
                                        for entry in self.__primDict[idx]:
                                            self.__primDict[self.nextId].append(entry)
                                if len(toAssign) != len(path):
                                    raise RuntimeError("Something went wrong!")
                                else:
                                    for idx in range(0, len(toAssign)):
                                        slot = self.__layout[path[idx][0]][path[idx][1]]
                                        slot[0] = False
                                        slot[1] = toAssign[idx]
                                        self.__primDict[self.nextId].append(slot[2])
                                        self.__primDict[self.nextId].append(slot[3])
                                        self.__primDict[self.nextId].append(slot[4])
                                        self.__primDict[self.nextId].append(slot[5])
                                lowerPrimitives.append(self.nextId)
                                self.nextId += 1
                                return lowerPrimitives
                else:
                    # Odd rows are scanned right to left (serpentine order).
                    for j in range(self.__dimension - 1, -1, -1):
                        if self.__layout[i][j][0]:
                            remaining = len(toAssign) - 1
                            path = [(i, j)]
                            posX = j
                            posY = i
                            # Greedily extend the path left, then down.
                            while remaining > 0:
                                if posX > 0 and self.__layout[posY][posX - 1][0]:
                                    posX -= 1
                                    path.append((posY, posX))
                                    remaining -= 1
                                elif (
                                    posY < self.__dimension - 1
                                    and self.__layout[posY + 1][posX][0]
                                ):
                                    posY += 1
                                    path.append((posY, posX))
                                    remaining -= 1
                                else:
                                    break
                            if remaining == 0:
                                if self.nextId not in self.__primDict:
                                    self.__primDict.update({self.nextId: []})
                                    for idx in lowerPrimitives:
                                        for entry in self.__primDict[idx]:
                                            self.__primDict[self.nextId].append(entry)
                                if len(toAssign) != len(path):
                                    raise RuntimeError("Something went wrong!")
                                else:
                                    # Insert x and y values in swapped order and
                                    # revert the whole list in the end, so it can
                                    # be read from left to right later on.
                                    for idx in range(0, len(toAssign)):
                                        slot = self.__layout[path[idx][0]][path[idx][1]]
                                        slot[0] = False
                                        slot[1] = toAssign[idx]
                                        self.__primDict[self.nextId].append(slot[5])
                                        self.__primDict[self.nextId].append(slot[4])
                                        self.__primDict[self.nextId].append(slot[3])
                                        self.__primDict[self.nextId].append(slot[2])
                                    self.__primDict[self.nextId].reverse()
                                lowerPrimitives.append(self.nextId)
                                self.nextId += 1
                                return lowerPrimitives
        return lowerPrimitives

    def getLayout(self):
        """Yields each slot of the grid, row by row."""
        for i in range(0, self.__dimension):
            for j in range(0, self.__dimension):
                try:
                    yield self.__layout[i][j]
                except Exception:
                    # Narrowed from a bare ``except:`` which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    print("Failure!")

    def getPrimitives(self):
        """Yields (id, coordinate list) pairs, largest coordinate list first."""
        n = 0
        usedKeys = []
        while n < len(self.__primDict):
            actualKey = None
            for key in self.__primDict:
                if actualKey is None and key not in usedKeys:
                    actualKey = key
                else:
                    if actualKey is None:
                        continue
                    if (
                        len(self.__primDict[key]) > len(self.__primDict[actualKey])
                        and key not in usedKeys
                    ):
                        actualKey = key
            n += 1
            usedKeys.append(actualKey)
            # Fix: removed extraction junk ("| 0.913452 | 0.565299") that was
            # fused onto this yield and caused a TypeError at runtime.
            yield (actualKey, self.__primDict[actualKey])
import asyncio
import os
from serial import Serial
from serial.tools import list_ports
from .pybricks import PybricksHub
from ..tools import chunk
FILE_PACKET_SIZE = 1024
FILE_TRANSFER_SCRIPT = f"""
import sys
import micropython
import utime
PACKETSIZE = {FILE_PACKET_SIZE}
def receive_file(filename, filesize):
micropython.kbd_intr(-1)
with open(filename, "wb") as f:
# Initialize buffers
done = 0
buf = bytearray(PACKETSIZE)
sys.stdin.buffer.read(1)
while done < filesize:
# Size of last package
if filesize - done < PACKETSIZE:
buf = bytearray(filesize - done)
# Read one packet from standard in.
time_now = utime.ticks_ms()
bytes_read = sys.stdin.buffer.readinto(buf)
# If transmission took a long time, something bad happened.
if utime.ticks_ms() - time_now > 5000:
print("transfer timed out")
return
# Write the data and say we're ready for more.
f.write(buf)
done += bytes_read
print("ACK")
"""
class REPLHub:
    """Run scripts on generic MicroPython boards with a REPL over USB."""

    EOL = b"\r\n"  # MicroPython EOL

    def __init__(self):
        self.reset_buffers()

    def reset_buffers(self):
        """Resets internal buffers that track (parsed) serial data."""
        self.print_output = False
        self.output = []
        self.buffer = b""
        self.log_file = None
        # Flush whatever is still waiting on the serial port. On the very
        # first call (from __init__) self.serial does not exist yet.
        try:
            self.serial.read(self.serial.in_waiting)
        except AttributeError:
            pass

    async def connect(self, device=None):
        """Connects to a SPIKE Prime or MINDSTORMS Inventor Hub.

        :param device: Unused; present for interface compatibility.
        :raises OSError: If no matching USB serial device is found.
        """
        # Go through all comports.
        port = None
        devices = list_ports.comports()
        for dev in devices:
            if dev.product == "LEGO Technic Large Hub in FS Mode" or dev.vid == 0x0694:
                port = dev.device
                break
        # Raise error if there is no hub.
        if port is None:
            raise OSError("Could not find hub.")
        # Open the serial connection.
        print("Connecting to {0}".format(port))
        self.serial = Serial(port)
        self.serial.read(self.serial.in_waiting)
        print("Connected!")

    async def disconnect(self):
        """Disconnects from the hub."""
        self.serial.close()

    def parse_input(self):
        """Reads waiting serial data and parse as needed."""
        data = self.serial.read(self.serial.in_waiting)
        self.buffer += data

    def is_idle(self, key=b">>> "):
        """Checks if REPL is ready for a new command.

        :param bytes key: Prompt bytes that mark an idle REPL.
        :returns: True if the receive buffer currently ends with the prompt.
        """
        self.parse_input()
        return self.buffer[-len(key):] == key

    async def reset_hub(self):
        """Soft resets the hub to clear MicroPython variables."""
        # Cancel anything that is running
        for i in range(5):
            self.serial.write(b"\x03")
            await asyncio.sleep(0.1)
        # Soft reboot
        self.serial.write(b"\x04")
        await asyncio.sleep(0.5)
        # Prevent runtime from coming up
        while not self.is_idle():
            self.serial.write(b"\x03")
            await asyncio.sleep(0.1)
        # Clear all buffers
        self.reset_buffers()
        # Load file transfer function
        await self.exec_paste_mode(FILE_TRANSFER_SCRIPT, print_output=False)
        self.reset_buffers()
        print("Hub is ready.")

    async def exec_line(self, line, wait=True):
        """Executes one line on the REPL.

        :param str line: Python source line to execute.
        :param bool wait: Whether to wait until the REPL is idle again.
        :raises ValueError: If the hub did not echo the command back.
        """
        # Initialize
        self.reset_buffers()
        encoded = line.encode()
        start_len = len(self.buffer)
        # Write the command and prepare expected echo.
        echo = encoded + b"\r\n"
        self.serial.write(echo)
        # Wait until the echo has been read.
        while len(self.buffer) < start_len + len(echo):
            await asyncio.sleep(0.05)
            self.parse_input()
        # Raise error if we did not get the echo back.
        if echo not in self.buffer[start_len:]:
            print(start_len, self.buffer, self.buffer[start_len - 1:], echo)
            raise ValueError("Failed to execute line: {0}.".format(line))
        # We are done if we don't want to wait for the result.
        if not wait:
            return
        # Wait for MicroPython to execute the command.
        while not self.is_idle():
            await asyncio.sleep(0.1)

    # Reuse the Pybricks output-line parsing logic for hub output.
    line_handler = PybricksHub.line_handler

    async def exec_paste_mode(self, code, wait=True, print_output=True):
        """Executes commands via paste mode.

        :param str code: Source code to paste and run.
        :param bool wait: Whether to wait for the program to finish.
        :param bool print_output: Whether parsed output lines get printed.
        :raises ValueError: If the pasted code was not echoed back.
        """
        # Initialize buffers
        self.reset_buffers()
        self.print_output = print_output
        # Convert script string to binary.
        encoded = code.encode()
        # Enter paste mode.
        self.serial.write(b"\x05")
        while not self.is_idle(key=b"=== "):
            await asyncio.sleep(0.1)
        # Paste the script, chunk by chunk to avoid overrun
        start_len = len(self.buffer)
        echo = encoded + b"\r\n"
        for c in chunk(echo, 200):
            self.serial.write(c)
            # Wait until the pasted code is echoed back.
            while len(self.buffer) < start_len + len(c):
                await asyncio.sleep(0.05)
                self.parse_input()
            # If it isn't, then stop.
            if c not in self.buffer[start_len:]:
                print(start_len, self.buffer, self.buffer[start_len - 1:], echo)
                raise ValueError("Failed to paste: {0}.".format(code))
            start_len += len(c)
        # Parse hub output until the script is done.
        line_index = len(self.buffer)
        self.output = []
        # Exit paste mode and start executing.
        self.serial.write(b"\x04")
        # If we don't want to wait, we are done.
        if not wait:
            return
        # Look for output while the program runs
        while not self.is_idle():
            # Keep parsing hub data.
            self.parse_input()
            # Look for completed lines that we haven't parsed yet.
            next_line_index = self.buffer.find(self.EOL, line_index)
            if next_line_index >= 0:
                # If a new line is found, parse it.
                self.line_handler(self.buffer[line_index:next_line_index])
                line_index = next_line_index + len(self.EOL)
            await asyncio.sleep(0.1)
        # Parse remaining hub data.
        while (next_line_index := self.buffer.find(self.EOL, line_index)) >= 0:
            self.line_handler(self.buffer[line_index:next_line_index])
            line_index = next_line_index + len(self.EOL)

    async def run(self, py_path, wait=True, print_output=True):
        """Executes a script via paste mode.

        :param str py_path: Path of the Python script to run.
        """
        # Fix: close the script file deterministically instead of leaking it.
        with open(py_path) as script_file:
            script = script_file.read()
        self.script_dir, _ = os.path.split(py_path)
        await self.reset_hub()
        await self.exec_paste_mode(script, wait, print_output)

    async def upload_file(self, destination, contents):
        """Uploads a file to the hub.

        :param str destination: Path on the hub to write to.
        :param bytes contents: File contents.
        :raises ValueError: If a chunk was not acknowledged by the hub.
        """
        # Print upload info.
        size = len(contents)
        print(f"Uploading {destination} ({size} bytes)")
        self.reset_buffers()
        # Prepare hub to receive file
        await self.exec_line(f"receive_file('{destination}', {size})", wait=False)
        ACK = b"ACK" + self.EOL
        progress = 0
        # Write file chunk by chunk.
        for data in chunk(contents, FILE_PACKET_SIZE):
            # Send a chunk and wait for acknowledgement of receipt
            buffer_now = len(self.buffer)
            progress += self.serial.write(data)
            while len(self.buffer) < buffer_now + len(ACK):
                await asyncio.sleep(0.01)
                self.parse_input()
            # Raise error if we didn't get acknowledgement
            if self.buffer[buffer_now:buffer_now + len(ACK)] != ACK:
                print(self.buffer[buffer_now:])
                raise ValueError("Did not get expected response from the hub.")
            # Print progress
            print(f"Progress: {int(progress / size * 100)}%", end="\r")
        # Get REPL back in normal state. (Fix: extraction junk
        # "| pybricksdev/connections/lego.py" removed from this statement.)
        await self.exec_line("# File transfer complete")
import asyncio
import os
from serial import Serial
from serial.tools import list_ports
from .pybricks import PybricksHub
from ..tools import chunk
FILE_PACKET_SIZE = 1024
FILE_TRANSFER_SCRIPT = f"""
import sys
import micropython
import utime
PACKETSIZE = {FILE_PACKET_SIZE}
def receive_file(filename, filesize):
micropython.kbd_intr(-1)
with open(filename, "wb") as f:
# Initialize buffers
done = 0
buf = bytearray(PACKETSIZE)
sys.stdin.buffer.read(1)
while done < filesize:
# Size of last package
if filesize - done < PACKETSIZE:
buf = bytearray(filesize - done)
# Read one packet from standard in.
time_now = utime.ticks_ms()
bytes_read = sys.stdin.buffer.readinto(buf)
# If transmission took a long time, something bad happened.
if utime.ticks_ms() - time_now > 5000:
print("transfer timed out")
return
# Write the data and say we're ready for more.
f.write(buf)
done += bytes_read
print("ACK")
"""
class REPLHub:
    """Run scripts on generic MicroPython boards with a REPL over USB."""

    EOL = b"\r\n"  # MicroPython EOL

    def __init__(self):
        self.reset_buffers()

    def reset_buffers(self):
        """Resets internal buffers that track (parsed) serial data."""
        self.print_output = False
        self.output = []
        self.buffer = b""
        self.log_file = None
        # Flush whatever is still waiting on the serial port. On the very
        # first call (from __init__) self.serial does not exist yet.
        try:
            self.serial.read(self.serial.in_waiting)
        except AttributeError:
            pass

    async def connect(self, device=None):
        """Connects to a SPIKE Prime or MINDSTORMS Inventor Hub.

        :param device: Unused; present for interface compatibility.
        :raises OSError: If no matching USB serial device is found.
        """
        # Go through all comports.
        port = None
        devices = list_ports.comports()
        for dev in devices:
            if dev.product == "LEGO Technic Large Hub in FS Mode" or dev.vid == 0x0694:
                port = dev.device
                break
        # Raise error if there is no hub.
        if port is None:
            raise OSError("Could not find hub.")
        # Open the serial connection.
        print("Connecting to {0}".format(port))
        self.serial = Serial(port)
        self.serial.read(self.serial.in_waiting)
        print("Connected!")

    async def disconnect(self):
        """Disconnects from the hub."""
        self.serial.close()

    def parse_input(self):
        """Reads waiting serial data and parse as needed."""
        data = self.serial.read(self.serial.in_waiting)
        self.buffer += data

    def is_idle(self, key=b">>> "):
        """Checks if REPL is ready for a new command.

        :param bytes key: Prompt bytes that mark an idle REPL.
        :returns: True if the receive buffer currently ends with the prompt.
        """
        self.parse_input()
        return self.buffer[-len(key):] == key

    async def reset_hub(self):
        """Soft resets the hub to clear MicroPython variables."""
        # Cancel anything that is running
        for i in range(5):
            self.serial.write(b"\x03")
            await asyncio.sleep(0.1)
        # Soft reboot
        self.serial.write(b"\x04")
        await asyncio.sleep(0.5)
        # Prevent runtime from coming up
        while not self.is_idle():
            self.serial.write(b"\x03")
            await asyncio.sleep(0.1)
        # Clear all buffers
        self.reset_buffers()
        # Load file transfer function
        await self.exec_paste_mode(FILE_TRANSFER_SCRIPT, print_output=False)
        self.reset_buffers()
        print("Hub is ready.")

    async def exec_line(self, line, wait=True):
        """Executes one line on the REPL.

        :param str line: Python source line to execute.
        :param bool wait: Whether to wait until the REPL is idle again.
        :raises ValueError: If the hub did not echo the command back.
        """
        # Initialize
        self.reset_buffers()
        encoded = line.encode()
        start_len = len(self.buffer)
        # Write the command and prepare expected echo.
        echo = encoded + b"\r\n"
        self.serial.write(echo)
        # Wait until the echo has been read.
        while len(self.buffer) < start_len + len(echo):
            await asyncio.sleep(0.05)
            self.parse_input()
        # Raise error if we did not get the echo back.
        if echo not in self.buffer[start_len:]:
            print(start_len, self.buffer, self.buffer[start_len - 1:], echo)
            raise ValueError("Failed to execute line: {0}.".format(line))
        # We are done if we don't want to wait for the result.
        if not wait:
            return
        # Wait for MicroPython to execute the command.
        while not self.is_idle():
            await asyncio.sleep(0.1)

    # Reuse the Pybricks output-line parsing logic for hub output.
    line_handler = PybricksHub.line_handler

    async def exec_paste_mode(self, code, wait=True, print_output=True):
        """Executes commands via paste mode.

        :param str code: Source code to paste and run.
        :param bool wait: Whether to wait for the program to finish.
        :param bool print_output: Whether parsed output lines get printed.
        :raises ValueError: If the pasted code was not echoed back.
        """
        # Initialize buffers
        self.reset_buffers()
        self.print_output = print_output
        # Convert script string to binary.
        encoded = code.encode()
        # Enter paste mode.
        self.serial.write(b"\x05")
        while not self.is_idle(key=b"=== "):
            await asyncio.sleep(0.1)
        # Paste the script, chunk by chunk to avoid overrun
        start_len = len(self.buffer)
        echo = encoded + b"\r\n"
        for c in chunk(echo, 200):
            self.serial.write(c)
            # Wait until the pasted code is echoed back.
            while len(self.buffer) < start_len + len(c):
                await asyncio.sleep(0.05)
                self.parse_input()
            # If it isn't, then stop.
            if c not in self.buffer[start_len:]:
                print(start_len, self.buffer, self.buffer[start_len - 1:], echo)
                raise ValueError("Failed to paste: {0}.".format(code))
            start_len += len(c)
        # Parse hub output until the script is done.
        line_index = len(self.buffer)
        self.output = []
        # Exit paste mode and start executing.
        self.serial.write(b"\x04")
        # If we don't want to wait, we are done.
        if not wait:
            return
        # Look for output while the program runs
        while not self.is_idle():
            # Keep parsing hub data.
            self.parse_input()
            # Look for completed lines that we haven't parsed yet.
            next_line_index = self.buffer.find(self.EOL, line_index)
            if next_line_index >= 0:
                # If a new line is found, parse it.
                self.line_handler(self.buffer[line_index:next_line_index])
                line_index = next_line_index + len(self.EOL)
            await asyncio.sleep(0.1)
        # Parse remaining hub data.
        while (next_line_index := self.buffer.find(self.EOL, line_index)) >= 0:
            self.line_handler(self.buffer[line_index:next_line_index])
            line_index = next_line_index + len(self.EOL)

    async def run(self, py_path, wait=True, print_output=True):
        """Executes a script via paste mode.

        :param str py_path: Path of the Python script to run.
        """
        # Fix: close the script file deterministically instead of leaking it.
        with open(py_path) as script_file:
            script = script_file.read()
        self.script_dir, _ = os.path.split(py_path)
        await self.reset_hub()
        await self.exec_paste_mode(script, wait, print_output)

    async def upload_file(self, destination, contents):
        """Uploads a file to the hub.

        :param str destination: Path on the hub to write to.
        :param bytes contents: File contents.
        :raises ValueError: If a chunk was not acknowledged by the hub.
        """
        # Print upload info.
        size = len(contents)
        print(f"Uploading {destination} ({size} bytes)")
        self.reset_buffers()
        # Prepare hub to receive file
        await self.exec_line(f"receive_file('{destination}', {size})", wait=False)
        ACK = b"ACK" + self.EOL
        progress = 0
        # Write file chunk by chunk.
        for data in chunk(contents, FILE_PACKET_SIZE):
            # Send a chunk and wait for acknowledgement of receipt
            buffer_now = len(self.buffer)
            progress += self.serial.write(data)
            while len(self.buffer) < buffer_now + len(ACK):
                await asyncio.sleep(0.01)
                self.parse_input()
            # Raise error if we didn't get acknowledgement
            if self.buffer[buffer_now:buffer_now + len(ACK)] != ACK:
                print(self.buffer[buffer_now:])
                raise ValueError("Did not get expected response from the hub.")
            # Print progress
            print(f"Progress: {int(progress / size * 100)}%", end="\r")
        # Get REPL back in normal state. (Fix: extraction junk
        # "| 0.566498 | 0.188492" removed from this statement.)
        await self.exec_line("# File transfer complete")
# Download every <img> src from an Airbnb search-results page and save each
# one as a<N>.jpg in the current working directory.
# NOTE(review): assumes every src is a fetchable absolute image URL — data:
# URIs or relative paths would make requests.get fail; confirm before reuse.
import requests
from bs4 import BeautifulSoup
import os
url="https://www.airbnb.com.tw/s/Tainan-City/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=august&flexible_trip_dates%5B%5D=september&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&query=Tainan%20City&place_id=ChIJE_4_lcx8bjQRTnbcpapMf9Q&checkin=2021-08-11&checkout=2021-08-12&source=structured_search_input_header&search_type=autocomplete_click"
r=requests.get(url)
soup=BeautifulSoup(r.text,'html.parser')
images=soup.find_all('img')
i=0
for image in images:
    link=image['src']
    print(link)
    # Save each image under a sequential filename.
    with open('a'+str(i)+'.jpg','wb') as f:
        img=requests.get(link)
        f.write(img.content)
    i+=1
#%%
import requests
from bs4 import BeautifulSoup
import os
import re

url='https://tw.news.yahoo.com/%E5%9C%A8%E7%BE%8E%E5%9C%8B%E6%89%93%E5%88%B0%E7%96%AB%E8%8B%97%E4%BA%86-%E4%BD%95%E5%BA%AD%E6%AD%A1%E6%9B%9D%E6%B3%A8%E5%B0%84%E5%8F%8D%E6%87%89-050504433.html'
# Bug fix: the original called imagedownload(url, 'c:/test2') here, *before*
# the function is defined below, which raises NameError when this file is run
# top to bottom. The same call is repeated after the definition, so the
# premature call was removed.
def imagedownload(url, folder):
    """Download every image referenced by <img> tags on a web page.

    :param str url: Page to scrape.
    :param str folder: Target directory, joined against the current working
        directory (an absolute path wins, per os.path.join semantics). The
        process chdirs into it, so later relative paths are affected!
    """
    # Create the target directory (including parents); only "already exists"
    # style OS errors are tolerated instead of the original bare except.
    try:
        os.makedirs(os.path.join(os.getcwd(), folder), exist_ok=True)
    except OSError:
        pass
    os.chdir(os.path.join(os.getcwd(), folder))
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    i = 0
    for image in soup.find_all('img'):
        # Bug fix: the original printed images[i], which goes stale because i
        # only advances for recognized image types; print the current tag.
        print(image)
        link = image['src']
        print(link)
        # Pick the file extension via substring match on the URL, as before.
        if 'jpg' in link or 'jpeg' in link:
            ext = '.jpg'
        elif 'png' in link:
            ext = '.png'
        elif 'gif' in link:
            ext = '.gif'
        else:
            print('這不是圖片')  # runtime message kept verbatim ("not an image")
            continue
        with open(str(i) + ext, 'wb') as f:
            f.write(requests.get(link).content)
        i += 1
#%%
# Scratch cell: the first three url assignments are dead code — only the last
# one (the Yahoo news article) is actually passed to imagedownload below.
url='https://www.airbnb.com.tw/rooms/30408488?check_in=2021-08-27&check_out=2021-08-28&translate_ugc=false&federated_search_id=fb3016d6-9a81-4041-99b3-87c196274f7b&source_impression_id=p3_1628602696_r5ah8CeuNTktx7RV&guests=1&adults=1'
url='https://www.airbnb.com.tw/s/Tainan-City/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=august&flexible_trip_dates%5B%5D=september&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&query=Tainan%20City&place_id=ChIJE_4_lcx8bjQRTnbcpapMf9Q&checkin=2021-08-27&checkout=2021-08-28&source=structured_search_input_header&search_type=autocomplete_click'
url='https://www.airbnb.com.tw/s/Tainan-City/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=august&flexible_trip_dates%5B%5D=september&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&query=Tainan%20City&place_id=ChIJE_4_lcx8bjQRTnbcpapMf9Q&checkin=2021-08-27&checkout=2021-08-28&source=structured_search_input_header&search_type=autocomplete_click'
url='https://tw.news.yahoo.com/%E5%9C%A8%E7%BE%8E%E5%9C%8B%E6%89%93%E5%88%B0%E7%96%AB%E8%8B%97%E4%BA%86-%E4%BD%95%E5%BA%AD%E6%AD%A1%E6%9B%9D%E6%B3%A8%E5%B0%84%E5%8F%8D%E6%87%89-050504433.html'
imagedownload(url,'c:/test2')
#%%
# Log in to the-internet.herokuapp.com test site with Selenium.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver=webdriver.Chrome('D:/py/chromedriver')
url='http://the-internet.herokuapp.com/login'
driver.get(url)
# Fill the login form by XPath and submit.
driver.find_element_by_xpath('.//*[@id="username"]').send_keys('tomsmith')
driver.find_element_by_xpath('.//*[@id="password"]').send_keys('<PASSWORD>!')
driver.find_element_by_xpath('.//*[@id="login"]/button/i').click()
# (translated) pin down a single element/line
time.sleep(10)
driver.quit()
#browser = webdriver.Firefox()  # (translated) put geckodriver into the install path
#browser.get('http://the-internet.herokuapp.com/login')
#%%
##
##
#
# IMPORTANT (translated from: 重要)
##
##
# Render a JavaScript-heavy page with Selenium and save its static HTML.
from selenium import webdriver
from bs4 import BeautifulSoup
import time
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.implicitly_wait(10)  # implicit wait of up to 10 seconds
driver.get('https://hahow.in/courses')  # fetch the example page
print(driver.title)
soup=BeautifulSoup(driver.page_source,'lxml')
fp=open('c:/test2/hahow1.html','w',encoding='utf8')
fp.write(soup.prettify())
print('已存靜態網頁內容…')  # runtime message kept verbatim ("static page saved")
fp.close()
time.sleep(5)
driver.quit()
#%%
# Scrape course titles from the dynamically rendered hahow.in page.
import time
from selenium import webdriver

driver=webdriver.Chrome('D:/py/chromedriver')
driver.implicitly_wait(10)  # implicit wait of up to 10 seconds
url='https://hahow.in/courses'
driver.get(url)  # fetch the dynamic page
# A partial CSS selector can match many elements at once.
items=driver.find_elements_by_css_selector('h4.title')
#print(items)
for item in items:
    print(item.text+'\n')
# Bug fix: a stray fp.close() left over from the previous cell was removed;
# fp is not defined in this cell and raised NameError when run standalone.
time.sleep(5)
driver.quit()
#TypeError: 'WebElement' object is not iterable
# (translated) the object being processed cannot be looped over
#%%
# Navigate google.com: click Gmail, go back, then click via partial link text.
from selenium import webdriver
import time
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.get('https://www.google.com/')
time.sleep(3)
driver.find_element_by_link_text('Gmail').click()
time.sleep(3)
driver.back()
time.sleep(3)
driver.find_element_by_partial_link_text('il').click()
# %%
# Open google.com and dump the page title and the raw HTML source.
from selenium import webdriver
import time
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.implicitly_wait(10)
driver.get('https://www.google.com/')
print(driver.title)
html=driver.page_source
print(html)
time.sleep(5)
driver.quit()
#%%
# Test web site (translated from: 測試網站)
# Automation (translated from: 自 動化)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver=webdriver.Chrome('D:/py/chromedriver')
url='http://the-internet.herokuapp.com/login'
driver.get(url)
time.sleep(1)
driver.find_element_by_xpath('.//*[@id="username"]').send_keys('tom<PASSWORD>')
time.sleep(1)
driver.find_element_by_xpath('.//*[@id="password"]').send_keys('<PASSWORD>!')
# (translated) the username string uses " ", the XPath uses ' '
time.sleep(1)
driver.find_element_by_xpath('.//*[@id="login"]/button').click()
# (translated) right-click > Inspect, find the element, then Copy > Copy XPath
time.sleep(5)
driver.quit()
# %%
# Render hahow.in with Selenium and save the prettified HTML to disk.
import time
from selenium import webdriver
from bs4 import BeautifulSoup
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.implicitly_wait(10)  # implicit wait of up to 10 seconds
driver.get('https://hahow.in/courses')  # fetch the dynamic page
print(driver.title)
soup=BeautifulSoup(driver.page_source,'lxml')
fp=open('c:/test2/hahow1.html','w',encoding='utf8')
fp.write(soup.prettify())
print('已存靜態網頁內容...')  # runtime message kept verbatim ("static page saved")
fp.close()
time.sleep(5)
driver.quit()
# %%
# %%
# %%
# %%
# %%
# %%
# (extraction artifact cleaned: a file-path column and metadata were fused
# into this comment line)
# Download every <img> src from an Airbnb search-results page and save each
# one as a<N>.jpg in the current working directory.
# NOTE(review): relies on `requests` imported earlier in this file.
from bs4 import BeautifulSoup
import os
url="https://www.airbnb.com.tw/s/Tainan-City/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=august&flexible_trip_dates%5B%5D=september&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&query=Tainan%20City&place_id=ChIJE_4_lcx8bjQRTnbcpapMf9Q&checkin=2021-08-11&checkout=2021-08-12&source=structured_search_input_header&search_type=autocomplete_click"
r=requests.get(url)
soup=BeautifulSoup(r.text,'html.parser')
images=soup.find_all('img')
i=0
for image in images:
    link=image['src']
    print(link)
    # Save each image under a sequential filename.
    with open('a'+str(i)+'.jpg','wb') as f:
        img=requests.get(link)
        f.write(img.content)
    i+=1
#%%
import requests
from bs4 import BeautifulSoup
import os
import re

url='https://tw.news.yahoo.com/%E5%9C%A8%E7%BE%8E%E5%9C%8B%E6%89%93%E5%88%B0%E7%96%AB%E8%8B%97%E4%BA%86-%E4%BD%95%E5%BA%AD%E6%AD%A1%E6%9B%9D%E6%B3%A8%E5%B0%84%E5%8F%8D%E6%87%89-050504433.html'
# Bug fix: the original called imagedownload(url, 'c:/test2') here, *before*
# the function is defined below, which raises NameError when this file is run
# top to bottom. The same call is repeated after the definition, so the
# premature call was removed.
def imagedownload(url, folder):
    """Download every image referenced by <img> tags on a web page.

    :param str url: Page to scrape.
    :param str folder: Target directory, joined against the current working
        directory (an absolute path wins, per os.path.join semantics). The
        process chdirs into it, so later relative paths are affected!
    """
    # Create the target directory (including parents); only "already exists"
    # style OS errors are tolerated instead of the original bare except.
    try:
        os.makedirs(os.path.join(os.getcwd(), folder), exist_ok=True)
    except OSError:
        pass
    os.chdir(os.path.join(os.getcwd(), folder))
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    i = 0
    for image in soup.find_all('img'):
        # Bug fix: the original printed images[i], which goes stale because i
        # only advances for recognized image types; print the current tag.
        print(image)
        link = image['src']
        print(link)
        # Pick the file extension via substring match on the URL, as before.
        if 'jpg' in link or 'jpeg' in link:
            ext = '.jpg'
        elif 'png' in link:
            ext = '.png'
        elif 'gif' in link:
            ext = '.gif'
        else:
            print('這不是圖片')  # runtime message kept verbatim ("not an image")
            continue
        with open(str(i) + ext, 'wb') as f:
            f.write(requests.get(link).content)
        i += 1
#%%
# Scratch cell: the first three url assignments are dead code — only the last
# one (the Yahoo news article) is actually passed to imagedownload below.
url='https://www.airbnb.com.tw/rooms/30408488?check_in=2021-08-27&check_out=2021-08-28&translate_ugc=false&federated_search_id=fb3016d6-9a81-4041-99b3-87c196274f7b&source_impression_id=p3_1628602696_r5ah8CeuNTktx7RV&guests=1&adults=1'
url='https://www.airbnb.com.tw/s/Tainan-City/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=august&flexible_trip_dates%5B%5D=september&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&query=Tainan%20City&place_id=ChIJE_4_lcx8bjQRTnbcpapMf9Q&checkin=2021-08-27&checkout=2021-08-28&source=structured_search_input_header&search_type=autocomplete_click'
url='https://www.airbnb.com.tw/s/Tainan-City/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&flexible_trip_dates%5B%5D=august&flexible_trip_dates%5B%5D=september&flexible_trip_lengths%5B%5D=weekend_trip&date_picker_type=calendar&query=Tainan%20City&place_id=ChIJE_4_lcx8bjQRTnbcpapMf9Q&checkin=2021-08-27&checkout=2021-08-28&source=structured_search_input_header&search_type=autocomplete_click'
url='https://tw.news.yahoo.com/%E5%9C%A8%E7%BE%8E%E5%9C%8B%E6%89%93%E5%88%B0%E7%96%AB%E8%8B%97%E4%BA%86-%E4%BD%95%E5%BA%AD%E6%AD%A1%E6%9B%9D%E6%B3%A8%E5%B0%84%E5%8F%8D%E6%87%89-050504433.html'
imagedownload(url,'c:/test2')
#%%
# Log in to the-internet.herokuapp.com test site with Selenium.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver=webdriver.Chrome('D:/py/chromedriver')
url='http://the-internet.herokuapp.com/login'
driver.get(url)
# Fill the login form by XPath and submit.
driver.find_element_by_xpath('.//*[@id="username"]').send_keys('tomsmith')
driver.find_element_by_xpath('.//*[@id="password"]').send_keys('<PASSWORD>!')
driver.find_element_by_xpath('.//*[@id="login"]/button/i').click()
# (translated) pin down a single element/line
time.sleep(10)
driver.quit()
#browser = webdriver.Firefox()  # (translated) put geckodriver into the install path
#browser.get('http://the-internet.herokuapp.com/login')
#%%
##
##
#
# IMPORTANT (translated from: 重要)
##
##
# Render a JavaScript-heavy page with Selenium and save its static HTML.
from selenium import webdriver
from bs4 import BeautifulSoup
import time
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.implicitly_wait(10)  # implicit wait of up to 10 seconds
driver.get('https://hahow.in/courses')  # fetch the example page
print(driver.title)
soup=BeautifulSoup(driver.page_source,'lxml')
fp=open('c:/test2/hahow1.html','w',encoding='utf8')
fp.write(soup.prettify())
print('已存靜態網頁內容…')  # runtime message kept verbatim ("static page saved")
fp.close()
time.sleep(5)
driver.quit()
#%%
# Scrape course titles from the dynamically rendered hahow.in page.
import time
from selenium import webdriver

driver=webdriver.Chrome('D:/py/chromedriver')
driver.implicitly_wait(10)  # implicit wait of up to 10 seconds
url='https://hahow.in/courses'
driver.get(url)  # fetch the dynamic page
# A partial CSS selector can match many elements at once.
items=driver.find_elements_by_css_selector('h4.title')
#print(items)
for item in items:
    print(item.text+'\n')
# Bug fix: a stray fp.close() left over from the previous cell was removed;
# fp is not defined in this cell and raised NameError when run standalone.
time.sleep(5)
driver.quit()
#TypeError: 'WebElement' object is not iterable
# (translated) the object being processed cannot be looped over
#%%
# Navigate google.com: click Gmail, go back, then click via partial link text.
from selenium import webdriver
import time
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.get('https://www.google.com/')
time.sleep(3)
driver.find_element_by_link_text('Gmail').click()
time.sleep(3)
driver.back()
time.sleep(3)
driver.find_element_by_partial_link_text('il').click()
# %%
# Open google.com and dump the page title and the raw HTML source.
from selenium import webdriver
import time
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.implicitly_wait(10)
driver.get('https://www.google.com/')
print(driver.title)
html=driver.page_source
print(html)
time.sleep(5)
driver.quit()
#%%
#測試網站
#自 動化
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver=webdriver.Chrome('D:/py/chromedriver')
url='http://the-internet.herokuapp.com/login'
driver.get(url)
time.sleep(1)
driver.find_element_by_xpath('.//*[@id="username"]').send_keys('tom<PASSWORD>')
time.sleep(1)
driver.find_element_by_xpath('.//*[@id="password"]').send_keys('<PASSWORD>!')
#username 那" " ,xpath 那 ''
time.sleep(1)
driver.find_element_by_xpath('.//*[@id="login"]/button').click()
# 滑鼠右鍵檢查後 ,找到程式碼後,copy > copy Xpath
time.sleep(5)
driver.quit()
# %%
import time
from selenium import webdriver
from bs4 import BeautifulSoup
driver=webdriver.Chrome('D:/py/chromedriver')
driver.maximize_window()
driver.implicitly_wait(10) #隱藏等待10秒
driver.get('https://hahow.in/courses') #取得動態
print(driver.title)
soup=BeautifulSoup(driver.page_source,'lxml')
fp=open('c:/test2/hahow1.html','w',encoding='utf8')
fp.write(soup.prettify())
print('已存靜態網頁內容...')
fp.close()
time.sleep(5)
driver.quit()
# %%
# %%
# %%
# %%
# %%
# %% | 0.082771 | 0.09236 |
import os
import sys
import time
import numpy as np
from datetime import timedelta
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
class Logger(object):
"""
generate logger files(train.log and tensorboard log file) in experiment root
"""
def __init__(self, args, mode='train'):
if not os.path.exists(args.experiment_root):
os.makedirs(args.experiment_root)
else:
print("Experiment root `{}` already exists".format(args.experiment_root))
sys.exit()
self.writer = SummaryWriter(args.experiment_root)
print('{} using the following parameters:'.format(mode))
for k, v in sorted(vars(args).items()):
print('{}: {}'.format(k, v))
# self.iters = args.iters
self.iter_per_epoch = args.iter_per_epoch
self.epochs = args.epochs
self.iters = args.epochs * args.iter_per_epoch
self.start_time = time.time()
self.times = [0] * 20
self.i = 0
def save_log(self, epoch, step, log):
global_step = step + (epoch - 1) * self.iter_per_epoch
p_time = time.time()
self.times[self.i] = p_time - self.start_time
self.start_time = p_time
self.i = (self.i + 1) % 20
eta = int((self.iters - global_step) * sum(self.times) / 20)
info = 'Epoch {}/{} Iter {}/{} -> '.format(epoch, self.epochs, step, self.iter_per_epoch)
for i in range(len(log) // 2):
k, v = log[2 * i], log[2 * i + 1]
self.writer.add_scalar(k, v, global_step)
info += '{} : {:.3f}, '.format(k, v)
print(info + 'ETA : {}'.format(timedelta(seconds=eta)))
class Trainer(object):
"""
Train a model
"""
def __init__(self, args, dataset, model, optimizer, scheduler=None, device=None, cudnn=True):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device
self.loader = DataLoader(dataset, args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, pin_memory=True)
self.model = torch.nn.DataParallel(model).to(self.device).train()
self.optimizer = optimizer
if scheduler is None:
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, args.lr_decay_epoch, args.gamma)
else:
self.scheduler = scheduler
args.iter_per_epoch = len(self.loader)
self.args = args
self.Log = Logger(args)
if cudnn:
torch.backends.cudnn.benchmark = True
self.start_epoch = 1
def train(self, cal_loss):
for epoch in range(self.start_epoch, self.args.epochs+1):
self.scheduler.step()
for step, data in enumerate(self.loader, 1):
self.optimizer.zero_grad()
ret = cal_loss(data, self.model)
loss, log = ret
loss.backward()
self.optimizer.step()
self.Log.save_log(epoch, step, log)
if epoch % self.args.save_epoch == 0:
state_dict = self.model.cpu().module.state_dict()
torch.save(state_dict, os.path.join(self.args.experiment_root, 'model{}.pkl'.format(epoch)))
self.model.to(self.device)
class Tester(object):
def __init__(self, args, model, dataset, cudnn=True):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if args.checkpoint is not None:
model.load_state_dict(torch.load(os.path.join(args.experiment_root, args.checkpoint)), False)
self.model = torch.nn.DataParallel(model).to(self.device).eval()
self.loader = DataLoader(dataset, args.test_batch_size, num_workers=args.num_workers)
torch.set_grad_enabled(False)
if cudnn:
torch.backends.cudnn.benchmark = True
def image_feature(self):
print('Compute image features')
embs = []
for images in tqdm(self.loader):
images = images.to(self.device)
b, au, c, h, w = images.size()
emb = self.model(images.view(-1, c, h, w))
_, *s = emb.size()
emb = emb.view(b, au, *s).mean(dim=1)
embs.append(emb.cpu().numpy())
embs = np.concatenate(embs, axis=0)
torch.cuda.empty_cache()
return embs | reid/utils/trainer.py | import os
import sys
import time
import numpy as np
from datetime import timedelta
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
class Logger(object):
"""
generate logger files(train.log and tensorboard log file) in experiment root
"""
def __init__(self, args, mode='train'):
if not os.path.exists(args.experiment_root):
os.makedirs(args.experiment_root)
else:
print("Experiment root `{}` already exists".format(args.experiment_root))
sys.exit()
self.writer = SummaryWriter(args.experiment_root)
print('{} using the following parameters:'.format(mode))
for k, v in sorted(vars(args).items()):
print('{}: {}'.format(k, v))
# self.iters = args.iters
self.iter_per_epoch = args.iter_per_epoch
self.epochs = args.epochs
self.iters = args.epochs * args.iter_per_epoch
self.start_time = time.time()
self.times = [0] * 20
self.i = 0
def save_log(self, epoch, step, log):
global_step = step + (epoch - 1) * self.iter_per_epoch
p_time = time.time()
self.times[self.i] = p_time - self.start_time
self.start_time = p_time
self.i = (self.i + 1) % 20
eta = int((self.iters - global_step) * sum(self.times) / 20)
info = 'Epoch {}/{} Iter {}/{} -> '.format(epoch, self.epochs, step, self.iter_per_epoch)
for i in range(len(log) // 2):
k, v = log[2 * i], log[2 * i + 1]
self.writer.add_scalar(k, v, global_step)
info += '{} : {:.3f}, '.format(k, v)
print(info + 'ETA : {}'.format(timedelta(seconds=eta)))
class Trainer(object):
"""
Train a model
"""
def __init__(self, args, dataset, model, optimizer, scheduler=None, device=None, cudnn=True):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device
self.loader = DataLoader(dataset, args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, pin_memory=True)
self.model = torch.nn.DataParallel(model).to(self.device).train()
self.optimizer = optimizer
if scheduler is None:
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, args.lr_decay_epoch, args.gamma)
else:
self.scheduler = scheduler
args.iter_per_epoch = len(self.loader)
self.args = args
self.Log = Logger(args)
if cudnn:
torch.backends.cudnn.benchmark = True
self.start_epoch = 1
def train(self, cal_loss):
for epoch in range(self.start_epoch, self.args.epochs+1):
self.scheduler.step()
for step, data in enumerate(self.loader, 1):
self.optimizer.zero_grad()
ret = cal_loss(data, self.model)
loss, log = ret
loss.backward()
self.optimizer.step()
self.Log.save_log(epoch, step, log)
if epoch % self.args.save_epoch == 0:
state_dict = self.model.cpu().module.state_dict()
torch.save(state_dict, os.path.join(self.args.experiment_root, 'model{}.pkl'.format(epoch)))
self.model.to(self.device)
class Tester(object):
def __init__(self, args, model, dataset, cudnn=True):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if args.checkpoint is not None:
model.load_state_dict(torch.load(os.path.join(args.experiment_root, args.checkpoint)), False)
self.model = torch.nn.DataParallel(model).to(self.device).eval()
self.loader = DataLoader(dataset, args.test_batch_size, num_workers=args.num_workers)
torch.set_grad_enabled(False)
if cudnn:
torch.backends.cudnn.benchmark = True
def image_feature(self):
print('Compute image features')
embs = []
for images in tqdm(self.loader):
images = images.to(self.device)
b, au, c, h, w = images.size()
emb = self.model(images.view(-1, c, h, w))
_, *s = emb.size()
emb = emb.view(b, au, *s).mean(dim=1)
embs.append(emb.cpu().numpy())
embs = np.concatenate(embs, axis=0)
torch.cuda.empty_cache()
return embs | 0.574395 | 0.239227 |
import apps.basics.op_drf.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Tool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', apps.basics.op_drf.fields.DescriptionField(blank=True, default='', help_text='描述', null=True, verbose_name='描述')),
('modifier', apps.basics.op_drf.fields.ModifierCharField(blank=True, help_text='该记录最后修改者', max_length=255, null=True, verbose_name='修改者')),
('dept_belong_id', models.CharField(blank=True, max_length=64, null=True, verbose_name='数据归属部门')),
('update_datetime', apps.basics.op_drf.fields.UpdateDateTimeField(auto_now=True, help_text='修改时间', null=True, verbose_name='修改时间')),
('create_datetime', apps.basics.op_drf.fields.CreateDateTimeField(auto_now_add=True, help_text='创建时间', null=True, verbose_name='创建时间')),
('name', models.CharField(max_length=100, verbose_name='埋点名称')),
('p_type', models.IntegerField(default=1, verbose_name='埋点类型')),
('key_yn', models.IntegerField(default=0, verbose_name='标记核心')),
('status', models.IntegerField(default=1, verbose_name='埋点状态')),
('page_ext', models.TextField(verbose_name='页面扩展信息')),
('action_ext', models.TextField(verbose_name='事件扩展信息')),
('creator', models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_query_name='creator_query', to=settings.AUTH_USER_MODEL, verbose_name='创建者')),
],
options={
'verbose_name': '埋点',
'verbose_name_plural': '埋点',
},
),
] | backend/apps/projects/point/migrations/0001_initial.py |
import apps.basics.op_drf.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Tool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', apps.basics.op_drf.fields.DescriptionField(blank=True, default='', help_text='描述', null=True, verbose_name='描述')),
('modifier', apps.basics.op_drf.fields.ModifierCharField(blank=True, help_text='该记录最后修改者', max_length=255, null=True, verbose_name='修改者')),
('dept_belong_id', models.CharField(blank=True, max_length=64, null=True, verbose_name='数据归属部门')),
('update_datetime', apps.basics.op_drf.fields.UpdateDateTimeField(auto_now=True, help_text='修改时间', null=True, verbose_name='修改时间')),
('create_datetime', apps.basics.op_drf.fields.CreateDateTimeField(auto_now_add=True, help_text='创建时间', null=True, verbose_name='创建时间')),
('name', models.CharField(max_length=100, verbose_name='埋点名称')),
('p_type', models.IntegerField(default=1, verbose_name='埋点类型')),
('key_yn', models.IntegerField(default=0, verbose_name='标记核心')),
('status', models.IntegerField(default=1, verbose_name='埋点状态')),
('page_ext', models.TextField(verbose_name='页面扩展信息')),
('action_ext', models.TextField(verbose_name='事件扩展信息')),
('creator', models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_query_name='creator_query', to=settings.AUTH_USER_MODEL, verbose_name='创建者')),
],
options={
'verbose_name': '埋点',
'verbose_name_plural': '埋点',
},
),
] | 0.402979 | 0.212538 |
import pytest
from common.switch import SaiObjType
from ptf.testutils import simple_tcp_packet, send_packet, verify_packets, verify_no_packet_any
def test_l2_access_to_access_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
port_oids = []
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[idx],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
port_oids.append(port_oid)
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=101,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [1])
finally:
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oids[idx], ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_trunk_to_trunk_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_TAGGED")
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=101,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [1])
finally:
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_access_to_trunk_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[0], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[1], "SAI_VLAN_TAGGING_MODE_TAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[0],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
dl_vlan_enable=True,
vlan_vid=10,
ip_id=102,
ip_ttl=64,
pktlen=104)
send_packet(self, 0, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_trunk_to_access_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[0], "SAI_VLAN_TAGGING_MODE_TAGGED")
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[1], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[1],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
dl_vlan_enable=True,
vlan_vid=10,
ip_id=102,
ip_ttl=64,
pktlen=104)
exp_pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_flood(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
port_oids = []
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
for idx in range(3):
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[idx],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
port_oids.append(port_oid)
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=107,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [1, 2])
send_packet(self, 1, str(pkt))
verify_packets(self, pkt, [0, 2])
send_packet(self, 2, str(pkt))
verify_packets(self, pkt, [0, 1])
finally:
for idx in range(3):
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oids[idx], ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_lag(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
port_oids = []
# Remove bridge ports
for oid in sai.sw.dot1q_bp_oids[0:3]:
port = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + oid,
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"])
port_oids.append(port.oid())
sai.remove("SAI_OBJECT_TYPE_BRIDGE_PORT:" + oid)
# Remove port #3 from the default VLAN
sai.remove_vlan_member(sai.sw.default_vlan_id, sai.sw.dot1q_bp_oids[3])
# Create LAG
lag_oid = sai.get_vid(SaiObjType.LAG, "lag1")
sai.create("SAI_OBJECT_TYPE_LAG:" + lag_oid, [])
# Create LAG members
for oid in port_oids[0:3]:
lag_mbr_oid = sai.get_vid(SaiObjType.LAG_MEMBER, lag_oid + ',' + oid)
sai.create("SAI_OBJECT_TYPE_LAG_MEMBER:" + lag_mbr_oid,
[
"SAI_LAG_MEMBER_ATTR_LAG_ID", lag_oid,
"SAI_LAG_MEMBER_ATTR_PORT_ID", oid
])
# Create bridge port for LAG
lag_bp_oid = sai.get_vid(SaiObjType.BRIDGE_PORT, lag_oid)
sai.create("SAI_OBJECT_TYPE_BRIDGE_PORT:" + lag_bp_oid,
[
"SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT",
"SAI_BRIDGE_PORT_ATTR_PORT_ID", lag_oid,
#"SAI_BRIDGE_PORT_ATTR_BRIDGE_ID", sai.sw.dot1q_br_oid,
"SAI_BRIDGE_PORT_ATTR_ADMIN_STATE", "true"
])
# Create VLAN
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
# Create VLAN members
sai.create_vlan_member(vlan_id, lag_bp_oid, "SAI_VLAN_TAGGING_MODE_UNTAGGED")
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[3], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port3_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[3],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port3_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
sai.set("SAI_OBJECT_TYPE_LAG:" + lag_oid, ["SAI_LAG_ATTR_PORT_VLAN_ID", vlan_id])
sai.create_fdb(vlan_oid, macs[0], lag_bp_oid)
sai.create_fdb(vlan_oid, macs[1], sai.sw.dot1q_bp_oids[3])
try:
if not sai.libsaivs:
count = [0, 0, 0]
dst_ip = int(socket.inet_aton('10.10.10.1').encode('hex'),16)
max_itrs = 200
for i in range(0, max_itrs):
dst_ip_addr = socket.inet_ntoa(hex(dst_ip)[2:].zfill(8).decode('hex'))
pkt = simple_tcp_packet(eth_dst=macs[0],
eth_src=macs[1],
ip_dst=dst_ip_addr,
ip_src='192.168.8.1',
ip_id=109,
ip_ttl=64)
send_packet(self, 3, str(pkt))
rcv_idx = verify_any_packet_any_port(self, [pkt], [0, 1, 2])
count[rcv_idx] += 1
dst_ip += 1
print(count)
for i in range(0, 3):
self.assertTrue((count[i] >= ((max_itrs / 3) * 0.8)),
"Not all paths are equally balanced")
pkt = simple_tcp_packet(eth_src=macs[0],
eth_dst=macs[1],
ip_dst='10.0.0.1',
ip_id=109,
ip_ttl=64)
print("Sending packet port 1 (lag member) -> port 4")
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [3])
print("Sending packet port 2 (lag member) -> port 4")
send_packet(self, 1, str(pkt))
verify_packets(self, pkt, [3])
print("Sending packet port 3 (lag member) -> port 4")
send_packet(self, 2, str(pkt))
verify_packets(self, pkt, [3])
finally:
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, lag_bp_oid)
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[3])
sai.remove("SAI_OBJECT_TYPE_VLAN:" + sai.pop_vid(SaiObjType.VLAN, vlan_id))
# Delete LAG members
for oid in port_oids[0:3]:
lag_mbr_oid = sai.pop_vid(SaiObjType.LAG_MEMBER, lag_oid + ',' + oid)
sai.remove("SAI_OBJECT_TYPE_LAG_MEMBER:" + lag_mbr_oid)
# Delete LAG
sai.remove("SAI_OBJECT_TYPE_BRIDGE_PORT:" + lag_bp_oid)
sai.pop_vid(SaiObjType.BRIDGE_PORT, lag_oid)
sai.remove("SAI_OBJECT_TYPE_LAG:" + lag_oid)
sai.pop_vid(SaiObjType.LAG, "lag1")
# Create bridge port for ports removed from LAG
for idx, oid in enumerate(port_oids):
bp_oid = sai.get_vid(SaiObjType.BRIDGE_PORT, oid)
sai.create("SAI_OBJECT_TYPE_BRIDGE_PORT:" + bp_oid,
[
"SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT",
"SAI_BRIDGE_PORT_ATTR_PORT_ID", oid,
#"SAI_BRIDGE_PORT_ATTR_BRIDGE_ID", sai.dot1q_br_oid,
"SAI_BRIDGE_PORT_ATTR_ADMIN_STATE", "true"
])
sai.sw.dot1q_bp_oids[idx] = bp_oid
# Add ports to default VLAN
for oid in sai.sw.dot1q_bp_oids[0:4]:
sai.create_vlan_member(sai.sw.default_vlan_id, oid, "SAI_VLAN_TAGGING_MODE_UNTAGGED")
# Set PVID
port_oids.append(port3_oid)
for oid in port_oids:
sai.set("SAI_OBJECT_TYPE_PORT:" + oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id]) | tests/test_l2_basic.py | import pytest
from common.switch import SaiObjType
from ptf.testutils import simple_tcp_packet, send_packet, verify_packets, verify_no_packet_any
def test_l2_access_to_access_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
port_oids = []
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[idx],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
port_oids.append(port_oid)
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=101,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [1])
finally:
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oids[idx], ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_trunk_to_trunk_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_TAGGED")
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=101,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [1])
finally:
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_access_to_trunk_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[0], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[1], "SAI_VLAN_TAGGING_MODE_TAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[0],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
dl_vlan_enable=True,
vlan_vid=10,
ip_id=102,
ip_ttl=64,
pktlen=104)
send_packet(self, 0, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_trunk_to_access_vlan(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[0], "SAI_VLAN_TAGGING_MODE_TAGGED")
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[1], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[1],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
for idx in range(2):
sai.create_fdb(vlan_oid, macs[idx], sai.sw.dot1q_bp_oids[idx])
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
dl_vlan_enable=True,
vlan_vid=10,
ip_id=102,
ip_ttl=64,
pktlen=104)
exp_pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_flood(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
port_oids = []
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
for idx in range(3):
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[idx],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
port_oids.append(port_oid)
try:
if not sai.libsaivs:
pkt = simple_tcp_packet(eth_dst=macs[1],
eth_src=macs[0],
ip_dst='10.0.0.1',
ip_id=107,
ip_ttl=64)
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [1, 2])
send_packet(self, 1, str(pkt))
verify_packets(self, pkt, [0, 2])
send_packet(self, 2, str(pkt))
verify_packets(self, pkt, [0, 1])
finally:
for idx in range(3):
# Set PVID to default VLAN ID
sai.set("SAI_OBJECT_TYPE_PORT:" + port_oids[idx], ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id])
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[idx])
oid = sai.pop_vid(SaiObjType.VLAN, vlan_id)
sai.remove("SAI_OBJECT_TYPE_VLAN:" + oid)
def test_l2_lag(sai, dataplane):
vlan_id = "10"
macs = ['00:11:11:11:11:11', '00:22:22:22:22:22']
port_oids = []
# Remove bridge ports
for oid in sai.sw.dot1q_bp_oids[0:3]:
port = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + oid,
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"])
port_oids.append(port.oid())
sai.remove("SAI_OBJECT_TYPE_BRIDGE_PORT:" + oid)
# Remove port #3 from the default VLAN
sai.remove_vlan_member(sai.sw.default_vlan_id, sai.sw.dot1q_bp_oids[3])
# Create LAG
lag_oid = sai.get_vid(SaiObjType.LAG, "lag1")
sai.create("SAI_OBJECT_TYPE_LAG:" + lag_oid, [])
# Create LAG members
for oid in port_oids[0:3]:
lag_mbr_oid = sai.get_vid(SaiObjType.LAG_MEMBER, lag_oid + ',' + oid)
sai.create("SAI_OBJECT_TYPE_LAG_MEMBER:" + lag_mbr_oid,
[
"SAI_LAG_MEMBER_ATTR_LAG_ID", lag_oid,
"SAI_LAG_MEMBER_ATTR_PORT_ID", oid
])
# Create bridge port for LAG
lag_bp_oid = sai.get_vid(SaiObjType.BRIDGE_PORT, lag_oid)
sai.create("SAI_OBJECT_TYPE_BRIDGE_PORT:" + lag_bp_oid,
[
"SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT",
"SAI_BRIDGE_PORT_ATTR_PORT_ID", lag_oid,
#"SAI_BRIDGE_PORT_ATTR_BRIDGE_ID", sai.sw.dot1q_br_oid,
"SAI_BRIDGE_PORT_ATTR_ADMIN_STATE", "true"
])
# Create VLAN
vlan_oid = sai.get_vid(SaiObjType.VLAN, vlan_id)
sai.create("SAI_OBJECT_TYPE_VLAN:" + vlan_oid, ["SAI_VLAN_ATTR_VLAN_ID", vlan_id])
# Create VLAN members
sai.create_vlan_member(vlan_id, lag_bp_oid, "SAI_VLAN_TAGGING_MODE_UNTAGGED")
sai.create_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[3], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
port3_oid = sai.get("SAI_OBJECT_TYPE_BRIDGE_PORT:" + sai.sw.dot1q_bp_oids[3],
["SAI_BRIDGE_PORT_ATTR_PORT_ID", "oid:0x0"]).oid()
sai.set("SAI_OBJECT_TYPE_PORT:" + port3_oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", vlan_id])
sai.set("SAI_OBJECT_TYPE_LAG:" + lag_oid, ["SAI_LAG_ATTR_PORT_VLAN_ID", vlan_id])
sai.create_fdb(vlan_oid, macs[0], lag_bp_oid)
sai.create_fdb(vlan_oid, macs[1], sai.sw.dot1q_bp_oids[3])
try:
if not sai.libsaivs:
count = [0, 0, 0]
dst_ip = int(socket.inet_aton('10.10.10.1').encode('hex'),16)
max_itrs = 200
for i in range(0, max_itrs):
dst_ip_addr = socket.inet_ntoa(hex(dst_ip)[2:].zfill(8).decode('hex'))
pkt = simple_tcp_packet(eth_dst=macs[0],
eth_src=macs[1],
ip_dst=dst_ip_addr,
ip_src='192.168.8.1',
ip_id=109,
ip_ttl=64)
send_packet(self, 3, str(pkt))
rcv_idx = verify_any_packet_any_port(self, [pkt], [0, 1, 2])
count[rcv_idx] += 1
dst_ip += 1
print(count)
for i in range(0, 3):
self.assertTrue((count[i] >= ((max_itrs / 3) * 0.8)),
"Not all paths are equally balanced")
pkt = simple_tcp_packet(eth_src=macs[0],
eth_dst=macs[1],
ip_dst='10.0.0.1',
ip_id=109,
ip_ttl=64)
print("Sending packet port 1 (lag member) -> port 4")
send_packet(self, 0, str(pkt))
verify_packets(self, pkt, [3])
print("Sending packet port 2 (lag member) -> port 4")
send_packet(self, 1, str(pkt))
verify_packets(self, pkt, [3])
print("Sending packet port 3 (lag member) -> port 4")
send_packet(self, 2, str(pkt))
verify_packets(self, pkt, [3])
finally:
for idx in range(2):
sai.remove_fdb(vlan_oid, macs[idx])
sai.remove_vlan_member(vlan_id, lag_bp_oid)
sai.remove_vlan_member(vlan_id, sai.sw.dot1q_bp_oids[3])
sai.remove("SAI_OBJECT_TYPE_VLAN:" + sai.pop_vid(SaiObjType.VLAN, vlan_id))
# Delete LAG members
for oid in port_oids[0:3]:
lag_mbr_oid = sai.pop_vid(SaiObjType.LAG_MEMBER, lag_oid + ',' + oid)
sai.remove("SAI_OBJECT_TYPE_LAG_MEMBER:" + lag_mbr_oid)
# Delete LAG
sai.remove("SAI_OBJECT_TYPE_BRIDGE_PORT:" + lag_bp_oid)
sai.pop_vid(SaiObjType.BRIDGE_PORT, lag_oid)
sai.remove("SAI_OBJECT_TYPE_LAG:" + lag_oid)
sai.pop_vid(SaiObjType.LAG, "lag1")
# Create bridge port for ports removed from LAG
for idx, oid in enumerate(port_oids):
bp_oid = sai.get_vid(SaiObjType.BRIDGE_PORT, oid)
sai.create("SAI_OBJECT_TYPE_BRIDGE_PORT:" + bp_oid,
[
"SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT",
"SAI_BRIDGE_PORT_ATTR_PORT_ID", oid,
#"SAI_BRIDGE_PORT_ATTR_BRIDGE_ID", sai.dot1q_br_oid,
"SAI_BRIDGE_PORT_ATTR_ADMIN_STATE", "true"
])
sai.sw.dot1q_bp_oids[idx] = bp_oid
# Add ports to default VLAN
for oid in sai.sw.dot1q_bp_oids[0:4]:
sai.create_vlan_member(sai.sw.default_vlan_id, oid, "SAI_VLAN_TAGGING_MODE_UNTAGGED")
# Set PVID
port_oids.append(port3_oid)
for oid in port_oids:
sai.set("SAI_OBJECT_TYPE_PORT:" + oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", sai.sw.default_vlan_id]) | 0.118347 | 0.268003 |
from enum import Enum
import os
try:
import pathlib
except ImportError as e:
try:
import pathlib2 as pathlib
except ImportError:
raise e
from . import constants as exob
# Characters permitted in Exdir object names: lower-case ASCII letters,
# digits, '_', '-' and '.'; enforced by _assert_valid_characters().
VALID_CHARACTERS = ("abcdefghijklmnopqrstuvwxyz1234567890_-.")
class NamingRule(Enum):
    """Strictness levels for validating Exdir object names.

    The numeric values are arbitrary identifiers; only member identity is
    used when dispatching to the matching validation function below
    (unique/minimal/strict/thorough/none).
    """
    SIMPLE = 1
    STRICT = 2
    THOROUGH = 3
    NONE = 4
def _assert_unique(parent_path, name):
try:
name_str = str(name)
except UnicodeEncodeError:
name = name.encode('utf8')
if (parent_path / name).exists():
raise RuntimeError(
"'{}' already exists in '{}'".format(name, parent_path)
)
def _assert_nonempty(parent_path, name):
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
if len(name_str) < 1:
raise NameError("Name cannot be empty.")
def _assert_nonreserved(name):
    """Raise NameError when *name* collides with an Exdir-internal filename
    or with a Windows reserved device name (e.g. ``CON``, ``NUL``).
    """
    # NOTE ignore unicode errors, they are not reserved
    try:
        name_str = str(name)
    except UnicodeEncodeError:
        name_str = name.encode('utf8')
    if name_str in (exob.META_FILENAME,
                    exob.ATTRIBUTES_FILENAME,
                    exob.RAW_FOLDER_NAME):
        raise NameError(
            "Name cannot be '{}' because it is a reserved filename in Exdir.".format(name_str)
        )
    if pathlib.PureWindowsPath(name_str).is_reserved():
        raise NameError(
            "Name cannot be '{}' because it is a reserved filename in Windows.".format(name_str)
        )
def _assert_valid_characters(name):
    """Raise NameError if *name* contains any character outside
    VALID_CHARACTERS; reports the first offending character.
    """
    try:
        checked = str(name)
    except UnicodeEncodeError:
        checked = name.encode('utf8')
    # Find the first invalid character, if any (same short-circuit order
    # as scanning left to right).
    bad = next((c for c in checked if c not in VALID_CHARACTERS), None)
    if bad is not None:
        raise NameError(
            "Name '{}' contains invalid character '{}'.\n"
            "Valid characters are:\n{}".format(checked, bad, VALID_CHARACTERS)
        )
def unique(parent_path, name):
    """Naming rule: only require the name to be non-empty and unused."""
    _assert_nonempty(parent_path, name)
    _assert_unique(parent_path, name)
def minimal(parent_path, name):
    """Naming rule: non-empty, not a reserved filename, and unused."""
    _assert_nonempty(parent_path, name)
    _assert_nonreserved(name)
    _assert_unique(parent_path, name)
def strict(parent_path, name):
    """Naming rule: non-empty, not reserved, unused, and containing only
    characters from VALID_CHARACTERS.

    Fix: previously an empty name was not rejected explicitly; it fell
    through to _assert_unique, where ``parent_path / ''`` resolves to the
    parent directory itself and produced a misleading "'' already exists"
    RuntimeError. Reject it up front with a clear NameError, matching the
    sibling rules (minimal/thorough).
    """
    _assert_nonempty(parent_path, name)
    _assert_nonreserved(name)
    _assert_unique(parent_path, name)
    _assert_valid_characters(name)
def thorough(parent_path, name):
    """Strictest naming rule: in addition to the non-empty, non-reserved and
    valid-character checks, enforce case-INSENSITIVE uniqueness within
    *parent_path* (so 'Foo' blocks creation of 'foo').
    """
    _assert_nonempty(parent_path, name)
    _assert_nonreserved(name)
    try:
        name_str = str(name)
    except UnicodeEncodeError:
        name_str = name.encode('utf8')
    lowered = name_str.lower()
    _assert_valid_characters(lowered)
    if isinstance(pathlib.Path(parent_path), pathlib.WindowsPath):
        # use _assert_unique if we're already on Windows, because it is much faster
        # than the test below
        _assert_unique(parent_path, name)
        return
    # os.listdir is much faster here than os.walk or parent_path.iterdir
    existing = (entry.lower() for entry in os.listdir(str(parent_path)))
    if lowered in existing:
        raise RuntimeError(
            "A directory with name (case independent) '{}' already exists "
            " and cannot be made according to the naming rule 'thorough'.".format(name)
        )
def none(parent_path, name):
pass | exdir/core/validation.py | from enum import Enum
import os
try:
import pathlib
except ImportError as e:
try:
import pathlib2 as pathlib
except ImportError:
raise e
from . import constants as exob
VALID_CHARACTERS = ("abcdefghijklmnopqrstuvwxyz1234567890_-.")
class NamingRule(Enum):
SIMPLE = 1
STRICT = 2
THOROUGH = 3
NONE = 4
def _assert_unique(parent_path, name):
try:
name_str = str(name)
except UnicodeEncodeError:
name = name.encode('utf8')
if (parent_path / name).exists():
raise RuntimeError(
"'{}' already exists in '{}'".format(name, parent_path)
)
def _assert_nonempty(parent_path, name):
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
if len(name_str) < 1:
raise NameError("Name cannot be empty.")
def _assert_nonreserved(name):
# NOTE ignore unicode errors, they are not reserved
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
reserved_names = [
exob.META_FILENAME,
exob.ATTRIBUTES_FILENAME,
exob.RAW_FOLDER_NAME
]
if name_str in reserved_names:
raise NameError(
"Name cannot be '{}' because it is a reserved filename in Exdir.".format(name_str)
)
if pathlib.PureWindowsPath(name_str).is_reserved():
raise NameError(
"Name cannot be '{}' because it is a reserved filename in Windows.".format(name_str)
)
def _assert_valid_characters(name):
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
for char in name_str:
if char not in VALID_CHARACTERS:
raise NameError(
"Name '{}' contains invalid character '{}'.\n"
"Valid characters are:\n{}".format(name_str, char, VALID_CHARACTERS)
)
def unique(parent_path, name):
_assert_nonempty(parent_path, name)
_assert_unique(parent_path, name)
def minimal(parent_path, name):
_assert_nonempty(parent_path, name)
_assert_nonreserved(name)
_assert_unique(parent_path, name)
def strict(parent_path, name):
_assert_nonreserved(name)
_assert_unique(parent_path, name)
_assert_valid_characters(name)
def thorough(parent_path, name):
_assert_nonempty(parent_path, name)
_assert_nonreserved(name)
try:
name_str = str(name)
except UnicodeEncodeError:
name_str = name.encode('utf8')
name_lower = name_str.lower()
_assert_valid_characters(name_lower)
if isinstance(pathlib.Path(parent_path), pathlib.WindowsPath):
# use _assert_unique if we're already on Windows, because it is much faster
# than the test below
_assert_unique(parent_path, name)
return
# os.listdir is much faster here than os.walk or parent_path.iterdir
for item in os.listdir(str(parent_path)):
if name_lower == item.lower():
raise RuntimeError(
"A directory with name (case independent) '{}' already exists "
" and cannot be made according to the naming rule 'thorough'.".format(name)
)
def none(parent_path, name):
pass | 0.449634 | 0.219547 |
import copy
from django.core.exceptions import ValidationError
from django.db import models
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.utils import model_meta
from ..core.validators import validate_password
class SetPasswordSerializer(serializers.Serializer):
    """Standalone serializer for password-change endpoints.

    Exposes a single ``password`` field rendered with a password-input
    widget and checked by the shared ``validate_password`` validator.
    """
    password = serializers.CharField(max_length=128, label=_('Password'),
        style={'widget': widgets.PasswordInput}, validators=[validate_password])
class HyperlinkedModelSerializer(serializers.HyperlinkedModelSerializer):
    """DRF hyperlinked serializer with two extensions:

    - runs Django model-level validation (``Model.clean()``) during
      ``validate()``;
    - supports ``post_only_fields``: fields whose value may only be set on
      creation (POST) and are silently dropped on updates.
    """
    def validate(self, attrs):
        """Run DRF validation, then Django's ``Model.clean()`` on a
        throwaway instance built from the validated data."""
        attrs = super(HyperlinkedModelSerializer, self).validate(attrs)
        # Some flows return a model instance directly; nothing left to do.
        if isinstance(attrs, models.Model):
            return attrs
        validated_data = dict(attrs)
        ModelClass = self.Meta.model
        # Remove many-to-many relationships from validated_data.
        # (They cannot be passed to the model constructor or set via setattr.)
        info = model_meta.get_field_info(ModelClass)
        for field_name, relation_info in info.relations.items():
            if relation_info.to_many and (field_name in validated_data):
                validated_data.pop(field_name)
        if self.instance:
            # on update: Merge provided fields with instance field.
            # deepcopy so clean() runs on a disposable object and the bound
            # instance is not mutated if validation fails.
            instance = copy.deepcopy(self.instance)
            for key, value in validated_data.items():
                setattr(instance, key, value)
        else:
            instance = ModelClass(**validated_data)
        # Model-level validation; raises django ValidationError on failure.
        instance.clean()
        return attrs
    def post_only_cleanning(self, instance, validated_data):
        """Return a copy of *validated_data* with ``post_only_fields``
        removed (only when updating an existing instance).

        NOTE(review): the name keeps the original spelling ("cleanning")
        because it is part of the public interface.
        """
        model_attrs = dict(**validated_data)
        post_only_fields = getattr(self, 'post_only_fields', None)
        # On creation (instance is None) post-only fields may be set freely.
        if instance is not None and post_only_fields:
            for attr, value in validated_data.items():
                if attr in post_only_fields:
                    model_attrs.pop(attr)
        return model_attrs
    def update(self, instance, validated_data):
        """Strip post-only fields before delegating to DRF's update."""
        model_attrs = self.post_only_cleanning(instance, validated_data)
        return super(HyperlinkedModelSerializer, self).update(instance, model_attrs)
    def partial_update(self, instance, validated_data):
        """Strip post-only fields before delegating to partial_update.

        NOTE(review): DRF serializers do not define ``partial_update``;
        confirm some class in the MRO actually provides it.
        """
        model_attrs = self.post_only_cleanning(instance, validated_data)
        return super(HyperlinkedModelSerializer, self).partial_update(instance, model_attrs)
class RelatedHyperlinkedModelSerializer(HyperlinkedModelSerializer):
    """Serializer for related objects: resolves the target object from the
    ``url`` key of the incoming payload instead of deserializing fields."""
    def to_internal_value(self, data):
        # Accept only mapping-like payloads; anything without .get()
        # (e.g. a list) is treated as having no URL.
        try:
            url = data.get('url')
        except AttributeError:
            url = None
        if not url:
            raise ValidationError({
                'url': "URL is required."
            })
        # get_account() is expected to be supplied by the concrete
        # serializer or a view mixin; it is not defined in this module.
        account = self.get_account()
        # Restrict resolution to the current account's objects so a URL
        # pointing at another account's object does not validate.
        queryset = self.Meta.model.objects.filter(account=account)
        self.fields['url'].queryset = queryset
        obj = self.fields['url'].to_internal_value(url)
        return obj
class SetPasswordHyperlinkedSerializer(HyperlinkedModelSerializer):
    """Hyperlinked serializer whose ``password`` is write-only and may only
    be provided on creation; it is stored hashed via ``set_password()``."""
    # Write-only; required-ness is enforced by validate_password below.
    password = serializers.CharField(max_length=128, label=_('Password'),
        validators=[validate_password], write_only=True, required=False,
        style={'widget': widgets.PasswordInput})
    def validate_password(self, value):
        """Field-level hook: forbid the field on updates, require a value on
        creation. Note DRF only invokes this when 'password' is present in
        the incoming data."""
        if self.instance:
            if value:
                raise serializers.ValidationError(_("Can not set password"))
        elif not value:
            raise serializers.ValidationError(_("Password required"))
        return value
    def validate(self, attrs):
        """Keep the raw password out of model-level validation when the
        model actually has a 'password' field."""
        try:
            self.Meta.model._meta.get_field('password')
        except models.FieldDoesNotExist:
            # Model has no 'password' field.
            # NOTE(review): in this branch super().validate() is never
            # called, so the parent's Model.clean() validation is skipped —
            # confirm this is intentional.
            pass
        else:
            # Pop the raw password around the parent validation (so it is
            # not passed to the model constructor / clean()), then restore
            # it for create()/update().
            password = attrs.pop('password', None)
            attrs = super().validate(attrs)
            if password is not None:
                attrs['password'] = password
        return attrs
    def create(self, validated_data):
        # NOTE(review): pop() without a default raises KeyError when no
        # password was supplied (the field is required=False) — confirm
        # callers guarantee its presence on creation.
        password = validated_data.pop('password')
        instance = self.Meta.model(**validated_data)
        # Store the hash, never the raw password.
        instance.set_password(password)
        instance.save()
        return instance
from django.core.exceptions import ValidationError
from django.db import models
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.utils import model_meta
from ..core.validators import validate_password
class SetPasswordSerializer(serializers.Serializer):
password = serializers.CharField(max_length=128, label=_('Password'),
style={'widget': widgets.PasswordInput}, validators=[validate_password])
class HyperlinkedModelSerializer(serializers.HyperlinkedModelSerializer):
""" support for postonly_fields, fields whose value can only be set on post """
def validate(self, attrs):
""" calls model.clean() """
attrs = super(HyperlinkedModelSerializer, self).validate(attrs)
if isinstance(attrs, models.Model):
return attrs
validated_data = dict(attrs)
ModelClass = self.Meta.model
# Remove many-to-many relationships from validated_data.
info = model_meta.get_field_info(ModelClass)
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in validated_data):
validated_data.pop(field_name)
if self.instance:
# on update: Merge provided fields with instance field
instance = copy.deepcopy(self.instance)
for key, value in validated_data.items():
setattr(instance, key, value)
else:
instance = ModelClass(**validated_data)
instance.clean()
return attrs
def post_only_cleanning(self, instance, validated_data):
""" removes postonly_fields from attrs """
model_attrs = dict(**validated_data)
post_only_fields = getattr(self, 'post_only_fields', None)
if instance is not None and post_only_fields:
for attr, value in validated_data.items():
if attr in post_only_fields:
model_attrs.pop(attr)
return model_attrs
def update(self, instance, validated_data):
""" removes postonly_fields from attrs when not posting """
model_attrs = self.post_only_cleanning(instance, validated_data)
return super(HyperlinkedModelSerializer, self).update(instance, model_attrs)
def partial_update(self, instance, validated_data):
""" removes postonly_fields from attrs when not posting """
model_attrs = self.post_only_cleanning(instance, validated_data)
return super(HyperlinkedModelSerializer, self).partial_update(instance, model_attrs)
class RelatedHyperlinkedModelSerializer(HyperlinkedModelSerializer):
""" returns object on to_internal_value based on URL """
def to_internal_value(self, data):
try:
url = data.get('url')
except AttributeError:
url = None
if not url:
raise ValidationError({
'url': "URL is required."
})
account = self.get_account()
queryset = self.Meta.model.objects.filter(account=account)
self.fields['url'].queryset = queryset
obj = self.fields['url'].to_internal_value(url)
return obj
class SetPasswordHyperlinkedSerializer(HyperlinkedModelSerializer):
password = serializers.CharField(max_length=128, label=_('Password'),
validators=[validate_password], write_only=True, required=False,
style={'widget': widgets.PasswordInput})
def validate_password(self, value):
""" POST only password """
if self.instance:
if value:
raise serializers.ValidationError(_("Can not set password"))
elif not value:
raise serializers.ValidationError(_("Password required"))
return value
def validate(self, attrs):
""" remove password in case is not a real model field """
try:
self.Meta.model._meta.get_field('password')
except models.FieldDoesNotExist:
pass
else:
password = attrs.pop('password', None)
attrs = super().validate(attrs)
if password is not None:
attrs['password'] = password
return attrs
def create(self, validated_data):
password = validated_data.pop('password')
instance = self.Meta.model(**validated_data)
instance.set_password(password)
instance.save()
return instance | 0.441432 | 0.141875 |
from unittest import TestCase
from microfreshener.core.model.microtosca import MicroToscaModel
from microfreshener.core.model.nodes import Service, Datastore, MessageBroker, MessageRouter
from microfreshener.core.errors import MicroToscaModelError, GroupNotFoundError, GroupNotFoundError
from microfreshener.core.model import Team, Edge
class TestGroupMicrotosca(TestCase):
    """Unit tests for MicroTosca group handling (Team and Edge groups)."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is
        # conventionally named ``cls``; here it is named ``self`` but still
        # receives the class object, so ``name`` and ``microtosca`` are
        # class-level state shared by every test method in this case.
        self.name = "prova-model"
        self.microtosca = MicroToscaModel(self.name)
    def test_create_team(self):
        # Members added to a Team are reachable via ``in``, ``members``
        # and name-based indexing.
        first = self.microtosca.add_node(Service("first-team"))
        second = self.microtosca.add_node(Service("second-team"))
        team = Team("prova-team")
        team.add_member(first)
        team.add_member(second)
        self.assertIn(first, team)
        self.assertIn(second, team)
        self.assertEqual(len(team.members), 2)
        self.assertEqual(team[first.name], first)
    def test_add_get_team(self):
        # A Team registered on the model is retrievable by name.
        team_name = "prova-team-add"
        first = self.microtosca.add_node(Service("first-team-add"))
        second = self.microtosca.add_node(Service("second-team-add"))
        team = Team(team_name)
        team.add_member(first)
        team.add_member(second)
        self.microtosca.add_group(team)
        self.assertIsInstance(self.microtosca.get_group(team_name), Team)
        self.assertEqual(self.microtosca.get_group(team_name), team)
    def test_get_team_error(self):
        # Looking up an unknown group name raises GroupNotFoundError.
        with self.assertRaises(GroupNotFoundError):
            self.microtosca.get_group("fake team")
    def test_remove_member(self):
        # Removing a member shrinks the team but keeps the node in the model.
        first = self.microtosca.add_node(Service("fteam"))
        second = self.microtosca.add_node(Service("steam"))
        team = Team("pteam")
        team.add_member(first)
        team.add_member(second)
        self.assertEqual(len(team.members), 2)
        team.remove_member(first)
        self.assertIn(first, self.microtosca.nodes)
        self.assertEqual(len(team.members), 1)
        self.assertNotIn(first, team.members)
        self.assertIn(second, team.members)
    def test_add_edge(self):
        # Edge groups behave like Teams for add/get.
        edge = Edge("myedge")
        edge.add_member(Service("first-edge"))
        edge.add_member(Service("second-edge"))
        self.microtosca.add_group(edge)
        self.assertEqual(len(edge.members), 2)
        self.assertIsInstance(self.microtosca.get_group("myedge"), Edge)
    def test_get_edge_error(self):
        # Accessing .edge on a model with no edge group raises.
        microtosca = MicroToscaModel(self.name)
        with self.assertRaises(GroupNotFoundError):
            microtosca.edge
from microfreshener.core.model.microtosca import MicroToscaModel
from microfreshener.core.model.nodes import Service, Datastore, MessageBroker, MessageRouter
from microfreshener.core.errors import MicroToscaModelError, GroupNotFoundError, GroupNotFoundError
from microfreshener.core.model import Team, Edge
class TestGroupMicrotosca(TestCase):
@classmethod
def setUpClass(self):
self.name = "prova-model"
self.microtosca = MicroToscaModel(self.name)
def test_create_team(self):
first = self.microtosca.add_node(Service("first-team"))
second = self.microtosca.add_node(Service("second-team"))
team = Team("prova-team")
team.add_member(first)
team.add_member(second)
self.assertIn(first, team)
self.assertIn(second, team)
self.assertEqual(len(team.members), 2)
self.assertEqual(team[first.name], first)
def test_add_get_team(self):
team_name = "prova-team-add"
first = self.microtosca.add_node(Service("first-team-add"))
second = self.microtosca.add_node(Service("second-team-add"))
team = Team(team_name)
team.add_member(first)
team.add_member(second)
self.microtosca.add_group(team)
self.assertIsInstance(self.microtosca.get_group(team_name), Team)
self.assertEqual(self.microtosca.get_group(team_name), team)
def test_get_team_error(self):
with self.assertRaises(GroupNotFoundError):
self.microtosca.get_group("fake team")
def test_remove_member(self):
first = self.microtosca.add_node(Service("fteam"))
second = self.microtosca.add_node(Service("steam"))
team = Team("pteam")
team.add_member(first)
team.add_member(second)
self.assertEqual(len(team.members), 2)
team.remove_member(first)
self.assertIn(first, self.microtosca.nodes)
self.assertEqual(len(team.members), 1)
self.assertNotIn(first, team.members)
self.assertIn(second, team.members)
def test_add_edge(self):
edge = Edge("myedge")
edge.add_member(Service("first-edge"))
edge.add_member(Service("second-edge"))
self.microtosca.add_group(edge)
self.assertEqual(len(edge.members), 2)
self.assertIsInstance(self.microtosca.get_group("myedge"), Edge)
def test_get_edge_error(self):
microtosca = MicroToscaModel(self.name)
with self.assertRaises(GroupNotFoundError):
microtosca.edge | 0.727201 | 0.461623 |
from typing import List, Tuple, Union
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib.axes import Subplot
import matplotlib.pyplot as plt
from numpy import ndarray
from .extend import ExtendDict
from .color import PLOT_COLORS
# DEFAULT PARAMETERS
class PlotProperty(object):
    """Namespace of default layout/style constants for this module.

    Values are read as fallbacks by configure_figure() and SimplePlot;
    they are not meant to be mutated at runtime.
    """
    # Overall figure geometry and grid size.
    FIGURE_SIZE: Tuple[int, int] = (16, 9)
    ROWS_NUMBER: int = 1
    COLS_NUMBER: int = 1
    # Subplot grid placement within the figure (fractions of figure size).
    GRID_POSITION_LEFT: float = 0.1
    GRID_POSITION_RIGHT: float = 0.95
    GRID_POSITION_BOTTOM: float = 0.2
    GRID_POSITION_TOP: float = 0.95
    # Spacing between subplots and relative row/column sizes.
    GRID_SPACE_WIDTH: float = 0.03
    GRID_SPACE_HEIGHT: float = 0.02
    GRID_RATIO_WIDTH: List[float] = [1.]
    GRID_RATIO_HEIGHT: List[float] = [1.]
    # Font sizes, derived from the base FONT_SIZE.
    FONT_SIZE: float = 25.0
    LABEL_FONT_SIZE = FONT_SIZE*0.8
    # NOTE(review): "LEGENG" keeps the original (misspelled) public name.
    LEGENG_FONT_SIZE = FONT_SIZE*0.5
    TICKS_FONT_SIZE = FONT_SIZE
    # Text alignment defaults.
    COMMON_ALIGN = 'center'
    VERTICAL_ALIGN = 'center'
    HORIZONTAL_ALIGN = 'center'
    # Line / marker defaults.
    LINE_STYLE = 'solid'
    LINE_WIDTH = 2.0
    MARKER = 'o'
    PLOT_FORMAT = ','
    MARKER_SIZE = 5.0
    DPI = 100  # resolution of the figure in unit of dot per inch
    # Font used for Japanese (CJK) text labels.
    IGFONT = ExtendDict(family='IPAexGothic')
def configure_figure(figsize: Tuple[int, int] = PlotProperty.FIGURE_SIZE,
                     nrows: int = PlotProperty.ROWS_NUMBER,
                     ncols: int = PlotProperty.COLS_NUMBER,
                     left: float = PlotProperty.GRID_POSITION_LEFT,
                     right: float = PlotProperty.GRID_POSITION_RIGHT,
                     top: float = PlotProperty.GRID_POSITION_TOP,
                     bottom: float = PlotProperty.GRID_POSITION_BOTTOM,
                     wspace: float = PlotProperty.GRID_SPACE_WIDTH,
                     hspace: float = PlotProperty.GRID_SPACE_HEIGHT,
                     sharex: bool = True, sharey: bool = True,
                     width_ratios: List[float] = None,
                     height_ratios: List[float] = None) -> Tuple[Figure, Union[ndarray, Subplot]]:
    """Create a figure and a grid of subplots with the module defaults.

    Parameters mirror matplotlib's ``plt.subplots`` / ``subplots_adjust``;
    ``sharex``/``sharey`` map to per-column / per-row axis sharing.
    ``width_ratios`` and ``height_ratios`` default to PlotProperty's
    length-1 ratio lists and are replicated to match ``ncols``/``nrows``
    when left at (or equal to) the default.

    Returns
    -------
    tuple
        ``(Figure, Axes-or-ndarray-of-Axes)`` as returned by plt.subplots.
    """
    # Fix: the original bound the shared PlotProperty lists as mutable
    # default arguments; resolve (and copy) them per call instead.
    if width_ratios is None:
        width_ratios = list(PlotProperty.GRID_RATIO_WIDTH)
    if height_ratios is None:
        height_ratios = list(PlotProperty.GRID_RATIO_HEIGHT)
    sharex_ = 'col' if sharex else None
    sharey_ = 'row' if sharey else None
    # When a multi-row/column grid is requested but the ratio list is still
    # the default (length 1), replicate it to match the grid size.
    if nrows > PlotProperty.ROWS_NUMBER and height_ratios == PlotProperty.GRID_RATIO_HEIGHT:
        height_ratios = PlotProperty.GRID_RATIO_HEIGHT * nrows
    if ncols > PlotProperty.COLS_NUMBER and width_ratios == PlotProperty.GRID_RATIO_WIDTH:
        width_ratios = PlotProperty.GRID_RATIO_WIDTH * ncols
    fig, ax = plt.subplots(
        nrows=nrows, ncols=ncols,
        sharex=sharex_, sharey=sharey_,
        figsize=figsize, dpi=PlotProperty.DPI,
        gridspec_kw={'height_ratios': height_ratios, 'width_ratios': width_ratios})
    fig.subplots_adjust(
        left=left, right=right, bottom=bottom, top=top,
        wspace=wspace, hspace=hspace)
    return fig, ax
class SimplePlot(object):
    """Convenience wrapper bundling figure configuration and rc-param setup.

    Every option is looked up in ``**args`` with a PlotProperty fallback,
    mirroring configure_figure()'s parameters. When ``configure`` is true,
    the matplotlib figure/axes are created immediately (as ``self.fig`` /
    ``self.axes``).
    """
    def __init__(self, configure: bool = True, **args) -> None:
        # Figure geometry and subplot grid.
        self.figsize: Tuple[int, int] = args.get('figsize',
                                                 PlotProperty.FIGURE_SIZE)
        self.nrows: int = args.get('nrows', PlotProperty.ROWS_NUMBER)
        self.ncols: int = args.get('ncols', PlotProperty.COLS_NUMBER)
        self.left: float = args.get('left', PlotProperty.GRID_POSITION_LEFT)
        self.right: float = args.get('right', PlotProperty.GRID_POSITION_RIGHT)
        self.top: float = args.get('top', PlotProperty.GRID_POSITION_TOP)
        self.bottom: float = args.get(
            'bottom', PlotProperty.GRID_POSITION_BOTTOM)
        self.wspace: float = args.get('wspace', PlotProperty.GRID_SPACE_WIDTH)
        self.hspace: float = args.get('hspace', PlotProperty.GRID_SPACE_HEIGHT)
        # Font sizes.
        self.fsize: float = args.get('fsize', PlotProperty.FONT_SIZE)
        self.labfsize: float = args.get(
            'labfsize', PlotProperty.LABEL_FONT_SIZE)
        self.legfsize: float = args.get(
            'legfsize', PlotProperty.LEGENG_FONT_SIZE)
        self.tckfsize: float = args.get(
            'tckfsize', PlotProperty.TICKS_FONT_SIZE)
        # Alignment and line/marker styling.
        self.calign: str = args.get('calign', PlotProperty.COMMON_ALIGN)
        self.valign: str = args.get('valign', PlotProperty.VERTICAL_ALIGN)
        self.halign: str = args.get('halign', PlotProperty.HORIZONTAL_ALIGN)
        self.lstyle: str = args.get('lstyle', PlotProperty.LINE_STYLE)
        self.lwidth: float = args.get('lwidth', PlotProperty.LINE_WIDTH)
        self.marker: str = args.get('marker', PlotProperty.MARKER)
        self.pltfmt: str = args.get('pltfmt', PlotProperty.PLOT_FORMAT)
        self.masize: float = args.get('masize', PlotProperty.MARKER_SIZE)
        self.igfont: ExtendDict = args.get('igfont', PlotProperty.IGFONT)
        self.colors = args.get('colors', PLOT_COLORS)
        # Axis sharing and grid ratios, forwarded to configure_figure().
        self.sharex: bool = args.get('sharex', True)
        self.sharey: bool = args.get('sharey', True)
        self.width_ratios: List = args.get('width_ratios',
                                           PlotProperty.GRID_RATIO_WIDTH)
        self.height_ratios: List = args.get('height_ratios',
                                            PlotProperty.GRID_RATIO_HEIGHT)
        if configure:
            self.configure()
    def configure(self) -> None:
        """Apply rc-params and create the figure/axes from stored options."""
        self.set_rcparams()
        self.fig, self.axes = configure_figure(
            figsize=self.figsize,
            nrows=self.nrows, ncols=self.ncols,
            left=self.left, right=self.right,
            top=self.top, bottom=self.bottom,
            wspace=self.wspace, hspace=self.hspace,
            sharex=self.sharex, sharey=self.sharey,
            width_ratios=self.width_ratios,
            height_ratios=self.height_ratios)
    def set_rcparams(self) -> None:
        """Set global matplotlib rcParams (fonts, ticks, grid) to the
        module's house style. Note: mutates process-wide state."""
        plt.rcParams['font.family'] = 'Times New Roman'
        plt.rcParams['mathtext.fontset'] = 'cm'
        plt.rcParams['mathtext.rm'] = 'serif'
        plt.rcParams['axes.titleweight'] = 'bold'
        # plt.rcParams['axes.labelweight'] = 'bold'
        plt.rcParams['axes.linewidth'] = self.lwidth
        plt.rcParams['grid.linestyle'] = 'solid'
        plt.rcParams['grid.linewidth'] = 1.0
        plt.rcParams['grid.alpha'] = 0.2
        # Ticks: inward, on all four sides, sized relative to lwidth.
        plt.rcParams['xtick.major.size'] = 8
        plt.rcParams['xtick.minor.size'] = 5
        plt.rcParams['xtick.major.width'] = self.lwidth
        plt.rcParams['xtick.minor.width'] = self.lwidth
        plt.rcParams['xtick.major.pad'] = 5
        plt.rcParams['ytick.major.size'] = 8
        plt.rcParams['xtick.top'] = True
        plt.rcParams['ytick.minor.size'] = 5
        plt.rcParams['ytick.major.width'] = self.lwidth
        plt.rcParams['ytick.minor.width'] = self.lwidth
        plt.rcParams['ytick.major.pad'] = 5
        plt.rcParams['xtick.direction'] = 'in'
        plt.rcParams['ytick.direction'] = 'in'
        plt.rcParams['xtick.labelsize'] = self.labfsize
        plt.rcParams['ytick.labelsize'] = self.labfsize
        plt.rcParams['ytick.right'] = True
    def get(self, name):
        """Return option *name* from this instance, or None if unset."""
        return self.__dict__.get(name)
    def set(self, name, value):
        """Set option *name* to *value* and return the stored value."""
        self.__dict__[name] = value
        return self.get(name)
from typing import List, Tuple, Union
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib.axes import Subplot
import matplotlib.pyplot as plt
from numpy import ndarray
from .extend import ExtendDict
from .color import PLOT_COLORS
# DEFAULT PARAMETERS
class PlotProperty(object):
FIGURE_SIZE: Tuple[int, int] = (16, 9)
ROWS_NUMBER: int = 1
COLS_NUMBER: int = 1
GRID_POSITION_LEFT: float = 0.1
GRID_POSITION_RIGHT: float = 0.95
GRID_POSITION_BOTTOM: float = 0.2
GRID_POSITION_TOP: float = 0.95
GRID_SPACE_WIDTH: float = 0.03
GRID_SPACE_HEIGHT: float = 0.02
GRID_RATIO_WIDTH: List[float] = [1.]
GRID_RATIO_HEIGHT: List[float] = [1.]
FONT_SIZE: float = 25.0
LABEL_FONT_SIZE = FONT_SIZE*0.8
LEGENG_FONT_SIZE = FONT_SIZE*0.5
TICKS_FONT_SIZE = FONT_SIZE
COMMON_ALIGN = 'center'
VERTICAL_ALIGN = 'center'
HORIZONTAL_ALIGN = 'center'
LINE_STYLE = 'solid'
LINE_WIDTH = 2.0
MARKER = 'o'
PLOT_FORMAT = ','
MARKER_SIZE = 5.0
DPI = 100 # resolution of the figure in unit of dot per inch
IGFONT = ExtendDict(family='IPAexGothic')
def configure_figure(figsize: Tuple[int, int] = PlotProperty.FIGURE_SIZE,
nrows: int = PlotProperty.ROWS_NUMBER,
ncols: int = PlotProperty.COLS_NUMBER,
left: float = PlotProperty.GRID_POSITION_LEFT,
right: float = PlotProperty.GRID_POSITION_RIGHT,
top: float = PlotProperty.GRID_POSITION_TOP,
bottom: float = PlotProperty.GRID_POSITION_BOTTOM,
wspace: float = PlotProperty.GRID_SPACE_WIDTH,
hspace: float = PlotProperty.GRID_SPACE_HEIGHT,
sharex: bool = True, sharey: bool = True,
width_ratios: List[float] = PlotProperty.GRID_RATIO_WIDTH,
height_ratios: List[float] = PlotProperty.GRID_RATIO_HEIGHT) -> Tuple[Figure, Union[ndarray, Subplot]]:
sharex_ = 'col' if sharex else None
sharey_ = 'row' if sharey else None
if nrows > PlotProperty.ROWS_NUMBER and height_ratios == PlotProperty.GRID_RATIO_HEIGHT:
height_ratios = PlotProperty.GRID_RATIO_HEIGHT * nrows
if ncols > PlotProperty.COLS_NUMBER and width_ratios == PlotProperty.GRID_RATIO_WIDTH:
width_ratios = PlotProperty.GRID_RATIO_WIDTH * ncols
fig, ax = plt.subplots(
nrows=nrows, ncols=ncols,
sharex=sharex_, sharey=sharey_,
figsize=figsize, dpi=PlotProperty.DPI,
gridspec_kw={'height_ratios': height_ratios, 'width_ratios': width_ratios})
fig.subplots_adjust(
left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
# grd = fig.add_gridspec(grid_num_v,grid_num_h)
return fig, ax
class SimplePlot(object):
def __init__(self, configure: bool = True, **args) -> None:
self.figsize: Tuple[int, int] = args.get('figsize',
PlotProperty.FIGURE_SIZE)
self.nrows: int = args.get('nrows', PlotProperty.ROWS_NUMBER)
self.ncols: int = args.get('ncols', PlotProperty.COLS_NUMBER)
self.left: float = args.get('left', PlotProperty.GRID_POSITION_LEFT)
self.right: float = args.get('right', PlotProperty.GRID_POSITION_RIGHT)
self.top: float = args.get('top', PlotProperty.GRID_POSITION_TOP)
self.bottom: float = args.get(
'bottom', PlotProperty.GRID_POSITION_BOTTOM)
self.wspace: float = args.get('wspace', PlotProperty.GRID_SPACE_WIDTH)
self.hspace: float = args.get('hspace', PlotProperty.GRID_SPACE_HEIGHT)
self.fsize: float = args.get('fsize', PlotProperty.FONT_SIZE)
self.labfsize: float = args.get(
'labfsize', PlotProperty.LABEL_FONT_SIZE)
self.legfsize: float = args.get(
'legfsize', PlotProperty.LEGENG_FONT_SIZE)
self.tckfsize: float = args.get(
'tckfsize', PlotProperty.TICKS_FONT_SIZE)
self.calign: str = args.get('calign', PlotProperty.COMMON_ALIGN)
self.valign: str = args.get('valign', PlotProperty.VERTICAL_ALIGN)
self.halign: str = args.get('halign', PlotProperty.HORIZONTAL_ALIGN)
self.lstyle: str = args.get('lstyle', PlotProperty.LINE_STYLE)
self.lwidth: float = args.get('lwidth', PlotProperty.LINE_WIDTH)
self.marker: str = args.get('marker', PlotProperty.MARKER)
self.pltfmt: str = args.get('pltfmt', PlotProperty.PLOT_FORMAT)
self.masize: float = args.get('masize', PlotProperty.MARKER_SIZE)
self.igfont: ExtendDict = args.get('igfont', PlotProperty.IGFONT)
self.colors = args.get('colors', PLOT_COLORS)
self.sharex: bool = args.get('sharex', True)
self.sharey: bool = args.get('sharey', True)
self.width_ratios: List = args.get('width_ratios',
PlotProperty.GRID_RATIO_WIDTH)
self.height_ratios: List = args.get('height_ratios',
PlotProperty.GRID_RATIO_HEIGHT)
if configure:
self.configure()
def configure(self) -> None:
self.set_rcparams()
self.fig, self.axes = configure_figure(
figsize=self.figsize,
nrows=self.nrows, ncols=self.ncols,
left=self.left, right=self.right,
top=self.top, bottom=self.bottom,
wspace=self.wspace, hspace=self.hspace,
sharex=self.sharex, sharey=self.sharey,
width_ratios=self.width_ratios,
height_ratios=self.height_ratios)
def set_rcparams(self) -> None:
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
plt.rcParams['axes.titleweight'] = 'bold'
# plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.linewidth'] = self.lwidth
plt.rcParams['grid.linestyle'] = 'solid'
plt.rcParams['grid.linewidth'] = 1.0
plt.rcParams['grid.alpha'] = 0.2
plt.rcParams['xtick.major.size'] = 8
plt.rcParams['xtick.minor.size'] = 5
plt.rcParams['xtick.major.width'] = self.lwidth
plt.rcParams['xtick.minor.width'] = self.lwidth
plt.rcParams['xtick.major.pad'] = 5
plt.rcParams['ytick.major.size'] = 8
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.minor.size'] = 5
plt.rcParams['ytick.major.width'] = self.lwidth
plt.rcParams['ytick.minor.width'] = self.lwidth
plt.rcParams['ytick.major.pad'] = 5
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.labelsize'] = self.labfsize
plt.rcParams['ytick.labelsize'] = self.labfsize
plt.rcParams['ytick.right'] = True
def get(self, name):
return self.__dict__.get(name)
def set(self, name, value):
self.__dict__[name] = value
return self.get(name) | 0.925306 | 0.430327 |
from zope.interface import implements
from twisted.python import log
from twisted.web.client import HTTP11ClientProtocol
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory, Protocol
from devicehive import ApiInfoRequest
from devicehive.interfaces import IProtoFactory, IProtoHandler
from devicehive.utils import TextDataConsumer, JsonDataConsumer, parse_url, parse_date
from devicehive.device.ws import WebSocketFactory
from devicehive.poll import PollFactory
__all__ = ['AutoProtocol', 'AutoFactory']
class AutoProtocol(HTTP11ClientProtocol):
    """
    One-shot HTTP protocol that performs the API "/info" request and
    reports the outcome to its factory. Not intended for external use.
    """
    def __init__(self, factory):
        # NOTE(review): HTTP11ClientProtocol.__init__ is not invoked here;
        # confirm the base class tolerates that in the Twisted version used.
        self.factory = factory
    def connectionMade(self) :
        # As soon as the TCP connection is up, issue the "/info" request and
        # route success/failure to this protocol's api_* callbacks.
        log.msg('AutoProtocol: Connection with {0} has been established.'.format(self.factory.url))
        self.request(ApiInfoRequest(self.factory.url, self.factory.host)).addCallbacks(self.api_received, self.api_failed)
    def api_received(self, response):
        # 200 -> decode the JSON body; anything else -> drain the body as
        # text and report it as a failure.
        if response.code == 200 :
            result_proto = Deferred()
            result_proto.addCallbacks(self.api_succeed, self.api_failed)
            response.deliverBody(JsonDataConsumer(result_proto))
        else :
            def get_response_text(reason):
                self.api_failed(reason)
            response_defer = Deferred()
            response_defer.addBoth(get_response_text)
            response.deliverBody(TextDataConsumer(response_defer))
    def api_succeed(self, resp):
        # Hand the two fields of interest to the factory for transport
        # selection.
        self.factory.api_received(resp['webSocketServerUrl'], resp['serverTimestamp'])
    def api_failed(self, reason):
        """Forward any "/info" failure to the owning factory."""
        self.factory.api_failed(reason)
class AutoFactory(ClientFactory):
    """
    Factory that first makes an '/info' request, then uses the information
    retrieved from the response to decide which DeviceHive transport is
    more appropriate: WebSocket when the server advertises a WebSocket URL,
    long polling otherwise.
    """
    implements(IProtoFactory, IProtoHandler)
    # Defaults for the REST endpoint used for the "/info" call.
    url = 'http://localhost'
    host = 'localhost'
    port = 80
    # Defaults for the WebSocket endpoint; overwritten in api_received()
    # from the URL reported by the server.
    ws_url = 'http://localhost'
    ws_host = 'localhost'
    ws_port = 8020
    # User-supplied IProtoHandler instance; bound in __init__.
    handler = None
    def __init__(self, handler):
        # Fail fast if the handler's class does not declare the
        # IProtoHandler interface (zope.interface class-level check).
        if not IProtoHandler.implementedBy(handler.__class__) :
            raise TypeError('The protocol handler has to conform to IProtoHandler interface.')
        self.handler = handler
        # Give the handler a back-reference to its factory.
        self.handler.factory = self
        # The concrete transport factory (WebSocketFactory or PollFactory),
        # selected after the "/info" call; None until then.
        self.factory = None
    def buildProtocol(self, addr):
        """Build the one-shot protocol that performs the "/info" request."""
        return AutoProtocol(self)
    def clientConnectionFailed(self, connector, reason):
        # Could not even reach the server for "/info"; apply the shared
        # failure policy (fall back to polling or report to the handler).
        log.err('Failed to make "/info" call. Reason: {0}.'.format(reason))
        self.handle_connection_failure(reason)
def api_received(self, wsurl, server_time):
log.msg('The call to "/info" api has finished successfully.')
try :
self.server_time = parse_date(server_time)
except ValueError :
log.msg('Failed to parse a date-time string "{0}" returned from "/info" api call.'.format(server_time))
self.server_time = datetime.utcnow()
if wsurl is not None :
wsurl = wsurl.strip().replace('ws://', 'http://', 1).replace('wss://', 'https://', 1)
if wsurl.startswith('http://') or wsurl.startswith('https://') :
self.ws_url, self.ws_host, self.ws_port = parse_url(wsurl)
self.handler.on_apimeta(wsurl, self.server_time)
self.connect_ws()
return
self.handler.on_apimeta(wsurl, self.server_time)
self.connect_poll()
def api_failed(self, reason):
log.err('The call to "/info" api failed. Reason: {0}.'.format(reason))
self.on_failure(None, reason)
def handle_connection_failure(self, reason):
if isinstance(self.factory, WebSocketFactory) :
self.connect_poll()
else :
self.handler.on_connection_failed(reason)
def connect_ws(self):
log.msg('WebSocket protocol has been selected. URL: {0}; HOST: {1}; PORT: {2};'.format(self.ws_url, self.ws_host, self.ws_port))
factory = WebSocketFactory(self)
factory.timestamp = self.server_time
reactor.connectDeviceHive(self.ws_url, factory)
def connect_poll(self):
log.msg('Long-Polling protocol has been selected.')
factory = PollFactory(self)
factory.timestamp = self.server_time
factory.connect(self.url)
# begin IProtoHandler implementation
def on_apimeta(self, websocket_server, server_time):
self.handler.on_apimeta(websocket_server, server_time)
def on_connected(self):
log.msg('AutoFactory: Connection with {0} has been established.'.format(self.factory.url))
self.handler.on_connected()
def on_connection_failed(self, reason):
log.err('Sub-factory connection failure. Reason: {0}.'.format(reason))
self.handle_connection_failure(reason)
def on_closing_connection(self):
self.handler.on_closing_connection()
def on_command(self, device_id, command, finished):
self.handler.on_command(device_id, command, finished)
def on_failure(self, device_id, reason):
self.handler.on_failure(device_id, reason)
# end IProtoHandler implementation
# begin IProtoFactory implementation
def authenticate(self, device_id, device_key):
return self.subfactory(device_id, device_key)
def notify(self, notification, params, device_id = None, device_key = None):
return self.factory.notify(notification, params, device_id, device_key)
def subscribe(self, device_id = None, device_key = None):
return self.factory.subscribe(device_id, device_key)
def unsubscribe(self, device_id = None, device_key = None):
return self.factory.unsubscribe(device_id, device_key)
def device_save(self, info):
return self.factory.device_save(info)
def connect(self, url):
reactor.connectDeviceHive(url, self)
# end IProtoFactory implementation | Software/src/liv/iotConnectors/devicehive/devicehive/auto.py |
from zope.interface import implements
from twisted.python import log
from twisted.web.client import HTTP11ClientProtocol
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory, Protocol
from devicehive import ApiInfoRequest
from devicehive.interfaces import IProtoFactory, IProtoHandler
from devicehive.utils import TextDataConsumer, JsonDataConsumer, parse_url, parse_date
from devicehive.device.ws import WebSocketFactory
from devicehive.poll import PollFactory
__all__ = ['AutoProtocol', 'AutoFactory']
class AutoProtocol(HTTP11ClientProtocol):
    """HTTP client protocol that issues the API-INFO ('/info') request.

    Not intended for external use; results are relayed to the owning factory.
    """

    def __init__(self, factory):
        self.factory = factory

    def connectionMade(self):
        log.msg('AutoProtocol: Connection with {0} has been established.'.format(self.factory.url))
        request = ApiInfoRequest(self.factory.url, self.factory.host)
        self.request(request).addCallbacks(self.api_received, self.api_failed)

    def api_received(self, response):
        """Dispatch the '/info' HTTP response by status code."""
        if response.code != 200:
            # Non-200: drain the body as text and report the failure.
            def relay_error(reason):
                self.api_failed(reason)
            text_defer = Deferred()
            text_defer.addBoth(relay_error)
            response.deliverBody(TextDataConsumer(text_defer))
            return
        json_defer = Deferred()
        json_defer.addCallbacks(self.api_succeed, self.api_failed)
        response.deliverBody(JsonDataConsumer(json_defer))

    def api_succeed(self, resp):
        """Forward the decoded JSON payload to the factory."""
        self.factory.api_received(resp['webSocketServerUrl'], resp['serverTimestamp'])

    def api_failed(self, reason):
        """Forward the request failure to the factory."""
        self.factory.api_failed(reason)
class AutoFactory(ClientFactory):
    """
    Factory which first makes an '/info' request and then uses the information
    retrieved from the response to decide which DeviceHive protocol is more
    appropriate: WebSocket when the server advertises a usable WebSocket URL,
    long-polling otherwise.
    """

    implements(IProtoFactory, IProtoHandler)

    # Defaults; overwritten once '/info' has been processed.
    url = 'http://localhost'
    host = 'localhost'
    port = 80
    ws_url = 'http://localhost'
    ws_host = 'localhost'
    ws_port = 8020
    handler = None

    def __init__(self, handler):
        if not IProtoHandler.implementedBy(handler.__class__) :
            raise TypeError('The protocol handler has to conform to IProtoHandler interface.')
        self.handler = handler
        self.handler.factory = self
        # The delegate sub-factory (WebSocketFactory or PollFactory); only set
        # after the '/info' call has selected a protocol.
        self.factory = None

    def buildProtocol(self, addr):
        return AutoProtocol(self)

    def clientConnectionFailed(self, connector, reason):
        log.err('Failed to make "/info" call. Reason: {0}.'.format(reason))
        self.handle_connection_failure(reason)

    def api_received(self, wsurl, server_time):
        """Handle a successful '/info' response and pick a protocol."""
        log.msg('The call to "/info" api has finished successfully.')
        try :
            self.server_time = parse_date(server_time)
        except ValueError :
            log.msg('Failed to parse a date-time string "{0}" returned from "/info" api call.'.format(server_time))
            # BUG FIX: 'datetime' was never imported at module scope, so this
            # fallback branch raised NameError. Import locally instead.
            from datetime import datetime
            self.server_time = datetime.utcnow()
        if wsurl is not None :
            wsurl = wsurl.strip().replace('ws://', 'http://', 1).replace('wss://', 'https://', 1)
            if wsurl.startswith('http://') or wsurl.startswith('https://') :
                self.ws_url, self.ws_host, self.ws_port = parse_url(wsurl)
                self.handler.on_apimeta(wsurl, self.server_time)
                self.connect_ws()
                return
        # No usable WebSocket URL: fall back to long-polling.
        self.handler.on_apimeta(wsurl, self.server_time)
        self.connect_poll()

    def api_failed(self, reason):
        log.err('The call to "/info" api failed. Reason: {0}.'.format(reason))
        self.on_failure(None, reason)

    def handle_connection_failure(self, reason):
        # If the WebSocket transport failed, retry with long-polling before
        # reporting the failure to the handler.
        if isinstance(self.factory, WebSocketFactory) :
            self.connect_poll()
        else :
            self.handler.on_connection_failed(reason)

    def connect_ws(self):
        log.msg('WebSocket protocol has been selected. URL: {0}; HOST: {1}; PORT: {2};'.format(self.ws_url, self.ws_host, self.ws_port))
        factory = WebSocketFactory(self)
        factory.timestamp = self.server_time
        reactor.connectDeviceHive(self.ws_url, factory)

    def connect_poll(self):
        log.msg('Long-Polling protocol has been selected.')
        factory = PollFactory(self)
        factory.timestamp = self.server_time
        factory.connect(self.url)

    # begin IProtoHandler implementation
    def on_apimeta(self, websocket_server, server_time):
        self.handler.on_apimeta(websocket_server, server_time)

    def on_connected(self):
        log.msg('AutoFactory: Connection with {0} has been established.'.format(self.factory.url))
        self.handler.on_connected()

    def on_connection_failed(self, reason):
        log.err('Sub-factory connection failure. Reason: {0}.'.format(reason))
        self.handle_connection_failure(reason)

    def on_closing_connection(self):
        self.handler.on_closing_connection()

    def on_command(self, device_id, command, finished):
        self.handler.on_command(device_id, command, finished)

    def on_failure(self, device_id, reason):
        self.handler.on_failure(device_id, reason)
    # end IProtoHandler implementation

    # begin IProtoFactory implementation
    def authenticate(self, device_id, device_key):
        # BUG FIX: 'self.subfactory' is not defined anywhere; delegate to the
        # selected sub-factory like every other IProtoFactory method here.
        return self.factory.authenticate(device_id, device_key)

    def notify(self, notification, params, device_id = None, device_key = None):
        return self.factory.notify(notification, params, device_id, device_key)

    def subscribe(self, device_id = None, device_key = None):
        return self.factory.subscribe(device_id, device_key)

    def unsubscribe(self, device_id = None, device_key = None):
        return self.factory.unsubscribe(device_id, device_key)

    def device_save(self, info):
        return self.factory.device_save(info)

    def connect(self, url):
        reactor.connectDeviceHive(url, self)
    # end IProtoFactory implementation
import RPi.GPIO as GPIO
import argparse
import ast
import logging as l
from marcs.CubeSolver.logger import log, set_log_level
from pathlib import Path
from time import sleep
class Winding:
    """One motor winding driven through a pair of GPIO output pins."""

    def __init__(self, pin1: int, pin2: int):
        self.pin1 = pin1
        self.pin2 = pin2
        self.energized = 0  # -1, 0 or 1: current drive direction
        for pin in (pin1, pin2):
            GPIO.setup(pin, GPIO.OUT)
        log(l.DEBUG, f"winding instantiated with pins [{pin1}, {pin2}]")

    def energize(self, direction: int = 1):
        """Drive current through the winding: 1 forward, -1 reverse, 0 off."""
        if direction == 0:
            self.de_energize()
            return
        if direction not in (1, -1):
            raise ValueError(f"direction is either 1 or -1, got {direction}")
        forward = direction == 1
        GPIO.output(self.pin1, GPIO.HIGH if forward else GPIO.LOW)
        GPIO.output(self.pin2, GPIO.LOW if forward else GPIO.HIGH)
        self.energized = direction

    def de_energize(self):
        """Pull both pins low so no current flows."""
        GPIO.output(self.pin1, GPIO.LOW)
        GPIO.output(self.pin2, GPIO.LOW)
        self.energized = 0
class Stepper:
    """Bipolar stepper motor driven through two windings.

    The motor position is modeled as an 8-entry state machine (the half-step
    sequence); full steps use only the even states. The current state is
    derived from the windings' energization, so it persists only while the
    windings are powered — hence the store/load-to-file helpers.
    """

    def __init__(self, pinA1: int, pinA2: int, pinB1: int, pinB2: int):
        self.windingA = Winding(pinA1, pinA2)
        self.windingB = Winding(pinB1, pinB2)
        self.cached_state = -1  # -1 means disarm() has not cached a state yet
        # Maps "[A.energized, B.energized]" -> position in the half-step cycle.
        self.state_dict = {
            "[1, 0]": 0,
            "[1, 1]": 1,
            "[0, 1]": 2,
            "[-1, 1]": 3,
            "[-1, 0]": 4,
            "[-1, -1]": 5,
            "[0, -1]": 6,
            "[1, -1]": 7,
            "[0, 0]": 8,  # Both windings off; this won't happen at runtime
        }
        self.inverted_state_dict = {v: ast.literal_eval(k) for k, v in self.state_dict.items()}

    @property
    def state(self):
        """Current position in the step cycle, derived from the windings."""
        return self.state_dict[str([self.windingA.energized, self.windingB.energized])]

    @state.setter
    def state(self, state):
        states = self.inverted_state_dict.get(state)
        log(l.DEBUG, f"Setting windings to {states}")
        self.windingA.energize(states[0])
        self.windingB.energize(states[1])

    def disarm(self):
        """Cut power to both windings, caching the state for arm()."""
        self.cached_state = self.state
        self.windingA.de_energize()
        self.windingB.de_energize()

    def arm(self):
        """Re-energize the windings to the state cached by the last disarm()."""
        if not self.cached_state == -1:
            self.state = self.cached_state
        else:
            log(l.DEBUG, "Can't arm before disarm, ignoring")

    def get_next_state(self, half_step: bool = True, direction: str = "CW"):
        """Return the state one (half or full) step away in `direction`."""
        if direction not in ("CW", "CCW"):
            raise ValueError(f"direction is either 'CW' or 'CCW', got '{direction}'")
        if not half_step:
            full_step_states = [0, 2, 4, 6]
            # Halve to land on the even (full-step) cycle, then move one full
            # step with wraparound.
            if direction == "CCW":
                return full_step_states[(int(self.state / 2) + 1) % 4]
            return full_step_states[(int(self.state / 2) - 1) % 4]
        if direction == "CCW":
            return (self.state + 1) % 8
        return (self.state + 15) % 8  # +15 ≡ -1 (mod 8): one half-step back

    @staticmethod
    def create_state_file_if_needed(filename: str):
        """Ensure states/<filename> exists, seeding it with the sentinel -1."""
        if not Path("states").is_dir():
            Path("states").mkdir(parents=False)
        if not Path("states", filename).exists():
            with open(Path("states", filename), "w") as fp:
                fp.write("-1")

    def store_state(self, filename: str):
        """Persist the current state to states/<filename>."""
        self.create_state_file_if_needed(filename)
        with open(Path("states", filename), "w") as fp:
            fp.write(f"{self.state}")

    def load_state(self, filename: str):
        """Energize the windings to match the state saved in states/<filename>."""
        if not Path("states", filename).exists():
            # BUG FIX: the message lost its placeholder; name the missing file.
            raise FileNotFoundError(f"No state file created for {filename}")
        with open(Path("states", filename), "r") as fp:
            state = fp.read()
        try:
            state = int(state)
        except ValueError:
            raise ValueError(f"read state '{state}' is not an integer")
        if state not in range(8):
            # BUG FIX: the message lost its placeholder; name the bad file.
            log(l.WARNING, f"Read invalid state '{state}' from '{filename}', not loading")
        else:
            winding_states = self.inverted_state_dict[state]
            self.windingA.energize(winding_states[0])
            self.windingB.energize(winding_states[1])

    def step(self, half_step: bool, sleep_time: float, direction: str = "CW", n: int = 1):
        """Advance the motor n steps, sleeping sleep_time between states."""
        for _ in range(n):
            next_state = self.get_next_state(half_step=half_step, direction=direction)
            log(l.DEBUG, f"state: {next_state}")
            self.state = next_state
            sleep(sleep_time)
if __name__ == "__main__":
    argParser = argparse.ArgumentParser(description="Stepper motor driver")
    argParser.add_argument("--spin", default=False, dest="spin", action="store_true", help="spin motor continuously")
    argParser.add_argument("--step", default=False, dest="step", action="store_true", help="Do a single step of the motor")
    argParser.add_argument("--half-step", default=False, dest="half_step", action="store_true", help="Do a half step of the motor")
    argParser.add_argument("--turn", default=False, dest="full_turn", action="store_true", help="Do one full turn")
    argParser.add_argument("-d", "--direction", type=str, default="CW", choices=["CW", "CCW"], help="spin CW or CCW")
    args = argParser.parse_args()
    set_log_level(l.DEBUG)
    log(l.INFO, "setting up GPIO")
    GPIO.setmode(GPIO.BCM)
    # Driver pin assignment (BCM numbering).
    B1N1 = 24
    B1N2 = 23
    A1N1 = 25
    A1N2 = 8
    STBY = 7
    GPIO.setup(STBY, GPIO.OUT)
    GPIO.output(STBY, GPIO.HIGH)  # Standby needs to be high or motor is braked
    log(l.INFO, "motor armed")
    stepper = Stepper(A1N1, A1N2, B1N1, B1N2)
    wait_time = 1e-2
    # BUG FIX: every step() call below was missing the required sleep_time
    # argument (guaranteed TypeError), and the parsed --direction flag was
    # never passed through. step() sleeps internally, so the extra sleep()
    # calls after it were dropped.
    if args.spin:
        log(l.INFO, "starting, press CTRL+C to exit")
        while True:
            try:
                stepper.step(half_step=False, sleep_time=wait_time, direction=args.direction)
            except KeyboardInterrupt:
                log(l.INFO, "Cleaning up GPIOS and exiting...")
                GPIO.cleanup()
                exit(0)
    elif args.step:
        stepper.step(half_step=False, sleep_time=wait_time, direction=args.direction)
    elif args.half_step:
        stepper.step(half_step=True, sleep_time=wait_time, direction=args.direction)
    elif args.full_turn:
        # NOTE(review): 200 half-steps is half a revolution on a 200-step
        # motor — confirm the intended step count for a full turn.
        for i in range(200):
            stepper.step(half_step=True, sleep_time=wait_time, direction=args.direction)
    else:
        argParser.error("Must specify one of '--spin', '--step', '--half-step' or '--turn'")
    GPIO.cleanup()
import argparse
import ast
import logging as l
from marcs.CubeSolver.logger import log, set_log_level
from pathlib import Path
from time import sleep
class Winding:
    """A single motor winding controlled via two GPIO output pins."""

    def __init__(self, pin1: int, pin2: int):
        self.pin1 = pin1
        self.pin2 = pin2
        self.energized = 0  # current drive direction: -1, 0 or 1
        for pin in [pin1, pin2]:
            GPIO.setup(pin, GPIO.OUT)
        log(l.DEBUG, f"winding instantiated with pins [{pin1}, {pin2}]")

    def energize(self, direction: int = 1):
        """Energize the winding (1 forward, -1 reverse, 0 = de-energize)."""
        if direction == 0:
            self.de_energize()
            return
        levels = {1: (GPIO.HIGH, GPIO.LOW), -1: (GPIO.LOW, GPIO.HIGH)}.get(direction)
        if levels is None:
            raise ValueError(f"direction is either 1 or -1, got {direction}")
        level1, level2 = levels
        GPIO.output(self.pin1, level1)
        GPIO.output(self.pin2, level2)
        self.energized = direction

    def de_energize(self):
        """Switch both pins low so no current flows."""
        GPIO.output(self.pin1, GPIO.LOW)
        GPIO.output(self.pin2, GPIO.LOW)
        self.energized = 0
class Stepper:
    """Bipolar stepper motor built from two windings.

    Position is an 8-entry half-step state machine; full steps use only the
    even states. State is derived from the windings, so it only survives
    while they are powered — the file helpers persist it across runs.
    """

    def __init__(self, pinA1: int, pinA2: int, pinB1: int, pinB2: int):
        self.windingA = Winding(pinA1, pinA2)
        self.windingB = Winding(pinB1, pinB2)
        self.cached_state = -1  # -1: nothing cached by disarm() yet
        # "[A.energized, B.energized]" -> position in the half-step cycle.
        self.state_dict = {
            "[1, 0]": 0,
            "[1, 1]": 1,
            "[0, 1]": 2,
            "[-1, 1]": 3,
            "[-1, 0]": 4,
            "[-1, -1]": 5,
            "[0, -1]": 6,
            "[1, -1]": 7,
            "[0, 0]": 8,  # Both windings off; won't happen at runtime
        }
        self.inverted_state_dict = {v: ast.literal_eval(k) for k, v in self.state_dict.items()}

    @property
    def state(self):
        """Current cycle position, derived from the winding energization."""
        return self.state_dict[str([self.windingA.energized, self.windingB.energized])]

    @state.setter
    def state(self, state):
        states = self.inverted_state_dict.get(state)
        log(l.DEBUG, f"Setting windings to {states}")
        self.windingA.energize(states[0])
        self.windingB.energize(states[1])

    def disarm(self):
        """Cut power to both windings, caching the state for arm()."""
        self.cached_state = self.state
        self.windingA.de_energize()
        self.windingB.de_energize()

    def arm(self):
        """Restore the winding state cached by the last disarm()."""
        if not self.cached_state == -1:
            self.state = self.cached_state
        else:
            log(l.DEBUG, "Can't arm before disarm, ignoring")

    def get_next_state(self, half_step: bool = True, direction: str = "CW"):
        """Return the state one (half or full) step away in `direction`."""
        if direction not in ("CW", "CCW"):
            raise ValueError(f"direction is either 'CW' or 'CCW', got '{direction}'")
        if not half_step:
            full_step_states = [0, 2, 4, 6]
            # Halve to index the even (full-step) cycle; modulo wraps around.
            if direction == "CCW":
                return full_step_states[(int(self.state / 2) + 1) % 4]
            return full_step_states[(int(self.state / 2) - 1) % 4]
        if direction == "CCW":
            return (self.state + 1) % 8
        return (self.state + 15) % 8  # +15 ≡ -1 (mod 8): one half-step back

    @staticmethod
    def create_state_file_if_needed(filename: str):
        """Ensure states/<filename> exists, seeded with the sentinel -1."""
        if not Path("states").is_dir():
            Path("states").mkdir(parents=False)
        if not Path("states", filename).exists():
            with open(Path("states", filename), "w") as fp:
                fp.write("-1")

    def store_state(self, filename: str):
        """Persist the current state to states/<filename>."""
        self.create_state_file_if_needed(filename)
        with open(Path("states", filename), "w") as fp:
            fp.write(f"{self.state}")

    def load_state(self, filename: str):
        """Energize the windings to the state saved in states/<filename>."""
        if not Path("states", filename).exists():
            # BUG FIX: restored the lost placeholder so the file is named.
            raise FileNotFoundError(f"No state file created for {filename}")
        with open(Path("states", filename), "r") as fp:
            state = fp.read()
        try:
            state = int(state)
        except ValueError:
            raise ValueError(f"read state '{state}' is not an integer")
        if state not in range(8):
            # BUG FIX: restored the lost placeholder so the file is named.
            log(l.WARNING, f"Read invalid state '{state}' from '{filename}', not loading")
        else:
            winding_states = self.inverted_state_dict[state]
            self.windingA.energize(winding_states[0])
            self.windingB.energize(winding_states[1])

    def step(self, half_step: bool, sleep_time: float, direction: str = "CW", n: int = 1):
        """Advance the motor n steps, sleeping sleep_time between states."""
        for _ in range(n):
            next_state = self.get_next_state(half_step=half_step, direction=direction)
            log(l.DEBUG, f"state: {next_state}")
            self.state = next_state
            sleep(sleep_time)
if __name__ == "__main__":
    argParser = argparse.ArgumentParser(description="Stepper motor driver")
    argParser.add_argument("--spin", default=False, dest="spin", action="store_true", help="spin motor continuously")
    argParser.add_argument("--step", default=False, dest="step", action="store_true", help="Do a single step of the motor")
    argParser.add_argument("--half-step", default=False, dest="half_step", action="store_true", help="Do a half step of the motor")
    argParser.add_argument("--turn", default=False, dest="full_turn", action="store_true", help="Do one full turn")
    argParser.add_argument("-d", "--direction", type=str, default="CW", choices=["CW", "CCW"], help="spin CW or CCW")
    args = argParser.parse_args()
    set_log_level(l.DEBUG)
    log(l.INFO, "setting up GPIO")
    GPIO.setmode(GPIO.BCM)
    # Driver pin assignment (BCM numbering).
    B1N1 = 24
    B1N2 = 23
    A1N1 = 25
    A1N2 = 8
    STBY = 7
    GPIO.setup(STBY, GPIO.OUT)
    GPIO.output(STBY, GPIO.HIGH)  # Standby needs to be high or motor is braked
    log(l.INFO, "motor armed")
    stepper = Stepper(A1N1, A1N2, B1N1, B1N2)
    wait_time = 1e-2
    # BUG FIX: every step() call below was missing the required sleep_time
    # argument (guaranteed TypeError), and the parsed --direction flag was
    # never passed through. step() sleeps internally, so the extra sleep()
    # calls after it were dropped.
    if args.spin:
        log(l.INFO, "starting, press CTRL+C to exit")
        while True:
            try:
                stepper.step(half_step=False, sleep_time=wait_time, direction=args.direction)
            except KeyboardInterrupt:
                log(l.INFO, "Cleaning up GPIOS and exiting...")
                GPIO.cleanup()
                exit(0)
    elif args.step:
        stepper.step(half_step=False, sleep_time=wait_time, direction=args.direction)
    elif args.half_step:
        stepper.step(half_step=True, sleep_time=wait_time, direction=args.direction)
    elif args.full_turn:
        # NOTE(review): 200 half-steps is half a revolution on a 200-step
        # motor — confirm the intended step count for a full turn.
        for i in range(200):
            stepper.step(half_step=True, sleep_time=wait_time, direction=args.direction)
    else:
        argParser.error("Must specify one of '--spin', '--step', '--half-step' or '--turn'")
    GPIO.cleanup()
from typing import Iterable, Dict, Tuple, List, Set, Optional, Any
import datetime
import hashlib
import io
import re
import boto3
from botocore.config import Config # type: ignore
from .config import S3EnvConfig
from ..data_store_util import wide
from ...api.data_store import ConfigEntity
from ...api.data_store.abc_backend import (
AbcDataStoreBackend,
Entity,
SUPPORTED_ACTIVITIES,
ACTIVITY_TEMPLATE_DEFINITION,
ACTIVITY_PROXY_CONFIGURATION,
ServiceIdConfigEntity,
ServiceColorTemplateEntity,
GatewayConfigEntity,
NamespaceTemplateEntity,
TemplateEntity,
)
from ....protect import RouteProtection
from ....msg import note, debug
# Hard cap on the size of any single stored object; uploads assert against it
# and listings skip anything at or above it.
MAX_CONTENT_SIZE = 4 * 1024 * 1024  # 4 MB
# Matches namespace, activity, purpose.
# Note that this doesn't have any matching for the base path or the version.
NAMESPACE_PATH_RE = re.compile(r'/namespace/([^/]+)/([^/]+)/(.*)$')
# Matches service, color, activity, purpose.
# Note that this doesn't have any matching for the base path or the version.
SERVICE_COLOR_PATH_RE = re.compile(r'/service/([^/]+)/([^/]+)/([^/]+)/(.*)$')
class ProcessingVersion:
    """Accumulates an activity's uploads in memory until they are committed to s3."""

    # Class-level counter used to mint unique in-progress version names.
    count = 0
    uploaded_data: Dict[str, Tuple[Entity, bytes]]
    __slots__ = ('name', 'activity', 'uploaded_data', 'config')

    def __init__(self, config: S3EnvConfig, activity: str) -> None:
        # Not thread safe, but this shouldn't be run in multiple threads.
        self.name = '{0}-{1}'.format(activity, ProcessingVersion.count)
        ProcessingVersion.count += 1
        self.activity = activity
        self.config = config
        self.uploaded_data = {}

    def add_entity(self, entity: Entity, contents: str) -> None:
        """Register an entity's contents for upload under this version."""
        encoded = contents.encode('utf-8')
        assert len(encoded) < MAX_CONTENT_SIZE
        key = self.config.get_path(wide.get_entity_path(self.name, entity))
        self.uploaded_data[key] = (entity, encoded,)

    def get_final_version_name(self) -> str:
        """Name the final version from a UTC timestamp plus an md5 of the contents."""
        digest = hashlib.md5()
        # Iterate keys in sorted order so the digest is deterministic.
        for key in sorted(self.uploaded_data):
            digest.update(self.uploaded_data[key][1])
        stamp = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%d%H%M%S')
        return '{0}-{1}'.format(stamp, digest.hexdigest())

    def clear(self) -> None:
        """Drop all data pending upload for this version."""
        self.uploaded_data.clear()
class S3Backend(AbcDataStoreBackend):
    """
    This implementation uses per-S3 object to store each object.
    The stored objects is based on paths.

    - versions: The list of versions, by activity, is in the path 'version/(activity)/(id)'.
        The contents of the object is not important.  Only when something is committed is the id
        added into the list.  The ID is the md5 sum of the contents added to the version.
    - namespace entities: the files are stored in
        '(version)/namespace/(namespace)/(template or extracted)/(purpose)'.
    - service/color entities: the files are stored under the path
        '(version)/service/(service)/(color)/(template or extracted)/(purpose)'.
    """

    active_versions: Dict[str, ProcessingVersion]
    client: Optional[Any]  # mypy_boto3.s3.S3Client

    def __init__(self, config: S3EnvConfig) -> None:
        self.config = config
        self.client = None
        self.active_versions = {}

    def get_client(self) -> Any:
        """Get the S3 client, lazily creating it on first use."""
        if not self.client:
            self.client = boto3.session.Session(
                region_name=self.config.aws_region,
                profile_name=self.config.aws_profile,  # type: ignore
            ).client('s3', config=Config(
                max_pool_connections=1,
                retries=dict(max_attempts=2)
            ))
        return self.client

    # -----------------------------------------------------------------------
    # Read Actions

    def get_active_version(self, activity: str) -> str:
        """Return the most recently committed version for the activity."""
        if activity not in SUPPORTED_ACTIVITIES:
            raise ValueError(
                'invalid activity {0}; valid values are {1}'.format(activity, SUPPORTED_ACTIVITIES)
            )
        most_recent: Optional[datetime.datetime] = None
        # Fallback used when no version has ever been committed.
        active_version: str = activity + '-first'
        for version, last_modified in self._get_versions(activity):
            if not most_recent or last_modified > most_recent:
                active_version = version
                most_recent = last_modified
        return active_version

    def get_template_entities(self, version: str) -> Iterable[TemplateEntity]:
        """Yield every template entity stored under the version."""
        for key, _ in self._list_entries(
                self.config.get_path(wide.get_activity_prefix(
                    version, ACTIVITY_TEMPLATE_DEFINITION,
                ))
        ):
            entity = wide.parse_template_path(version, self.config.split_key_to_path(key))
            if entity:
                yield entity

    def get_config_entities(self, version: str) -> Iterable[ConfigEntity]:
        """Yield every proxy-configuration entity stored under the version."""
        for key, _ in self._list_entries(
                self.config.get_path(
                    wide.get_activity_prefix(version, ACTIVITY_PROXY_CONFIGURATION)
                )
        ):
            entity = wide.parse_config_path(version, self.config.split_key_to_path(key))
            if entity:
                yield entity

    def get_namespace_template_entities(
            self, version: str, namespace: Optional[str] = None,
            protection: Optional[RouteProtection] = None,
            purpose: Optional[str] = None,
    ) -> Iterable[NamespaceTemplateEntity]:
        """Yield namespace templates, filtered by any non-None arguments."""
        for key, _ in self._list_entries(
                self.config.get_path(wide.get_namespace_template_prefix(version))
        ):
            n_s = wide.parse_namespace_template_path(version, self.config.split_key_to_path(key))
            if not n_s:
                continue
            if (
                    (namespace is None or namespace == n_s.namespace)
                    and (protection is None or protection == n_s.protection)
                    and (purpose is None or purpose == n_s.purpose)
            ):
                yield n_s

    def get_gateway_config_entities(
            self, version: str, namespace: Optional[str] = None,
            protection: Optional[RouteProtection] = None, purpose: Optional[str] = None,
    ) -> Iterable[GatewayConfigEntity]:
        """Yield gateway configs, filtered by any non-None arguments."""
        for key, _ in self._list_entries(
                self.config.get_path(wide.get_gateway_config_prefix(version))
        ):
            g_c = wide.parse_gateway_config_path(version, self.config.split_key_to_path(key))
            if not g_c:
                continue
            if (
                    (namespace is None or namespace == g_c.namespace_id)
                    and (protection is None or protection == g_c.protection)
                    and (purpose is None or purpose == g_c.purpose)
            ):
                yield g_c

    def get_service_color_template_entities(
            self,
            version: str,
            namespace: Optional[str] = None,
            service: Optional[str] = None,
            color: Optional[str] = None,
            purpose: Optional[str] = None,
    ) -> Iterable[ServiceColorTemplateEntity]:
        """Yield service/color templates, filtered by any non-None arguments."""
        for key, _ in self._list_entries(
                self.config.get_path(wide.get_service_color_template_prefix(version))
        ):
            s_c = wide.parse_service_color_template_path(
                version, self.config.split_key_to_path(key),
            )
            if not s_c:
                continue
            if (
                    (namespace is None or namespace == s_c.namespace)
                    and (service is None or service == s_c.service)
                    and (color is None or color == s_c.color)
                    and (purpose is None or purpose == s_c.purpose)
            ):
                yield s_c

    def get_service_id_config_entities(
            self,
            version: str,
            namespace_id: Optional[str] = None,
            service_id: Optional[str] = None,
            service: Optional[str] = None,
            color: Optional[str] = None,
            purpose: Optional[str] = None,
    ) -> Iterable[ServiceIdConfigEntity]:
        """Yield service-id configs, filtered by any non-None arguments."""
        for key, _ in self._list_entries(
                self.config.get_path(wide.get_service_id_config_prefix(version))
        ):
            s_c = wide.parse_service_id_config_path(version, self.config.split_key_to_path(key))
            if not s_c:
                debug('Skipped item {s}', s=key)
                continue
            if (
                    (namespace_id is None or namespace_id == s_c.namespace_id)
                    and (service_id is None or service_id == s_c.service_id)
                    and (service is None or service == s_c.service)
                    and (color is None or color == s_c.color)
                    and (purpose is None or purpose == s_c.purpose)
            ):
                yield s_c
            else:
                debug('Not match: {s}', s=s_c)

    def download(self, version: str, entity: Entity) -> str:
        """Fetch and decode the stored contents for the entity."""
        path = self.config.get_path(wide.get_entity_path(version, entity))
        return self._download(path)

    # -----------------------------------------------------------------------
    # Write Actions

    def start_changes(self, activity: str) -> str:
        """Begin an in-memory pending version; returns its temporary name."""
        if activity not in SUPPORTED_ACTIVITIES:
            raise ValueError(
                'invalid activity {0}; valid values are {1}'.format(activity, SUPPORTED_ACTIVITIES)
            )
        version = ProcessingVersion(self.config, activity)
        self.active_versions[version.name] = version
        return version.name

    def commit_changes(self, version: str) -> None:
        """Write the pending version's objects to S3 and publish it."""
        activity_version = self.active_versions[version]
        del self.active_versions[version]
        # Grab the last active version, so that it can be preserved when performing
        # an old-version purge.
        previously_active_version = self.get_active_version(activity_version.activity)
        # The data is only written when the commit happens.
        # That's the only way we'll make sure we have all the data necessary to
        # compute the checksum.  This can mean lots of extra memory usage, though,
        # so this isn't great.
        final_version = activity_version.get_final_version_name()
        uploaded_paths = []
        try:
            for entity, data in activity_version.uploaded_data.values():
                path = self.config.get_path(wide.get_entity_path(final_version, entity))
                self._upload(path, data)
                uploaded_paths.append(path)
            # Publishing the version reference makes the version visible.
            self._upload(
                self.config.get_path(wide.get_version_reference_path(
                    activity_version.activity, final_version,
                )),
                final_version.encode('utf-8')
            )
        except Exception:
            # Roll back whatever made it to S3 before re-raising.
            self._delete(uploaded_paths)
            raise
        # Clean up our memory before moving on.
        activity_version.clear()
        if self.config.purge_old_versions:
            self._clean_old_versions(
                activity_version.activity,
                self.config.purge_older_than_days,
                {final_version, previously_active_version},
            )

    def rollback_changes(self, version: str) -> None:
        """Performed on error, to revert any uploads."""
        # Because uploads are delayed until commit, this does nothing with
        # s3 store.
        if version in self.active_versions:
            del self.active_versions[version]

    def upload(self, version: str, entity: Entity, contents: str) -> None:
        """Stage an entity's contents into the pending version."""
        activity_version = self.active_versions[version]
        activity_version.add_entity(entity, contents)

    # -----------------------------------------------------------------------
    # Support

    def _get_versions(self, activity: str) -> Iterable[Tuple[str, datetime.datetime]]:
        """Yield (version, last-modified) for every committed version reference."""
        for key, when in self._list_entries(
                self.config.get_path(wide.get_version_reference_prefix(activity))
        ):
            version = wide.parse_version_reference_path(
                activity, self.config.split_key_to_path(key),
            )
            if version:
                yield version, when

    def _clean_old_versions(
            self, activity: str,
            purge_older_than_days: int,
            do_not_remove_versions: Set[str],
    ) -> None:
        """Delete versions older than the cutoff, sparing the protected set."""
        # This should remove any version older than (some old date), but always leave
        # the previously active version around, in case anything is actively pulling from it.
        older_than = (
            datetime.datetime.now(datetime.timezone.utc)
            - datetime.timedelta(days=purge_older_than_days)
        )
        for version, when in self._get_versions(activity):
            if version not in do_not_remove_versions and when < older_than:
                note('Removing old activity {a} version {v}', a=activity, v=version)
                to_delete = [
                    d[0]
                    for d in self._list_entries(
                        self.config.get_path(wide.get_activity_prefix(version, activity))
                    )
                ]
                # don't forget the reference to this version!
                to_delete.append(
                    self.config.get_path(wide.get_version_reference_path(activity, version))
                )
                self._delete(to_delete)

    def _list_entries(self, path: str) -> Iterable[Tuple[str, datetime.datetime]]:
        """Yield (key, last-modified) for every object under the prefix.

        Objects at or above MAX_CONTENT_SIZE are skipped.
        """
        debug("Listing entries under {p}", p=path)
        paginator = self.get_client().get_paginator('list_objects_v2')
        response_iterator = paginator.paginate(
            Bucket=self.config.bucket,
            EncodingType='url',
            Prefix=path,
            FetchOwner=False,
        )
        for page in response_iterator:
            if 'Contents' not in page:
                continue
            for info in page['Contents']:
                key = info.get('Key', '')
                modified = info.get('LastModified', None)
                if key and modified and info.get('Size', 0) < MAX_CONTENT_SIZE:
                    yield key, modified

    def _upload(self, path: str, contents: bytes) -> None:
        """Upload contents to the given key."""
        assert len(contents) < MAX_CONTENT_SIZE
        # Shouldn't be necessary due to the construction of the path argument...
        # this is defensive coding.
        path = path.lstrip('/')
        # CONSISTENCY FIX: route through the project logger instead of print().
        debug('Uploading {p}', p=path)
        inp = io.BytesIO(contents)
        self.get_client().upload_fileobj(inp, self.config.bucket, path)

    def _download(self, path: str) -> str:
        """Download the object at the key and decode it as UTF-8."""
        # Shouldn't be necessary due to the construction of the path argument...
        # this is defensive coding.
        path = path.lstrip('/')
        out = io.BytesIO()
        self.get_client().download_fileobj(self.config.bucket, path, out)
        return out.getvalue().decode('utf-8')

    def _delete(self, keys: List[str]) -> None:
        """Delete the given keys, batching per S3's 1000-key request limit."""
        # BUG FIX: the previous version raised for more than 1000 keys;
        # delete_objects accepts at most 1000 keys per request, so issue
        # one request per batch instead.
        client = self.get_client()
        for start in range(0, len(keys), 1000):
            batch = keys[start:start + 1000]
            client.delete_objects(
                Bucket=self.config.bucket,
                Delete={
                    'Objects': [{'Key': p} for p in batch],
                },
            )
from typing import Iterable, Dict, Tuple, List, Set, Optional, Any
import datetime
import hashlib
import io
import re
import boto3
from botocore.config import Config # type: ignore
from .config import S3EnvConfig
from ..data_store_util import wide
from ...api.data_store import ConfigEntity
from ...api.data_store.abc_backend import (
AbcDataStoreBackend,
Entity,
SUPPORTED_ACTIVITIES,
ACTIVITY_TEMPLATE_DEFINITION,
ACTIVITY_PROXY_CONFIGURATION,
ServiceIdConfigEntity,
ServiceColorTemplateEntity,
GatewayConfigEntity,
NamespaceTemplateEntity,
TemplateEntity,
)
from ....protect import RouteProtection
from ....msg import note, debug
MAX_CONTENT_SIZE = 4 * 1024 * 1024 # 4 MB
# Matches namespace, activity, purpose
# Note that this doesn't have any matching for the base path or the version.
NAMESPACE_PATH_RE = re.compile(r'/namespace/([^/]+)/([^/]+)/(.*)$')
# Matches service, color, activity, purpose
# Note that this doesn't have any matching for the base path or the version.
SERVICE_COLOR_PATH_RE = re.compile(r'/service/([^/]+)/([^/]+)/([^/]+)/(.*)$')
class ProcessingVersion:
    """Processing handler for an activity, before it's committed to s3."""
    # Class-level counter used to mint unique in-progress version names.
    count = 0
    uploaded_data: Dict[str, Tuple[Entity, bytes]]
    __slots__ = ('name', 'activity', 'uploaded_data', 'config')
    def __init__(self, config: S3EnvConfig, activity: str) -> None:
        # This isn't thread safe, but additionally this shouldn't be run in multiple threads.
        self.name = '{0}-{1}'.format(activity, ProcessingVersion.count)
        self.activity = activity
        self.config = config
        ProcessingVersion.count += 1
        self.uploaded_data = {}
    def add_entity(self, entity: Entity, contents: str) -> None:
        """Add an entity into this version, keyed by its eventual storage path."""
        data = contents.encode('utf-8')
        # Objects are capped at MAX_CONTENT_SIZE each.
        assert len(data) < MAX_CONTENT_SIZE
        self.uploaded_data[self.config.get_path(
            wide.get_entity_path(self.name, entity),
        )] = (entity, data,)
    def get_final_version_name(self) -> str:
        """Get the final name for this version.  It's based on a hash of the contents,
        and a UTC timestamp (YYYYMMDDhhmmss-md5)."""
        hashing = hashlib.md5()
        # Sort the keys so the digest is independent of insertion order.
        keys = list(self.uploaded_data.keys())
        keys.sort()
        for key in keys:
            hashing.update(self.uploaded_data[key][1])
        now = datetime.datetime.now(datetime.timezone.utc)
        return "{y:04d}{mo:02d}{d:02d}{hr:02d}{mi:02d}{s:02d}-{hs}".format(
            hs=hashing.hexdigest(),
            y=now.year, mo=now.month, d=now.day, hr=now.hour, mi=now.minute, s=now.second,
        )
    def clear(self) -> None:
        """Clear this version's pending data to upload."""
        self.uploaded_data.clear()
class S3Backend(AbcDataStoreBackend):
    """
    This implementation uses per-S3 object to store each object.
    The stored objects is based on paths.
    - versions: The list of versions, by activity, is in the path 'version/(activity)/(id)'.
    The contents of the object is not important. Only when something is committed is the id
    added into the list. The ID is the md5 sum of the contents added to the version.
    - namespace entities: the files are stored in
    '(version)/namespace/(namespace)/(template or extracted)/(purpose)'.
    - service/color entities: the files are stored under the path
    '(version)/service/(service)/(color)/(template or extracted)/(purpose)'.
    """
    # In-flight (uncommitted) versions created by start_changes, keyed by
    # their temporary version name.
    active_versions: Dict[str, ProcessingVersion]
    # Lazily created boto3 S3 client; see get_client().
    client: Optional[Any]  # mypy_boto3.s3.S3Client

    def __init__(self, config: S3EnvConfig) -> None:
        self.config = config
        self.client = None
        self.active_versions = {}

    def get_client(self) -> Any:
        """Get the S3 client."""
        if not self.client:
            self.client = boto3.session.Session(
                region_name=self.config.aws_region,
                profile_name=self.config.aws_profile,  # type: ignore
            ).client('s3', config=Config(
                max_pool_connections=1,
                retries=dict(max_attempts=2)
            ))
        return self.client

    # -----------------------------------------------------------------------
    # Read Actions

    def get_active_version(self, activity: str) -> str:
        """Return the most recently committed version name for `activity`.

        Falls back to '<activity>-first' when no version has been committed.
        """
        if activity not in SUPPORTED_ACTIVITIES:
            raise ValueError(
                'invalid activity {0}; valid values are {1}'.format(activity, SUPPORTED_ACTIVITIES)
            )
        most_recent: Optional[datetime.datetime] = None
        active_version: str = activity + '-first'
        # "Active" means the version reference object with the newest
        # LastModified timestamp.
        for version, last_modified in self._get_versions(activity):
            if not most_recent or last_modified > most_recent:
                active_version = version
                most_recent = last_modified
        return active_version

    def get_template_entities(self, version: str) -> Iterable[TemplateEntity]:
        """Yield all template entities stored under `version`."""
        for key, _ in self._list_entries(
            self.config.get_path(wide.get_activity_prefix(
                version, ACTIVITY_TEMPLATE_DEFINITION,
            ))
        ):
            entity = wide.parse_template_path(version, self.config.split_key_to_path(key))
            if entity:
                yield entity

    def get_config_entities(self, version: str) -> Iterable[ConfigEntity]:
        """Yield all proxy-configuration entities stored under `version`."""
        for key, _ in self._list_entries(
            self.config.get_path(
                wide.get_activity_prefix(version, ACTIVITY_PROXY_CONFIGURATION)
            )
        ):
            entity = wide.parse_config_path(version, self.config.split_key_to_path(key))
            if entity:
                yield entity

    def get_namespace_template_entities(
            self, version: str, namespace: Optional[str] = None,
            protection: Optional[RouteProtection] = None,
            purpose: Optional[str] = None,
    ) -> Iterable[NamespaceTemplateEntity]:
        """Yield namespace template entities in `version`; a None filter
        argument matches everything."""
        for key, _ in self._list_entries(
            self.config.get_path(wide.get_namespace_template_prefix(version))
        ):
            n_s = wide.parse_namespace_template_path(version, self.config.split_key_to_path(key))
            if not n_s:
                continue
            if (
                    (namespace is None or namespace == n_s.namespace)
                    and (protection is None or protection == n_s.protection)
                    and (purpose is None or purpose == n_s.purpose)
            ):
                yield n_s

    def get_gateway_config_entities(
            self, version: str, namespace: Optional[str] = None,
            protection: Optional[RouteProtection] = None, purpose: Optional[str] = None,
    ) -> Iterable[GatewayConfigEntity]:
        """Yield gateway configuration entities in `version`; a None filter
        argument matches everything."""
        for key, _ in self._list_entries(
            self.config.get_path(wide.get_gateway_config_prefix(version))
        ):
            g_c = wide.parse_gateway_config_path(version, self.config.split_key_to_path(key))
            if not g_c:
                continue
            if (
                    (namespace is None or namespace == g_c.namespace_id)
                    and (protection is None or protection == g_c.protection)
                    and (purpose is None or purpose == g_c.purpose)
            ):
                yield g_c

    def get_service_color_template_entities(
            self,
            version: str,
            namespace: Optional[str] = None,
            service: Optional[str] = None,
            color: Optional[str] = None,
            purpose: Optional[str] = None,
    ) -> Iterable[ServiceColorTemplateEntity]:
        """Yield service/color template entities in `version`; a None filter
        argument matches everything."""
        for key, _ in self._list_entries(
            self.config.get_path(wide.get_service_color_template_prefix(version))
        ):
            s_c = wide.parse_service_color_template_path(
                version, self.config.split_key_to_path(key),
            )
            if not s_c:
                continue
            if (
                    (namespace is None or namespace == s_c.namespace)
                    and (service is None or service == s_c.service)
                    and (color is None or color == s_c.color)
                    and (purpose is None or purpose == s_c.purpose)
            ):
                yield s_c

    def get_service_id_config_entities(
            self,
            version: str,
            namespace_id: Optional[str] = None,
            service_id: Optional[str] = None,
            service: Optional[str] = None,
            color: Optional[str] = None,
            purpose: Optional[str] = None,
    ) -> Iterable[ServiceIdConfigEntity]:
        """Yield service-id configuration entities in `version`; a None
        filter argument matches everything.  Non-matching and unparsable
        keys are logged at debug level."""
        for key, _ in self._list_entries(
            self.config.get_path(wide.get_service_id_config_prefix(version))
        ):
            s_c = wide.parse_service_id_config_path(version, self.config.split_key_to_path(key))
            if not s_c:
                debug('Skipped item {s}', s=key)
                continue
            if (
                    (namespace_id is None or namespace_id == s_c.namespace_id)
                    and (service_id is None or service_id == s_c.service_id)
                    and (service is None or service == s_c.service)
                    and (color is None or color == s_c.color)
                    and (purpose is None or purpose == s_c.purpose)
            ):
                yield s_c
            else:
                debug('Not match: {s}', s=s_c)

    def download(self, version: str, entity: Entity) -> str:
        """Fetch the stored contents for `entity` within `version`."""
        path = self.config.get_path(wide.get_entity_path(version, entity))
        return self._download(path)

    # -----------------------------------------------------------------------
    # Write Actions

    def start_changes(self, activity: str) -> str:
        """Begin a new in-memory version for `activity` and return its
        temporary name (used by upload/commit/rollback)."""
        if activity not in SUPPORTED_ACTIVITIES:
            raise ValueError(
                'invalid activity {0}; valid values are {1}'.format(activity, SUPPORTED_ACTIVITIES)
            )
        version = ProcessingVersion(self.config, activity)
        self.active_versions[version.name] = version
        return version.name

    def commit_changes(self, version: str) -> None:
        """Write the pending version's entities to S3 and publish its
        version reference; on failure, best-effort delete what was uploaded.
        """
        activity_version = self.active_versions[version]
        del self.active_versions[version]
        # Grab the last active version, so that it can be preserved when performing
        # an old-version purge.
        previously_active_version = self.get_active_version(activity_version.activity)
        # The data is only written when the commit happens.
        # That's the only way we'll make sure we have all the data necessary to
        # compute the checksum.  This can mean lots of extra memory usage, though,
        # so this isn't great.
        final_version = activity_version.get_final_version_name()
        uploaded_paths = []
        try:
            for entity, data in activity_version.uploaded_data.values():
                path = self.config.get_path(wide.get_entity_path(final_version, entity))
                self._upload(path, data)
                uploaded_paths.append(path)
            # Publishing the version reference is the last step; readers only
            # see the version once this object exists.
            self._upload(
                self.config.get_path(wide.get_version_reference_path(
                    activity_version.activity, final_version,
                )),
                final_version.encode('utf-8')
            )
        except Exception:
            self._delete(uploaded_paths)
            raise
        # Clean up our memory before moving on.
        activity_version.clear()
        if self.config.purge_old_versions:
            self._clean_old_versions(
                activity_version.activity,
                self.config.purge_older_than_days,
                {final_version, previously_active_version},
            )

    def rollback_changes(self, version: str) -> None:
        """Performed on error, to revert any uploads."""
        # Because uploads are delayed until commit, this does nothing with
        # s3 store.
        if version in self.active_versions:
            del self.active_versions[version]

    def upload(self, version: str, entity: Entity, contents: str) -> None:
        """Buffer `contents` for `entity` in the pending version; nothing
        touches S3 until commit_changes()."""
        activity_version = self.active_versions[version]
        activity_version.add_entity(entity, contents)

    # -----------------------------------------------------------------------
    # Support

    def _get_versions(self, activity: str) -> Iterable[Tuple[str, datetime.datetime]]:
        """Yield (version name, last-modified time) for each committed
        version reference of `activity`."""
        for key, when in self._list_entries(
            self.config.get_path(wide.get_version_reference_prefix(activity))
        ):
            version = wide.parse_version_reference_path(
                activity, self.config.split_key_to_path(key),
            )
            if version:
                yield version, when

    def _clean_old_versions(
            self, activity: str,
            purge_older_than_days: int,
            do_not_remove_versions: Set[str],
    ) -> None:
        # This should remove any version older than (some old date), but always leave
        # the previously active version around, in case anything is actively pulling from it.
        # That is, delete all versions before date except for previously_active_version and
        # final_version.
        older_than = (
            datetime.datetime.now(datetime.timezone.utc)
            - datetime.timedelta(days=purge_older_than_days)
        )
        for version, when in self._get_versions(activity):
            if version not in do_not_remove_versions and when < older_than:
                note('Removing old activity {a} version {v}', a=activity, v=version)
                to_delete = [
                    d[0]
                    for d in self._list_entries(
                        self.config.get_path(wide.get_activity_prefix(version, activity))
                    )
                ]
                # don't forget the reference to this version!
                to_delete.append(
                    self.config.get_path(wide.get_version_reference_path(activity, version))
                )
                self._delete(to_delete)

    def _list_entries(self, path: str) -> Iterable[Tuple[str, datetime.datetime]]:
        """Yield (key, last-modified) for every object under `path`,
        skipping objects at or above MAX_CONTENT_SIZE."""
        debug("Listing entries under {p}", p=path)
        paginator = self.get_client().get_paginator('list_objects_v2')
        response_iterator = paginator.paginate(
            Bucket=self.config.bucket,
            EncodingType='url',
            Prefix=path,
            FetchOwner=False,
        )
        for page in response_iterator:
            if 'Contents' not in page:
                continue
            for info in page['Contents']:
                key = info.get('Key', '')
                modified = info.get('LastModified', None)
                if key and modified and info.get('Size', 0) < MAX_CONTENT_SIZE:
                    yield key, modified

    def _upload(self, path: str, contents: bytes) -> None:
        """Store `contents` at the given key in the configured bucket."""
        assert len(contents) < MAX_CONTENT_SIZE
        # Shouldn't be necessary due to the construction of the path argument...
        # this is defensive coding.
        while path[0] == '/':
            path = path[1:]
        # NOTE(review): consider routing this through note()/debug() instead
        # of a bare print.
        print("Uploading {0}".format(path))
        inp = io.BytesIO(contents)
        self.get_client().upload_fileobj(inp, self.config.bucket, path)

    def _download(self, path: str) -> str:
        """Fetch the object at `path` and return it as UTF-8 text."""
        # Shouldn't be necessary due to the construction of the path argument...
        # this is defensive coding.
        while path[0] == '/':
            path = path[1:]
        out = io.BytesIO()
        self.get_client().download_fileobj(self.config.bucket, path, out)
        return out.getvalue().decode('utf-8')

    def _delete(self, keys: List[str]) -> None:
        """Batch-delete the given keys; no-op when the list is empty."""
        if len(keys) <= 0:
            return
        # delete_objects accepts at most 1000 keys per request.
        if len(keys) > 1000:
            raise Exception("Cannot handle > 1000 paths right now.")
        self.get_client().delete_objects(
            Bucket=self.config.bucket,
            Delete={
                'Objects': [{'Key': p} for p in keys],
            }
) | 0.846117 | 0.150247 |
import cv2
import numpy as np
from openvino.inference_engine import IECore,IENetwork
class FacialLandmarksDetectionModel:
    '''
    Class for the Facial Landmarks Detection Model.

    Wraps an OpenVINO landmark-regression network and crops the left and
    right eye regions out of a face image.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        # `model_name` is the path to the IR .xml file; the .bin weights are
        # assumed to sit next to it with the same stem.
        self.model_name = model_name
        self.device = device
        self.extensions = extensions  # NOTE(review): stored but never used below
        self.model_structure = self.model_name
        self.model_weights = self.model_name.split('.')[0]+'.bin'
        self.plugin = None
        self.network = None
        self.exec_net = None
        self.input_name = None
        self.input_shape = None
        self.output_names = None
        self.output_shape = None
        try:
            self.model=IENetwork(self.model_structure, self.model_weights)
        except Exception as e:
            raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
        # Single-input/single-output network: grab the first of each.
        self.input_name=next(iter(self.model.inputs))
        self.input_shape=self.model.inputs[self.input_name].shape
        self.output_name=next(iter(self.model.outputs))
        self.output_shape=self.model.outputs[self.output_name].shape

    def load_model(self):
        """Verify layer support on the target device and load the network."""
        self.plugin=IECore()
        supported_layers = self.plugin.query_network(network=self.model, device_name=self.device)
        unsupported_layers = [l for l in self.model.layers.keys() if l not in supported_layers]
        if len(unsupported_layers)!=0:
            print("unsupported layers found")
            exit(1)
        self.exec_net=self.plugin.load_network(network=self.model,device_name=self.device,num_requests=1)

    def predict(self, image):
        """Run inference on a face image and return
        (left_eye_crop, right_eye_crop, eye_coords) where eye_coords holds
        [xmin, ymin, xmax, ymax] per eye in image pixels."""
        self.processed_image=self.preprocess_input(image)
        outputs = self.exec_net.infer({self.input_name:self.processed_image})
        coords = self.preprocess_output(outputs)
        h=image.shape[0]
        w=image.shape[1]
        # Scale the normalized landmark coordinates back to pixels.
        coords = coords* np.array([w, h, w, h])
        coords = coords.astype(np.int32)
        # Fixed 20x20 box around each eye center.
        # NOTE(review): these offsets can go negative near the image border,
        # which makes the numpy slices below wrap around -- confirm inputs
        # are pre-cropped faces with enough margin.
        l_xmin=coords[0]-10
        l_xmax=coords[0]+10
        l_ymin=coords[1]-10
        l_ymax=coords[1]+10
        r_xmin=coords[2]-10
        r_xmax=coords[2]+10
        r_ymin=coords[3]-10
        r_ymax=coords[3]+10
        left_eye = image[l_ymin:l_ymax, l_xmin:l_xmax]
        right_eye = image[r_ymin:r_ymax, r_xmin:r_xmax]
        eye_coords = [[l_xmin,l_ymin,l_xmax,l_ymax], [r_xmin,r_ymin,r_xmax,r_ymax]]
        return left_eye, right_eye, eye_coords

    def check_model(self):
        raise NotImplementedError

    def preprocess_input(self, image):
        """Resize/transpose the image into the network's NCHW input layout."""
        # NOTE(review): converts BGR->RGB before inference -- confirm the
        # landmark model was trained on RGB input.
        image_ct = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.image=cv2.resize(image_ct,(self.input_shape[3],self.input_shape[2]))  ## cv2.resize(frame, (w, h))
        self.image=self.image.transpose((2, 0, 1))
        self.image=self.image.reshape(1, *self.image.shape)
        return self.image

    def preprocess_output(self, outputs):
        """Extract the four normalized eye-center coordinates
        (left_x, left_y, right_x, right_y) from the raw output blob."""
        res=outputs[self.output_name][0]
        lx = res[0].tolist()[0][0]
        ly = res[1].tolist()[0][0]
        rx = res[2].tolist()[0][0]
        ry = res[3].tolist()[0][0]
return(lx,ly,rx,ry) | src/facial_landmarks_detection.py | import cv2
import numpy as np
from openvino.inference_engine import IECore,IENetwork
class FacialLandmarksDetectionModel:
    '''
    Class for the Facial Landmarks Detection Model.

    Wraps an OpenVINO landmark-regression network and crops the left and
    right eye regions out of a face image.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        # `model_name` is the path to the IR .xml file; the .bin weights are
        # assumed to sit next to it with the same stem.
        self.model_name = model_name
        self.device = device
        self.extensions = extensions
        self.model_structure = self.model_name
        self.model_weights = self.model_name.split('.')[0]+'.bin'
        self.plugin = None
        self.network = None
        self.exec_net = None
        self.input_name = None
        self.input_shape = None
        self.output_names = None
        self.output_shape = None
        try:
            self.model=IENetwork(self.model_structure, self.model_weights)
        except Exception as e:
            raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
        # Single-input/single-output network: grab the first of each.
        self.input_name=next(iter(self.model.inputs))
        self.input_shape=self.model.inputs[self.input_name].shape
        self.output_name=next(iter(self.model.outputs))
        self.output_shape=self.model.outputs[self.output_name].shape

    def load_model(self):
        """Verify layer support on the target device and load the network."""
        self.plugin=IECore()
        supported_layers = self.plugin.query_network(network=self.model, device_name=self.device)
        unsupported_layers = [l for l in self.model.layers.keys() if l not in supported_layers]
        if len(unsupported_layers)!=0:
            print("unsupported layers found")
            exit(1)
        self.exec_net=self.plugin.load_network(network=self.model,device_name=self.device,num_requests=1)

    def predict(self, image):
        """Run inference on a face image and return
        (left_eye_crop, right_eye_crop, eye_coords) where eye_coords holds
        [xmin, ymin, xmax, ymax] per eye in image pixels."""
        self.processed_image = self.preprocess_input(image)
        outputs = self.exec_net.infer({self.input_name: self.processed_image})
        coords = self.preprocess_output(outputs)
        h = image.shape[0]
        w = image.shape[1]
        # Scale the normalized landmark coordinates back to pixels.
        coords = coords * np.array([w, h, w, h])
        coords = coords.astype(np.int32)
        # Fixed 20x20 box around each eye center.  Clamp the minimums at 0:
        # a negative value here would be interpreted by numpy as an index
        # from the end of the array, producing an empty or wrapped crop for
        # eyes near the image border.
        l_xmin = max(coords[0] - 10, 0)
        l_xmax = coords[0] + 10
        l_ymin = max(coords[1] - 10, 0)
        l_ymax = coords[1] + 10
        r_xmin = max(coords[2] - 10, 0)
        r_xmax = coords[2] + 10
        r_ymin = max(coords[3] - 10, 0)
        r_ymax = coords[3] + 10
        left_eye = image[l_ymin:l_ymax, l_xmin:l_xmax]
        right_eye = image[r_ymin:r_ymax, r_xmin:r_xmax]
        eye_coords = [[l_xmin, l_ymin, l_xmax, l_ymax], [r_xmin, r_ymin, r_xmax, r_ymax]]
        return left_eye, right_eye, eye_coords

    def check_model(self):
        raise NotImplementedError

    def preprocess_input(self, image):
        """Resize/transpose the image into the network's NCHW input layout."""
        # NOTE(review): converts BGR->RGB before inference -- confirm the
        # landmark model was trained on RGB input.
        image_ct = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.image = cv2.resize(image_ct, (self.input_shape[3], self.input_shape[2]))  ## cv2.resize(frame, (w, h))
        self.image = self.image.transpose((2, 0, 1))
        self.image = self.image.reshape(1, *self.image.shape)
        return self.image

    def preprocess_output(self, outputs):
        """Extract the four normalized eye-center coordinates
        (left_x, left_y, right_x, right_y) from the raw output blob."""
        res = outputs[self.output_name][0]
        lx = res[0].tolist()[0][0]
        ly = res[1].tolist()[0][0]
        rx = res[2].tolist()[0][0]
        ry = res[3].tolist()[0][0]
return(lx,ly,rx,ry) | 0.36523 | 0.158565 |
import logging
from karmabot.db import db_session
from karmabot.db.karma_transaction import KarmaTransaction
from karmabot.db.karma_user import KarmaUser
from karmabot.settings import KARMABOT_ID, MAX_POINTS, SLACK_CLIENT, SLACK_ID_FORMAT
from karmabot.slack import get_available_username, get_channel_name, post_msg
class GetUserInfoException(Exception):
    """Raised when Slack's users.info lookup fails for a user id."""
    pass
def _parse_karma_change(karma_change):
    """Split a (user_id, voting) tuple into (receiver, points).

    Slack-formatted ids ("<@U...>") are unwrapped to the bare id; anything
    else is treated as a plain name/channel and lower-cased.  Points are the
    count of '+' characters minus the count of '-' characters.
    """
    raw_id, votes = karma_change
    if SLACK_ID_FORMAT.match(raw_id):
        target = raw_id.strip("<>@")
    else:
        target = raw_id.strip(" #").lower()
    return target, votes.count("+") - votes.count("-")
def process_karma_changes(message, karma_changes):
    """Apply each parsed karma change from `message` and post the outcome.

    Stops processing entirely when the Karma user lookup fails (Slack API
    error); any other failure is reported back to the channel as text.
    """
    for receiver_id, points in map(_parse_karma_change, karma_changes):
        try:
            karma = Karma(
                giver_id=message.user_id,
                receiver_id=receiver_id,
                channel_id=message.channel_id,
            )
        except GetUserInfoException:
            return
        try:
            reply = karma.change_karma(points)
        except Exception as exc:
            reply = str(exc)
        post_msg(message.channel_id, reply)
class Karma:
    """Encapsulates one karma change between a giver and a receiver.

    Opens its own DB session; the session is closed at the end of
    change_karma(), so an instance is intended for a single change.
    """

    def __init__(self, giver_id, receiver_id, channel_id):
        self.session = db_session.create_session()
        self.giver = self.session.query(KarmaUser).get(giver_id)
        self.receiver = self.session.query(KarmaUser).get(receiver_id)
        self.channel_id = channel_id
        self.last_score_maxed_out = False
        # Lazily create KarmaUser rows for ids we have not seen before.
        if not self.giver:
            self.giver = self._create_karma_user(giver_id)
        if not self.receiver:
            self.receiver = self._create_karma_user(receiver_id)

    def _create_karma_user(self, user_id):
        """Look the user up via Slack and persist a new KarmaUser row.

        Raises GetUserInfoException when the Slack API reports an error.
        """
        user_info = SLACK_CLIENT.api_call("users.info", user=user_id)
        error = user_info.get("error")
        if error is not None:
            logging.info(f"Cannot get user info for {user_id} - error: {error}")
            raise GetUserInfoException
        slack_id = user_info["user"]["id"]
        username = get_available_username(user_info)
        new_user = KarmaUser(user_id=slack_id, username=username)
        self.session.add(new_user)
        self.session.commit()
        logging.info(f"Created new KarmaUser: {repr(new_user)}")
        return new_user

    def _calc_final_score(self, points):
        # Clamp the change to +/-MAX_POINTS and remember whether clamping
        # happened so the reply message can mention it.
        if abs(points) > MAX_POINTS:
            self.last_score_maxed_out = True
            return MAX_POINTS if points > 0 else -MAX_POINTS
        else:
            self.last_score_maxed_out = False
            return points

    def _create_msg_bot_self_karma(self, points) -> str:
        # Special-cased reply used when the bot itself receives karma.
        if points > 0:
            text = (
                f"Thanks {self.giver.username} for the extra karma"
                f", my karma is {self.receiver.karma_points} now"
            )
        else:
            text = (
                f"Not cool {self.giver.username} lowering my karma "
                f"to {self.receiver.karma_points}, but you are probably"
                f" right, I will work harder next time"
            )
        return text

    def _create_msg(self, points):
        """Build the standard reply describing the receiver's new score."""
        receiver_name = self.receiver.username
        poses = "'" if receiver_name.endswith("s") else "'s"
        action = "increase" if points > 0 else "decrease"
        text = (
            f"{receiver_name}{poses} karma {action}d to "
            f"{self.receiver.karma_points}"
        )
        if self.last_score_maxed_out:
            text += f" (= max {action} of {MAX_POINTS})"
        return text

    def _save_transaction(self, points):
        """Persist the change as a KarmaTransaction row and log it."""
        transaction = KarmaTransaction(
            giver_id=self.giver.user_id,
            receiver_id=self.receiver.user_id,
            channel=get_channel_name(self.channel_id),
            karma=points,
        )
        self.session.add(transaction)
        self.session.commit()
        # Re-query the newest row purely so the log line includes the
        # DB-assigned id.
        finished_transaction = (
            self.session.query(KarmaTransaction)
            .order_by(KarmaTransaction.id.desc())
            .first()
        )
        logging.info(repr(finished_transaction))

    def change_karma(self, points):
        """ Updates Karma in the database """
        if not isinstance(points, int):
            err = (
                "Program bug: change_karma should "
                "not be called with a non int for "
                "points arg!"
            )
            raise RuntimeError(err)
        try:
            if self.receiver.user_id == self.giver.user_id:
                raise ValueError("Sorry, cannot give karma to self")
            points = self._calc_final_score(points)
            self.receiver.karma_points += points
            self.session.commit()
            self._save_transaction(points)
            if self.receiver.user_id == KARMABOT_ID:
                return self._create_msg_bot_self_karma(points)
            else:
                return self._create_msg(points)
        finally:
            # Always log (and, below, close the session) even on failure.
            logging.info(
                (
                    f"[Karmachange] {self.giver.user_id} to "
                    f"{self.receiver.user_id}: {points}"
                )
            )
self.session.close() | src/karmabot/karma.py | import logging
from karmabot.db import db_session
from karmabot.db.karma_transaction import KarmaTransaction
from karmabot.db.karma_user import KarmaUser
from karmabot.settings import KARMABOT_ID, MAX_POINTS, SLACK_CLIENT, SLACK_ID_FORMAT
from karmabot.slack import get_available_username, get_channel_name, post_msg
class GetUserInfoException(Exception):
    """Signals that the Slack users.info API call failed."""
    pass
def _parse_karma_change(karma_change):
    """Split a (user_id, voting) tuple into (receiver, points)."""
    user_id, voting = karma_change
    if SLACK_ID_FORMAT.match(user_id):
        # "<@U123>" style Slack mention: unwrap to the bare user id.
        receiver = user_id.strip("<>@")
    else:
        # Plain name or channel reference; normalized to lower case.
        # NOTE(review): the original "?" marker suggests the intent here was
        # unconfirmed -- verify against the callers.
        receiver = user_id.strip(" #").lower()
    points = voting.count("+") - voting.count("-")
    return receiver, points
def process_karma_changes(message, karma_changes):
    """Apply each parsed karma change from `message` and post the outcome."""
    for karma_change in karma_changes:
        receiver_id, points = _parse_karma_change(karma_change)
        try:
            karma = Karma(
                giver_id=message.user_id,
                receiver_id=receiver_id,
                channel_id=message.channel_id,
            )
        except GetUserInfoException:
            # Slack user lookup failed: abandon the remaining changes too.
            return
        try:
            text = karma.change_karma(points)
        except Exception as exc:
            # Report the error back to the channel instead of crashing.
            text = str(exc)
        post_msg(message.channel_id, text)
class Karma:
    """One karma change between a giver and a receiver.

    Each instance opens its own DB session, which change_karma() closes
    when it finishes -- use a fresh instance per change.
    """

    def __init__(self, giver_id, receiver_id, channel_id):
        self.session = db_session.create_session()
        self.giver = self.session.query(KarmaUser).get(giver_id)
        self.receiver = self.session.query(KarmaUser).get(receiver_id)
        self.channel_id = channel_id
        self.last_score_maxed_out = False
        # Create KarmaUser rows on first sight of an id.
        if not self.giver:
            self.giver = self._create_karma_user(giver_id)
        if not self.receiver:
            self.receiver = self._create_karma_user(receiver_id)

    def _create_karma_user(self, user_id):
        """Resolve the user via Slack and persist a new KarmaUser row;
        raises GetUserInfoException when the API reports an error."""
        user_info = SLACK_CLIENT.api_call("users.info", user=user_id)
        error = user_info.get("error")
        if error is not None:
            logging.info(f"Cannot get user info for {user_id} - error: {error}")
            raise GetUserInfoException
        slack_id = user_info["user"]["id"]
        username = get_available_username(user_info)
        new_user = KarmaUser(user_id=slack_id, username=username)
        self.session.add(new_user)
        self.session.commit()
        logging.info(f"Created new KarmaUser: {repr(new_user)}")
        return new_user

    def _calc_final_score(self, points):
        # Clamp to +/-MAX_POINTS; last_score_maxed_out feeds _create_msg.
        if abs(points) > MAX_POINTS:
            self.last_score_maxed_out = True
            return MAX_POINTS if points > 0 else -MAX_POINTS
        else:
            self.last_score_maxed_out = False
            return points

    def _create_msg_bot_self_karma(self, points) -> str:
        # Reply used when the bot itself is the receiver.
        if points > 0:
            text = (
                f"Thanks {self.giver.username} for the extra karma"
                f", my karma is {self.receiver.karma_points} now"
            )
        else:
            text = (
                f"Not cool {self.giver.username} lowering my karma "
                f"to {self.receiver.karma_points}, but you are probably"
                f" right, I will work harder next time"
            )
        return text

    def _create_msg(self, points):
        """Standard reply describing the receiver's new score."""
        receiver_name = self.receiver.username
        poses = "'" if receiver_name.endswith("s") else "'s"
        action = "increase" if points > 0 else "decrease"
        text = (
            f"{receiver_name}{poses} karma {action}d to "
            f"{self.receiver.karma_points}"
        )
        if self.last_score_maxed_out:
            text += f" (= max {action} of {MAX_POINTS})"
        return text

    def _save_transaction(self, points):
        """Record the change as a KarmaTransaction row and log it."""
        transaction = KarmaTransaction(
            giver_id=self.giver.user_id,
            receiver_id=self.receiver.user_id,
            channel=get_channel_name(self.channel_id),
            karma=points,
        )
        self.session.add(transaction)
        self.session.commit()
        # Fetch the newest row so the log line shows the DB-assigned id.
        finished_transaction = (
            self.session.query(KarmaTransaction)
            .order_by(KarmaTransaction.id.desc())
            .first()
        )
        logging.info(repr(finished_transaction))

    def change_karma(self, points):
        """ Updates Karma in the database """
        if not isinstance(points, int):
            err = (
                "Program bug: change_karma should "
                "not be called with a non int for "
                "points arg!"
            )
            raise RuntimeError(err)
        try:
            if self.receiver.user_id == self.giver.user_id:
                raise ValueError("Sorry, cannot give karma to self")
            points = self._calc_final_score(points)
            self.receiver.karma_points += points
            self.session.commit()
            self._save_transaction(points)
            if self.receiver.user_id == KARMABOT_ID:
                return self._create_msg_bot_self_karma(points)
            else:
                return self._create_msg(points)
        finally:
            # Runs on success and failure alike; the session close follows.
            logging.info(
                (
                    f"[Karmachange] {self.giver.user_id} to "
                    f"{self.receiver.user_id}: {points}"
                )
            )
self.session.close() | 0.391871 | 0.086131 |
from urllib.request import unquote
from neomodel import db
from neomodel.exception import DoesNotExist, RequiredProperty, UniqueProperty
import grest.messages as msg
from grest.exceptions import HTTPException
from grest.utils import serialize
from grest.validation import validate_models
from grest.global_config import ENABLE_DELETE_ALL
def delete(self,
           request,
           primary_id,
           secondary_model_name=None,
           secondary_id=None):
    """Delete a node, or detach a relation between two nodes.

    With only `primary_id`: deletes the primary node.  With a secondary
    model/id as well: removes the relation(s) from the primary node.
    Raises HTTPException with an appropriate status on any failure.
    """
    try:
        # patch __log
        self.__log = self._GRest__log
        (primary, secondary) = validate_models(self,
                                               primary_id,
                                               secondary_model_name,
                                               secondary_id)
        primary_selected_item = None
        if primary.id is not None:
            primary_selected_item = primary.model.nodes.get_or_none(
                **{primary.selection_field: primary.id})
        secondary_selected_item = None
        if secondary.id is not None:
            secondary_selected_item = secondary.model.nodes.get_or_none(
                **{secondary.selection_field: secondary.id})
        if all([primary_selected_item,
                secondary_selected_item,
                secondary.model,
                secondary.id]):
            # user either wants to delete a relation or
            # has provided invalid information
            if hasattr(primary_selected_item, secondary.model_name):
                relation_exists = primary_selected_item.relation_exists(
                    secondary.model_name,
                    secondary_selected_item)
                if not relation_exists:
                    # There is an no relation
                    raise HTTPException(msg.RELATION_DOES_NOT_EXIST, 404)
                else:
                    # Get relation between primary and secondary objects
                    relation = getattr(
                        primary_selected_item,
                        secondary.model_name)
                    with db.transaction:
                        # remove all relationships
                        # NOTE(review): this disconnects EVERY node on this
                        # relation, not only the requested secondary --
                        # confirm that is the intended semantics.
                        for each_relation in relation.all():
                            relation.disconnect(each_relation)
                    if secondary_selected_item not in relation.all():
                        return serialize(dict(result="OK"))
                    else:
                        raise HTTPException(msg.DELETE_FAILED,
                                            500)
        elif all([primary_selected_item is not None,
                  secondary.model is None,
                  secondary.id is None]):
            # Plain node deletion (no secondary specified).
            with db.transaction:
                if primary_selected_item.delete():
                    return serialize(dict(result="OK"))
                else:
                    raise HTTPException(msg.DELETE_FAILED, 500)
        else:
            raise HTTPException(msg.RELATION_DOES_NOT_EXIST, 404)
    except (DoesNotExist, AttributeError) as e:
        self.__log.exception(e)
        raise HTTPException(msg.ITEM_DOES_NOT_EXIST, 404)
    except UniqueProperty as e:
        self.__log.exception(e)
        raise HTTPException(msg.NON_UNIQUE_PROPERTY, 409)
    except RequiredProperty as e:
        self.__log.exception(e)
        raise HTTPException(msg.REQUIRE_PROPERTY_MISSING, 500)
def delete_all(self, request):
    """Delete every node (and its relations) of the primary model.

    Only permitted when the ENABLE_DELETE_ALL feature flag is "True";
    otherwise responds 403.
    """
    try:
        # patch __log
        self.__log = self._GRest__log
        (primary, _) = validate_models(self)
        if all([ENABLE_DELETE_ALL == "True", primary.model]):
            # user wants to delete all items (including relations)
            results = db.cypher_query("MATCH (n:{0}) DETACH DELETE n".format(
                primary.model.__name__))
            # DETACH DELETE returns no rows on success.
            if results[0] == []:
                return serialize(dict(result="OK"))
            else:
                raise HTTPException(msg.DELETE_FAILED, 500)
        else:
            raise HTTPException(msg.FEATURE_IS_DISABLED, 403)
    except (DoesNotExist, AttributeError) as e:
        self.__log.exception(e)
        raise HTTPException(msg.ITEM_DOES_NOT_EXIST, 404)
    except UniqueProperty as e:
        self.__log.exception(e)
        raise HTTPException(msg.NON_UNIQUE_PROPERTY, 409)
    except RequiredProperty as e:
        self.__log.exception(e)
raise HTTPException(msg.REQUIRE_PROPERTY_MISSING, 500) | grest/verbs/delete.py |
from urllib.request import unquote
from neomodel import db
from neomodel.exception import DoesNotExist, RequiredProperty, UniqueProperty
import grest.messages as msg
from grest.exceptions import HTTPException
from grest.utils import serialize
from grest.validation import validate_models
from grest.global_config import ENABLE_DELETE_ALL
def delete(self,
           request,
           primary_id,
           secondary_model_name=None,
           secondary_id=None):
    """Delete a node, or remove a relation between two nodes.

    Without a secondary model/id the primary node itself is deleted; with
    both, the relation on the primary node is detached.  Failures are
    reported as HTTPException with a matching status code.
    """
    try:
        # patch __log
        self.__log = self._GRest__log
        (primary, secondary) = validate_models(self,
                                               primary_id,
                                               secondary_model_name,
                                               secondary_id)
        # Resolve the primary (and, when given, secondary) node.
        primary_selected_item = None
        if primary.id is not None:
            primary_selected_item = primary.model.nodes.get_or_none(
                **{primary.selection_field: primary.id})
        secondary_selected_item = None
        if secondary.id is not None:
            secondary_selected_item = secondary.model.nodes.get_or_none(
                **{secondary.selection_field: secondary.id})
        if all([primary_selected_item,
                secondary_selected_item,
                secondary.model,
                secondary.id]):
            # user either wants to delete a relation or
            # has provided invalid information
            if hasattr(primary_selected_item, secondary.model_name):
                relation_exists = primary_selected_item.relation_exists(
                    secondary.model_name,
                    secondary_selected_item)
                if not relation_exists:
                    # There is an no relation
                    raise HTTPException(msg.RELATION_DOES_NOT_EXIST, 404)
                else:
                    # Get relation between primary and secondary objects
                    relation = getattr(
                        primary_selected_item,
                        secondary.model_name)
                    with db.transaction:
                        # remove all relationships
                        # NOTE(review): disconnects every related node on
                        # this relation, not only `secondary_selected_item`
                        # -- confirm intended.
                        for each_relation in relation.all():
                            relation.disconnect(each_relation)
                    if secondary_selected_item not in relation.all():
                        return serialize(dict(result="OK"))
                    else:
                        raise HTTPException(msg.DELETE_FAILED,
                                            500)
        elif all([primary_selected_item is not None,
                  secondary.model is None,
                  secondary.id is None]):
            # Plain node deletion.
            with db.transaction:
                if primary_selected_item.delete():
                    return serialize(dict(result="OK"))
                else:
                    raise HTTPException(msg.DELETE_FAILED, 500)
        else:
            raise HTTPException(msg.RELATION_DOES_NOT_EXIST, 404)
    except (DoesNotExist, AttributeError) as e:
        self.__log.exception(e)
        raise HTTPException(msg.ITEM_DOES_NOT_EXIST, 404)
    except UniqueProperty as e:
        self.__log.exception(e)
        raise HTTPException(msg.NON_UNIQUE_PROPERTY, 409)
    except RequiredProperty as e:
        self.__log.exception(e)
        raise HTTPException(msg.REQUIRE_PROPERTY_MISSING, 500)
def delete_all(self, request):
    """Delete every node of the primary model, including relations.

    Gated by the ENABLE_DELETE_ALL feature flag; responds 403 when off.
    """
    try:
        # patch __log
        self.__log = self._GRest__log
        (primary, _) = validate_models(self)
        if all([ENABLE_DELETE_ALL == "True", primary.model]):
            # user wants to delete all items (including relations)
            results = db.cypher_query("MATCH (n:{0}) DETACH DELETE n".format(
                primary.model.__name__))
            # A successful DETACH DELETE yields no result rows.
            if results[0] == []:
                return serialize(dict(result="OK"))
            else:
                raise HTTPException(msg.DELETE_FAILED, 500)
        else:
            raise HTTPException(msg.FEATURE_IS_DISABLED, 403)
    except (DoesNotExist, AttributeError) as e:
        self.__log.exception(e)
        raise HTTPException(msg.ITEM_DOES_NOT_EXIST, 404)
    except UniqueProperty as e:
        self.__log.exception(e)
    except RequiredProperty as e:
        self.__log.exception(e)
raise HTTPException(msg.REQUIRE_PROPERTY_MISSING, 500) | 0.426202 | 0.06165 |
import requests # You will need to install requests through pip or another mechanism
import getpass # This allows us to collect the password without showing it in the terminal
# Ask for credentials
jss_username = raw_input("Please enter your JSS username: ")
jss_password = getpass.getpass("Please enter your JSS password: ")
# Set our JSS URL
jss_url = 'https://your.jss.com:8443'
print ""
# Start an infinite loop that will allow us to keep searching for devices until we select no
while True:
# Get the match term
match = raw_input("Search term (username, serialnumber, device name, etc)\nNote use * as wildcard: ")
print "--------------------------------------------------------"
print "Searching for: %s" % match
# Request all computer matching the search term
computers_request = requests.get(jss_url + '/JSSResource/computers/match/%s' % match,
auth=(jss_username,jss_password),
headers={'Accept': 'application/json'}
)
# Request all mobile_devices matching the search term
mobile_devices_request = requests.get(jss_url + '/JSSResource/mobiledevices/match/%s' % match,
auth=(jss_username,jss_password),
headers={'Accept': 'application/json'}
)
# String concatnanation and using len() to get the amount of devices
print "Search term: %s" % match
print "Found %s mobile devices" % len(mobile_devices_request.json()['mobile_devices'])
print "Found %s computers" % len(computers_request.json()['computers'])
# Adding a pause
raw_input("--Press enter to see results--")
# For loop to go over the json returned by the jss and display each entry
computers = computers_request.json()['computers']
for computer in computers:
print "Device Type: Computer"
print "Computer Name: %s" % computer['name']
print "Username: %s" % computer['username']
print "Serial Number: %s" % computer['serial_number']
print ""
# For loop to go over the json returned by the jss and display each entry
mobile_devices = mobile_devices_request.json()['mobile_devices']
for mobile_device in mobile_devices:
print "Device Type: Computer"
print "Mobile Device Name: %s" % mobile_device['name']
print "Username: %s" % mobile_device['username']
print "Serial Number: %s" %mobile_device['serial_number']
print ""
# See if we want to continue or break the loop
if raw_input("Would you like to continue? [y,n] ") in 'yesYESYes':
continue
else:
break
print "Goodbye" | JSS_device_search.py | import requests # You will need to install requests through pip or another mechanism
import getpass # This allows us to collect the password without showing it in the terminal
# Ask for credentials
jss_username = raw_input("Please enter your JSS username: ")
jss_password = getpass.getpass("Please enter your JSS password: ")
# Set our JSS URL
jss_url = 'https://your.jss.com:8443'
print ""
# Start an infinite loop that will allow us to keep searching for devices until we select no
while True:
# Get the match term
match = raw_input("Search term (username, serialnumber, device name, etc)\nNote use * as wildcard: ")
print "--------------------------------------------------------"
print "Searching for: %s" % match
# Request all computer matching the search term
computers_request = requests.get(jss_url + '/JSSResource/computers/match/%s' % match,
auth=(jss_username,jss_password),
headers={'Accept': 'application/json'}
)
# Request all mobile_devices matching the search term
mobile_devices_request = requests.get(jss_url + '/JSSResource/mobiledevices/match/%s' % match,
auth=(jss_username,jss_password),
headers={'Accept': 'application/json'}
)
    # String formatting and using len() to report the number of devices found
print "Search term: %s" % match
print "Found %s mobile devices" % len(mobile_devices_request.json()['mobile_devices'])
print "Found %s computers" % len(computers_request.json()['computers'])
# Adding a pause
raw_input("--Press enter to see results--")
# For loop to go over the json returned by the jss and display each entry
computers = computers_request.json()['computers']
for computer in computers:
print "Device Type: Computer"
print "Computer Name: %s" % computer['name']
print "Username: %s" % computer['username']
print "Serial Number: %s" % computer['serial_number']
print ""
# For loop to go over the json returned by the jss and display each entry
mobile_devices = mobile_devices_request.json()['mobile_devices']
for mobile_device in mobile_devices:
print "Device Type: Computer"
print "Mobile Device Name: %s" % mobile_device['name']
print "Username: %s" % mobile_device['username']
print "Serial Number: %s" %mobile_device['serial_number']
print ""
# See if we want to continue or break the loop
if raw_input("Would you like to continue? [y,n] ") in 'yesYESYes':
continue
else:
break
print "Goodbye" | 0.242564 | 0.118003 |
import librosa
import soundfile
import os, glob, pickle
import numpy as np
import subprocess
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from tkinter import messagebox
from PIL import Image, ImageTk
def prepareFile(ruta):
    """Collect features and emotion labels for every file matching *ruta*.

    The emotion is decoded from the file name's third dash-separated field
    via the module-level ``emotions`` mapping.

    Returns:
        A pair ``(features, labels)`` of parallel lists.
    """
    features = []
    labels = []
    for wav_path in glob.glob(ruta):
        print(wav_path)
        label = emotions[os.path.basename(wav_path).split("-")[2]]
        feature = extract_feature(wav_path, mfcc=True, chroma=True, mel=True)
        features.append(feature)
        labels.append(label)
    return features, labels
def load_data(test_size=0.2):
    """Load the audio dataset and split it into train/test partitions.

    Walks every ``Actor_*/*.wav`` file under ``../resources/data``, keeps
    only labels listed in ``observed_emotions``, and returns the 4-tuple
    produced by ``train_test_split``: (x_train, x_test, y_train, y_test).
    """
    pattern = "../resources/data/Actor_*/*.wav"
    features = []
    labels = []
    for wav_path in glob.glob(pattern):
        label = emotions[os.path.basename(wav_path).split("-")[2]]
        if label not in observed_emotions:
            continue  # skip emotions we are not training on
        features.append(extract_feature(wav_path, mfcc=True, chroma=True, mel=True))
        labels.append(label)
    return train_test_split(np.array(features), labels,
                            test_size=test_size, random_state=9)
# Extract a feature vector from a single audio file.
def extract_feature(file_name, mfcc, chroma, mel):
    """Build a 1-D feature vector for one audio file.

    Args:
        file_name: path of the audio file to read.
        mfcc: if True, append the 40 mean MFCC coefficients.
        chroma: if True, append the mean chromagram bins.
        mel: if True, append the mean mel-spectrogram bands.

    Returns:
        np.ndarray with the selected features stacked horizontally.
    """
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
        if chroma:
            # The chromagram is computed from the magnitude STFT.
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            # Renamed local so the ``chroma`` flag parameter is not shadowed.
            chroma_feat = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma_feat))
        if mel:
            # BUG FIX: pass the signal via the keyword ``y=`` -- librosa 0.10+
            # no longer accepts the audio buffer as a positional argument.
            mel_feat = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel_feat))
        return result
emotions={
'01':'neutral',
'02':'calm',
'03':'happy',
'04':'sad',
'05':'angry',
'06':'fearful',
'07':'disgust',
'08':'surprised'
}
#DataFlair - Emotions to observe
#observed_emotions=['calm', 'happy', 'fearful', 'disgust','neutral','sad','angry','surprised']
observed_emotions=['calm', 'happy', 'fearful', 'disgust','neutral']
model=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,),
learning_rate='adaptive', max_iter=500)
x_train,x_test,y_train,y_test=load_data(test_size=0.25)
def buttonTrainFuntion():
    """Fit the module-level MLP classifier on the pre-loaded training split.

    Uses the module-level ``x_train``/``y_train`` arrays and shows a message
    box summarising the feature count and split sizes when done.
    """
    print((x_train.shape[0], x_test.shape[0]))
    print(f'Features extracted: {x_train.shape[1]}')
    global model
    model.fit(x_train,y_train)
    messagebox.showinfo(message="""Atributos del dataset: {}
    Tamaño del dataset de Entrenamiento: {}
    Tamaño del dataset de Prueba: {}
    """.format(x_train.shape[1],x_train.shape[0], x_test.shape[0]),
    title="Entrenamiento Finalizado")
def buttonTestFuntion():
    """Evaluate the trained model on the held-out test split.

    Prints the accuracy and shows it in a message box.
    """
    y_pred=model.predict(x_test)
    # Calculate the accuracy of the model against the true test labels
    accuracy=accuracy_score(y_true=y_test, y_pred=y_pred)
    # Print the accuracy
    print("Accuracy: {:.2f}%".format(accuracy*100))
    messagebox.showinfo(message="Modelo testeado\nSe obtuvo la siguiente precision\nAccuracy: {:.2f}%".format(accuracy*100), title="Testeo Finalizado")
def buttonRecordFunction(labelAudioIcon, labelAudioText, labelRecordingText, labelResultadoEmocion, labelEmotionImage):
    """Switch the UI into recording mode.

    Hides the previous result/audio widgets and shows the recording label.
    Actual audio capture is not implemented yet (see TODO below).
    """
    messagebox.showinfo(message="Presione ACEPTAR cuando esté listo para grabar", title="Grabación")
    labelResultadoEmocion.place_forget()
    labelEmotionImage.place_forget()
    labelAudioIcon.place_forget()
    labelAudioText.place_forget()
    labelRecordingText.place(x = 30, y = 50 , width=180, height=60)
    #TODO: start the recording
def buttonStopFunction(labelAudioIcon, labelAudioText, labelRecordingText):
    """Leave recording mode and restore the audio icon/text widgets."""
    messagebox.showinfo(message="Presione ACEPTAR para finalizar la grabación", title="Grabación")
    labelRecordingText.place_forget()
    #TODO: stop the recording
    labelAudioIcon.place(x = 30, y = 50 , width=60, height=60)
    labelAudioText.place(x = 100, y = 50 , width=110, height=60)
def buttonPredictFunction(labelResultadoEmocion, labelEmotionImage,ruta):
    """Predict the emotion of the audio file(s) at *ruta* and show the result.

    Extracts features with ``prepareFile``, runs the trained module-level
    model, then displays the predicted label and its matching emotion image.
    """
    print(ruta)
    caracteristicas, emocionCorrecta = prepareFile(ruta)
    print(caracteristicas)
    print(emocionCorrecta)
    pred = model.predict(caracteristicas)
    print(pred)
    labelResultadoEmocion.place(x = 270, y = 70 , width=180, height=40)
    labelEmotionImage.place(x = 271, y = 121, width=178, height=208)
    # The first prediction corresponds to the (single) recorded file.
    prediction = pred[0]
    labelResultadoEmocion.configure(text=prediction)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter under its current name.
    emotionImage = ImageTk.PhotoImage(
        Image.open("../resources/emotions/" + prediction + ".png")
        .resize((170, 200), Image.LANCZOS))
    labelEmotionImage.configure(image=emotionImage)
    # Keep a reference on the widget so Tkinter does not garbage-collect it.
    labelEmotionImage.image = emotionImage
def buttonPlayFunction(rutaAudio):
    """Play *rutaAudio* by launching VLC in a separate process.

    NOTE(review): the VLC path is hard-coded for one machine; consider making
    it configurable.
    """
    rutaVLC = "D:/Program Files/VLC/vlc.exe"
    #rutaVLC = "C:/Program Files/VideoLAN/VLC/vlc.exe"
    # Presumably converts to Windows-style backslashes for VLC -- confirm.
    rutaAudio=rutaAudio.replace("/","\\")
    print(rutaAudio)
    #r"C:\Users\carlos\gitKraken\pyhton2020AProjectLibrosa\resources\data\Actor_01\03-01-03-01-02-01-01.wav"
    p = subprocess.Popen([rutaVLC,r"{}".format(rutaAudio)])
def buttonPauseFunction():
pass | src/funcionalidad.py | import librosa
import soundfile
import os, glob, pickle
import numpy as np
import subprocess
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from tkinter import messagebox
from PIL import Image, ImageTk
def prepareFile(ruta):
x,y=[],[]
for file in glob.glob(ruta):
print(file)
file_name=os.path.basename(file)
emotion=emotions[file_name.split("-")[2]]
feature=extract_feature(file, mfcc=True, chroma=True, mel=True)
x.append(feature)
y.append(emotion)
return x,y
def load_data(test_size=0.2):
#path = "C:\\Users\\carlos\\gitKraken\\pyhton2020AProjectLibrosa\\resources\\data\\Actor_*\\*.wav"
path = "../resources/data/Actor_*/*.wav"
x,y=[],[]
for file in glob.glob(path):
file_name=os.path.basename(file)
emotion=emotions[file_name.split("-")[2]]
if emotion not in observed_emotions:
continue
feature=extract_feature(file, mfcc=True, chroma=True, mel=True)
x.append(feature)
y.append(emotion)
return train_test_split(np.array(x), y, test_size=test_size, random_state=9)
# Extract a feature vector from a single audio file.
def extract_feature(file_name, mfcc, chroma, mel):
    """Build a 1-D feature vector for one audio file.

    Args:
        file_name: path of the audio file to read.
        mfcc: if True, append the 40 mean MFCC coefficients.
        chroma: if True, append the mean chromagram bins.
        mel: if True, append the mean mel-spectrogram bands.

    Returns:
        np.ndarray with the selected features stacked horizontally.
    """
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
        if chroma:
            # The chromagram is computed from the magnitude STFT.
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            # Renamed local so the ``chroma`` flag parameter is not shadowed.
            chroma_feat = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma_feat))
        if mel:
            # BUG FIX: pass the signal via the keyword ``y=`` -- librosa 0.10+
            # no longer accepts the audio buffer as a positional argument.
            mel_feat = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel_feat))
        return result
emotions={
'01':'neutral',
'02':'calm',
'03':'happy',
'04':'sad',
'05':'angry',
'06':'fearful',
'07':'disgust',
'08':'surprised'
}
#DataFlair - Emotions to observe
#observed_emotions=['calm', 'happy', 'fearful', 'disgust','neutral','sad','angry','surprised']
observed_emotions=['calm', 'happy', 'fearful', 'disgust','neutral']
model=MLPClassifier(alpha=0.01, batch_size=256, epsilon=1e-08, hidden_layer_sizes=(300,),
learning_rate='adaptive', max_iter=500)
x_train,x_test,y_train,y_test=load_data(test_size=0.25)
def buttonTrainFuntion():
print((x_train.shape[0], x_test.shape[0]))
print(f'Features extracted: {x_train.shape[1]}')
global model
model.fit(x_train,y_train)
messagebox.showinfo(message="""Atributos del dataset: {}
Tamaño del dataset de Entrenamiento: {}
Tamaño del dataset de Prueba: {}
""".format(x_train.shape[1],x_train.shape[0], x_test.shape[0]),
title="Entrenamiento Finalizado")
def buttonTestFuntion():
y_pred=model.predict(x_test)
#DataFlair - Calculate the accuracy of our model
accuracy=accuracy_score(y_true=y_test, y_pred=y_pred)
#DataFlair - Print the accuracy
print("Accuracy: {:.2f}%".format(accuracy*100))
messagebox.showinfo(message="Modelo testeado\nSe obtuvo la siguiente precision\nAccuracy: {:.2f}%".format(accuracy*100), title="Testeo Finalizado")
def buttonRecordFunction(labelAudioIcon, labelAudioText, labelRecordingText, labelResultadoEmocion, labelEmotionImage):
messagebox.showinfo(message="Presione ACEPTAR cuando esté listo para grabar", title="Grabación")
labelResultadoEmocion.place_forget()
labelEmotionImage.place_forget()
labelAudioIcon.place_forget()
labelAudioText.place_forget()
labelRecordingText.place(x = 30, y = 50 , width=180, height=60)
#TODO: INICIAR GRABACIÓN
def buttonStopFunction(labelAudioIcon, labelAudioText, labelRecordingText):
messagebox.showinfo(message="Presione ACEPTAR para finalizar la grabación", title="Grabación")
labelRecordingText.place_forget()
#TODO:FINALIZAR GRABACIÓN
labelAudioIcon.place(x = 30, y = 50 , width=60, height=60)
labelAudioText.place(x = 100, y = 50 , width=110, height=60)
def buttonPredictFunction(labelResultadoEmocion, labelEmotionImage,ruta):
    """Predict the emotion of the audio file(s) at *ruta* and show the result.

    Extracts features with ``prepareFile``, runs the trained module-level
    model, then displays the predicted label and its matching emotion image.
    """
    print(ruta)
    caracteristicas, emocionCorrecta = prepareFile(ruta)
    print(caracteristicas)
    print(emocionCorrecta)
    pred = model.predict(caracteristicas)
    print(pred)
    labelResultadoEmocion.place(x = 270, y = 70 , width=180, height=40)
    labelEmotionImage.place(x = 271, y = 121, width=178, height=208)
    # The first prediction corresponds to the (single) recorded file.
    prediction = pred[0]
    labelResultadoEmocion.configure(text=prediction)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter under its current name.
    emotionImage = ImageTk.PhotoImage(
        Image.open("../resources/emotions/" + prediction + ".png")
        .resize((170, 200), Image.LANCZOS))
    labelEmotionImage.configure(image=emotionImage)
    # Keep a reference on the widget so Tkinter does not garbage-collect it.
    labelEmotionImage.image = emotionImage
def buttonPlayFunction(rutaAudio):
rutaVLC = "D:/Program Files/VLC/vlc.exe"
#rutaVLC = "C:/Program Files/VideoLAN/VLC/vlc.exe"
rutaAudio=rutaAudio.replace("/","\\")
print(rutaAudio)
#r"C:\Users\carlos\gitKraken\pyhton2020AProjectLibrosa\resources\data\Actor_01\03-01-03-01-02-01-01.wav"
p = subprocess.Popen([rutaVLC,r"{}".format(rutaAudio)])
def buttonPauseFunction():
pass | 0.267791 | 0.178347 |
import requests
import json
import re
# Entry point for resolving a video page to its playable stream info.
class Bili(object):
    """Resolve a Bilibili mobile page (or bare BV id) to its video metadata.

    ``get_video`` returns a JSON string with aid/bvid/cid, poster and stream
    URLs on success, or a human-readable error string on failure.
    """

    def __init__(self, bv):
        # Either a full URL or a bare BV id.
        self.bv = bv

    def get_video(self):
        """Fetch the page and scrape the player ``options`` block.

        Returns:
            str: JSON payload on success, otherwise an error message.
        """
        url = self.bv
        # Heuristic: anything 23+ characters long is treated as a full URL.
        if len(url) >= 23:
            base_url = url
        else:
            base_url = "https://m.bilibili.com/video/" + str(self.bv)
        headers = {
            "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
        }
        try:
            response = requests.get(url=base_url, headers=headers, timeout=5)
            if response.status_code == 200:
                # Raw strings avoid invalid-escape warnings for \{ ... \}.
                pattern = re.compile(r"options = \{(.*?)\}", re.S)
                try:
                    res = re.findall(pattern, response.text)[0]
                    aid = re.findall(r"aid: (.*?),", res)[0]
                    cid = re.findall(r"cid: (.*?),", res)[0]
                    readyDuration = re.findall(r"readyDuration: (.*?),", res)[0]
                    bvid = re.findall(r"bvid: '(.*?)',", res)[0]
                    readyPoster = re.findall(r"readyPoster: '(.*?)',", res)[0]
                    readyVideoUrl = re.findall(r"readyVideoUrl: '(.*?)',", res)[0]
                    rows = {
                        "aid": aid,
                        "bvid": bvid,
                        "cid": cid,
                        "readyPoster": "https:" + readyPoster,
                        "readyVideoUrl": "https:" + readyVideoUrl,
                        "readyDuration": readyDuration
                    }
                    return json.dumps(rows, ensure_ascii=False)
                except Exception as e:
                    return "暂无相关数据,请检查相关数据:" + str(e)
            else:
                return "暂无相关数据,请检查相关数据"
        except Exception as e:
            # BUG FIX: previously returned the exception object itself, which
            # is inconsistent with the string results of every other branch.
            return "暂无相关数据,请检查相关数据:" + str(e)
if __name__ == '__main__':
    # Manual smoke test. NOTE(review): this URL is a Haokan link, while Bili
    # builds Bilibili URLs from short ids -- confirm which service is intended.
    bili = Bili("https://haokan.baidu.com/v?vid=11231414361411904045")
    print(bili.get_video())
import json
import re
# 好看视频解析入口
class Bili(object):
def __init__(self, bv):
self.bv = bv
def get_video(self):
url = self.bv
if len(url) >= 23:
base_url = url
else:
base_url = "https://m.bilibili.com/video/" + str(self.bv)
headers = {
"user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
}
try:
response = requests.get(url=base_url, headers=headers, timeout=5)
if response.status_code == 200:
pattern = re.compile("options = \{(.*?)\}", re.S)
# res ="{"+ re.findall(pattern,response.text)[0]+"}"
try:
res = re.findall(pattern, response.text)[0]
aid = re.findall("aid: (.*?),", res)[0]
cid = re.findall("cid: (.*?),", res)[0]
readyDuration = re.findall("readyDuration: (.*?),", res)[0]
bvid = re.findall("bvid: '(.*?)',", res)[0]
readyPoster = re.findall("readyPoster: '(.*?)',", res)[0]
readyVideoUrl = re.findall("readyVideoUrl: '(.*?)',", res)[0]
rows = {
"aid": aid,
"bvid": bvid,
"cid": cid,
"readyPoster": "https:" + readyPoster,
"readyVideoUrl": "https:" + readyVideoUrl,
"readyDuration": readyDuration
}
return json.dumps(rows, ensure_ascii=False)
except Exception as e:
return "暂无相关数据,请检查相关数据:"+str(e)
else:
return "暂无相关数据,请检查相关数据"
except Exception as e:
return e
if __name__ == '__main__':
bili = Bili("https://haokan.baidu.com/v?vid=11231414361411904045")
print(bili.get_video()) | 0.108626 | 0.098469 |
import numpy as np
from ivac.utils import get_nfeatures
# equilibrium IVAC
def c0_all(trajs):
    """Equilibrium estimate of the covariance matrix C(0) over all frames."""
    dim = get_nfeatures(trajs)
    total = np.zeros((dim, dim))
    frames = 0.0
    for traj in trajs:
        total += np.dot(traj.T, traj)
        frames += len(traj)
    return total / frames
def ct_all(trajs, lag):
    """Equilibrium estimate of the time-lagged covariance matrix C(t).

    Pairs each frame with the frame ``lag`` steps later in the same
    trajectory.
    """
    dim = get_nfeatures(trajs)
    total = np.zeros((dim, dim))
    frames = 0.0
    for traj in trajs:
        head = traj[: len(traj) - lag]
        tail = traj[lag:]
        total += np.dot(head.T, tail)
        frames += len(head)
    return total / frames
def c0_all_adj_ct(trajs, lag):
    """C(0) estimate adjusted to use the same frames as ct_all at ``lag``.

    Averages the covariances of the leading and trailing frame windows.
    """
    dim = get_nfeatures(trajs)
    total = np.zeros((dim, dim))
    frames = 0.0
    for traj in trajs:
        head = traj[: len(traj) - lag]
        tail = traj[lag:]
        total += np.dot(head.T, head) + np.dot(tail.T, tail)
        frames += 2 * len(head)
    return total / frames
def ic_all(trajs, lags):
    """Integrated correlation matrix: sum of ct_all over the given lags."""
    dim = get_nfeatures(trajs)
    result = np.zeros((dim, dim))
    for tau in lags:
        result = result + ct_all(trajs, tau)
    return result
def c0_all_adj_ic(trajs, lags):
    """Adjusted C(0) averaged over all lags, matching ic_all's frame usage."""
    dim = get_nfeatures(trajs)
    result = np.zeros((dim, dim))
    for tau in lags:
        result = result + c0_all_adj_ct(trajs, tau)
    return result / len(lags)
# nonequilibrium IVAC: weight estimation
def c0_trunc(trajs, cutlag):
    """C(0) estimated from trajectories truncated by ``cutlag`` frames."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    frames = 0.0
    for traj in trajs:
        head = traj[: len(traj) - cutlag]
        acc += np.dot(head.T, head)
        frames += len(head)
    return acc / frames
def ct_trunc(trajs, lag, cutlag):
    """Time-lagged covariance over trajectories truncated by ``cutlag``."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    frames = 0.0
    for traj in trajs:
        head = traj[: len(traj) - cutlag]
        tail = traj[lag : len(traj) - cutlag + lag]
        acc += np.dot(head.T, tail)
        frames += len(head)
    return acc / frames
def ic_trunc(trajs, lags, cutlag):
    """Integrated correlation matrix over truncated trajectories."""
    dim = get_nfeatures(trajs)
    result = np.zeros((dim, dim))
    for tau in lags:
        result = result + ct_trunc(trajs, tau, cutlag)
    return result
# nonequilibrium IVAC: reweighted matrices with truncated data
def c0_rt(trajs, cutlag, weights):
    """Reweighted C(0) computed on the truncated portion of each trajectory."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    wtotal = 0.0
    for traj, weight in zip(trajs, weights):
        head = traj[: len(traj) - cutlag]
        w = weight[: len(traj) - cutlag]
        acc += np.einsum("n,ni,nj", w, head, head)
        wtotal += np.sum(w)
    return acc / wtotal
def ct_rt(trajs, lag, cutlag, weights):
    """Reweighted time-lagged covariance on the truncated data."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    wtotal = 0.0
    for traj, weight in zip(trajs, weights):
        head = traj[: len(traj) - cutlag]
        tail = traj[lag : len(traj) - cutlag + lag]
        w = weight[: len(traj) - cutlag]
        acc += np.einsum("n,ni,nj", w, head, tail)
        wtotal += np.sum(w)
    return acc / wtotal
def c0_rt_adj_ct(trajs, lag, cutlag, weights):
    """Reweighted C(0) adjusted to match ct_rt at the same ``lag``."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    wtotal = 0.0
    for traj, weight in zip(trajs, weights):
        head = traj[: len(traj) - cutlag]
        tail = traj[lag : len(traj) - cutlag + lag]
        w = weight[: len(traj) - cutlag]
        acc += np.einsum("n,ni,nj", w, head, head)
        acc += np.einsum("n,ni,nj", w, tail, tail)
        wtotal += 2.0 * np.sum(w)
    return acc / wtotal
def ic_rt(trajs, lags, cutlag, weights):
    """Reweighted integrated correlation matrix (truncated-data variant)."""
    dim = get_nfeatures(trajs)
    result = np.zeros((dim, dim))
    for tau in lags:
        result = result + ct_rt(trajs, tau, cutlag, weights)
    return result
def c0_rt_adj_ic(trajs, lags, cutlag, weights):
    """Per-lag adjusted reweighted C(0), averaged over all lags."""
    dim = get_nfeatures(trajs)
    result = np.zeros((dim, dim))
    for tau in lags:
        result = result + c0_rt_adj_ct(trajs, tau, cutlag, weights)
    return result / len(lags)
# nonequilibrium IVAC: reweighted matrices with all data
def c0_ra(trajs, cutlag, weights):
    """Reweighted C(0) using every shifted window up to ``cutlag``."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    wtotal = 0.0
    for traj, weight in zip(trajs, weights):
        w = weight[: len(traj) - cutlag]
        for shift in range(cutlag + 1):
            window = traj[shift : shift + len(traj) - cutlag]
            acc += np.einsum("n,ni,nj", w, window, window)
            wtotal += np.sum(w)
    return acc / wtotal
def ct_ra(trajs, lag, cutlag, weights):
    """Reweighted time-lagged covariance using every admissible shift."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    wtotal = 0.0
    for traj, weight in zip(trajs, weights):
        w = weight[: len(traj) - cutlag]
        for shift in range(cutlag - lag + 1):
            a = traj[shift : shift + len(traj) - cutlag]
            b = traj[shift + lag : shift + len(traj) - cutlag + lag]
            acc += np.einsum("n,ni,nj", w, a, b)
            wtotal += np.sum(w)
    return acc / wtotal
def c0_ra_adj_ct(trajs, lag, cutlag, weights):
    """Reweighted C(0) adjusted to match ct_ra at the same ``lag``."""
    dim = get_nfeatures(trajs)
    acc = np.zeros((dim, dim))
    wtotal = 0.0
    for traj, weight in zip(trajs, weights):
        w = weight[: len(traj) - cutlag]
        for shift in range(cutlag - lag + 1):
            a = traj[shift : shift + len(traj) - cutlag]
            b = traj[shift + lag : shift + len(traj) - cutlag + lag]
            acc += np.einsum("n,ni,nj", w, a, a)
            acc += np.einsum("n,ni,nj", w, b, b)
            wtotal += 2.0 * np.sum(w)
    return acc / wtotal
def ic_ra(trajs, lags, cutlag, weights):
    """Reweighted integrated correlation matrix (all-data variant)."""
    dim = get_nfeatures(trajs)
    result = np.zeros((dim, dim))
    for tau in lags:
        result = result + ct_ra(trajs, tau, cutlag, weights)
    return result
def c0_ra_adj_ic(trajs, lags, cutlag, weights):
    """Per-lag adjusted reweighted C(0) (all-data variant), lag-averaged.

    Mirrors ``c0_rt_adj_ic`` but uses the all-data estimators.
    """
    nfeatures = get_nfeatures(trajs)
    c0 = np.zeros((nfeatures, nfeatures))
    for lag in lags:
        # BUG FIX: previously passed the whole ``lags`` sequence instead of
        # the current ``lag``, so every term used the wrong lag argument.
        c0 += c0_ra_adj_ct(trajs, lag, cutlag, weights)
    return c0 / len(lags)
import numpy as np
from ivac.utils import get_nfeatures
# equilibrium IVAC
def c0_all(trajs):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj in trajs:
numer += traj.T @ traj
denom += len(traj)
return numer / denom
def ct_all(trajs, lag):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj in trajs:
x = traj[: len(traj) - lag]
y = traj[lag:]
numer += x.T @ y
denom += len(x)
return numer / denom
def c0_all_adj_ct(trajs, lag):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj in trajs:
x = traj[: len(traj) - lag]
y = traj[lag:]
numer += x.T @ x + y.T @ y
denom += 2 * len(x)
return numer / denom
def ic_all(trajs, lags):
nfeatures = get_nfeatures(trajs)
ic = np.zeros((nfeatures, nfeatures))
for lag in lags:
ic += ct_all(trajs, lag)
return ic
def c0_all_adj_ic(trajs, lags):
nfeatures = get_nfeatures(trajs)
c0 = np.zeros((nfeatures, nfeatures))
for lag in lags:
c0 += c0_all_adj_ct(trajs, lag)
return c0 / len(lags)
# nonequilibrium IVAC: weight estimation
def c0_trunc(trajs, cutlag):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj in trajs:
x = traj[: len(traj) - cutlag]
numer += x.T @ x
denom += len(x)
return numer / denom
def ct_trunc(trajs, lag, cutlag):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj in trajs:
x = traj[: len(traj) - cutlag]
y = traj[lag : len(traj) - cutlag + lag]
numer += x.T @ y
denom += len(x)
return numer / denom
def ic_trunc(trajs, lags, cutlag):
nfeatures = get_nfeatures(trajs)
ic = np.zeros((nfeatures, nfeatures))
for lag in lags:
ic += ct_trunc(trajs, lag, cutlag)
return ic
# nonequilibrium IVAC: reweighted matrices with truncated data
def c0_rt(trajs, cutlag, weights):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj, weight in zip(trajs, weights):
x = traj[: len(traj) - cutlag]
w = weight[: len(traj) - cutlag]
numer += np.einsum("n,ni,nj", w, x, x)
denom += np.sum(w)
return numer / denom
def ct_rt(trajs, lag, cutlag, weights):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj, weight in zip(trajs, weights):
x = traj[: len(traj) - cutlag]
y = traj[lag : len(traj) - cutlag + lag]
w = weight[: len(traj) - cutlag]
numer += np.einsum("n,ni,nj", w, x, y)
denom += np.sum(w)
return numer / denom
def c0_rt_adj_ct(trajs, lag, cutlag, weights):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj, weight in zip(trajs, weights):
x = traj[: len(traj) - cutlag]
y = traj[lag : len(traj) - cutlag + lag]
w = weight[: len(traj) - cutlag]
numer += np.einsum("n,ni,nj", w, x, x)
numer += np.einsum("n,ni,nj", w, y, y)
denom += 2.0 * np.sum(w)
return numer / denom
def ic_rt(trajs, lags, cutlag, weights):
nfeatures = get_nfeatures(trajs)
ic = np.zeros((nfeatures, nfeatures))
for lag in lags:
ic += ct_rt(trajs, lag, cutlag, weights)
return ic
def c0_rt_adj_ic(trajs, lags, cutlag, weights):
nfeatures = get_nfeatures(trajs)
c0 = np.zeros((nfeatures, nfeatures))
for lag in lags:
c0 += c0_rt_adj_ct(trajs, lag, cutlag, weights)
return c0 / len(lags)
# nonequilibrium IVAC: reweighted matrices with all data
def c0_ra(trajs, cutlag, weights):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj, weight in zip(trajs, weights):
w = weight[: len(traj) - cutlag]
for shift in range(cutlag + 1):
x = traj[shift : shift + len(traj) - cutlag]
numer += np.einsum("n,ni,nj", w, x, x)
denom += np.sum(w)
return numer / denom
def ct_ra(trajs, lag, cutlag, weights):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj, weight in zip(trajs, weights):
w = weight[: len(traj) - cutlag]
for shift in range(cutlag - lag + 1):
x = traj[shift : shift + len(traj) - cutlag]
y = traj[shift + lag : shift + len(traj) - cutlag + lag]
numer += np.einsum("n,ni,nj", w, x, y)
denom += np.sum(w)
return numer / denom
def c0_ra_adj_ct(trajs, lag, cutlag, weights):
nfeatures = get_nfeatures(trajs)
numer = np.zeros((nfeatures, nfeatures))
denom = 0.0
for traj, weight in zip(trajs, weights):
w = weight[: len(traj) - cutlag]
for shift in range(cutlag - lag + 1):
x = traj[shift : shift + len(traj) - cutlag]
y = traj[shift + lag : shift + len(traj) - cutlag + lag]
numer += np.einsum("n,ni,nj", w, x, x)
numer += np.einsum("n,ni,nj", w, y, y)
denom += 2.0 * np.sum(w)
return numer / denom
def ic_ra(trajs, lags, cutlag, weights):
nfeatures = get_nfeatures(trajs)
ic = np.zeros((nfeatures, nfeatures))
for lag in lags:
ic += ct_ra(trajs, lag, cutlag, weights)
return ic
def c0_ra_adj_ic(trajs, lags, cutlag, weights):
    """Per-lag adjusted reweighted C(0) (all-data variant), lag-averaged.

    Mirrors ``c0_rt_adj_ic`` but uses the all-data estimators.
    """
    nfeatures = get_nfeatures(trajs)
    c0 = np.zeros((nfeatures, nfeatures))
    for lag in lags:
        # BUG FIX: previously passed the whole ``lags`` sequence instead of
        # the current ``lag``, so every term used the wrong lag argument.
        c0 += c0_ra_adj_ct(trajs, lag, cutlag, weights)
    return c0 / len(lags)
from collections import defaultdict
from itertools import permutations
import numpy as np
import sys
def read_scan(input):
    """Parse one scanner section into a list of (x, y, z) integer tuples.

    The first line is the ``--- scanner N ---`` header and is skipped.
    """
    lines = input.splitlines()
    assert lines[0].startswith("---")
    return [tuple(int(c) for c in line.split(",")) for line in lines[1:]]
def read_scans(fin):
    """Read all scanner sections (separated by blank lines) from *fin*."""
    sections = fin.read().strip().split("\n\n")
    return [read_scan(section) for section in sections]
def generate_orientations(scan):
    """Yield (matrix, transformed_scan) for all 48 sign/permutation transforms.

    Combines every sign flip of the three axes with every axis permutation.
    Note: this includes reflections as well as the 24 proper rotations.
    """
    for sx in (1, -1):
        for sy in (1, -1):
            for sz in (1, -1):
                for order in permutations(range(3)):
                    matrix = np.diag([sx, sy, sz]).astype(float)[:, order]
                    moved = [tuple(matrix.dot(np.array(p))) for p in scan]
                    yield (matrix, moved)
def find_alignment(scan1, scan2, threshold=12):
    """Try to align *scan2* onto *scan1* by translation only.

    Returns (translation, overlapping_beacons) for the first offset that
    makes at least *threshold* beacons coincide, or (None, None).
    """
    reference = set(scan1)
    for anchor in scan1:
        for candidate in scan2:
            offset = np.array(anchor) - np.array(candidate)
            shifted = {tuple(q) for q in np.array(scan2) + offset}
            common = reference & shifted
            if len(common) >= threshold:
                return (offset, common)
    return (None, None)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: day19_scanner_part1.py <input file>")
        sys.exit(-1)
    with open(sys.argv[1], 'r') as fin:
        scans = read_scans(fin)
    # For every ordered scanner pair, search the 48 orientations for one
    # whose translated beacons overlap scan1 (default threshold of 12).
    alignments = defaultdict(list)
    for scan1 in range(len(scans)):
        for scan2 in range(len(scans)):
            if scan1 == scan2:
                continue
            print("align scan", scan1, "and ", scan2)
            for rotation, rotated in generate_orientations(scans[scan2]):
                translation, beacons = find_alignment(scans[scan1], rotated)
                if translation is not None:
                    # Record the transform that maps scan2 into scan1's frame.
                    alignments[scan1].append((scan2, rotation, translation))
                    print("found overlap with rotation", rotation, "and translation", translation)
                    break
    # do depth first search in alignment tree and create set of all beacons
    beacons = set()
    visited = set()
    # Each stack entry carries the composed transform into scanner-0 space.
    stack = [(0, np.array([0,0,0]), np.eye(3))]
    while len(stack)>0:
        scanner, position, rotation = stack.pop()
        visited.add(scanner)
        print("add beacons for scanner", scanner, "with position", position, "and rotation", rotation)
        # Transform each beacon into scanner-0 coordinates before adding it.
        for beacon in scans[scanner]:
            beacon = tuple(position + rotation.dot(np.array(beacon)))
            print("add beacon ", beacon)
            beacons.add(beacon)
        for next_scanner, next_rotation, next_translation in alignments[scanner]:
            if next_scanner not in visited:
                print("push scanner", next_scanner)
                stack.append((next_scanner, position + rotation.dot(next_translation), rotation.dot(next_rotation)))
    print(beacons)
    print(len(beacons))
from itertools import permutations
import numpy as np
import sys
def read_scan(input):
lines = input.splitlines()
assert(lines[0].startswith("---"))
scan = []
for line in lines[1:]:
scan.append(tuple(map(int, line.split(","))))
return scan
def read_scans(fin):
scans = list(map(read_scan, fin.read().strip().split("\n\n")))
return scans
def generate_orientations(scan):
for x in [1,-1]:
for y in [1,-1]:
for z in [1,-1]:
for swap in permutations(range(3)):
rotation = np.eye(3)
rotation[:,0] = rotation[:,0]*x
rotation[:,1] = rotation[:,1]*y
rotation[:,2] = rotation[:,2]*z
rotation = rotation[:,swap]
rotated = [tuple(rotation.dot(np.array(vector))) for vector in scan]
yield (rotation, rotated)
def find_alignment(scan1, scan2, threshold=12):
for point1 in scan1:
for point2 in scan2:
translation = np.array(point1)-np.array(point2)
beacons = set(scan1).intersection({tuple(p) for p in np.array(scan2)+translation})
if len(beacons) >= threshold:
return (translation, beacons)
return (None, None)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: day19_scanner_part1.py <input file>")
sys.exit(-1)
with open(sys.argv[1], 'r') as fin:
scans = read_scans(fin)
alignments = defaultdict(list)
for scan1 in range(len(scans)):
for scan2 in range(len(scans)):
if scan1 == scan2:
continue
print("align scan", scan1, "and ", scan2)
for rotation, rotated in generate_orientations(scans[scan2]):
translation, beacons = find_alignment(scans[scan1], rotated)
if translation is not None:
alignments[scan1].append((scan2, rotation, translation))
print("found overlap with rotation", rotation, "and translation", translation)
break
# do depth first search in alignment tree and create set of all beacons
beacons = set()
visited = set()
stack = [(0, np.array([0,0,0]), np.eye(3))]
while len(stack)>0:
scanner, position, rotation = stack.pop()
visited.add(scanner)
print("add beacons for scanner", scanner, "with position", position, "and rotation", rotation)
for beacon in scans[scanner]:
beacon = tuple(position + rotation.dot(np.array(beacon)))
print("add beacon ", beacon)
beacons.add(beacon)
for next_scanner, next_rotation, next_translation in alignments[scanner]:
if next_scanner not in visited:
print("push scanner", next_scanner)
stack.append((next_scanner, position + rotation.dot(next_translation), rotation.dot(next_rotation)))
print(beacons)
print(len(beacons)) | 0.386995 | 0.444806 |
import numpy as np
from fitter import *
from scipy.constants import hbar
cons_w = 2*3.14*6.002e9
cons_ke = 2*3.14*0.017e6
cons_k = 2*3.14*1.4e6
cons_delta = 0
def Plin(p):
    """Convert a power level in dBm to linear power in watts."""
    return 10. ** (p / 10. - 3.)
def photons(power):
    """Estimate the photon number for a drive *power* in dBm.

    Uses the module-level cavity constants (cons_w, cons_ke, cons_k,
    cons_delta).
    """
    linear = Plin(power)
    return linear / (hbar * cons_w) * (cons_ke / ((cons_k / 2) ** 2 + cons_delta ** 2))
path = r'D:\data\20200223\074606_Power_Sweep_229mV'
data_name = path+path[16:]+r'.dat'
data = np.loadtxt(data_name, unpack=True)
n = 27
power= np.array(np.array_split(data[0],n))
freq = np.array_split(data[1],n)[0]
real = np.array_split(data[2],n)
imag = np.array_split(data[3],n)
absol = np.array_split(data[4],n)
f = Fitter(S21r)
# fr = Fitter(custom_real)
# fm = Fitter(custom_imag)
# plt.plot(np.real(d.data),np.imag(d.data))
# plt.show()
k = np.zeros(n)
f0 = np.zeros(n)
Q = np.zeros(n)
k_err = np.zeros(n)
f0_err = np.zeros(n)
Q_err = np.zeros(n)
left1 = 151
right1 = 246
for i in range(11):
result = f.fit(freq[left1:right1], absol[i][left1:right1], print_report = True)
# f.plot()
k[i] = np.abs(result.params['k'].value)
f0[i] = result.params['f0'].value
Q[i] = f0[i]/k[i]
k_err[i] = result.params['k'].stderr
f0_err[i] = result.params['f0'].stderr
Q_err[i] = (f0_err[i]/f0[i] + k_err[i]/k[i])*(f0[i]/k[i])
left = 81
right = 141
for i in range(20,27):
result = f.fit(freq[left:right], absol[i][left:right], print_report = True)
# f.plot()
k[i] = np.abs(result.params['k'].value)
f0[i] = result.params['f0'].value
Q[i] = f0[i]/k[i]
k_err[i] = result.params['k'].stderr
f0_err[i] = result.params['f0'].stderr
Q_err[i] = (f0_err[i]/f0[i] + k_err[i]/k[i])*(f0[i]/k[i])
# power = np.delete(power.T[0],[35])
# Q = np.delete(Q,[35])
# Q_err = np.delete(Q_err,[35])
# k = np.delete(k,[35])
# k_err = np.delete(k_err,[35])
# print(power)
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
ax.errorbar(photons(power.T[0]-80), Q/1e3, fmt='.', yerr = Q_err/1e3 ,capsize=2, elinewidth=1, markeredgewidth=2)
# ax.plot(power.T[0],(f0/ki)/1e3)
ax.set_xlabel(r'Photon number')
ax.set_ylabel(r'Q (kU)')
ax.set_xscale('log')
ax.set_title(path[8:])
plt.show()
# fig1, ax1 = plt.subplots(nrows=1, ncols=1, sharex=True)
# ax1.errorbar(photons(power.T[0]-80), k/1e6, fmt='.', yerr = k_err/1e6 ,capsize=2, elinewidth=1, markeredgewidth=2)
# # ax.plot(power.T[0],(f0/ki)/1e3)
# ax1.set_xlabel(r'Photon number')
# ax1.set_ylabel(r'Linewidth (MHz)')
# ax1.set_xscale('log')
# ax1.set_title(path[8:])
# plt.show()
# np.savetxt(r'D:\data\20200217\Analysis_quality_factor\500M_below_cavity_229.txt', (photons(power.T[0]-80), k/1e6, k_err/1e6, Q/1e3, Q_err/1e3))
# fr.fit(freq, real[-1], print_report = True)
# fr.plot()
# fm.fit(freq, imag[-1], print_report = True)
# fm.plot() | scripts/Qubit/Analysis/Fit/quick_S11_fit.py | import numpy as np
from fitter import *
from scipy.constants import hbar
# Cavity constants (angular frequencies, rad/s) used by photons() below.
# NOTE(review): 2*3.14 is used instead of 2*pi — presumably an accepted
# approximation here; confirm the precision is intended.
cons_w = 2*3.14*6.002e9
# presumably the external (input) coupling rate — confirm
cons_ke = 2*3.14*0.017e6
# presumably the total cavity linewidth — confirm
cons_k = 2*3.14*1.4e6
# drive-cavity detuning (zero = on resonance)
cons_delta = 0
def Plin(p):
    """Convert a power level from dBm to linear watts: 10**(p/10 - 3)."""
    return 10.**(p/10.-3.)
def photons(power):
    """Convert input power in dBm to an intracavity photon number.

    Uses the module-level cavity constants ``cons_w``, ``cons_ke``,
    ``cons_k`` and ``cons_delta`` together with :func:`Plin`.
    """
    return Plin(power)/(hbar*cons_w)*(cons_ke/((cons_k/2)**2+cons_delta**2))
# Location of the power-sweep measurement; the trailing part of the directory
# name is reused as the data-file name.
path = r'D:\data\20200223\074606_Power_Sweep_229mV'
data_name = path+path[16:]+r'.dat'
data = np.loadtxt(data_name, unpack=True)

# The sweep contains n traces; every data column is split into one chunk per
# power setting.
n = 27
power = np.array(np.array_split(data[0], n))
freq = np.array_split(data[1], n)[0]
real = np.array_split(data[2], n)
imag = np.array_split(data[3], n)
absol = np.array_split(data[4], n)

f = Fitter(S21r)
# fr = Fitter(custom_real)
# fm = Fitter(custom_imag)
# plt.plot(np.real(d.data),np.imag(d.data))
# plt.show()

# Per-trace fit results: linewidth k, resonance frequency f0, Q = f0/k.
k = np.zeros(n)
f0 = np.zeros(n)
Q = np.zeros(n)
k_err = np.zeros(n)
f0_err = np.zeros(n)
Q_err = np.zeros(n)

# First fit window (frequency-point indices) for traces 0-10.
left1 = 151
right1 = 246
for i in range(11):
    result = f.fit(freq[left1:right1], absol[i][left1:right1], print_report=True)
    # f.plot()
    k[i] = np.abs(result.params['k'].value)
    f0[i] = result.params['f0'].value
    Q[i] = f0[i]/k[i]
    k_err[i] = result.params['k'].stderr
    f0_err[i] = result.params['f0'].stderr
    # First-order propagation of the relative errors into Q = f0/k.
    Q_err[i] = (f0_err[i]/f0[i] + k_err[i]/k[i])*(f0[i]/k[i])

# Second fit window for traces 20-26.
# NOTE(review): traces 11-19 are never fitted, so their Q stays 0 in the
# plot below — confirm this is intentional.
left = 81
right = 141
for i in range(20, 27):
    result = f.fit(freq[left:right], absol[i][left:right], print_report=True)
    # f.plot()
    k[i] = np.abs(result.params['k'].value)
    f0[i] = result.params['f0'].value
    Q[i] = f0[i]/k[i]
    k_err[i] = result.params['k'].stderr
    f0_err[i] = result.params['f0'].stderr
    Q_err[i] = (f0_err[i]/f0[i] + k_err[i]/k[i])*(f0[i]/k[i])

# Optional removal of an outlier trace (disabled).
# power = np.delete(power.T[0],[35])
# Q = np.delete(Q,[35])
# Q_err = np.delete(Q_err,[35])
# k = np.delete(k,[35])
# k_err = np.delete(k_err,[35])
# print(power)

# Q vs photon number. The input power is offset by -80 before conversion —
# presumably line attenuation in dB; confirm.
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
ax.errorbar(photons(power.T[0]-80), Q/1e3, fmt='.', yerr=Q_err/1e3, capsize=2, elinewidth=1, markeredgewidth=2)
# ax.plot(power.T[0],(f0/ki)/1e3)
ax.set_xlabel(r'Photon number')
ax.set_ylabel(r'Q (kU)')
ax.set_xscale('log')
ax.set_title(path[8:])
plt.show()

# Same plot for the linewidth, plus optional text export (all disabled).
# fig1, ax1 = plt.subplots(nrows=1, ncols=1, sharex=True)
# ax1.errorbar(photons(power.T[0]-80), k/1e6, fmt='.', yerr = k_err/1e6 ,capsize=2, elinewidth=1, markeredgewidth=2)
# # ax.plot(power.T[0],(f0/ki)/1e3)
# ax1.set_xlabel(r'Photon number')
# ax1.set_ylabel(r'Linewidth (MHz)')
# ax1.set_xscale('log')
# ax1.set_title(path[8:])
# plt.show()
# np.savetxt(r'D:\data\20200217\Analysis_quality_factor\500M_below_cavity_229.txt', (photons(power.T[0]-80), k/1e6, k_err/1e6, Q/1e3, Q_err/1e3))
# fr.fit(freq, real[-1], print_report = True)
# fr.plot()
# fm.fit(freq, imag[-1], print_report = True)
# fm.plot()
import prediction_grid as pg
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
from knn import knn_predict
import matplotlib.pyplot as plt
import numpy as np
import synth_data as sd
# Point to find the distance from
point = np.array([2.4, 2.2])
# Toy data points
points = np.array([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1],
[3, 2], [3, 3]])
# Labels of the corresponding points
labels = np.array([*np.zeros((4, ), dtype=int), *np.ones((5, ), dtype=int)])
# Visual sanity check: toy points coloured by label, query point in red.
plt.scatter(points[:, 0], points[:, 1], c=labels)
plt.plot(point[0], point[1], "ro", alpha=0.5)
plt.xlim(0.5, 3.5)
plt.ylim(0.5, 3.5)
plt.show()
# Sample generator testing
points, labels = sd.make_synth_data()
plt.scatter(points[:, 0], points[:, 1], c=labels)
plt.show()
# Build and plot a kNN decision surface over a fresh synthetic sample.
# limits looks like (xmin, xmax, ymin, ymax, step) — confirm in prediction_grid.
points, labels = sd.make_synth_data()
limits = (-3, 4, -3, 4, 0.1)
k = 5
xx, yy, prediction_grid = pg.make_prediction_grid(
limits,
points,
labels,
k,
)
pg.plot_prediction_grid(xx,
yy,
prediction_grid,
"knn_prediction_grid_5.pdf",
points=points,
labels=labels,
xlab="Variable1",
ylab="Variable2",
title="Prediction Grid(K=5)")
# Load Iris dataset from sklearn
iris = datasets.load_iris()
# Only the first two features are used so the grid can be drawn in 2-D.
predictors = iris['data'][:, :2]
labels = iris['target']
fig, ax = plt.subplots(figsize=(10, 10))
ax.scatter(predictors[:, 0], predictors[:, 1], c=labels)
pg.annotate(ax,
xlab=iris['target_names'][0],
ylab=iris['target_names'][1],
title="Iris Data Scatter-Plot")
plt.show()
fig.savefig("Iris_scatter.pdf")
# Prediction grid over the Iris feature ranges using the homemade predictor.
limits = (4, 8.2, 1.8, 4.7, 0.1)
xx, yy, prediction_grid = pg.make_prediction_grid(
limits, predictors, labels, k=5)
pg.plot_prediction_grid(xx,
yy,
prediction_grid,
"Iris_grid_homemade.pdf",
points=predictors,
labels=labels,
xlab=iris['target_names'][0].capitalize(),
ylab=iris['target_names'][1].capitalize(),
title="Iris dataset prediction-grid using homemade predictor")
# Compare the homemade predictor against sklearn's on the same data.
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(predictors, labels)
sk_predictions = knn.predict(predictors)
my_predictions = np.array(
[knn_predict(point, predictors, labels) for point in predictors])
# NOTE(review): dividing by ``sk_predictions.shape`` (a tuple) rather than
# ``sk_predictions.shape[0]`` looks wrong, and the resulting accuracy is
# discarded — confirm whether this line should be fixed/printed.
sum(labels == my_predictions) / sk_predictions.shape
# Same prediction grid, this time built with the sklearn classifier.
limits = (4, 8.2, 1.8, 4.7, 0.1)
xx, yy, prediction_grid = pg.make_prediction_grid_sklearn(
limits, predictors, labels, k=5)
pg.plot_prediction_grid(xx,
yy,
prediction_grid,
"Iris_grid_sklearn.pdf",
points=predictors,
labels=labels,
xlab=iris['target_names'][0].capitalize(),
ylab=iris['target_names'][1].capitalize(),
title="Iris dataset prediction-grid Using sklearn")
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
from knn import knn_predict
import matplotlib.pyplot as plt
import numpy as np
import synth_data as sd
# Point to find the distance from
point = np.array([2.4, 2.2])
# Toy data points
points = np.array([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1],
[3, 2], [3, 3]])
# Labels of the correspoinding points
labels = np.array([*np.zeros((4, ), dtype=int), *np.ones((5, ), dtype=int)])
plt.scatter(points[:, 0], points[:, 1], c=labels)
plt.plot(point[0], point[1], "ro", alpha=0.5)
plt.xlim(0.5, 3.5)
plt.ylim(0.5, 3.5)
plt.show()
# Sample generator testing
points, labels = sd.make_synth_data()
plt.scatter(points[:, 0], points[:, 1], c=labels)
plt.show()
points, labels = sd.make_synth_data()
limits = (-3, 4, -3, 4, 0.1)
k = 5
xx, yy, prediction_grid = pg.make_prediction_grid(
limits,
points,
labels,
k,
)
pg.plot_prediction_grid(xx,
yy,
prediction_grid,
"knn_prediction_grid_5.pdf",
points=points,
labels=labels,
xlab="Variable1",
ylab="Variable2",
title="Prediction Grid(K=5)")
# Load Iris dataset from sklearn
iris = datasets.load_iris()
predictors = iris['data'][:, :2]
labels = iris['target']
fig, ax = plt.subplots(figsize=(10, 10))
ax.scatter(predictors[:, 0], predictors[:, 1], c=labels)
pg.annotate(ax,
xlab=iris['target_names'][0],
ylab=iris['target_names'][1],
title="Iris Data Scatter-Plot")
plt.show()
fig.savefig("Iris_scatter.pdf")
limits = (4, 8.2, 1.8, 4.7, 0.1)
xx, yy, prediction_grid = pg.make_prediction_grid(
limits, predictors, labels, k=5)
pg.plot_prediction_grid(xx,
yy,
prediction_grid,
"Iris_grid_homemade.pdf",
points=predictors,
labels=labels,
xlab=iris['target_names'][0].capitalize(),
ylab=iris['target_names'][1].capitalize(),
title="Iris dataset prediction-grid using homemade predictor")
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(predictors, labels)
sk_predictions = knn.predict(predictors)
my_predictions = np.array(
[knn_predict(point, predictors, labels) for point in predictors])
sum(labels == my_predictions) / sk_predictions.shape
limits = (4, 8.2, 1.8, 4.7, 0.1)
xx, yy, prediction_grid = pg.make_prediction_grid_sklearn(
limits, predictors, labels, k=5)
pg.plot_prediction_grid(xx,
yy,
prediction_grid,
"Iris_grid_sklearn.pdf",
points=predictors,
labels=labels,
xlab=iris['target_names'][0].capitalize(),
ylab=iris['target_names'][1].capitalize(),
title="Iris dataset prediction-grid Using sklearn") | 0.886365 | 0.707361 |
import re
import sys
from pathlib import Path
from toollib import utils, regexp
from toollib.decorator import sys_required
from toollib.tcli import here
from toollib.tcli.base import BaseCmd
from toollib.tcli.option import Options, Arg
class Cmd(BaseCmd):
    """tcli command: configure passwordless (key-based) SSH login on CentOS/EL hosts."""

    def __init__(self):
        super().__init__()

    def add_options(self):
        """Declare the command's CLI options (user, password, port, target ips)."""
        options = Options(
            name='ssh key',
            desc='ssh免密登录配置',
            optional={
                self.set_sshkey: [
                    Arg('-u', '--user', required=True, type=str, help='用户'),
                    Arg('-p', '--passwd', required=True, type=str, help='密码'),
                    Arg('--port', default=22, type=int, help='端口'),
                    Arg('-i', '--ips', required=True, type=str,
                        help='ips, 1.多个ip可用逗号隔开;2.也可指定文件(一行一ip)'),
                ]}
        )
        return options

    @sys_required('centos|\.el\d', errmsg='centos|el')
    def set_sshkey(self):
        """Run the bundled shell script that pushes the SSH key to every target ip."""
        user = self.parse_args.user
        passwd = self.parse_args.passwd
        port = self.parse_args.port
        ips = self._parse_ips(self.parse_args.ips)
        shb = here.joinpath('commands/plugins/set_sshkey.sh.x')
        # NOTE(review): user/passwd are interpolated into a shell command line and
        # will be visible in the process list — confirm this is acceptable.
        cmd = f'chmod u+x {shb} && {shb} {user} {passwd} {port} {" ".join(ips)}'
        p = utils.syscmd(cmd)
        out, err = p.communicate()
        if out:
            sys.stdout.write(u'{0}'.format(out.decode('utf-8')))
        if err:
            sys.stderr.write(u'{0}'.format(err.decode('utf-8')))

    def _parse_ips(self, ips) -> set:
        """Parse ``--ips``: a comma-separated list, or a file with one ip per line.

        Lines starting with ``#`` in an ips file are skipped; every entry must
        match ``regexp.ipv4_simple`` or a ValueError is raised.
        """
        parse_ips = set()
        if Path(ips).is_file():
            with open(ips, mode='r', encoding='utf8') as fp:
                ip_list = [ip.replace('\n', '') for ip in fp.readlines() if not ip.startswith('#')]
        else:
            ip_list = ips.split(',')
        ip_list = [ip.strip() for ip in ip_list if ip.strip()]
        if not ip_list:
            raise ValueError('ips不能为空')
        for ip in ip_list:
            if not re.match(regexp.ipv4_simple, ip):
                raise ValueError('%s =>ip格式错误' % ip)
            parse_ips.add(ip)
        return parse_ips
import sys
from pathlib import Path
from toollib import utils, regexp
from toollib.decorator import sys_required
from toollib.tcli import here
from toollib.tcli.base import BaseCmd
from toollib.tcli.option import Options, Arg
class Cmd(BaseCmd):
def __init__(self):
super().__init__()
def add_options(self):
options = Options(
name='ssh key',
desc='ssh免密登录配置',
optional={
self.set_sshkey: [
Arg('-u', '--user', required=True, type=str, help='用户'),
Arg('-p', '--passwd', required=True, type=str, help='密码'),
Arg('--port', default=22, type=int, help='端口'),
Arg('-i', '--ips', required=True, type=str,
help='ips, 1.多个ip可用逗号隔开;2.也可指定文件(一行一ip)'),
]}
)
return options
@sys_required('centos|\.el\d', errmsg='centos|el')
def set_sshkey(self):
user = self.parse_args.user
passwd = self.parse_args.passwd
port = self.parse_args.port
ips = self._parse_ips(self.parse_args.ips)
shb = here.joinpath('commands/plugins/set_sshkey.sh.x')
cmd = f'chmod u+x {shb} && {shb} {user} {passwd} {port} {" ".join(ips)}'
p = utils.syscmd(cmd)
out, err = p.communicate()
if out:
sys.stdout.write(u'{0}'.format(out.decode('utf-8')))
if err:
sys.stderr.write(u'{0}'.format(err.decode('utf-8')))
def _parse_ips(self, ips) -> set:
parse_ips = set()
if Path(ips).is_file():
with open(ips, mode='r', encoding='utf8') as fp:
ip_list = [ip.replace('\n', '') for ip in fp.readlines() if not ip.startswith('#')]
else:
ip_list = ips.split(',')
ip_list = [ip.strip() for ip in ip_list if ip.strip()]
if not ip_list:
raise ValueError('ips不能为空')
for ip in ip_list:
if not re.match(regexp.ipv4_simple, ip):
raise ValueError('%s =>ip格式错误' % ip)
parse_ips.add(ip)
return parse_ips | 0.171581 | 0.083928 |
def _get_buffer_view(in_obj):
if isinstance(in_obj, str):
raise TypeError('Unicode-objects must be encoded before calculating a CRC')
mv = memoryview(in_obj)
if mv.ndim > 1:
raise BufferError('Buffer must be single dimension')
return mv
def _crc8(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFF
for x in mv.tobytes():
crc = table[x ^ crc]
return crc
def _crc8r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFF
for x in mv.tobytes():
crc = table[x ^ crc]
return crc
def _crc16(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFF
for x in mv.tobytes():
crc = table[x ^ ((crc>>8) & 0xFF)] ^ ((crc << 8) & 0xFF00)
return crc
def _crc16r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc24(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc>>16 & 0xFF)] ^ ((crc << 8) & 0xFFFF00)
return crc
def _crc24r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc32(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ ((crc>>24) & 0xFF)] ^ ((crc << 8) & 0xFFFFFF00)
return crc
def _crc32r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc64(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFFFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ ((crc>>56) & 0xFF)] ^ ((crc << 8) & 0xFFFFFFFFFFFFFF00)
return crc
def _crc64r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFFFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc | third_party/gsutil/third_party/crcmod/python3/crcmod/_crcfunpy.py |
def _get_buffer_view(in_obj):
if isinstance(in_obj, str):
raise TypeError('Unicode-objects must be encoded before calculating a CRC')
mv = memoryview(in_obj)
if mv.ndim > 1:
raise BufferError('Buffer must be single dimension')
return mv
def _crc8(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFF
for x in mv.tobytes():
crc = table[x ^ crc]
return crc
def _crc8r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFF
for x in mv.tobytes():
crc = table[x ^ crc]
return crc
def _crc16(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFF
for x in mv.tobytes():
crc = table[x ^ ((crc>>8) & 0xFF)] ^ ((crc << 8) & 0xFF00)
return crc
def _crc16r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc24(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc>>16 & 0xFF)] ^ ((crc << 8) & 0xFFFF00)
return crc
def _crc24r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc32(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ ((crc>>24) & 0xFF)] ^ ((crc << 8) & 0xFFFFFF00)
return crc
def _crc32r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc64(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFFFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ ((crc>>56) & 0xFF)] ^ ((crc << 8) & 0xFFFFFFFFFFFFFF00)
return crc
def _crc64r(data, crc, table):
mv = _get_buffer_view(data)
crc = crc & 0xFFFFFFFFFFFFFFFF
for x in mv.tobytes():
crc = table[x ^ (crc & 0xFF)] ^ (crc >> 8)
return crc | 0.566738 | 0.152916 |
import numpy
import dolfin
from ocellaris.utils import timeit, OcellarisError
@timeit
def get_dof_region_marks(simulation, V):
    """
    Given a function space, return a dictionary mapping dof number to a
    list of region numbers (indexed from 0, same as the region list from
    the input file).

    Many dofs will not be included in the mapping since they are not
    inside a boundary region (not on a boundary facet). This property is
    used elsewhere to identify boundary dofs, in mark_cell_layers() and
    in SlopeLimiterBoundaryConditions.
    """
    # This function only supports a small subset of function spaces
    family = V.ufl_element().family()
    assert family in ('Lagrange', 'Discontinuous Lagrange')

    # Get local indices for the facet dofs for each facet in the cell
    facet_dof_indices = get_facet_dof_indices(V)

    # Get dofs that share the same location (relevant for DG)
    same_loc_dofs = get_same_loc_dofs(V)

    # Loop over the mesh and collect dofs connected to boundary regions
    dm = V.dofmap()
    facet_marks = [int(m) for m in simulation.data['boundary_marker'].array()]
    mesh = simulation.data['mesh']
    dof_region_marks = {}
    for cell in dolfin.cells(mesh, 'all'):
        dofs = dm.cell_dofs(cell.index())
        for ifacet, facet in enumerate(dolfin.facets(cell)):
            # Facet markers are stored 1-based; -1 therefore means
            # "not a boundary facet" and is skipped.
            mark = facet_marks[facet.index()] - 1
            if mark == -1:
                continue
            facet_dofs = dofs[facet_dof_indices[ifacet]]
            for fdof in facet_dofs:
                dof_region_marks.setdefault(fdof, []).append(mark)

    # Treat all dofs in the same location in the same way
    # NOTE(review): assigning ``regions`` directly aliases the list between
    # the two dofs, so later appends extend both entries. This looks
    # intentional (co-located dofs share marks) — confirm.
    for fdof, regions in list(dof_region_marks.items()):
        for dof2 in same_loc_dofs[fdof]:
            if dof2 not in dof_region_marks:
                dof_region_marks[dof2] = regions
                continue
            for mark in regions:
                if mark not in dof_region_marks[dof2]:
                    dof_region_marks[dof2].append(mark)

    # Order must be the same on all ranks independent of the iteration
    # order of the dof_region_marks dictionary in the loop above
    for marks in dof_region_marks.values():
        marks.sort()
    return dof_region_marks
def get_facet_dof_indices(V):
    """
    Get local indices for the facet dofs for each facet in the cell.

    Row f of the returned integer array holds the cell-local dof indices
    lying on facet f; for the simplex orderings handled here row f omits
    local vertex index f.
    """
    ndim = V.mesh().topology().dim()
    degree = V.ufl_element().degree()
    ndim_deg = (ndim, degree)
    if ndim_deg == (2, 1):
        # Linear triangle
        facet_dof_indices = numpy.zeros((3, 2), dtype=int)
        facet_dof_indices[0, :] = (1, 2)
        facet_dof_indices[1, :] = (0, 2)
        facet_dof_indices[2, :] = (0, 1)
    elif ndim_deg == (2, 2):
        # Quadratic triangle
        facet_dof_indices = numpy.zeros((3, 3), dtype=int)
        facet_dof_indices[0, :] = (1, 2, 3)
        facet_dof_indices[1, :] = (0, 2, 4)
        facet_dof_indices[2, :] = (0, 1, 5)
    elif ndim_deg == (3, 1):
        # Linear tetrahedron
        facet_dof_indices = numpy.zeros((4, 3), dtype=int)
        facet_dof_indices[0, :] = (1, 2, 3)
        facet_dof_indices[1, :] = (0, 2, 3)
        facet_dof_indices[2, :] = (0, 1, 3)
        facet_dof_indices[3, :] = (0, 1, 2)
    elif ndim_deg == (3, 2):
        # Quadratic tetrahedron
        facet_dof_indices = numpy.zeros((4, 6), dtype=int)
        facet_dof_indices[0, :] = (1, 2, 3, 4, 5, 6)
        facet_dof_indices[1, :] = (0, 2, 3, 4, 7, 8)
        facet_dof_indices[2, :] = (0, 1, 3, 5, 7, 9)
        facet_dof_indices[3, :] = (0, 1, 2, 6, 8, 9)
    else:
        raise OcellarisError(
            'Unsupported element ndim=%d degree=%d' % ndim_deg,
            'The boundary condition get_dof_region_marks '
            'code does not support this element',
        )
    return facet_dof_indices
def get_same_loc_dofs(V):
    """
    Return a dictionary mapping dof number to the other dofs at the same
    location in space. V should obviously be a discontinuous space,
    otherwise there will not be multiple dofs in the same location.
    """
    gdim = V.mesh().geometry().dim()
    dof_coordinates = V.tabulate_dof_coordinates().reshape((-1, gdim))

    # Group dofs by (rounded) coordinate; in DG spaces several dofs share
    # the same physical location. Rounding to 5 decimals makes nearly
    # coincident coordinates compare equal.
    coord_to_dofs = {}
    for dof in range(len(dof_coordinates)):
        coord = tuple(round(x, 5) for x in dof_coordinates[dof])
        coord_to_dofs.setdefault(coord, []).append(dof)

    # Map every dof to the other dofs at its coordinate
    same_loc_dofs = {}
    for dofs in coord_to_dofs.values():
        for dof in dofs:
            same_loc_dofs[dof] = tuple(d for d in dofs if d != dof)
    return same_loc_dofs
def mark_cell_layers(
    simulation, V, layers=0, dof_region_marks=None, named_boundaries=None
):
    """
    Return all cells on the boundary and all connected cells in a given
    number of layers surrounding the boundary cells. Vertex neighbours
    are used to determine a cell's neighbours.

    The initial list of cells is taken from a dictionary mapping dof to
    boundary region number. All cells containing these dofs are taken as
    the zeroth layer. If no such dictionary is provided then
    get_dof_region_marks(simulation, V) is used, hence the cells
    containing the boundary facing facet are used as the zeroth level.

    If named_boundaries is given then only cells on the given named
    boundaries are included in the zeroth layer. The special name 'all'
    will include all boundary regions.

    @return: set of the cell numbers of marked cells
    """
    if named_boundaries is None:
        named_boundaries = ['all']
    if not named_boundaries:
        return set()
    if dof_region_marks is None:
        dof_region_marks = get_dof_region_marks(simulation, V)

    # Get all regions with names
    all_regions = {region.name: region.index for region in simulation.data['boundary']}
    all_idxs = {region.index: region.name for region in simulation.data['boundary']}

    # Verify that the boundary names are unique
    assert len(all_regions) == len(simulation.data['boundary'])
    assert len(all_regions) == len(all_idxs)

    # Check that all named regions correspond to boundary regions
    for rname in named_boundaries:
        if rname != 'all' and rname not in all_regions:
            raise OcellarisError(
                'Unknown boundary region in input',
                '%r is not a boundary region' % rname,
            )

    # Restrict the dof->marks mapping to the requested boundary regions
    if 'all' not in named_boundaries:
        to_keep = [mark for mark, name in all_idxs.items() if name in named_boundaries]
        # Remove marks pointing to unwanted boundary regions
        drm = {
            dof: [mark for mark in dof_marks if mark in to_keep]
            for dof, dof_marks in dof_region_marks.items()
        }
        # Remove dofs with an empty region mark list
        dof_region_marks = {k: v for k, v in drm.items() if v}

    # Mark initial zeroth layer cells
    mesh = simulation.data['mesh']
    dm = V.dofmap()
    boundary_cells = set()
    for cell in dolfin.cells(mesh):
        dofs = dm.cell_dofs(cell.index())
        for dof in dofs:
            if dof in dof_region_marks:
                boundary_cells.add(cell.index())
                break  # the cell is marked; no need to inspect its other dofs

    # Iteratively mark cells adjacent to boundary cells
    for _ in range(layers):
        boundary_cells_old = set(boundary_cells)
        for cell_index in boundary_cells_old:
            vertices = simulation.data['connectivity_CV'](cell_index)
            for vert_index in vertices:
                for nb in simulation.data['connectivity_VC'](vert_index):
                    boundary_cells.add(nb)
    return boundary_cells
import numpy
import dolfin
from ocellaris.utils import timeit, OcellarisError
@timeit
def get_dof_region_marks(simulation, V):
"""
Given a function space, return a dictionary mapping dof number to a
list of region number (indexed from 0, same as region list from the
input file).
Many dofs will not be included in the mapping since they are not
inside a boundary region (not on a boundary facet). This property is
used elsewhere to identify boundary dofs, in mark_cell_layers() and
in SlopeLimiterBoundaryConditions
"""
# This function only supports a small subset of function spaces
family = V.ufl_element().family()
assert family in ('Lagrange', 'Discontinuous Lagrange')
# Get local indices for the facet dofs for each facet in the cell
facet_dof_indices = get_facet_dof_indices(V)
# Get dofs that share the same location (relevant for DG)
same_loc_dofs = get_same_loc_dofs(V)
# Loop over mesh and get dofs that are connected to boundary regions
dm = V.dofmap()
facet_marks = [int(m) for m in simulation.data['boundary_marker'].array()]
mesh = simulation.data['mesh']
dof_region_marks = {}
for cell in dolfin.cells(mesh, 'all'):
dofs = dm.cell_dofs(cell.index())
for ifacet, facet in enumerate(dolfin.facets(cell)):
# Get facet region marker
mark = facet_marks[facet.index()] - 1
# Skip non-boundary facets
if mark == -1:
continue
facet_dofs = dofs[facet_dof_indices[ifacet]]
for fdof in facet_dofs:
dof_region_marks.setdefault(fdof, []).append(mark)
# Treat all dofs in the same location in the same way
for fdof, regions in list(dof_region_marks.items()):
for dof2 in same_loc_dofs[fdof]:
if dof2 not in dof_region_marks:
dof_region_marks[dof2] = regions
continue
for mark in regions:
if mark not in dof_region_marks[dof2]:
dof_region_marks[dof2].append(mark)
# Order must be the same on all ranks independent of iteration order
# of the dof_region_marks dictionary in the above loop
for marks in dof_region_marks.values():
marks.sort()
return dof_region_marks
def get_facet_dof_indices(V):
"""
Get local indices for the facet dofs for each facet in the cell
"""
ndim = V.mesh().topology().dim()
degree = V.ufl_element().degree()
ndim_deg = (ndim, degree)
if ndim_deg == (2, 1):
# Linear triangle
facet_dof_indices = numpy.zeros((3, 2), dtype=int)
facet_dof_indices[0, :] = (1, 2)
facet_dof_indices[1, :] = (0, 2)
facet_dof_indices[2, :] = (0, 1)
elif ndim_deg == (2, 2):
# Quadratic triangle
facet_dof_indices = numpy.zeros((3, 3), dtype=int)
facet_dof_indices[0, :] = (1, 2, 3)
facet_dof_indices[1, :] = (0, 2, 4)
facet_dof_indices[2, :] = (0, 1, 5)
elif ndim_deg == (3, 1):
# Linear tetrahedron
facet_dof_indices = numpy.zeros((4, 3), dtype=int)
facet_dof_indices[0, :] = (1, 2, 3)
facet_dof_indices[1, :] = (0, 2, 3)
facet_dof_indices[2, :] = (0, 1, 3)
facet_dof_indices[3, :] = (0, 1, 2)
elif ndim_deg == (3, 2):
# Quadratic tetrahedron
facet_dof_indices = numpy.zeros((4, 6), dtype=int)
facet_dof_indices[0, :] = (1, 2, 3, 4, 5, 6)
facet_dof_indices[1, :] = (0, 2, 3, 4, 7, 8)
facet_dof_indices[2, :] = (0, 1, 3, 5, 7, 9)
facet_dof_indices[3, :] = (0, 1, 2, 6, 8, 9)
else:
raise OcellarisError(
'Unsupported element ndim=%d degree=%d' % ndim_deg,
'The boundary condition get_dof_region_marks '
'code does not support this element',
)
return facet_dof_indices
def get_same_loc_dofs(V):
"""
Return a dictionary mapping dof number to other dofs at the same
location in space. V should obviously be a discontinuous space,
otherwise there will not be multiple dofs in the same location
"""
gdim = V.mesh().geometry().dim()
dof_coordinates = V.tabulate_dof_coordinates().reshape((-1, gdim))
# Map dof coordinate to dofs, this is for DG so multiple dofs
# will share the same location
coord_to_dofs = {}
max_neighbours = 0
for dof in range(len(dof_coordinates)):
coord = tuple(round(x, 5) for x in dof_coordinates[dof])
dofs = coord_to_dofs.setdefault(coord, [])
dofs.append(dof)
max_neighbours = max(max_neighbours, len(dofs) - 1)
# Loop through dofs at same coordinate and map them to each other
same_loc_dofs = {}
for dofs in coord_to_dofs.values():
for dof in dofs:
same_loc_dofs[dof] = tuple(d for d in dofs if d != dof)
return same_loc_dofs
def mark_cell_layers(
simulation, V, layers=0, dof_region_marks=None, named_boundaries=None
):
"""
Return all cells on the boundary and all connected cells in a given
number of layers surrounding the boundary cells. Vertex neighbours
are used to determine a cells neighbours.
The initial list of cells is taken from a dictionary mapping dof to
boundary region number. All cells containing these dofs are taken as
the zeroth layer. If no such dictionary is provided then
get_dof_region_marks(simulation, V) is used, hence the cells
containing the boundary facing facet are used as the zeroth level.
If named_boundaries is given then only cells on the given named
boundaries are included in the zeroth layer. The special name 'all'
will include all boundary regions.
@return: set of the cell numbers of marked cells
"""
if named_boundaries is None:
named_boundaries = ['all']
if not named_boundaries:
return set()
if dof_region_marks is None:
dof_region_marks = get_dof_region_marks(simulation, V)
# Get all regions with names
all_regions = {region.name: region.index for region in simulation.data['boundary']}
all_idxs = {region.index: region.name for region in simulation.data['boundary']}
# Verify that the boundary names are unique
assert len(all_regions) == len(simulation.data['boundary'])
assert len(all_regions) == len(all_idxs)
# Check that all named regions correspond to boundary regions
for rname in named_boundaries:
if rname != 'all' and rname not in all_regions:
raise OcellarisError(
'Unknown boundary region in input',
'%r is not a boundary region' % rname,
)
# Names of all boundary regions
if 'all' not in named_boundaries:
to_keep = [mark for mark, name in all_idxs.items() if name in named_boundaries]
# Remove marks to unwanted bounbdary regions
drm = {
dof: [mark for mark in dof_marks if mark in to_keep]
for dof, dof_marks in dof_region_marks.items()
}
# Remove dofs wih empty region mark list
dof_region_marks = {k: v for k, v in drm.items() if v}
# Mark initial zeroth layer cells
mesh = simulation.data['mesh']
dm = V.dofmap()
boundary_cells = set()
for cell in dolfin.cells(mesh):
dofs = dm.cell_dofs(cell.index())
for dof in dofs:
if dof in dof_region_marks:
boundary_cells.add(cell.index())
continue
# Iteratively mark cells adjacent to boundary cells
for _ in range(layers):
boundary_cells_old = set(boundary_cells)
for cell_index in boundary_cells_old:
vertices = simulation.data['connectivity_CV'](cell_index)
for vert_index in vertices:
for nb in simulation.data['connectivity_VC'](vert_index):
boundary_cells.add(nb)
return boundary_cells | 0.758332 | 0.64607 |
import math
from openmdao.lib.datatypes.api import Float
from nastranwrapper.nastran import NastranComponent
class BladeStatic(NastranComponent):
    """ Model of a Blade quad elements - Nastran Implementation."""

    # Design variables: shell thicknesses written into the Nastran input deck.
    group1_thickness = Float(0.5, nastran_card="PSHELL",
                             nastran_id="1", nastran_fieldnum=3,
                             iotype='in', units='inch',
                             desc='Thickness for group 1')

    group2_thickness = Float(0.03, nastran_card="PSHELL",
                             nastran_id="2", nastran_fieldnum=3,
                             iotype='in', units='inch',
                             desc='Thickness for group 2')

    # These are actually groups of stresses that will be constrained;
    # the values are filled in by execute() from the parsed Nastran output.
    group1_stress = Float(0.,  # nastran_func=stress1,
                          iotype='out',
                          units='lb/(inch*inch)',
                          desc='Stress in group 1')

    group2_stress = Float(0.,  # nastran_func=stress2,
                          iotype='out',
                          units='lb/(inch*inch)',
                          desc='Stress in group 2')

    displacement_z_dir = Float(0.1632, iotype='out',
                               units='inch',
                               desc='Displacement in z-direction',
                               # nastran_func=x1disp)
                               nastran_header="displacement vector",
                               nastran_subcase=1,
                               nastran_constraints={"POINT ID." : "28"},
                               nastran_columns=["T3"])

    def mass(filep):
        # Not a regular method: passed as the ``nastran_func`` callback below
        # and called with the output-file parser as its only argument.
        filep.reset_anchor()
        filep.mark_anchor("MASS AXIS SYSTEM (S)")
        return filep.transfer_var(1, 2)

    weight = Float(0., nastran_func=mass, iotype='out', units='lb',
                   desc='Weight of the structure')

    def execute(self):
        """ Simulates the analysis of a blade with quad elements.
        Force, Stress, Displacement,Frequency and Weight are returned at
        the Blade output.
        """
        super(BladeStatic, self).execute()

        header = "S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 ) OPTION = BILIN"
        columns = ["VON MISES"]
        data = self.parser.get(header, None, {}, columns, row_width=15)

        # Largest von Mises value per element; blank cells are skipped.
        von_mises = []
        for element in data:
            values = map(lambda x: x[0], element)
            biggest = -1.0E+10
            for value in values:
                if value != '':
                    biggest = max(float(value), biggest)
            von_mises.append(biggest)

        # Element-id ranges (1-based) of the two constrained groups.
        groups = [range(25601, 25945 + 1), range(1, 25600 + 1)]
        [self.group1_stress, self.group2_stress] = group_von_mises(groups, von_mises)
def group_von_mises(groups, von_mises):
    """Return the largest absolute von Mises stress per element group.

    ``groups`` is an iterable of iterables of 1-based element ids;
    ``von_mises`` is the per-element stress list (0-indexed).
    """
    final = []
    for group in groups:
        final.append([])
        for element in group:
            final[-1].append(abs(von_mises[element - 1]))  # stresses is zero indexed
        # we actually just wanted the maximum
        final[-1] = max(final[-1])
    return final
def calculate_stress(components):
    """Return the von Mises equivalent stress sqrt((sigma + tau)/2).

    ``components`` is a 2-tuple ``(ax, tors)`` of axial and torsional
    stresses. The original Python 2 tuple-parameter signature
    ``def calculate_stress((ax, tors))`` is a SyntaxError in Python 3;
    unpacking the single pair argument keeps the call convention identical.
    """
    ax, tors = components
    sigma = 2 * ax * ax
    tau = 3 * tors * tors
    val = math.sqrt(.5 * (sigma + tau))
    return val
from openmdao.lib.datatypes.api import Float
from nastranwrapper.nastran import NastranComponent
class BladeStatic(NastranComponent):
""" Model of a Blade quad elements - Nastran Implementation."""
group1_thickness = Float(0.5, nastran_card="PSHELL",
nastran_id="1", nastran_fieldnum=3,
iotype='in', units='inch',
desc='Thickness for group 1')
group2_thickness = Float(0.03, nastran_card="PSHELL",
nastran_id="2", nastran_fieldnum=3,
iotype='in', units='inch',
desc='Thickness for group 2')
# these are actually groups of stresses that will be
# constrained
group1_stress = Float(0., #nastran_func=stress1,
iotype='out',
units='lb/(inch*inch)',
desc='Stress in group 1')
group2_stress = Float(0., #nastran_func=stress2,
iotype='out',
units='lb/(inch*inch)',
desc='Stress in group 2')
displacement_z_dir = Float(0.1632, iotype='out',
units='inch',
desc='Displacement in z-direction',
#nastran_func=x1disp)
nastran_header="displacement vector",
nastran_subcase=1,
nastran_constraints={"POINT ID." : "28"},
nastran_columns=["T3"])
def mass(filep):
filep.reset_anchor()
filep.mark_anchor("MASS AXIS SYSTEM (S)")
return filep.transfer_var(1, 2)
weight = Float(0., nastran_func=mass, iotype='out', units='lb',
desc='Weight of the structure')
def execute(self):
""" Simulates the analysis of a blade with quad elements.
Force, Stress, Displacement,Frequency and Weight are returned at
the Blade output.
"""
super(BladeStatic, self).execute()
stresses = []
header = "S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 ) OPTION = BILIN"
columns = ["VON MISES"]
data = self.parser.get(header, None, \
{}, columns, row_width=15)
von_mises =[]
for element in data:
values = map(lambda x: x[0],element)
biggest = -1.0E+10
for value in values:
if value != '':
biggest = max(float(value),biggest)
von_mises.append(biggest)
groups = [range(25601,25945+1), range(1, 25600+1)]
[self.group1_stress, self.group2_stress] = group_von_mises(groups, von_mises)
def group_von_mises(groups, von_mises):
final = []
for group in groups:
final.append([])
for element in group:
final[-1].append(abs(von_mises[element-1])) # stresses is zero indexed
# we actually just wanted the maximum
final[-1] = max(final[-1])
return final
def calculate_stress((ax, tors)):
sigma = 2 * ax * ax
tau = 3 * tors * tors
val = math.sqrt(.5 * (sigma + tau))
return val | 0.63023 | 0.40928 |
import argparse as _argparse
from game import *
from player import *
from util import timefn
parser = _argparse.ArgumentParser()
parser.add_argument("-ep", "--epochs", type=int, help="Training epochs",
default=20000)
parser.add_argument("-l", "--length", type=int, help="Board length",
default=3)
parser.add_argument("-n", "--nb", type=int, help="Number of players",
default=2)
parser.add_argument("-a", "--alpha", type=float, help="Learning rate",
default=0.8)
parser.add_argument("-g", "--gamma", type=float, help="Discount ratio",
default=0.95)
parser.add_argument("-r", "--reward", type=float, help="Reward per winning",
default=10.0)
parser.add_argument("-p", "--penalty", type=float, help="Penalty per losing",
default=-10.0)
parser.add_argument("-t", "--tie", type=float, help="Fee per tie",
default=-1.0)
parser.add_argument("-v", "--verbose", type=bool, help="Verbose or not",
default=True)
def main(args):
show_setting(args)
board = Board(args.length, args.length)
algPara = (args.alpha, args.gamma, args.reward, args.tie, args.penalty)
players = [QPlayer(i+1, QAlgorithm(*algPara)) for i in range(args.nb)]
umpire = Umpire()
train(args.epochs, players, umpire, board)
# demo
demo(args.verbose, players, umpire, board)
@timefn
def train(epochs, players, umpire, board):
game = ClassicGame()
for _ in range(epochs):
umpire.setPlayers(players)
game.start(umpire, board, board, board)
def demo(verbose, players, umpire, board):
demo = ClassicGame(verbose)
players[0] = HumanPlayer(1)
while True:
umpire.setPlayers(players)
demo.start(umpire, board, board, board)
def show_setting(args):
print("=== Trial Setting ===")
print("Epochs : ", args.epochs)
print("Board length : ", args.length)
print("Player number : ", args.nb)
print("Learning rate (alpha) : ", args.alpha)
print("Discount ratio (gamma) : ", args.gamma)
print("Reward per winning : ", args.reward)
print("Penalty per losing : ", args.penalty)
print("Fee per tie : ", args.tie)
print("Verbose : ", args.verbose)
if __name__ == '__main__':
args = parser.parse_args()
main(args) | main.py | import argparse as _argparse
from game import *
from player import *
from util import timefn
parser = _argparse.ArgumentParser()
parser.add_argument("-ep", "--epochs", type=int, help="Training epochs",
default=20000)
parser.add_argument("-l", "--length", type=int, help="Board length",
default=3)
parser.add_argument("-n", "--nb", type=int, help="Number of players",
default=2)
parser.add_argument("-a", "--alpha", type=float, help="Learning rate",
default=0.8)
parser.add_argument("-g", "--gamma", type=float, help="Discount ratio",
default=0.95)
parser.add_argument("-r", "--reward", type=float, help="Reward per winning",
default=10.0)
parser.add_argument("-p", "--penalty", type=float, help="Penalty per losing",
default=-10.0)
parser.add_argument("-t", "--tie", type=float, help="Fee per tie",
default=-1.0)
parser.add_argument("-v", "--verbose", type=bool, help="Verbose or not",
default=True)
def main(args):
show_setting(args)
board = Board(args.length, args.length)
algPara = (args.alpha, args.gamma, args.reward, args.tie, args.penalty)
players = [QPlayer(i+1, QAlgorithm(*algPara)) for i in range(args.nb)]
umpire = Umpire()
train(args.epochs, players, umpire, board)
# demo
demo(args.verbose, players, umpire, board)
@timefn
def train(epochs, players, umpire, board):
game = ClassicGame()
for _ in range(epochs):
umpire.setPlayers(players)
game.start(umpire, board, board, board)
def demo(verbose, players, umpire, board):
demo = ClassicGame(verbose)
players[0] = HumanPlayer(1)
while True:
umpire.setPlayers(players)
demo.start(umpire, board, board, board)
def show_setting(args):
print("=== Trial Setting ===")
print("Epochs : ", args.epochs)
print("Board length : ", args.length)
print("Player number : ", args.nb)
print("Learning rate (alpha) : ", args.alpha)
print("Discount ratio (gamma) : ", args.gamma)
print("Reward per winning : ", args.reward)
print("Penalty per losing : ", args.penalty)
print("Fee per tie : ", args.tie)
print("Verbose : ", args.verbose)
if __name__ == '__main__':
args = parser.parse_args()
main(args) | 0.275227 | 0.117724 |
import csv
import numpy as np
import matplotlib.pyplot as plt
gpudata = np.zeros((5,6))
with open('exhongpugen9.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
line_count = 0
for row in csvreader:
if line_count == 0:
gpunames = row
line_count += 1
else:
gpudata[line_count-1,:] = row
line_count += 1
#<codecell>
plt.plot(gpudata[:,0], gpudata[:,1], 'bo-', label=gpunames[1])
plt.plot(gpudata[:,0], gpudata[:,2], 'go-', label=gpunames[2])
plt.plot(gpudata[:,0], gpudata[:,3], 'ro-', label=gpunames[3])
plt.plot(gpudata[:,0], gpudata[:,4], 'mo-', label=gpunames[4])
plt.plot(gpudata[:,0], gpudata[:,5], 'co-', label=gpunames[5])
plt.legend()
plt.grid()
plt.title('Intel(R) UHD Graphics P630')
plt.xlabel('Number of nodes')
plt.ylabel('Calculation time [s]')
plt.semilogy()
plt.savefig('ExhaustiveOnGPU.svg', format='svg', dpi=1200)
#<codecell>
cpudata = np.zeros((5,6))
with open('exhonxeon.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
line_count = 0
for row in csvreader:
if line_count == 0:
cpunames = row
line_count += 1
else:
cpudata[line_count-1,:] = row
line_count += 1
#<codecell>
plt.plot(cpudata[:,0], cpudata[:,1], 'bo-', label=cpunames[1])
plt.plot(cpudata[:,0], cpudata[:,2], 'go-', label=cpunames[2])
plt.plot(cpudata[:,0], cpudata[:,3], 'ro-', label=cpunames[3])
plt.plot(cpudata[:,0], cpudata[:,4], 'mo-', label=cpunames[4])
plt.plot(cpudata[:,0], cpudata[:,5], 'co-', label=cpunames[5])
plt.legend()
plt.grid()
plt.title('Intel(R) Xeon(R) E-2176G CPU @ 3.50GHz')
plt.xlabel('Number of nodes')
plt.ylabel('Calculation time [s]')
plt.semilogy()
plt.savefig('ExhaustiveOnCPU.svg', format='svg', dpi=1200)
#<codecell>
#<codecell>
plt.plot(gpudata[:,0], gpudata[:,1], 'bo-', label=gpunames[1]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,2], 'go-', label=gpunames[2]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,3], 'ro-', label=gpunames[3]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,4], 'mo-', label=gpunames[4]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,5], 'co-', label=gpunames[5]+'(gpu)')
plt.plot(cpudata[:,0], cpudata[:,1], 'b*--', label=cpunames[1]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,2], 'g*--', label=cpunames[2]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,3], 'r*--', label=cpunames[3]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,4], 'm*--', label=cpunames[4]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,5], 'c*--', label=cpunames[5]+'(cpu)')
plt.legend(ncol=2)
plt.grid()
plt.title('Intel(R) UHD Graphics P630 vs Intel(R) Xeon(R) E-2176G CPU @ 3.50GHz')
plt.xlabel('Number of nodes')
plt.ylabel('Calculation time [s]')
plt.semilogy()
plt.savefig('ExhaustiveGPUvsCPU.svg', format='svg', dpi=1200)
#<codecell> | benchmarks/exhaustive_search/exhresplots.py | import csv
import numpy as np
import matplotlib.pyplot as plt
gpudata = np.zeros((5,6))
with open('exhongpugen9.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
line_count = 0
for row in csvreader:
if line_count == 0:
gpunames = row
line_count += 1
else:
gpudata[line_count-1,:] = row
line_count += 1
#<codecell>
plt.plot(gpudata[:,0], gpudata[:,1], 'bo-', label=gpunames[1])
plt.plot(gpudata[:,0], gpudata[:,2], 'go-', label=gpunames[2])
plt.plot(gpudata[:,0], gpudata[:,3], 'ro-', label=gpunames[3])
plt.plot(gpudata[:,0], gpudata[:,4], 'mo-', label=gpunames[4])
plt.plot(gpudata[:,0], gpudata[:,5], 'co-', label=gpunames[5])
plt.legend()
plt.grid()
plt.title('Intel(R) UHD Graphics P630')
plt.xlabel('Number of nodes')
plt.ylabel('Calculation time [s]')
plt.semilogy()
plt.savefig('ExhaustiveOnGPU.svg', format='svg', dpi=1200)
#<codecell>
cpudata = np.zeros((5,6))
with open('exhonxeon.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
line_count = 0
for row in csvreader:
if line_count == 0:
cpunames = row
line_count += 1
else:
cpudata[line_count-1,:] = row
line_count += 1
#<codecell>
plt.plot(cpudata[:,0], cpudata[:,1], 'bo-', label=cpunames[1])
plt.plot(cpudata[:,0], cpudata[:,2], 'go-', label=cpunames[2])
plt.plot(cpudata[:,0], cpudata[:,3], 'ro-', label=cpunames[3])
plt.plot(cpudata[:,0], cpudata[:,4], 'mo-', label=cpunames[4])
plt.plot(cpudata[:,0], cpudata[:,5], 'co-', label=cpunames[5])
plt.legend()
plt.grid()
plt.title('Intel(R) Xeon(R) E-2176G CPU @ 3.50GHz')
plt.xlabel('Number of nodes')
plt.ylabel('Calculation time [s]')
plt.semilogy()
plt.savefig('ExhaustiveOnCPU.svg', format='svg', dpi=1200)
#<codecell>
#<codecell>
plt.plot(gpudata[:,0], gpudata[:,1], 'bo-', label=gpunames[1]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,2], 'go-', label=gpunames[2]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,3], 'ro-', label=gpunames[3]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,4], 'mo-', label=gpunames[4]+'(gpu)')
plt.plot(gpudata[:,0], gpudata[:,5], 'co-', label=gpunames[5]+'(gpu)')
plt.plot(cpudata[:,0], cpudata[:,1], 'b*--', label=cpunames[1]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,2], 'g*--', label=cpunames[2]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,3], 'r*--', label=cpunames[3]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,4], 'm*--', label=cpunames[4]+'(cpu)')
plt.plot(cpudata[:,0], cpudata[:,5], 'c*--', label=cpunames[5]+'(cpu)')
plt.legend(ncol=2)
plt.grid()
plt.title('Intel(R) UHD Graphics P630 vs Intel(R) Xeon(R) E-2176G CPU @ 3.50GHz')
plt.xlabel('Number of nodes')
plt.ylabel('Calculation time [s]')
plt.semilogy()
plt.savefig('ExhaustiveGPUvsCPU.svg', format='svg', dpi=1200)
#<codecell> | 0.27973 | 0.592519 |
import sys
sys.path.append('../code/')
import os
import math
import numpy as np
import AEPDGP_net
from tools import *
from dataset.UCIdataset import UCIDataset
from dataset.Facedataset import FaceDataset
import argparse
import time
parser = argparse.ArgumentParser(description='run regression experiment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--dataset',
action="store", dest="dataset",
help="dataset name, eg. boston, power", default="boston")
parser.add_argument('-hi', '--hiddens', nargs='+', type=int,
action="store", dest="n_hiddens",
help="number of hidden dimensions, eg. 2 or 5 2", default=[])
parser.add_argument('-m', '--pseudos', type=int,
action="store", dest="n_pseudos",
help="number of pseudo points per layer, eg. 10", default=10)
parser.add_argument('-b', '--minibatch', type=int,
action="store", dest="minibch_size",
help="minibatch size, eg. 10", default=50)
parser.add_argument('-i', '--iterations', type=int,
action="store", dest="n_iterations",
help="number of stochastic updates, eg. 10", default=1000)
parser.add_argument('-s', '--seed', type=int,
action="store", dest="random_seed",
help="random seed, eg. 10", default=123)
parser.add_argument('-l', '--lrate', type=float,
action="store", dest="lrate",
help="adam learning rate", default=0.005)
parser.add_argument('-t', '--tied',
action="store_true", dest="tied",
help="tying inducing point (boolean)", default=False)
args = parser.parse_args()
name = args.dataset
n_hiddens = args.n_hiddens
n_hiddens_str = '_'.join(map(str, n_hiddens))
nolayers = len(n_hiddens) + 1
M = args.n_pseudos
n_pseudos = [M for _ in range(nolayers)]
no_iterations = args.n_iterations
no_points_per_mb = args.minibch_size
random_seed = args.random_seed
np.random.seed(random_seed)
lrate = args.lrate
tied = args.tied
fnames = {'boston': 'bost',
'power': 'powe',
'concrete': 'conc',
'energy': 'ener',
'kin8nm': 'kin8',
'naval': 'nava',
'protein': 'prot',
'wine_red': 'wine',
'yacht': 'yach',
'year': 'YearPredictionMSD'}
if name == 'face':
data = FaceDataset(ratio=0.9)
else:
data = UCIDataset(name, ratio=0.5)
# prepare output files
for i in range(1):
outname1 = './tmp/' + name + '_' + n_hiddens_str + '_' + str(M) + '_' + str(i) + '.rmse'
if not os.path.exists(os.path.dirname(outname1)):
os.makedirs(os.path.dirname(outname1))
outfile1 = open(outname1, 'w')
outname2 = './tmp/' + name + '_' + n_hiddens_str + '_' + str(M) + '_' + str(i) + '.nll'
outfile2 = open(outname2, 'w')
outname3 = './tmp/' + name + '_' + n_hiddens_str + '_' + str(M) + '_' + str(i) + '.time'
outfile3 = open(outname3, 'w')
X_train = data.Xtrain
y_train = data.Ytrain
X_test = data.Xtest
y_test = data.Ytest
# We construct the network
net = AEPDGP_net.AEPDGP_net(X_train, y_train, n_hiddens, n_pseudos, lik='Gaussian', zu_tied=tied)
# train
no_epochs = no_iterations
test_nll, test_rms, logZ = net.train(X_test, y_test, no_epochs=no_epochs,
no_points_per_mb=no_points_per_mb,
lrate=lrate, compute_test=True,
file_time=outfile3, file_rmse=outfile1, file_llh=outfile2)
outfile1.close()
outfile2.close()
outfile3.close() | dGP/numpy/tests/run_reg_data.py | import sys
sys.path.append('../code/')
import os
import math
import numpy as np
import AEPDGP_net
from tools import *
from dataset.UCIdataset import UCIDataset
from dataset.Facedataset import FaceDataset
import argparse
import time
parser = argparse.ArgumentParser(description='run regression experiment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--dataset',
action="store", dest="dataset",
help="dataset name, eg. boston, power", default="boston")
parser.add_argument('-hi', '--hiddens', nargs='+', type=int,
action="store", dest="n_hiddens",
help="number of hidden dimensions, eg. 2 or 5 2", default=[])
parser.add_argument('-m', '--pseudos', type=int,
action="store", dest="n_pseudos",
help="number of pseudo points per layer, eg. 10", default=10)
parser.add_argument('-b', '--minibatch', type=int,
action="store", dest="minibch_size",
help="minibatch size, eg. 10", default=50)
parser.add_argument('-i', '--iterations', type=int,
action="store", dest="n_iterations",
help="number of stochastic updates, eg. 10", default=1000)
parser.add_argument('-s', '--seed', type=int,
action="store", dest="random_seed",
help="random seed, eg. 10", default=123)
parser.add_argument('-l', '--lrate', type=float,
action="store", dest="lrate",
help="adam learning rate", default=0.005)
parser.add_argument('-t', '--tied',
action="store_true", dest="tied",
help="tying inducing point (boolean)", default=False)
args = parser.parse_args()
name = args.dataset
n_hiddens = args.n_hiddens
n_hiddens_str = '_'.join(map(str, n_hiddens))
nolayers = len(n_hiddens) + 1
M = args.n_pseudos
n_pseudos = [M for _ in range(nolayers)]
no_iterations = args.n_iterations
no_points_per_mb = args.minibch_size
random_seed = args.random_seed
np.random.seed(random_seed)
lrate = args.lrate
tied = args.tied
fnames = {'boston': 'bost',
'power': 'powe',
'concrete': 'conc',
'energy': 'ener',
'kin8nm': 'kin8',
'naval': 'nava',
'protein': 'prot',
'wine_red': 'wine',
'yacht': 'yach',
'year': 'YearPredictionMSD'}
if name == 'face':
data = FaceDataset(ratio=0.9)
else:
data = UCIDataset(name, ratio=0.5)
# prepare output files
for i in range(1):
outname1 = './tmp/' + name + '_' + n_hiddens_str + '_' + str(M) + '_' + str(i) + '.rmse'
if not os.path.exists(os.path.dirname(outname1)):
os.makedirs(os.path.dirname(outname1))
outfile1 = open(outname1, 'w')
outname2 = './tmp/' + name + '_' + n_hiddens_str + '_' + str(M) + '_' + str(i) + '.nll'
outfile2 = open(outname2, 'w')
outname3 = './tmp/' + name + '_' + n_hiddens_str + '_' + str(M) + '_' + str(i) + '.time'
outfile3 = open(outname3, 'w')
X_train = data.Xtrain
y_train = data.Ytrain
X_test = data.Xtest
y_test = data.Ytest
# We construct the network
net = AEPDGP_net.AEPDGP_net(X_train, y_train, n_hiddens, n_pseudos, lik='Gaussian', zu_tied=tied)
# train
no_epochs = no_iterations
test_nll, test_rms, logZ = net.train(X_test, y_test, no_epochs=no_epochs,
no_points_per_mb=no_points_per_mb,
lrate=lrate, compute_test=True,
file_time=outfile3, file_rmse=outfile1, file_llh=outfile2)
outfile1.close()
outfile2.close()
outfile3.close() | 0.219003 | 0.167491 |
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.forcedInput(1)
sm.sendDelay(30)
sm.forcedInput(4)
OBJECT_1 = sm.sendNpcController(1032209, -15, -30)
sm.showNpcSpecialActionByObjectId(OBJECT_1, "summon", 0)
sm.showEffect("Effect/OnUserEff.img/guideEffect/evanTutorial/evanBalloon40", 0, 20, 0, -2, -2, False, 0)
sm.sendDelay(4200)
sm.reservedEffect("Effect/Direction8.img/lightningTutorial2/Scene0")
sm.forcedInput(0)
sm.sendDelay(30)
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("How are you feeling?")
sm.forcedInput(2)
sm.sendDelay(30)
sm.forcedInput(0)
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("What happened? I thought... I...")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("Who is this Lania, hm? You shouted her name out of the blue and then just conked out.")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("She's nobod--She's a girl I met when I woke up in the present world. We lived together for a few years before the Dark awoke inside me.")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("I see. And this darkness... You got it from your fight against the Black Mage?")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("Yes. I believe it happened when I sealed him away. I'm not sure if it's a piece of him that I've absorbed, or some part of me I never realized I had.")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("There's something I gotta tell you, Luminous. I did do a bit of research on Dark magic in the last few centuries, and I found that Light and Dark are two sides of the same coin. That's why the darkness got into you so easily!")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("Where have I heard that before...?")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("I don't know, but Dark grows stronger when Light fades. But since you have BOTH powers, I bet you could learn to control your darkness! Hold on a tick...")
sm.startQuest(25587)
sm.completeQuest(25587)
sm.giveExp(10000)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.sendNpcController(OBJECT_1, False)
sm.warp(101000200, 0) | scripts/field/enter_q25587e.py |
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.forcedInput(1)
sm.sendDelay(30)
sm.forcedInput(4)
OBJECT_1 = sm.sendNpcController(1032209, -15, -30)
sm.showNpcSpecialActionByObjectId(OBJECT_1, "summon", 0)
sm.showEffect("Effect/OnUserEff.img/guideEffect/evanTutorial/evanBalloon40", 0, 20, 0, -2, -2, False, 0)
sm.sendDelay(4200)
sm.reservedEffect("Effect/Direction8.img/lightningTutorial2/Scene0")
sm.forcedInput(0)
sm.sendDelay(30)
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("How are you feeling?")
sm.forcedInput(2)
sm.sendDelay(30)
sm.forcedInput(0)
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("What happened? I thought... I...")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("Who is this Lania, hm? You shouted her name out of the blue and then just conked out.")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("She's nobod--She's a girl I met when I woke up in the present world. We lived together for a few years before the Dark awoke inside me.")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("I see. And this darkness... You got it from your fight against the Black Mage?")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("Yes. I believe it happened when I sealed him away. I'm not sure if it's a piece of him that I've absorbed, or some part of me I never realized I had.")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("There's something I gotta tell you, Luminous. I did do a bit of research on Dark magic in the last few centuries, and I found that Light and Dark are two sides of the same coin. That's why the darkness got into you so easily!")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("Where have I heard that before...?")
sm.setSpeakerID(1032209)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("I don't know, but Dark grows stronger when Light fades. But since you have BOTH powers, I bet you could learn to control your darkness! Hold on a tick...")
sm.startQuest(25587)
sm.completeQuest(25587)
sm.giveExp(10000)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.sendNpcController(OBJECT_1, False)
sm.warp(101000200, 0) | 0.182753 | 0.22194 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.cred import portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.conch import avatar
from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.conch.ssh.transport import SSHServerTransport
from twisted.internet import reactor, protocol
from twisted.python import log
from zope.interface import implementer
from twisted.conch import recvline
from numpy import uint8
import sys
class ExampleAvatar(avatar.ConchUser):
"""
The avatar is used to configure SSH services/sessions/subsystems for
an account.
This account will use L{session.SSHSession} to handle a channel of
type I{session}.
"""
def __init__(self, username, datastore):
avatar.ConchUser.__init__(self)
self.datastore = datastore
self.username = username
self.channelLookup.update({b'session':session.SSHSession})
@implementer(portal.IRealm)
class ExampleRealm(object):
"""
When using Twisted Cred, the pluggable authentication framework, the
C{requestAvatar} method should return a L{avatar.ConchUser} instance
as required by the Conch SSH server.
"""
def __init__(self, datastore):
self.datastore = datastore
def requestAvatar(self, avatarId, mind, *interfaces):
"""
See: L{portal.IRealm.requestAvatar}
"""
return interfaces[0], ExampleAvatar(avatarId, self.datastore), lambda: None
class CLIProtocol(protocol.Protocol):
def __init__(self, datastore):
self.line=b''
self.datastore=datastore
def dataReceived(self, data):
if data == b'\r':
self.transport.write(b'\r\n')
self.lineReceived(self.line)
self.line=b''
elif data == b'\x03': #^C
self.transport.loseConnection()
return
self.line+=data
self.transport.write(data)
def sendLine(self, line):
self.transport.write(line+b'\r\n')
def lineReceived(self, line):
# Ignore blank lines
if not line: return
line = line.decode("ascii")
# Parse the command
commandParts = line.split()
command = commandParts[0].lower()
args = commandParts[1:]
# Dispatch the command to the appropriate method. Note that all you
# need to do to implement a new command is add another do_* method.
try:
method = getattr(self, 'do_' + command)
except AttributeError as e:
self.sendLine(b'Error: no such command.')
self.transport.write(b'$ ')
else:
try:
method(*args)
self.transport.write(b'$ ')
except Exception as e:
self.sendLine(b'Error: ' + str(e).encode("ascii"))
self.transport.write(b'$ ')
def do_help(self, command=None):
"""help [command]: List commands, or show help on the given command"""
if command:
doc = getattr(self, 'do_' + command).__doc__
self.sendLine(doc.encode("ascii"))
else:
commands = [cmd[3:].encode("ascii")
for cmd in dir(self)
if cmd.startswith('do_')]
self.sendLine(b"Valid commands: " + b" ".join(commands))
def do_quit(self):
"""quit: Quit this session"""
self.sendLine(b'Goodbye.')
self.transport.loseConnection()
def do_plugins(self):
"""List the available plugins"""
for plugin in self.datastore.plugins:
self.sendLine(str(plugin.__name__).encode()[8:])
def do_plugins(self):
"""List the available plugins"""
for plugin in self.datastore.plugins:
self.sendLine(str(plugin.__name__).encode()[8:])
def do_animations(self):
"""List running animations"""
self.sendLine(b'RGB Animations')
for animation in self.datastore.animations:
self.sendLine(str(animation.__module__[8:]).encode())
self.sendLine(b'Strip Animations')
for animation in self.datastore.strip_animations:
self.sendLine(str(animation.__module__[8:]).encode())
def do_add(self, pluginname, extra=None, extra2=None):
""" Add an instance of a plugin to the running animations list"""
self.datastore.add_animation(pluginname, extra, extra2)
def do_power(self, state):
""" Add an instance of a plugin to the running animations list"""
self.datastore.set_power(state)
def do_del(self, pluginname):
""" Add an instance of a plugin to the running animations list"""
self.datastore.del_animation(pluginname)
def do_nw(self, val):
"""Set level if Natural White strips (0-255)"""
self.datastore.strip_vals[0]=uint8(val)
def do_dw(self, val):
"""Set level if Daylight White strips (0-255)"""
self.datastore.strip_vals[1]=uint8(val)
def do_ib(self, val):
"""Set level if Ice Blue strips (0-255)"""
self.datastore.strip_vals[2]=uint8(val)
def do_ww(self, val):
"""Set level if Warm White strips (0-255)"""
self.datastore.strip_vals[3]=uint8(val)
def do_lightsout(self):
"""Stop all animations and turn all lights off"""
self.strip_vals = [0,0,0,0]
self.datastore.animations=[]
self.datastore.strip_animations=[]
self.datastore.add_animation("set_strips")
def do_brt(self, val):
"""Set the master brightness. Range: 0.00-1.00"""
self.datastore.master_brightness=float(val)
def do_brtr(self, val):
"""Set the brightness for the Red channel. Range: 0.00-1.00"""
self.datastore.rgbw_brightness[0]=float(val)
def do_brtg(self, val):
"""Set the brightness for the Green channel. Range: 0.00-1.00"""
self.datastore.rgbw_brightness[1]=float(val)
def do_brtb(self, val):
"""Set the brightness for the Blue channel. Range: 0.00-1.00"""
self.datastore.rgbw_brightness[2]=float(val)
def do_brtw(self, val):
"""Set the brightness for the White channel. Range: 0.00-1.00"""
self.datastore.rgbw_brightness[3]=float(val)
class ExampleSession(object):
def __init__(self, avatar):
"""
In this example the avatar argument is not used for session selection,
but for example you can use it to limit I{shell} or I{exec} access
only to specific accounts.
"""
self.datastore = avatar.datastore
def getPty(self, term, windowSize, attrs):
"""
We don't support pseudo-terminal sessions.
"""
def execCommand(self, proto, cmd):
"""
We don't support command execution sessions.
"""
raise Exception("not executing commands")
def openShell(self, transport):
"""
Use our protocol as shell session.
"""
protocol = CLIProtocol(self.datastore)
# Connect the new protocol to the transport and the transport
# to the new protocol so they can communicate in both directions.
protocol.makeConnection(transport)
transport.makeConnection(session.wrapProtocol(protocol))
protocol.transport.write(b'Welcome to Digital Sky\r\nType "help" for help.\r\n$ ')
def eofReceived(self):
pass
def closed(self):
pass | core/ssh.py |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.cred import portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.conch import avatar
from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.conch.ssh.transport import SSHServerTransport
from twisted.internet import reactor, protocol
from twisted.python import log
from zope.interface import implementer
from twisted.conch import recvline
from numpy import uint8
import sys
class ExampleAvatar(avatar.ConchUser):
"""
The avatar is used to configure SSH services/sessions/subsystems for
an account.
This account will use L{session.SSHSession} to handle a channel of
type I{session}.
"""
def __init__(self, username, datastore):
avatar.ConchUser.__init__(self)
self.datastore = datastore
self.username = username
self.channelLookup.update({b'session':session.SSHSession})
@implementer(portal.IRealm)
class ExampleRealm(object):
"""
When using Twisted Cred, the pluggable authentication framework, the
C{requestAvatar} method should return a L{avatar.ConchUser} instance
as required by the Conch SSH server.
"""
def __init__(self, datastore):
self.datastore = datastore
def requestAvatar(self, avatarId, mind, *interfaces):
"""
See: L{portal.IRealm.requestAvatar}
"""
return interfaces[0], ExampleAvatar(avatarId, self.datastore), lambda: None
class CLIProtocol(protocol.Protocol):
def __init__(self, datastore):
self.line=b''
self.datastore=datastore
def dataReceived(self, data):
if data == b'\r':
self.transport.write(b'\r\n')
self.lineReceived(self.line)
self.line=b''
elif data == b'\x03': #^C
self.transport.loseConnection()
return
self.line+=data
self.transport.write(data)
def sendLine(self, line):
self.transport.write(line+b'\r\n')
def lineReceived(self, line):
# Ignore blank lines
if not line: return
line = line.decode("ascii")
# Parse the command
commandParts = line.split()
command = commandParts[0].lower()
args = commandParts[1:]
# Dispatch the command to the appropriate method. Note that all you
# need to do to implement a new command is add another do_* method.
try:
method = getattr(self, 'do_' + command)
except AttributeError as e:
self.sendLine(b'Error: no such command.')
self.transport.write(b'$ ')
else:
try:
method(*args)
self.transport.write(b'$ ')
except Exception as e:
self.sendLine(b'Error: ' + str(e).encode("ascii"))
self.transport.write(b'$ ')
def do_help(self, command=None):
"""help [command]: List commands, or show help on the given command"""
if command:
doc = getattr(self, 'do_' + command).__doc__
self.sendLine(doc.encode("ascii"))
else:
commands = [cmd[3:].encode("ascii")
for cmd in dir(self)
if cmd.startswith('do_')]
self.sendLine(b"Valid commands: " + b" ".join(commands))
def do_quit(self):
"""quit: Quit this session"""
self.sendLine(b'Goodbye.')
self.transport.loseConnection()
def do_plugins(self):
"""List the available plugins"""
for plugin in self.datastore.plugins:
self.sendLine(str(plugin.__name__).encode()[8:])
def do_plugins(self):
"""List the available plugins"""
for plugin in self.datastore.plugins:
self.sendLine(str(plugin.__name__).encode()[8:])
def do_animations(self):
"""List running animations"""
self.sendLine(b'RGB Animations')
for animation in self.datastore.animations:
self.sendLine(str(animation.__module__[8:]).encode())
self.sendLine(b'Strip Animations')
for animation in self.datastore.strip_animations:
self.sendLine(str(animation.__module__[8:]).encode())
def do_add(self, pluginname, extra=None, extra2=None):
""" Add an instance of a plugin to the running animations list"""
self.datastore.add_animation(pluginname, extra, extra2)
def do_power(self, state):
    """power state: Set the master power state"""
    # BUG FIX: the original docstring was copy-pasted from do_add ("Add an
    # instance of a plugin...") and is what "help power" would show.
    self.datastore.set_power(state)
def do_del(self, pluginname):
    """del pluginname: Remove an animation from the running animations list"""
    # BUG FIX: the original docstring was copy-pasted from do_add and
    # described adding, not deleting -- it is served verbatim by do_help.
    self.datastore.del_animation(pluginname)
def do_nw(self, val):
    """Set level of Natural White strips (0-255)"""
    # uint8 presumably comes from a numpy import near the top of the file
    # (not visible here) -- TODO confirm.  Channel 0 = natural white.
    self.datastore.strip_vals[0] = uint8(val)
def do_dw(self, val):
    """Set level of Daylight White strips (0-255)"""
    # Channel 1 = daylight white.  uint8 presumably from numpy -- TODO confirm.
    self.datastore.strip_vals[1] = uint8(val)
def do_ib(self, val):
    """Set level of Ice Blue strips (0-255)"""
    # Channel 2 = ice blue.  uint8 presumably from numpy -- TODO confirm.
    self.datastore.strip_vals[2] = uint8(val)
def do_ww(self, val):
    """Set level of Warm White strips (0-255)"""
    # Channel 3 = warm white.  uint8 presumably from numpy -- TODO confirm.
    self.datastore.strip_vals[3] = uint8(val)
def do_lightsout(self):
    """Stop all animations and turn all lights off"""
    # BUG FIX: the original assigned to self.strip_vals, creating an unused
    # attribute on the protocol object.  Every other handler in this file
    # mutates self.datastore.strip_vals, so the strip levels were never
    # actually cleared.
    self.datastore.strip_vals = [0, 0, 0, 0]
    self.datastore.animations = []
    self.datastore.strip_animations = []
    self.datastore.add_animation("set_strips")
def do_brt(self, val):
    """Set the master brightness. Range: 0.00-1.00"""
    store = self.datastore
    store.master_brightness = float(val)
def do_brtr(self, val):
    """Set the brightness for the Red channel. Range: 0.00-1.00"""
    # Index 0 of the RGBW brightness vector is red.
    level = float(val)
    self.datastore.rgbw_brightness[0] = level
def do_brtg(self, val):
    """Set the brightness for the Green channel. Range: 0.00-1.00"""
    # Index 1 of the RGBW brightness vector is green.
    level = float(val)
    self.datastore.rgbw_brightness[1] = level
def do_brtb(self, val):
    """Set the brightness for the Blue channel. Range: 0.00-1.00"""
    # Index 2 of the RGBW brightness vector is blue.
    level = float(val)
    self.datastore.rgbw_brightness[2] = level
def do_brtw(self, val):
    """Set the brightness for the White channel. Range: 0.00-1.00"""
    # Index 3 of the RGBW brightness vector is white.
    level = float(val)
    self.datastore.rgbw_brightness[3] = level
class ExampleSession(object):
    """SSH session adapter that serves the CLI protocol as the shell."""

    def __init__(self, avatar):
        """
        In this example the avatar argument is not used for session selection,
        but for example you can use it to limit I{shell} or I{exec} access
        only to specific accounts.
        """
        # The avatar carries the shared datastore handed to each shell.
        self.datastore = avatar.datastore

    def getPty(self, term, windowSize, attrs):
        """
        We don't support pseudo-terminal sessions.
        """

    def execCommand(self, proto, cmd):
        """
        We don't support command execution sessions.
        """
        raise Exception("not executing commands")

    def openShell(self, transport):
        """
        Use our protocol as shell session.
        """
        protocol = CLIProtocol(self.datastore)
        # Connect the new protocol to the transport and the transport
        # to the new protocol so they can communicate in both directions.
        # NOTE(review): "session" must be twisted.conch.ssh.session imported
        # near the top of this file (not visible here) -- confirm.
        protocol.makeConnection(transport)
        transport.makeConnection(session.wrapProtocol(protocol))
        protocol.transport.write(b'Welcome to Digital Sky\r\nType "help" for help.\r\n$ ')

    def eofReceived(self):
        pass

    def closed(self):
pass | 0.677581 | 0.148047 |
import json
import logging
import click
import numpy as np
from scipy.special import softmax
from sklearn import metrics
from .... import utils
from ....baselines.metrics import METRICS
logger = logging.getLogger(__name__)
# constants
REPORT_TEMPLATE =\
"""Scruples Resource Predictions Performance Report
================================================
Analysis of predictions on the scruples resource.
Main Metrics
------------
Note that the xentropy score, if present, is computed with respect to
the estimated true label distribution rather than the hard labels. All
other scores are standard and computed against the most frequent label.
{metrics_report}{calibration_factor_report}
Classification Report
---------------------
{classification_report}
Confusion Matrix
----------------
{confusion_matrix}
"""
# main function
@click.command()
@click.argument(
    'dataset_path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
    'predictions_path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
    'output_path',
    type=click.Path(exists=False, file_okay=True, dir_okay=False))
@click.option(
    '--label-scores', is_flag=True,
    help='Compute metrics which require predictions of label'
         ' probabilities, and include them in the report. Predictions'
         ' must have "label_scores" keys to use this option.')
@click.option(
    '--calibration-factor', type=float, default=None,
    help='The calibration factor to use for computing the calibrated'
         ' xentropy. If no calibration factor is provided, then it will be'
         ' calculated from the data.')
def predictions(
        dataset_path: str,
        predictions_path: str,
        output_path: str,
        label_scores: bool,
        calibration_factor: float
) -> None:
    """Analyze classification performance and write a report.

    Read in the dataset from DATASET_PATH, as well as predictions from
    PREDICTIONS_PATH, then analyze the predictions and write the
    results to OUTPUT_PATH. PREDICTIONS_PATH should be a JSON Lines file
    in which each object has "id", "label", and optionally
    "label_scores" keys, corresponding to the ID for the instance, the
    predicted label, and the predicted probabilities for each class.
    """
    # Step 1: Read in the dataset.
    # Maps instance id -> (gold label, per-class annotation counts).
    with click.open_file(dataset_path, 'r') as dataset_file:
        id_to_dataset_label_and_label_scores = {}
        for ln in dataset_file:
            row = json.loads(ln)
            id_to_dataset_label_and_label_scores[row['id']] = (
                row['gold_label'],
                row['gold_annotations']
            )
    # Step 2: Read in the predictions.
    # "label_scores" is optional, so .get() may store None; score lists
    # are only consumed below when --label-scores is passed.
    with click.open_file(predictions_path, 'r') as predictions_file:
        id_to_predicted_label_and_label_scores = {}
        for ln in predictions_file:
            row = json.loads(ln)
            id_to_predicted_label_and_label_scores[row['id']] = (
                row['label'],
                row.get('label_scores')
            )
    # Step 3: Extract the dataset and predictions on the relevant
    # subset.
    # NOTE(review): if no predicted id overlaps the dataset, the starred
    # unpacking below fails -- assumes at least one matching id.
    dataset_labels_and_label_scores, predicted_labels_and_label_scores = (
        *zip(*[
            (
                id_to_dataset_label_and_label_scores[id_],
                id_to_predicted_label_and_label_scores[id_]
            )
            for id_ in id_to_predicted_label_and_label_scores.keys()
            if id_ in id_to_dataset_label_and_label_scores
        ]),
    )
    dataset_labels = [
        label
        for label, _ in dataset_labels_and_label_scores
    ]
    predicted_labels = [
        label
        for label, _ in predicted_labels_and_label_scores
    ]
    if label_scores:
        # Normalize annotation counts / predicted scores into probability
        # distributions (assumes each score list has a nonzero sum).
        dataset_label_scores = [
            [count / sum(scores) for count in scores]
            for _, scores in dataset_labels_and_label_scores
        ]
        predicted_label_scores = [
            [count / sum(scores) for count in scores]
            for _, scores in predicted_labels_and_label_scores
        ]
    # Step 4: Write the report.
    with click.open_file(output_path, 'w') as output_file:
        # create the metrics report
        # METRICS values are (display name, metric fn, scorer kwargs)
        # triples; probability-based metrics are skipped unless
        # --label-scores was passed.
        metric_name_to_value = {
            name:
            metric(
                y_true=dataset_labels,
                y_pred=predicted_label_scores
                if scorer_kwargs['needs_proba']
                else predicted_labels)
            for name, metric, scorer_kwargs in METRICS.values()
            if label_scores or not scorer_kwargs['needs_proba']
        }
        if label_scores:
            if 'xentropy' in metric_name_to_value:
                raise ValueError(
                    'METRICS should not have a key named'
                    ' "xentropy". This issue is a bug in the library,'
                    ' please notify the maintainers.')
            metric_name_to_value['xentropy'] = utils.xentropy(
                y_true=dataset_label_scores,
                y_pred=predicted_label_scores)
            if 'calibrated_xentropy' in metric_name_to_value:
                raise ValueError(
                    'METRICS should not have a key named'
                    ' "calibrated_xentropy". This issue is a bug in the'
                    ' library, please notify the maintainers.')
            # NOTE(review): zero predicted probabilities become -inf logits
            # here; the temperature math below assumes strictly positive scores.
            logits = np.log(predicted_label_scores)
            temperature = (
                calibration_factor
                if calibration_factor is not None else
                utils.calibration_factor(
                    logits=logits,
                    targets=dataset_label_scores)
            )
            logger.info(f'Calibrating temperature: {temperature}')
            metric_name_to_value['calibrated_xentropy'] = utils.xentropy(
                y_true=dataset_label_scores,
                y_pred=softmax(logits / temperature, axis=-1))
        # Left-align metric names in a fixed-width column.
        metric_name_width = 1 + max(
            len(name)
            for name in metric_name_to_value.keys())
        metrics_report = '\n'.join(
            f'{name: <{metric_name_width}}: {value:.4f}'
            for name, value in metric_name_to_value.items())
        if label_scores:
            # "temperature" is only bound in the label_scores branch above,
            # so this section is guarded by the same flag.
            calibration_factor_report = (
                f'\n\nCalibration Factor: {temperature}\n'
            )
        else:
            calibration_factor_report = ''
        # create the classification report
        # Labels are stringified so y_true/y_pred/labels share a type;
        # the task is assumed binary (0/1) -- TODO confirm.
        label_names = ['0', '1']
        classification_report = metrics.classification_report(
            y_true=[str(label) for label in dataset_labels],
            y_pred=[str(label) for label in predicted_labels],
            labels=label_names)
        # create the confusion matrix
        confusion_matrix = utils.make_confusion_matrix_str(
            y_true=[str(label) for label in dataset_labels],
            y_pred=[str(label) for label in predicted_labels],
            labels=label_names)
        output_file.write(
            REPORT_TEMPLATE.format(
                metrics_report=metrics_report,
                calibration_factor_report=calibration_factor_report,
                classification_report=classification_report,
confusion_matrix=confusion_matrix)) | src/scruples/scripts/analyze/resource/predictions.py |
import json
import logging
import click
import numpy as np
from scipy.special import softmax
from sklearn import metrics
from .... import utils
from ....baselines.metrics import METRICS
logger = logging.getLogger(__name__)
# constants
REPORT_TEMPLATE =\
"""Scruples Resource Predictions Performance Report
================================================
Analysis of predictions on the scruples resource.
Main Metrics
------------
Note that the xentropy score, if present, is computed with respect to
the estimated true label distribution rather than the hard labels. All
other scores are standard and computed against the most frequent label.
{metrics_report}{calibration_factor_report}
Classification Report
---------------------
{classification_report}
Confusion Matrix
----------------
{confusion_matrix}
"""
# main function
@click.command()
@click.argument(
    'dataset_path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
    'predictions_path',
    type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
    'output_path',
    type=click.Path(exists=False, file_okay=True, dir_okay=False))
@click.option(
    '--label-scores', is_flag=True,
    help='Compute metrics which require predictions of label'
         ' probabilities, and include them in the report. Predictions'
         ' must have "label_scores" keys to use this option.')
@click.option(
    '--calibration-factor', type=float, default=None,
    help='The calibration factor to use for computing the calibrated'
         ' xentropy. If no calibration factor is provided, then it will be'
         ' calculated from the data.')
def predictions(
        dataset_path: str,
        predictions_path: str,
        output_path: str,
        label_scores: bool,
        calibration_factor: float
) -> None:
    """Analyze classification performance and write a report.

    Read in the dataset from DATASET_PATH, as well as predictions from
    PREDICTIONS_PATH, then analyze the predictions and write the
    results to OUTPUT_PATH. PREDICTIONS_PATH should be a JSON Lines file
    in which each object has "id", "label", and optionally
    "label_scores" keys, corresponding to the ID for the instance, the
    predicted label, and the predicted probabilities for each class.
    """
    # Step 1: Read in the dataset.
    # Maps instance id -> (gold label, per-class annotation counts).
    with click.open_file(dataset_path, 'r') as dataset_file:
        id_to_dataset_label_and_label_scores = {}
        for ln in dataset_file:
            row = json.loads(ln)
            id_to_dataset_label_and_label_scores[row['id']] = (
                row['gold_label'],
                row['gold_annotations']
            )
    # Step 2: Read in the predictions.
    # "label_scores" is optional, so .get() may store None; score lists
    # are only consumed below when --label-scores is passed.
    with click.open_file(predictions_path, 'r') as predictions_file:
        id_to_predicted_label_and_label_scores = {}
        for ln in predictions_file:
            row = json.loads(ln)
            id_to_predicted_label_and_label_scores[row['id']] = (
                row['label'],
                row.get('label_scores')
            )
    # Step 3: Extract the dataset and predictions on the relevant
    # subset.
    # NOTE(review): if no predicted id overlaps the dataset, the starred
    # unpacking below fails -- assumes at least one matching id.
    dataset_labels_and_label_scores, predicted_labels_and_label_scores = (
        *zip(*[
            (
                id_to_dataset_label_and_label_scores[id_],
                id_to_predicted_label_and_label_scores[id_]
            )
            for id_ in id_to_predicted_label_and_label_scores.keys()
            if id_ in id_to_dataset_label_and_label_scores
        ]),
    )
    dataset_labels = [
        label
        for label, _ in dataset_labels_and_label_scores
    ]
    predicted_labels = [
        label
        for label, _ in predicted_labels_and_label_scores
    ]
    if label_scores:
        # Normalize annotation counts / predicted scores into probability
        # distributions (assumes each score list has a nonzero sum).
        dataset_label_scores = [
            [count / sum(scores) for count in scores]
            for _, scores in dataset_labels_and_label_scores
        ]
        predicted_label_scores = [
            [count / sum(scores) for count in scores]
            for _, scores in predicted_labels_and_label_scores
        ]
    # Step 4: Write the report.
    with click.open_file(output_path, 'w') as output_file:
        # create the metrics report
        # METRICS values are (display name, metric fn, scorer kwargs)
        # triples; probability-based metrics are skipped unless
        # --label-scores was passed.
        metric_name_to_value = {
            name:
            metric(
                y_true=dataset_labels,
                y_pred=predicted_label_scores
                if scorer_kwargs['needs_proba']
                else predicted_labels)
            for name, metric, scorer_kwargs in METRICS.values()
            if label_scores or not scorer_kwargs['needs_proba']
        }
        if label_scores:
            if 'xentropy' in metric_name_to_value:
                raise ValueError(
                    'METRICS should not have a key named'
                    ' "xentropy". This issue is a bug in the library,'
                    ' please notify the maintainers.')
            metric_name_to_value['xentropy'] = utils.xentropy(
                y_true=dataset_label_scores,
                y_pred=predicted_label_scores)
            if 'calibrated_xentropy' in metric_name_to_value:
                raise ValueError(
                    'METRICS should not have a key named'
                    ' "calibrated_xentropy". This issue is a bug in the'
                    ' library, please notify the maintainers.')
            # NOTE(review): zero predicted probabilities become -inf logits
            # here; the temperature math below assumes strictly positive scores.
            logits = np.log(predicted_label_scores)
            temperature = (
                calibration_factor
                if calibration_factor is not None else
                utils.calibration_factor(
                    logits=logits,
                    targets=dataset_label_scores)
            )
            logger.info(f'Calibrating temperature: {temperature}')
            metric_name_to_value['calibrated_xentropy'] = utils.xentropy(
                y_true=dataset_label_scores,
                y_pred=softmax(logits / temperature, axis=-1))
        # Left-align metric names in a fixed-width column.
        metric_name_width = 1 + max(
            len(name)
            for name in metric_name_to_value.keys())
        metrics_report = '\n'.join(
            f'{name: <{metric_name_width}}: {value:.4f}'
            for name, value in metric_name_to_value.items())
        if label_scores:
            # "temperature" is only bound in the label_scores branch above,
            # so this section is guarded by the same flag.
            calibration_factor_report = (
                f'\n\nCalibration Factor: {temperature}\n'
            )
        else:
            calibration_factor_report = ''
        # create the classification report
        # Labels are stringified so y_true/y_pred/labels share a type;
        # the task is assumed binary (0/1) -- TODO confirm.
        label_names = ['0', '1']
        classification_report = metrics.classification_report(
            y_true=[str(label) for label in dataset_labels],
            y_pred=[str(label) for label in predicted_labels],
            labels=label_names)
        # create the confusion matrix
        confusion_matrix = utils.make_confusion_matrix_str(
            y_true=[str(label) for label in dataset_labels],
            y_pred=[str(label) for label in predicted_labels],
            labels=label_names)
        output_file.write(
            REPORT_TEMPLATE.format(
                metrics_report=metrics_report,
                calibration_factor_report=calibration_factor_report,
                classification_report=classification_report,
confusion_matrix=confusion_matrix)) | 0.705075 | 0.353121 |
import tensorflow as tf
import math
from capsule.capsule_layer import Capsule
from capsule.em_capsule_layer import EMCapsule
from capsule.gamma_capsule_layer import GammaCapsule
from capsule.conv_capsule_layer import ConvCapsule
from capsule.primary_capsule_layer import PrimaryCapsule
from capsule.reconstruction_network import ReconstructionNetwork
from capsule.norm_layer import Norm
from capsule.residual_layer import Residual
from tensorflow.keras.layers import BatchNormalization
class ConvCapsNet(tf.keras.Model):
    """Capsule network: conv stem -> stack of convolutional capsule layers
    (with optional residual skips) -> fully-connected capsule head, plus an
    optional reconstruction decoder.
    """

    def __init__(self, args):
        super(ConvCapsNet, self).__init__()

        # Set params
        # layers[i] = capsule count, dimensions[i] = capsule dim at level i.
        dimensions = list(map(int, args.dimensions.split(","))) if args.dimensions != "" else []
        layers = list(map(int, args.layers.split(","))) if args.layers != "" else []

        self.use_bias = args.use_bias
        self.use_reconstruction = args.use_reconstruction
        self.make_skips = args.make_skips
        self.skip_dist = args.skip_dist

        # NOTE(review): this mapping is built but never used below -- the
        # capsule classes are hard-coded (ConvCapsule / Capsule); dead code
        # or unfinished routing selection.
        CapsuleType = {
            "rba": Capsule,
            "em": EMCapsule,
            "sda": GammaCapsule
        }

        if args.dataset == 'mnist':
            img_size = 24
        elif args.dataset == 'cifar10':
            img_size = 32
        else:
            raise NotImplementedError()

        # Spatial output size of the stem conv (padding='valid').
        conv1_filters, conv1_kernel, conv1_stride = 128, 7, 2
        out_height = (img_size - conv1_kernel) // conv1_stride + 1
        out_width = (img_size - conv1_kernel) // conv1_stride + 1

        with tf.name_scope(self.name):
            # normal convolution
            self.conv_1 = tf.keras.layers.Conv2D(
                conv1_filters,
                kernel_size=conv1_kernel,
                strides=conv1_stride,
                padding='valid',
                activation="relu",
                name="conv1")

            # reshape into capsule shape
            self.capsuleShape = tf.keras.layers.Reshape(target_shape=(out_height, out_width, 1, conv1_filters), name='toCapsuleShape')

            self.capsule_layers = []
            for i in range(len(layers) - 1):
                self.capsule_layers.append(
                    ConvCapsule(
                        name="ConvCapsuleLayer" + str(i),
                        in_capsules=layers[i],
                        in_dim=dimensions[i],
                        out_dim=dimensions[i],
                        out_capsules=layers[i + 1],
                        kernel_size=3,
                        routing_iterations=args.iterations,
                        routing=args.routing))

            # flatten for input to FC capsule
            # NOTE(review): assumes len(layers) >= 2 and that the conv capsule
            # stack preserves the (out_height, out_width) grid -- TODO confirm.
            self.flatten = tf.keras.layers.Reshape(target_shape=(out_height * out_width * layers[-2], dimensions[-2]), name='flatten')

            # fully connected caspule layer
            self.fcCapsuleLayer = Capsule(
                name="FCCapsuleLayer",
                in_capsules=out_height * out_width * layers[-2],
                in_dim=dimensions[-2],
                out_capsules=layers[-1],
                out_dim=dimensions[-1],
                use_bias=self.use_bias)

            if self.use_reconstruction:
                self.reconstruction_network = ReconstructionNetwork(
                    name="ReconstructionNetwork",
                    in_capsules=layers[-1],
                    in_dim=dimensions[-1],
                    out_dim=args.img_height,
                    img_dim=args.img_depth)
            self.norm = Norm()
            self.residual = Residual()

    # Inference
    def call(self, x, y):
        """Forward pass.  y is consumed only by the reconstruction decoder."""
        x = self.conv_1(x)
        x = self.capsuleShape(x)

        layers = []
        capsule_outputs = []
        # i counts capsule layers since the last merged skip; after a merge it
        # is reset (-1, then the increment below returns it to 0).
        # NOTE(review): indentation reconstructed from a whitespace-stripped
        # source -- confirm "i = -1" belongs to the shape-match branch.
        i = 0
        for j, capsuleLayer in enumerate(self.capsule_layers):
            x = capsuleLayer(x)

            # add skip connection
            capsule_outputs.append(x)
            if self.make_skips and i > 0 and i % self.skip_dist == 0:
                out_skip = capsule_outputs[j - self.skip_dist]
                if x.shape == out_skip.shape:
                    # print('make residual connection from ', j-self.skip_dist, ' to ', j)
                    x = self.residual(x, out_skip)
                    i = -1
            i += 1
            layers.append(x)

        x = self.flatten(x)
        x = self.fcCapsuleLayer(x)

        r = self.reconstruction_network(x, y) if self.use_reconstruction else None
        out = self.norm(x)
return out, r, layers | capsule/conv_capsule_network.py | import tensorflow as tf
import math
from capsule.capsule_layer import Capsule
from capsule.em_capsule_layer import EMCapsule
from capsule.gamma_capsule_layer import GammaCapsule
from capsule.conv_capsule_layer import ConvCapsule
from capsule.primary_capsule_layer import PrimaryCapsule
from capsule.reconstruction_network import ReconstructionNetwork
from capsule.norm_layer import Norm
from capsule.residual_layer import Residual
from tensorflow.keras.layers import BatchNormalization
class ConvCapsNet(tf.keras.Model):
    """Capsule network: conv stem -> stack of convolutional capsule layers
    (with optional residual skips) -> fully-connected capsule head, plus an
    optional reconstruction decoder.
    """

    def __init__(self, args):
        super(ConvCapsNet, self).__init__()

        # Set params
        # layers[i] = capsule count, dimensions[i] = capsule dim at level i.
        dimensions = list(map(int, args.dimensions.split(","))) if args.dimensions != "" else []
        layers = list(map(int, args.layers.split(","))) if args.layers != "" else []

        self.use_bias = args.use_bias
        self.use_reconstruction = args.use_reconstruction
        self.make_skips = args.make_skips
        self.skip_dist = args.skip_dist

        # NOTE(review): this mapping is built but never used below -- the
        # capsule classes are hard-coded (ConvCapsule / Capsule); dead code
        # or unfinished routing selection.
        CapsuleType = {
            "rba": Capsule,
            "em": EMCapsule,
            "sda": GammaCapsule
        }

        if args.dataset == 'mnist':
            img_size = 24
        elif args.dataset == 'cifar10':
            img_size = 32
        else:
            raise NotImplementedError()

        # Spatial output size of the stem conv (padding='valid').
        conv1_filters, conv1_kernel, conv1_stride = 128, 7, 2
        out_height = (img_size - conv1_kernel) // conv1_stride + 1
        out_width = (img_size - conv1_kernel) // conv1_stride + 1

        with tf.name_scope(self.name):
            # normal convolution
            self.conv_1 = tf.keras.layers.Conv2D(
                conv1_filters,
                kernel_size=conv1_kernel,
                strides=conv1_stride,
                padding='valid',
                activation="relu",
                name="conv1")

            # reshape into capsule shape
            self.capsuleShape = tf.keras.layers.Reshape(target_shape=(out_height, out_width, 1, conv1_filters), name='toCapsuleShape')

            self.capsule_layers = []
            for i in range(len(layers) - 1):
                self.capsule_layers.append(
                    ConvCapsule(
                        name="ConvCapsuleLayer" + str(i),
                        in_capsules=layers[i],
                        in_dim=dimensions[i],
                        out_dim=dimensions[i],
                        out_capsules=layers[i + 1],
                        kernel_size=3,
                        routing_iterations=args.iterations,
                        routing=args.routing))

            # flatten for input to FC capsule
            # NOTE(review): assumes len(layers) >= 2 and that the conv capsule
            # stack preserves the (out_height, out_width) grid -- TODO confirm.
            self.flatten = tf.keras.layers.Reshape(target_shape=(out_height * out_width * layers[-2], dimensions[-2]), name='flatten')

            # fully connected caspule layer
            self.fcCapsuleLayer = Capsule(
                name="FCCapsuleLayer",
                in_capsules=out_height * out_width * layers[-2],
                in_dim=dimensions[-2],
                out_capsules=layers[-1],
                out_dim=dimensions[-1],
                use_bias=self.use_bias)

            if self.use_reconstruction:
                self.reconstruction_network = ReconstructionNetwork(
                    name="ReconstructionNetwork",
                    in_capsules=layers[-1],
                    in_dim=dimensions[-1],
                    out_dim=args.img_height,
                    img_dim=args.img_depth)
            self.norm = Norm()
            self.residual = Residual()

    # Inference
    def call(self, x, y):
        """Forward pass.  y is consumed only by the reconstruction decoder."""
        x = self.conv_1(x)
        x = self.capsuleShape(x)

        layers = []
        capsule_outputs = []
        # i counts capsule layers since the last merged skip; after a merge it
        # is reset (-1, then the increment below returns it to 0).
        # NOTE(review): indentation reconstructed from a whitespace-stripped
        # source -- confirm "i = -1" belongs to the shape-match branch.
        i = 0
        for j, capsuleLayer in enumerate(self.capsule_layers):
            x = capsuleLayer(x)

            # add skip connection
            capsule_outputs.append(x)
            if self.make_skips and i > 0 and i % self.skip_dist == 0:
                out_skip = capsule_outputs[j - self.skip_dist]
                if x.shape == out_skip.shape:
                    # print('make residual connection from ', j-self.skip_dist, ' to ', j)
                    x = self.residual(x, out_skip)
                    i = -1
            i += 1
            layers.append(x)

        x = self.flatten(x)
        x = self.fcCapsuleLayer(x)

        r = self.reconstruction_network(x, y) if self.use_reconstruction else None
        out = self.norm(x)
return out, r, layers | 0.814938 | 0.451387 |
import sys
from twisted.internet import defer, endpoints, protocol, reactor, ssl, task
from twisted.python import log
from twisted.words.protocols import irc
import config
import database
class IRCProtocol(irc.IRCClient):
    """IRC client that dispatches "!command" messages to command_* methods
    and replies in-channel (or by private message)."""

    nickname = config.nickname

    def __init__(self):
        # Fired (errback) with the disconnect reason when the connection is
        # lost; run() chains on it to keep the reactor alive until then.
        self.deferred = defer.Deferred()

    def connectionLost(self, reason):
        self.deferred.errback(reason)

    def signedOn(self):
        # Join every configured channel once the server accepts us.
        for channel in self.factory.channels:
            self.join(channel)

    def privmsg(self, user, channel, message):
        nick, _, host = user.partition("!")
        message = message.strip()
        if not message.startswith("!"):
            return
        # NOTE(review): lstrip("!") removes *all* leading '!' characters,
        # so "!!ping" dispatches the same as "!ping".
        command, sep, rest = message.lstrip("!").partition(" ")
        func = getattr(self, "command_" + command, None)
        if not func:
            return
        # Errback first: failures are converted to their message text by
        # _showError and then flow into the reply callback below.
        deferred = defer.maybeDeferred(func, nick, channel, rest)
        deferred.addErrback(self._showError)
        # Private messages (channel == our nick) are answered to the sender.
        if channel == self.nickname:
            deferred.addCallback(self._sendMessage, nick)
        else:
            deferred.addCallback(self._sendMessage, channel, nick)

    def _sendMessage(self, msg, target, nick=None):
        # Prefix with "nick, " when replying in a channel.
        if nick:
            msg = "%s, %s" % (nick, msg)
        self.msg(target, msg)

    def _showError(self, failure):
        # Turn a failure into plain reply text instead of propagating it.
        return failure.getErrorMessage()

    def command_ping(self, nick, channel, rest):
        return "pong"

    def command_help(self, nick, channel, rest):
        return "!addquote (aq), !deletequote (dq), !quote (q), !findquote (fq)"

    def command_addquote(self, nick, channel, rest):
        return self.factory.db.add_quote(rest, channel, nick)
    command_aq = command_addquote

    def command_deletequote(self, nick, channel, rest):
        return self.factory.db.delete_quote(rest, channel, nick)
    command_dq = command_deletequote

    def command_quote(self, nick, channel, rest):
        # "rest" is ignored; selection semantics live in database.Database.
        return self.factory.db.quote(channel)
    command_q = command_quote

    def command_findquote(self, nick, channel, rest):
        return self.factory.db.find_quote(rest, channel)
    command_fq = command_findquote
class IRCFactory(protocol.ReconnectingClientFactory):
    """Reconnecting client factory sharing one Database across connections.

    NOTE(review): db is a class attribute created at import time -- opening
    the sqlite file is a module-import side effect.
    """
    protocol = IRCProtocol
    channels = config.channels
    db = database.Database(config.sqlite_path)
def run(reactor, host, port):
    """Connect over TLS and return a deferred that fires on disconnect.

    The returned deferred (the protocol's .deferred) keeps task.react
    running until the connection is lost.
    """
    # optionsForClientTLS validates the server certificate against *host*.
    options = ssl.optionsForClientTLS(host)
    endpoint = endpoints.SSL4ClientEndpoint(reactor, host, port, options)
    factory = IRCFactory()
    deferred = endpoint.connect(factory)
    deferred.addCallback(lambda protocol: protocol.deferred)
    return deferred
def main():
    """Start logging to stderr and run the bot until the connection closes."""
    log.startLogging(sys.stderr)
    task.react(run, (config.serverhost, config.serverport))
if __name__ == "__main__":
main() | src/blahblahblahbot/bot.py | import sys
from twisted.internet import defer, endpoints, protocol, reactor, ssl, task
from twisted.python import log
from twisted.words.protocols import irc
import config
import database
class IRCProtocol(irc.IRCClient):
    """IRC client that dispatches "!command" messages to command_* methods
    and replies in-channel (or by private message)."""

    nickname = config.nickname

    def __init__(self):
        # Fired (errback) with the disconnect reason when the connection is
        # lost; run() chains on it to keep the reactor alive until then.
        self.deferred = defer.Deferred()

    def connectionLost(self, reason):
        self.deferred.errback(reason)

    def signedOn(self):
        # Join every configured channel once the server accepts us.
        for channel in self.factory.channels:
            self.join(channel)

    def privmsg(self, user, channel, message):
        nick, _, host = user.partition("!")
        message = message.strip()
        if not message.startswith("!"):
            return
        # NOTE(review): lstrip("!") removes *all* leading '!' characters,
        # so "!!ping" dispatches the same as "!ping".
        command, sep, rest = message.lstrip("!").partition(" ")
        func = getattr(self, "command_" + command, None)
        if not func:
            return
        # Errback first: failures are converted to their message text by
        # _showError and then flow into the reply callback below.
        deferred = defer.maybeDeferred(func, nick, channel, rest)
        deferred.addErrback(self._showError)
        # Private messages (channel == our nick) are answered to the sender.
        if channel == self.nickname:
            deferred.addCallback(self._sendMessage, nick)
        else:
            deferred.addCallback(self._sendMessage, channel, nick)

    def _sendMessage(self, msg, target, nick=None):
        # Prefix with "nick, " when replying in a channel.
        if nick:
            msg = "%s, %s" % (nick, msg)
        self.msg(target, msg)

    def _showError(self, failure):
        # Turn a failure into plain reply text instead of propagating it.
        return failure.getErrorMessage()

    def command_ping(self, nick, channel, rest):
        return "pong"

    def command_help(self, nick, channel, rest):
        return "!addquote (aq), !deletequote (dq), !quote (q), !findquote (fq)"

    def command_addquote(self, nick, channel, rest):
        return self.factory.db.add_quote(rest, channel, nick)
    command_aq = command_addquote

    def command_deletequote(self, nick, channel, rest):
        return self.factory.db.delete_quote(rest, channel, nick)
    command_dq = command_deletequote

    def command_quote(self, nick, channel, rest):
        # "rest" is ignored; selection semantics live in database.Database.
        return self.factory.db.quote(channel)
    command_q = command_quote

    def command_findquote(self, nick, channel, rest):
        return self.factory.db.find_quote(rest, channel)
    command_fq = command_findquote
class IRCFactory(protocol.ReconnectingClientFactory):
    """Reconnecting client factory sharing one Database across connections.

    NOTE(review): db is a class attribute created at import time -- opening
    the sqlite file is a module-import side effect.
    """
    protocol = IRCProtocol
    channels = config.channels
    db = database.Database(config.sqlite_path)
def run(reactor, host, port):
    """Connect over TLS and return a deferred that fires on disconnect.

    The returned deferred (the protocol's .deferred) keeps task.react
    running until the connection is lost.
    """
    # optionsForClientTLS validates the server certificate against *host*.
    options = ssl.optionsForClientTLS(host)
    endpoint = endpoints.SSL4ClientEndpoint(reactor, host, port, options)
    factory = IRCFactory()
    deferred = endpoint.connect(factory)
    deferred.addCallback(lambda protocol: protocol.deferred)
    return deferred
def main():
    """Start logging to stderr and run the bot until the connection closes."""
    log.startLogging(sys.stderr)
    task.react(run, (config.serverhost, config.serverport))
if __name__ == "__main__":
main() | 0.343562 | 0.051463 |
from django.core.exceptions import ObjectDoesNotExist
from rdkit.Chem import AllChem
from rdkit import Chem
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import rdDepictor
from django_rdkit import models
import datetime
from django.contrib.auth.models import User
class Molecule(models.Model):
    """
    Represents one molecule.

    save() accepts the structure as molfile, SMILES, RDKit mol or InChI and
    derives every descriptor field from it before persisting.
    """
    # fields, which can be calculated on save():
    rdmol = models.MolField()
    internal_id = models.CharField(max_length=32, db_index=True)
    image_svg = models.TextField(null=True)
    mw = models.FloatField(db_index=True)
    sum_formula = models.CharField(max_length=32, db_index=True)
    fingerprint = models.CharField(max_length=1024, db_index=True)
    inchi = models.TextField(db_index=True)
    inchi_key = models.CharField(max_length=27, db_index=True)
    name = models.TextField(db_index=True, null=True)
    smiles = models.TextField(db_index=True)
    amount = models.FloatField()
    created = models.DateTimeField(auto_now_add=True)

    # excluded molecules SMILES (they cause rdKit stuck)
    EXCLUDED_MOLECULES = ["C", "CH3", "CH4", "[CH3]", "[C]", "[CH4]"]

    def __str__(self):
        return "Molecule ({id}): '{name}', formula: '{formula}'".format(id=self.internal_id, name=self.name, formula=self.sum_formula)

    def save(self, smiles=None, molfile=None, rdmol=None, inchi=None, name=None, update=False, *args, **kwargs):
        """Create (update=False) or plainly re-save (update=True) the row.

        On creation, one of molfile/smiles/rdmol/inchi must be given;
        precedence is molfile > smiles > rdmol > inchi.

        Raises:
            MoleculeExistsInDatabase: the InChI is already stored.
            MoleculeCreationError: RDKit could not parse the input.
        """
        if not update:
            # NOTE(review): if none of the structure arguments is given,
            # "mol" is never bound and "if mol:" raises NameError.
            if molfile:
                mol = AllChem.MolFromMolBlock(molfile)
            elif smiles:
                mol = AllChem.MolFromSmiles(smiles)
            elif rdmol:
                mol = rdmol
            elif inchi:
                mol = AllChem.MolFromInchi(inchi)
            if mol:
                # Canonicalize through RDKit regardless of the input form.
                inchi = AllChem.MolToInchi(mol)
                smiles = AllChem.MolToSmiles(mol)
                # Duplicate check keyed on the canonical InChI.
                if inchi and Molecule.objects.filter(inchi=inchi).count() == 0 and len(inchi) > 1:
                    self.inchi = inchi
                    # Molecular weight rounded to two decimals.
                    self.mw = float("{0:.2f}".format(AllChem.CalcExactMolWt(mol)))
                    self.sum_formula = AllChem.CalcMolFormula(mol)
                    self.fingerprint = AllChem.GetMorganFingerprintAsBitVect(mol, 4, nBits=1024).ToBitString()
                    self.inchi_key = AllChem.InchiToInchiKey(self.inchi)
                    # NOTE(review): "molfile" is not declared as a model field
                    # above -- this lands on the instance only and is not
                    # persisted.  TODO confirm intent.
                    self.molfile = AllChem.MolToMolBlock(mol)
                    self.smiles = smiles
                    self.rdmol = mol
                    # generating SVG image
                    if self.smiles not in self.EXCLUDED_MOLECULES:
                        binMol = AllChem.Mol(self.rdmol.ToBinary())
                        if not binMol.GetNumConformers():
                            rdDepictor.Compute2DCoords(self.rdmol)
                        drawer = rdMolDraw2D.MolDraw2DSVG(100, 100)
                        drawer.DrawMolecule(self.rdmol)
                        drawer.FinishDrawing()
                        svg = drawer.GetDrawingText().replace('svg:', '')
                        # remove first line containg XML meta information
                        self.image_svg = "\n".join(svg.split("\n")[1:]).strip()
                    else:
                        self.image_svg = None
                    if name:
                        self.name = name
                    else:
                        try:
                            self.name = mol.GetProp("LONGNAME")
                        except KeyError:
                            self.name = None
                    # NOTE(review): id-based counter is racy under concurrent
                    # saves and reuses the latest global pk + 1.
                    if Molecule.objects.all().count() == 0:
                        self.internal_id = "MI-J-1"
                    else:
                        self.internal_id = "MI-J-{}".format(Molecule.objects.latest("id").id + 1)
                    super(Molecule, self).save(*args, **kwargs)
                else:
                    raise self.MoleculeExistsInDatabase(smiles)
            else:
                raise self.MoleculeCreationError
        else:
            super(Molecule, self).save(*args, **kwargs)

    class Meta:
        ordering = ['id']

    class MoleculeExistsInDatabase(Exception):
        # Raised when the canonical InChI is already present.
        def __init__(self, smiles):
            super(Exception, self).__init__(smiles)
            self.smiles = smiles
            self.message = "Cannot add the molecule: it already exists in database."

    class MoleculeCreationError(Exception):
        # Raised when RDKit fails to parse the given structure.
        def __init__(self):
            super(Exception, self).__init__()
            self.message = "Cannot add the molecule: check your structure (valence etc.)."
class Building(models.Model):
    """
    Represents one building.
    """
    # Short building code; at most two characters.
    name = models.CharField(max_length=2, unique=True)

    def __str__(self):
        return "Building ({}): {}".format(self.id, self.name)
class Room(models.Model):
    """
    Represents one room.
    """
    # Room identifier, e.g. a door number; unique across all buildings.
    code = models.CharField(max_length=8, unique=True)

    def __str__(self):
        return "Room ({}): {}".format(self.id, self.code)
class Place(models.Model):
    """
    Represents one place which consists of room and building.
    """
    building = models.ForeignKey(Building)
    room = models.ForeignKey(Room)

    class Meta:
        # One Place row per (building, room) pair.
        unique_together = ("building", "room")

    def __str__(self):
        return "Place ({}): {}/{}".format(self.id, self.building.name, self.room.code)
class Person(models.Model):
    """
    Represents one person, linked 1:1 to a Django auth User.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=64)
    surname = models.CharField(max_length=128)
    place = models.ForeignKey(Place)
    telephone = models.CharField(max_length=16)

    def __str__(self):
        return "Person ({}): {}, {} @ {}".format(self.id, self.surname, self.name, str(self.place))

    class Meta:
        ordering = ['id']
class OrderStatus(models.Model):
    """
    Represents the order status.
    """
    # Human-readable status label, e.g. "pending".
    status = models.CharField(max_length=32, unique=True)

    def __str__(self):
        return "OrderStatus ({}): {}".format(self.id, self.status)
class Order(models.Model):
    """
    Represents one order.
    """
    compounds = models.ManyToManyField(Molecule, through="OrderCompound")
    person = models.ForeignKey(Person)
    status = models.ForeignKey(OrderStatus)
    # "YYYY-MM-DD/<n>" -- generated in save(update=False).
    order_internal_id = models.CharField(max_length=32, unique=True)
    created = models.DateTimeField(auto_now_add=True)

    def save(self, update=True, *args, **kwargs):
        """Persist the order; generate order_internal_id when update=False.

        NOTE(review): the default update=True means a plain save() never
        assigns order_internal_id -- callers must pass update=False on the
        first save.  The suffix reuses the latest global pk + 1, so per-day
        numbering is not sequential, and the lookup is racy under
        concurrent saves.  (Indentation of the final super() call was
        reconstructed as unconditional -- confirm.)
        """
        if not update:
            today_date = datetime.date.today()
            # Bounds of "today" for finding orders already created today.
            today_min = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
            today_max = datetime.datetime.combine(datetime.date.today(), datetime.time.max)
            try:
                today_last_order = Order.objects.filter(created__range=(today_min, today_max)).latest("id")
                self.order_internal_id = "{:%Y-%m-%d}/{}".format(datetime.date.today(), today_last_order.id + 1)
            except ObjectDoesNotExist:
                # First order of the day.
                self.order_internal_id = "{:%Y-%m-%d}/1".format(today_date)
        super(Order, self).save(*args, **kwargs)

    def __str__(self):
        return "Order ({} / {}): for {} | compounds: {} | {}".format(self.id,
                                                                     self.order_internal_id,
                                                                     str(self.person),
                                                                     self.compounds.count(),
                                                                     str(self.status))

    class Meta:
        ordering = ['id']
class OrderCompound(models.Model):
    """
    Join table for order and its compounds.
    """
    compound = models.ForeignKey(Molecule)
    order = models.ForeignKey(Order)
    # Amount of this compound in the order (units not recorded here).
    amount = models.FloatField()

    def __str__(self):
        return "OrderCompound ({}): compound {} in order {} ".format(self.id, self.compound.id, self.order.id)
class PurchaseStatus(models.Model):
    """
    Represents the purchase status.
    """
    # Human-readable status label, e.g. "ordered".
    status = models.CharField(max_length=32, unique=True)

    def __str__(self):
        return "PurchaseStatus ({}): {}".format(self.id, self.status)
class Purchase(models.Model):
    """
    Represents one purchase.

    purchase_internal_id has the form "YYYY-MM-DD/<n>" and is generated on
    the initial save (update=False), mirroring Order.save().
    """
    compounds = models.ManyToManyField(Molecule, through="PurchaseCompound")
    person = models.ForeignKey(Person)
    status = models.ForeignKey(PurchaseStatus)
    purchase_internal_id = models.CharField(max_length=32, unique=True)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "Purchase ({} / {}): for {} | compounds: {} | {}".format(self.id,
                                                                        self.purchase_internal_id,
                                                                        str(self.person),
                                                                        self.compounds.count(),
                                                                        str(self.status))

    def save(self, update=True, *args, **kwargs):
        """Persist the purchase; generate purchase_internal_id when update=False.

        NOTE(review): like Order.save(), the suffix reuses the latest global
        pk + 1 (not a per-day counter) and the lookup is racy under
        concurrent saves.
        """
        if not update:
            today_date = datetime.date.today()
            # Bounds of "today" for finding purchases already created today.
            today_min = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
            today_max = datetime.datetime.combine(datetime.date.today(), datetime.time.max)
            try:
                today_last_purchase = Purchase.objects.filter(created__range=(today_min, today_max)).latest("id")
                # BUG FIX: removed stray debug print(today_last_purchase.id)
                # left over from development; Order.save() has no such print.
                self.purchase_internal_id = "{:%Y-%m-%d}/{}".format(datetime.date.today(), today_last_purchase.id + 1)
            except ObjectDoesNotExist:
                # First purchase of the day.
                self.purchase_internal_id = "{:%Y-%m-%d}/1".format(today_date)
        super(Purchase, self).save(*args, **kwargs)

    class Meta:
        ordering = ['id']
class PurchaseCompound(models.Model):
"""
Join table for purchase and its compounds.
"""
compound = models.ForeignKey(Molecule)
purchase = models.ForeignKey(Purchase)
amount = models.FloatField()
def __str__(self):
return "PurchaseCompound ({}): compound {} in purchase {} ".format(self.id, self.compound.id, self.purchase.id) | chemoinformatics/molecule_web_database/moldb/models.py | from django.core.exceptions import ObjectDoesNotExist
from rdkit.Chem import AllChem
from rdkit import Chem
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import rdDepictor
from django_rdkit import models
import datetime
from django.contrib.auth.models import User
class Molecule(models.Model):
"""
Represents one molecule.
"""
# fields, which can be calculated on save():
rdmol = models.MolField()
internal_id = models.CharField(max_length=32, db_index=True)
image_svg = models.TextField(null=True)
mw = models.FloatField(db_index=True)
sum_formula = models.CharField(max_length=32, db_index=True)
fingerprint = models.CharField(max_length=1024, db_index=True)
inchi = models.TextField(db_index=True)
inchi_key = models.CharField(max_length=27, db_index=True)
name = models.TextField(db_index=True, null=True)
smiles = models.TextField(db_index=True)
amount = models.FloatField()
created = models.DateTimeField(auto_now_add=True)
# excluded molecules SMILES (they cause rdKit stuck)
EXCLUDED_MOLECULES = ["C", "CH3", "CH4", "[CH3]", "[C]", "[CH4]"]
def __str__(self):
return "Molecule ({id}): '{name}', formula: '{formula}'".format(id=self.internal_id, name=self.name, formula=self.sum_formula)
def save(self, smiles=None, molfile=None, rdmol=None, inchi=None, name=None, update=False, *args, **kwargs):
if not update:
if molfile:
mol = AllChem.MolFromMolBlock(molfile)
elif smiles:
mol = AllChem.MolFromSmiles(smiles)
elif rdmol:
mol = rdmol
elif inchi:
mol = AllChem.MolFromInchi(inchi)
if mol:
inchi = AllChem.MolToInchi(mol)
smiles = AllChem.MolToSmiles(mol)
if inchi and Molecule.objects.filter(inchi=inchi).count() == 0 and len(inchi) > 1:
self.inchi = inchi
self.mw = float("{0:.2f}".format(AllChem.CalcExactMolWt(mol)))
self.sum_formula = AllChem.CalcMolFormula(mol)
self.fingerprint = AllChem.GetMorganFingerprintAsBitVect(mol, 4, nBits=1024).ToBitString()
self.inchi_key = AllChem.InchiToInchiKey(self.inchi)
self.molfile = AllChem.MolToMolBlock(mol)
self.smiles = smiles
self.rdmol = mol
# generating SVG image
if self.smiles not in self.EXCLUDED_MOLECULES:
binMol = AllChem.Mol(self.rdmol.ToBinary())
if not binMol.GetNumConformers():
rdDepictor.Compute2DCoords(self.rdmol)
drawer = rdMolDraw2D.MolDraw2DSVG(100, 100)
drawer.DrawMolecule(self.rdmol)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace('svg:', '')
# remove first line containg XML meta information
self.image_svg = "\n".join(svg.split("\n")[1:]).strip()
else:
self.image_svg = None
if name:
self.name = name
else:
try:
self.name = mol.GetProp("LONGNAME")
except KeyError:
self.name= None
if Molecule.objects.all().count() == 0:
self.internal_id = "MI-J-1"
else:
self.internal_id = "MI-J-{}".format(Molecule.objects.latest("id").id + 1)
super(Molecule, self).save(*args, **kwargs)
else:
raise self.MoleculeExistsInDatabase(smiles)
else:
raise self.MoleculeCreationError
else:
super(Molecule, self).save(*args, **kwargs)
class Meta:
ordering = ['id']
class MoleculeExistsInDatabase(Exception):
def __init__(self, smiles):
super(Exception, self).__init__(smiles)
self.smiles = smiles
self.message = "Cannot add the molecule: it already exists in database."
class MoleculeCreationError(Exception):
def __init__(self):
super(Exception, self).__init__()
self.message = "Cannot add the molecule: check your structure (valence etc.)."
class Building(models.Model):
"""
Represents one building.
"""
name = models.CharField(max_length=2, unique=True)
def __str__(self):
return "Building ({}): {}".format(self.id, self.name)
class Room(models.Model):
"""
Represents one room.
"""
code = models.CharField(max_length=8, unique=True)
def __str__(self):
return "Room ({}): {}".format(self.id, self.code)
class Place(models.Model):
"""
Represents one place which consists of room and building.
"""
building = models.ForeignKey(Building)
room = models.ForeignKey(Room)
class Meta:
unique_together = ("building", "room")
def __str__(self):
return "Place ({}): {}/{}".format(self.id, self.building.name, self.room.code)
class Person(models.Model):
"""
Represents one person.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
name = models.CharField(max_length=64)
surname = models.CharField(max_length=128)
place = models.ForeignKey(Place)
telephone = models.CharField(max_length=16)
def __str__(self):
return "Person ({}): {}, {} @ {}".format(self.id, self.surname, self.name, str(self.place))
class Meta:
ordering = ['id']
class OrderStatus(models.Model):
"""
Represents the order status.
"""
status = models.CharField(max_length=32, unique=True)
def __str__(self):
return "OrderStatus ({}): {}".format(self.id, self.status)
class Order(models.Model):
"""
Represents one order.
"""
compounds = models.ManyToManyField(Molecule, through="OrderCompound")
person = models.ForeignKey(Person)
status = models.ForeignKey(OrderStatus)
order_internal_id = models.CharField(max_length=32, unique=True)
created = models.DateTimeField(auto_now_add=True)
def save(self, update=True, *args, **kwargs):
if not update:
today_date = datetime.date.today()
today_min = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
today_max = datetime.datetime.combine(datetime.date.today(), datetime.time.max)
try:
today_last_order = Order.objects.filter(created__range=(today_min, today_max)).latest("id")
self.order_internal_id = "{:%Y-%m-%d}/{}".format(datetime.date.today(), today_last_order.id + 1)
except ObjectDoesNotExist:
self.order_internal_id = "{:%Y-%m-%d}/1".format(today_date)
super(Order, self).save(*args, **kwargs)
def __str__(self):
return "Order ({} / {}): for {} | compounds: {} | {}".format(self.id,
self.order_internal_id,
str(self.person),
self.compounds.count(),
str(self.status))
class Meta:
ordering = ['id']
class OrderCompound(models.Model):
"""
Join table for order and its compounds.
"""
compound = models.ForeignKey(Molecule)
order = models.ForeignKey(Order)
amount = models.FloatField()
def __str__(self):
return "OrderCompound ({}): compound {} in order {} ".format(self.id, self.compound.id, self.order.id)
class PurchaseStatus(models.Model):
"""
Represents the purchase status.
"""
status = models.CharField(max_length=32, unique=True)
def __str__(self):
return "PurchaseStatus ({}): {}".format(self.id, self.status)
class Purchase(models.Model):
"""
Represents one purchase.
"""
compounds = models.ManyToManyField(Molecule, through="PurchaseCompound")
person = models.ForeignKey(Person)
status = models.ForeignKey(PurchaseStatus)
purchase_internal_id = models.CharField(max_length=32, unique=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "Purchase ({} / {}): for {} | compounds: {} | {}".format(self.id,
self.purchase_internal_id,
str(self.person),
self.compounds.count(),
str(self.status))
def save(self, update=True, *args, **kwargs):
if not update:
today_date = datetime.date.today()
today_min = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
today_max = datetime.datetime.combine(datetime.date.today(), datetime.time.max)
try:
today_last_purchase = Purchase.objects.filter(created__range=(today_min, today_max)).latest("id")
print(today_last_purchase.id)
self.purchase_internal_id = "{:%Y-%m-%d}/{}".format(datetime.date.today(), today_last_purchase.id + 1)
except ObjectDoesNotExist:
self.purchase_internal_id = "{:%Y-%m-%d}/1".format(today_date)
super(Purchase, self).save(*args, **kwargs)
class Meta:
ordering = ['id']
class PurchaseCompound(models.Model):
"""
Join table for purchase and its compounds.
"""
compound = models.ForeignKey(Molecule)
purchase = models.ForeignKey(Purchase)
amount = models.FloatField()
def __str__(self):
return "PurchaseCompound ({}): compound {} in purchase {} ".format(self.id, self.compound.id, self.purchase.id) | 0.527803 | 0.348562 |
import os
import numpy as np
import pandas as pd
from util import DATA_DIR, DOWNLOAD_DIR, load_data, reduce_dataset, keystrokes2events, events2keystrokes
GREYC_NISLAB_DATASET_URL = 'http://www.epaymentbiometrics.ensicaen.fr/wp-content/uploads/2015/04/greyc-nislab-keystroke-benchmark-dataset.xls'
CITEFA_DATASET_URL = 'http://www.cse.chalmers.se/~bello/publications/kprofiler-20100716-1442.tar.gz'
VILLANI_DATASET_URL = 'https://bitbucket.org/vmonaco/dataset-villani-keystroke/raw/f451aa1b1ee40e86ef58d8eab2b8f829fcc23405/data/keystroke.csv'
COLS = ['age', 'gender', 'handedness', 'timepress', 'timerelease', 'keyname']
def make_sessions(x, mean, std, skip_chars=20):
x = x.reset_index(level=1)
base_session = x['session'].unique().squeeze() * 10000
end = 0
session = base_session
while end < len(x):
if std > 0:
new_end = end + int(np.random.normal(mean, std))
else:
new_end = end + int(mean)
if new_end > len(x):
x['session'][end:new_end] = -1
break
else:
x['session'][end:new_end] = session
x['session'][new_end:new_end + skip_chars] = -1
session += 1
end = new_end + skip_chars
x = x[x['session'] >= 0]
x = x.set_index('session', append=True)
return x
def preprocess_greyc_nislab(in_file, out_file):
"""
Preprocess the raw GREYC NISLAB dataset
"""
df = pd.concat([pd.read_excel(in_file, sheetname=0),
pd.read_excel(in_file, sheetname=1),
pd.read_excel(in_file, sheetname=2),
pd.read_excel(in_file, sheetname=3),
pd.read_excel(in_file, sheetname=4)])
df = df[df['Class'] == 2]
df['age'] = (df['Age'] < 30).map({True: '<30', False: '>=30'})
df['gender'] = df['Gender'].map({'F': 'female', 'M': 'male'})
df['handedness'] = df['Handedness'].map({'L': 'left', 'R': 'right'})
df['session'] = np.arange(len(df))
df['password'] = df['Password'].map({
'<NAME>': 1,
'the rolling stones': 2,
'<NAME>': 3,
'red hot chilli peppers': 4,
'united states of america': 5,
})
def preprocess_row(idx_row):
idx, row = idx_row
keyname = list(map(lambda x: 'space' if x == ' ' else x, list(row['Password'])))
v = np.array(row['Keystroke Template Vector'].strip().split()).astype(int) // 10000
s = len(keyname) - 1
pp, rr, pr, rp = [v[s * i:s * (i + 1)] for i in range(4)]
timepress = np.r_[0, pp].cumsum()
# Offset the first release time by the duration of the first key
timerelease = np.r_[rp[0] - rr[0], rr].cumsum()
# There are ~180 rows where timerelease == timepress.
# Fix these by assuming at least the minimum standard clock resolution
timerelease[timerelease == timepress] += 16
sample = pd.DataFrame.from_items([
('user', row['User_ID']),
('session', row['session']),
('password', row['password']),
('age', row['age']),
('gender', row['gender']),
('handedness', row['handedness']),
('timepress', timepress),
('timerelease', timerelease),
('keyname', keyname)
])
return sample
df = pd.concat(map(preprocess_row, df.iterrows()))
df = df.set_index(['user', 'session'])[COLS]
df = remove_repeated_keys(df)
df.to_csv(out_file)
return
def preprocess_citefa(in_file, out_file):
"""
Preprocess the raw CITEFA dataset
"""
import tempfile
import shutil
import tarfile
from glob import glob
from operator import itemgetter
from keycode import lookup_key, detect_agent
tdir = tempfile.mkdtemp()
tfile = tarfile.open(in_file, 'r:gz')
tfile.extractall(tdir)
dfs = []
for fname in glob(os.path.join(tdir, '*', '*')):
with open(fname) as f:
lines = f.readlines()
header = lines[0]
agent = detect_agent(header)
fields = header.split(';')
age = '<30' if int(fields[7]) < 30 else '>=30'
gender = 'male' if fields[8] == 'Male' else 'female'
handedness = 'right' if fields[9] == 'right-handed' else 'left'
# rows contain the keypress/keyrelease actions, need to convert to keystrokes
key_actions = [row.strip().split() for row in lines if ('dn' in row) or ('up' in row)]
# parse the ints
key_actions = [(i1, int(i2), i3, int(i4)) for i1, i2, i3, i4 in key_actions]
key_actions = sorted(key_actions, key=itemgetter(1))
keystrokes = []
keys_down = {}
for task, time, action, keycode in key_actions:
if action == 'dn':
if keycode in keys_down.keys():
print('Warning: key pressed twice without release (probably auto repeated while held down)')
continue
keys_down[keycode] = time
elif action == 'up':
if keycode not in keys_down.keys():
print('Warning: key released without first being pressed', time, keycode)
continue
keystrokes.append((task, keys_down[keycode], time, lookup_key(keycode, agent)))
del keys_down[keycode]
else:
raise Exception('Unknown action')
task, timepress, timerelease, keyname = zip(*keystrokes)
dfs.append(pd.DataFrame.from_items([
('user', fields[4]),
('session', int(fields[2])),
('age', age),
('gender', gender),
('handedness', handedness),
('task', task),
('timepress', timepress),
('timerelease', timerelease),
('keyname', keyname)
]))
shutil.rmtree(tdir)
df = pd.concat(dfs)
# Keep only the sentence copy tasks. See Bello 2010
df = df[df['task'].isin(
{'ks_00', 'ks_01', 'ks_02', 'ks_03', 'ks_04', 'ks_05',
'ks_06', 'ks_07', 'ks_08', 'ks_09', 'ks_10',
'ks_11' 'ks_12', 'ks_13', 'ks_14'})]
df['session'] = df['session'] * 100 + df['task'].str[3:].astype(int)
df = df.set_index(['user', 'session'])
df = remove_repeated_keys(df)
df = reduce_dataset(df, min_samples=10, max_samples=10)
df.to_csv(out_file)
return
def preprocess_villani(in_file, out_file, long_fixed_out_file):
"""
Preprocess the raw Villani dataset and extend the long fixed dataset
"""
df = pd.read_csv(in_file, index_col=[0, 1])
# Make age a binary target, <30 and >=30
df['age'] = df['agegroup'].map({
'under20': '<30',
'20-29': '<30',
'30-39': '>=30',
'40-49': '>=30',
'50-59': '>=30',
'over60': '>=30'}
)
# Ignore missing data
df = df.dropna()
df = remove_repeated_keys(df)
# combine the villani fixed text with citefa dataset fixed text
long_fixed = load_data('long_fixed')
slf = long_fixed.groupby(level=[0, 1]).size()
villani_fixed = df[df['inputtype'] == 'fixed']
villani_fixed = villani_fixed.groupby(level=[0, 1]).apply(lambda x: make_sessions(x, slf.mean(), slf.std()))
villani_fixed = villani_fixed.reset_index(level=[0, 1], drop=True)
villani_fixed = reduce_dataset(villani_fixed, min_samples=10, max_samples=10)
long_fixed = pd.concat([long_fixed, villani_fixed])
long_fixed = long_fixed[COLS]
long_fixed.to_csv(long_fixed_out_file)
# Free-text input only
villani_free = df[df['inputtype'] == 'free']
villani_free = villani_free.groupby(level=[0, 1]).apply(lambda x: make_sessions(x, slf.mean(), slf.std()))
villani_free = villani_free.reset_index(level=[0, 1], drop=True)
villani_free = reduce_dataset(villani_free, min_samples=10, max_samples=10)
villani_free = villani_free[COLS]
villani_free.to_csv(out_file)
return
def remove_repeated_keys(df):
def process_sample(x):
dfs = []
last_release = {}
for idx, row in x.iterrows():
# time press must be after last release, otherwise ignore
if row['keyname'] in last_release.keys() and row['timepress'] <= last_release[row['keyname']]:
continue
last_release[row['keyname']] = row['timerelease']
dfs.append(row)
x = pd.concat(dfs, axis=1).T
x.index.names = ['user', 'session']
return x
df = df.groupby(level=[0, 1]).apply(process_sample).reset_index(level=[2, 3], drop=True)
return df
def preprocess():
"""
Download and preprocess datasets for the experiments.
"""
import urllib.request
import urllib.error
def download_dataset(name, local_name, url):
if os.path.exists(os.path.join(DOWNLOAD_DIR, local_name)):
print('Already downloaded %s' % name)
return
try:
print('Downloading %s' % name)
urllib.request.urlretrieve(url, os.path.join(DOWNLOAD_DIR, local_name))
except urllib.error.HTTPError as e:
print('WARNING: Unable to download %s from URL:\n%s' % (name, url))
print('Check that the URL is correct and you have permissions to download the file.')
# Download both datasets
download_dataset('GREYC NISLAB Dataset', 'greyc_nislab.xls', GREYC_NISLAB_DATASET_URL)
download_dataset('CITAFA Dataset', 'citefa.tar.gz', CITEFA_DATASET_URL)
download_dataset('Villani Dataset', 'villani.csv', VILLANI_DATASET_URL)
# This creates the short fixed dataset
# preprocess_greyc_nislab(os.path.join(DOWNLOAD_DIR, 'greyc_nislab.xls'),
# os.path.join(DATA_DIR, 'short_fixed.csv'))
# This creates the long fixed dataset
preprocess_citefa(os.path.join(DOWNLOAD_DIR, 'citefa.tar.gz'),
os.path.join(DATA_DIR, 'long_fixed.csv'))
# This creates the long free dataset and extends the previous long fixed dataset
preprocess_villani(os.path.join(DOWNLOAD_DIR, 'villani.csv'),
os.path.join(DATA_DIR, 'long_free.csv'),
os.path.join(DATA_DIR, 'long_fixed.csv'))
return | preprocess.py | import os
import numpy as np
import pandas as pd
from util import DATA_DIR, DOWNLOAD_DIR, load_data, reduce_dataset, keystrokes2events, events2keystrokes
GREYC_NISLAB_DATASET_URL = 'http://www.epaymentbiometrics.ensicaen.fr/wp-content/uploads/2015/04/greyc-nislab-keystroke-benchmark-dataset.xls'
CITEFA_DATASET_URL = 'http://www.cse.chalmers.se/~bello/publications/kprofiler-20100716-1442.tar.gz'
VILLANI_DATASET_URL = 'https://bitbucket.org/vmonaco/dataset-villani-keystroke/raw/f451aa1b1ee40e86ef58d8eab2b8f829fcc23405/data/keystroke.csv'
COLS = ['age', 'gender', 'handedness', 'timepress', 'timerelease', 'keyname']
def make_sessions(x, mean, std, skip_chars=20):
x = x.reset_index(level=1)
base_session = x['session'].unique().squeeze() * 10000
end = 0
session = base_session
while end < len(x):
if std > 0:
new_end = end + int(np.random.normal(mean, std))
else:
new_end = end + int(mean)
if new_end > len(x):
x['session'][end:new_end] = -1
break
else:
x['session'][end:new_end] = session
x['session'][new_end:new_end + skip_chars] = -1
session += 1
end = new_end + skip_chars
x = x[x['session'] >= 0]
x = x.set_index('session', append=True)
return x
def preprocess_greyc_nislab(in_file, out_file):
"""
Preprocess the raw GREYC NISLAB dataset
"""
df = pd.concat([pd.read_excel(in_file, sheetname=0),
pd.read_excel(in_file, sheetname=1),
pd.read_excel(in_file, sheetname=2),
pd.read_excel(in_file, sheetname=3),
pd.read_excel(in_file, sheetname=4)])
df = df[df['Class'] == 2]
df['age'] = (df['Age'] < 30).map({True: '<30', False: '>=30'})
df['gender'] = df['Gender'].map({'F': 'female', 'M': 'male'})
df['handedness'] = df['Handedness'].map({'L': 'left', 'R': 'right'})
df['session'] = np.arange(len(df))
df['password'] = df['Password'].map({
'<NAME>': 1,
'the rolling stones': 2,
'<NAME>': 3,
'red hot chilli peppers': 4,
'united states of america': 5,
})
def preprocess_row(idx_row):
idx, row = idx_row
keyname = list(map(lambda x: 'space' if x == ' ' else x, list(row['Password'])))
v = np.array(row['Keystroke Template Vector'].strip().split()).astype(int) // 10000
s = len(keyname) - 1
pp, rr, pr, rp = [v[s * i:s * (i + 1)] for i in range(4)]
timepress = np.r_[0, pp].cumsum()
# Offset the first release time by the duration of the first key
timerelease = np.r_[rp[0] - rr[0], rr].cumsum()
# There are ~180 rows where timerelease == timepress.
# Fix these by assuming at least the minimum standard clock resolution
timerelease[timerelease == timepress] += 16
sample = pd.DataFrame.from_items([
('user', row['User_ID']),
('session', row['session']),
('password', row['password']),
('age', row['age']),
('gender', row['gender']),
('handedness', row['handedness']),
('timepress', timepress),
('timerelease', timerelease),
('keyname', keyname)
])
return sample
df = pd.concat(map(preprocess_row, df.iterrows()))
df = df.set_index(['user', 'session'])[COLS]
df = remove_repeated_keys(df)
df.to_csv(out_file)
return
def preprocess_citefa(in_file, out_file):
"""
Preprocess the raw CITEFA dataset
"""
import tempfile
import shutil
import tarfile
from glob import glob
from operator import itemgetter
from keycode import lookup_key, detect_agent
tdir = tempfile.mkdtemp()
tfile = tarfile.open(in_file, 'r:gz')
tfile.extractall(tdir)
dfs = []
for fname in glob(os.path.join(tdir, '*', '*')):
with open(fname) as f:
lines = f.readlines()
header = lines[0]
agent = detect_agent(header)
fields = header.split(';')
age = '<30' if int(fields[7]) < 30 else '>=30'
gender = 'male' if fields[8] == 'Male' else 'female'
handedness = 'right' if fields[9] == 'right-handed' else 'left'
# rows contain the keypress/keyrelease actions, need to convert to keystrokes
key_actions = [row.strip().split() for row in lines if ('dn' in row) or ('up' in row)]
# parse the ints
key_actions = [(i1, int(i2), i3, int(i4)) for i1, i2, i3, i4 in key_actions]
key_actions = sorted(key_actions, key=itemgetter(1))
keystrokes = []
keys_down = {}
for task, time, action, keycode in key_actions:
if action == 'dn':
if keycode in keys_down.keys():
print('Warning: key pressed twice without release (probably auto repeated while held down)')
continue
keys_down[keycode] = time
elif action == 'up':
if keycode not in keys_down.keys():
print('Warning: key released without first being pressed', time, keycode)
continue
keystrokes.append((task, keys_down[keycode], time, lookup_key(keycode, agent)))
del keys_down[keycode]
else:
raise Exception('Unknown action')
task, timepress, timerelease, keyname = zip(*keystrokes)
dfs.append(pd.DataFrame.from_items([
('user', fields[4]),
('session', int(fields[2])),
('age', age),
('gender', gender),
('handedness', handedness),
('task', task),
('timepress', timepress),
('timerelease', timerelease),
('keyname', keyname)
]))
shutil.rmtree(tdir)
df = pd.concat(dfs)
# Keep only the sentence copy tasks. See Bello 2010
df = df[df['task'].isin(
{'ks_00', 'ks_01', 'ks_02', 'ks_03', 'ks_04', 'ks_05',
'ks_06', 'ks_07', 'ks_08', 'ks_09', 'ks_10',
'ks_11' 'ks_12', 'ks_13', 'ks_14'})]
df['session'] = df['session'] * 100 + df['task'].str[3:].astype(int)
df = df.set_index(['user', 'session'])
df = remove_repeated_keys(df)
df = reduce_dataset(df, min_samples=10, max_samples=10)
df.to_csv(out_file)
return
def preprocess_villani(in_file, out_file, long_fixed_out_file):
"""
Preprocess the raw Villani dataset and extend the long fixed dataset
"""
df = pd.read_csv(in_file, index_col=[0, 1])
# Make age a binary target, <30 and >=30
df['age'] = df['agegroup'].map({
'under20': '<30',
'20-29': '<30',
'30-39': '>=30',
'40-49': '>=30',
'50-59': '>=30',
'over60': '>=30'}
)
# Ignore missing data
df = df.dropna()
df = remove_repeated_keys(df)
# combine the villani fixed text with citefa dataset fixed text
long_fixed = load_data('long_fixed')
slf = long_fixed.groupby(level=[0, 1]).size()
villani_fixed = df[df['inputtype'] == 'fixed']
villani_fixed = villani_fixed.groupby(level=[0, 1]).apply(lambda x: make_sessions(x, slf.mean(), slf.std()))
villani_fixed = villani_fixed.reset_index(level=[0, 1], drop=True)
villani_fixed = reduce_dataset(villani_fixed, min_samples=10, max_samples=10)
long_fixed = pd.concat([long_fixed, villani_fixed])
long_fixed = long_fixed[COLS]
long_fixed.to_csv(long_fixed_out_file)
# Free-text input only
villani_free = df[df['inputtype'] == 'free']
villani_free = villani_free.groupby(level=[0, 1]).apply(lambda x: make_sessions(x, slf.mean(), slf.std()))
villani_free = villani_free.reset_index(level=[0, 1], drop=True)
villani_free = reduce_dataset(villani_free, min_samples=10, max_samples=10)
villani_free = villani_free[COLS]
villani_free.to_csv(out_file)
return
def remove_repeated_keys(df):
def process_sample(x):
dfs = []
last_release = {}
for idx, row in x.iterrows():
# time press must be after last release, otherwise ignore
if row['keyname'] in last_release.keys() and row['timepress'] <= last_release[row['keyname']]:
continue
last_release[row['keyname']] = row['timerelease']
dfs.append(row)
x = pd.concat(dfs, axis=1).T
x.index.names = ['user', 'session']
return x
df = df.groupby(level=[0, 1]).apply(process_sample).reset_index(level=[2, 3], drop=True)
return df
def preprocess():
"""
Download and preprocess datasets for the experiments.
"""
import urllib.request
import urllib.error
def download_dataset(name, local_name, url):
if os.path.exists(os.path.join(DOWNLOAD_DIR, local_name)):
print('Already downloaded %s' % name)
return
try:
print('Downloading %s' % name)
urllib.request.urlretrieve(url, os.path.join(DOWNLOAD_DIR, local_name))
except urllib.error.HTTPError as e:
print('WARNING: Unable to download %s from URL:\n%s' % (name, url))
print('Check that the URL is correct and you have permissions to download the file.')
# Download both datasets
download_dataset('GREYC NISLAB Dataset', 'greyc_nislab.xls', GREYC_NISLAB_DATASET_URL)
download_dataset('CITAFA Dataset', 'citefa.tar.gz', CITEFA_DATASET_URL)
download_dataset('Villani Dataset', 'villani.csv', VILLANI_DATASET_URL)
# This creates the short fixed dataset
# preprocess_greyc_nislab(os.path.join(DOWNLOAD_DIR, 'greyc_nislab.xls'),
# os.path.join(DATA_DIR, 'short_fixed.csv'))
# This creates the long fixed dataset
preprocess_citefa(os.path.join(DOWNLOAD_DIR, 'citefa.tar.gz'),
os.path.join(DATA_DIR, 'long_fixed.csv'))
# This creates the long free dataset and extends the previous long fixed dataset
preprocess_villani(os.path.join(DOWNLOAD_DIR, 'villani.csv'),
os.path.join(DATA_DIR, 'long_free.csv'),
os.path.join(DATA_DIR, 'long_fixed.csv'))
return | 0.459076 | 0.220941 |
from __future__ import absolute_import
import logging
from geocoder.location import Location
from geocoder.base import OneResult
from geocoder.uscensus import USCensusQuery
class USCensusReverseResult(OneResult):
@property
def ok(self):
return bool(self.raw['States'])
@property
def state(self):
if self.raw['States']:
return self.raw['States'][0].get('NAME')
@property
def statenumber(self):
if self.raw['States']:
return self.raw['States'][0].get('STATE')
@property
def county(self):
if self.raw['Counties']:
return self.raw['Counties'][0].get('NAME')
@property
def countynumber(self):
if self.raw['Counties']:
return self.raw['Counties'][0].get('COUNTY')
@property
def tract(self):
if self.raw['Census Tracts']:
return self.raw['Census Tracts'][0].get('NAME')
@property
def tractnumber(self):
if self.raw['Census Tracts']:
return self.raw['Census Tracts'][0].get('TRACT')
@property
def block(self):
if self.raw['2010 Census Blocks']:
return self.raw['2010 Census Blocks'][0].get('NAME')
elif self.raw['Census Blocks']:
return self.raw['Census Blocks'][0].get('NAME')
@property
def blocknumber(self):
if self.raw['2010 Census Blocks']:
return self.raw['2010 Census Blocks'][0].get('BLOCK')
elif self.raw['Census Blocks']:
return self.raw['Census Blocks'][0].get('BLOCK')
@property
def geoid(self):
if self.raw['2010 Census Blocks']:
return self.raw['2010 Census Blocks'][0].get('GEOID')
elif self.raw['Census Blocks']:
return self.raw['Census Blocks'][0].get('GEOID')
class USCensusReverse(USCensusQuery):
"""
US Census Geocoder REST Services
=======================
The Census Geocoder is an address look-up tool that converts your address to an approximate coordinate (latitude/longitude) and returns information about the address range that includes the address and the census geography the address is within. The geocoder is available as a web interface and as an API (Representational State Transfer - REST - web-based service).
API Reference
-------------
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
"""
provider = 'uscensus'
method = 'reverse'
_URL = 'https://geocoding.geo.census.gov/geocoder/geographies/coordinates'
_RESULT_CLASS = USCensusReverseResult
def _build_params(self, location, provider_key, **kwargs):
location = Location(location)
return {
'x': location.longitude,
'y': location.latitude,
'benchmark': kwargs.get('benchmark', '4'),
'vintage': kwargs.get('vintage', '4'),
'format': 'json'
}
def _adapt_results(self, json_response):
return [json_response['result']['geographies']]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = USCensusReverse([38.846542, -76.92691])
g.debug() | geocoder/uscensus_reverse.py | from __future__ import absolute_import
import logging
from geocoder.location import Location
from geocoder.base import OneResult
from geocoder.uscensus import USCensusQuery
class USCensusReverseResult(OneResult):
@property
def ok(self):
return bool(self.raw['States'])
@property
def state(self):
if self.raw['States']:
return self.raw['States'][0].get('NAME')
@property
def statenumber(self):
if self.raw['States']:
return self.raw['States'][0].get('STATE')
@property
def county(self):
if self.raw['Counties']:
return self.raw['Counties'][0].get('NAME')
@property
def countynumber(self):
if self.raw['Counties']:
return self.raw['Counties'][0].get('COUNTY')
@property
def tract(self):
if self.raw['Census Tracts']:
return self.raw['Census Tracts'][0].get('NAME')
@property
def tractnumber(self):
if self.raw['Census Tracts']:
return self.raw['Census Tracts'][0].get('TRACT')
@property
def block(self):
if self.raw['2010 Census Blocks']:
return self.raw['2010 Census Blocks'][0].get('NAME')
elif self.raw['Census Blocks']:
return self.raw['Census Blocks'][0].get('NAME')
@property
def blocknumber(self):
if self.raw['2010 Census Blocks']:
return self.raw['2010 Census Blocks'][0].get('BLOCK')
elif self.raw['Census Blocks']:
return self.raw['Census Blocks'][0].get('BLOCK')
@property
def geoid(self):
if self.raw['2010 Census Blocks']:
return self.raw['2010 Census Blocks'][0].get('GEOID')
elif self.raw['Census Blocks']:
return self.raw['Census Blocks'][0].get('GEOID')
class USCensusReverse(USCensusQuery):
"""
US Census Geocoder REST Services
=======================
The Census Geocoder is an address look-up tool that converts your address to an approximate coordinate (latitude/longitude) and returns information about the address range that includes the address and the census geography the address is within. The geocoder is available as a web interface and as an API (Representational State Transfer - REST - web-based service).
API Reference
-------------
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
"""
provider = 'uscensus'
method = 'reverse'
_URL = 'https://geocoding.geo.census.gov/geocoder/geographies/coordinates'
_RESULT_CLASS = USCensusReverseResult
def _build_params(self, location, provider_key, **kwargs):
location = Location(location)
return {
'x': location.longitude,
'y': location.latitude,
'benchmark': kwargs.get('benchmark', '4'),
'vintage': kwargs.get('vintage', '4'),
'format': 'json'
}
def _adapt_results(self, json_response):
return [json_response['result']['geographies']]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = USCensusReverse([38.846542, -76.92691])
g.debug() | 0.746971 | 0.21307 |
import json
from typing import Dict, Union
from peewee import (
SQL,
AutoField,
BlobField,
BooleanField,
Case,
CharField,
DoesNotExist,
FloatField,
ForeignKeyField,
IntegerField,
Model,
SqliteDatabase,
TextField,
fn,
)
from playhouse.hybrid import hybrid_property
from werkzeug.security import check_password_hash
from jukebox import APP_ROOT
# Single shared SQLite handle for all models; the DB file lives in the app root.
database = SqliteDatabase((APP_ROOT / "jukebox.db").as_posix())
class BaseModel(Model):
    """Common peewee base model.

    Binds every subclass to the shared SQLite ``database`` and opts into
    peewee 3.x snake_case table naming (``legacy_table_names = False``).
    """

    class Meta:
        database = database
        legacy_table_names = False
class Artist(BaseModel):
    """A music artist, unique by name (case-insensitive)."""

    artist_id = AutoField(primary_key=True)
    name = CharField()
    # Identifier of this artist in the external metadata API; null until matched.
    api_id = IntegerField(null=True)

    class Meta:
        database = database
        legacy_table_names = False
        # NOTE(review): Meta.order_by was removed in peewee 3.x and is silently
        # ignored there -- confirm the installed peewee version honours it.
        order_by = ("name",)
        constraints = [SQL('UNIQUE ("name" COLLATE NOCASE)')]

    @hybrid_property
    def sort_name(self):
        """Lower-cased name with a leading article "The " removed, for
        alphabetical sorting (e.g. "The Beatles" -> "beatles").

        Fix: the previous code removed the first occurrence of "the"
        anywhere in the string ("Mother" -> "mor"); only a genuine leading
        "The " prefix is stripped now.
        """
        lowered = str(self.name).lower()
        if lowered.startswith("the "):
            return lowered[4:].strip()
        return lowered

    @sort_name.expression
    def sort_name(cls):
        # SQL mirror of the Python property: strip a leading "the " prefix,
        # otherwise just lower-case. The old expression used REPLACE, which
        # removed *every* "the" substring and disagreed with the Python side.
        return Case(
            None,
            [
                (
                    fn.LOWER(fn.SUBSTR(cls.name, 1, 4)) == "the ",
                    fn.TRIM(fn.LOWER(fn.SUBSTR(cls.name, 5)), " "),
                ),
            ],
            fn.LOWER(cls.name),
        )

    def to_json(self) -> Dict[str, Union[str, int]]:
        """Serialize the artist to a JSON-compatible dict."""
        # noinspection PyTypeChecker
        return {
            "artist_id": self.artist_id,
            "name": self.name,
            "sort_name": self.sort_name,
        }
class ArtistImage(BaseModel):
    """Cached artwork blob for an artist."""

    artist_image_id = AutoField(primary_key=True)
    artist = ForeignKeyField(Artist, backref="images")
    # Raw image bytes; NULL until fetched.
    small = BlobField(null=True)
    # Set after a failed lookup -- presumably to avoid refetching; confirm.
    not_found = BooleanField(default=False)
class ArtistInfoMismatches(BaseModel):
    """Audit row for an external-API artist match that disagreed with ours.

    NOTE(review): artist_id is a plain IntegerField primary key rather than a
    ForeignKeyField to Artist -- presumably intentional so audit rows survive
    artist deletion; confirm.
    """

    artist_id = IntegerField(primary_key=True)
    artist_name = CharField()
    found_api_id = IntegerField()
    found_name = CharField()
class Album(BaseModel):
    """An album; titles are globally unique."""

    album_id = AutoField(primary_key=True)
    title = CharField()
    total_discs = IntegerField(null=True)
    year = IntegerField(null=True)
    # Free-form category (e.g. album/single) -- presumably; confirm usage.
    type = CharField(null=True)

    class Meta:
        database = database
        legacy_table_names = False
        # Unique index on title *alone*: two artists cannot have albums with
        # the same title. NOTE(review): confirm this is intentional.
        indexes = ((("title",), True),)

    @hybrid_property
    def sort_title(self):
        # NOTE(review): removes the first "the" *anywhere* in the string,
        # not only a leading "The " prefix.
        return str(self.title).lower().replace("the", "", 1).strip()

    @sort_title.expression
    def sort_title(cls):
        # SQL-side sort key: fires only when the title starts with "the",
        # but then strips *every* "the" -- can disagree with the Python
        # property above. NOTE(review): align them.
        return Case(
            None,
            [
                (
                    fn.LOWER(fn.SUBSTR(cls.title, 1, 3)) == "the",
                    fn.TRIM(fn.REPLACE(fn.LOWER(cls.title), "the", ""), " "),
                ),
            ],
            fn.LOWER(cls.title),
        )

    def to_json(self) -> Dict[str, Union[str, int]]:
        """JSON-serializable summary of the album."""
        # noinspection PyTypeChecker
        return {
            "album_id": self.album_id,
            "title": self.title,
            "sort_title": self.sort_title,
            "total_discs": self.total_discs,
            "year": self.year,
            "type": self.type,
        }
class AlbumArtist(BaseModel):
    """Many-to-many link between an album and its (possibly many) artists."""

    album_artist_id = AutoField(primary_key=True)
    artist = ForeignKeyField(Artist)
    album = ForeignKeyField(Album, backref="album_artists")

    class Meta:
        database = database
        legacy_table_names = False
        # Each (album, artist) pair may appear only once.
        indexes = ((("album", "artist"), True),)
class AlbumDisc(BaseModel):
    """One physical disc of an album (disc number plus its track count)."""

    album_disc_id = AutoField(primary_key=True)
    album = ForeignKeyField(Album, backref="disks")
    disc_number = IntegerField(null=True)
    total_tracks = IntegerField(null=True)

    class Meta:
        database = database
        legacy_table_names = False
        # A disc number may appear only once per album.
        indexes = ((("album", "disc_number"), True),)

    def to_json(self) -> Dict[str, Union[str, int]]:
        """JSON-serializable view of this disc (dereferences the album FK)."""
        return {
            "album_disc_id": self.album_disc_id,
            # Bug fix: Album has no `name` field -- `self.album.name` raised
            # AttributeError at runtime; the album's display field is `title`.
            "album": self.album.title,
            "album_id": self.album.album_id,
            "disc_number": self.disc_number,
            "total_tracks": self.total_tracks,
        }
class AlbumImage(BaseModel):
    """Cached cover-art blob for an album (at most one row per album)."""

    album_image_id = AutoField(primary_key=True)
    album = ForeignKeyField(Album, backref="images", unique=True)
    # Raw image bytes; NULL until fetched.
    small = BlobField(null=True)
    # Set after a failed lookup -- presumably to avoid refetching; confirm.
    not_found = BooleanField(default=False)
class Track(BaseModel):
    """A single audio file: tag metadata plus codec/container details."""

    track_id = AutoField(primary_key=True)
    title = CharField()
    album = ForeignKeyField(Album, backref="tracks")
    album_disc = ForeignKeyField(AlbumDisc, backref="tracks")
    artist = ForeignKeyField(Artist, backref="tracks")
    track_number = IntegerField(null=True)
    disc_number = IntegerField(null=True)
    genre = CharField(null=True)
    compilation = BooleanField(default=False)
    length = FloatField()      # duration, presumably in seconds -- confirm
    mimetype = CharField()
    codec = CharField()
    bitrate = IntegerField()
    size = IntegerField()      # presumably file size in bytes -- confirm
    file_path = CharField()

    class Meta:
        database = database
        legacy_table_names = False
        # A title may repeat; only the (title, album, path) triple is unique.
        indexes = ((("title", "album", "file_path"), True),)

    @hybrid_property
    def sort_title(self):
        # NOTE(review): strips the first "the" *anywhere* in the string,
        # not just a leading "The " prefix.
        return str(self.title).lower().replace("the", "", 1).strip()

    @sort_title.expression
    def sort_title(cls):
        # SQL-side sort key: applies only when the title starts with "the",
        # but then removes *every* "the" -- may disagree with the Python
        # property above. NOTE(review): align them.
        return Case(
            None,
            [
                (
                    fn.LOWER(fn.SUBSTR(cls.title, 1, 3)) == "the",
                    fn.TRIM(fn.REPLACE(fn.LOWER(cls.title), "the", ""), " "),
                ),
            ],
            fn.LOWER(cls.title),
        )

    def to_json(self) -> Dict[str, Union[str, int]]:
        """Full JSON view of the track.

        NOTE(review): dereferences the album/artist/album_disc relations; if
        they were not joined in the originating query each access is an extra
        SELECT (classic N+1).
        """
        return {
            "track_id": self.track_id,
            "title": self.title,
            "sort_title": self.sort_title,
            "album": self.album.title,
            "sort_album": self.album.sort_title,
            "album_id": self.album.album_id,
            "album_disc": self.album_disc.album_disc_id,
            "artist": self.artist.name,
            "sort_artist": self.artist.sort_name,
            "artist_id": self.artist.artist_id,
            "track_number": self.track_number,
            "genre": self.genre,
            "year": self.album.year,
            "compilation": self.compilation,
            "disc_number": self.disc_number,
            "length": self.length,
            "mimetype": self.mimetype,
            "codec": self.codec,
            "bitrate": self.bitrate,
            "size": self.size,
        }
class User(BaseModel):
    """An application account; passwords are stored as werkzeug hashes."""

    user_id = AutoField(primary_key=True)
    username = CharField(unique=True)
    # Reconstructed from a redacted dump ("<PASSWORD>()" is not valid
    # Python): check_password_hash() below expects a hash *string*, so the
    # column is a CharField holding a werkzeug password hash.
    password = CharField()
    # JSON-encoded user preferences; NULL means "never saved".
    settings = TextField(null=True)

    class Meta:
        database = database
        legacy_table_names = False
        order_by = ("username",)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Transient flag, not persisted -- presumably toggled by the login
        # flow elsewhere; confirm.
        self._authenticated = False

    @classmethod
    def default_settings(cls) -> Dict[str, str]:
        """Settings applied to users who have never saved any."""
        return {"theme_name": "dark", "primary_color": "#15dea5"}

    @classmethod
    def authenticate(cls, **kwargs):
        """Return the matching User, or None on any failure.

        Missing credentials, an unknown username, and a wrong password are
        all reported uniformly as None, so callers cannot tell which check
        failed (deliberate: avoids leaking which usernames exist).
        """
        username = kwargs.get("username")
        password = kwargs.get("password")
        if not username or not password:
            return None
        try:
            user = cls.get(username=username)
        except DoesNotExist:
            return None
        if not check_password_hash(user.password, password):
            return None
        return user

    def to_json(self) -> Dict[str, Union[str, int]]:
        """Serialize the user; falls back to default settings when unset."""
        settings = self.settings
        if not settings:
            settings = json.dumps(self.default_settings())
        # noinspection PyTypeChecker
        return {
            "user_id": self.user_id,
            "username": self.username,
            "settings": json.loads(settings),
        }
class Playlist(BaseModel):
    """One entry of a user's playlist; playlists exist only as shared names."""

    playlist_id = AutoField(primary_key=True)
    # Playlists are identified by (name, user); there is no playlist table.
    playlist_name = CharField()
    track = ForeignKeyField(Track)
    user = ForeignKeyField(User, backref="playlists")
    # Position of the track within the playlist.
    order = IntegerField()

    class Meta:
        database = database
        legacy_table_names = False
        # A track may appear only once per (playlist name, user).
        indexes = ((("playlist_name", "track", "user"), True),)

    def to_json(self) -> Dict[str, Union[str, int]]:
        """JSON view of this entry (dereferences the track and user FKs)."""
        return {
            "playlist_name": self.playlist_name,
            "track_id": self.track.track_id,
            "user_id": self.user.user_id,
            "order": self.order,
        }
class LovedTrack(BaseModel):
    """Marks a track as 'loved' by a user; one row per (track, user) pair."""

    loved_track_id = AutoField(primary_key=True)
    track = ForeignKeyField(Track)
    user = ForeignKeyField(User, backref="loved_tracks")

    class Meta:
        database = database
        legacy_table_names = False
        indexes = ((("track", "user"), True),)
class LovedAlbum(BaseModel):
    """Marks an album as 'loved' by a user; one row per (album, user) pair."""

    loved_album_id = AutoField(primary_key=True)
    album = ForeignKeyField(Album)
    user = ForeignKeyField(User, backref="loved_albums")

    class Meta:
        database = database
        legacy_table_names = False
        indexes = ((("album", "user"), True),)
class LovedArtist(BaseModel):
    """Marks an artist as 'loved' by a user; one row per (artist, user) pair."""

    loved_artist_id = AutoField(primary_key=True)
    artist = ForeignKeyField(Artist)
    user = ForeignKeyField(User, backref="loved_artists")

    class Meta:
        database = database
        legacy_table_names = False
        indexes = ((("artist", "user"), True),)
def create_tables() -> None:
    """Create all application tables inside a single connection context."""
    with database:
        database.create_tables(
            [
                Artist,
                ArtistImage,
                ArtistInfoMismatches,
                Album,
                AlbumArtist,
                AlbumDisc,
                AlbumImage,
                LovedTrack,
                LovedAlbum,
                LovedArtist,
                Track,
                Playlist,
                User,
            ]
        )
if __name__ == "__main__":
create_tables() | jukebox/db_models.py | import json
from typing import Dict, Union
from peewee import (
SQL,
AutoField,
BlobField,
BooleanField,
Case,
CharField,
DoesNotExist,
FloatField,
ForeignKeyField,
IntegerField,
Model,
SqliteDatabase,
TextField,
fn,
)
from playhouse.hybrid import hybrid_property
from werkzeug.security import check_password_hash
from jukebox import APP_ROOT
database = SqliteDatabase((APP_ROOT / "jukebox.db").as_posix())
class BaseModel(Model):
class Meta:
database = database
legacy_table_names = False
class Artist(BaseModel):
artist_id = AutoField(primary_key=True)
name = CharField()
api_id = IntegerField(null=True)
class Meta:
database = database
legacy_table_names = False
order_by = ("name",)
constraints = [SQL('UNIQUE ("name" COLLATE NOCASE)')]
@hybrid_property
def sort_name(self):
return str(self.name).lower().replace("the", "", 1).strip()
@sort_name.expression
def sort_name(cls):
return Case(
None,
[
(
fn.LOWER(fn.SUBSTR(cls.name, 1, 3)) == "the",
fn.TRIM(fn.REPLACE(fn.LOWER(cls.name), "the", ""), " "),
),
],
fn.LOWER(cls.name),
)
def to_json(self) -> Dict[str, Union[str, int]]:
# noinspection PyTypeChecker
return {
"artist_id": self.artist_id,
"name": self.name,
"sort_name": self.sort_name,
}
class ArtistImage(BaseModel):
artist_image_id = AutoField(primary_key=True)
artist = ForeignKeyField(Artist, backref="images")
small = BlobField(null=True)
not_found = BooleanField(default=False)
class ArtistInfoMismatches(BaseModel):
artist_id = IntegerField(primary_key=True)
artist_name = CharField()
found_api_id = IntegerField()
found_name = CharField()
class Album(BaseModel):
album_id = AutoField(primary_key=True)
title = CharField()
total_discs = IntegerField(null=True)
year = IntegerField(null=True)
type = CharField(null=True)
class Meta:
database = database
legacy_table_names = False
indexes = ((("title",), True),)
@hybrid_property
def sort_title(self):
return str(self.title).lower().replace("the", "", 1).strip()
@sort_title.expression
def sort_title(cls):
return Case(
None,
[
(
fn.LOWER(fn.SUBSTR(cls.title, 1, 3)) == "the",
fn.TRIM(fn.REPLACE(fn.LOWER(cls.title), "the", ""), " "),
),
],
fn.LOWER(cls.title),
)
def to_json(self) -> Dict[str, Union[str, int]]:
# noinspection PyTypeChecker
return {
"album_id": self.album_id,
"title": self.title,
"sort_title": self.sort_title,
"total_discs": self.total_discs,
"year": self.year,
"type": self.type,
}
class AlbumArtist(BaseModel):
album_artist_id = AutoField(primary_key=True)
artist = ForeignKeyField(Artist)
album = ForeignKeyField(Album, backref="album_artists")
class Meta:
database = database
legacy_table_names = False
indexes = ((("album", "artist"), True),)
class AlbumDisc(BaseModel):
    """One physical disc of an album (disc number plus its track count)."""

    album_disc_id = AutoField(primary_key=True)
    album = ForeignKeyField(Album, backref="disks")
    disc_number = IntegerField(null=True)
    total_tracks = IntegerField(null=True)

    class Meta:
        database = database
        legacy_table_names = False
        # A disc number may appear only once per album.
        indexes = ((("album", "disc_number"), True),)

    def to_json(self) -> Dict[str, Union[str, int]]:
        """JSON-serializable view of this disc (dereferences the album FK)."""
        return {
            "album_disc_id": self.album_disc_id,
            # Bug fix: Album has no `name` field -- `self.album.name` raised
            # AttributeError at runtime; the album's display field is `title`.
            "album": self.album.title,
            "album_id": self.album.album_id,
            "disc_number": self.disc_number,
            "total_tracks": self.total_tracks,
        }
class AlbumImage(BaseModel):
album_image_id = AutoField(primary_key=True)
album = ForeignKeyField(Album, backref="images", unique=True)
small = BlobField(null=True)
not_found = BooleanField(default=False)
class Track(BaseModel):
track_id = AutoField(primary_key=True)
title = CharField()
album = ForeignKeyField(Album, backref="tracks")
album_disc = ForeignKeyField(AlbumDisc, backref="tracks")
artist = ForeignKeyField(Artist, backref="tracks")
track_number = IntegerField(null=True)
disc_number = IntegerField(null=True)
genre = CharField(null=True)
compilation = BooleanField(default=False)
length = FloatField()
mimetype = CharField()
codec = CharField()
bitrate = IntegerField()
size = IntegerField()
file_path = CharField()
class Meta:
database = database
legacy_table_names = False
indexes = ((("title", "album", "file_path"), True),)
@hybrid_property
def sort_title(self):
return str(self.title).lower().replace("the", "", 1).strip()
@sort_title.expression
def sort_title(cls):
return Case(
None,
[
(
fn.LOWER(fn.SUBSTR(cls.title, 1, 3)) == "the",
fn.TRIM(fn.REPLACE(fn.LOWER(cls.title), "the", ""), " "),
),
],
fn.LOWER(cls.title),
)
def to_json(self) -> Dict[str, Union[str, int]]:
return {
"track_id": self.track_id,
"title": self.title,
"sort_title": self.sort_title,
"album": self.album.title,
"sort_album": self.album.sort_title,
"album_id": self.album.album_id,
"album_disc": self.album_disc.album_disc_id,
"artist": self.artist.name,
"sort_artist": self.artist.sort_name,
"artist_id": self.artist.artist_id,
"track_number": self.track_number,
"genre": self.genre,
"year": self.album.year,
"compilation": self.compilation,
"disc_number": self.disc_number,
"length": self.length,
"mimetype": self.mimetype,
"codec": self.codec,
"bitrate": self.bitrate,
"size": self.size,
}
class User(BaseModel):
    """An application account; passwords are stored as werkzeug hashes."""

    user_id = AutoField(primary_key=True)
    username = CharField(unique=True)
    # Reconstructed from a redacted dump ("<PASSWORD>()" is not valid
    # Python): check_password_hash() below expects a hash *string*, so the
    # column is a CharField holding a werkzeug password hash.
    password = CharField()
    # JSON-encoded user preferences; NULL means "never saved".
    settings = TextField(null=True)

    class Meta:
        database = database
        legacy_table_names = False
        order_by = ("username",)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Transient flag, not persisted -- presumably toggled by the login
        # flow elsewhere; confirm.
        self._authenticated = False

    @classmethod
    def default_settings(cls) -> Dict[str, str]:
        """Settings applied to users who have never saved any."""
        return {"theme_name": "dark", "primary_color": "#15dea5"}

    @classmethod
    def authenticate(cls, **kwargs):
        """Return the matching User, or None on any failure.

        Missing credentials, an unknown username, and a wrong password are
        all reported uniformly as None, so callers cannot tell which check
        failed (deliberate: avoids leaking which usernames exist).
        """
        username = kwargs.get("username")
        password = kwargs.get("password")
        if not username or not password:
            return None
        try:
            user = cls.get(username=username)
        except DoesNotExist:
            return None
        if not check_password_hash(user.password, password):
            return None
        return user

    def to_json(self) -> Dict[str, Union[str, int]]:
        """Serialize the user; falls back to default settings when unset."""
        settings = self.settings
        if not settings:
            settings = json.dumps(self.default_settings())
        # noinspection PyTypeChecker
        return {
            "user_id": self.user_id,
            "username": self.username,
            "settings": json.loads(settings),
        }
class Playlist(BaseModel):
playlist_id = AutoField(primary_key=True)
playlist_name = CharField()
track = ForeignKeyField(Track)
user = ForeignKeyField(User, backref="playlists")
order = IntegerField()
class Meta:
database = database
legacy_table_names = False
indexes = ((("playlist_name", "track", "user"), True),)
def to_json(self) -> Dict[str, Union[str, int]]:
return {
"playlist_name": self.playlist_name,
"track_id": self.track.track_id,
"user_id": self.user.user_id,
"order": self.order,
}
class LovedTrack(BaseModel):
loved_track_id = AutoField(primary_key=True)
track = ForeignKeyField(Track)
user = ForeignKeyField(User, backref="loved_tracks")
class Meta:
database = database
legacy_table_names = False
indexes = ((("track", "user"), True),)
class LovedAlbum(BaseModel):
loved_album_id = AutoField(primary_key=True)
album = ForeignKeyField(Album)
user = ForeignKeyField(User, backref="loved_albums")
class Meta:
database = database
legacy_table_names = False
indexes = ((("album", "user"), True),)
class LovedArtist(BaseModel):
loved_artist_id = AutoField(primary_key=True)
artist = ForeignKeyField(Artist)
user = ForeignKeyField(User, backref="loved_artists")
class Meta:
database = database
legacy_table_names = False
indexes = ((("artist", "user"), True),)
def create_tables() -> None:
with database:
database.create_tables(
[
Artist,
ArtistImage,
ArtistInfoMismatches,
Album,
AlbumArtist,
AlbumDisc,
AlbumImage,
LovedTrack,
LovedAlbum,
LovedArtist,
Track,
Playlist,
User,
]
)
if __name__ == "__main__":
create_tables() | 0.649912 | 0.201283 |
import mock
import unittest2
from squirrel_api import VoicemailUser, VoicemailMessage, VoicemailSuperUser
from squirrel_api.exceptions import SquirrelException, SquirrelApiException
from contextlib import contextmanager
class SquirrelUserAPI(unittest2.TestCase):
    """Tests for the squirrel_api voicemail wrappers.

    Every test swaps the HTTP connection for a mock whose response body is a
    canned XML fixture under tests/data/.
    NOTE(review): the "<PASSWORD>" literals are redactions left by the dump
    this file was recovered from; the original fixture tokens are lost.
    """

    def setUp(self):
        pass

    @contextmanager
    def set_response(self, response_file):
        """Patch the API connection so getresponse() returns the fixture.

        The open file object stands in for an HTTP response: both expose
        read(), which is all the response parser needs.
        """
        with mock.patch('squirrel_api.api.SquirrelAPIResource.get_connection') as m:
            with open(response_file) as f:
                m().getresponse.return_value = f
                yield

    def test_valid_auth(self):
        with self.set_response('tests/data/login_successful.xml'):
            user = VoicemailUser(12345)
            token = user.login(123456)
            self.assertEqual(token, 'TESTTOKEN')

    def test_invalid_auth(self):
        with self.set_response('tests/data/login_invalid.xml'):
            user = VoicemailUser(12345)
            with self.assertRaises(SquirrelApiException) as e:
                user.login(123456)
            self.assertEqual(e.exception.error_code, 2101)

    def test_unsuccessful_auth(self):
        with self.set_response('tests/data/login_unsuccessful.xml'):
            user = VoicemailUser(12345)
            with self.assertRaises(SquirrelApiException) as e:
                user.login(123456)
            self.assertEqual(e.exception.error_code, 2205)

    def test_list_messages(self):
        user = VoicemailUser(12345)
        user.token = "<PASSWORD>"
        with self.set_response('tests/data/list_messages.xml'):
            messages = user.get_messages()
            for m in messages:
                self.assertIsInstance(m, VoicemailMessage)

    def test_mailbox_exist_true(self):
        su = VoicemailSuperUser(12345)
        su.token = "<PASSWORD>"
        with self.set_response('tests/data/mailbox_exist_true.xml'):
            exist = su.mailbox_exist('12121')
            self.assertTrue(exist)

    def test_mailbox_exist_false(self):
        su = VoicemailSuperUser(12345)
        su.token = "<PASSWORD>"
        with self.set_response('tests/data/mailbox_exist_false.xml'):
            exist = su.mailbox_exist('12121')
            self.assertFalse(exist)

    def test_mailbox_exist_wrong(self):
        su = VoicemailSuperUser(12345)
        su.token = "<PASSWORD>"
        with self.set_response('tests/data/mailbox_exist_wrong.xml'):
            # Only the raised exception matters; the return value is unused.
            with self.assertRaises(SquirrelException):
                su.mailbox_exist('123123')
import unittest2
from squirrel_api import VoicemailUser, VoicemailMessage, VoicemailSuperUser
from squirrel_api.exceptions import SquirrelException, SquirrelApiException
from contextlib import contextmanager
class SquirrelUserAPI(unittest2.TestCase):
def setUp(self):
pass
@contextmanager
def set_response(self, response_file):
with mock.patch('squirrel_api.api.SquirrelAPIResource.get_connection') as m:
with open(response_file) as f:
m().getresponse.return_value = f
yield
def test_valid_auth(self):
with self.set_response('tests/data/login_successful.xml'):
user = VoicemailUser(12345)
token = user.login(123456)
self.assertEqual(token, 'TESTTOKEN')
def test_invalid_auth(self):
with self.set_response('tests/data/login_invalid.xml'):
user = VoicemailUser(12345)
with self.assertRaises(SquirrelApiException) as e:
user.login(123456)
self.assertEqual(e.exception.error_code, 2101)
def test_unsuccessful_auth(self):
with self.set_response('tests/data/login_unsuccessful.xml'):
user = VoicemailUser(12345)
with self.assertRaises(SquirrelApiException) as e:
user.login(123456)
self.assertEqual(e.exception.error_code, 2205)
def test_list_messages(self):
user = VoicemailUser(12345)
user.token = "<PASSWORD>"
with self.set_response('tests/data/list_messages.xml'):
messages = user.get_messages()
for m in messages:
self.assertIsInstance(m, VoicemailMessage)
def test_mailbox_exist_true(self):
su = VoicemailSuperUser(12345)
su.token = "<PASSWORD>"
with self.set_response('tests/data/mailbox_exist_true.xml'):
exist = su.mailbox_exist('12121')
self.assertTrue(exist)
def test_mailbox_exist_false(self):
su = VoicemailSuperUser(12345)
su.token = "<PASSWORD>"
with self.set_response('tests/data/mailbox_exist_false.xml'):
exist = su.mailbox_exist('12121')
self.assertFalse(exist)
def test_mailbox_exist_wrong(self):
su = VoicemailSuperUser(12345)
su.token = "<PASSWORD>"
with self.set_response('tests/data/mailbox_exist_wrong.xml'):
with self.assertRaises(SquirrelException) as e:
exist = su.mailbox_exist('123123') | 0.443118 | 0.1933 |
import time
from functools import lru_cache
from trie import PrefixTree
class NumberConverter(object):
    """Maps phone-keypad digit strings (digits 2-9) to real English words.

    A dictionary file is loaded into a prefix trie once at construction;
    the keypad tables are class-level constants so they are built once
    instead of on every call.
    """

    # Keypad digit -> letters it can stand for (0 and 1 map to no letters).
    _DIGIT_TO_CHARS = {
        '2': ['a', 'b', 'c'],
        '3': ['d', 'e', 'f'],
        '4': ['g', 'h', 'i'],
        '5': ['j', 'k', 'l'],
        '6': ['m', 'n', 'o'],
        '7': ['p', 'q', 'r', 's'],
        '8': ['t', 'u', 'v'],
        '9': ['w', 'x', 'y', 'z'],
    }
    # letter -> digit as a str.translate table: one C-level pass per word.
    # This replaces the previous lru_cache-decorated method -- caching on an
    # instance method keys on `self` and keeps every instance alive in the
    # cache (ruff B019); translate is cheap enough to need no cache at all.
    _CHAR_TO_DIGIT = str.maketrans(
        'abcdefghijklmnopqrstuvwxyz',
        '22233344455566677778889999',
    )

    def __init__(self):
        self.trie = PrefixTree()
        with open('words_en.txt') as file:
            for line in file:
                self.trie.insert(line.rstrip('\n'))

    def number_to_valid_phone_words(self, num):
        """Return all dictionary words whose keypad spelling equals `num`.

        Raises ValueError (a subclass of Exception, so existing callers keep
        working) when `num` contains 0 or 1, which map to no letters.
        """
        if '1' in num or '0' in num:
            raise ValueError('Numbers with 1 and 0 are currently not supported.')
        # 1: collect candidate words of the right length from the trie,
        # seeded by every letter the first digit can represent.
        candidates = []
        for first_char in self.num_to_chars(num[0]):
            candidates.extend(self.trie.starts_with(first_char, len(num)))
        # 2+3: keep only words whose digit translation matches exactly.
        return [word for word in candidates if self.words_to_nums(word) == num]

    @staticmethod
    def num_to_chars(num):
        """Letters for one keypad digit, or None for digits without any."""
        return NumberConverter._DIGIT_TO_CHARS.get(num)

    def words_to_nums(self, word):
        """Translate a word to its keypad digits, e.g. 'cat' -> '228'."""
        return word.translate(self._CHAR_TO_DIGIT)
# Ad-hoc benchmark: run the same queries twice; the second pass shows the
# effect of warm caches on repeated lookups.
converter = NumberConverter()
print('****First Run****')
for n in ['228', '888', '2382']:
    start = time.time()
    print(n, converter.number_to_valid_phone_words(n))
    end = time.time()
    print('Processing time in milliseconds:', int((end - start) * 1000))
print('****Second Run****')
for n in ['228', '888', '2382']:
    start = time.time()
    print(n, converter.number_to_valid_phone_words(n))
    end = time.time()
    print('Processing time in milliseconds:', int((end - start) * 1000))
from functools import lru_cache
from trie import PrefixTree
class NumberConverter(object):
    """Maps phone-keypad digit strings (digits 2-9) to real English words.

    A dictionary file is loaded into a prefix trie once at construction;
    the keypad tables are class-level constants so they are built once
    instead of on every call.
    """

    # Keypad digit -> letters it can stand for (0 and 1 map to no letters).
    _DIGIT_TO_CHARS = {
        '2': ['a', 'b', 'c'],
        '3': ['d', 'e', 'f'],
        '4': ['g', 'h', 'i'],
        '5': ['j', 'k', 'l'],
        '6': ['m', 'n', 'o'],
        '7': ['p', 'q', 'r', 's'],
        '8': ['t', 'u', 'v'],
        '9': ['w', 'x', 'y', 'z'],
    }
    # letter -> digit as a str.translate table: one C-level pass per word.
    # This replaces the previous lru_cache-decorated method -- caching on an
    # instance method keys on `self` and keeps every instance alive in the
    # cache (ruff B019); translate is cheap enough to need no cache at all.
    _CHAR_TO_DIGIT = str.maketrans(
        'abcdefghijklmnopqrstuvwxyz',
        '22233344455566677778889999',
    )

    def __init__(self):
        self.trie = PrefixTree()
        with open('words_en.txt') as file:
            for line in file:
                self.trie.insert(line.rstrip('\n'))

    def number_to_valid_phone_words(self, num):
        """Return all dictionary words whose keypad spelling equals `num`.

        Raises ValueError (a subclass of Exception, so existing callers keep
        working) when `num` contains 0 or 1, which map to no letters.
        """
        if '1' in num or '0' in num:
            raise ValueError('Numbers with 1 and 0 are currently not supported.')
        # 1: collect candidate words of the right length from the trie,
        # seeded by every letter the first digit can represent.
        candidates = []
        for first_char in self.num_to_chars(num[0]):
            candidates.extend(self.trie.starts_with(first_char, len(num)))
        # 2+3: keep only words whose digit translation matches exactly.
        return [word for word in candidates if self.words_to_nums(word) == num]

    @staticmethod
    def num_to_chars(num):
        """Letters for one keypad digit, or None for digits without any."""
        return NumberConverter._DIGIT_TO_CHARS.get(num)

    def words_to_nums(self, word):
        """Translate a word to its keypad digits, e.g. 'cat' -> '228'."""
        return word.translate(self._CHAR_TO_DIGIT)
converter = NumberConverter()
print('****First Run****')
for n in ['228', '888', '2382']:
start = time.time()
print(n, converter.number_to_valid_phone_words(n))
end = time.time()
print('Processing time in milliseconds:', int((end - start) * 1000))
print('****Second Run****')
for n in ['228', '888', '2382']:
start = time.time()
print(n, converter.number_to_valid_phone_words(n))
end = time.time()
print('Processing time in milliseconds:', int((end - start) * 1000)) | 0.598899 | 0.36376 |
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import forestci as fci
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
# When True, picp() treats incoming y_std values as full confidence-interval
# widths and halves them to get a one-sided deviation.
use_ci = True
# Per-fold test RMSEs for modality 0 / modality 1. Appended to by
# boost_ensemble() across folds and summarized by train_ensemble() --
# module-level on purpose so they accumulate across calls.
mod0_rmse = []
mod1_rmse = []
def picp(y_true, y_pred, y_std, sigma=1):
    """Prediction-interval coverage: percentage of predictions that fall
    within sigma * y_std of the corresponding true value."""
    if use_ci:
        # y_std holds full interval widths; halve to a one-sided deviation.
        y_std = y_std / 2
    covered = sum(
        1
        for truth, pred, std in zip(y_true, y_pred, y_std)
        if truth - sigma * std <= pred <= truth + sigma * std
    )
    return 100 * covered / len(y_true)
def mpiw(y_std):
    """Mean prediction-interval width: the average of the supplied widths."""
    widths = np.asarray(y_std, dtype=float)
    return widths.mean()
def train_rfg(X_train, y_train, X_test, y_test, sample_weight=None, uncertainty=False):
    """Fit one 300-tree random forest and return its predictions, jackknife
    variances, and the per-sample weights for the next boosting stage.

    Returns:
        preds: [train predictions, test predictions]
        variance: [train variances, test variances] (forestci estimates)
        sws: next-stage sample weights -- the variances when uncertainty is
             True, otherwise the squared residuals.
    """
    rfg = RandomForestRegressor(n_estimators=300, random_state=0).fit(X_train, y_train, sample_weight)
    preds = [rfg.predict(X_train), rfg.predict(X_test)]
    # Infinitesimal-jackknife variance estimates from the forestci package.
    variance_tr = fci.random_forest_error(rfg, X_train, X_train)
    variance_te = fci.random_forest_error(rfg, X_train, X_test)
    if uncertainty:
        sw_tr = variance_tr
        sw_te = variance_te
    else:
        sw_tr = (preds[0]-y_train)**2
        sw_te = (preds[1]-y_test)**2
    variance = [variance_tr, variance_te]
    sws = [sw_tr, sw_te]
    # print("Train rmse: ", mean_squared_error(preds[0], y_train, squared=False))
    # print("Test rmse: ", mean_squared_error(preds[1], y_test, squared=False))
    return preds, variance, sws
def boost_ensemble(X_train, y_train, X_test, y_test, boosting=False, uncertainty=False):
    """Train one random forest per modality, optionally weighting each stage
    by the previous modality's errors/variances ("boosting").

    Side effect: appends each modality's test RMSE to the module-level
    mod0_rmse / mod1_rmse lists -- the i==0/else split assumes exactly two
    modalities (presumably intentional for this dataset; confirm).

    Returns stacked arrays: train preds, test preds, train variances,
    test variances (one row per modality).
    """
    sample_weight = None  # NOTE(review): unused; weights come from sample_weights_tr
    results = [0]*len(X_train)  # NOTE(review): unused
    all_train_preds = []
    all_test_preds = []
    # Seed: uniform weights for the first modality.
    sample_weights_tr = [np.asarray([1]*len(X_train[0]))]
    sample_weights_te = [np.asarray([1]*len(X_test[0]))]
    variance_tr = []
    variance_te = []
    for i in range(len(X_train)):
        # print("Modality ", i)
        Xf_train = X_train[i]
        Xf_test = X_test[i]
        if not boosting:
            preds, variances, sws = train_rfg(Xf_train, y_train, Xf_test, y_test, sample_weight=None, uncertainty=uncertainty)
        else:
            # Boost: weight samples by the previous stage's error/variance.
            preds, variances, sws = train_rfg(Xf_train, y_train, Xf_test, y_test, sample_weight=sample_weights_tr[-1], uncertainty=uncertainty)
        all_train_preds.append(preds[0])
        all_test_preds.append(preds[1])
        if i==0:
            mod0_rmse.append(mean_squared_error(preds[1], y_test, squared=False))
        else:
            mod1_rmse.append(mean_squared_error(preds[1], y_test, squared=False))
        sample_weights_tr.append(sws[0])
        sample_weights_te.append(sws[1])
        variance_tr.append(variances[0])
        variance_te.append(variances[1])
        # print("-"*30)
    return np.asarray(all_train_preds), np.asarray(all_test_preds), np.asarray(variance_tr), np.asarray(variance_te)
def pprint(p, curr):
    """Print prefix `p` followed by per-column "mean +/- std, " summaries
    computed over axis 0 of `curr`.

    NOTE: the name shadows the stdlib pprint module; kept for callers.
    """
    means = np.mean(curr, axis=0)
    deviations = np.std(curr, axis=0)
    summary = "".join(
        "{:.3f} +/- {:.3f}, ".format(mean, dev)
        for mean, dev in zip(means, deviations)
    )
    print(p + summary)
def train_ensemble(X, y, cols, boosting=True, uncertainty=True):
    """5-fold CV driver: trains the per-modality ensemble on each fold and
    prints per-modality RMSE/MPIW/PICP plus unweighted and inverse-variance
    weighted ensemble results.

    Args (presumably -- confirm at call sites): X is a pandas DataFrame,
    y an indexable array of targets, cols a list of per-modality column
    lists. Reads the module-level mod0_rmse/mod1_rmse filled by
    boost_ensemble(); assumes exactly two modalities throughout.

    Returns pooled (across folds) test targets, unweighted predictions,
    weighted predictions, and per-modality standard deviations.
    """
    uw_tr_rmse = []
    uw_te_rmse = []
    w_tr_rmse = []
    w_te_rmse = []
    mod0_mpiw=[]
    mod1_mpiw=[]
    mod0_picp=[]
    mod1_picp=[]
    mod0_picps_uw = []
    mod0_picps_w = []
    mod1_picps_uw = []
    mod1_picps_w = []
    all_y_te = []
    all_y_pred_uw = []
    all_y_pred_w = []
    all_y_std0 = []
    all_y_std1 = []
    kf = KFold(n_splits=5, shuffle=True, random_state=0)
    if boosting==True and uncertainty==True:
        print("*"*20, "UA ENSEMBLE", "*"*20)
    if boosting==True and uncertainty==False:
        print("*"*20, "VANILLA ENSEMBLE", "*"*20)
    fold=1
    for train_index, test_index in kf.split(X):
        print("Fold ", fold)
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = np.asarray(y[train_index]), np.asarray(y[test_index])
        # Split the feature frame into one array per modality.
        X_train_ens = []
        X_test_ens = []
        for i in range(len(cols)):
            X_train_ens.append(np.asarray(X_train[cols[i]]))
            X_test_ens.append(np.asarray(X_test[cols[i]]))
        tr, te, sw_tr, sw_te = boost_ensemble(X_train_ens, y_train, X_test_ens, y_test, boosting=boosting, uncertainty=uncertainty)
        all_y_te.extend(y_test)
        all_y_std0.extend(sw_te[0])
        all_y_std1.extend(sw_te[1])
        mod0_mpiw.append(mpiw(np.sqrt(np.abs(sw_te[0]))))
        mod1_mpiw.append(mpiw(np.sqrt(np.abs(sw_te[1]))))
        # Per-modality PICP at 1/2/3 sigma.
        tmp0=[]
        tmp1=[]
        for sig in range(1,4):
            tmp0.append(picp(y_test, te[0, :], np.sqrt(sw_te)[0, :], sig))
            tmp1.append(picp(y_test, te[1, :], np.sqrt(sw_te)[1, :], sig))
        mod0_picp.append(tmp0)
        mod1_picp.append(tmp1)
        # Inverse-variance weights, normalized across modalities.
        sw_tr_ = 1/np.asarray(sw_tr)
        sw_te_ = 1/np.asarray(sw_te)
        w_tr = sw_tr_/np.sum(sw_tr_, axis=0)
        w_te = sw_te_/np.sum(sw_te_, axis=0)
        uw_tr_rmse.append(mean_squared_error(np.mean(tr, axis=0), y_train, squared=False))
        uw_te_rmse.append(mean_squared_error(np.mean(te, axis=0), y_test, squared=False))
        w_tr_rmse.append(mean_squared_error(np.sum(tr*w_tr, axis=0), y_train, squared=False))
        w_te_rmse.append(mean_squared_error(np.sum(te*w_te, axis=0), y_test, squared=False))
        all_y_pred_uw.extend(np.mean(te, axis=0))
        all_y_pred_w.extend(np.sum(te*w_te, axis=0))
        # Ensemble PICP (unweighted mean prediction) at 1/2/3 sigma,
        # scored against each modality's own std separately.
        tmp0=[]
        tmp1=[]
        for sig in range(1,4):
            tmp0.append(picp(y_test, np.mean(te, axis=0), np.sqrt(sw_te)[0, :], sig))
            tmp1.append(picp(y_test, np.mean(te, axis=0), np.sqrt(sw_te)[1, :], sig))
        mod0_picps_uw.append(tmp0)
        mod1_picps_uw.append(tmp1)
        # Ensemble PICP (inverse-variance weighted prediction).
        tmp0=[]
        tmp1=[]
        for sig in range(1,4):
            tmp0.append(picp(y_test, np.sum(te*w_te, axis=0), np.sqrt(sw_te)[0, :], sig))
            tmp1.append(picp(y_test, np.sum(te*w_te, axis=0), np.sqrt(sw_te)[1, :], sig))
        mod0_picps_w.append(tmp0)
        mod1_picps_w.append(tmp1)
        fold+=1
    # --- Summary reporting over all folds ---
    print("*"*50)
    print("*"*20,"Results", "*"*20)
    print("Mod 0 Test RMSE: {:.3f} +/- {:.3f}".format(np.mean(mod0_rmse), np.std(mod0_rmse)))
    print("Mod 1 Test RMSE: {:.3f} +/- {:.3f}".format(np.mean(mod1_rmse), np.std(mod1_rmse)))
    print("Mod 0 MPIW: {:.3f} +/- {:.3f}, ".format(np.mean(mod0_mpiw), np.std(mod0_mpiw)))
    print("Mod 1 MPIW: {:.3f} +/- {:.3f}, ".format(np.mean(mod1_mpiw), np.std(mod1_mpiw)))
    pprint("Mod 0 PICP: ", mod0_picp)
    pprint("Mod 1 PICP: ", mod1_picp)
    print("*"*50)
    print("Unweighted")
    print("Train ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(uw_tr_rmse), np.std(uw_tr_rmse)))
    print("Test ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(uw_te_rmse), np.std(uw_te_rmse)))
    print("Train ensemble RMSEs: {}".format(uw_tr_rmse))
    print("Test ensemble RMSEs: {}".format(uw_te_rmse))
    pprint("Mod 0 PICP: ", mod0_picps_uw)
    pprint("Mod 1 PICP: ", mod1_picps_uw)
    print("*"*50)
    print("Weighted")
    print("Train ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(w_tr_rmse), np.std(w_tr_rmse)))
    print("Test ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(w_te_rmse), np.std(w_te_rmse)))
    print("Train ensemble RMSEs: {}".format(w_tr_rmse))
    print("Test ensemble RMSEs: {}".format(w_te_rmse))
    pprint("Mod 0 PICP: ", mod0_picps_w)
    pprint("Mod 1 PICP: ", mod1_picps_w)
    print("\n")
    return all_y_te, all_y_pred_uw, all_y_pred_w, np.sqrt(all_y_std0), np.sqrt(all_y_std1)
def plot_empirical_rule(mus, sigmas, true_values, model_name):
    """Plot an empirical calibration curve and save it to `model_name`.

    For each nominal interval (10%..90% of a normal distribution, via the
    z-score thresholds below) plot the observed fraction of true values
    inside that interval, per cluster, against the ideal diagonal.

    NOTE(review): the `cluster_id = 1 - cluster_id` flip assumes exactly two
    clusters (index 0 = Frequency, 1 = Amplitude) and plots them in reverse
    order; `sigmas` must support 2-D indexing (e.g. a numpy array).
    """
    # z-scores whose two-sided standard-normal mass is 10%, 20%, ..., 90%.
    thresholds = [0.12566, 0.25335, 0.38532, 0.52440, 0.67339, 0.84162, 1.03643, 1.28155, 1.64485]
    values = [[] for i in range(len(sigmas))]
    threshold_values = [10, 20, 30, 40, 50, 60, 70, 80, 90]
    fig, ax = plt.subplots()
    ideal = [i for i in range(10, 100, 10)]
    # Legend title inferred from the output file name.
    if 'vanilla' in model_name:
        model_name_ = 'Vanilla Ensemble'
    elif 'weighted' in model_name:
        model_name_ = 'UA Ensemble(weighted)'
    else:
        model_name_ = 'UA Ensemble'
    # Invisible point whose label acts as a legend heading.
    plt.plot([], [], ' ', label=model_name_, color='white')
    plt.plot(ideal, ideal, label='Ideal Calibration', linewidth=2, color='black', linestyle=':')
    for cluster_id in range(len(sigmas)):
        cluster_id = 1 - cluster_id
        print('Cluster {}'.format(cluster_id + 1))
        for t in thresholds:
            count = 0
            for i in range(len(mus)):
                if np.abs(mus[i] - true_values[i]) <= t * sigmas[cluster_id, i]:
                    count += 1
            values[cluster_id].append(count)
        # Convert the raw counts to percentages of the sample.
        values[cluster_id] = np.array(values[cluster_id]) * 100 / len(mus)
        plt.scatter(threshold_values, values[cluster_id], s=96)
        if cluster_id == 1:
            plt.plot(threshold_values, values[cluster_id], label='Amplitude', linewidth=3)
        else:
            plt.plot(threshold_values, values[cluster_id], label='Frequency', linewidth=3)
    plt.ylabel('% of True Values inside Interval', fontsize=18)
    plt.xlabel('% of Prediction Interval around Mean', fontsize=18)
    plt.xticks(range(10, 100, 10))
    plt.yticks(range(10, 100, 10))
    ax.tick_params(axis="x", labelsize=18)
    ax.tick_params(axis="y", labelsize=18)
    plt.legend(fontsize=15, loc='lower right')
    plt.tight_layout(pad=0)
    plt.savefig(model_name, dpi=300)
    plt.clf()
    plt.close()
from sklearn.metrics import mean_squared_error
import forestci as fci
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
use_ci = True
mod0_rmse = []
mod1_rmse = []
def picp(y_true, y_pred, y_std, sigma=1):
cnt = 0
if use_ci:
y_std = y_std / 2
for i in range(len(y_true)):
if (y_true[i] - sigma*y_std[i] <= y_pred[i]) and (y_pred[i] <= y_true[i] + sigma*y_std[i]):
cnt = cnt + 1
return 100 * cnt / (len(y_true))
def mpiw(y_std):
return np.mean(y_std)
def train_rfg(X_train, y_train, X_test, y_test, sample_weight=None, uncertainty=False):
rfg = RandomForestRegressor(n_estimators=300, random_state=0).fit(X_train, y_train, sample_weight)
preds = [rfg.predict(X_train), rfg.predict(X_test)]
variance_tr = fci.random_forest_error(rfg, X_train, X_train)
variance_te = fci.random_forest_error(rfg, X_train, X_test)
if uncertainty:
sw_tr = variance_tr
sw_te = variance_te
else:
sw_tr = (preds[0]-y_train)**2
sw_te = (preds[1]-y_test)**2
variance = [variance_tr, variance_te]
sws = [sw_tr, sw_te]
# print("Train rmse: ", mean_squared_error(preds[0], y_train, squared=False))
# print("Test rmse: ", mean_squared_error(preds[1], y_test, squared=False))
return preds, variance, sws
def boost_ensemble(X_train, y_train, X_test, y_test, boosting=False, uncertainty=False):
    """Train one forest per modality, optionally boosting on the previous one's weights.

    ``X_train``/``X_test`` are lists of per-modality feature matrices.  Side
    effect: appends each modality's test RMSE to the module-level
    ``mod0_rmse`` / ``mod1_rmse`` lists (only two modalities are
    distinguished -- anything past index 0 goes to ``mod1_rmse``).

    Returns stacked arrays: (train preds, test preds, train variances,
    test variances), each shaped (n_modalities, n_samples).
    """
    sample_weight = None  # NOTE(review): unused; weights come from sample_weights_tr
    results = [0] * len(X_train)  # NOTE(review): also unused
    all_train_preds = []
    all_test_preds = []
    # Seed the boosting chain with uniform weights.
    sample_weights_tr = [np.asarray([1] * len(X_train[0]))]
    sample_weights_te = [np.asarray([1] * len(X_test[0]))]
    variance_tr = []
    variance_te = []
    for i in range(len(X_train)):
        # print("Modality ", i)
        Xf_train = X_train[i]
        Xf_test = X_test[i]
        if not boosting:
            preds, variances, sws = train_rfg(Xf_train, y_train, Xf_test, y_test, sample_weight=None, uncertainty=uncertainty)
        else:
            # Boost: weight this modality's fit by the previous modality's weights.
            preds, variances, sws = train_rfg(Xf_train, y_train, Xf_test, y_test, sample_weight=sample_weights_tr[-1], uncertainty=uncertainty)
        all_train_preds.append(preds[0])
        all_test_preds.append(preds[1])
        if i == 0:
            mod0_rmse.append(mean_squared_error(preds[1], y_test, squared=False))
        else:
            mod1_rmse.append(mean_squared_error(preds[1], y_test, squared=False))
        sample_weights_tr.append(sws[0])
        sample_weights_te.append(sws[1])
        variance_tr.append(variances[0])
        variance_te.append(variances[1])
        # print("-"*30)
    return np.asarray(all_train_preds), np.asarray(all_test_preds), np.asarray(variance_tr), np.asarray(variance_te)
def pprint(p, curr):
    """Print the per-column mean +/- std of ``curr``, prefixed with ``p``.

    ``curr`` is a 2-D array-like (e.g. per-fold rows); statistics are taken
    along axis 0.  Output format per column: ``"{mean:.3f} +/- {std:.3f}, "``.
    """
    means = np.mean(curr, axis=0)
    stds = np.std(curr, axis=0)
    # join() instead of repeated string += (which is quadratic in the
    # number of columns); output is byte-identical to the old loop.
    body = ''.join('{:.3f} +/- {:.3f}, '.format(m, s) for m, s in zip(means, stds))
    print(p + body)
def train_ensemble(X, y, cols, boosting=True, uncertainty=True):
    """5-fold cross-validated training/evaluation of the two-modality ensemble.

    Args:
        X: DataFrame-like supporting ``.iloc`` row selection and column
           selection by ``X[cols[i]]`` -- presumably a pandas DataFrame
           (TODO confirm against caller).
        y: array-like of targets, indexable by fold indices.
        cols: list of per-modality column subsets.
        boosting, uncertainty: forwarded to ``boost_ensemble``.

    Prints per-modality and ensemble (unweighted / inverse-variance
    weighted) RMSE and PICP summaries, then returns
    ``(true values, unweighted preds, weighted preds, std modality 0,
    std modality 1)`` concatenated over all test folds.

    NOTE: also reads the module-level ``mod0_rmse``/``mod1_rmse`` lists,
    which accumulate across calls in the same process.
    """
    uw_tr_rmse = []
    uw_te_rmse = []
    w_tr_rmse = []
    w_te_rmse = []
    mod0_mpiw = []
    mod1_mpiw = []
    mod0_picp = []
    mod1_picp = []
    mod0_picps_uw = []
    mod0_picps_w = []
    mod1_picps_uw = []
    mod1_picps_w = []
    all_y_te = []
    all_y_pred_uw = []
    all_y_pred_w = []
    all_y_std0 = []
    all_y_std1 = []
    kf = KFold(n_splits=5, shuffle=True, random_state=0)
    if boosting == True and uncertainty == True:
        print("*" * 20, "UA ENSEMBLE", "*" * 20)
    if boosting == True and uncertainty == False:
        print("*" * 20, "VANILLA ENSEMBLE", "*" * 20)
    fold = 1
    for train_index, test_index in kf.split(X):
        print("Fold ", fold)
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = np.asarray(y[train_index]), np.asarray(y[test_index])
        # Split features into one matrix per modality.
        X_train_ens = []
        X_test_ens = []
        for i in range(len(cols)):
            X_train_ens.append(np.asarray(X_train[cols[i]]))
            X_test_ens.append(np.asarray(X_test[cols[i]]))
        tr, te, sw_tr, sw_te = boost_ensemble(X_train_ens, y_train, X_test_ens, y_test, boosting=boosting, uncertainty=uncertainty)
        all_y_te.extend(y_test)
        all_y_std0.extend(sw_te[0])
        all_y_std1.extend(sw_te[1])
        # sqrt(variance) -> std; abs guards against tiny negative estimates.
        mod0_mpiw.append(mpiw(np.sqrt(np.abs(sw_te[0]))))
        mod1_mpiw.append(mpiw(np.sqrt(np.abs(sw_te[1]))))
        # Per-modality PICP at 1, 2 and 3 sigma.
        tmp0 = []
        tmp1 = []
        for sig in range(1, 4):
            tmp0.append(picp(y_test, te[0, :], np.sqrt(sw_te)[0, :], sig))
            tmp1.append(picp(y_test, te[1, :], np.sqrt(sw_te)[1, :], sig))
        mod0_picp.append(tmp0)
        mod1_picp.append(tmp1)
        # Inverse-variance weights, normalized across modalities.
        sw_tr_ = 1 / np.asarray(sw_tr)
        sw_te_ = 1 / np.asarray(sw_te)
        w_tr = sw_tr_ / np.sum(sw_tr_, axis=0)
        w_te = sw_te_ / np.sum(sw_te_, axis=0)
        uw_tr_rmse.append(mean_squared_error(np.mean(tr, axis=0), y_train, squared=False))
        uw_te_rmse.append(mean_squared_error(np.mean(te, axis=0), y_test, squared=False))
        w_tr_rmse.append(mean_squared_error(np.sum(tr * w_tr, axis=0), y_train, squared=False))
        w_te_rmse.append(mean_squared_error(np.sum(te * w_te, axis=0), y_test, squared=False))
        all_y_pred_uw.extend(np.mean(te, axis=0))
        all_y_pred_w.extend(np.sum(te * w_te, axis=0))
        # PICP of the unweighted ensemble mean against each modality's std.
        tmp0 = []
        tmp1 = []
        for sig in range(1, 4):
            tmp0.append(picp(y_test, np.mean(te, axis=0), np.sqrt(sw_te)[0, :], sig))
            tmp1.append(picp(y_test, np.mean(te, axis=0), np.sqrt(sw_te)[1, :], sig))
        mod0_picps_uw.append(tmp0)
        mod1_picps_uw.append(tmp1)
        # PICP of the weighted ensemble prediction against each modality's std.
        tmp0 = []
        tmp1 = []
        for sig in range(1, 4):
            tmp0.append(picp(y_test, np.sum(te * w_te, axis=0), np.sqrt(sw_te)[0, :], sig))
            tmp1.append(picp(y_test, np.sum(te * w_te, axis=0), np.sqrt(sw_te)[1, :], sig))
        mod0_picps_w.append(tmp0)
        mod1_picps_w.append(tmp1)
        fold += 1
    print("*" * 50)
    print("*" * 20, "Results", "*" * 20)
    print("Mod 0 Test RMSE: {:.3f} +/- {:.3f}".format(np.mean(mod0_rmse), np.std(mod0_rmse)))
    print("Mod 1 Test RMSE: {:.3f} +/- {:.3f}".format(np.mean(mod1_rmse), np.std(mod1_rmse)))
    print("Mod 0 MPIW: {:.3f} +/- {:.3f}, ".format(np.mean(mod0_mpiw), np.std(mod0_mpiw)))
    print("Mod 1 MPIW: {:.3f} +/- {:.3f}, ".format(np.mean(mod1_mpiw), np.std(mod1_mpiw)))
    pprint("Mod 0 PICP: ", mod0_picp)
    pprint("Mod 1 PICP: ", mod1_picp)
    print("*" * 50)
    print("Unweighted")
    print("Train ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(uw_tr_rmse), np.std(uw_tr_rmse)))
    print("Test ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(uw_te_rmse), np.std(uw_te_rmse)))
    print("Train ensemble RMSEs: {}".format(uw_tr_rmse))
    print("Test ensemble RMSEs: {}".format(uw_te_rmse))
    pprint("Mod 0 PICP: ", mod0_picps_uw)
    pprint("Mod 1 PICP: ", mod1_picps_uw)
    print("*" * 50)
    print("Weighted")
    print("Train ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(w_tr_rmse), np.std(w_tr_rmse)))
    print("Test ensemble RMSE: {:.3f} +/- {:.3f}".format(np.mean(w_te_rmse), np.std(w_te_rmse)))
    print("Train ensemble RMSEs: {}".format(w_tr_rmse))
    print("Test ensemble RMSEs: {}".format(w_te_rmse))
    pprint("Mod 0 PICP: ", mod0_picps_w)
    pprint("Mod 1 PICP: ", mod1_picps_w)
    print("\n")
    return all_y_te, all_y_pred_uw, all_y_pred_w, np.sqrt(all_y_std0), np.sqrt(all_y_std1)
def plot_empirical_rule(mus, sigmas, true_values, model_name):
    """Plot and save a calibration curve for the per-cluster uncertainties.

    ``thresholds`` are the standard-normal z-values whose central interval
    covers 10%, 20%, ..., 90% of the probability mass (e.g. 1.64485 <-> 90%).
    For each threshold we count how many true values fall inside the
    corresponding interval around the prediction and plot that against the
    nominal coverage.  The figure is written to ``model_name`` via savefig.

    Assumes ``sigmas`` is indexable as ``sigmas[cluster_id, i]`` with
    exactly two clusters (labels 'Amplitude'/'Frequency') -- TODO confirm.
    """
    thresholds = [0.12566, 0.25335, 0.38532, 0.52440, 0.67339, 0.84162, 1.03643, 1.28155, 1.64485]
    values = [[] for i in range(len(sigmas))]
    threshold_values = [10, 20, 30, 40, 50, 60, 70, 80, 90]
    fig, ax = plt.subplots()
    ideal = [i for i in range(10, 100, 10)]
    # Pick a legend title based on substrings of the output file name.
    if 'vanilla' in model_name:
        model_name_ = 'Vanilla Ensemble'
    elif 'weighted' in model_name:
        model_name_ = 'UA Ensemble(weighted)'
    else:
        model_name_ = 'UA Ensemble'
    # Invisible series: puts the model name into the legend without data.
    plt.plot([], [], ' ', label=model_name_, color='white')
    plt.plot(ideal, ideal, label='Ideal Calibration', linewidth=2, color='black', linestyle=':')
    for cluster_id in range(len(sigmas)):
        cluster_id = 1 - cluster_id  # iterate clusters in reverse (1 then 0)
        print('Cluster {}'.format(cluster_id + 1))
        for t in thresholds:
            count = 0
            for i in range(len(mus)):
                # True value inside the +/- t*sigma interval around the mean.
                if np.abs(mus[i] - true_values[i]) <= t * sigmas[cluster_id, i]:
                    count += 1
            values[cluster_id].append(count)
        values[cluster_id] = np.array(values[cluster_id]) * 100 / len(mus)
        plt.scatter(threshold_values, values[cluster_id], s=96)
        if cluster_id == 1:
            plt.plot(threshold_values, values[cluster_id], label='Amplitude', linewidth=3)
        else:
            plt.plot(threshold_values, values[cluster_id], label='Frequency', linewidth=3)
    plt.ylabel('% of True Values inside Interval', fontsize=18)
    plt.xlabel('% of Prediction Interval around Mean', fontsize=18)
    plt.xticks(range(10, 100, 10))
    plt.yticks(range(10, 100, 10))
    ax.tick_params(axis="x", labelsize=18)
    ax.tick_params(axis="y", labelsize=18)
    plt.legend(fontsize=15, loc='lower right')
    plt.tight_layout(pad=0)
    plt.savefig(model_name, dpi=300)
    plt.clf()
    # plt.show()
plt.close() | 0.378459 | 0.420362 |
import sys
import itertools
class TipRemoval:
    # Tips are error-prone ends of the reads that do not form a bubble but instead
    # form a path starting in a vertex without incoming edges or ending in a vertex
    # without outgoing edges in the de Bruijn graph.

    def __init__(self, k, reads):
        """Build a de Bruijn graph of (k-1)-mer overlaps from the reads."""
        self.k = k
        # Maximum tip length (in edges) that will be pruned.
        self.threshold = self.k
        # vertex -> [set of successor vertices, incoming-edge count]
        self.graph = {}
        self.paths = {}  # NOTE(review): never used anywhere in this class
        self.edges_removed = 0
        self.build_deBruijn_graph(self.break_reads_into_kmers(reads))

    def break_reads_into_kmers(self, reads):
        """Return every k-mer (window of length k) of every read."""
        break_read = lambda read: [read[j:j + self.k] for j in range(len(read) - self.k + 1)]
        return [kmer for read in reads for kmer in break_read(read)]

    def build_deBruijn_graph(self, kmers):
        """Add one edge prefix->suffix per k-mer; ignores self-loops and duplicates."""
        def add_edge(graph, left, right):
            graph.setdefault(left, [set(), 0])
            graph.setdefault(right, [set(), 0])
            if right not in graph[left][0]:
                graph[left][0].add(right)
                graph[right][1] += 1

        for kmer in kmers:
            left, right = kmer[:-1], kmer[1:]
            if left != right:
                add_edge(self.graph, left, right)

    def remove_tips(self):
        """Prune tip paths shorter than the threshold; return edges removed.

        Starting vertices: a source with a single outgoing edge uses
        find_and_remove_incoming; a branching vertex (>1 outgoing) uses
        find_and_remove_outgoing.  NOTE(review): removal does not decrement
        the incoming counts of downstream vertices, so graph[v][1] becomes
        stale after pruning -- confirm this is acceptable for the counting
        done here.
        """
        for k, v in self.graph.items():
            find_and_remove = None
            if len(v[0]) == 1 and v[1] == 0:
                find_and_remove = self.find_and_remove_incoming
            elif len(v[0]) > 1:
                find_and_remove = self.find_and_remove_outgoing
            else:
                continue
            condition = True
            # Re-scan the successor set until no more tip edges can be removed
            # (the set is mutated, so restart iteration after each removal).
            while condition:
                condition = False
                for edge in v[0]:
                    if find_and_remove(edge, 0):
                        v[0].remove(edge)
                        self.edges_removed += 1
                        condition = True
                        break
        return self.edges_removed

    def find_and_remove_outgoing(self, current, depth):
        """Recursively delete a forward tip path of length < threshold."""
        # Stop at any branching vertex -- tips must be simple paths.
        if self.outgoing_num(current) > 1 or self.incoming_num(current) > 1:
            return False
        if depth == self.threshold:
            return False
        if self.outgoing_num(current) == 0:
            return True
        if self.find_and_remove_outgoing(next(iter(self.graph[current][0])), depth + 1):
            # The successor set holds at most one element here, so pop() is safe.
            self.graph[current][0].pop()
            self.edges_removed += 1
            return True
        return False

    def find_and_remove_incoming(self, current, depth):
        """Recursively delete a tip path hanging off a source vertex."""
        if depth == self.threshold:
            return False
        # Dead end or merge point terminates the tip.
        if self.outgoing_num(current) == 0 or self.incoming_num(current) > 1:
            return True
        if self.find_and_remove_incoming(next(iter(self.graph[current][0])), depth + 1):
            self.graph[current][0].pop()
            self.edges_removed += 1
            return True
        return False

    def incoming_num(self, v):
        # Count of incoming edges recorded at build time (may be stale after pruning).
        return self.graph[v][1]

    def outgoing_num(self, v):
        return len(self.graph[v][0])
if __name__ == "__main__":
k, reads = 15, sys.stdin.read().split()
print(TipRemoval(k, reads).remove_tips()) | week3/Tip Removal.py | import sys
import itertools
class TipRemoval:
# Tips are error-prone ends of the reads that do not form a bubble but instead
# form a path starting in a vertex without incoming edges or ending in a vertex
# without outgoing edges in the de Bruijn graph.
def __init__(self, k, reads):
self.k = k
self.threshold = self.k
self.graph = {}
self.paths = {}
self.edges_removed = 0
self.build_deBruijn_graph(self.break_reads_into_kmers(reads))
def break_reads_into_kmers(self, reads):
break_read = lambda read: [ read[j:j + self.k] for j in range(len(read) - self.k + 1) ]
return [ kmer for read in reads for kmer in break_read(read) ]
def build_deBruijn_graph(self, kmers):
def add_edge(graph, left, right):
graph.setdefault(left, [set(), 0])
graph.setdefault(right, [set(), 0])
if right not in graph[left][0]:
graph[left][0].add(right)
graph[right][1] += 1
for kmer in kmers:
left, right = kmer[:-1], kmer[1:]
if left != right:
add_edge(self.graph, left, right)
def remove_tips(self):
for k, v in self.graph.items():
find_and_remove = None
if len(v[0]) == 1 and v[1] == 0:
find_and_remove = self.find_and_remove_incoming
elif len(v[0]) > 1:
find_and_remove = self.find_and_remove_outgoing
else : continue
condition = True
while condition:
condition = False
for edge in v[0]:
if find_and_remove(edge, 0):
v[0].remove(edge)
self.edges_removed += 1
condition = True
break
return self.edges_removed
def find_and_remove_outgoing(self, current, depth):
if self.outgoing_num(current) > 1 or self.incoming_num(current) > 1:
return False
if depth == self.threshold:
return False
if self.outgoing_num(current) == 0:
return True
if self.find_and_remove_outgoing(next(iter(self.graph[current][0])), depth + 1):
self.graph[current][0].pop()
self.edges_removed += 1
return True
return False
def find_and_remove_incoming(self, current, depth):
if depth == self.threshold:
return False
if self.outgoing_num(current) == 0 or self.incoming_num(current) > 1:
return True
if self.find_and_remove_incoming(next(iter(self.graph[current][0])), depth + 1):
self.graph[current][0].pop()
self.edges_removed += 1
return True
return False
def incoming_num(self, v):
return self.graph[v][1]
def outgoing_num(self, v):
return len(self.graph[v][0])
if __name__ == "__main__":
k, reads = 15, sys.stdin.read().split()
print(TipRemoval(k, reads).remove_tips()) | 0.368747 | 0.407746 |
import io
import os
import sys

from setuptools import find_packages, setup

# Package metadata.  VERSION is resolved from src/wechat/__version__.py when None.
DESCRIPTION = 'so eazy to interact with wechat api'
URL = 'https://github.com/chuter/wechat-requests'
EMAIL = '<EMAIL>'
AUTHOR = 'chuter'
VERSION = None

# What packages are required for this module to be executed?
# NOTE(review): 'pycrypto' is unmaintained with known CVEs; consider the
# drop-in replacement 'pycryptodome' -- confirm compatibility before changing.
REQUIRED = [
    'six',
    'requests>=2.18.4',
    'lxml',
    'bs4',
    'pycrypto'
]

TEST_REQUIREMENTS = [
    'pytest-httpbin==0.0.7',
    'pytest-cov',
    'pytest-mock',
    'pytest-xdist',
    'pytest>=2.8.0'
]

here = os.path.abspath(os.path.dirname(__file__))
src = os.path.join(here, 'src')

# Convenience shortcut: `python setup.py publish` builds an sdist and
# uploads it via twine, then exits without running setup().
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist')
    os.system('twine upload dist/*')
    sys.exit()

with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = '\n' + f.read()

about = {}
if VERSION is None:
    # Execute __version__.py into `about` to read the version without
    # importing the package.  NOTE(review): name=about['__name__'] below
    # relies on __version__.py explicitly defining __name__ -- confirm.
    with open(os.path.join(src, 'wechat', '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION

setup(
    name=about['__name__'],
    version=about['__version__'],
    description=DESCRIPTION,
    license='MIT',
    long_description=long_description,
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
    url=URL,
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=REQUIRED,
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: Chinese (Simplified)',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    keywords=[
        'wechat', 'weixin', 'wxpay', 'api', 'apiclient', 'requests'
    ],
    tests_require=TEST_REQUIREMENTS,
    setup_requires=['pytest-runner'],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
) | setup.py |
import io
import os
import sys
from setuptools import find_packages, setup
DESCRIPTION = 'so eazy to interact with wechat api'
URL = 'https://github.com/chuter/wechat-requests'
EMAIL = '<EMAIL>'
AUTHOR = 'chuter'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'six',
'requests>=2.18.4',
'lxml',
'bs4',
'pycrypto'
]
TEST_REQUIREMENTS = [
'pytest-httpbin==0.0.7',
'pytest-cov',
'pytest-mock',
'pytest-xdist',
'pytest>=2.8.0'
]
here = os.path.abspath(os.path.dirname(__file__))
src = os.path.join(here, 'src')
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist')
os.system('twine upload dist/*')
sys.exit()
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
if VERSION is None:
with open(os.path.join(src, 'wechat', '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
setup(
name=about['__name__'],
version=about['__version__'],
description=DESCRIPTION,
license='MIT',
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
url=URL,
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=REQUIRED,
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: Chinese (Simplified)',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords=[
'wechat', 'weixin', 'wxpay', 'api', 'apiclient', 'requests'
],
tests_require=TEST_REQUIREMENTS,
setup_requires=['pytest-runner'],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
) | 0.267026 | 0.177276 |
import mysql.connector
def conectar():
    """Open a connection to the local `todos_filmes` MySQL database.

    Returns the connection object on success.  NOTE(review): on failure the
    error is only printed and the function implicitly returns None; every
    caller immediately invokes conn.cursor() and would raise AttributeError
    -- confirm whether re-raising would be preferable.
    """
    try:
        conn = mysql.connector.connect(
            host='localhost',
            database='todos_filmes',
            user='root',
            password='<PASSWORD>')
        return conn
    except mysql.connector.Error as e:
        print(f'Erro ao se conectar ao MySQL: {e}')
def desconectar(conn):
    """Close the given database connection, if any was opened."""
    if not conn:
        return
    conn.close()
def listar():
    """Print every row of the `filmes` table, or a message if it is empty."""
    conn = conectar()
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM filmes')
    linhas = cursor.fetchall()
    if len(linhas) > 0:
        print('=' * 20)
        for linha in linhas:
            # Column order: id, nome, ano, id_distribuidor, id_genero, id_classificacao.
            # NOTE(review): columns 3-5 print raw foreign-key ids, not the
            # referenced names -- confirm whether a JOIN was intended.
            print(f'ID: {linha[0]}')
            print(f'Nome do Filme: {linha[1]}')
            print(f'Ano do Filme: {linha[2]}')
            print(f'Distribuidor: {linha[3]}')
            print(f'Gênero do Filme: {linha[4]}')
            print(f'Classificação Indicativa: {linha[5]}')
            print('=' * 20)
    else:
        print('Não existe filmes cadastrados')
    desconectar(conn)
def inserir():
    """Prompt for a movie's fields and insert it into the `filmes` table.

    Security fix: the row values are now passed as query parameters instead
    of being interpolated into the SQL string with an f-string, which was
    vulnerable to SQL injection (e.g. a movie name containing a quote).
    """
    conn = conectar()
    cursor = conn.cursor()
    nome = str(input('Nome do Filme: ')).strip().title()
    ano = int(input('Ano do Filme: '))
    id_distribuidor = int(input('Filme Distribuído por: '))
    id_genero = int(input('Genero do filme: '))
    id_classificacao = int(input('Classificação indicativa: '))
    # %s placeholders are filled safely by the MySQL driver.
    injetar = """INSERT INTO filmes (nome, ano, id_distribuidor, id_genero, id_classificacao)
                 VALUES (%s, %s, %s, %s, %s)"""
    cursor.execute(injetar, (nome, ano, id_distribuidor, id_genero, id_classificacao))
    conn.commit()
    if cursor.rowcount == 1:
        print(f'O filme {nome} foi inserido com Sucesso!')
    else:
        print('\033[31mProblema ao inserir o filme.\033[m')
    desconectar(conn)
def atualizar():
    """Prompt for an id plus new field values and update that `filmes` row.

    Security fix: values (and the id in the WHERE clause) are passed as
    query parameters instead of f-string interpolation, closing the SQL
    injection hole the original query had.
    """
    conn = conectar()
    cursor = conn.cursor()
    codigo = int(input('Código do filme que deseja atualizar: '))
    nome = str(input('Novo nome do Filme: ')).strip().title()
    ano = int(input('Novo ano do Filme: '))
    id_distribuidor = int(input('ID do Distribuidor: '))
    id_genero = int(input('ID do Gênero do filme: '))
    id_classificacao = int(input('Classificação Indicativa: '))
    # %s placeholders are filled safely by the MySQL driver.
    att = """UPDATE filmes
             SET nome = %s, ano = %s, id_distribuidor = %s, id_genero = %s,
                 id_classificacao = %s
             WHERE id = %s"""
    cursor.execute(att, (nome, ano, id_distribuidor, id_genero, id_classificacao, codigo))
    conn.commit()
    if cursor.rowcount == 1:
        print(f'Produto {nome} atualizado com Sucesso!')
    else:
        # NOTE: MySQL reports rowcount 0 when the new values equal the old
        # ones, so this message can also appear for a "no-op" update.
        print('Não foi Possível atualizar o produto.')
    desconectar(conn)
def deletar():
    """Prompt for an id and delete that row from the `filmes` table.

    Security fix: the id is passed as a query parameter rather than being
    interpolated into the SQL string (SQL injection hardening, consistent
    with the other CRUD helpers).
    """
    conn = conectar()
    cursor = conn.cursor()
    codigo = int(input('Insira o id que deseja deletar: '))
    cursor.execute('DELETE FROM filmes WHERE id = %s', (codigo,))
    conn.commit()
    if cursor.rowcount == 1:
        print('Produto deletado com Sucesso!!')
    else:
        print(f'Erro ao excluir o produto com id={codigo}')
    desconectar(conn)
def menu():
    """Interactive main menu: loop until the user picks option 5 (quit).

    Options 1-4 dispatch to the CRUD helpers; anything else prints an
    invalid-option message.  A non-numeric entry raises ValueError, exactly
    as before.
    """
    acoes = {1: listar, 2: inserir, 3: atualizar, 4: deletar}
    while True:
        print('=========Gerenciamento de Produtos==============')
        print('Selecione uma opção: ')
        print('1 - Listar produtos.')
        print('2 - Inserir produtos.')
        print('3 - Atualizar produto.')
        print('4 - Deletar produto.')
        print('5 - Sair do Programa.')
        opcao = int(input('Digite um número: '))
        if opcao == 5:
            break
        if opcao in acoes:
            acoes[opcao]()
        else:
            print('Opção inválida')
if __name__ == '__main__':
menu() | PythonCruds/CrudFilmes.py | import mysql.connector
def conectar():
"""
Função para conectar ao servidor
"""
try:
conn = mysql.connector.connect(
host='localhost',
database='todos_filmes',
user='root',
password='<PASSWORD>')
return conn
except mysql.connector.Error as e:
print(f'Erro ao se conectar ao MySQL: {e}')
def desconectar(conn):
"""
Função para desconectar do servidor.
"""
if conn:
conn.close()
def listar():
"""
Função para listar os produtos
"""
conn = conectar()
cursor = conn.cursor()
cursor.execute('SELECT * FROM filmes')
linhas = cursor.fetchall()
if len(linhas) > 0:
print('=' * 20)
for linha in linhas:
print(f'ID: {linha[0]}')
print(f'Nome do Filme: {linha[1]}')
print(f'Ano do Filme: {linha[2]}')
print(f'Distribuidor: {linha[3]}')
print(f'Gênero do Filme: {linha[4]}')
print(f'Classificação Indicativa: {linha[5]}')
print('=' * 20)
else:
print('Não existe filmes cadastrados')
desconectar(conn)
def inserir():
"""
Função para inserir um produto
"""
conn = conectar()
cursor = conn.cursor()
nome = str(input('Nome do Filme: ')).strip().title()
ano = int(input('Ano do Filme: '))
id_distribuidor = int(input('Filme Distribuído por: '))
id_genero = int(input('Genero do filme: '))
id_classificacao = int(input('Classificação indicativa: '))
injetar = f"""INSERT INTO filmes (nome, ano, id_distribuidor, id_genero, id_classificacao) VALUES
('{nome}', {ano}, {id_distribuidor}, {id_genero}, {id_classificacao})"""
cursor.execute(injetar)
conn.commit()
if cursor.rowcount == 1:
print(f'O filme {nome} foi inserido com Sucesso!')
else:
print('\033[31mProblema ao inserir o filme.\033[m')
desconectar(conn)
def atualizar():
"""
Função para atualizar um produto
"""
conn = conectar()
cursor = conn.cursor()
codigo = int(input('Código do filme que deseja atualizar: '))
nome = str(input('Novo nome do Filme: ')).strip().title()
ano = int(input('Novo ano do Filme: '))
id_distribuidor = int(input('ID do Distribuidor: '))
id_genero = int(input('ID do Gênero do filme: '))
id_classificacao = int(input('Classificação Indicativa: '))
att = f"""UPDATE filmes
SET nome = '{nome}', ano = {ano}, id_distribuidor = {id_distribuidor}, id_genero = {id_genero},
id_classificacao = {id_classificacao}
WHERE id = {codigo}"""
cursor.execute(att)
conn.commit()
if cursor.rowcount == 1:
print(f'Produto {nome} atualizado com Sucesso!')
else:
print('Não foi Possível atualizar o produto.')
desconectar(conn)
def deletar():
"""
Função para deletar um produto
"""
conn = conectar()
cursor = conn.cursor()
codigo = int(input('Insira o id que deseja deletar: '))
cursor.execute(f'DELETE FROM filmes WHERE id = {codigo}')
conn.commit()
if cursor.rowcount == 1:
print('Produto deletado com Sucesso!!')
else:
print(f'Erro ao excluir o produto com id={codigo}')
desconectar(conn)
def menu():
"""
Função para gerar o menu inicial
"""
while True:
print('=========Gerenciamento de Produtos==============')
print('Selecione uma opção: ')
print('1 - Listar produtos.')
print('2 - Inserir produtos.')
print('3 - Atualizar produto.')
print('4 - Deletar produto.')
print('5 - Sair do Programa.')
opcao = int(input('Digite um número: '))
if opcao in [1, 2, 3, 4, 5]:
if opcao == 1:
listar()
elif opcao == 2:
inserir()
elif opcao == 3:
atualizar()
elif opcao == 4:
deletar()
elif opcao == 5:
break
else:
print('Opção inválida')
else:
print('Opção inválida')
if __name__ == '__main__':
menu() | 0.155303 | 0.157105 |
from __future__ import unicode_literals, division
import subprocess
import os
import shutil
import logging
from monty.shutil import decompress_dir
from monty.os.path import zpath
from pymatgen.io.cp2k.inputs import Cp2kInput, Keyword
from custodian.custodian import Job
from custodian.cp2k.interpreter import Cp2kModder
from custodian.cp2k.utils import restart, cleanup_input
logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__version__ = "1.0"
CP2K_INPUT_FILES = ["cp2k.inp"]
CP2K_OUTPUT_FILES = ["cp2k.out"]
class Cp2kJob(Job):
    """
    A basic cp2k job. Just runs whatever is in the directory. But conceivably
    can be a complex processing of inputs etc. with initialization.
    """

    def __init__(
        self,
        cp2k_cmd,
        input_file="cp2k.inp",
        output_file="cp2k.out",
        stderr_file="std_err.txt",
        suffix="",
        final=True,
        backup=True,
        settings_override=None,
        restart=False,
    ):
        """
        This constructor is necessarily complex due to the need for
        flexibility. For standard kinds of runs, it's often better to use one
        of the static constructors. The defaults are usually fine too.

        Args:
            cp2k_cmd (list): Command to run cp2k as a list of args. For example,
                if you are using mpirun, it can be something like
                ["mpirun", "cp2k.popt"]
            input_file (str): Name of the file to use as input to CP2K
                executable. Defaults to "cp2k.inp"
            output_file (str): Name of file to direct standard out to.
                Defaults to "cp2k.out".
            stderr_file (str): Name of file to direct standard error to.
                Defaults to "std_err.txt".
            suffix (str): A suffix to be appended to the final output. E.g.,
                to rename all CP2K output from say cp2k.out to
                cp2k.out.relax1, provide ".relax1" as the suffix.
            final (bool): Indicating whether this is the final cp2k job in a
                series. Defaults to True.
            backup (bool): Whether to backup the initial input files. If True,
                the input file will be copied with a
                ".orig" appended. Defaults to True.
            settings_override ([actions]): A list of actions. See the Cp2kModder
                in interpreter.py
            restart (bool): Whether to run in restart mode, i.e. this a continuation of
                a previous calculation. Default is False.
        """
        self.cp2k_cmd = cp2k_cmd
        self.input_file = input_file
        self.ci = None  # parsed Cp2kInput; populated in setup()
        self.output_file = output_file
        self.stderr_file = stderr_file
        self.final = final
        self.backup = backup
        self.suffix = suffix
        self.settings_override = settings_override if settings_override else []
        self.restart = restart

    def setup(self):
        """
        Performs initial setup for Cp2k in three stages. First, if custodian is running in restart mode, then
        the restart function will copy the restart file to self.input_file, and remove any previous WFN initialization
        if present. Second, any additional user specified settings will be applied. Lastly, a backup of the input
        file will be made for reference.
        """
        decompress_dir(".")
        # zpath resolves gzipped variants of the input file if present.
        self.ci = Cp2kInput.from_file(zpath(self.input_file))
        cleanup_input(self.ci)
        if self.restart:
            restart(
                actions=self.settings_override,
                output_file=self.output_file,
                input_file=self.input_file,
                no_actions_needed=True,
            )
        if self.settings_override or self.restart:
            modder = Cp2kModder(filename=self.input_file, actions=[], ci=self.ci)
            modder.apply_actions(self.settings_override)
        if self.backup:
            shutil.copy(self.input_file, f"{self.input_file}.orig")

    def run(self):
        """
        Perform the actual CP2K run.

        Returns:
            (subprocess.Popen) Used for monitoring.
        """
        # TODO: cp2k has bizarre in/out streams. Some errors that should go to std_err are not sent anywhere...
        cmd = list(self.cp2k_cmd)
        cmd.extend(["-i", self.input_file])
        cmdstring = " ".join(cmd)
        logger.info(f"Running {cmdstring}")
        with open(self.output_file, "w") as f_std, open(self.stderr_file, "w", buffering=1) as f_err:
            # use line buffering for stderr
            return subprocess.Popen(cmd, stdout=f_std, stderr=f_err, shell=False)

    # TODO double jobs, file manipulations, etc. should be done in atomate in the future
    # and custodian should only run the job itself
    def postprocess(self):
        """
        Postprocessing includes renaming and gzipping where necessary.
        """
        fs = os.listdir(".")
        if os.path.exists(self.output_file):
            if self.suffix != "":
                # Collect run artifacts into a run<suffix> directory;
                # move for the final job, copy (keep in place) otherwise.
                os.mkdir(f"run{self.suffix}")
                for f in fs:
                    if "json" in f:
                        continue
                    if not os.path.isdir(f):
                        if self.final:
                            shutil.move(f, f"run{self.suffix}/{f}")
                        else:
                            shutil.copy(f, f"run{self.suffix}/{f}")

        # Remove continuation so if a subsequent job is run in
        # the same directory, will not restart this job.
        if os.path.exists("continue.json"):
            os.remove("continue.json")

    def terminate(self):
        """
        Terminate cp2k
        """
        # Best-effort kill of any cp2k executable named in the command.
        for k in self.cp2k_cmd:
            if "cp2k" in k:
                try:
                    os.system(f"killall {k}")
                except Exception:
                    pass

    @classmethod
    def gga_static_to_hybrid(
        cls,
        cp2k_cmd,
        input_file="cp2k.inp",
        output_file="cp2k.out",
        stderr_file="std_err.txt",
        backup=True,
        settings_override_gga=None,
        settings_override_hybrid=None,
    ):
        """
        A bare gga to hybrid calculation. Removes all unecessary features
        from the gga run, and making it only a ENERGY/ENERGY_FORCE
        depending on the hybrid run.
        """
        # Job 1: strip the XC section down to plain PBE and run a static
        # ENERGY_FORCE calculation to produce a GGA wavefunction.
        job1_settings_override = [
            {
                "dict": input_file,
                "action": {
                    "_unset": {"FORCE_EVAL": {"DFT": "XC"}},
                    "_set": {"GLOBAL": {"PROJECT_NAME": "GGA", "RUN_TYPE": "ENERGY_FORCE"}},
                },
            },
            {
                "dict": input_file,
                "action": {"_set": {"FORCE_EVAL": {"DFT": {"XC": {"XC_FUNCTIONAL": {"PBE": {}}}}}}},
            },
        ]
        job1 = Cp2kJob(
            cp2k_cmd,
            input_file=input_file,
            output_file=output_file,
            backup=backup,
            stderr_file=stderr_file,
            final=False,
            suffix="1",
            settings_override=job1_settings_override,
        )
        ci = Cp2kInput.from_file(zpath(input_file))
        r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
        if r in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT", "ENERGY_FORCE"]:  # no need for double job
            return [job1]

        # Job 2: hybrid run seeded from the GGA wavefunction, with HF
        # screening enabled and the original RUN_TYPE restored.
        job2_settings_override = [
            {
                "dict": input_file,
                "action": {
                    "_set": {
                        "FORCE_EVAL": {
                            "DFT": {
                                "XC": {
                                    "HF": {
                                        "SCREENING": {
                                            "SCREEN_ON_INITIAL_P": True,
                                            "SCREEN_P_FORCES": True,
                                        }
                                    }
                                },
                                "WFN_RESTART_FILE_NAME": "GGA-RESTART.wfn",
                            }
                        },
                        "GLOBAL": {"RUN_TYPE": r},
                    },
                },
            }
        ]
        job2 = Cp2kJob(
            cp2k_cmd,
            input_file=input_file,
            output_file=output_file,
            backup=backup,
            stderr_file=stderr_file,
            final=True,
            suffix="2",
            restart=False,
            settings_override=job2_settings_override,
        )
        return [job1, job2]

    @classmethod
    def double_job(
        cls, cp2k_cmd, input_file="cp2k.inp", output_file="cp2k.out", stderr_file="std_err.txt", backup=True
    ):
        """
        This creates a sequence of two jobs. The first of which is an "initialization" of the
        wfn. Using this, the "restart" function can be exploited to determine if a diagonalization
        job can/would benefit from switching to OT scheme. If not, then the second job remains a
        diagonalization job, and there is minimal overhead from restarting.
        """
        job1 = Cp2kJob(
            cp2k_cmd,
            input_file=input_file,
            output_file=output_file,
            backup=backup,
            stderr_file=stderr_file,
            final=False,
            suffix="1",
            settings_override={},
        )
        ci = Cp2kInput.from_file(zpath(input_file))
        r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
        # Force the first job to a static run; the second restores RUN_TYPE.
        if r not in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT"]:
            job1.settings_override = [
                {"dict": input_file, "action": {"_set": {"GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"}}}}
            ]
        job2 = Cp2kJob(
            cp2k_cmd,
            input_file=input_file,
            output_file=output_file,
            backup=backup,
            stderr_file=stderr_file,
            final=True,
            suffix="2",
            restart=True,
        )
        job2.settings_override = [{"dict": input_file, "action": {"_set": {"GLOBAL": {"RUN_TYPE": r}}}}]
        return [job1, job2]

    @classmethod
    def pre_screen_hybrid(
        cls, cp2k_cmd, input_file="cp2k.inp", output_file="cp2k.out", stderr_file="std_err.txt", backup=True
    ):
        """
        Build a job where the first job is an unscreened hybrid static calculation, then the second one
        uses the wfn from the first job as a restart to do a screened calculation.
        """
        # Job 1: unscreened hybrid static run.
        job1_settings_override = [
            {
                "dict": input_file,
                "action": {
                    "_set": {
                        "FORCE_EVAL": {
                            "DFT": {
                                "XC": {
                                    "HF": {
                                        "SCREENING": {
                                            "SCREEN_ON_INITIAL_P": False,
                                            "SCREEN_P_FORCES": False,
                                        }
                                    }
                                }
                            }
                        },
                        "GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"},
                    }
                },
            }
        ]
        job1 = Cp2kJob(
            cp2k_cmd,
            input_file=input_file,
            output_file=output_file,
            backup=backup,
            stderr_file=stderr_file,
            final=False,
            suffix="1",
            settings_override=job1_settings_override,
        )
        ci = Cp2kInput.from_file(zpath(input_file))
        r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
        if r in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT", "ENERGY_FORCE"]:  # no need for double job
            return [job1]

        # Job 2: screened hybrid run restarted from the unscreened wfn,
        # with the original RUN_TYPE restored.
        job2_settings_override = [
            {
                "dict": input_file,
                "action": {
                    "_set": {
                        "FORCE_EVAL": {
                            "DFT": {
                                "XC": {
                                    "HF": {
                                        "SCREENING": {
                                            "SCREEN_ON_INITIAL_P": True,
                                            "SCREEN_P_FORCES": True,
                                        }
                                    }
                                },
                                "WFN_RESTART_FILE_NAME": "UNSCREENED_HYBRID-RESTART.wfn",
                            }
                        },
                        "GLOBAL": {"RUN_TYPE": r},
                    },
                },
            }
        ]
        job2 = Cp2kJob(
            cp2k_cmd,
            input_file=input_file,
            output_file=output_file,
            backup=backup,
            stderr_file=stderr_file,
            final=True,
            suffix="2",
            restart=False,
            settings_override=job2_settings_override,
        )
return [job1, job2] | custodian/cp2k/jobs.py | from __future__ import unicode_literals, division
import subprocess
import os
import shutil
import logging
from monty.shutil import decompress_dir
from monty.os.path import zpath
from pymatgen.io.cp2k.inputs import Cp2kInput, Keyword
from custodian.custodian import Job
from custodian.cp2k.interpreter import Cp2kModder
from custodian.cp2k.utils import restart, cleanup_input
logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__version__ = "1.0"
CP2K_INPUT_FILES = ["cp2k.inp"]
CP2K_OUTPUT_FILES = ["cp2k.out"]
class Cp2kJob(Job):
"""
A basic cp2k job. Just runs whatever is in the directory. But conceivably
can be a complex processing of inputs etc. with initialization.
"""
def __init__(
self,
cp2k_cmd,
input_file="cp2k.inp",
output_file="cp2k.out",
stderr_file="std_err.txt",
suffix="",
final=True,
backup=True,
settings_override=None,
restart=False,
):
"""
This constructor is necessarily complex due to the need for
flexibility. For standard kinds of runs, it's often better to use one
of the static constructors. The defaults are usually fine too.
Args:
cp2k_cmd (list): Command to run cp2k as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "cp2k.popt"]
input_file (str): Name of the file to use as input to CP2K
executable. Defaults to "cp2k.inp"
output_file (str): Name of file to direct standard out to.
Defaults to "cp2k.out".
stderr_file (str): Name of file to direct standard error to.
Defaults to "std_err.txt".
suffix (str): A suffix to be appended to the final output. E.g.,
to rename all CP2K output from say cp2k.out to
cp2k.out.relax1, provide ".relax1" as the suffix.
final (bool): Indicating whether this is the final cp2k job in a
series. Defaults to True.
backup (bool): Whether to backup the initial input files. If True,
the input file will be copied with a
".orig" appended. Defaults to True.
settings_override ([actions]): A list of actions. See the Cp2kModder
in interpreter.py
restart (bool): Whether to run in restart mode, i.e. this a continuation of
a previous calculation. Default is False.
"""
self.cp2k_cmd = cp2k_cmd
self.input_file = input_file
self.ci = None
self.output_file = output_file
self.stderr_file = stderr_file
self.final = final
self.backup = backup
self.suffix = suffix
self.settings_override = settings_override if settings_override else []
self.restart = restart
def setup(self):
"""
Performs initial setup for Cp2k in three stages. First, if custodian is running in restart mode, then
the restart function will copy the restart file to self.input_file, and remove any previous WFN initialization
if present. Second, any additional user specified settings will be applied. Lastly, a backup of the input
file will be made for reference.
"""
decompress_dir(".")
self.ci = Cp2kInput.from_file(zpath(self.input_file))
cleanup_input(self.ci)
if self.restart:
restart(
actions=self.settings_override,
output_file=self.output_file,
input_file=self.input_file,
no_actions_needed=True,
)
if self.settings_override or self.restart:
modder = Cp2kModder(filename=self.input_file, actions=[], ci=self.ci)
modder.apply_actions(self.settings_override)
if self.backup:
shutil.copy(self.input_file, f"{self.input_file}.orig")
def run(self):
"""
Perform the actual CP2K run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
# TODO: cp2k has bizarre in/out streams. Some errors that should go to std_err are not sent anywhere...
cmd = list(self.cp2k_cmd)
cmd.extend(["-i", self.input_file])
cmdstring = " ".join(cmd)
logger.info(f"Running {cmdstring}")
with open(self.output_file, "w") as f_std, open(self.stderr_file, "w", buffering=1) as f_err:
# use line buffering for stderr
return subprocess.Popen(cmd, stdout=f_std, stderr=f_err, shell=False)
# TODO double jobs, file manipulations, etc. should be done in atomate in the future
# and custodian should only run the job itself
def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
"""
fs = os.listdir(".")
if os.path.exists(self.output_file):
if self.suffix != "":
os.mkdir(f"run{self.suffix}")
for f in fs:
if "json" in f:
continue
if not os.path.isdir(f):
if self.final:
shutil.move(f, f"run{self.suffix}/{f}")
else:
shutil.copy(f, f"run{self.suffix}/{f}")
# Remove continuation so if a subsequent job is run in
# the same directory, will not restart this job.
if os.path.exists("continue.json"):
os.remove("continue.json")
def terminate(self):
"""
Terminate cp2k
"""
for k in self.cp2k_cmd:
if "cp2k" in k:
try:
os.system(f"killall {k}")
except Exception:
pass
@classmethod
def gga_static_to_hybrid(
cls,
cp2k_cmd,
input_file="cp2k.inp",
output_file="cp2k.out",
stderr_file="std_err.txt",
backup=True,
settings_override_gga=None,
settings_override_hybrid=None,
):
"""
A bare gga to hybrid calculation. Removes all unecessary features
from the gga run, and making it only a ENERGY/ENERGY_FORCE
depending on the hybrid run.
"""
job1_settings_override = [
{
"dict": input_file,
"action": {
"_unset": {"FORCE_EVAL": {"DFT": "XC"}},
"_set": {"GLOBAL": {"PROJECT_NAME": "GGA", "RUN_TYPE": "ENERGY_FORCE"}},
},
},
{
"dict": input_file,
"action": {"_set": {"FORCE_EVAL": {"DFT": {"XC": {"XC_FUNCTIONAL": {"PBE": {}}}}}}},
},
]
job1 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=False,
suffix="1",
settings_override=job1_settings_override,
)
ci = Cp2kInput.from_file(zpath(input_file))
r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
if r in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT", "ENERGY_FORCE"]: # no need for double job
return [job1]
job2_settings_override = [
{
"dict": input_file,
"action": {
"_set": {
"FORCE_EVAL": {
"DFT": {
"XC": {
"HF": {
"SCREENING": {
"SCREEN_ON_INITIAL_P": True,
"SCREEN_P_FORCES": True,
}
}
},
"WFN_RESTART_FILE_NAME": "GGA-RESTART.wfn",
}
},
"GLOBAL": {"RUN_TYPE": r},
},
},
}
]
job2 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=True,
suffix="2",
restart=False,
settings_override=job2_settings_override,
)
return [job1, job2]
@classmethod
def double_job(
cls, cp2k_cmd, input_file="cp2k.inp", output_file="cp2k.out", stderr_file="std_err.txt", backup=True
):
"""
This creates a sequence of two jobs. The first of which is an "initialization" of the
wfn. Using this, the "restart" function can be exploited to determine if a diagonalization
job can/would benefit from switching to OT scheme. If not, then the second job remains a
diagonalization job, and there is minimal overhead from restarting.
"""
job1 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=False,
suffix="1",
settings_override={},
)
ci = Cp2kInput.from_file(zpath(input_file))
r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
if r not in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT"]:
job1.settings_override = [
{"dict": input_file, "action": {"_set": {"GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"}}}}
]
job2 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=True,
suffix="2",
restart=True,
)
job2.settings_override = [{"dict": input_file, "action": {"_set": {"GLOBAL": {"RUN_TYPE": r}}}}]
return [job1, job2]
@classmethod
def pre_screen_hybrid(
cls, cp2k_cmd, input_file="cp2k.inp", output_file="cp2k.out", stderr_file="std_err.txt", backup=True
):
"""
Build a job where the first job is an unscreened hybrid static calculation, then the second one
uses the wfn from the first job as a restart to do a screened calculation.
"""
job1_settings_override = [
{
"dict": input_file,
"action": {
"_set": {
"FORCE_EVAL": {
"DFT": {
"XC": {
"HF": {
"SCREENING": {
"SCREEN_ON_INITIAL_P": False,
"SCREEN_P_FORCES": False,
}
}
}
}
},
"GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"},
}
},
}
]
job1 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=False,
suffix="1",
settings_override=job1_settings_override,
)
ci = Cp2kInput.from_file(zpath(input_file))
r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
if r in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT", "ENERGY_FORCE"]: # no need for double job
return [job1]
job2_settings_override = [
{
"dict": input_file,
"action": {
"_set": {
"FORCE_EVAL": {
"DFT": {
"XC": {
"HF": {
"SCREENING": {
"SCREEN_ON_INITIAL_P": True,
"SCREEN_P_FORCES": True,
}
}
},
"WFN_RESTART_FILE_NAME": "UNSCREENED_HYBRID-RESTART.wfn",
}
},
"GLOBAL": {"RUN_TYPE": r},
},
},
}
]
job2 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=True,
suffix="2",
restart=False,
settings_override=job2_settings_override,
)
return [job1, job2] | 0.464173 | 0.18188 |
from common.deprecated import deprecated
from enum import IntEnum
from field_type import FieldType
_INT_TO_LEN_MAP = {
FieldType.Int8:1,
FieldType.Int16:2,
FieldType.Int32:4,
FieldType.Int64:8,
}
class _WireTypes(IntEnum):
VAR_INT = 0
LENGTH_DELIMITED = 1
#TODO: consider more optimal way to work with bytes
class _Tokenizer(object):
def write_varint(self, output_string, num):
while True:
num, mod = divmod(num, 128)
if num == 0:
output_string.write(chr(mod))
break
else:
output_string.write(chr(mod + 128))
def read_varint(self, input_string):
result = 0
while True:
byte = ord(input_string.read(1))
if byte & 128 == 128:
result = (result << 8) + (byte - 128)
else:
return (result << 8) + byte
def write_key(self, output_string, wire_type, field_number):
output_string.write(chr((field_number << 1) % 256 + int(wire_type)))
def read_key(self, input_string):
byte = ord(input_string.read(1))
return (byte >> 1, _WireTypes(byte % 2))
@deprecated
def write_int(self, output_string, num, num_type=FieldType.Int8):
output_string.write(self._num_to_str(num, num_type))
@deprecated
def write_str(self, output_string, string):
output_string.write(string)
@deprecated
def read_int(self, input_string, num_type=FieldType.Int8):
if num_type in _INT_TO_LEN_MAP.iterkeys():
str_len = _INT_TO_LEN_MAP[num_type]
else:
raise Exception('Type %r is not numeric.' % num_type)
return self._str_to_num(input_string.read(str_len))
@deprecated
def read_str(self, input_string, length):
return input_string.read(length)
@deprecated
def _str_to_num(self, string):
result = 0
for i, byte in enumerate(reversed(string)):
result += ord(byte) * 256 ** i
return result
@deprecated
def _num_to_str(self, num, num_type):
result = ''
a = num
while a != 0:
a, b = divmod(a, 256)
result = chr(b) + result
if num_type in _INT_TO_LEN_MAP.iterkeys():
str_len = _INT_TO_LEN_MAP[num_type]
# else:
# raise Exception("Byte representation of %d - %d is greater that %d" % (num, len(result), str_len))
return chr(0) * (str_len - len(result)) + result | proto/_tokenizer.py | from common.deprecated import deprecated
from enum import IntEnum
from field_type import FieldType
_INT_TO_LEN_MAP = {
FieldType.Int8:1,
FieldType.Int16:2,
FieldType.Int32:4,
FieldType.Int64:8,
}
class _WireTypes(IntEnum):
VAR_INT = 0
LENGTH_DELIMITED = 1
#TODO: consider more optimal way to work with bytes
class _Tokenizer(object):
def write_varint(self, output_string, num):
while True:
num, mod = divmod(num, 128)
if num == 0:
output_string.write(chr(mod))
break
else:
output_string.write(chr(mod + 128))
def read_varint(self, input_string):
result = 0
while True:
byte = ord(input_string.read(1))
if byte & 128 == 128:
result = (result << 8) + (byte - 128)
else:
return (result << 8) + byte
def write_key(self, output_string, wire_type, field_number):
output_string.write(chr((field_number << 1) % 256 + int(wire_type)))
def read_key(self, input_string):
byte = ord(input_string.read(1))
return (byte >> 1, _WireTypes(byte % 2))
@deprecated
def write_int(self, output_string, num, num_type=FieldType.Int8):
output_string.write(self._num_to_str(num, num_type))
@deprecated
def write_str(self, output_string, string):
output_string.write(string)
@deprecated
def read_int(self, input_string, num_type=FieldType.Int8):
if num_type in _INT_TO_LEN_MAP.iterkeys():
str_len = _INT_TO_LEN_MAP[num_type]
else:
raise Exception('Type %r is not numeric.' % num_type)
return self._str_to_num(input_string.read(str_len))
@deprecated
def read_str(self, input_string, length):
return input_string.read(length)
@deprecated
def _str_to_num(self, string):
result = 0
for i, byte in enumerate(reversed(string)):
result += ord(byte) * 256 ** i
return result
@deprecated
def _num_to_str(self, num, num_type):
result = ''
a = num
while a != 0:
a, b = divmod(a, 256)
result = chr(b) + result
if num_type in _INT_TO_LEN_MAP.iterkeys():
str_len = _INT_TO_LEN_MAP[num_type]
# else:
# raise Exception("Byte representation of %d - %d is greater that %d" % (num, len(result), str_len))
return chr(0) * (str_len - len(result)) + result | 0.227727 | 0.281057 |
class HashTableSeparateChaining(object):
'''
A Hash Table implementation by using normal Python List. Method supports for functions like:
Inserting Key, Removing Key, Getting Key, Checking the size, Iterating through all the keys etc.
Class allows to define a custom Hash Function, Initial List Capacity and Load Factor.
'''
def __init__(self, capacity = None, load_factor = None, hash_function = None):
def default_hash(key):
result = 0
if isinstance(key, str):
for r in key:
result += ord(r)
return result
if capacity is not None and capacity < 0:
raise ValueError("Invalid Capacity")
if load_factor is not None and not (load_factor > 0 and load_factor < 1):
raise ValueError("Invalid Load Factor")
self.capacity = capacity if capacity is not None else 10
self.load_factor = load_factor if load_factor is not None else 0.75
self.hash_function = hash_function if hash_function is not None else default_hash
self.size = 0
self.data = [None] * self.capacity
def insert_key(self, key, value):
if key is None:
raise Exception("Key cannot be None")
index = self.hash_function(key) % self.capacity
if self.data[index] is None:
self.data[index] = [[key, value]]
self.size += 1
else:
if key in [x[0] for x in self.data[index]]:
for i, r in enumerate(self.data[index]):
if r[0] == key:
self.data[index][i][1] = value
else:
self.data[index].append([key, value])
self.size += 1
if self.size > int(self.capacity * self.load_factor):
self.resize_table()
def resize_table(self):
self.capacity *= 2
new_data = [None] * self.capacity
for r in self.data:
if r is not None:
for vals in r:
index = self.hash_function(vals[0]) % self.capacity
if new_data[index] is None:
new_data[index] = [[vals[0], vals[1]]]
else:
# No Need to handle the Duplicate Key case here
new_data[index].append([vals[0], vals[1]])
self.data = new_data
def get_key(self, key):
if key is None:
return None
index = self.hash_function(key) % self.capacity
try:
list_index = [x[0] for x in self.data[index]].index(key)
except:
return None
return self.data[index][list_index][1]
def remove_key(self, key):
if key is None:
return None
if not self.contains_key(key):
raise KeyError("Key not found!")
value = self[key]
index = self.hash_function(key) % self.capacity
list_index = [x[0] for x in self.data[index]].index(key)
list_elem = self.data[index][list_index]
self.data[index].remove(list_elem)
self.size -= 1
return value
def __repr__(self):
result = ""
for i, _ in enumerate(self.data):
if self.data[i] is not None:
for r in self.data[i]:
result += str(r)+"\n"
return result
def __len__(self):
return self.size
def __getitem__(self, key):
return self.get_key(key)
def __setitem__(self, key, value):
self.insert_key(key, value)
def __delitem__(self, key):
return self.remove_key(key)
def __contains__(self, key):
return self.contains_key(key)
def __iter__(self):
for i, _ in enumerate(self.data):
if self.data[i] is not None:
for r in self.data[i]:
yield r[0]
def isEmpty(self):
return self.size == 0
def clear(self):
for key in list(self):
self.remove_key(key)
def contains_key(self, key):
if key in list(self):
return True
return False
if __name__ == "__main__":
h = HashTableSeparateChaining()
print(h.capacity) # 10
for r in range(3):
h[f'A{r}'] = 1
h[f'B{r}'] = 2
h[f'C{r}'] = 3
h['A2'] = 100
print(h.capacity) # 20
print(h)
# ['A0', 1]
# ['B0', 2]
# ['A1', 1]
# ['C0', 3]
# ['B1', 2]
# ['A2', 100]
# ['C1', 3]
# ['B2', 2]
# ['C2', 3]
print(h.remove_key('A2')) # 100
print(f'Length: {len(h)}, {h.capacity}')
# Length: 8, 20
print(h['A2']) # None
for r in h:
print(r)
# A0
# B0
# A1
# C0
# B1
# C1
# B2
# C2
print(h.contains_key('A1')) # True
h.clear()
print(h.isEmpty()) # True | HASH_TABLES/SEPARATE_CHAINING/HashTable.py | class HashTableSeparateChaining(object):
'''
A Hash Table implementation by using normal Python List. Method supports for functions like:
Inserting Key, Removing Key, Getting Key, Checking the size, Iterating through all the keys etc.
Class allows to define a custom Hash Function, Initial List Capacity and Load Factor.
'''
def __init__(self, capacity = None, load_factor = None, hash_function = None):
def default_hash(key):
result = 0
if isinstance(key, str):
for r in key:
result += ord(r)
return result
if capacity is not None and capacity < 0:
raise ValueError("Invalid Capacity")
if load_factor is not None and not (load_factor > 0 and load_factor < 1):
raise ValueError("Invalid Load Factor")
self.capacity = capacity if capacity is not None else 10
self.load_factor = load_factor if load_factor is not None else 0.75
self.hash_function = hash_function if hash_function is not None else default_hash
self.size = 0
self.data = [None] * self.capacity
def insert_key(self, key, value):
if key is None:
raise Exception("Key cannot be None")
index = self.hash_function(key) % self.capacity
if self.data[index] is None:
self.data[index] = [[key, value]]
self.size += 1
else:
if key in [x[0] for x in self.data[index]]:
for i, r in enumerate(self.data[index]):
if r[0] == key:
self.data[index][i][1] = value
else:
self.data[index].append([key, value])
self.size += 1
if self.size > int(self.capacity * self.load_factor):
self.resize_table()
def resize_table(self):
self.capacity *= 2
new_data = [None] * self.capacity
for r in self.data:
if r is not None:
for vals in r:
index = self.hash_function(vals[0]) % self.capacity
if new_data[index] is None:
new_data[index] = [[vals[0], vals[1]]]
else:
# No Need to handle the Duplicate Key case here
new_data[index].append([vals[0], vals[1]])
self.data = new_data
def get_key(self, key):
if key is None:
return None
index = self.hash_function(key) % self.capacity
try:
list_index = [x[0] for x in self.data[index]].index(key)
except:
return None
return self.data[index][list_index][1]
def remove_key(self, key):
if key is None:
return None
if not self.contains_key(key):
raise KeyError("Key not found!")
value = self[key]
index = self.hash_function(key) % self.capacity
list_index = [x[0] for x in self.data[index]].index(key)
list_elem = self.data[index][list_index]
self.data[index].remove(list_elem)
self.size -= 1
return value
def __repr__(self):
result = ""
for i, _ in enumerate(self.data):
if self.data[i] is not None:
for r in self.data[i]:
result += str(r)+"\n"
return result
def __len__(self):
return self.size
def __getitem__(self, key):
return self.get_key(key)
def __setitem__(self, key, value):
self.insert_key(key, value)
def __delitem__(self, key):
return self.remove_key(key)
def __contains__(self, key):
return self.contains_key(key)
def __iter__(self):
for i, _ in enumerate(self.data):
if self.data[i] is not None:
for r in self.data[i]:
yield r[0]
def isEmpty(self):
return self.size == 0
def clear(self):
for key in list(self):
self.remove_key(key)
def contains_key(self, key):
if key in list(self):
return True
return False
if __name__ == "__main__":
h = HashTableSeparateChaining()
print(h.capacity) # 10
for r in range(3):
h[f'A{r}'] = 1
h[f'B{r}'] = 2
h[f'C{r}'] = 3
h['A2'] = 100
print(h.capacity) # 20
print(h)
# ['A0', 1]
# ['B0', 2]
# ['A1', 1]
# ['C0', 3]
# ['B1', 2]
# ['A2', 100]
# ['C1', 3]
# ['B2', 2]
# ['C2', 3]
print(h.remove_key('A2')) # 100
print(f'Length: {len(h)}, {h.capacity}')
# Length: 8, 20
print(h['A2']) # None
for r in h:
print(r)
# A0
# B0
# A1
# C0
# B1
# C1
# B2
# C2
print(h.contains_key('A1')) # True
h.clear()
print(h.isEmpty()) # True | 0.608012 | 0.508605 |
from __future__ import division
import numpy as np
np.seterr(all='raise')
import logging
import os
import signal
import alsaaudio
from multiprocessing import Process
from pyffmpeg import FFMpegReader, PixelFormats
import pygame
from OpenGL.GL.shaders import compileProgram, compileShader
import VisionEgg
import VisionEgg.GL as gl
from VisionEgg.Textures import Texture, TextureObject, TextureStimulus
from VisionEgg.MoreStimuli import Target2D
from Core import Stimulus
from SweepController import StimulusController,SweepSequeStimulusController
class ShaderTexture(Texture):
def __init__(self,contrast=1.0,*args,**kwargs):
super(ShaderTexture, self).__init__(*args,**kwargs)
"""
This contrast program comes from atduskgreg's shader example.
See https://github.com/atduskgreg/Processing-Shader-Examples/
"""
self.contrast_program = compileProgram(
compileShader('''
uniform sampler2D src_tex_unit0;
uniform float contrast;
void main() {
vec3 color = vec3(texture2D(src_tex_unit0, gl_TexCoord[0].st));
const vec3 LumCoeff = vec3(0.2125, 0.7154, 0.0721);
vec3 AvgLumin = vec3(0.5, 0.5, 0.5);
vec3 intensity = vec3(dot(color, LumCoeff));
// could substitute a uniform for this 1. and have variable saturation
vec3 satColor = mix(intensity, color, 1.);
vec3 conColor = mix(AvgLumin, satColor, contrast);
gl_FragColor = vec4(conColor, 1);
}
''',gl.GL_FRAGMENT_SHADER))
self.texture_loc = gl.glGetUniformLocation(self.contrast_program, "src_tex_unit0")
self.contrast_loc = gl.glGetUniformLocation(self.contrast_program, "contrast")
self.contrast = contrast
def set_contrast(self, contrast):
self.contrast = contrast
def update(self):
# install pixel shader for adjusting texture contrast
gl.glUseProgram(self.contrast_program)
gl.glUniform1i(self.texture_loc, 0)
gl.glUniform1f(self.contrast_loc, self.contrast)
class SurfaceTextureObject(TextureObject):
def __init__(self,*args,**kwargs):
super(SurfaceTextureObject, self).__init__(*args,**kwargs)
self.raw_data = None
def update_sub_surface( self,
texel_data,
transfer_pixels,
sub_surface_size, # updated region size
unpack_offset = None, # crop offset
update_offset = None, # update offset
mipmap_level = 0,
data_format = None, # automatic guess unless set explicitly
data_type = None, # automatic guess unless set explicitly
):
# make myself the active texture
gl.glBindTexture(self.target, self.gl_id)
if data_format is None: # guess the format of the data
if isinstance(texel_data,pygame.surface.Surface):
if texel_data.get_alpha():
data_format = gl.GL_RGBA
else:
data_format = gl.GL_RGB
data_type = gl.GL_UNSIGNED_BYTE
target = gl.GL_TEXTURE_2D
if unpack_offset is None:
unpack_offset = (0, 0)
if update_offset is None:
update_offset = (0, 0)
width, _height = texel_data.get_size()
if transfer_pixels or self.raw_data is None:
if texel_data.get_alpha():
self.raw_data = pygame.image.tostring(texel_data,'RGBA',1)
else:
self.raw_data = pygame.image.tostring(texel_data,'RGB',1)
gl.glPixelStorei( gl.GL_UNPACK_ROW_LENGTH, width)
gl.glPixelStorei( gl.GL_UNPACK_SKIP_PIXELS, unpack_offset[0])
gl.glPixelStorei( gl.GL_UNPACK_SKIP_ROWS, unpack_offset[1])
gl.glTexSubImage2D(target,
mipmap_level,
update_offset[0],
update_offset[1],
sub_surface_size[0],
sub_surface_size[1],
data_format,
data_type,
self.raw_data)
gl.glPixelStorei( gl.GL_UNPACK_ROW_LENGTH, 0)
gl.glPixelStorei( gl.GL_UNPACK_SKIP_PIXELS, 0)
gl.glPixelStorei( gl.GL_UNPACK_SKIP_ROWS, 0)
class BufferedTextureObject(TextureObject):
def __init__(self,buffer_data,*args,**kwargs):
super(BufferedTextureObject, self).__init__(*args,**kwargs)
self.buffer_data = buffer_data
def update_sub_surface( self,
texel_data,
transfer_pixels,
sub_surface_size, # updated region size
unpack_offset = None, # crop offset
update_offset = None, # update offset
mipmap_level = 0,
data_format = None, # automatic guess unless set explicitly
data_type = None, # automatic guess unless set explicitly
):
# make myself the active texture
gl.glBindTexture(self.target, self.gl_id)
data_format = gl.GL_RGB
data_type = gl.GL_UNSIGNED_BYTE
target = gl.GL_TEXTURE_2D
if unpack_offset is None:
unpack_offset = (0, 0)
if update_offset is None:
update_offset = (0, 0)
width, _height = texel_data.get_size()
raw_data = np.frombuffer(self.buffer_data, 'B')
gl.glPixelStorei( gl.GL_UNPACK_ROW_LENGTH, width)
gl.glPixelStorei( gl.GL_UNPACK_SKIP_PIXELS, unpack_offset[0])
gl.glPixelStorei( gl.GL_UNPACK_SKIP_ROWS, unpack_offset[1])
gl.glTexSubImage2D(target,
mipmap_level,
update_offset[0],
update_offset[1],
sub_surface_size[0],
sub_surface_size[1],
data_format,
data_type,
raw_data)
gl.glPixelStorei( gl.GL_UNPACK_ROW_LENGTH, 0)
gl.glPixelStorei( gl.GL_UNPACK_SKIP_PIXELS, 0)
gl.glPixelStorei( gl.GL_UNPACK_SKIP_ROWS, 0)
class ShaderTextureStimulus(TextureStimulus):
def __init__(self,shared_texture,*args,**kwargs):
super(ShaderTextureStimulus, self).__init__(*args,**kwargs)
# Recreate an OpenGL texture object this instance "owns"
self.texture_object = shared_texture
self.parameters.texture.load(self.texture_object,
internal_format=gl.GL_RGB,
build_mipmaps=False)
def draw(self):
super(ShaderTextureStimulus, self).draw()
# uninstall shader program
gl.glUseProgram(0)
class MovieController(StimulusController):
""" update movie from pygame surface """
def __init__(self,*args,**kwargs):
super(MovieController, self).__init__(*args,**kwargs)
self.surface = self.stimulus.surface
self.texture = self.stimulus.texture
self.texture_obj = self.texture.get_texture_object()
width, height = self.surface.get_size()
viewport = self.viewport.get_name()
half_width_projection = VisionEgg.Core.OrthographicProjectionNoZClip(
left=0.0, right=self.viewport.width_pix,
bottom=-self.viewport.height_pix*0.5,
top=self.viewport.height_pix*1.5)
half_height_projection = VisionEgg.Core.OrthographicProjectionNoZClip(
left=-self.viewport.width_pix*0.5,
right=self.viewport.width_pix*1.5,
bottom=0, top=self.viewport.height_pix)
if self.p.layout == "2D":
self.crop_offset = (0, 0)
self.size = (width, height)
self.update_offset = (0, 0)
elif self.p.layout in ("LR", "LRH"):
if viewport == "left":
self.crop_offset = (0, 0)
elif viewport == "right":
self.crop_offset = (width//2, 0)
self.size = (width//2, height)
self.update_offset = (width//4, 0)
elif self.p.layout in ("RL", "RLH"):
if viewport == "left":
self.crop_offset = (width//2, 0)
elif viewport == "right":
self.crop_offset = (0, 0)
self.size = (width//2, height)
self.update_offset = (width//4, 0)
elif self.p.layout in ("TB", "TBH"):
if viewport == "left":
self.crop_offset = (0, 0)
elif viewport == "right":
self.crop_offset = (0, height//2)
self.size = (width, height//2)
self.update_offset = (0, height//4)
elif self.p.layout in ("BT", "BTH"):
if viewport == "left":
self.crop_offset = (0, height//2)
elif viewport == "right":
self.crop_offset = (0, 0)
self.size = (width, height//2)
self.update_offset = (0, height//4)
else:
self.logger.error("Cannot support layout: %s" %self.p.layout)
if self.p.layout in ("LRH", "RLH"):
self.viewport.parameters.projection = half_width_projection
elif self.p.layout in ("TBH", "BTH"):
self.viewport.parameters.projection = half_height_projection
def during_go_eval(self):
transfer_pixels = True if self.viewport.get_name() == "left" else False
self.texture.set_contrast(self.p.contrast)
self.texture_obj.update_sub_surface(self.surface,
transfer_pixels=transfer_pixels,
sub_surface_size=self.size,
unpack_offset=self.crop_offset,
update_offset=self.update_offset)
class Movie(Stimulus):
def __init__(self, params, surface, texture_obj, subject=None, sweepseq=None, trigger=True, **kwargs):
super(Movie, self).__init__(subject=subject, params=params, **kwargs)
self.name = 'timingmovie'
self.logger = logging.getLogger('LightStim.Movie')
self.param_names = ['on','xorigDeg','yorigDeg','widthDeg','heightDeg']
self.defalut_parameters = {'xorigDeg':0.0,
'yorigDeg':0.0,
'bgbrightness':0.0,}
""" load parameters from stimulus_params file """
self.load_params()
""" override params from script """
self.set_parameters(self.parameters, params)
self.parameters.on = False
self.sweepseq = sweepseq
self.trigger = trigger
self.surface = surface
self.texure_obj = texture_obj
self.make_stimuli()
self.register_controllers()
def make_stimuli(self):
size = self.viewport.get_size()
self.background = Target2D(position=(size[0]/2, size[1]/2),
anchor='center',
size=size,
on=True)
self.bgp = self.background.parameters
#set background color before real sweep
bgb = self.parameters.bgbrightness
self.bgp.color = bgb, bgb, bgb, 1.0
contrast = self.parameters.contrast
self.texture = ShaderTexture(contrast, self.surface)
self.texture_stim = ShaderTextureStimulus(texture=self.texture,
shared_texture=self.texure_obj,
position=(size[0]/2, size[1]/2),
anchor='center',
mipmaps_enabled=0,
texture_min_filter=gl.GL_LINEAR)
self.tp = self.texture_stim.parameters
self.stimuli = (self.background, self.texture_stim)
def register_controllers(self):
self.controllers.append(MovieController(self))
class TimingController(SweepSequeStimulusController):
def __init__(self,*args,**kwargs):
super(TimingController, self).__init__(*args,**kwargs)
self.tp = self.stimulus.tp
def during_go_eval(self):
stimulus_on = self.next_param()
if stimulus_on:
self.tp.on = True
else:
self.tp.on = False
class TimingSetMovie(Movie):
def register_controllers(self):
super(TimingSetMovie, self).register_controllers()
self.logger.info('Register TimingController.')
self.controllers.append(TimingController(self))
class AlsaSoundLazyPlayer:
def __init__(self,rate=44100,channels=2,fps=25):
self._rate=rate
self._channels=channels
self._d = alsaaudio.PCM()
self._d.setchannels(channels)
self._d.setformat(alsaaudio.PCM_FORMAT_S16_LE)
self._d.setperiodsize(int((rate*channels)//fps//2))
self._d.setrate(rate)
def push_nowait(self,stamped_buffer):
self._d.write(stamped_buffer[0].data)
class MoviePlayer(Process):
def __init__(self, filename, *args,**kwargs):
super(MoviePlayer, self).__init__(*args,**kwargs)
TS_VIDEO_RGB24={ 'video1':(0, -1, {'pixel_format':PixelFormats.PIX_FMT_RGB24}), 'audio1':(1,-1,{})}
## create the reader object
self.mp = FFMpegReader(seek_before=0)
## open an audio-video file
self.mp.open(filename,TS_VIDEO_RGB24,buf_size=4096)
def get_size(self):
tracks = self.mp.get_tracks()
return tracks[0].get_size()
def set_buffer(self, buffer_data):
self.buffer_data = buffer_data
def render_to_buffer(self, frame):
buffer_array = np.frombuffer(self.buffer_data, 'B')
frame = np.flipud(frame)
frame = frame.reshape((1, -1))
buffer_array[:] = frame
def run(self):
tracks = self.mp.get_tracks()
tracks[0].set_observer(self.render_to_buffer)
rate = tracks[1].get_samplerate()
channels = tracks[1].get_channels()
fps = tracks[0].get_fps()
ap = AlsaSoundLazyPlayer(rate, channels, fps)
tracks[1].set_observer(ap.push_nowait)
self.mp.run()
def seek(self, pos=0):
if pos is not None and pos > 0:
self.mp.seek_to_seconds(pos)
def stop(self):
self.mp.close()
os.kill(self.pid, signal.SIGKILL) | StimControl/LightStim/Movie.py |
from __future__ import division
import numpy as np
np.seterr(all='raise')
import logging
import os
import signal
import alsaaudio
from multiprocessing import Process
from pyffmpeg import FFMpegReader, PixelFormats
import pygame
from OpenGL.GL.shaders import compileProgram, compileShader
import VisionEgg
import VisionEgg.GL as gl
from VisionEgg.Textures import Texture, TextureObject, TextureStimulus
from VisionEgg.MoreStimuli import Target2D
from Core import Stimulus
from SweepController import StimulusController,SweepSequeStimulusController
class ShaderTexture(Texture):
    """A VisionEgg Texture that renders through a contrast-adjusting fragment shader.

    The GLSL contrast program comes from atduskgreg's shader example:
    https://github.com/atduskgreg/Processing-Shader-Examples/
    """
    def __init__(self,contrast=1.0,*args,**kwargs):
        """Compile the contrast shader and record its uniform locations.

        contrast -- initial contrast factor applied when the texture is drawn.
        Remaining arguments are forwarded to VisionEgg's Texture.
        """
        super(ShaderTexture, self).__init__(*args,**kwargs)
        fragment_source = '''
            uniform sampler2D src_tex_unit0;
            uniform float contrast;
            void main() {
                vec3 color = vec3(texture2D(src_tex_unit0, gl_TexCoord[0].st));
                const vec3 LumCoeff = vec3(0.2125, 0.7154, 0.0721);
                vec3 AvgLumin = vec3(0.5, 0.5, 0.5);
                vec3 intensity = vec3(dot(color, LumCoeff));
                // could substitute a uniform for this 1. and have variable saturation
                vec3 satColor = mix(intensity, color, 1.);
                vec3 conColor = mix(AvgLumin, satColor, contrast);
                gl_FragColor = vec4(conColor, 1);
            }
            '''
        fragment_shader = compileShader(fragment_source, gl.GL_FRAGMENT_SHADER)
        self.contrast_program = compileProgram(fragment_shader)
        # Cache the uniform handles once; they are re-used on every update().
        self.texture_loc = gl.glGetUniformLocation(self.contrast_program, "src_tex_unit0")
        self.contrast_loc = gl.glGetUniformLocation(self.contrast_program, "contrast")
        self.contrast = contrast
    def set_contrast(self, contrast):
        """Set the contrast factor used the next time update() runs."""
        self.contrast = contrast
    def update(self):
        """Install the contrast pixel shader and push the current uniforms."""
        gl.glUseProgram(self.contrast_program)
        gl.glUniform1i(self.texture_loc, 0)
        gl.glUniform1f(self.contrast_loc, self.contrast)
class SurfaceTextureObject(TextureObject):
    """TextureObject that uploads cropped sub-regions of a pygame surface.

    The raw RGB(A) pixel string extracted from the surface is cached in
    ``self.raw_data`` so several viewports can upload different crops of
    the same frame without re-extracting the pixels each time.
    """
    def __init__(self, *args, **kwargs):
        super(SurfaceTextureObject, self).__init__(*args, **kwargs)
        # Cached pygame.image.tostring() result for the current frame.
        self.raw_data = None

    def update_sub_surface(self,
                           texel_data,        # pygame surface holding the frame
                           transfer_pixels,   # True: re-extract pixels from the surface
                           sub_surface_size,  # updated region size
                           unpack_offset=None,  # crop offset
                           update_offset=None,  # update offset
                           mipmap_level=0,
                           data_format=None,  # automatic guess unless set explicitly
                           data_type=None,    # automatic guess unless set explicitly
                           ):
        """Upload a ``sub_surface_size`` region of *texel_data*, cropped at
        *unpack_offset*, into this texture at *update_offset*."""
        # make myself the active texture
        gl.glBindTexture(self.target, self.gl_id)
        if data_format is None:  # guess the format of the data
            if isinstance(texel_data, pygame.surface.Surface):
                if texel_data.get_alpha():
                    data_format = gl.GL_RGBA
                else:
                    data_format = gl.GL_RGB
        data_type = gl.GL_UNSIGNED_BYTE
        target = gl.GL_TEXTURE_2D
        if unpack_offset is None:
            unpack_offset = (0, 0)
        if update_offset is None:
            update_offset = (0, 0)
        width, _height = texel_data.get_size()
        # Re-extract pixels only when asked (or on first use); other
        # callers reuse the cached string for the same frame.
        if transfer_pixels or self.raw_data is None:
            if texel_data.get_alpha():
                self.raw_data = pygame.image.tostring(texel_data, 'RGBA', 1)
            else:
                self.raw_data = pygame.image.tostring(texel_data, 'RGB', 1)
        # Use GL unpack state to crop the region out of the full-frame
        # pixel string without copying it in Python.
        gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, width)
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_PIXELS, unpack_offset[0])
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_ROWS, unpack_offset[1])
        gl.glTexSubImage2D(target,
                           mipmap_level,
                           update_offset[0],
                           update_offset[1],
                           sub_surface_size[0],
                           sub_surface_size[1],
                           data_format,
                           data_type,
                           self.raw_data)
        # Restore default unpack state so later uploads are unaffected.
        gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, 0)
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_PIXELS, 0)
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_ROWS, 0)
class BufferedTextureObject(TextureObject):
    """TextureObject fed from an external byte buffer (e.g. memory shared
    with the MoviePlayer child process) instead of surface pixels.

    *texel_data* is consulted only for its width; the pixel bytes come
    from ``self.buffer_data``, and the format is fixed to tightly packed
    RGB unsigned bytes.
    """
    def __init__(self, buffer_data, *args, **kwargs):
        super(BufferedTextureObject, self).__init__(*args, **kwargs)
        self.buffer_data = buffer_data

    def update_sub_surface(self,
                           texel_data,        # used only for its width
                           transfer_pixels,   # ignored here: buffer is always current
                           sub_surface_size,  # updated region size
                           unpack_offset=None,  # crop offset
                           update_offset=None,  # update offset
                           mipmap_level=0,
                           data_format=None,  # forced to GL_RGB below
                           data_type=None,    # forced to GL_UNSIGNED_BYTE below
                           ):
        """Upload a cropped region of the shared buffer into this texture."""
        # make myself the active texture
        gl.glBindTexture(self.target, self.gl_id)
        data_format = gl.GL_RGB
        data_type = gl.GL_UNSIGNED_BYTE
        target = gl.GL_TEXTURE_2D
        if unpack_offset is None:
            unpack_offset = (0, 0)
        if update_offset is None:
            update_offset = (0, 0)
        width, _height = texel_data.get_size()
        # Zero-copy view of the shared buffer as raw bytes.
        raw_data = np.frombuffer(self.buffer_data, 'B')
        # GL unpack state performs the crop out of the full-width rows.
        gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, width)
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_PIXELS, unpack_offset[0])
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_ROWS, unpack_offset[1])
        gl.glTexSubImage2D(target,
                           mipmap_level,
                           update_offset[0],
                           update_offset[1],
                           sub_surface_size[0],
                           sub_surface_size[1],
                           data_format,
                           data_type,
                           raw_data)
        # Restore default unpack state.
        gl.glPixelStorei(gl.GL_UNPACK_ROW_LENGTH, 0)
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_PIXELS, 0)
        gl.glPixelStorei(gl.GL_UNPACK_SKIP_ROWS, 0)
class ShaderTextureStimulus(TextureStimulus):
    """TextureStimulus that draws a texture object shared between stimuli
    (instead of allocating its own) and resets the shader pipeline after
    drawing so following stimuli render with fixed-function GL."""
    def __init__(self, shared_texture, *args, **kwargs):
        super(ShaderTextureStimulus, self).__init__(*args, **kwargs)
        # Recreate an OpenGL texture object this instance "owns"
        self.texture_object = shared_texture
        self.parameters.texture.load(self.texture_object,
                                     internal_format=gl.GL_RGB,
                                     build_mipmaps=False)

    def draw(self):
        super(ShaderTextureStimulus, self).draw()
        # uninstall shader program
        gl.glUseProgram(0)
class MovieController(StimulusController):
    """ update movie from pygame surface """
    def __init__(self, *args, **kwargs):
        super(MovieController, self).__init__(*args, **kwargs)
        self.surface = self.stimulus.surface
        self.texture = self.stimulus.texture
        self.texture_obj = self.texture.get_texture_object()
        width, height = self.surface.get_size()
        viewport = self.viewport.get_name()
        # Projections for the "half" layouts: the frame packs two
        # half-size views, so the viewport projection is widened/heightened
        # to stretch the half back over the full screen.
        half_width_projection = VisionEgg.Core.OrthographicProjectionNoZClip(
            left=0.0, right=self.viewport.width_pix,
            bottom=-self.viewport.height_pix*0.5,
            top=self.viewport.height_pix*1.5)
        half_height_projection = VisionEgg.Core.OrthographicProjectionNoZClip(
            left=-self.viewport.width_pix*0.5,
            right=self.viewport.width_pix*1.5,
            bottom=0, top=self.viewport.height_pix)
        # Pick which part of the movie frame this viewport shows:
        #   crop_offset   -- where to crop out of the source frame
        #   size          -- size of the cropped region
        #   update_offset -- where to place it in the texture
        if self.p.layout == "2D":
            # Whole frame to every viewport.
            self.crop_offset = (0, 0)
            self.size = (width, height)
            self.update_offset = (0, 0)
        elif self.p.layout in ("LR", "LRH"):
            # Side-by-side: left half -> "left" viewport, right half -> "right".
            if viewport == "left":
                self.crop_offset = (0, 0)
            elif viewport == "right":
                self.crop_offset = (width//2, 0)
            self.size = (width//2, height)
            self.update_offset = (width//4, 0)
        elif self.p.layout in ("RL", "RLH"):
            # Side-by-side with the two eyes swapped.
            if viewport == "left":
                self.crop_offset = (width//2, 0)
            elif viewport == "right":
                self.crop_offset = (0, 0)
            self.size = (width//2, height)
            self.update_offset = (width//4, 0)
        elif self.p.layout in ("TB", "TBH"):
            # Top-bottom packing.
            if viewport == "left":
                self.crop_offset = (0, 0)
            elif viewport == "right":
                self.crop_offset = (0, height//2)
            self.size = (width, height//2)
            self.update_offset = (0, height//4)
        elif self.p.layout in ("BT", "BTH"):
            # Bottom-top packing.
            if viewport == "left":
                self.crop_offset = (0, height//2)
            elif viewport == "right":
                self.crop_offset = (0, 0)
            self.size = (width, height//2)
            self.update_offset = (0, height//4)
        else:
            # NOTE(review): crop_offset/size/update_offset are left unset
            # for unsupported layouts, so during_go_eval will raise
            # AttributeError later — confirm whether this should abort here.
            self.logger.error("Cannot support layout: %s" %self.p.layout)
        if self.p.layout in ("LRH", "RLH"):
            self.viewport.parameters.projection = half_width_projection
        elif self.p.layout in ("TBH", "BTH"):
            self.viewport.parameters.projection = half_height_projection

    def during_go_eval(self):
        # Only the "left" viewport re-extracts pixels from the surface;
        # the other viewport reuses the texture object's cached data.
        transfer_pixels = True if self.viewport.get_name() == "left" else False
        self.texture.set_contrast(self.p.contrast)
        self.texture_obj.update_sub_surface(self.surface,
                                            transfer_pixels=transfer_pixels,
                                            sub_surface_size=self.size,
                                            unpack_offset=self.crop_offset,
                                            update_offset=self.update_offset)
class Movie(Stimulus):
    """Movie stimulus rendered from a pygame surface / shared texture
    object filled by an external decoder (see MoviePlayer).

    Fix: the internal attribute was misspelled ``texure_obj``; it is now
    ``texture_obj``, with the old name kept as an alias for compatibility.

    Args:
        params: script-level parameter overrides applied after load_params().
        surface: pygame surface the movie frames are drawn from.
        texture_obj: shared TextureObject the frames are uploaded into.
        subject, sweepseq, trigger: passed through / stored for controllers.
    """
    def __init__(self, params, surface, texture_obj, subject=None, sweepseq=None, trigger=True, **kwargs):
        super(Movie, self).__init__(subject=subject, params=params, **kwargs)
        self.name = 'timingmovie'
        self.logger = logging.getLogger('LightStim.Movie')
        self.param_names = ['on', 'xorigDeg', 'yorigDeg', 'widthDeg', 'heightDeg']
        # NOTE(review): "defalut" is misspelled, but the base class may look
        # this attribute up by exactly this name — left unchanged; confirm.
        self.defalut_parameters = {'xorigDeg': 0.0,
                                   'yorigDeg': 0.0,
                                   'bgbrightness': 0.0,}
        # load parameters from stimulus_params file
        self.load_params()
        # override params from script
        self.set_parameters(self.parameters, params)
        # Stimulus starts hidden; a controller switches it on.
        self.parameters.on = False
        self.sweepseq = sweepseq
        self.trigger = trigger
        self.surface = surface
        self.texture_obj = texture_obj
        # Backward-compatible alias for the old misspelled attribute name.
        self.texure_obj = texture_obj
        self.make_stimuli()
        self.register_controllers()

    def make_stimuli(self):
        """Build the background rectangle and the shader texture stimulus."""
        size = self.viewport.get_size()
        self.background = Target2D(position=(size[0]/2, size[1]/2),
                                   anchor='center',
                                   size=size,
                                   on=True)
        self.bgp = self.background.parameters
        # set background color before real sweep
        bgb = self.parameters.bgbrightness
        self.bgp.color = bgb, bgb, bgb, 1.0
        contrast = self.parameters.contrast
        self.texture = ShaderTexture(contrast, self.surface)
        self.texture_stim = ShaderTextureStimulus(texture=self.texture,
                                                  shared_texture=self.texture_obj,
                                                  position=(size[0]/2, size[1]/2),
                                                  anchor='center',
                                                  mipmaps_enabled=0,
                                                  texture_min_filter=gl.GL_LINEAR)
        self.tp = self.texture_stim.parameters
        self.stimuli = (self.background, self.texture_stim)

    def register_controllers(self):
        """Attach the controller that streams frames into the texture."""
        self.controllers.append(MovieController(self))
class TimingController(SweepSequeStimulusController):
    """Drives the movie texture's visibility from the sweep sequence:
    each evaluation consumes the next parameter and uses its truthiness
    as the on/off state."""
    def __init__(self, *args, **kwargs):
        super(TimingController, self).__init__(*args, **kwargs)
        self.tp = self.stimulus.tp

    def during_go_eval(self):
        # next_param() yields the on/off flag for this sweep step.
        self.tp.on = bool(self.next_param())
class TimingSetMovie(Movie):
    """Movie variant whose on/off visibility is additionally driven by a
    sweep-seque TimingController."""
    def register_controllers(self):
        super(TimingSetMovie, self).register_controllers()
        self.logger.info('Register TimingController.')
        self.controllers.append(TimingController(self))
class AlsaSoundLazyPlayer:
    """Minimal ALSA sink for the movie's audio track.

    The PCM device is configured once for the stream's sample rate and
    channel count; buffers pushed by the ffmpeg audio-track observer are
    written straight to the device.
    """
    def __init__(self, rate=44100, channels=2, fps=25):
        self._rate = rate
        self._channels = channels
        self._d = alsaaudio.PCM()  # default playback device
        self._d.setchannels(channels)
        self._d.setformat(alsaaudio.PCM_FORMAT_S16_LE)
        # Period sized relative to one video frame's worth of samples —
        # presumably to keep A/V latency low; confirm against the decoder's
        # audio buffer size.
        self._d.setperiodsize(int((rate*channels)//fps//2))
        self._d.setrate(rate)

    def push_nowait(self, stamped_buffer):
        # stamped_buffer[0] is the sample array (has a .data buffer);
        # presumably index 1 carries a timestamp, which is ignored here.
        self._d.write(stamped_buffer[0].data)
class MoviePlayer(Process):
    """Child process that decodes a movie with pyffmpeg, writing each
    video frame into a shared byte buffer and streaming audio to ALSA.

    Usage: construct, call set_buffer() with a writable shared buffer,
    then start(); stop() kills the child.
    """
    def __init__(self, filename, *args, **kwargs):
        super(MoviePlayer, self).__init__(*args, **kwargs)
        # Track selection: stream 0 as RGB24 video, stream 1 as audio.
        TS_VIDEO_RGB24 = {'video1': (0, -1, {'pixel_format': PixelFormats.PIX_FMT_RGB24}), 'audio1': (1, -1, {})}
        ## create the reader object
        self.mp = FFMpegReader(seek_before=0)
        ## open an audio-video file
        self.mp.open(filename, TS_VIDEO_RGB24, buf_size=4096)

    def get_size(self):
        # (width, height) of the video track.
        tracks = self.mp.get_tracks()
        return tracks[0].get_size()

    def set_buffer(self, buffer_data):
        # Shared writable buffer the frames are copied into; must be set
        # before start() so the child process inherits it.
        self.buffer_data = buffer_data

    def render_to_buffer(self, frame):
        # Copy one decoded frame into the shared buffer, flipped
        # vertically to match OpenGL's bottom-up texture origin.
        buffer_array = np.frombuffer(self.buffer_data, 'B')
        frame = np.flipud(frame)
        frame = frame.reshape((1, -1))
        buffer_array[:] = frame

    def run(self):
        # Runs in the child process: wire the observers and decode.
        tracks = self.mp.get_tracks()
        tracks[0].set_observer(self.render_to_buffer)
        rate = tracks[1].get_samplerate()
        channels = tracks[1].get_channels()
        fps = tracks[0].get_fps()
        ap = AlsaSoundLazyPlayer(rate, channels, fps)
        tracks[1].set_observer(ap.push_nowait)
        self.mp.run()  # blocks until playback finishes

    def seek(self, pos=0):
        # Non-positive / None positions are ignored.
        if pos is not None and pos > 0:
            self.mp.seek_to_seconds(pos)

    def stop(self):
        # mp.run() blocks inside the child, so close the reader and then
        # forcibly terminate the child process.
        self.mp.close()
        os.kill(self.pid, signal.SIGKILL)
import pytest
from construct import Container, ListContainer
from construct_typed import DataclassStruct
from bonfo.msp.codes import MSP
from bonfo.msp.fields.base import Direction
from bonfo.msp.fields.pids import PidAdvanced, PidCoefficients
from bonfo.msp.versions import MSPVersions
from tests import messages
from tests.utils import minus_preamble
def test_pid_coefficients():
    """PidCoefficients is a read-only (OUT) MSP_PID message with no setter."""
    assert PidCoefficients.get_direction() == Direction.OUT
    # NOTE(review): get_code is compared without calling it, unlike
    # get_direction() above — presumably a plain class attribute; confirm.
    assert PidCoefficients.get_code == MSP.PID
    assert PidCoefficients.set_code is None
    assert isinstance(PidCoefficients.get_struct(), DataclassStruct)
def test_pid_coefficients_parse():
    """A captured MSP_PID payload parses into the expected axis PID table."""
    data_bytes = minus_preamble(messages.pid)
    data = PidCoefficients.get_struct().parse(data_bytes)
    assert isinstance(data, PidCoefficients)
    # Expected values taken from the recorded fixture in tests.messages.
    assert data == PidCoefficients(
        pids=ListContainer(
            [
                Container(p=22, i=68, d=31),
                Container(p=26, i=68, d=31),
                Container(p=29, i=76, d=4),
                Container(p=53, i=55, d=75),
                Container(p=40, i=0, d=0),
            ]
        )
    )
def test_pid_parse_and_build_non_destructive():
    """PidCoefficients struct round-trips (parse then build) byte-for-byte."""
    data_bytes = minus_preamble(messages.pid)
    struct = PidCoefficients.get_struct()
    data = struct.parse(data_bytes)
    assert isinstance(data, PidCoefficients)
    output_data_bytes = struct.build(data)
    assert data_bytes == output_data_bytes
def test_pid_advanced():
    """PidAdvanced is a read/write (BOTH) message with get and set codes."""
    assert PidAdvanced.get_direction() == Direction.BOTH
    # NOTE(review): get_code/set_code compared without calling — presumably
    # class attributes, as in test_pid_coefficients; confirm.
    assert PidAdvanced.get_code == MSP.PID_ADVANCED
    assert PidAdvanced.set_code == MSP.SET_PID_ADVANCED
    assert isinstance(PidAdvanced.get_struct(), DataclassStruct)
def xtest_pid_advanced_parse():
    """Parse a captured MSP_PID_ADVANCED payload (disabled via the x prefix).

    Fix: pass ``MSPVersions.V1_43.value`` to parse(), consistent with
    test_pid_advanced_parse_and_build_non_destructive — the enum member
    itself was being passed here.
    """
    data_bytes = minus_preamble(messages.pid_advanced)
    data = PidAdvanced.get_struct().parse(data_bytes, msp=MSPVersions.V1_43.value)
    assert isinstance(data, PidAdvanced)
    # TODO: don't require unused parameters fields
    assert data == PidAdvanced(
        feedforward_transition=1,
        rate_accel_limit=1,
        yaw_rate_accel_limit=1,
        level_angle_limit=1,
        iterm_throttle_threshold=1,
        iterm_accelerator_gain=1,
        iterm_rotation=1,
        iterm_relax=1,
        iterm_relax_type=1,
        abs_control_gain=1,
        throttle_boost=1,
        acro_trainer_angle_limit=1,
        pid_roll_f=1,
        pid_pitch_f=1,
        pid_yaw_f=1,
        anti_gravity_mode=1,
        d_min_roll=1,
        d_min_pitch=1,
        d_min_yaw=1,
        d_min_gain=1,
        d_min_advance=1,
        use_integrated_yaw=1,
        integrated_yaw_relax=1,
        iterm_relax_cutoff=1,
        motor_output_limit=1,
        auto_profile_cell_count=1,
        dyn_idle_min_rpm=1,
        feedforward_averaging=1,
        feedforward_smooth_factor=1,
        feedforward_boost=1,
        feedforward_max_rate_limit=1,
        feedforward_jitter_factor=1,
        vbat_sag_compensation=1,
        thrust_linearization=1,
    )
def test_pid_advanced_parse_and_build_non_destructive():
    """PidAdvanced generated struct should be non-destructive to bytestring."""
    data_bytes = minus_preamble(messages.pid_advanced)
    struct = PidAdvanced.get_struct()
    # The struct layout is version-dependent, so the MSP version value is
    # threaded through both parse and build.
    data = struct.parse(data_bytes, msp=MSPVersions.V1_43.value)
    assert isinstance(data, PidAdvanced)
    output_data_bytes = struct.build(data, msp=MSPVersions.V1_43.value)
    assert data_bytes == output_data_bytes
def test_pid_advanced_dataclass_no_args_errors():
    """No args throws a type error with missing count."""
    with pytest.raises(TypeError) as exec_info:
        PidAdvanced()
    # The dataclass has 34 required fields; the TypeError message should
    # report that count.
    assert "34" in exec_info.exconly()
def test_pid_advanced_dataclass_init_good_data():
    """Constructing PidAdvanced with all 34 fields supplied must not raise."""
    field_names = (
        "feedforward_transition", "rate_accel_limit", "yaw_rate_accel_limit",
        "level_angle_limit", "iterm_throttle_threshold",
        "iterm_accelerator_gain", "iterm_rotation", "iterm_relax",
        "iterm_relax_type", "abs_control_gain", "throttle_boost",
        "acro_trainer_angle_limit", "pid_roll_f", "pid_pitch_f", "pid_yaw_f",
        "anti_gravity_mode", "d_min_roll", "d_min_pitch", "d_min_yaw",
        "d_min_gain", "d_min_advance", "use_integrated_yaw",
        "integrated_yaw_relax", "iterm_relax_cutoff", "motor_output_limit",
        "auto_profile_cell_count", "dyn_idle_min_rpm", "feedforward_averaging",
        "feedforward_smooth_factor", "feedforward_boost",
        "feedforward_max_rate_limit", "feedforward_jitter_factor",
        "vbat_sag_compensation", "thrust_linearization",
    )
    # Every field set to 1, as in the parse fixtures.
    PidAdvanced(**{name: 1 for name in field_names})
from construct import Container, ListContainer
from construct_typed import DataclassStruct
from bonfo.msp.codes import MSP
from bonfo.msp.fields.base import Direction
from bonfo.msp.fields.pids import PidAdvanced, PidCoefficients
from bonfo.msp.versions import MSPVersions
from tests import messages
from tests.utils import minus_preamble
def test_pid_coefficients():
assert PidCoefficients.get_direction() == Direction.OUT
assert PidCoefficients.get_code == MSP.PID
assert PidCoefficients.set_code is None
assert isinstance(PidCoefficients.get_struct(), DataclassStruct)
def test_pid_coefficients_parse():
data_bytes = minus_preamble(messages.pid)
data = PidCoefficients.get_struct().parse(data_bytes)
assert isinstance(data, PidCoefficients)
assert data == PidCoefficients(
pids=ListContainer(
[
Container(p=22, i=68, d=31),
Container(p=26, i=68, d=31),
Container(p=29, i=76, d=4),
Container(p=53, i=55, d=75),
Container(p=40, i=0, d=0),
]
)
)
def test_pid_parse_and_build_non_destructive():
"""Pid advanced generated struct should be non-destructive to bytestring."""
data_bytes = minus_preamble(messages.pid)
struct = PidCoefficients.get_struct()
data = struct.parse(data_bytes)
assert isinstance(data, PidCoefficients)
output_data_bytes = struct.build(data)
assert data_bytes == output_data_bytes
def test_pid_advanced():
assert PidAdvanced.get_direction() == Direction.BOTH
assert PidAdvanced.get_code == MSP.PID_ADVANCED
assert PidAdvanced.set_code == MSP.SET_PID_ADVANCED
assert isinstance(PidAdvanced.get_struct(), DataclassStruct)
def xtest_pid_advanced_parse():
data_bytes = minus_preamble(messages.pid_advanced)
data = PidAdvanced.get_struct().parse(data_bytes, msp=MSPVersions.V1_43)
assert isinstance(data, PidAdvanced)
# TODO: don't require unused parameters fields
assert data == PidAdvanced(
feedforward_transition=1,
rate_accel_limit=1,
yaw_rate_accel_limit=1,
level_angle_limit=1,
iterm_throttle_threshold=1,
iterm_accelerator_gain=1,
iterm_rotation=1,
iterm_relax=1,
iterm_relax_type=1,
abs_control_gain=1,
throttle_boost=1,
acro_trainer_angle_limit=1,
pid_roll_f=1,
pid_pitch_f=1,
pid_yaw_f=1,
anti_gravity_mode=1,
d_min_roll=1,
d_min_pitch=1,
d_min_yaw=1,
d_min_gain=1,
d_min_advance=1,
use_integrated_yaw=1,
integrated_yaw_relax=1,
iterm_relax_cutoff=1,
motor_output_limit=1,
auto_profile_cell_count=1,
dyn_idle_min_rpm=1,
feedforward_averaging=1,
feedforward_smooth_factor=1,
feedforward_boost=1,
feedforward_max_rate_limit=1,
feedforward_jitter_factor=1,
vbat_sag_compensation=1,
thrust_linearization=1,
)
def test_pid_advanced_parse_and_build_non_destructive():
"""Pi aAdvanced generated struct should be non-destructive to bytescring."""
data_bytes = minus_preamble(messages.pid_advanced)
struct = PidAdvanced.get_struct()
data = struct.parse(data_bytes, msp=MSPVersions.V1_43.value)
assert isinstance(data, PidAdvanced)
output_data_bytes = struct.build(data, msp=MSPVersions.V1_43.value)
assert data_bytes == output_data_bytes
def test_pid_advanced_dataclass_no_args_errors():
"""No args throws a type error with missing count."""
with pytest.raises(TypeError) as exec_info:
PidAdvanced()
assert "34" in exec_info.exconly()
def test_pid_advanced_dataclass_init_good_data():
"""Pid advanced shouldn't error when initialized."""
PidAdvanced(
feedforward_transition=1,
rate_accel_limit=1,
yaw_rate_accel_limit=1,
level_angle_limit=1,
iterm_throttle_threshold=1,
iterm_accelerator_gain=1,
iterm_rotation=1,
iterm_relax=1,
iterm_relax_type=1,
abs_control_gain=1,
throttle_boost=1,
acro_trainer_angle_limit=1,
pid_roll_f=1,
pid_pitch_f=1,
pid_yaw_f=1,
anti_gravity_mode=1,
d_min_roll=1,
d_min_pitch=1,
d_min_yaw=1,
d_min_gain=1,
d_min_advance=1,
use_integrated_yaw=1,
integrated_yaw_relax=1,
iterm_relax_cutoff=1,
motor_output_limit=1,
auto_profile_cell_count=1,
dyn_idle_min_rpm=1,
feedforward_averaging=1,
feedforward_smooth_factor=1,
feedforward_boost=1,
feedforward_max_rate_limit=1,
feedforward_jitter_factor=1,
vbat_sag_compensation=1,
thrust_linearization=1,
) | 0.515376 | 0.474022 |
# setuptools build script for the pycopia-storage package.
# Data files and scripts are selected per host platform via platutils.
import sys, os
from glob import glob
from setuptools import setup, find_packages

import platutils

platinfo = platutils.get_platform()

NAME = "pycopia-storage"
VERSION = "1.0"

if platinfo.is_linux():
    # System configuration examples and PAM service files.
    DATAFILES = [
        ('/etc/pycopia', glob("etc/*.example") + glob("etc/*.dist")),
        ('/etc/pam.d', glob("etc/pam.d/*")),
    ]
    # Distribution-specific init scripts.
    if platinfo.is_gentoo():
        DATAFILES.append(('/etc/init.d', glob("etc/init.d/gentoo/*")))
    elif platinfo.is_redhat():
        DATAFILES.append(('/etc/init.d', glob("etc/init.d/redhat/*")))
    # Web media installs under the site named by $WEBSITE (default
    # "localhost").
    WEBSITE = os.environ.get("WEBSITE", "localhost")
    DATAFILES.extend([
        #(os.path.join("/var", "www", WEBSITE, 'htdocs'), glob("doc/html/*.html")),
        #(os.path.join("/var", "www", WEBSITE, 'cgi-bin'), glob("doc/html/cgi-bin/*.py")),
        (os.path.join("/var", "www", WEBSITE, 'media', 'js'), glob("media/js/*.js")),
        (os.path.join("/var", "www", WEBSITE, 'media', 'css'), glob("media/css/*.css")),
        #(os.path.join("/var", "www", WEBSITE, 'media', 'images'), glob("media/images/*.png")),
    ])
    SCRIPTS = glob("bin/*")
elif platinfo.is_osx():
    DATAFILES = [
        ('/etc/pycopia', glob("etc/*.example") + glob("etc/*.dist")),
    ]
    SCRIPTS = glob("bin/*")
else:
    # Unsupported platform: install pure Python only.
    DATAFILES = []
    SCRIPTS = []

setup(name=NAME, version=VERSION,
    namespace_packages = ["pycopia"],
    packages = find_packages(),
#    install_requires = [
#        'pycopia-core>=1.0.dev-r138,==dev',
#        'pycopia-CLI>=1.0.dev-r138,==dev',
#        'sqlalchemy>=0.9.0',
#        'pycrypto>=2.0',
#        'urwid>=1.0',
#        #'psycopg>=2.0',
#        ],
    dependency_links = [
        "http://www.pycopia.net/download/"
    ],
    test_suite = "test.StorageTests",
    scripts = SCRIPTS,
    data_files = DATAFILES,
    description = "Pycopia storage and object model.",
    long_description = """Pycopia persistent storage and object model.
    Provides a storage build on top of Durus that defines container types
    and some persistent objects useful for networks and network device
    testing.
    """,
    license = "LGPL",
    author = "<NAME>",
    author_email = "<EMAIL>",
    keywords = "pycopia framework",
    url = "http://www.pycopia.net/",
    #download_url = "ftp://ftp.pycopia.net/pub/python/%s.%s.tar.gz" % (NAME, VERSION),
    classifiers = ["Operating System :: POSIX",
                   "Topic :: Software Development :: Libraries :: Python Modules",
                   "Topic :: System :: Networking :: Monitoring",
                   "Intended Audience :: Developers"],
)
import sys, os
from glob import glob
from setuptools import setup, find_packages
import platutils
platinfo = platutils.get_platform()
NAME = "pycopia-storage"
VERSION = "1.0"
if platinfo.is_linux():
DATAFILES = [
('/etc/pycopia', glob("etc/*.example") + glob("etc/*.dist")),
('/etc/pam.d', glob("etc/pam.d/*")),
]
if platinfo.is_gentoo():
DATAFILES.append(('/etc/init.d', glob("etc/init.d/gentoo/*")))
elif platinfo.is_redhat():
DATAFILES.append(('/etc/init.d', glob("etc/init.d/redhat/*")))
WEBSITE = os.environ.get("WEBSITE", "localhost")
DATAFILES.extend([
#(os.path.join("/var", "www", WEBSITE, 'htdocs'), glob("doc/html/*.html")),
#(os.path.join("/var", "www", WEBSITE, 'cgi-bin'), glob("doc/html/cgi-bin/*.py")),
(os.path.join("/var", "www", WEBSITE, 'media', 'js'), glob("media/js/*.js")),
(os.path.join("/var", "www", WEBSITE, 'media', 'css'), glob("media/css/*.css")),
#(os.path.join("/var", "www", WEBSITE, 'media', 'images'), glob("media/images/*.png")),
])
SCRIPTS = glob("bin/*")
elif platinfo.is_osx():
DATAFILES = [
('/etc/pycopia', glob("etc/*.example") + glob("etc/*.dist")),
]
SCRIPTS = glob("bin/*")
else:
DATAFILES = []
SCRIPTS = []
setup (name=NAME, version=VERSION,
namespace_packages = ["pycopia"],
packages = find_packages(),
# install_requires = [
# 'pycopia-core>=1.0.dev-r138,==dev',
# 'pycopia-CLI>=1.0.dev-r138,==dev',
# 'sqlalchemy>=0.9.0',
# 'pycrypto>=2.0',
# 'urwid>=1.0',
# #'psycopg>=2.0',
# ],
dependency_links = [
"http://www.pycopia.net/download/"
],
test_suite = "test.StorageTests",
scripts = SCRIPTS,
data_files = DATAFILES,
description = "Pycopia storage and object model.",
long_description = """Pycopia persistent storage and object model.
Provides a storage build on top of Durus that defines container types
and some persistent objects useful for networks and network device
testing.
""",
license = "LGPL",
author = "<NAME>",
author_email = "<EMAIL>",
keywords = "pycopia framework",
url = "http://www.pycopia.net/",
#download_url = "ftp://ftp.pycopia.net/pub/python/%s.%s.tar.gz" % (NAME, VERSION),
classifiers = ["Operating System :: POSIX",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Networking :: Monitoring",
"Intended Audience :: Developers"],
) | 0.236781 | 0.073032 |
"""Tests involving the tf.distributed datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import reference_test_base
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
def dataset_no_vars_loop(ds, dds):
    """Loop over a distributed dataset with no loop-carried variables.

    Note: the exact syntactic shape of these reference functions is the
    thing under test (AutoGraph conversion), so do not restructure them.
    """
    for pr in dds:
        tf.print(ds.reduce('SUM', pr, axis=None))
def iterator_no_vars_loop(ds, dds):
    """Same as dataset_no_vars_loop but looping over an explicit iterator."""
    for pr in iter(dds):
        tf.print(ds.reduce('SUM', pr, axis=None))
def dataset_single_var_loop(ds, dds):
    """Loop with one loop-carried variable accumulated across replicas."""
    s = 0
    for pr in dds:
        # TODO(mdan): It would be nice to be able to write s = s * 10 + pr.
        s = s * 10 + ds.reduce('SUM', pr, axis=None)
        # TODO(mdan): This looks like a bug.
        s.set_shape(())
    return s
def iterator_single_var_loop(ds, dds):
    """Iterator variant of dataset_single_var_loop."""
    s = 0
    for pr in iter(dds):
        s = s * 10 + ds.reduce('SUM', pr, axis=None)
    return s
def dataset_two_vars_loop(ds, dds):
    """Loop carrying two variables (a sum and a product)."""
    s = 0
    p = 1
    for pr in dds:
        e = ds.reduce('SUM', pr, axis=None)
        e.set_shape(())
        s += e
        p *= e
    return s, p
def iterator_two_vars_loop(ds, dds):
    """Iterator variant of dataset_two_vars_loop."""
    s = 0
    p = 1
    for pr in iter(dds):
        e = ds.reduce('SUM', pr, axis=None)
        e.set_shape(())
        s += e
        p *= e
    return s, p
def dataset_enumeration(ds, dds):
    """Loop using enumerate() over a distributed dataset."""
    s = 0
    p = 1
    for i, pr in enumerate(dds):
        e = ds.reduce('SUM', pr, axis=None)
        e.set_shape(())
        s = s * 10 + e
        p *= i
    return s, p
def iterator_next(ds, dds):
    """Single explicit next() call on a distributed iterator."""
    itr = iter(dds)
    return ds.reduce('SUM', next(itr), axis=None)
def iterator_next_multiple_calls(ds, dds):
    """Two sequential next() calls; order must be preserved."""
    itr = iter(dds)
    a = ds.reduce('SUM', next(itr), axis=None)
    b = ds.reduce('SUM', next(itr), axis=None)
    return a * 10 + b
def iterator_next_in_limited_loop(ds, dds, n):
    """next() inside a bounded range(n) loop."""
    itr = iter(dds)
    s = 0
    for _ in range(n):
        s = s * 10 + ds.reduce('SUM', next(itr), axis=None)
    return s
def iterator_next_stopping(ds, dds, cond):
    # This case will raise, but not the expected StopIteration error.
    itr = iter(dds)
    while cond:
        ds.reduce('SUM', next(itr), axis=None)
def iterator_next_with_catching_stop_iteration(ds, dds, cond):
    # This is the one instance when the use of TF iterators does not work as
    # intended. In graph mode, the `except` below will never catch, and the
    # tf.function will raise the error instead.
    # TODO(b/132311724): The error should be friendlier here.
    # Note: b/132298783 covers actually supporting this pattern.
    itr = iter(dds)
    try:
        while cond:
            ds.reduce('SUM', next(itr), axis=None)
    except StopIteration:
        pass
class ReferenceTest(reference_test_base.TestCase):
    """Checks tf.function against eager execution for the distributed
    dataset patterns above, and that unsupported iterator loop patterns
    raise the documented errors."""

    def setUp(self):
        super(ReferenceTest, self).setUp()
        # Split the single CPU into two virtual devices so that
        # MirroredStrategy actually mirrors across two replicas.
        cpus = tf.config.experimental.list_physical_devices('CPU')
        tf.config.experimental.set_virtual_device_configuration(
            cpus[0], [tf.config.experimental.VirtualDeviceConfiguration()] * 2)
        strategy = tf.distribute.MirroredStrategy()
        dataset = tf.data.Dataset.from_tensor_slices(
            tf.reshape(tf.range(40), (10, 4)))
        self.ds = strategy
        self.dds = strategy.experimental_distribute_dataset(dataset)

    def test_dataset_no_vars_loop(self):
        self.assertFunctionMatchesEager(dataset_no_vars_loop, self.ds, self.dds)

    def test_iterator_no_vars_loop(self):
        # Looping a distributed *iterator* inside tf.function is unsupported.
        with self.assertRaises(RuntimeError):
            tf.function(iterator_no_vars_loop)(self.ds, self.dds)

    def test_dataset_single_var_loop(self):
        self.assertFunctionMatchesEager(dataset_single_var_loop, self.ds, self.dds)

    def test_iterator_single_var_loop(self):
        with self.assertRaises(RuntimeError):
            tf.function(iterator_single_var_loop)(self.ds, self.dds)

    def test_dataset_two_vars_loop(self):
        self.assertFunctionMatchesEager(dataset_two_vars_loop, self.ds, self.dds)

    def test_iterator_two_vars_loop(self):
        with self.assertRaises(RuntimeError):
            tf.function(iterator_two_vars_loop)(self.ds, self.dds)

    def test_iterator_next(self):
        self.assertFunctionMatchesEager(iterator_next, self.ds, self.dds)

    def test_iterator_next_multiple_calls(self):
        self.assertFunctionMatchesEager(iterator_next_multiple_calls, self.ds,
                                        self.dds)

    def test_iterator_next_in_limited_loop(self):
        self.assertFunctionMatchesEager(iterator_next_in_limited_loop, self.ds,
                                        self.dds, 0)
        self.assertFunctionMatchesEager(iterator_next_in_limited_loop, self.ds,
                                        self.dds, 1)
        self.assertFunctionMatchesEager(iterator_next_in_limited_loop, self.ds,
                                        self.dds, 3)

    def test_iterator_next_stopping(self):
        # In graph mode exhaustion surfaces as OutOfRangeError, not
        # StopIteration.
        with self.assertRaises(tf.errors.OutOfRangeError):
            tf.function(iterator_next_stopping)(self.ds, self.dds, tf.constant(True))

    def test_iterator_next_with_catching_stop_iteration(self):
        with self.assertRaises(tf.errors.OutOfRangeError):
            tf.function(iterator_next_with_catching_stop_iteration)(self.ds, self.dds,
                                                                    tf.constant(True))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
    tf.test.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import reference_test_base
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
def dataset_no_vars_loop(ds, dds):
for pr in dds:
tf.print(ds.reduce('SUM', pr, axis=None))
def iterator_no_vars_loop(ds, dds):
for pr in iter(dds):
tf.print(ds.reduce('SUM', pr, axis=None))
def dataset_single_var_loop(ds, dds):
s = 0
for pr in dds:
# TODO(mdan): It would be nice to be able to write s = s * 10 + pr.
s = s * 10 + ds.reduce('SUM', pr, axis=None)
# TODO(mdan): This looks like a bug.
s.set_shape(())
return s
def iterator_single_var_loop(ds, dds):
s = 0
for pr in iter(dds):
s = s * 10 + ds.reduce('SUM', pr, axis=None)
return s
def dataset_two_vars_loop(ds, dds):
s = 0
p = 1
for pr in dds:
e = ds.reduce('SUM', pr, axis=None)
e.set_shape(())
s += e
p *= e
return s, p
def iterator_two_vars_loop(ds, dds):
s = 0
p = 1
for pr in iter(dds):
e = ds.reduce('SUM', pr, axis=None)
e.set_shape(())
s += e
p *= e
return s, p
def dataset_enumeration(ds, dds):
s = 0
p = 1
for i, pr in enumerate(dds):
e = ds.reduce('SUM', pr, axis=None)
e.set_shape(())
s = s * 10 + e
p *= i
return s, p
def iterator_next(ds, dds):
itr = iter(dds)
return ds.reduce('SUM', next(itr), axis=None)
def iterator_next_multiple_calls(ds, dds):
itr = iter(dds)
a = ds.reduce('SUM', next(itr), axis=None)
b = ds.reduce('SUM', next(itr), axis=None)
return a * 10 + b
def iterator_next_in_limited_loop(ds, dds, n):
itr = iter(dds)
s = 0
for _ in range(n):
s = s * 10 + ds.reduce('SUM', next(itr), axis=None)
return s
def iterator_next_stopping(ds, dds, cond):
# This case will raise, but not the expected StopIteration error.
itr = iter(dds)
while cond:
ds.reduce('SUM', next(itr), axis=None)
def iterator_next_with_catching_stop_iteration(ds, dds, cond):
# This is the one instance when the use of TF iterators does not work as
# intended. In graph mode, the `except` below will never catch, and the
# tf.function will raise the error instead.
# TODO(b/132311724): The error should be friendlier here.
# Note: b/132298783 covers actually supporting this pattern.
itr = iter(dds)
try:
while cond:
ds.reduce('SUM', next(itr), axis=None)
except StopIteration:
pass
class ReferenceTest(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
cpus = tf.config.experimental.list_physical_devices('CPU')
tf.config.experimental.set_virtual_device_configuration(
cpus[0], [tf.config.experimental.VirtualDeviceConfiguration()] * 2)
strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.from_tensor_slices(
tf.reshape(tf.range(40), (10, 4)))
self.ds = strategy
self.dds = strategy.experimental_distribute_dataset(dataset)
def test_dataset_no_vars_loop(self):
self.assertFunctionMatchesEager(dataset_no_vars_loop, self.ds, self.dds)
def test_iterator_no_vars_loop(self):
with self.assertRaises(RuntimeError):
tf.function(iterator_no_vars_loop)(self.ds, self.dds)
def test_dataset_single_var_loop(self):
self.assertFunctionMatchesEager(dataset_single_var_loop, self.ds, self.dds)
def test_iterator_single_var_loop(self):
with self.assertRaises(RuntimeError):
tf.function(iterator_single_var_loop)(self.ds, self.dds)
def test_dataset_two_vars_loop(self):
self.assertFunctionMatchesEager(dataset_two_vars_loop, self.ds, self.dds)
def test_iterator_two_vars_loop(self):
with self.assertRaises(RuntimeError):
tf.function(iterator_two_vars_loop)(self.ds, self.dds)
def test_iterator_next(self):
self.assertFunctionMatchesEager(iterator_next, self.ds, self.dds)
def test_iterator_next_multiple_calls(self):
self.assertFunctionMatchesEager(iterator_next_multiple_calls, self.ds,
self.dds)
def test_iterator_next_in_limited_loop(self):
self.assertFunctionMatchesEager(iterator_next_in_limited_loop, self.ds,
self.dds, 0)
self.assertFunctionMatchesEager(iterator_next_in_limited_loop, self.ds,
self.dds, 1)
self.assertFunctionMatchesEager(iterator_next_in_limited_loop, self.ds,
self.dds, 3)
def test_iterator_next_stopping(self):
with self.assertRaises(tf.errors.OutOfRangeError):
tf.function(iterator_next_stopping)(self.ds, self.dds, tf.constant(True))
def test_iterator_next_with_catching_stop_iteration(self):
with self.assertRaises(tf.errors.OutOfRangeError):
tf.function(iterator_next_with_catching_stop_iteration)(self.ds, self.dds,
tf.constant(True))
if __name__ == '__main__':
tf.test.main() | 0.637708 | 0.366448 |
import json
import os
import sys
import time
from json.decoder import JSONDecodeError
from shutil import which
from selenium import webdriver
def get_exec_path():
    """Locate the chromedriver executable.

    Searches PATH first, then the current directory. Exits the process
    with status 1 if it cannot be found.
    """
    # path=None means "search PATH"; '.' falls back to the working dir.
    for search_path in (None, '.'):
        found = which('chromedriver', path=search_path)
        if found is not None:
            return found
    print('No chrome driver...')
    sys.exit(1)
def get_driver():
    """Build a headless Chrome webdriver configured for unattended runs."""
    # Chrome options
    op = webdriver.ChromeOptions()
    # Disable the notification prompt in the browser's top-left corner.
    op.add_argument("--disable-notifications")
    # Hide the "Chrome is being controlled by automated software" bar.
    op.add_argument("disable-infobars")
    op.add_argument("--start-maximized")
    # No gui
    op.add_argument("--headless")
    # Run under root user
    op.add_argument("--no-sandbox")
    op.add_argument("--disable-dev-shm-usage")
    op.add_argument("--disable-gpu")
    # Modify User agent
    # op.add_argument(
    #     "user-agent='"
    #     "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) "
    #     "Chrome/84.0.4147.125 Mobile Safari/537.36'")
    driver = webdriver.Chrome(executable_path=get_exec_path(), options=op)
    return driver
def log(single, addition):
    """Print a timestamped log line for one account entry.

    Args:
        single: mapping with at least 'yzxx' (name) and 'loginName' (student id).
        addition: free-text message appended to the line.

    Returns:
        The formatted message that was printed (convenient for testing).
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Same output as before; plain indexing instead of __getitem__ calls.
    msg = "[{} 姓名:{} 学号: {}] {}".format(
        stamp, single['yzxx'], single['loginName'], addition)
    print(msg)
    return msg
def task(driver, single):
    """Open the check-in page for one account and submit its check-in payload.

    Args:
        driver: a live selenium WebDriver.
        single: mapping with 'loginName', 'loginType' and a 'checkIn' dict
            that is serialized into the injected AJAX call.
    """
    driver.get("https://fxgl.jx.edu.cn/4136010406/public/homeQd?loginName="
               + single.__getitem__('loginName')
               + "&loginType=" + str(single.__getitem__('loginType')))
    # Wait a little longer so the page's JS/CSS finish loading.
    time.sleep(5)
    log(single, '自动签到中...')
    # jQuery AJAX POST of the check-in payload, executed inside the page.
    js = 'async function(){let t=\'REPLACE\';return t=JSON.parse(t),await async function(t){return await new Promise(' \
         'n=>{$.ajax({url:"https://fxgl.jx.edu.cn/4136010406/studentQd/saveStu",method:"post",data:t,' \
         'success:function(t){return n(JSON.stringify(t))}})})}(t)}(); '
    js = js.replace("REPLACE", json.dumps(single.__getitem__('checkIn')))
    print(driver.execute_script('return ' + js))
    # (Disabled) survey auto-fill, kept for reference:
    # time.sleep(3)
    # log(single, '自动填写问卷中...')
    # js = 'async function(){var t=\'REPLACE\',n="https://fxgl.jx.edu.cn/4136010406/";return 0==(t=JSON.parse(' \
    #      't)).sf?n+="dcwjEditNew/dcwjSubmit2":n+="dcwjEditNew/dcwjTsubmit2",await async function(t,n){return await ' \
    #      'new Promise(i=>{$.ajax({type:"post",url:t,data:{dcwj:JSON.stringify(n)},success:function(t){return i(' \
    #      'JSON.stringify(t))}})})}(n,t)}(); '
    # js = js.replace("REPLACE", json.dumps(single.__getitem__('paper')))
    # print(driver.execute_script('return ' + js))
    print()
def main():
    """Load config.json, check in every configured account, print a summary.

    Creates a template config.json when the file is missing; reports
    permission and JSON-decoding problems without raising.
    """
    json_filename = "./config.json"
    try:
        with open(json_filename, 'r', encoding='utf-8') as f:
            data = json.load(f)
        driver = get_driver()
        success = 0
        fail = []
        for index, single in enumerate(data):
            try:
                task(driver, single)
                time.sleep(1)
                success += 1
            except Exception as e:
                print(e)
                fail.append(index)
                # A failed task may leave the session in a bad state; recycle
                # the driver. BUG FIX: the old session used to leak here.
                try:
                    driver.quit()
                except Exception:
                    pass
                driver = get_driver()
        driver.quit()
        # BUG FIX: errors were only reported when `fail` was EMPTY
        # (`if len(fail) == 0`), and the bare index was passed to log(),
        # which expects the account mapping.
        for i in fail:
            log(data[i], 'Error')
        print('Total: ' + str(len(data))
              + '\nSuccess: ' + str(success)
              + '\nFail: ' + str(len(fail)) + '\n')
    except FileNotFoundError:
        print("File is not found: " + os.path.abspath(json_filename))
        print("Creating file...")
        with open(json_filename, 'w', encoding='utf-8') as f:
            json.dump([{
                "loginName": "<NAME>",
                "yzxx": "name",
                "loginType": 0,
                "checkIn": {},
                "paper": {}
            }], f)
    except PermissionError:
        print("No permission: " + os.path.abspath(json_filename))
    except JSONDecodeError:
        print("Error file data...")
if __name__ == '__main__':
    # Script entry point.
    main()
import json
import os
import sys
import time
from json.decoder import JSONDecodeError
from shutil import which
from selenium import webdriver
def get_exec_path():
    """Locate the chromedriver binary.

    Searches PATH first, then the current directory; exits the process
    with status 1 when the driver cannot be found.
    """
    name = 'chromedriver'
    found = which(name) or which(name, path='.')
    if found is None:
        print('No chrome driver...')
        sys.exit(1)
    return found
def get_driver():
    """Build a headless Chrome WebDriver configured for unattended server use."""
    opts = webdriver.ChromeOptions()
    for flag in (
        "--disable-notifications",   # suppress browser notification prompts
        "disable-infobars",          # hide the "controlled by automation" bar
        "--start-maximized",
        "--headless",                # no GUI
        "--no-sandbox",              # allow running as root
        "--disable-dev-shm-usage",
        "--disable-gpu",
    ):
        opts.add_argument(flag)
    # A custom mobile user agent could be added here via opts.add_argument.
    return webdriver.Chrome(executable_path=get_exec_path(), options=opts)
def log(single, addition):
    """Print a timestamped log line for one account entry.

    Args:
        single: mapping with at least 'yzxx' (name) and 'loginName' (student id).
        addition: free-text message appended to the line.

    Returns:
        The formatted message that was printed (convenient for testing).
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Same output as before; plain indexing instead of __getitem__ calls.
    msg = "[{} 姓名:{} 学号: {}] {}".format(
        stamp, single['yzxx'], single['loginName'], addition)
    print(msg)
    return msg
def task(driver, single):
    """Open the check-in page for one account and submit its check-in payload.

    Args:
        driver: a live selenium WebDriver.
        single: mapping with 'loginName', 'loginType' and a 'checkIn' dict
            that is serialized into the injected AJAX call.
    """
    driver.get("https://fxgl.jx.edu.cn/4136010406/public/homeQd?loginName="
               + single.__getitem__('loginName')
               + "&loginType=" + str(single.__getitem__('loginType')))
    # Wait a little longer so the page's JS/CSS finish loading.
    time.sleep(5)
    log(single, '自动签到中...')
    # jQuery AJAX POST of the check-in payload, executed inside the page.
    js = 'async function(){let t=\'REPLACE\';return t=JSON.parse(t),await async function(t){return await new Promise(' \
         'n=>{$.ajax({url:"https://fxgl.jx.edu.cn/4136010406/studentQd/saveStu",method:"post",data:t,' \
         'success:function(t){return n(JSON.stringify(t))}})})}(t)}(); '
    js = js.replace("REPLACE", json.dumps(single.__getitem__('checkIn')))
    print(driver.execute_script('return ' + js))
    # (Disabled) survey auto-fill, kept for reference:
    # time.sleep(3)
    # log(single, '自动填写问卷中...')
    # js = 'async function(){var t=\'REPLACE\',n="https://fxgl.jx.edu.cn/4136010406/";return 0==(t=JSON.parse(' \
    #      't)).sf?n+="dcwjEditNew/dcwjSubmit2":n+="dcwjEditNew/dcwjTsubmit2",await async function(t,n){return await ' \
    #      'new Promise(i=>{$.ajax({type:"post",url:t,data:{dcwj:JSON.stringify(n)},success:function(t){return i(' \
    #      'JSON.stringify(t))}})})}(n,t)}(); '
    # js = js.replace("REPLACE", json.dumps(single.__getitem__('paper')))
    # print(driver.execute_script('return ' + js))
    print()
def main():
    """Load config.json, check in every configured account, print a summary.

    Creates a template config.json when the file is missing; reports
    permission and JSON-decoding problems without raising.
    """
    json_filename = "./config.json"
    try:
        with open(json_filename, 'r', encoding='utf-8') as f:
            data = json.load(f)
        driver = get_driver()
        success = 0
        fail = []
        for index, single in enumerate(data):
            try:
                task(driver, single)
                time.sleep(1)
                success += 1
            except Exception as e:
                print(e)
                fail.append(index)
                # A failed task may leave the session in a bad state; recycle
                # the driver. BUG FIX: the old session used to leak here.
                try:
                    driver.quit()
                except Exception:
                    pass
                driver = get_driver()
        driver.quit()
        # BUG FIX: errors were only reported when `fail` was EMPTY
        # (`if len(fail) == 0`), and the bare index was passed to log(),
        # which expects the account mapping.
        for i in fail:
            log(data[i], 'Error')
        print('Total: ' + str(len(data))
              + '\nSuccess: ' + str(success)
              + '\nFail: ' + str(len(fail)) + '\n')
    except FileNotFoundError:
        print("File is not found: " + os.path.abspath(json_filename))
        print("Creating file...")
        with open(json_filename, 'w', encoding='utf-8') as f:
            json.dump([{
                "loginName": "<NAME>",
                "yzxx": "name",
                "loginType": 0,
                "checkIn": {},
                "paper": {}
            }], f)
    except PermissionError:
        print("No permission: " + os.path.abspath(json_filename))
    except JSONDecodeError:
        print("Error file data...")
if __name__ == '__main__':
    # Script entry point.
    main()
from catm00dz.data import MoodExperienceImageCalendar
import collections
def average_mood_by_weekday(img_path):
    """Mean mood GPA per weekday for the calendar image at *img_path*.

    Blank moods are skipped; only weekdays with at least one decoded
    entry appear in the result.
    """
    counts = collections.defaultdict(int)
    totals = collections.defaultdict(float)
    cal = MoodExperienceImageCalendar.from_path(
        img_path
    )
    for exp in cal.iter_box_center_decoded:
        if exp.mood.is_blank:
            continue
        counts[exp.date.weekday] += 1
        totals[exp.date.weekday] += exp.mood.gpa
    return {day: totals[day] / counts[day] for day in counts}
def average_mood_by_month(img_path):
    """Mean mood GPA per month for the calendar image at *img_path*.

    Blank moods are skipped; only months with at least one decoded
    entry appear in the result.
    """
    counts = collections.defaultdict(int)
    totals = collections.defaultdict(float)
    cal = MoodExperienceImageCalendar.from_path(
        img_path
    )
    for exp in cal.iter_box_center_decoded:
        if exp.mood.is_blank:
            continue
        counts[exp.date.month] += 1
        totals[exp.date.month] += exp.mood.gpa
    return {month: totals[month] / counts[month] for month in counts}
def std_mood_by_weekday(img_path):
    """Sample standard deviation of mood GPA per weekday, keyed by day name.

    NOTE(review): a weekday with a single entry divides by zero
    (n - 1 == 0), as in the original code — confirm whether intended.
    """
    import calendar  # BUG FIX: calendar.day_name was used without any import

    avg_by_weekday = average_mood_by_weekday(img_path)
    cal = MoodExperienceImageCalendar.from_path(img_path)
    counts = collections.defaultdict(int)
    sq_dev = collections.defaultdict(float)
    for exp in cal.iter_box_center_decoded:
        if exp.mood.is_blank:
            continue
        sq_dev[exp.date.weekday] += pow(
            exp.mood.gpa - avg_by_weekday[exp.date.weekday], 2.0
        )
        counts[exp.date.weekday] += 1
    return {
        calendar.day_name[day]: pow(sq_dev[day] / (counts[day] - 1), 0.5)
        for day in counts
    }
import collections
def average_mood_by_weekday(img_path):
    """Mean mood GPA per weekday for the calendar image at *img_path*.

    Blank moods are skipped; only weekdays with at least one decoded
    entry appear in the result.
    """
    counts = collections.defaultdict(int)
    totals = collections.defaultdict(float)
    cal = MoodExperienceImageCalendar.from_path(
        img_path
    )
    for exp in cal.iter_box_center_decoded:
        if exp.mood.is_blank:
            continue
        counts[exp.date.weekday] += 1
        totals[exp.date.weekday] += exp.mood.gpa
    return {day: totals[day] / counts[day] for day in counts}
def average_mood_by_month(img_path):
    """Mean mood GPA per month for the calendar image at *img_path*.

    Blank moods are skipped; only months with at least one decoded
    entry appear in the result.
    """
    counts = collections.defaultdict(int)
    totals = collections.defaultdict(float)
    cal = MoodExperienceImageCalendar.from_path(
        img_path
    )
    for exp in cal.iter_box_center_decoded:
        if exp.mood.is_blank:
            continue
        counts[exp.date.month] += 1
        totals[exp.date.month] += exp.mood.gpa
    return {month: totals[month] / counts[month] for month in counts}
def std_mood_by_weekday(img_path):
    """Sample standard deviation of mood GPA per weekday, keyed by day name.

    NOTE(review): a weekday with a single entry divides by zero
    (n - 1 == 0), as in the original code — confirm whether intended.
    """
    import calendar  # BUG FIX: calendar.day_name was used without any import

    avg_by_weekday = average_mood_by_weekday(img_path)
    cal = MoodExperienceImageCalendar.from_path(img_path)
    counts = collections.defaultdict(int)
    sq_dev = collections.defaultdict(float)
    for exp in cal.iter_box_center_decoded:
        if exp.mood.is_blank:
            continue
        sq_dev[exp.date.weekday] += pow(
            exp.mood.gpa - avg_by_weekday[exp.date.weekday], 2.0
        )
        counts[exp.date.weekday] += 1
    return {
        calendar.day_name[day]: pow(sq_dev[day] / (counts[day] - 1), 0.5)
        for day in counts
    }
from django.utils import timezone
from django.shortcuts import get_object_or_404
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework_simplejwt.views import TokenObtainPairView
from drf_yasg.utils import swagger_auto_schema
from ..accounts_api.models import Member
from ..accounts_api.serializers import TokenObtainPairResponseSerializer
from ..reservation_api.models import Reservation
from ..subscription_api.models import Subscription
from .serializers import StaffReservationSerializer, StaffSubscriptionSerializer
class LoginStaffTokenObtainView(TokenObtainPairView):
    """
    Takes a set of staff credentials and returns an access and refresh JSON web
    token pair to prove the authentication of those credentials.
    """

    @swagger_auto_schema(responses={200: TokenObtainPairResponseSerializer})
    def post(self, request, *args, **kwargs):
        # Look the member up first so non-staff accounts are rejected before
        # the token serializer ever runs.
        try:
            member = Member.objects.all().get(email=request.data['email'])
        except KeyError:
            return Response({'detail': 'The email field is required.'},
                            status=status.HTTP_400_BAD_REQUEST)
        except Member.DoesNotExist:
            return Response(
                {'detail': 'No active account found with the given credentials.'},
                status=status.HTTP_401_UNAUTHORIZED)
        if not member.is_staff:
            return Response(
                {'detail': 'Please enter the correct email address and password for a staff account!'},
                status=status.HTTP_401_UNAUTHORIZED)
        return super().post(request, *args, **kwargs)
class StaffReservationViewSet(viewsets.ViewSet):
    """
    list:
    Return all non-expired reservations.

    update:
    Modify the reservation information.
    """
    permission_classes = [IsAdminUser]

    def get_queryset(self):
        # Only reservations whose end time is still in the future.
        return Reservation.objects.filter(reserved_end__gt=timezone.now())

    def get_object(self, pk):
        return get_object_or_404(self.get_queryset(), pk=pk)

    @swagger_auto_schema(responses={200: StaffReservationSerializer})
    def list(self, request):
        serializer = StaffReservationSerializer(self.get_queryset(), many=True)
        if not serializer.data:
            return Response({'detail': 'There are no requested reservations.'},
                            status=status.HTTP_404_NOT_FOUND)
        return Response(serializer.data, status=status.HTTP_200_OK)

    @swagger_auto_schema(request_body=StaffReservationSerializer, responses={200: StaffReservationSerializer})
    def update(self, request, pk):
        serializer = StaffReservationSerializer(
            instance=self.get_object(pk), data=request.data, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class StaffSubscriptionViewSet(viewsets.ViewSet):
    """
    list:
    Return all members with an active subscription.

    update:
    Modify the visits count for a member with an active subscription.
    """
    permission_classes = [IsAdminUser]

    def get_queryset(self):
        # Active = not yet expired and with visits remaining.
        return Subscription.objects.filter(expires__gt=timezone.now(), visits_count__gt=0)

    def get_object(self, pk):
        queryset = self.get_queryset()
        return get_object_or_404(queryset, pk=pk)

    @swagger_auto_schema(responses={200: StaffSubscriptionSerializer})
    def list(self, request):
        queryset = self.get_queryset()
        serializer = StaffSubscriptionSerializer(queryset, many=True)
        if serializer.data:
            return Response(serializer.data, status=status.HTTP_200_OK)
        error = {'detail': 'There are no members with an active subscription.'}
        return Response(error, status=status.HTTP_404_NOT_FOUND)

    @swagger_auto_schema(request_body=StaffSubscriptionSerializer, responses={200: StaffSubscriptionSerializer})
    def update(self, request, pk):
        subscription = self.get_object(pk)
        # CONSISTENCY FIX: allow partial payloads (e.g. visits count only),
        # matching StaffReservationViewSet.update; full payloads still work.
        serializer = StaffSubscriptionSerializer(
            instance=subscription, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
from django.shortcuts import get_object_or_404
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework_simplejwt.views import TokenObtainPairView
from drf_yasg.utils import swagger_auto_schema
from ..accounts_api.models import Member
from ..accounts_api.serializers import TokenObtainPairResponseSerializer
from ..reservation_api.models import Reservation
from ..subscription_api.models import Subscription
from .serializers import StaffReservationSerializer, StaffSubscriptionSerializer
class LoginStaffTokenObtainView(TokenObtainPairView):
    """
    Takes a set of staff credentials and returns an access and refresh JSON web
    token pair to prove the authentication of those credentials.
    """

    @swagger_auto_schema(responses={200: TokenObtainPairResponseSerializer})
    def post(self, request, *args, **kwargs):
        # Look the member up first so non-staff accounts are rejected before
        # the token serializer ever runs.
        try:
            member = Member.objects.all().get(email=request.data['email'])
        except KeyError:
            return Response({'detail': 'The email field is required.'},
                            status=status.HTTP_400_BAD_REQUEST)
        except Member.DoesNotExist:
            return Response(
                {'detail': 'No active account found with the given credentials.'},
                status=status.HTTP_401_UNAUTHORIZED)
        if not member.is_staff:
            return Response(
                {'detail': 'Please enter the correct email address and password for a staff account!'},
                status=status.HTTP_401_UNAUTHORIZED)
        return super().post(request, *args, **kwargs)
class StaffReservationViewSet(viewsets.ViewSet):
    """
    list:
    Return all non-expired reservations.

    update:
    Modify the reservation information.
    """
    permission_classes = [IsAdminUser]

    def get_queryset(self):
        # Only reservations whose end time is still in the future.
        return Reservation.objects.filter(reserved_end__gt=timezone.now())

    def get_object(self, pk):
        return get_object_or_404(self.get_queryset(), pk=pk)

    @swagger_auto_schema(responses={200: StaffReservationSerializer})
    def list(self, request):
        serializer = StaffReservationSerializer(self.get_queryset(), many=True)
        if not serializer.data:
            return Response({'detail': 'There are no requested reservations.'},
                            status=status.HTTP_404_NOT_FOUND)
        return Response(serializer.data, status=status.HTTP_200_OK)

    @swagger_auto_schema(request_body=StaffReservationSerializer, responses={200: StaffReservationSerializer})
    def update(self, request, pk):
        serializer = StaffReservationSerializer(
            instance=self.get_object(pk), data=request.data, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class StaffSubscriptionViewSet(viewsets.ViewSet):
    """
    list:
    Return all members with an active subscription.

    update:
    Modify the visits count for a member with an active subscription.
    """
    permission_classes = [IsAdminUser]

    def get_queryset(self):
        # Active = not yet expired and with visits remaining.
        return Subscription.objects.filter(expires__gt=timezone.now(), visits_count__gt=0)

    def get_object(self, pk):
        queryset = self.get_queryset()
        return get_object_or_404(queryset, pk=pk)

    @swagger_auto_schema(responses={200: StaffSubscriptionSerializer})
    def list(self, request):
        queryset = self.get_queryset()
        serializer = StaffSubscriptionSerializer(queryset, many=True)
        if serializer.data:
            return Response(serializer.data, status=status.HTTP_200_OK)
        error = {'detail': 'There are no members with an active subscription.'}
        return Response(error, status=status.HTTP_404_NOT_FOUND)

    @swagger_auto_schema(request_body=StaffSubscriptionSerializer, responses={200: StaffSubscriptionSerializer})
    def update(self, request, pk):
        subscription = self.get_object(pk)
        # CONSISTENCY FIX: allow partial payloads (e.g. visits count only),
        # matching StaffReservationViewSet.update; full payloads still work.
        serializer = StaffSubscriptionSerializer(
            instance=subscription, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
import math
from typing import Sequence
import fastfilters
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
class Filter(BaseEstimator, TransformerMixin):
    """Base class for stateless image filters in the sklearn transformer API."""

    def fit(self, X=None, y=None, **kwargs):
        # Filters are stateless; fit is a no-op kept for API compatibility.
        return self

    def transform(self, X):
        raise NotImplementedError

    @property
    def kernel_size(self):
        # Spatial extent (in pixels) of the filter kernel; set by subclasses.
        raise NotImplementedError

    def _more_tags(self):
        # Tell sklearn's estimator checks that no fitting is required.
        return {"requires_fit": False, "stateless": True}
class SingleFilter(Filter):
    """A filter parameterised by a single Gaussian `scale` (sigma)."""

    def __init__(self, scale):
        self.scale = scale

    def __init_subclass__(cls, order, **kwargs):
        # `order` is the derivative order declared by each subclass; it feeds
        # into the kernel-size estimate below.
        super().__init_subclass__(**kwargs)
        cls.order = order

    @property
    def kernel_size(self):
        # TODO: Make sure that the kernel size formula is pixel-perfect.
        return math.ceil((3 + self.order / 2) * self.scale) + 1

    # Tells sklearn's estimator checks that `scale` has no default value.
    _required_parameters = ("scale",)
class Gaussian(SingleFilter, order=0):
    """Gaussian smoothing at `scale`."""

    def transform(self, X):
        return fastfilters.gaussianSmoothing(X, sigma=self.scale)
class DifferenceOfGaussians(SingleFilter, order=0):
    """Outer Gaussian minus a narrower inner Gaussian (sigma = inner_k * scale)."""

    def __init__(self, scale, *, inner_k=0.5):
        super().__init__(scale)
        self.inner_k = inner_k

    def transform(self, X):
        outer = fastfilters.gaussianSmoothing(X, sigma=self.scale)
        inner = fastfilters.gaussianSmoothing(X, sigma=self.inner_k * self.scale)
        return outer - inner
class GaussianGradientMagnitude(SingleFilter, order=1):
    """Magnitude of the Gaussian-smoothed gradient."""

    def transform(self, X):
        return fastfilters.gaussianGradientMagnitude(X, sigma=self.scale)
class LaplacianOfGaussian(SingleFilter, order=2):
    """Laplacian of Gaussian at `scale`."""

    def transform(self, X):
        return fastfilters.laplacianOfGaussian(X, scale=self.scale)
class StructureTensorEigenvalues(SingleFilter, order=1):
    """Eigenvalues of the structure tensor (inner sigma = inner_k * scale)."""

    def __init__(self, scale, *, inner_k=0.5):
        super().__init__(scale)
        self.inner_k = inner_k

    def transform(self, X):
        return fastfilters.structureTensorEigenvalues(
            X, innerScale=self.inner_k * self.scale, outerScale=self.scale
        )
class HessianOfGaussianEigenvalues(SingleFilter, order=2):
    """Eigenvalues of the Hessian of Gaussian at `scale`."""

    def transform(self, X):
        return fastfilters.hessianOfGaussianEigenvalues(X, scale=self.scale)
class FilterSet(Filter):
    """Apply several filters and stack their responses along a new last axis."""

    def __init__(self, *, filters: Sequence[Filter]):
        self.filters = filters

    def transform(self, X):
        # TODO: Optimize feature computations by sharing intermediate results.
        responses = [
            flt.transform(X).reshape((*X.shape, -1)) for flt in self.filters
        ]
        return numpy.concatenate(responses, axis=-1)

    @property
    def kernel_size(self):
        # The set is as wide as its widest member filter.
        return max(f.kernel_size for f in self.filters)
from typing import Sequence
import fastfilters
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
class Filter(BaseEstimator, TransformerMixin):
    """Base class for stateless image filters in the sklearn transformer API."""

    def fit(self, X=None, y=None, **kwargs):
        # Filters are stateless; fit is a no-op kept for API compatibility.
        return self

    def transform(self, X):
        raise NotImplementedError

    @property
    def kernel_size(self):
        # Spatial extent (in pixels) of the filter kernel; set by subclasses.
        raise NotImplementedError

    def _more_tags(self):
        # Tell sklearn's estimator checks that no fitting is required.
        return {"requires_fit": False, "stateless": True}
class SingleFilter(Filter):
    """A filter parameterised by a single Gaussian `scale` (sigma)."""

    def __init__(self, scale):
        self.scale = scale

    def __init_subclass__(cls, order, **kwargs):
        # `order` is the derivative order declared by each subclass; it feeds
        # into the kernel-size estimate below.
        super().__init_subclass__(**kwargs)
        cls.order = order

    @property
    def kernel_size(self):
        # TODO: Make sure that the kernel size formula is pixel-perfect.
        return math.ceil((3 + self.order / 2) * self.scale) + 1

    # Tells sklearn's estimator checks that `scale` has no default value.
    _required_parameters = ("scale",)
class Gaussian(SingleFilter, order=0):
    """Gaussian smoothing at `scale`."""

    def transform(self, X):
        return fastfilters.gaussianSmoothing(X, sigma=self.scale)
class DifferenceOfGaussians(SingleFilter, order=0):
    """Outer Gaussian minus a narrower inner Gaussian (sigma = inner_k * scale)."""

    def __init__(self, scale, *, inner_k=0.5):
        super().__init__(scale)
        self.inner_k = inner_k

    def transform(self, X):
        outer = fastfilters.gaussianSmoothing(X, sigma=self.scale)
        inner = fastfilters.gaussianSmoothing(X, sigma=self.inner_k * self.scale)
        return outer - inner
class GaussianGradientMagnitude(SingleFilter, order=1):
    """Magnitude of the Gaussian-smoothed gradient."""

    def transform(self, X):
        return fastfilters.gaussianGradientMagnitude(X, sigma=self.scale)
class LaplacianOfGaussian(SingleFilter, order=2):
    """Laplacian of Gaussian at `scale`."""

    def transform(self, X):
        return fastfilters.laplacianOfGaussian(X, scale=self.scale)
class StructureTensorEigenvalues(SingleFilter, order=1):
    """Eigenvalues of the structure tensor (inner sigma = inner_k * scale)."""

    def __init__(self, scale, *, inner_k=0.5):
        super().__init__(scale)
        self.inner_k = inner_k

    def transform(self, X):
        return fastfilters.structureTensorEigenvalues(
            X, innerScale=self.inner_k * self.scale, outerScale=self.scale
        )
class HessianOfGaussianEigenvalues(SingleFilter, order=2):
    """Eigenvalues of the Hessian of Gaussian at `scale`."""

    def transform(self, X):
        return fastfilters.hessianOfGaussianEigenvalues(X, scale=self.scale)
class FilterSet(Filter):
    """Apply several filters and stack their responses along a new last axis."""

    def __init__(self, *, filters: Sequence[Filter]):
        self.filters = filters

    def transform(self, X):
        # TODO: Optimize feature computations by sharing intermediate results.
        responses = [
            flt.transform(X).reshape((*X.shape, -1)) for flt in self.filters
        ]
        return numpy.concatenate(responses, axis=-1)

    @property
    def kernel_size(self):
        # The set is as wide as its widest member filter.
        return max(f.kernel_size for f in self.filters)
from server.databases.utils import database, convert_helper
from bson.objectid import ObjectId
from bson.errors import InvalidId
from typing import List, Tuple
from server.models.utils import ErrorModel
# Motor collection holding all class/course documents.
course_collection = database.get_collection("Classes")
async def get_all_courses() -> List[dict]:
    """ Get all classes in db """
    return [convert_helper(doc) async for doc in course_collection.find()]
async def create_class(data: dict) -> dict:
    """ Creates a new class with <data> and returns the stored document """
    inserted = await course_collection.insert_one(data)
    stored = await course_collection.find_one({"_id": inserted.inserted_id})
    return convert_helper(stored)
async def get_class(id: str, errors: list) -> dict:
    """ Gets class from db with ObjectID(<id>).

    On failure appends an ErrorModel to <errors> and returns None.
    (Annotation fixed: the function returns a dict or None, not a tuple.)
    """
    try:
        oid = ObjectId(id)
    except InvalidId:
        errors.append(ErrorModel(["path", "id"],
                                 "{} is not a valid id".format(id),
                                 "value_error"))
        return None
    course = await course_collection.find_one({"_id": oid})
    if course is None:
        errors.append(ErrorModel(["path", "id"],
                                 "course with {} not found".format(id),
                                 "value_error"))
        return None
    return convert_helper(course)
# ------------------- POST SPECIFIC METHODS ------------------------------
async def get_all_posts(id: str, errors: list) -> List[dict]:
    """ Get all posts for class with ObjectID(id); None on lookup failure """
    course = await get_class(id, errors)
    return course["posts"] if course else None
async def get_post_by_index(id: str, ind: int, errors: list) -> dict:
    """Get the post with index <ind> from class <id>.

    Appends an ErrorModel to <errors> and returns None on failure.
    """
    course = await get_class(id, errors)
    if course is None:
        # BUG FIX: a failed class lookup used to raise TypeError on
        # course["post_num"].
        return None
    if not 0 <= ind < course["post_num"]:
        # BUG FIX: negative indices used to slip through the bounds check.
        errors.append(ErrorModel(["path", "index"], "invalid index",
                                 "value_error"))
        return None
    return course["posts"][ind]
async def create_post(id: str, data: dict, errors: list) -> dict:
""" Creates a single post in class with <id> """
course = await get_class(id, errors)
if course:
data["index"] = course["post_num"]
# add post
post = await course_collection.update_one(
{"_id": ObjectId(id)},
{"$set": {"posts." + str(course["post_num"]): data}},
upsert=False
)
# Update post_num in course after adding post
await course_collection.update_one(
{"_id": ObjectId(id)},
{"$inc": {"post_num": 1}},
upsert=False
)
return data
async def update_post(id: str, ind: int, data: dict, errors: list) -> dict:
    """ Updates post <ind> of class <id>, merging <data> over stored fields.

    Fields absent from <data> keep their stored values.  Returns the merged
    post, or None when <data> is empty or the post cannot be found.
    """
    if not data:
        return None
    post = await get_post_by_index(id, ind, errors)
    if not post:
        return None
    # BUG FIX: no longer mutates the caller's <data> dict in place.
    merged = {**post, **data}
    await course_collection.update_one(
        {"_id": ObjectId(id)},
        {"$set": {"posts." + str(ind): merged}}, upsert=False)
    return merged
from bson.objectid import ObjectId
from bson.errors import InvalidId
from typing import List, Tuple
from server.models.utils import ErrorModel
# Motor collection holding all class/course documents.
course_collection = database.get_collection("Classes")
async def get_all_courses() -> List[dict]:
    """ Get all classes in db """
    return [convert_helper(doc) async for doc in course_collection.find()]
async def create_class(data: dict) -> dict:
    """ Creates a new class with <data> and returns the stored document """
    inserted = await course_collection.insert_one(data)
    stored = await course_collection.find_one({"_id": inserted.inserted_id})
    return convert_helper(stored)
async def get_class(id: str, errors: list) -> dict:
    """ Gets class from db with ObjectID(<id>).

    On failure appends an ErrorModel to <errors> and returns None.
    (Annotation fixed: the function returns a dict or None, not a tuple.)
    """
    try:
        oid = ObjectId(id)
    except InvalidId:
        errors.append(ErrorModel(["path", "id"],
                                 "{} is not a valid id".format(id),
                                 "value_error"))
        return None
    course = await course_collection.find_one({"_id": oid})
    if course is None:
        errors.append(ErrorModel(["path", "id"],
                                 "course with {} not found".format(id),
                                 "value_error"))
        return None
    return convert_helper(course)
# ------------------- POST SPECIFIC METHODS ------------------------------
async def get_all_posts(id: str, errors: list) -> List[dict]:
    """ Get all posts for class with ObjectID(id); None on lookup failure """
    course = await get_class(id, errors)
    return course["posts"] if course else None
async def get_post_by_index(id: str, ind: int, errors: list) -> dict:
    """Get the post with index <ind> from class <id>.

    Appends an ErrorModel to <errors> and returns None on failure.
    """
    course = await get_class(id, errors)
    if course is None:
        # BUG FIX: a failed class lookup used to raise TypeError on
        # course["post_num"].
        return None
    if not 0 <= ind < course["post_num"]:
        # BUG FIX: negative indices used to slip through the bounds check.
        errors.append(ErrorModel(["path", "index"], "invalid index",
                                 "value_error"))
        return None
    return course["posts"][ind]
async def create_post(id: str, data: dict, errors: list) -> dict:
    """ Creates a single post in class with <id>; returns the post or None """
    course = await get_class(id, errors)
    if course:
        data["index"] = course["post_num"]
        # Store the post at the next free index.
        await course_collection.update_one(
            {"_id": ObjectId(id)},
            {"$set": {"posts." + str(course["post_num"]): data}},
            upsert=False
        )
        # NOTE(review): read-then-increment is not atomic; concurrent creates
        # could collide on the same index.
        await course_collection.update_one(
            {"_id": ObjectId(id)},
            {"$inc": {"post_num": 1}},
            upsert=False
        )
        return data
async def update_post(id: str, ind: int, data: dict, errors: list) -> dict:
    """ Updates post <ind> of class <id>, merging <data> over stored fields.

    Fields absent from <data> keep their stored values.  Returns the merged
    post, or None when <data> is empty or the post cannot be found.
    """
    if not data:
        return None
    post = await get_post_by_index(id, ind, errors)
    if not post:
        return None
    # BUG FIX: no longer mutates the caller's <data> dict in place.
    merged = {**post, **data}
    await course_collection.update_one(
        {"_id": ObjectId(id)},
        {"$set": {"posts." + str(ind): merged}}, upsert=False)
    return merged
from serene_load.helpers.containers.container_base import TempFileContainer, BaseContainer, BaseProcessor
import logging
import datetime
import io
import re
import subprocess
# Module logger (root logger; NOTE(review): getLogger(__name__) is preferable).
log = logging.getLogger()
class SevenZipFileContainer(TempFileContainer):
    """Container that extracts a single member of a 7z archive via 7za."""

    def decompress(self, source, target):
        # NOTE(review): shell=True with interpolated file names is
        # shell-injection-prone if a name can contain a single quote — review.
        subprocess.check_call("/usr/bin/7za e '{}' '{}' -so > {}".format(source, self.filename(), target), stderr=io.open('/dev/null', 'w'), shell=True)
BaseContainer.add_container_type(SevenZipFileContainer)

# `7za l` listing row that carries a modification timestamp:
# "YYYY-MM-DD HH:MM:SS <attrs> <size> <packed> <name>"
with_dt = re.compile(
    r'(?P<year>[\d]{4})-(?P<month>[\d]{2})-(?P<day>[\d]{2}) (?P<hour>[\d]{2}):(?P<minute>[\d]{2}):(?P<second>[\d]{2}) [^\s]{5}[\s\t]+(?P<size>[\d]+)[\s\t\d]+[\s]+(?P<name>.*)$',
)
# Listing row without a timestamp (attributes, size and name only).
no_dt = re.compile(
    r'^[\s]+[^\s]{5}[\s\t]+(?P<size>[\d]+)[\s\t\d]+[\s]+(?P<name>.*)$',
)
class SevenZipFileProcessor(BaseProcessor):
    """Lists 7z archive members by parsing the output of `7za l`."""

    @classmethod
    def valid(cls, args, input_file):
        """Return True when 7za can list the file, i.e. it is a 7z archive."""
        with input_file as infile:
            fp = infile.instantiate_file()
            try:
                # NOTE(review): '>&' is a bash/csh redirection; with shell=True
                # this runs under /bin/sh — confirm it behaves as intended there.
                subprocess.check_call(u'7za l "{}" >& /dev/null'.format(fp), shell=True)
                return True
            except subprocess.CalledProcessError:
                return False

    @classmethod
    def unpack(cls, args, input_file):
        """Yield one work-item dict per archive member found in the listing."""
        with input_file as infile:
            fp = infile.instantiate_file()
            log.debug(u'using {} for {}'.format(fp, input_file))
            # NOTE(review): on Python 3 check_output returns bytes, and
            # bytes.split('\n') raises TypeError — this code assumes Python 2.
            output = subprocess.check_output(u"7za l '{}'".format(fp), shell=True)
            file_listing = False
            for line in output.split('\n'):
                if file_listing:
                    # A second '----' separator row terminates the file table.
                    if line.startswith('----'):
                        file_listing = False
                        continue
                    if line[4] == '-':
                        # Index 4 holds the first '-' of a YYYY-MM-DD date, so
                        # this row carries a timestamp; parse it out.
                        match = with_dt.match(line)
                        mtime = datetime.datetime(
                            year=int(match.group('year')),
                            month=int(match.group('month')),
                            day=int(match.group('day')),
                            hour=int(match.group('hour')),
                            minute=int(match.group('minute')),
                            second=int(match.group('second')),
                        ).isoformat()[:19]
                    else:
                        mtime = None
                        match = no_dt.match(line)
                        if match is None:
                            raise Exception(line)
                    filename = match.group('name').strip()
                    assert not filename == 'file'
                    sz = SevenZipFileContainer(input_fd=input_file, file=filename, job_args=args)
                    # Work item handed to the next pipeline stage ('hash').
                    d = {
                        'next_func': 'hash',
                        'accessor': sz,
                        'file': filename,
                        'path': input_file.relative_path()
                    }
                    if mtime:
                        d.update({
                            'mtime': mtime
                        })
                    yield d
                else:
                    # The first '----' separator row starts the file table.
                    if line.startswith('----'):
                        file_listing = True
import logging
import datetime
import io
import re
import subprocess
# Module logger (root logger; NOTE(review): getLogger(__name__) is preferable).
log = logging.getLogger()
class SevenZipFileContainer(TempFileContainer):
    """Container that extracts a single member of a 7z archive via 7za."""

    def decompress(self, source, target):
        # NOTE(review): shell=True with interpolated file names is
        # shell-injection-prone if a name can contain a single quote — review.
        subprocess.check_call("/usr/bin/7za e '{}' '{}' -so > {}".format(source, self.filename(), target), stderr=io.open('/dev/null', 'w'), shell=True)
BaseContainer.add_container_type(SevenZipFileContainer)

# `7za l` listing row that carries a modification timestamp:
# "YYYY-MM-DD HH:MM:SS <attrs> <size> <packed> <name>"
with_dt = re.compile(
    r'(?P<year>[\d]{4})-(?P<month>[\d]{2})-(?P<day>[\d]{2}) (?P<hour>[\d]{2}):(?P<minute>[\d]{2}):(?P<second>[\d]{2}) [^\s]{5}[\s\t]+(?P<size>[\d]+)[\s\t\d]+[\s]+(?P<name>.*)$',
)
# Listing row without a timestamp (attributes, size and name only).
no_dt = re.compile(
    r'^[\s]+[^\s]{5}[\s\t]+(?P<size>[\d]+)[\s\t\d]+[\s]+(?P<name>.*)$',
)
class SevenZipFileProcessor(BaseProcessor):
    """Lists 7z archive members by parsing the output of `7za l`."""

    @classmethod
    def valid(cls, args, input_file):
        """Return True when 7za can list the file, i.e. it is a 7z archive."""
        with input_file as infile:
            fp = infile.instantiate_file()
            try:
                # NOTE(review): '>&' is a bash/csh redirection; with shell=True
                # this runs under /bin/sh — confirm it behaves as intended there.
                subprocess.check_call(u'7za l "{}" >& /dev/null'.format(fp), shell=True)
                return True
            except subprocess.CalledProcessError:
                return False

    @classmethod
    def unpack(cls, args, input_file):
        """Yield one work-item dict per archive member found in the listing."""
        with input_file as infile:
            fp = infile.instantiate_file()
            log.debug(u'using {} for {}'.format(fp, input_file))
            # NOTE(review): on Python 3 check_output returns bytes, and
            # bytes.split('\n') raises TypeError — this code assumes Python 2.
            output = subprocess.check_output(u"7za l '{}'".format(fp), shell=True)
            file_listing = False
            for line in output.split('\n'):
                if file_listing:
                    # A second '----' separator row terminates the file table.
                    if line.startswith('----'):
                        file_listing = False
                        continue
                    if line[4] == '-':
                        # Index 4 holds the first '-' of a YYYY-MM-DD date, so
                        # this row carries a timestamp; parse it out.
                        match = with_dt.match(line)
                        mtime = datetime.datetime(
                            year=int(match.group('year')),
                            month=int(match.group('month')),
                            day=int(match.group('day')),
                            hour=int(match.group('hour')),
                            minute=int(match.group('minute')),
                            second=int(match.group('second')),
                        ).isoformat()[:19]
                    else:
                        mtime = None
                        match = no_dt.match(line)
                        if match is None:
                            raise Exception(line)
                    filename = match.group('name').strip()
                    assert not filename == 'file'
                    sz = SevenZipFileContainer(input_fd=input_file, file=filename, job_args=args)
                    # Work item handed to the next pipeline stage ('hash').
                    d = {
                        'next_func': 'hash',
                        'accessor': sz,
                        'file': filename,
                        'path': input_file.relative_path()
                    }
                    if mtime:
                        d.update({
                            'mtime': mtime
                        })
                    yield d
                else:
                    # The first '----' separator row starts the file table.
                    if line.startswith('----'):
                        file_listing = True
from selenium import webdriver
class AmazonScrapperChrome:
    """Scrape amazon.in product pages with Selenium's Chrome driver.

    If scraping a value fails, the corresponding getter returns the sentinel
    string "Failure".

    Operating flow:
        constructor -> init_chrome_window -> set_product_url ->
            { get_product_name, get_price, (get_product_availability) }
    """

    def __init__(self, driver_path):
        """Remember the chromedriver path; no browser is launched yet."""
        self.driver_path = driver_path
        self.driver = None
        self.url = None
        self.err = None  # last exception caught by set_product_url
        # Scraped values; "Failure" until a successful scrape overwrites them.
        self.product_name = "Failure"
        self.product_price = "Failure"
        self.product_availability = "Failure"

    def init_chrome_window(self):
        """Open the Chrome browser window controlled via chromedriver."""
        self.driver = webdriver.Chrome(self.driver_path)

    def process_price(self):
        """Normalise self.product_price in place: strip '₹', paise and commas.

        Example: '₹ 1,11,000.00' -> '111000'.
        No-op when the price is still the "Failure" sentinel.  Returns None
        (mutates self.product_price rather than returning the value).
        """
        if self.product_price == 'Failure':
            return
        amount = self.product_price.split('₹')[1].strip()
        amount = amount.split('.')[0]  # drop the paise part
        if ',' in amount:
            self.product_price = ''.join(amount.split(','))
        else:
            self.product_price = amount

    def set_product_url(self, url):
        """Navigate the browser to `url`.

        Returns True on success; on failure stores the exception in self.err
        and returns False.
        """
        try:
            self.url = url
            self.driver.get(url)
            return True
        except Exception as e:
            self.err = e
            return False

    def get_price(self):
        """Scrape the product price, preferring the regular price over the deal price.

        BUGFIX: the original wrapped this in `try/finally` with a `return`
        inside `finally`, which silently swallowed *any* exception raised in
        the `try` body.  The explicit `except Exception: pass` below keeps the
        same swallow-and-return-default behaviour, but makes it visible.
        """
        temp2 = []
        temp = []
        try:
            # NOTE(review): find_elements_by_xpath was removed in Selenium 4;
            # migrate to driver.find_elements(By.XPATH, ...) when upgrading.
            temp = self.driver.find_elements_by_xpath('.//*[@id="priceblock_ourprice"]')
            temp2 = self.driver.find_elements_by_xpath('.//*[@id="priceblock_dealprice"]')
            if len(temp):
                self.product_price = temp[0].text
        except Exception:
            pass  # leave the "Failure" sentinel / fall through to deal price
        if temp == [] and len(temp2):
            self.product_price = temp2[0].text
        self.process_price()
        return self.product_price

    def get_product_name(self):
        """Scrape the product title; returns "Failure" when not found."""
        try:
            temp = self.driver.find_elements_by_xpath('.//*[@id="title"]')
            if len(temp):
                self.product_name = temp[0].text
        except Exception:
            pass  # swallow scraping errors; sentinel stays in place
        return self.product_name

    def get_product_availability(self):
        """Scrape the availability/stock message; returns "Failure" when not found."""
        try:
            temp = self.driver.find_elements_by_xpath('//*[@id="availability"]')
            if len(temp):
                self.product_availability = temp[0].text
        except Exception:
            pass  # swallow scraping errors; sentinel stays in place
        return self.product_availability

    def close(self):
        """Close the Chrome window."""
        self.driver.close()
'''
scrapper = AmazonScrapperChrome(".//w_driver//chromedriver")
success = scrapper.set_product_url('https://www.amazon.in/CERTIFIED-REFURBISHED-Moto-G5S-Plus/dp/B079FWWQTL/ref=sr_1_2?dchild=1&keywords=moto&qid=1610112003&sr=8-2')
if success:
print(f'price: {scrapper.get_price()}')
print(f'product_name: {scrapper.get_product_name()}')
print(f'availability: {scrapper.get_product_availability()}')
else:
print('Error:', scrapper.err)
scrapper.close()
''' | scripts/amzn.py | from selenium import webdriver
class AmazonScrapperChrome:
    """Selenium-based scraper for amazon.in product pages.

    When a value cannot be scraped, the corresponding getter returns the
    sentinel string "Failure".

    Usage order:
        __init__ -> init_chrome_window -> set_product_url ->
            get_product_name / get_price / get_product_availability
    """

    def __init__(self, driver_path):
        """Record the chromedriver location; the browser is started later."""
        self.driver_path = driver_path
        self.driver = None
        self.url = None
        self.err = None
        # Every scraped field starts out as the "Failure" sentinel.
        self.product_name = "Failure"
        self.product_price = "Failure"
        self.product_availability = "Failure"

    def init_chrome_window(self):
        """Start a Chrome browser controlled through chromedriver."""
        self.driver = webdriver.Chrome(self.driver_path)

    def process_price(self):
        """Normalise the scraped price in place.

        Drops the '₹' currency symbol, the paise part after the decimal
        point, and any thousands commas (e.g. '₹ 1,11,000.00' -> '111000').
        Does nothing while the price is still the "Failure" sentinel.
        """
        if self.product_price == 'Failure':
            return
        raw = self.product_price.split('₹')[1].strip()
        rupees = raw.split('.')[0]
        self.product_price = rupees.replace(',', '')

    def set_product_url(self, url):
        """Point the browser at `url`; True on success, else False with self.err set."""
        self.url = url
        try:
            self.driver.get(url)
        except Exception as exc:
            self.err = exc
            return False
        return True

    def get_price(self):
        """Scrape the price, preferring the regular price over the deal price."""
        deal_matches = []
        price_matches = []
        try:
            price_matches = self.driver.find_elements_by_xpath('.//*[@id="priceblock_ourprice"]')
            deal_matches = self.driver.find_elements_by_xpath('.//*[@id="priceblock_dealprice"]')
            if price_matches:
                self.product_price = price_matches[0].text
        finally:
            # The `return` inside `finally` swallows any exception raised in
            # the `try` body, leaving the current sentinel/value in place.
            if price_matches == [] and deal_matches:
                self.product_price = deal_matches[0].text
            self.process_price()
            return self.product_price

    def get_product_name(self):
        """Scrape the product title element."""
        try:
            matches = self.driver.find_elements_by_xpath('.//*[@id="title"]')
            if matches:
                self.product_name = matches[0].text
        finally:
            # Swallows scraping errors; returns whatever is currently stored.
            return self.product_name

    def get_product_availability(self):
        """Scrape the availability / stock-status message."""
        try:
            matches = self.driver.find_elements_by_xpath('//*[@id="availability"]')
            if matches:
                self.product_availability = matches[0].text
        finally:
            # Swallows scraping errors; returns whatever is currently stored.
            return self.product_availability

    def close(self):
        """Shut the Chrome window."""
        self.driver.close()
'''
scrapper = AmazonScrapperChrome(".//w_driver//chromedriver")
success = scrapper.set_product_url('https://www.amazon.in/CERTIFIED-REFURBISHED-Moto-G5S-Plus/dp/B079FWWQTL/ref=sr_1_2?dchild=1&keywords=moto&qid=1610112003&sr=8-2')
if success:
print(f'price: {scrapper.get_price()}')
print(f'product_name: {scrapper.get_product_name()}')
print(f'availability: {scrapper.get_product_availability()}')
else:
print('Error:', scrapper.err)
scrapper.close()
''' | 0.483648 | 0.069668 |
import cupy as cp
def cdf(y, x, bw_method='scott', weight=1):
    '''
    Nadaraya-Watson estimate of the conditional CDF P(Y <= y_i | X = x_i), on the GPU.

    Works on 1-D arrays only.  Bandwidth is chosen automatically (Scott or
    Silverman rules) or supplied as a scalar.  The estimate works best for a
    unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    ----------
    y, x : array_like, 1-D
        Samples of the response and conditioning variable (same length).
    bw_method : str or scalar, optional
        'scott' (default), 'silverman', or a scalar used directly as the
        bandwidth factor.
    weight : array_like or scalar, optional
        Per-sample weights; the default scalar 1 weights all samples equally.

    Returns
    -------
    numpy.ndarray of shape (len(y), 1) with the estimated conditional CDF at
    each sample point (result is copied back to host memory).
    '''
    mempool = cp.get_default_memory_pool()
    pinned_mempool = cp.get_default_pinned_memory_pool()
    assert (x.ndim == 1) & (y.ndim == 1)
    NN = y.size
    d = 1  # dimensionality of x (this implementation is univariate)
    # Effective sample size; equals NN for the default scalar weight of 1.
    neff = (cp.ones(NN) * weight).sum()
    if bw_method == 'scott':
        h = neff ** (-1. / (d + 4))
    elif bw_method == 'silverman':
        h = (neff * (d + 2) / 4.) ** (-1. / (d + 4))
    else:
        h = bw_method  # caller supplied the bandwidth factor directly
    x = x.reshape((-1, 1))
    # Pre-divide by h so the kernel below operates on u = |xi - xj| / h.
    x = cp.asarray(x / h, dtype='float32')
    y = cp.asarray(y, dtype='float32')
    XX = cp.broadcast_to(x, (NN, NN))
    XXT = cp.broadcast_to(x.T, (NN, NN))
    xx = cp.absolute(XX - XXT)  # pairwise scaled distances
    XX = None
    XXT = None
    xx2 = cp.copy(xx)  # snapshot of the distances, used only as a mask below
    # Tricube kernel: K(u) = (70/81)(1 - |u|^3)^3 for |u| < 1, else 0.
    # At this point xx is still unmodified, so the masks xx<1 and xx2<1 agree.
    xx[xx2 < 1] = 70 / 81 * (1 - xx[xx < 1] ** 3) ** 3
    xx[xx2 >= 1] = 0
    xx2 = None
    y = y.reshape((-1, 1))
    yy = y <= y.T  # indicator matrix yy[i, j] = (y_i <= y_j)
    kernel = cp.asarray(weight, dtype='float32')
    kernel = cp.broadcast_to(kernel, (NN, NN))
    kernel = xx * kernel
    # Column-normalised Nadaraya-Watson weights.
    weight = kernel / kernel.sum(0, keepdims=True)
    cdf = (weight * yy).sum(0, keepdims=True).T
    #cv = cp.asnumpy((((yy-cdf)/(1-weight))**2*kk).mean())
    # Drop GPU references before copying back so the pools can be reclaimed.
    weight = None
    kernel = None
    yy = None
    cdf2 = cp.asnumpy(cdf)  # copy the result back to host memory
    cdf = None
    mempool.free_all_blocks()
    pinned_mempool.free_all_blocks()
    # NOTE(review): the trailing "| kde_gpu/conditional_probability.py |
    # import cupy as cp" is dataset/table residue fused onto this line,
    # not part of the function.
    return cdf2 | kde_gpu/conditional_probability.py | import cupy as cp
def cdf(y, x, bw_method='scott', weight=1):
    '''
    Nadaraya-Watson estimate of the conditional CDF P(Y <= y_i | X = x_i), on the GPU.

    Works on 1-D arrays only.  Bandwidth is chosen automatically (Scott or
    Silverman rules) or supplied as a scalar.  The estimate works best for a
    unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    ----------
    y, x : array_like, 1-D
        Samples of the response and conditioning variable (same length).
    bw_method : str or scalar, optional
        'scott' (default), 'silverman', or a scalar used directly as the
        bandwidth factor.
    weight : array_like or scalar, optional
        Per-sample weights; the default scalar 1 weights all samples equally.

    Returns
    -------
    numpy.ndarray of shape (len(y), 1) with the estimated conditional CDF at
    each sample point (result is copied back to host memory).
    '''
    mempool = cp.get_default_memory_pool()
    pinned_mempool = cp.get_default_pinned_memory_pool()
    assert (x.ndim == 1) & (y.ndim == 1)
    NN = y.size
    d = 1  # dimensionality of x (this implementation is univariate)
    # Effective sample size; equals NN for the default scalar weight of 1.
    neff = (cp.ones(NN) * weight).sum()
    if bw_method == 'scott':
        h = neff ** (-1. / (d + 4))
    elif bw_method == 'silverman':
        h = (neff * (d + 2) / 4.) ** (-1. / (d + 4))
    else:
        h = bw_method  # caller supplied the bandwidth factor directly
    x = x.reshape((-1, 1))
    # Pre-divide by h so the kernel below operates on u = |xi - xj| / h.
    x = cp.asarray(x / h, dtype='float32')
    y = cp.asarray(y, dtype='float32')
    XX = cp.broadcast_to(x, (NN, NN))
    XXT = cp.broadcast_to(x.T, (NN, NN))
    xx = cp.absolute(XX - XXT)  # pairwise scaled distances
    XX = None
    XXT = None
    xx2 = cp.copy(xx)  # snapshot of the distances, used only as a mask below
    # Tricube kernel: K(u) = (70/81)(1 - |u|^3)^3 for |u| < 1, else 0.
    # At this point xx is still unmodified, so the masks xx<1 and xx2<1 agree.
    xx[xx2 < 1] = 70 / 81 * (1 - xx[xx < 1] ** 3) ** 3
    xx[xx2 >= 1] = 0
    xx2 = None
    y = y.reshape((-1, 1))
    yy = y <= y.T  # indicator matrix yy[i, j] = (y_i <= y_j)
    kernel = cp.asarray(weight, dtype='float32')
    kernel = cp.broadcast_to(kernel, (NN, NN))
    kernel = xx * kernel
    # Column-normalised Nadaraya-Watson weights.
    weight = kernel / kernel.sum(0, keepdims=True)
    cdf = (weight * yy).sum(0, keepdims=True).T
    #cv = cp.asnumpy((((yy-cdf)/(1-weight))**2*kk).mean())
    # Drop GPU references before copying back so the pools can be reclaimed.
    weight = None
    kernel = None
    yy = None
    cdf2 = cp.asnumpy(cdf)  # copy the result back to host memory
    cdf = None
    mempool.free_all_blocks()
    pinned_mempool.free_all_blocks()
    return cdf2