hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b32e78243c7c2051a92ab047a90c7532e5fb7f5 | 156,466 | py | Python | tensorflow/python/data/ops/dataset_ops.py | MaverickMeerkat/tensorflow | 51c7df0cfc45d31c2ce2cd61e5c66969d890de2a | [
"Apache-2.0"
] | 2 | 2018-06-04T09:14:10.000Z | 2019-02-10T10:39:44.000Z | tensorflow/python/data/ops/dataset_ops.py | Kiku-git/tensorflow | a8b7b52f9fe90f428c5b8f3c220bde342ab2a54b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/ops/dataset_ops.py | Kiku-git/tensorflow | a8b7b52f9fe90f428c5b8f3c220bde342ab2a54b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum
import functools
import sys
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency (roughly
# tf.function->wrap_function->dataset->autograph->tf.function).
# TODO(b/133251390): Use a regular import.
wrap_function = lazy_loader.LazyLoader(
    "wrap_function", globals(),
    "tensorflow.python.eager.wrap_function")
# TODO(mdan): Create a public API for this.
autograph_ctx = lazy_loader.LazyLoader(
    "autograph_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
    "autograph", globals(),
    "tensorflow.python.autograph.impl.api")

# Register "ReduceDataset" as having no gradient.
ops.NotDifferentiable("ReduceDataset")

# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
class AutotuneAlgorithm(enum.Enum):
  """Represents the algorithm used by the tf.data autotuning machinery.

  Used as the default / selected algorithm when wrapping a dataset in
  `_ModelDataset` (see `DatasetV2._apply_options`).
  """
  HILL_CLIMB = 0
  GRADIENT_DESCENT = 1
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements and a "logical plan" of transformations that act on
those elements.
A dataset contains elements that each have the same (nested) structure and the
individual components of the structure can be of any type representable by
`tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`, `tf.SparseTensor`,
`tf.RaggedTensor`, or `tf.TensorArray`.
Example elements:
```python
# Integer element
a = 1
# Float element
b = 2.0
# Tuple element with 2 components
c = (1, 2)
# Dict element with 3 components
d = {"a": (2, 2), "b": 3}
# Element containing a dataset
e = tf.data.Dataset.from_tensors(10)
```
"""
  def __init__(self, variant_tensor):
    """Creates a DatasetV2 object.

    This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not
    take anything in its constructor whereas in the DatasetV2, we expect
    subclasses to create a variant_tensor and pass it in to the super() call.

    Args:
      variant_tensor: A DT_VARIANT tensor that represents the dataset.
    """
    self._variant_tensor_attr = variant_tensor
    # Use a weak proxy so the tracker's closure does not keep this dataset
    # alive (see the comment on the lambda below).
    weak_self = weakref.proxy(self)
    self._variant_tracker = self._track_trackable(
        _VariantTracker(
            self._variant_tensor,
            # _trace_variant_creation only works when executing eagerly, so we
            # don't want to run it immediately. We also want the _VariantTracker
            # to have a weak reference to the Dataset to avoid creating
            # reference cycles and making work for the garbage collector.
            lambda: weak_self._trace_variant_creation()()),  # pylint: disable=unnecessary-lambda,protected-access
        name="_variant_tracker")
    # Remember the graph this dataset was created in; exposed read-only via
    # the `_graph` property.
    self._graph_attr = ops.get_default_graph()
  @property
  def _variant_tensor(self):
    """The DT_VARIANT tensor that represents this dataset."""
    return self._variant_tensor_attr

  @_variant_tensor.setter
  def _variant_tensor(self, _):
    # Subclasses must pass the variant tensor to __init__ instead of
    # assigning it after construction.
    raise ValueError("The _variant_tensor property is read-only")
def _as_serialized_graph(self, allow_stateful=None):
"""Produces serialized graph representation of the dataset.
Args:
allow_stateful: If true, we allow stateful ops to be present in the graph
def. In that case, the state in these ops would be thrown away.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph.
"""
if compat.forward_compatible(2019, 9, 16) or allow_stateful:
return gen_dataset_ops.dataset_to_graph(self._variant_tensor,
allow_stateful=allow_stateful)
else:
return gen_dataset_ops.dataset_to_graph(self._variant_tensor)
  def _trace_variant_creation(self):
    """Traces a function which outputs a variant `tf.Tensor` for this dataset.

    Note that creating this function involves evaluating an op, and is currently
    only supported when executing eagerly.

    Returns:
      A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.

    Raises:
      NotImplementedError: If the dataset's variant tensor is not an eager
        tensor (i.e. the dataset was defined while building a graph).
      AssertionError: If the serialized graph has zero or multiple `_Retval`
        nodes.
    """
    variant = self._variant_tensor
    if not isinstance(variant, ops.EagerTensor):
      raise NotImplementedError(
          "Can only export Datasets which were created executing eagerly. "
          "Please file a feature request if this is important to you.")
    with context.eager_mode(), ops.device("CPU"):
      # Serialize this dataset's graph so its producing op can be re-imported
      # into a standalone function graph below.
      graph_def = graph_pb2.GraphDef().FromString(
          self._as_serialized_graph().numpy())  # pylint: disable=protected-access
    # The single `_Retval` node's input names the op that produces the
    # dataset variant.
    output_node_name = None
    for node in graph_def.node:
      if node.op == "_Retval":
        if output_node_name is not None:
          raise AssertionError(
              "Found multiple return values from the dataset's graph, expected "
              "only one.")
        output_node_name, = node.input
    if output_node_name is None:
      raise AssertionError("Could not find the dataset's output node.")
    # Add functions used in this Dataset to the function's graph, since they
    # need to follow it around (and for example be added to a SavedModel which
    # references the dataset).
    variant_function = wrap_function.function_from_graph_def(
        graph_def, inputs=[], outputs=output_node_name + ":0")
    for used_function in self._functions():
      used_function.function.add_to_graph(variant_function.graph)
    return variant_function
  @abc.abstractmethod
  def _inputs(self):
    """Returns a list of the input datasets of the dataset.

    Subclasses must override this; the default raises.

    Raises:
      NotImplementedError: Always, in the base class.
    """
    raise NotImplementedError("Dataset._inputs")
  @property
  def _graph(self):
    """The graph in which this dataset was created."""
    return self._graph_attr

  @_graph.setter
  def _graph(self, _):
    # The graph is fixed at construction time (see __init__).
    raise ValueError("The _graph property is read-only")
def _has_captured_ref(self):
"""Whether this dataset uses a function that captures ref variables.
Returns:
A boolean, which if true indicates that the dataset or one of its inputs
uses a function that captures ref variables.
"""
if context.executing_eagerly():
# RefVariables are not supported in eager mode
return False
def is_tensor_or_parent_ref(tensor):
if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access
return True
# If the captured tensor is an eager tensor, we cannot trace its inputs.
if isinstance(tensor, ops._EagerTensorBase): # pylint: disable=protected-access
return False
return any([is_tensor_or_parent_ref(x) for x in tensor.op.inputs])
for fn in self._functions():
if any([is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs]):
return True
return any(
[input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access
  # TODO(jsimsa): Change this to be the transitive closure of functions used
  # by this dataset and its inputs.
  def _functions(self):
    """Returns a list of functions associated with this dataset.

    Subclasses that wrap user-defined functions override this; the base
    implementation reports none.

    Returns:
      A list of `StructuredFunctionWrapper` objects.
    """
    return []
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
  def _apply_options(self):
    """Apply options, such as optimization configuration, to the dataset.

    Wraps the dataset in a chain of transformation datasets reflecting the
    merged options: threading limits first, then static graph optimizations,
    then autotuning, then stats aggregation.

    Returns:
      A `Dataset` with the options applied.
    """
    dataset = self
    options = self.options()
    # Threading options wrap the dataset in dedicated datasets.
    if options.experimental_threading is not None:
      t_options = options.experimental_threading
      if t_options.max_intra_op_parallelism is not None:
        dataset = _MaxIntraOpParallelismDataset(
            dataset, t_options.max_intra_op_parallelism)
      if t_options.private_threadpool_size is not None:
        dataset = _PrivateThreadPoolDataset(dataset,
                                            t_options.private_threadpool_size)
    # pylint: disable=protected-access
    static_optimizations = options._static_optimizations()
    static_optimization_configs = options._static_optimization_configs()
    # pylint: enable=protected-access
    if static_optimizations:
      if self._has_captured_ref():
        # Static optimizations are skipped (with a warning) when any function
        # captures a ref variable, since the rewrites cannot handle them.
        warnings.warn(
            "tf.data static optimizations are not compatible with tf.Variable. "
            "The following optimizations will be disabled: %s. To enable "
            "optimizations, use resource variables instead by calling "
            "`tf.enable_resource_variables()` at the start of the program." %
            ", ".join(static_optimizations))
      else:
        dataset = _OptimizeDataset(dataset, static_optimizations,
                                   static_optimization_configs)
    # Autotuning defaults to enabled; options may disable it or pick a
    # different algorithm / CPU budget.
    autotune = True
    algorithm = AutotuneAlgorithm.HILL_CLIMB
    cpu_budget = 0  # Indicates that all CPU cores should be used.
    if options.experimental_optimization is not None:
      if options.experimental_optimization.autotune is False:  # pylint: disable=g-bool-id-comparison
        autotune = False
      if options.experimental_optimization.autotune_algorithm is not None:
        algorithm = options.experimental_optimization.autotune_algorithm
      if options.experimental_optimization.autotune_cpu_budget is not None:
        cpu_budget = options.experimental_optimization.autotune_cpu_budget
    if autotune:
      dataset = _ModelDataset(dataset, algorithm, cpu_budget)
    if options.experimental_stats and options.experimental_stats.aggregator:  # pylint: disable=line-too-long
      dataset = _SetStatsAggregatorDataset(  # pylint: disable=protected-access
          dataset, options.experimental_stats.aggregator,
          options.experimental_stats.prefix,
          options.experimental_stats.counter_prefix)
    return dataset
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and therefore
can only be used in eager mode.
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If not inside of tf.function and not executing eagerly.
"""
if (context.executing_eagerly()
or ops.get_default_graph()._building_function): # pylint: disable=protected-access
return iterator_ops.IteratorV2(self)
else:
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
  @abc.abstractproperty
  def element_spec(self):
    """The type specification of an element of this dataset.

    Subclasses must override this; the default raises.

    Returns:
      A nested structure of `tf.TypeSpec` objects matching the structure of an
      element of this dataset and specifying the type of individual components.
    """
    raise NotImplementedError("Dataset.element_spec")
def __repr__(self):
output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, get_legacy_output_types(self))
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
  @property
  def _flat_shapes(self):
    """Returns a list of `tf.TensorShape`s for the element tensor representation.

    Returns:
      A list of `tf.TensorShape`s for the element tensor representation.
    """
    return structure.get_flat_tensor_shapes(self.element_spec)
  @property
  def _flat_types(self):
    """Returns a list of `tf.DType`s for the element tensor representation.

    Returns:
      A list of `tf.DType`s for the element tensor representation.
    """
    return structure.get_flat_tensor_types(self.element_spec)
@property
def _flat_structure(self):
"""Helper for setting `output_shapes` and `output_types` attrs of an op.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element. This helper
function generates these attrs as a keyword argument dictionary, allowing
`Dataset._variant_tensor` implementations to pass `**self._flat_structure`
to the op constructor.
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
"""
return {
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
  @property
  def _type_spec(self):
    """The `DatasetSpec` for this dataset (the `CompositeTensor` type spec)."""
    return DatasetSpec(self.element_spec)
  @staticmethod
  def from_tensors(tensors):
    """Creates a `Dataset` with a single element, comprising the given tensors.

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative described
    in [this
    guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).

    Args:
      tensors: A dataset element.

    Returns:
      Dataset: A `Dataset`.
    """
    return TensorDataset(tensors)
  @staticmethod
  def from_tensor_slices(tensors):
    """Creates a `Dataset` whose elements are slices of the given tensors.

    The given tensors are sliced along their first dimension. This operation
    preserves the structure of the input tensors, removing the first dimension
    of each tensor and using it as the dataset dimension. All input tensors
    must have the same size in their first dimensions.

    ```python
    # Slicing a 1D tensor produces scalar tensor elements.
    Dataset.from_tensor_slices([1, 2, 3])  # ==> [ 1, 2, 3 ]

    # Slicing a 2D tensor produces 1D tensor elements.
    Dataset.from_tensor_slices([[1, 2], [3, 4], [5, 6]])
    # ==> [ [1, 2], [3, 4], [5, 6] ]

    # Slicing a tuple of 1D tensors produces tuple elements containing scalar
    # tensors.
    Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))
    # ==> [ (1, 3, 5), (2, 4, 6) ]

    # Dictionary structure is also preserved.
    Dataset.from_tensor_slices({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
    # ==> [ {"a": 1, "b": 3, "c": 5}, {"a": 2, "b": 4, "c": 6} ]
    ```

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative described
    in [this guide](
    https://tensorflow.org/guide/data#consuming_numpy_arrays).

    Args:
      tensors: A dataset element, with each component having the same size in
        the first dimension.

    Returns:
      Dataset: A `Dataset`.
    """
    return TensorSliceDataset(tensors)
class _GeneratorState(object):
"""Stores outstanding iterators created from a Python generator.
This class keeps track of potentially multiple iterators that may have
been created from a generator, e.g. in the case that the dataset is
repeated, or nested within a parallel computation.
"""
def __init__(self, generator):
self._generator = generator
self._lock = threading.Lock()
self._next_id = 0 # GUARDED_BY(self._lock)
self._args = {}
self._iterators = {}
def get_next_id(self, *args):
with self._lock:
ret = self._next_id
self._next_id += 1
self._args[ret] = args
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(ret, dtype=np.int64)
def get_iterator(self, iterator_id):
try:
return self._iterators[iterator_id]
except KeyError:
iterator = iter(self._generator(*self._args.pop(iterator_id)))
self._iterators[iterator_id] = iterator
return iterator
def iterator_completed(self, iterator_id):
del self._iterators[iterator_id]
  @staticmethod
  def from_generator(generator, output_types, output_shapes=None, args=None):
    """Creates a `Dataset` whose elements are generated by `generator`.

    The `generator` argument must be a callable object that returns
    an object that supports the `iter()` protocol (e.g. a generator function).
    The elements generated by `generator` must be compatible with the given
    `output_types` and (optional) `output_shapes` arguments.

    For example:

    ```python
    import itertools
    tf.compat.v1.enable_eager_execution()

    def gen():
      for i in itertools.count(1):
        yield (i, [1] * i)

    ds = tf.data.Dataset.from_generator(
        gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))

    for value in ds.take(2):
      print(value)
    # (1, array([1]))
    # (2, array([1, 1]))
    ```

    NOTE: The current implementation of `Dataset.from_generator()` uses
    `tf.numpy_function` and inherits the same constraints. In particular, it
    requires the `Dataset`- and `Iterator`-related operations to be placed
    on a device in the same process as the Python program that called
    `Dataset.from_generator()`. The body of `generator` will not be
    serialized in a `GraphDef`, and you should not use this method if you
    need to serialize your model and restore it in a different environment.

    NOTE: If `generator` depends on mutable global variables or other external
    state, be aware that the runtime may invoke `generator` multiple times
    (in order to support repeating the `Dataset`) and at any time
    between the call to `Dataset.from_generator()` and the production of the
    first element from the generator. Mutating global variables or external
    state can cause undefined behavior, and we recommend that you explicitly
    cache any external state in `generator` before calling
    `Dataset.from_generator()`.

    Args:
      generator: A callable object that returns an object that supports the
        `iter()` protocol. If `args` is not specified, `generator` must take no
        arguments; otherwise it must take as many arguments as there are values
        in `args`.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element yielded by `generator`.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element yielded by `generator`.
      args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
        and passed to `generator` as NumPy-array arguments.

    Returns:
      Dataset: A `Dataset`.

    Raises:
      TypeError: If `generator` is not callable.
    """
    if not callable(generator):
      raise TypeError("`generator` must be callable.")
    # Normalize `output_shapes` to a structure matching `output_types`
    # (fully unknown shapes when the caller omitted them).
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    if args is None:
      args = ()
    else:
      args = tuple(ops.convert_n_to_tensor(args, name="args"))

    flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
    flattened_shapes = nest.flatten(output_shapes)

    # Shared Python-side bookkeeping for all iterators created from
    # `generator`; captured by the nested `py_func`s below.
    generator_state = DatasetV2._GeneratorState(generator)

    def get_iterator_id_fn(unused_dummy):
      """Creates a unique `iterator_id` for each pass over the dataset.

      The returned `iterator_id` disambiguates between multiple concurrently
      existing iterators.

      Args:
        unused_dummy: Ignored value.

      Returns:
        A `tf.int64` tensor whose value uniquely identifies an iterator in
        `generator_state`.
      """
      return script_ops.numpy_function(generator_state.get_next_id, args,
                                       dtypes.int64)

    def generator_next_fn(iterator_id_t):
      """Generates the next element from iterator with ID `iterator_id_t`.

      We map this function across an infinite repetition of the
      `iterator_id_t`, and raise `StopIteration` to terminate the iteration.

      Args:
        iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
          iterator in `generator_state` from which to generate an element.

      Returns:
        The next element to generate from the iterator.
      """

      def generator_py_func(iterator_id):
        """A `py_func` that will be called to invoke the iterator."""
        # `next()` raises `StopIteration` when there are no more
        # elements remaining to be generated.
        values = next(generator_state.get_iterator(iterator_id))

        # Use the same _convert function from the py_func() implementation to
        # convert the returned values to arrays early, so that we can inspect
        # their values.
        try:
          flattened_values = nest.flatten_up_to(output_types, values)
        except (TypeError, ValueError):
          six.reraise(TypeError, TypeError(
              "`generator` yielded an element that did not match the expected "
              "structure. The expected structure was %s, but the yielded "
              "element was %s." % (output_types, values)), sys.exc_info()[2])
        ret_arrays = []
        for ret, dtype in zip(flattened_values, flattened_types):
          try:
            ret_arrays.append(script_ops.FuncRegistry._convert(  # pylint: disable=protected-access
                ret, dtype=dtype.as_numpy_dtype))
          except (TypeError, ValueError):
            six.reraise(TypeError, TypeError(
                "`generator` yielded an element that could not be converted to "
                "the expected type. The expected type was %s, but the yielded "
                "element was %s." % (dtype.name, ret)), sys.exc_info()[2])

        # Additional type and shape checking to ensure that the components
        # of the generated element match the `output_types` and `output_shapes`
        # arguments.
        for (ret_array, expected_dtype, expected_shape) in zip(
            ret_arrays, flattened_types, flattened_shapes):
          if ret_array.dtype != expected_dtype.as_numpy_dtype:
            raise TypeError(
                "`generator` yielded an element of type %s where an element "
                "of type %s was expected." % (ret_array.dtype,
                                              expected_dtype.as_numpy_dtype))
          if not expected_shape.is_compatible_with(ret_array.shape):
            raise ValueError(
                "`generator` yielded an element of shape %s where an element "
                "of shape %s was expected." % (ret_array.shape, expected_shape))

        return ret_arrays

      flat_values = script_ops.numpy_function(generator_py_func,
                                              [iterator_id_t], flattened_types)

      # The `py_func()` op drops the inferred shapes, so we add them back in
      # here.
      if output_shapes is not None:
        for ret_t, shape in zip(flat_values, flattened_shapes):
          ret_t.set_shape(shape)

      return nest.pack_sequence_as(output_types, flat_values)

    def finalize_fn(iterator_id_t):
      """Releases host-side state for the iterator with ID `iterator_id_t`."""

      def finalize_py_func(iterator_id):
        generator_state.iterator_completed(iterator_id)
        # We return a dummy value so that the `finalize_fn` has a valid
        # signature.
        # NOTE(mrry): Explicitly create an array of `np.int64` because implicit
        # casting in `py_func()` will create an array of `np.int32` on Windows,
        # leading to a runtime error.
        return np.array(0, dtype=np.int64)

      return script_ops.numpy_function(finalize_py_func, [iterator_id_t],
                                       dtypes.int64)

    # This function associates each traversal of `generator` with a unique
    # iterator ID.
    def flat_map_fn(dummy_arg):
      # The `get_iterator_id_fn` gets a unique ID for the current instance of
      # of the generator.
      # The `generator_next_fn` gets the next element from the iterator with the
      # given ID, and raises StopIteration when that iterator contains no
      # more elements.
      return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
                               finalize_fn)

    # A single-element dataset that, each time it is evaluated, contains a
    # freshly-generated and unique (for the returned dataset) int64
    # ID that will be used to identify the appropriate Python state, which
    # is encapsulated in `generator_state`, and captured in
    # `get_iterator_id_map_fn`.
    dummy = 0
    id_dataset = Dataset.from_tensors(dummy)

    # A dataset that contains all of the elements generated by a
    # single iterator created from `generator`, identified by the
    # iterator ID contained in `id_dataset`. Lifting the iteration
    # into a flat_map here enables multiple repetitions and/or nested
    # versions of the returned dataset to be created, because it forces
    # the generation of a new ID for each version.
    return id_dataset.flat_map(flat_map_fn)
  @staticmethod
  def range(*args):
    """Creates a `Dataset` of a step-separated range of values.

    For example:

    ```python
    Dataset.range(5) == [0, 1, 2, 3, 4]
    Dataset.range(2, 5) == [2, 3, 4]
    Dataset.range(1, 5, 2) == [1, 3]
    Dataset.range(1, 5, -2) == []
    Dataset.range(5, 1) == []
    Dataset.range(5, 1, -2) == [5, 3]
    ```

    Args:
      *args: follows the same semantics as python's xrange.
        len(args) == 1 -> start = 0, stop = args[0], step = 1
        len(args) == 2 -> start = args[0], stop = args[1], step = 1
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2]

    Returns:
      Dataset: A `RangeDataset`.

    Raises:
      ValueError: if len(args) == 0.
    """
    return RangeDataset(*args)
  @staticmethod
  def zip(datasets):
    """Creates a `Dataset` by zipping together the given datasets.

    This method has similar semantics to the built-in `zip()` function
    in Python, with the main difference being that the `datasets`
    argument can be an arbitrary nested structure of `Dataset` objects.
    For example:

    ```python
    a = Dataset.range(1, 4)  # ==> [ 1, 2, 3 ]
    b = Dataset.range(4, 7)  # ==> [ 4, 5, 6 ]
    c = Dataset.range(7, 13).batch(2)  # ==> [ [7, 8], [9, 10], [11, 12] ]
    d = Dataset.range(13, 15)  # ==> [ 13, 14 ]

    # The nested structure of the `datasets` argument determines the
    # structure of elements in the resulting dataset.
    Dataset.zip((a, b))  # ==> [ (1, 4), (2, 5), (3, 6) ]
    Dataset.zip((b, a))  # ==> [ (4, 1), (5, 2), (6, 3) ]

    # The `datasets` argument may contain an arbitrary number of
    # datasets.
    Dataset.zip((a, b, c))  # ==> [ (1, 4, [7, 8]),
                            #       (2, 5, [9, 10]),
                            #       (3, 6, [11, 12]) ]

    # The number of elements in the resulting dataset is the same as
    # the size of the smallest dataset in `datasets`.
    Dataset.zip((a, d))  # ==> [ (1, 13), (2, 14) ]
    ```

    Args:
      datasets: A nested structure of datasets.

    Returns:
      Dataset: A `Dataset`.
    """
    return ZipDataset(datasets)
  def concatenate(self, dataset):
    """Creates a `Dataset` by concatenating the given dataset with this dataset.

    ```python
    a = Dataset.range(1, 4)  # ==> [ 1, 2, 3 ]
    b = Dataset.range(4, 8)  # ==> [ 4, 5, 6, 7 ]

    # The input dataset and dataset to be concatenated should have the same
    # nested structures and output types.
    # c = Dataset.range(8, 14).batch(2)  # ==> [ [8, 9], [10, 11], [12, 13] ]
    # d = Dataset.from_tensor_slices([14.0, 15.0, 16.0])
    # a.concatenate(c) and a.concatenate(d) would result in error.

    a.concatenate(b)  # ==> [ 1, 2, 3, 4, 5, 6, 7 ]
    ```

    Args:
      dataset: `Dataset` to be concatenated.

    Returns:
      Dataset: A `Dataset`.
    """
    return ConcatenateDataset(self, dataset)
  def prefetch(self, buffer_size):
    """Creates a `Dataset` that prefetches elements from this dataset.

    Note: Like other `Dataset` methods, prefetch operates on the
    elements of the input dataset. It has no concept of examples vs. batches.
    `examples.prefetch(2)` will prefetch two elements (2 examples),
    while `examples.batch(20).prefetch(2)` will prefetch 2 elements
    (2 batches, of 20 examples each).

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
        number of elements that will be buffered when prefetching.

    Returns:
      Dataset: A `Dataset`.
    """
    return PrefetchDataset(self, buffer_size)
@staticmethod
def list_files(file_pattern, shuffle=None, seed=None):
"""A dataset of all files matching one or more glob patterns.
NOTE: The default behavior of this method is to return filenames in
a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
to get results in a deterministic order.
Example:
If we had the following files on our filesystem:
- /path/to/dir/a.txt
- /path/to/dir/b.py
- /path/to/dir/c.py
If we pass "/path/to/dir/*.py" as the directory, the dataset
would produce:
- /path/to/dir/b.py
- /path/to/dir/c.py
Args:
file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
(scalar or vector), representing the filename glob (i.e. shell wildcard)
pattern(s) that will be matched.
shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
"""
with ops.name_scope("list_files"):
if shuffle is None:
shuffle = True
file_pattern = ops.convert_to_tensor(
file_pattern, dtype=dtypes.string, name="file_pattern")
matching_files = gen_io_ops.matching_files(file_pattern)
# Raise an exception if `file_pattern` does not match any files.
condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
name="match_not_empty")
message = math_ops.add(
"No files matched pattern: ",
string_ops.reduce_join(file_pattern, separator=", "), name="message")
assert_not_empty = control_flow_ops.Assert(
condition, [message], summarize=1, name="assert_not_empty")
with ops.control_dependencies([assert_not_empty]):
matching_files = array_ops.identity(matching_files)
dataset = Dataset.from_tensor_slices(matching_files)
if shuffle:
# NOTE(mrry): The shuffle buffer size must be greater than zero, but the
# list of files might be empty.
buffer_size = math_ops.maximum(
array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
dataset = dataset.shuffle(buffer_size, seed=seed)
return dataset
def repeat(self, count=None):
"""Repeats this dataset `count` times.
NOTE: If this dataset is a function of global state (e.g. a random number
generator), then different repetitions may produce different elements.
Args:
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior (if
`count` is `None` or `-1`) is for the dataset be repeated indefinitely.
Returns:
Dataset: A `Dataset`.
"""
return RepeatDataset(self, count)
def enumerate(self, start=0):
"""Enumerates the elements of this dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.enumerate(start=5)) == { (5, 1), (6, 2), (7, 3) }
b.enumerate() == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
Dataset: A `Dataset`.
"""
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly
samples elements from this buffer, replacing the selected elements with new
elements. For perfect shuffling, a buffer size greater than or equal to the
full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is
set to 1,000, then `shuffle` will initially select a random element from
only the first 1,000 elements in the buffer. Once an element is selected,
its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1,000 element buffer.
`reshuffle_each_iteration` controls whether the shuffle order should be
different for each epoch. In TF 1.X, the idiomatic way to create epochs
was through the `repeat` transformation:
```python
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=True)
d = d.repeat(2) # ==> [ 1, 0, 2, 1, 2, 0 ]
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=False)
d = d.repeat(2) # ==> [ 1, 0, 2, 1, 0, 2 ]
```
In TF 2.0, tf.data.Dataset objects are Python iterables which makes it
possible to also create epochs through Python iteration:
```python
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=True)
for elem in d:
# ==> [ 1, 0, 2 ]
for elem in d:
# ==> [ 1, 2, 0 ]
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=False)
for elem in d:
# ==> [ 1, 0, 2 ]
for elem in d:
# ==> [ 1, 0, 2 ]
```
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
def cache(self, filename=""):
"""Caches the elements in this dataset.
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
directory on the filesystem to use for caching elements in this Dataset.
If a filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
return CacheDataset(self, filename)
def take(self, count):
"""Creates a `Dataset` with at most `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
Returns:
Dataset: A `Dataset`.
"""
return TakeDataset(self, count)
def skip(self, count):
"""Creates a `Dataset` that skips `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
Returns:
Dataset: A `Dataset`.
"""
return SkipDataset(self, count)
def shard(self, num_shards, index):
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
`shard` is a deterministic operator; the Dataset produced by
`A.shard(n, i)` will contain all elements of A whose index mod n = i.
```python
# Create a Dataset with 60 elements.
A = tf.data.Dataset.range(60) # ==> [0, 1, 2, 3, ..., 57, 58, 59]
# Create 3 Datasets, each with 20 elements from Dataset A.
B = A.shard(num_shards=3, index=0) # ==> [0, 3, 6, 9, ..., 51, 54, 57]
C = A.shard(num_shards=3, index=1) # ==> [1, 4, 7, 10, ..., 52, 55, 58]
D = A.shard(num_shards=3, index=2) # ==> [2, 5, 8, 11, ..., 53, 56, 59]
# There is no overlap between Datasets B, C and D.
```
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can skip elements as follows:
```python
d = tf.data.TFRecordDataset(input_file)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(pattern)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Returns:
Dataset: A `Dataset`.
Raises:
InvalidArgumentError: if `num_shards` or `index` are illegal values.
Note: error checking is done on a best-effort basis, and errors aren't
guaranteed to be caught upon dataset creation. (e.g. providing in a
placeholder tensor bypasses the early checking, and will instead result
in an error during a session.run call.)
"""
return ShardDataset(self, num_shards, index)
def batch(self, batch_size, drop_remainder=False):
"""Combines consecutive elements of this dataset into batches.
The components of the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return BatchDataset(self, batch_size, drop_remainder)
  def padded_batch(self,
                   batch_size,
                   padded_shapes,
                   padding_values=None,
                   drop_remainder=False):
    """Combines consecutive elements of this dataset into padded batches.

    This transformation combines multiple consecutive elements of the input
    dataset into a single element.

    Like `tf.data.Dataset.batch`, the components of the resulting element will
    have an additional outer dimension, which will be `batch_size` (or
    `N % batch_size` for the last element if `batch_size` does not divide the
    number of input elements `N` evenly and `drop_remainder` is `False`). If
    your program depends on the batches having the same outer dimension, you
    should set the `drop_remainder` argument to `True` to prevent the smaller
    batch from being produced.

    Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
    different shapes, and this transformation will pad each component to the
    respective shape in `padded_shapes`. The `padded_shapes` argument
    determines the resulting shape for each dimension of each component in an
    output element:

    * If the dimension is a constant (e.g. `tf.compat.v1.Dimension(37)`), the
      component will be padded out to that length in that dimension.
    * If the dimension is unknown (e.g. `tf.compat.v1.Dimension(None)`), the
      component will be padded out to the maximum length of all elements in
      that dimension.

    See also `tf.data.experimental.dense_to_sparse_batch`, which combines
    elements that may have different shapes into a `tf.SparseTensor`.

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector
        tensor-like objects representing the shape to which the respective
        component of each input element should be padded prior to batching. Any
        unknown dimensions (e.g. `tf.compat.v1.Dimension(None)` in a
        `tf.TensorShape` or `-1` in a tensor-like object) will be padded to the
        maximum size of that dimension in each batch.
      padding_values: (Optional.) A nested structure of scalar-shaped
        `tf.Tensor`, representing the padding values to use for the respective
        components. Defaults are `0` for numeric types and the empty string for
        string types.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.

    Returns:
      Dataset: A `Dataset`.
    """
    return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
                              drop_remainder)
  def map(self, map_func, num_parallel_calls=None):
    """Maps `map_func` across the elements of this dataset.

    This transformation applies `map_func` to each element of this dataset, and
    returns a new dataset containing the transformed elements, in the same
    order as they appeared in the input.

    For example:

    ```python
    a = Dataset.range(1, 6)  # ==> [ 1, 2, 3, 4, 5 ]

    a.map(lambda x: x + 1)  # ==> [ 2, 3, 4, 5, 6 ]
    ```

    The input signature of `map_func` is determined by the structure of each
    element in this dataset. For example:

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.

    # Each element is a `tf.Tensor` object.
    a = { 1, 2, 3, 4, 5 }
    # `map_func` takes a single argument of type `tf.Tensor` with the same
    # shape and dtype.
    result = a.map(lambda x: ...)

    # Each element is a tuple containing two `tf.Tensor` objects.
    b = { (1, "foo"), (2, "bar"), (3, "baz") }
    # `map_func` takes two arguments of type `tf.Tensor`.
    result = b.map(lambda x_int, y_str: ...)

    # Each element is a dictionary mapping strings to `tf.Tensor` objects.
    c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} }
    # `map_func` takes a single argument of type `dict` with the same keys as
    # the elements.
    result = c.map(lambda d: ...)
    ```

    The value or values returned by `map_func` determine the structure of each
    element in the returned dataset.

    ```python
    # `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
    def f(...):
      return tf.constant(37.0)
    result = dataset.map(f)
    result.output_classes == tf.Tensor
    result.output_types == tf.float32
    result.output_shapes == []  # scalar

    # `map_func` returns two `tf.Tensor` objects.
    def g(...):
      return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
    result = dataset.map(g)
    result.output_classes == (tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string)
    result.output_shapes == ([], [3])

    # Python primitives, lists, and NumPy arrays are implicitly converted to
    # `tf.Tensor`.
    def h(...):
      return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0], dtype=np.float64)
    result = dataset.map(h)
    result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string, tf.float64)
    result.output_shapes == ([], [3], [2])

    # `map_func` can return nested structures.
    def i(...):
      return {"a": 37.0, "b": [42, 16]}, "foo"
    result = dataset.map(i)
    result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
    result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
    result.output_shapes == ({"a": [], "b": [2]}, [])
    ```

    `map_func` can accept as arguments and return any type of dataset element.

    Note that irrespective of the context in which `map_func` is defined (eager
    vs. graph), tf.data traces the function and executes it as a graph. To use
    Python code inside of the function you have two options:

    1) Rely on AutoGraph to convert Python code into an equivalent graph
    computation. The downside of this approach is that AutoGraph can convert
    some but not all Python code.

    2) Use `tf.py_function`, which allows you to write arbitrary Python code
    but will generally result in worse performance than 1). For example:

    ```python
    d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])

    # transform a string tensor to upper case string using a Python function
    def upper_case_fn(t: tf.Tensor) -> str:
      return t.numpy().decode('utf-8').upper()

    d.map(lambda x: tf.py_function(func=upper_case_fn,
          inp=[x], Tout=tf.string))  # ==> [ "HELLO", "WORLD" ]
    ```

    Args:
      map_func: A function mapping a dataset element to another dataset element.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number elements to process asynchronously in parallel.
        If not specified, elements will be processed sequentially. If the value
        `tf.data.experimental.AUTOTUNE` is used, then the number of parallel
        calls is set dynamically based on available CPU.

    Returns:
      Dataset: A `Dataset`.
    """
    if num_parallel_calls is None:
      return MapDataset(self, map_func, preserve_cardinality=True)
    else:
      return ParallelMapDataset(
          self, map_func, num_parallel_calls, preserve_cardinality=True)
def flat_map(self, map_func):
"""Maps `map_func` across this dataset and flattens the result.
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
```python
a = Dataset.from_tensor_slices([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ])
a.flat_map(lambda x: Dataset.from_tensor_slices(x + 1)) # ==>
# [ 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
```
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`
Args:
map_func: A function mapping a dataset element to a dataset.
Returns:
Dataset: A `Dataset`.
"""
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
"""Maps `map_func` across this dataset, and interleaves the results.
For example, you can use `Dataset.interleave()` to process many input files
concurrently:
```python
# Preprocess 4 files concurrently, and interleave blocks of 16 records from
# each file.
filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ...]
dataset = (Dataset.from_tensor_slices(filenames)
.interleave(lambda x:
TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
cycle_length=4, block_length=16))
```
The `cycle_length` and `block_length` arguments control the order in which
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
consuming the next input element each time it reaches the end of an
iterator.
For example:
```python
a = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
# NOTE: New lines indicate "block" boundaries.
a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),
cycle_length=2, block_length=4) # ==> [1, 1, 1, 1,
# 2, 2, 2, 2,
# 1, 1,
# 2, 2,
# 3, 3, 3, 3,
# 4, 4, 4, 4,
# 3, 3,
# 4, 4,
# 5, 5, 5, 5,
# 5, 5]
```
NOTE: The order of elements yielded by this transformation is
deterministic, as long as `map_func` is a pure function. If
`map_func` contains any stateful operations, the order in which
that state is accessed is undefined.
Args:
map_func: A function mapping a dataset element to a dataset.
cycle_length: (Optional.) The number of input elements that will be
processed concurrently. If not specified, the value will be derived from
the number of available CPU cores. If the `num_parallel_calls` argument
is set to `tf.data.experimental.AUTOTUNE`, the `cycle_length` argument
also identifies the maximum degree of parallelism.
block_length: (Optional.) The number of consecutive elements to produce
from each input element before cycling to another input element.
num_parallel_calls: (Optional.) If specified, the implementation creates a
threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(self, map_func, cycle_length,
block_length, num_parallel_calls)
def filter(self, predicate):
"""Filters this dataset according to `predicate`.
```python
d = tf.data.Dataset.from_tensor_slices([1, 2, 3])
d = d.filter(lambda x: x < 3) # ==> [1, 2]
# `tf.math.equal(x, y)` is required for equality comparison
def filter_fn(x):
return tf.math.equal(x, 1)
d = d.filter(filter_fn) # ==> [1]
```
Args:
predicate: A function mapping a dataset element to a boolean.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate)
def apply(self, transformation_func):
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
For example:
```
dataset = (dataset.map(lambda x: x ** 2)
.apply(group_by_window(key_func, reduce_func, window_size))
.map(lambda x: x ** 3))
```
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
Dataset: The `Dataset` returned by applying `transformation_func` to this
dataset.
"""
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError(
"`transformation_func` must return a Dataset. Got {}.".format(
dataset))
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
  def window(self, size, shift=None, stride=1, drop_remainder=False):
    """Combines (nests of) input elements into a dataset of (nests of) windows.

    A "window" is a finite dataset of flat elements of size `size` (or possibly
    fewer if there are not enough input elements to fill the window and
    `drop_remainder` evaluates to false).

    The `stride` argument determines the stride of the input elements, and the
    `shift` argument determines the shift of the window.

    For example, letting {...} to represent a Dataset:

    - `tf.data.Dataset.range(7).window(2)` produces
      `{{0, 1}, {2, 3}, {4, 5}, {6}}`
    - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
      `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
    - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
      `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`

    Note that when the `window` transformation is applied to a dataset of
    nested elements, it produces a dataset of nested windows.

    For example:

    - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
      produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
    - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
      produces `{{"a": {0, 1}}, {"a": {2, 3}}}`

    Args:
      size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of the input dataset to combine into a window.
      shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        forward shift of the sliding window in each iteration. Defaults to
        `size`.
      stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        stride of the input elements in the sliding window.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether a window should be dropped in case its size is smaller than
        `size`.

    Returns:
      Dataset: A `Dataset` of (nests of) windows -- a finite datasets of flat
        elements created from the (nests of) input elements.
    """
    # A tumbling window (no overlap) is the default: shift by the window size.
    if shift is None:
      shift = size
    return WindowDataset(self, size, shift, stride, drop_remainder)
  def reduce(self, initial_state, reduce_func):
    """Reduces the input dataset to a single element.

    The transformation calls `reduce_func` successively on every element of
    the input dataset until the dataset is exhausted, aggregating information
    in its internal state. The `initial_state` argument is used for the
    initial state and the final state is returned as the result.

    For example:

    - `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1)`
      produces `5`
    - `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y)`
      produces `10`

    Args:
      initial_state: An element representing the initial state of the
        transformation.
      reduce_func: A function that maps `(old_state, input_element)` to
        `new_state`. It must take two arguments and return a new element
        The structure of `new_state` must match the structure of
        `initial_state`.

    Returns:
      A dataset element corresponding to the final state of the transformation.
    """
    with ops.name_scope("initial_state"):
      initial_state = structure.normalize_element(initial_state)
    state_structure = structure.type_spec_from_value(initial_state)
    # Iteratively rerun the reduce function until reaching a fixed point on
    # `state_structure`: tracing `reduce_func` may reveal that the state's
    # shapes must be relaxed (e.g. a dimension grows per step), in which case
    # the function is re-traced against the weakened structure.
    need_to_rerun = True
    while need_to_rerun:
      wrapped_func = StructuredFunctionWrapper(
          reduce_func,
          "reduce()",
          input_structure=(state_structure, self.element_spec),
          add_to_graph=False)
      # Extract and validate class information from the returned values.
      # Classes must not change between iterations; a mismatch is an error.
      output_classes = wrapped_func.output_classes
      state_classes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
          state_structure)
      for new_state_class, state_class in zip(
          nest.flatten(output_classes), nest.flatten(state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              "The element classes for the new state must match the initial "
              "state. Expected %s; got %s." %
              (state_classes, wrapped_func.output_classes))
      # Extract and validate type information from the returned values.
      # Dtypes must also be stable across iterations.
      output_types = wrapped_func.output_types
      state_types = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
          state_structure)
      for new_state_type, state_type in zip(
          nest.flatten(output_types), nest.flatten(state_types)):
        if new_state_type != state_type:
          raise TypeError(
              "The element types for the new state must match the initial "
              "state. Expected %s; got %s." %
              (state_types, wrapped_func.output_types))
      # Extract shape information from the returned values. Shapes, unlike
      # classes and types, are allowed to weaken (become less specific); we
      # compute the most specific shape compatible with both old and new.
      output_shapes = wrapped_func.output_shapes
      state_shapes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
          state_structure)
      flat_state_shapes = nest.flatten(state_shapes)
      flat_new_state_shapes = nest.flatten(output_shapes)
      weakened_state_shapes = [
          original.most_specific_compatible_shape(new)
          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
      ]
      # Rerun only if weakening actually changed some shape; otherwise the
      # fixed point has been reached.
      need_to_rerun = False
      for original_shape, weakened_shape in zip(flat_state_shapes,
                                                weakened_state_shapes):
        if original_shape.ndims is not None and (
            weakened_shape.ndims is None or
            original_shape.as_list() != weakened_shape.as_list()):
          need_to_rerun = True
          break
      if need_to_rerun:
        # TODO(b/110122868): Support a "most specific compatible structure"
        # method for combining structures, to avoid using legacy structures
        # here.
        state_structure = structure.convert_legacy_structure(
            state_types,
            nest.pack_sequence_as(state_shapes, weakened_state_shapes),
            state_classes)
    reduce_func = wrapped_func.function
    reduce_func.add_to_graph(ops.get_default_graph())
    dataset = self._apply_options()
    # pylint: disable=protected-access
    return structure.from_compatible_tensor_list(
        state_structure,
        gen_dataset_ops.reduce_dataset(
            dataset._variant_tensor,
            structure.to_tensor_list(state_structure, initial_state),
            reduce_func.captured_inputs,
            f=reduce_func,
            output_shapes=structure.get_flat_tensor_shapes(state_structure),
            output_types=structure.get_flat_tensor_types(state_structure)))
  def unbatch(self):
    """Splits elements of a dataset into multiple elements.

    For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
    where `B` may vary for each input element, then for each element in the
    dataset, the unbatched dataset will contain `B` consecutive elements
    of shape `[a0, a1, ...]`.

    ```python
    # NOTE: The following example uses `{ ... }` to represent the contents
    # of a dataset.
    ds = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
    ds.unbatch() == {'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
    ```

    Returns:
      Dataset: A `Dataset` containing the unbatched elements.
    """
    # NOTE(mrry): We must ensure that any non-tensor components in `dataset`
    # are normalized to their dense tensor representation, so that the
    # non-tensor oblivious unbatching logic will slice them appropriately.
    # This leads to a somewhat inefficient re-encoding step for all non-tensor
    # components.
    #
    # TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.
    def normalize(arg, *rest):
      # Accepts either a single structured element or a multi-component
      # element splatted across positional args.
      # pylint: disable=protected-access
      if rest:
        return structure.to_batched_tensor_list(self.element_spec,
                                                (arg,) + rest)
      else:
        return structure.to_batched_tensor_list(self.element_spec, arg)
    normalized_dataset = self.map(normalize)
    # NOTE(mrry): Our `map()` has lost information about the structure of
    # non-tensor components, so re-apply the structure of the original dataset.
    restructured_dataset = _RestructuredDataset(normalized_dataset,
                                                self.element_spec)
    return _UnbatchDataset(restructured_dataset)
def with_options(self, options):
"""Returns a new `tf.data.Dataset` with the given options set.
The options are "global" in the sense they apply to the entire dataset.
If options are set multiple times, they are merged as long as different
options do not use different non-default values.
Args:
options: A `tf.data.Options` that identifies the options the use.
Returns:
Dataset: A `Dataset` with the given options.
Raises:
ValueError: when an option is set more than once to a non-default value
"""
return _OptionsDataset(self, options)
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements and a "logical plan" of transformations that act on
those elements.
"""
  def __init__(self):
    """Builds the dataset, rewriting `_as_variant_tensor` errors for clarity."""
    try:
      # V1 subclasses implement `_as_variant_tensor()`; the result is handed
      # to the V2 base-class constructor below.
      variant_tensor = self._as_variant_tensor()
    except AttributeError as e:
      # Distinguish "caller used the removed accessor" from "subclass read an
      # attribute before it was defined" to give an actionable message.
      if "_as_variant_tensor" in str(e):
        raise AttributeError("Please use _variant_tensor instead of "
                             "_as_variant_tensor() to obtain the variant "
                             "associated with a dataset")
      raise AttributeError("{}: A likely cause of this error is that the super "
                           "call for this dataset is not the last line of the "
                           "__init__ method. The base class causes the "
                           "_as_variant_tensor call in its constructor and "
                           "if that uses attributes defined in the __init__ "
                           "method, those attrs need to be defined before the "
                           "super call.".format(e))
    super(DatasetV1, self).__init__(variant_tensor)
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
"""
raise NotImplementedError("Dataset._as_variant_tensor")
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
def make_one_shot_iterator(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not currently support re-initialization.
Returns:
An `Iterator` over the elements of this dataset.
"""
return self._make_one_shot_iterator()
  def _make_one_shot_iterator(self):  # pylint: disable=missing-docstring
    """Creates an auto-initialized `Iterator` over this dataset."""
    # Eager mode: the V2 iterator initializes itself; no graph work needed.
    if context.executing_eagerly():
      return iterator_ops.IteratorV2(self)
    _ensure_same_dataset_graph(self)
    # Now that we create datasets at python object creation time, the capture
    # by value _make_dataset() function would try to capture these variant
    # tensor dataset inputs, which are marked as stateful ops and would throw
    # an error if we try and capture them. We therefore traverse the graph
    # to find all these ops and whitelist them so that the capturing
    # logic instead of throwing an error recreates these ops which is what was
    # happening before.
    all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)
    graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
    # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
    # a 0-argument function.
    @function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops)
    def _make_dataset():
      """Factory function for a dataset."""
      # NOTE(mrry): `Defun` does not capture the graph-level seed from the
      # enclosing graph, so if a graph-level seed is present we set the local
      # graph seed based on a combination of the graph- and op-level seeds.
      if graph_level_seed is not None:
        assert op_level_seed is not None
        core_random_seed.set_random_seed(
            (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
      dataset = self._apply_options()
      return dataset._variant_tensor  # pylint: disable=protected-access
    try:
      _make_dataset.add_to_graph(ops.get_default_graph())
    except ValueError as err:
      # Rewrite the opaque capture error into actionable user guidance.
      if "Cannot capture a stateful node" in str(err):
        raise ValueError(
            "Failed to create a one-shot iterator for a dataset. "
            "`Dataset.make_one_shot_iterator()` does not support datasets that "
            "capture stateful objects, such as a `Variable` or `LookupTable`. "
            "In these cases, use `Dataset.make_initializable_iterator()`. "
            "(Original error: %s)" % err)
      else:
        six.reraise(ValueError, err)
    # pylint: disable=protected-access
    return iterator_ops.Iterator(
        gen_dataset_ops.one_shot_iterator(
            dataset_factory=_make_dataset, **self._flat_structure), None,
        get_legacy_output_types(self), get_legacy_output_shapes(self),
        get_legacy_output_classes(self))
  @deprecation.deprecated(
      None, "Use `for ... in dataset:` to iterate over a dataset. If using "
      "`tf.estimator`, return the `Dataset` object directly from your input "
      "function. As a last resort, you can use "
      "`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
  def make_initializable_iterator(self, shared_name=None):
    """Creates an `Iterator` for enumerating the elements of this dataset.
    Note: The returned iterator will be in an uninitialized state,
    and you must run the `iterator.initializer` operation before using it:
    ```python
    dataset = ...
    iterator = dataset.make_initializable_iterator()
    # ...
    sess.run(iterator.initializer)
    ```
    Args:
      shared_name: (Optional.) If non-empty, the returned iterator will be
        shared under the given name across multiple sessions that share the same
        devices (e.g. when using a remote server).
    Returns:
      An `Iterator` over the elements of this dataset.
    Raises:
      RuntimeError: If eager execution is enabled.
    """
    # Delegates to the private implementation so that subclasses (e.g. for
    # prefetching) can override the behavior without changing the public API.
    return self._make_initializable_iterator(shared_name)
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=missing-docstring
if context.executing_eagerly():
raise RuntimeError(
"dataset.make_initializable_iterator is not supported when eager "
"execution is enabled. Use `for element in dataset` instead.")
_ensure_same_dataset_graph(self)
dataset = self._apply_options()
if shared_name is None:
shared_name = ""
iterator_resource = gen_dataset_ops.iterator_v2(
container="", shared_name=shared_name, **self._flat_structure)
with ops.colocate_with(iterator_resource):
initializer = gen_dataset_ops.make_iterator(
dataset._variant_tensor, # pylint: disable=protected-access
iterator_resource)
# pylint: disable=protected-access
return iterator_ops.Iterator(
iterator_resource, initializer, get_legacy_output_types(dataset),
get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset))
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.")
def output_classes(self):
"""Returns the class of each component of an element of this dataset.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this dataset.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(dataset)`.")
def output_types(self):
"""Returns the type of each component of an element of this dataset.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self.element_spec)
  @property
  def element_spec(self):
    # TODO(b/110122868): Remove this override once all `Dataset` instances
    # implement `element_structure`.
    # Reconstructs the modern `tf.TypeSpec` structure from the three legacy
    # V1 properties (types, shapes, classes).
    return structure.convert_legacy_structure(
        self.output_types, self.output_shapes, self.output_classes)
@staticmethod
@functools.wraps(DatasetV2.from_tensors)
def from_tensors(tensors):
return DatasetV1Adapter(DatasetV2.from_tensors(tensors))
@staticmethod
@functools.wraps(DatasetV2.from_tensor_slices)
def from_tensor_slices(tensors):
return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))
@staticmethod
@deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
def from_sparse_tensor_slices(sparse_tensor):
"""Splits each rank-N `tf.SparseTensor` in this dataset row-wise.
Args:
sparse_tensor: A `tf.SparseTensor`.
Returns:
Dataset: A `Dataset` of rank-(N-1) sparse tensors.
"""
return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))
@staticmethod
@functools.wraps(DatasetV2.from_generator)
def from_generator(generator, output_types, output_shapes=None, args=None):
return DatasetV1Adapter(DatasetV2.from_generator(
generator, output_types, output_shapes, args))
@staticmethod
@functools.wraps(DatasetV2.range)
def range(*args):
return DatasetV1Adapter(DatasetV2.range(*args))
@staticmethod
@functools.wraps(DatasetV2.zip)
def zip(datasets):
return DatasetV1Adapter(DatasetV2.zip(datasets))
@functools.wraps(DatasetV2.concatenate)
def concatenate(self, dataset):
return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))
@functools.wraps(DatasetV2.prefetch)
def prefetch(self, buffer_size):
return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))
@staticmethod
@functools.wraps(DatasetV2.list_files)
def list_files(file_pattern, shuffle=None, seed=None):
return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))
@functools.wraps(DatasetV2.repeat)
def repeat(self, count=None):
return DatasetV1Adapter(super(DatasetV1, self).repeat(count))
@functools.wraps(DatasetV2.shuffle)
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
return DatasetV1Adapter(super(DatasetV1, self).shuffle(
buffer_size, seed, reshuffle_each_iteration))
@functools.wraps(DatasetV2.cache)
def cache(self, filename=""):
return DatasetV1Adapter(super(DatasetV1, self).cache(filename))
@functools.wraps(DatasetV2.take)
def take(self, count):
return DatasetV1Adapter(super(DatasetV1, self).take(count))
@functools.wraps(DatasetV2.skip)
def skip(self, count):
return DatasetV1Adapter(super(DatasetV1, self).skip(count))
@functools.wraps(DatasetV2.shard)
def shard(self, num_shards, index):
return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))
@functools.wraps(DatasetV2.batch)
def batch(self, batch_size, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).batch(
batch_size, drop_remainder))
@functools.wraps(DatasetV2.padded_batch)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
batch_size, padded_shapes, padding_values, drop_remainder))
@functools.wraps(DatasetV2.map)
def map(self, map_func, num_parallel_calls=None):
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(self, map_func, preserve_cardinality=False))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=False))
@deprecation.deprecated(None, "Use `tf.data.Dataset.map()")
def map_with_legacy_function(self, map_func, num_parallel_calls=None):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is an escape hatch for existing uses of `map` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `map` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(
self,
map_func,
preserve_cardinality=False,
use_legacy_function=True))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self,
map_func,
num_parallel_calls,
preserve_cardinality=False,
use_legacy_function=True))
@functools.wraps(DatasetV2.flat_map)
def flat_map(self, map_func):
return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))
@functools.wraps(DatasetV2.interleave)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
return DatasetV1Adapter(super(DatasetV1, self).interleave(
map_func, cycle_length, block_length, num_parallel_calls))
@functools.wraps(DatasetV2.filter)
def filter(self, predicate):
return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))
@deprecation.deprecated(None, "Use `tf.data.Dataset.filter()")
def filter_with_legacy_function(self, predicate):
"""Filters this dataset according to `predicate`.
NOTE: This is an escape hatch for existing uses of `filter` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `filter` as this method will be removed in V2.
Args:
predicate: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate, use_legacy_function=True)
@functools.wraps(DatasetV2.apply)
def apply(self, transformation_func):
return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))
@functools.wraps(DatasetV2.window)
def window(self, size, shift=None, stride=1, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).window(
size, shift, stride, drop_remainder))
@functools.wraps(DatasetV2.with_options)
def with_options(self, options):
return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
# Bind the public `Dataset` name to the V2 class when TF 2.x behavior is
# enabled, and to the V1 class otherwise, so downstream code sees the
# appropriate API surface.
if tf2.enabled():
  Dataset = DatasetV2
else:
  Dataset = DatasetV1
class DatasetV1Adapter(DatasetV1):
  """Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""
  def __init__(self, dataset):
    # Store the wrapped V2 dataset before running `DatasetV1.__init__`.
    self._dataset = dataset
    super(DatasetV1Adapter, self).__init__()
  def __iter__(self):
    return iter(self._dataset)
  def options(self):
    return self._dataset.options()
  @property
  def element_spec(self):
    return self._dataset.element_spec
  # The remaining hooks simply forward to the wrapped V2 dataset.
  # pylint: disable=protected-access
  def _as_variant_tensor(self):
    return self._dataset._variant_tensor
  def _has_captured_ref(self):
    return self._dataset._has_captured_ref()
  def _inputs(self):
    return self._dataset._inputs()
  def _functions(self):
    return self._dataset._functions()
def _ensure_same_dataset_graph(dataset):
  """Walks the dataset graph to ensure all datasets come from the same graph.
  Performs a breadth-first traversal over `dataset` and its transitive inputs,
  logging a warning for every dataset that was created in a graph other than
  the current default graph.
  Args:
    dataset: A `tf.data.Dataset` whose input pipeline should be checked.
  """
  current_graph = ops.get_default_graph()
  bfs_q = Queue.Queue()
  bfs_q.put(dataset)
  # Track visited datasets by identity: a set gives O(1) membership tests,
  # whereas the previous list made the traversal quadratic in pipeline size.
  # (Datasets are compared by object identity, so `id()` keys are equivalent.)
  visited = set()
  while not bfs_q.empty():
    ds = bfs_q.get()
    visited.add(id(ds))
    ds_graph = ds._graph  # pylint: disable=protected-access
    if current_graph != ds_graph:
      # Use lazy %-style arguments so the message is only formatted when the
      # warning is actually emitted.
      logging.warning(
          "The graph (%s) of the iterator is different from the graph (%s) "
          "the dataset: %s was created in. If you are using the Estimator "
          "API, make sure that no part of the dataset returned by the "
          "`input_fn` function is defined outside the `input_fn` function. "
          "Please ensure that all datasets in the pipeline are created in "
          "the same graph as the iterator. NOTE: This warning will become an "
          "error in future versions of TensorFlow.",
          current_graph, ds_graph, ds._variant_tensor)  # pylint: disable=protected-access
    for input_ds in ds._inputs():  # pylint: disable=protected-access
      if id(input_ds) not in visited:
        bfs_q.put(input_ds)
@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
  Note: The returned iterator will be initialized automatically.
  A "one-shot" iterator does not support re-initialization.
  Args:
    dataset: A `tf.data.Dataset`.
  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of this dataset.
  """
  try:
    # Call the defined `_make_one_shot_iterator()` if there is one, because some
    # datasets (e.g. for prefetching) override its behavior.
    return dataset._make_one_shot_iterator()  # pylint: disable=protected-access
  except AttributeError:
    # `dataset` has no V1 iterator support (it is a plain V2 dataset); wrap it
    # in the adapter, which inherits `_make_one_shot_iterator` from `DatasetV1`.
    return DatasetV1Adapter(dataset)._make_one_shot_iterator()  # pylint: disable=protected-access
@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
  Note: The returned iterator will be in an uninitialized state,
  and you must run the `iterator.initializer` operation before using it:
  ```python
  dataset = ...
  iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
  # ...
  sess.run(iterator.initializer)
  ```
  Args:
    dataset: A `tf.data.Dataset`.
    shared_name: (Optional.) If non-empty, the returned iterator will be shared
      under the given name across multiple sessions that share the same devices
      (e.g. when using a remote server).
  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
  Raises:
    RuntimeError: If eager execution is enabled.
  """
  try:
    # Call the defined `_make_initializable_iterator()` if there is one, because
    # some datasets (e.g. for prefetching) override its behavior.
    return dataset._make_initializable_iterator(shared_name)  # pylint: disable=protected-access
  except AttributeError:
    # `dataset` has no V1 iterator support (it is a plain V2 dataset); wrap it
    # in the adapter, which inherits the method from `DatasetV1`.
    return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name)  # pylint: disable=protected-access
@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
  """Returns the type specification of an element of a `Dataset` or `Iterator`.
  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
  Returns:
    A nested structure of `tf.TypeSpec` objects matching the structure of an
    element of `dataset_or_iterator` and specifying the type of individual
    components.
  Raises:
    TypeError: If `dataset_or_iterator` is not a `Dataset` or `Iterator` object.
  """
  try:
    # Both `Dataset` and `Iterator` expose `element_spec`; anything else fails
    # the attribute lookup and is rejected below.
    return dataset_or_iterator.element_spec  # pylint: disable=protected-access
  except AttributeError:
    raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
                    "object, but got %s." % type(dataset_or_iterator))
@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
  """Returns the output classes of the elements of a `Dataset` or `Iterator`.
  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_classes` property.
  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.IteratorV2`.
  Returns:
    A nested structure of Python `type` objects matching the structure of the
    dataset / iterator elements and specifying the class of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))
@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
  """Returns the output shapes of the elements of a `Dataset` or `Iterator`.
  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_shapes` property.
  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
  Returns:
    A nested structure of `tf.TensorShape` objects matching the structure of
    the dataset / iterator elements and specifying the shape of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))
@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
  """Returns the output types of the elements of a `Dataset` or `Iterator`.
  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_types` property.
  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
  Returns:
    A nested structure of `tf.DType` objects matching the structure of
    dataset / iterator elements and specifying the type of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))
@tf_export("data.Options")
class Options(options_lib.OptionsBase):
  """Represents options for tf.data.Dataset.
  An `Options` object can be, for instance, used to control which static
  optimizations to apply or whether to use performance modeling to dynamically
  tune the parallelism of operations such as `tf.data.Dataset.map` or
  `tf.data.Dataset.interleave`.
  After constructing an `Options` object, use `dataset.with_options(options)` to
  apply the options to a dataset.
  >>> dataset = tf.data.Dataset.range(3)
  >>> options = tf.data.Options()
  >>> # Set options here.
  >>> dataset = dataset.with_options(options)
  """
  experimental_deterministic = options_lib.create_option(
      name="experimental_deterministic",
      ty=bool,
      docstring=
      "Whether the outputs need to be produced in deterministic order. If None,"
      " defaults to True.")
  experimental_distribute = options_lib.create_option(
      name="experimental_distribute",
      ty=distribute_options.DistributeOptions,
      docstring=
      "The distribution strategy options associated with the dataset. See "
      "`tf.data.experimental.DistributeOptions` for more details.",
      default_factory=distribute_options.DistributeOptions)
  experimental_optimization = options_lib.create_option(
      name="experimental_optimization",
      ty=optimization_options.OptimizationOptions,
      docstring=
      "The optimization options associated with the dataset. See "
      "`tf.data.experimental.OptimizationOptions` for more details.",
      default_factory=optimization_options.OptimizationOptions)
  experimental_slack = options_lib.create_option(
      name="experimental_slack",
      ty=bool,
      docstring="Whether to introduce 'slack' in the last `prefetch` of the "
      "input pipeline, if it exists. This may reduce CPU contention with "
      "accelerator host-side activity at the start of a step. The slack "
      "frequency is determined by the number of devices attached to this "
      "input pipeline. If None, defaults to False.")
  experimental_stats = options_lib.create_option(
      name="experimental_stats",
      ty=stats_options.StatsOptions,
      docstring=
      "The statistics options associated with the dataset. See "
      "`tf.data.experimental.StatsOptions` for more details.",
      default_factory=stats_options.StatsOptions)
  experimental_threading = options_lib.create_option(
      name="experimental_threading",
      ty=threading_options.ThreadingOptions,
      docstring=
      "The threading options associated with the dataset. See "
      "`tf.data.experimental.ThreadingOptions` for more details.",
      default_factory=threading_options.ThreadingOptions)
  experimental_allow_stateful = options_lib.create_option(
      name="experimental_allow_stateful",
      ty=bool,
      docstring="By default, tf.data will refuse to serialize a dataset or "
      "checkpoint its iterator if the dataset contains a stateful op as the "
      "serialization / checkpointing won't be able to capture its state. "
      "Users can -- at their own risk -- override this restriction by "
      "explicitly specifying that they are fine throwing away the state "
      "in these ops when they turn this option on.")
  def _static_optimizations(self):
    """Produces the list of enabled static optimizations."""
    result = []
    result.extend(self.experimental_optimization._static_optimizations())  # pylint: disable=protected-access
    # `is False` deliberately distinguishes an explicit False from the
    # default None (which is treated as deterministic per the option doc).
    if self.experimental_deterministic is False:
      result.append("make_sloppy")
    if self.experimental_stats and self.experimental_stats.latency_all_edges:
      result.append("latency_all_edges")
    if self.experimental_slack:
      result.append("slack")
    if (self.experimental_distribute and
        self.experimental_distribute._make_stateless):  # pylint: disable=protected-access
      result.append("make_stateless")
    return result
  def _static_optimization_configs(self):
    """Produces the list of configurations for enabled static optimizations."""
    result = []
    if self.experimental_optimization:
      result.extend(
          self.experimental_optimization._static_optimization_configs())  # pylint: disable=protected-access
    if self.experimental_slack:
      # The slack period is the number of devices consuming this pipeline;
      # fall back to a single device when unspecified.
      num_devices = self.experimental_distribute.num_devices
      if num_devices is None:
        num_devices = 1
      result.append("slack:slack_period:%d" % num_devices)
    return result
  def merge(self, options):
    """Merges itself with the given `tf.data.Options`.
    The given `tf.data.Options` can be merged as long as there does not exist an
    attribute that is set to different values in `self` and `options`.
    Args:
      options: a `tf.data.Options` to merge with
    Raises:
      ValueError: if the given `tf.data.Options` cannot be merged
    Returns:
      New `tf.data.Options()` object which is the result of merging self with
      the input `tf.data.Options`.
    """
    return options_lib.merge_options(self, options)
class DatasetSource(DatasetV2):
  """Abstract class representing a dataset with no inputs."""
  def _inputs(self):
    # A source dataset has no upstream datasets feeding into it.
    return []
class UnaryDataset(DatasetV2):
  """Abstract class representing a dataset with one input."""
  def __init__(self, input_dataset, variant_tensor):
    # Keep a reference to the upstream dataset so graph traversals
    # (via `_inputs()`) can walk the full pipeline.
    self._input_dataset = input_dataset
    super(UnaryDataset, self).__init__(variant_tensor)
  def _inputs(self):
    return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
  """Represents a unary dataset with the same input and output structure."""
  def __init__(self, input_dataset, variant_tensor):
    # `UnaryDataset.__init__` already stores `input_dataset` as
    # `self._input_dataset`; the duplicate assignment that used to precede
    # this call was redundant and has been removed.
    super(UnaryUnchangedStructureDataset, self).__init__(
        input_dataset, variant_tensor)
  @property
  def element_spec(self):
    # The transformation leaves element structure unchanged, so forward the
    # input dataset's spec.
    return self._input_dataset.element_spec
class TensorDataset(DatasetSource):
  """A `Dataset` with a single element."""
  def __init__(self, element):
    """See `Dataset.from_tensors()` for details."""
    # Canonicalize the user-provided value and record its type spec so that
    # `element_spec` can reproduce the original structure.
    element = structure.normalize_element(element)
    self._structure = structure.type_spec_from_value(element)
    self._tensors = structure.to_tensor_list(self._structure, element)
    variant_tensor = gen_dataset_ops.tensor_dataset(
        self._tensors,
        output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(TensorDataset, self).__init__(variant_tensor)
  @property
  def element_spec(self):
    return self._structure
class TensorSliceDataset(DatasetSource):
  """A `Dataset` of slices from a dataset element."""
  def __init__(self, element):
    """See `Dataset.from_tensor_slices()` for details."""
    element = structure.normalize_element(element)
    batched_spec = structure.type_spec_from_value(element)
    self._tensors = structure.to_batched_tensor_list(batched_spec, element)
    # The dataset yields slices along dimension 0, so the element spec is the
    # input spec with its leading (batch) dimension removed.
    self._structure = nest.map_structure(
        lambda component_spec: component_spec._unbatch(), batched_spec)  # pylint: disable=protected-access
    # All components must agree on the size of dimension 0; raise early if a
    # mismatch is statically detectable.
    batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(
        self._tensors[0].get_shape()[0]))
    for t in self._tensors[1:]:
      batch_dim.assert_is_compatible_with(tensor_shape.Dimension(
          tensor_shape.dimension_value(t.get_shape()[0])))
    variant_tensor = gen_dataset_ops.tensor_slice_dataset(
        self._tensors,
        output_shapes=structure.get_flat_tensor_shapes(self._structure))
    super(TensorSliceDataset, self).__init__(variant_tensor)
  @property
  def element_spec(self):
    return self._structure
class SparseTensorSliceDataset(DatasetSource):
  """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""
  def __init__(self, sparse_tensor):
    """See `Dataset.from_sparse_tensor_slices()` for details."""
    if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
      raise TypeError(
          "`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format(
              sparse_tensor))
    self._sparse_tensor = sparse_tensor
    indices_shape = self._sparse_tensor.indices.get_shape()
    shape_shape = self._sparse_tensor.dense_shape.get_shape()
    # Slicing along the first dimension drops one rank; derive the sliced rank
    # from both the indices width and the dense-shape length, merging the two
    # static values for extra validation.
    rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
    # Each element is an (indices, values, dense_shape) triple describing a
    # rank-(N-1) sparse tensor.
    self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),
                       tensor_spec.TensorSpec([None],
                                              self._sparse_tensor.dtype),
                       tensor_spec.TensorSpec([rank], dtypes.int64))
    variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
        self._sparse_tensor.indices, self._sparse_tensor.values,
        self._sparse_tensor.dense_shape)
    super(SparseTensorSliceDataset, self).__init__(variant_tensor)
  @property
  def element_spec(self):
    return self._structure
class _VariantDataset(DatasetV2):
  """A Dataset wrapper around a `tf.variant`-typed function argument."""
  def __init__(self, dataset_variant, structure):
    # NOTE: the `structure` parameter shadows the `structure` module within
    # this method; only the parameter is referenced here.
    self._structure = structure
    super(_VariantDataset, self).__init__(dataset_variant)
  def _inputs(self):
    # Upstream inputs are unknown when reconstructing a dataset from a bare
    # variant tensor.
    return []
  @property
  def element_spec(self):
    return self._structure
class _NestedVariant(composite_tensor.CompositeTensor):
  """A composite tensor holding a non-scalar (batched) dataset variant."""
  def __init__(self, variant_tensor, element_spec, dataset_shape):
    self._variant_tensor = variant_tensor
    self._element_spec = element_spec
    self._dataset_shape = dataset_shape
  @property
  def _type_spec(self):
    return DatasetSpec(self._element_spec, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
  """Constructs a dataset from the given variant and structure.
  Args:
    variant: A scalar `tf.variant` tensor representing a dataset.
    structure: A `tf.data.experimental.Structure` object representing the
      structure of each element in the dataset.
  Returns:
    A `tf.data.Dataset` instance.
  """
  # Inverse of `to_variant`: rewrap a serialized dataset variant as a Dataset.
  return _VariantDataset(variant, structure)  # pylint: disable=protected-access
@tf_export("data.experimental.to_variant")
def to_variant(dataset):
  """Returns a variant representing the given dataset.
  Args:
    dataset: A `tf.data.Dataset`.
  Returns:
    A scalar `tf.variant` tensor representing the given dataset.
  """
  # Inverse of `from_variant`: expose the dataset's underlying variant tensor.
  return dataset._variant_tensor  # pylint: disable=protected-access
@tf_export(
    "data.DatasetSpec",
    v1=["data.DatasetSpec", "data.experimental.DatasetStructure"])
class DatasetSpec(type_spec.BatchableTypeSpec):
  """Type specification for `tf.data.Dataset`.
  See `tf.TypeSpec` for more information about TensorFlow type specifications.
  >>> dataset = tf.data.Dataset.range(3)
  >>> tf.data.DatasetSpec.from_value(dataset)
  DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))
  """
  __slots__ = ["_element_spec", "_dataset_shape"]
  def __init__(self, element_spec, dataset_shape=()):
    # `dataset_shape` is scalar for a single dataset and rank >= 1 for a
    # batch of datasets.
    self._element_spec = element_spec
    self._dataset_shape = tensor_shape.as_shape(dataset_shape)
  @property
  def value_type(self):
    return _VariantDataset
  def _serialize(self):
    return (self._element_spec, self._dataset_shape)
  @property
  def _component_specs(self):
    # A dataset is encoded as a single `tf.variant` tensor whose shape is the
    # dataset shape.
    return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)
  def _to_components(self, value):
    return value._variant_tensor  # pylint: disable=protected-access
  def _from_components(self, components):
    # pylint: disable=protected-access
    # A scalar spec decodes to a real dataset; a batched spec decodes to a
    # composite tensor wrapping the stacked variants.
    if self._dataset_shape.ndims == 0:
      return _VariantDataset(components, self._element_spec)
    else:
      return _NestedVariant(components, self._element_spec, self._dataset_shape)
  def _to_tensor_list(self, value):
    return [
        ops.convert_to_tensor(
            tf_nest.map_structure(lambda x: x._variant_tensor, value))  # pylint: disable=protected-access
    ]
  @staticmethod
  def from_value(value):
    """Creates a `DatasetSpec` for the given `tf.data.Dataset` value."""
    return DatasetSpec(value.element_spec)  # pylint: disable=protected-access
  def _batch(self, batch_size):
    # Prepend the batch dimension (possibly unknown, i.e. None) to the
    # dataset shape.
    return DatasetSpec(
        self._element_spec,
        tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))
  def _unbatch(self):
    if self._dataset_shape.ndims == 0:
      raise ValueError("Unbatching a dataset is only supported for rank >= 1")
    return DatasetSpec(self._element_spec, self._dataset_shape[1:])
  def _to_batched_tensor_list(self, value):
    if self._dataset_shape.ndims == 0:
      raise ValueError("Unbatching a dataset is only supported for rank >= 1")
    return self._to_tensor_list(value)
  # A `DatasetSpec` has no separate legacy (types/shapes/classes)
  # representation, so it stands in for itself in all three roles.
  def _to_legacy_output_types(self):
    return self
  def _to_legacy_output_shapes(self):
    return self
  def _to_legacy_output_classes(self):
    return self
class StructuredFunctionWrapper(object):
"""A function wrapper that supports structured arguments and return values."""
# pylint: disable=protected-access
def __init__(self,
func,
transformation_name,
dataset=None,
input_classes=None,
input_shapes=None,
input_types=None,
input_structure=None,
add_to_graph=True,
use_legacy_function=False,
defun_kwargs=None):
"""Creates a new `StructuredFunctionWrapper` for the given function.
Args:
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
input_structure: (Optional.) A `Structure` object. If given, this argument
defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph.
use_legacy_function: (Optional.) A boolean that determines whether the
function be created using `tensorflow.python.eager.function.defun`
(default behavior) or `tensorflow.python.framework.function.Defun`
(legacy beheavior).
defun_kwargs: (Optional.) A dictionary mapping string argument names to
values. If supplied, will be passed to `function` as keyword arguments.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
`input_shapes`, and `input_types` is passed.
"""
if input_structure is None:
if dataset is None:
if input_classes is None or input_shapes is None or input_types is None:
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = structure.convert_legacy_structure(
input_types, input_shapes, input_classes)
else:
if not (input_classes is None and input_shapes is None and
input_types is None):
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = dataset.element_spec
else:
if not (dataset is None and input_classes is None and input_shapes is None
and input_types is None):
raise ValueError("Either `dataset`, `input_structure`, or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = input_structure
self._func = func
if defun_kwargs is None:
defun_kwargs = {}
readable_transformation_name = transformation_name.replace(
".", "_")[:-2] if len(transformation_name) > 2 else ""
func_name = "_".join(
[readable_transformation_name,
function_utils.get_func_name(func)])
# Sanitize function name to remove symbols that interfere with graph
# construction.
for symbol in ["<", ">", "\\", "'", " "]:
func_name = func_name.replace(symbol, "")
ag_ctx = autograph_ctx.control_status_ctx()
def _warn_if_collections(transformation_name):
"""Prints a warning if the given graph uses common graph collections.
NOTE(mrry): Currently a warning is only generated for resources. Any
variables created will be automatically hoisted out to the outermost scope
using `init_scope()`. Some collections (such as for control-flow contexts)
are benign and should not generate a warning.
Args:
transformation_name: A human-readable name for the transformation.
"""
warnings.warn("Creating resources inside a function passed to %s "
"is not supported. Create each resource outside the "
"function, and capture it inside the function to use it." %
transformation_name, stacklevel=5)
def _wrapper_helper(*args):
"""Wrapper for passing nested structures to and from tf.data functions."""
nested_args = structure.from_compatible_tensor_list(
self._input_structure, args)
if not _should_unpack_args(nested_args):
nested_args = (nested_args,)
ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
# If `func` returns a list of tensors, `nest.flatten()` and
# `ops.convert_to_tensor()` would conspire to attempt to stack
# those tensors into a single tensor, because the customized
# version of `nest.flatten()` does not recurse into lists. Since
# it is more likely that the list arose from returning the
# result of an operation (such as `tf.numpy_function()`) that returns a
# list of not-necessarily-stackable tensors, we treat the
# returned value is a `tuple` instead. A user wishing to pack
# the return value into a single tensor can use an explicit
# `tf.stack()` before returning.
if isinstance(ret, list):
ret = tuple(ret)
try:
self._output_structure = structure.type_spec_from_value(ret)
except (ValueError, TypeError):
six.reraise(
TypeError,
TypeError("Unsupported return value from function passed to "
"%s: %s." % (transformation_name, ret)),
sys.exc_info()[2])
return ret
if use_legacy_function:
func_name = func_name + "_" + str(ops.uid())
@function.Defun(
*structure.get_flat_tensor_types(self._input_structure),
func_name=func_name,
**defun_kwargs)
def wrapper_fn(*args):
ret = _wrapper_helper(*args)
# _warn_if_collections(transformation_name, ops.get_default_graph(), 0)
return structure.to_tensor_list(self._output_structure, ret)
self._function = wrapper_fn
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
else:
# Use the private method that will execute `wrapper_fn` but delay
# adding it to the graph in case (e.g.) we need to rerun the function.
self._function._create_definition_if_needed()
if resource_tracker.resources:
_warn_if_collections(transformation_name)
else:
defun_kwargs.update({"func_name": func_name})
# Note: _wrapper_helper will apply autograph based on context.
@eager_function.defun_with_attributes(
input_signature=structure.get_flat_tensor_specs(
self._input_structure),
autograph=False,
attributes=defun_kwargs)
def wrapper_fn(*args): # pylint: disable=missing-docstring
ret = _wrapper_helper(*args)
ret = structure.to_tensor_list(self._output_structure, ret)
return [ops.convert_to_tensor(t) for t in ret]
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
# TODO(b/141462134): Switch to using garbage collection.
self._function = wrapper_fn._get_concrete_function_internal()
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
if resource_tracker.resources:
_warn_if_collections(transformation_name)
outer_graph_seed = ops.get_default_graph().seed
if outer_graph_seed and self._function.graph.seed == outer_graph_seed:
if self._function.graph._seed_used:
warnings.warn(
"Seed %s from outer graph might be getting used by function %s, "
"if the random op has not been provided any seed. Explicitly set "
"the seed in the function if this is not the intended behavior."
%(outer_graph_seed, func_name), stacklevel=4)
# pylint: enable=protected-access
  @property
  def output_structure(self):
    """The `tf.TypeSpec` structure of the values returned by the function."""
    return self._output_structure
  @property
  def output_classes(self):
    """Legacy view of the output structure, as nested Python classes."""
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._output_structure)
  @property
  def output_shapes(self):
    """Legacy view of the output structure, as nested `tf.TensorShape`s."""
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._output_structure)
  @property
  def output_types(self):
    """Legacy view of the output structure, as nested `tf.DType`s."""
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._output_structure)
  @property
  def function(self):
    """The traced graph function, suitable for use as a dataset op's `f` attr."""
    return self._function
class _GeneratorDataset(DatasetSource):
  """A `Dataset` that generates elements by invoking a function."""
  def __init__(self, init_args, init_func, next_func, finalize_func):
    """Constructs a `_GeneratorDataset`.
    Args:
      init_args: A nested structure representing the arguments to `init_func`.
      init_func: A TensorFlow function that will be called on `init_args` each
        time a C++ iterator over this dataset is constructed. Returns a nested
        structure representing the "state" of the dataset.
      next_func: A TensorFlow function that will be called on the result of
        `init_func` to produce each element, and that raises `OutOfRangeError`
        to terminate iteration.
      finalize_func: A TensorFlow function that will be called on the result of
        `init_func` immediately before a C++ iterator over this dataset is
        destroyed. The return value is ignored.
    """
    self._init_args = init_args
    # Infer the type structure of `init_args`; it is both flattened into the
    # op's inputs below and used as `init_func`'s input signature.
    self._init_structure = structure.type_spec_from_value(init_args)
    self._init_func = StructuredFunctionWrapper(
        init_func,
        self._transformation_name(),
        input_structure=self._init_structure)
    # `next_func` and `finalize_func` both consume the "state" produced by
    # `init_func`, so they share its output structure as their input structure.
    self._next_func = StructuredFunctionWrapper(
        next_func,
        self._transformation_name(),
        input_structure=self._init_func.output_structure)
    self._finalize_func = StructuredFunctionWrapper(
        finalize_func,
        self._transformation_name(),
        input_structure=self._init_func.output_structure)
    # The op's inputs are the flattened `init_args` plus each wrapped
    # function's captured inputs.
    variant_tensor = gen_dataset_ops.generator_dataset(
        structure.to_tensor_list(self._init_structure, self._init_args) +
        self._init_func.function.captured_inputs,
        self._next_func.function.captured_inputs,
        self._finalize_func.function.captured_inputs,
        init_func=self._init_func.function,
        next_func=self._next_func.function,
        finalize_func=self._finalize_func.function,
        **self._flat_structure)
    super(_GeneratorDataset, self).__init__(variant_tensor)
  @property
  def element_spec(self):
    # Elements are whatever `next_func` returns.
    return self._next_func.output_structure
  def _transformation_name(self):
    return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
  """A `Dataset` that zips its inputs together."""
  def __init__(self, datasets):
    """See `Dataset.zip()` for details.
    Args:
      datasets: A nested structure (without Python lists) of `Dataset` objects.
    Raises:
      TypeError: if any leaf of `datasets` is not a `Dataset`.
    """
    for ds in nest.flatten(datasets):
      if not isinstance(ds, DatasetV2):
        if isinstance(ds, list):
          # tf.data's customized `nest` does not recurse into lists, so a list
          # here is almost certainly a user error; give a targeted message.
          message = ("The argument to `Dataset.zip()` must be a nested "
                     "structure of `Dataset` objects. Nested structures do not "
                     "support Python lists; please use a tuple instead.")
        else:
          message = ("The argument to `Dataset.zip()` must be a nested "
                     "structure of `Dataset` objects.")
        raise TypeError(message)
    self._datasets = datasets
    # The element spec mirrors the nested structure of the input datasets.
    self._structure = nest.pack_sequence_as(
        self._datasets,
        [ds.element_spec for ds in nest.flatten(self._datasets)])
    variant_tensor = gen_dataset_ops.zip_dataset(
        [ds._variant_tensor for ds in nest.flatten(self._datasets)],
        **self._flat_structure)
    super(ZipDataset, self).__init__(variant_tensor)
  def _inputs(self):
    return nest.flatten(self._datasets)
  @property
  def element_spec(self):
    return self._structure
class ConcatenateDataset(DatasetV2):
  """A `Dataset` that concatenates its input with given dataset."""
  def __init__(self, input_dataset, dataset_to_concatenate):
    """See `Dataset.concatenate()` for details.
    Args:
      input_dataset: The dataset whose elements come first.
      dataset_to_concatenate: The dataset whose elements follow. Must have the
        same output types and classes as `input_dataset`; shapes may differ.
    Raises:
      TypeError: if the two datasets' output types or classes differ.
    """
    self._input_dataset = input_dataset
    self._dataset_to_concatenate = dataset_to_concatenate
    # Types and classes must match exactly; only shapes may differ and are
    # merged component-wise below.
    output_types = get_legacy_output_types(input_dataset)
    if output_types != get_legacy_output_types(dataset_to_concatenate):
      raise TypeError(
          "Two datasets to concatenate have different types %s and %s" %
          (output_types, get_legacy_output_types(dataset_to_concatenate)))
    output_classes = get_legacy_output_classes(input_dataset)
    if output_classes != get_legacy_output_classes(dataset_to_concatenate):
      raise TypeError(
          "Two datasets to concatenate have different classes %s and %s" %
          (output_classes, get_legacy_output_classes(dataset_to_concatenate)))
    input_shapes = get_legacy_output_shapes(self._input_dataset)
    # For each component, use the most specific shape compatible with both
    # inputs (dimensions that differ become unknown).
    output_shapes = nest.pack_sequence_as(input_shapes, [
        ts1.most_specific_compatible_shape(ts2)
        for (ts1, ts2) in zip(
            nest.flatten(input_shapes),
            nest.flatten(get_legacy_output_shapes(
                self._dataset_to_concatenate)))
    ])
    self._structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    self._input_datasets = [input_dataset, dataset_to_concatenate]
    # pylint: disable=protected-access
    variant_tensor = gen_dataset_ops.concatenate_dataset(
        input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,
        **self._flat_structure)
    # pylint: enable=protected-access
    super(ConcatenateDataset, self).__init__(variant_tensor)
  def _inputs(self):
    return self._input_datasets
  @property
  def element_spec(self):
    return self._structure
class RepeatDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that repeats its input several times."""
  def __init__(self, input_dataset, count):
    """See `Dataset.repeat()` for details.
    Args:
      input_dataset: The input dataset.
      count: A scalar convertible to `tf.int64`, or `None` to repeat
        indefinitely.
    """
    self._input_dataset = input_dataset
    if count is None:
      # `None` requests unbounded repetition, encoded as -1 for the kernel.
      self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
    else:
      self._count = ops.convert_to_tensor(
          count, dtype=dtypes.int64, name="count")
    variant_tensor = gen_dataset_ops.repeat_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=self._count,
        **self._flat_structure)
    super(RepeatDataset, self).__init__(input_dataset, variant_tensor)
class RangeDataset(DatasetSource):
  """A `Dataset` of a step separated range of values."""

  def __init__(self, *args):
    """See `Dataset.range()` for details."""
    self._parse_args(*args)
    # Elements are always scalar int64 values.
    self._structure = tensor_spec.TensorSpec([], dtypes.int64)
    variant_tensor = gen_dataset_ops.range_dataset(
        start=self._start,
        stop=self._stop,
        step=self._step,
        **self._flat_structure)
    super(RangeDataset, self).__init__(variant_tensor)

  def _parse_args(self, *args):
    """Parse arguments according to the same rules as the `range()` builtin."""
    # Resolve the (start, stop, step) triple first, then build the tensors in
    # a single place, mirroring `range(stop)`, `range(start, stop)` and
    # `range(start, stop, step)`.
    if len(args) == 1:
      start, stop, step = 0, args[0], 1
    elif len(args) == 2:
      start, stop, step = args[0], args[1], 1
    elif len(args) == 3:
      start, stop, step = args
    else:
      raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
    self._start = self._build_tensor(start, "start")
    self._stop = self._build_tensor(stop, "stop")
    self._step = self._build_tensor(step, "step")

  def _build_tensor(self, int64_value, name):
    """Converts a Python value to a scalar `tf.int64` tensor."""
    return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)

  @property
  def element_spec(self):
    return self._structure
class _MemoryCacheDeleter(object):
  """An object which cleans up an anonymous memory cache resource.
  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectable.
  """
  def __init__(self, handle, device, deleter):
    # handle: the resource handle tensor to delete.
    # device: the device on which the resource was created.
    # deleter: the deleter tensor returned alongside the handle.
    self._deleter = deleter
    self._handle = handle
    self._device = device
    # Record the execution mode at creation time so deletion (below) can run
    # in the same mode.
    self._eager_mode = context.executing_eagerly()
  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_memory_cache(
              handle=self._handle, deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_memory_cache(
              handle=self._handle, deleter=self._deleter)
class _MemoryCache(object):
  """Represents a memory cache resource."""
  def __init__(self):
    super(_MemoryCache, self).__init__()
    self._device = context.context().device_name
    # Create an anonymous cache resource; the `_MemoryCacheDeleter` below
    # deletes it when this object is garbage-collected (even via a cycle).
    self._handle, self._deleter = (gen_dataset_ops.anonymous_memory_cache())
    self._resource_deleter = _MemoryCacheDeleter(
        handle=self._handle, device=self._device, deleter=self._deleter)
  @property
  def handle(self):
    """The resource handle tensor for the cache."""
    return self._handle
class CacheDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that caches elements of its input."""
  def __init__(self, input_dataset, filename):
    """See `Dataset.cache()` for details.
    Args:
      input_dataset: The input dataset.
      filename: A scalar convertible to `tf.string`, the cache file name.
    """
    self._input_dataset = input_dataset
    self._filename = ops.convert_to_tensor(
        filename, dtype=dtypes.string, name="filename")
    # In TF2, when executing eagerly or building a tf.function, use the v2
    # kernel, which takes an explicit anonymous memory-cache resource.
    if tf2.enabled() and (context.executing_eagerly() or
                          ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      self._cache = _MemoryCache()
      variant_tensor = gen_dataset_ops.cache_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          filename=self._filename,
          cache=self._cache.handle,
          **self._flat_structure)
    else:
      variant_tensor = gen_dataset_ops.cache_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          filename=self._filename,
          **self._flat_structure)
    super(CacheDataset, self).__init__(input_dataset, variant_tensor)
class _RandomSeedGeneratorDeleter(object):
  """An object which cleans up an anonymous random seed generator resource.
  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectable.
  """
  def __init__(self, handle, device, deleter):
    # handle: the resource handle tensor to delete.
    # device: the device on which the resource was created.
    # deleter: the deleter tensor returned alongside the handle.
    self._deleter = deleter
    self._handle = handle
    self._device = device
    # Record the execution mode at creation time so deletion (below) can run
    # in the same mode.
    self._eager_mode = context.executing_eagerly()
  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_random_seed_generator(
              handle=self._handle, deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_random_seed_generator(
              handle=self._handle, deleter=self._deleter)
class _RandomSeedGenerator(object):
  """Represents a random seed generator resource."""
  def __init__(self, seed, seed2):
    super(_RandomSeedGenerator, self).__init__()
    self._device = context.context().device_name
    # Create an anonymous seed-generator resource; the deleter object below
    # deletes it when this object is garbage-collected (even via a cycle).
    self._handle, self._deleter = (
        gen_dataset_ops.anonymous_random_seed_generator(seed=seed, seed2=seed2))
    self._resource_deleter = _RandomSeedGeneratorDeleter(
        handle=self._handle, device=self._device, deleter=self._deleter)
  @property
  def handle(self):
    """The resource handle tensor for the seed generator."""
    return self._handle
class ShuffleDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that randomly shuffles the elements of its input."""
  def __init__(self,
               input_dataset,
               buffer_size,
               seed=None,
               reshuffle_each_iteration=None):
    """Randomly shuffles the elements of this dataset.
    Args:
      input_dataset: The input dataset.
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements from this dataset from which the new dataset will sample.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.compat.v1.set_random_seed` for behavior.
      reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
        that the dataset should be pseudorandomly reshuffled each time it is
        iterated over. (Defaults to `True`.)
    Raises:
      ValueError: if invalid arguments are provided.
    """
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    # Derive the (seed, seed2) pair from the user-provided seed.
    self._seed, self._seed2 = random_seed.get_seed(seed)
    if reshuffle_each_iteration is None:
      self._reshuffle_each_iteration = True
    else:
      self._reshuffle_each_iteration = reshuffle_each_iteration
    # In TF2 (eager, or building a tf.function) with reshuffling enabled, use
    # the v2 kernel, which takes an anonymous seed-generator resource instead
    # of fixed `seed`/`seed2` attrs.
    if tf2.enabled() and self._reshuffle_each_iteration and (
        context.executing_eagerly() or
        ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      self._seed_generator = _RandomSeedGenerator(self._seed, self._seed2)
      variant_tensor = gen_dataset_ops.shuffle_dataset_v2(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          buffer_size=self._buffer_size,
          seed_generator=self._seed_generator.handle,
          **self._flat_structure)
    else:
      variant_tensor = gen_dataset_ops.shuffle_dataset(
          input_dataset._variant_tensor,  # pylint: disable=protected-access
          buffer_size=self._buffer_size,
          seed=self._seed,
          seed2=self._seed2,
          reshuffle_each_iteration=self._reshuffle_each_iteration,
          **self._flat_structure)
    super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)
class TakeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` containing the first `count` elements from its input."""
  def __init__(self, input_dataset, count):
    """See `Dataset.take()` for details.
    Args:
      input_dataset: The input dataset.
      count: A scalar convertible to `tf.int64`, the number of elements to
        take.
    """
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
    variant_tensor = gen_dataset_ops.take_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=self._count,
        **self._flat_structure)
    super(TakeDataset, self).__init__(input_dataset, variant_tensor)
class SkipDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` skipping the first `count` elements from its input."""
  def __init__(self, input_dataset, count):
    """See `Dataset.skip()` for details.
    Args:
      input_dataset: The input dataset.
      count: A scalar convertible to `tf.int64`, the number of elements to
        skip.
    """
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
    variant_tensor = gen_dataset_ops.skip_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        count=self._count,
        **self._flat_structure)
    super(SkipDataset, self).__init__(input_dataset, variant_tensor)
class ShardDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` for sharding its input."""
  def __init__(self, input_dataset, num_shards, index):
    """See `Dataset.shard()` for details.
    Args:
      input_dataset: The input dataset.
      num_shards: A scalar convertible to `tf.int64`, the number of shards.
      index: A scalar convertible to `tf.int64`, which shard to select.
    """
    self._input_dataset = input_dataset
    self._num_shards = ops.convert_to_tensor(
        num_shards, dtype=dtypes.int64, name="num_shards")
    self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index")
    variant_tensor = gen_dataset_ops.shard_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        num_shards=self._num_shards,
        index=self._index,
        **self._flat_structure)
    super(ShardDataset, self).__init__(input_dataset, variant_tensor)
class BatchDataset(UnaryDataset):
  """A `Dataset` that batches contiguous elements from its input."""
  def __init__(self, input_dataset, batch_size, drop_remainder):
    """See `Dataset.batch()` for details.
    Args:
      input_dataset: The input dataset.
      batch_size: A scalar convertible to `tf.int64`, the batch size.
      drop_remainder: A scalar convertible to `tf.bool`, whether to drop a
        final partial batch.
    """
    self._input_dataset = input_dataset
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
    constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)
    # pylint: disable=protected-access
    # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
    # or `False` (explicitly retaining the remainder); in those cases the
    # batch dimension of the element spec is left unknown.
    if constant_drop_remainder:
      # `drop_remainder` is statically `True`, so the batch dimension is the
      # (statically known, if constant) batch size.
      # pylint: disable=g-long-lambda
      self._structure = nest.map_structure(
          lambda component_spec: component_spec._batch(
              tensor_util.constant_value(self._batch_size)),
          input_dataset.element_spec)
    else:
      self._structure = nest.map_structure(
          lambda component_spec: component_spec._batch(None),
          input_dataset.element_spec)
    variant_tensor = gen_dataset_ops.batch_dataset_v2(
        input_dataset._variant_tensor,
        batch_size=self._batch_size,
        drop_remainder=self._drop_remainder,
        **self._flat_structure)
    super(BatchDataset, self).__init__(input_dataset, variant_tensor)
  @property
  def element_spec(self):
    return self._structure
class _VariantTracker(tracking.CapturableResource):
  """Allows export of functions capturing a Dataset in SavedModels.
  When saving a SavedModel, `tf.saved_model.save` traverses the object
  graph. Since Datasets reference _VariantTracker objects, that traversal will
  find a _VariantTracker for each Dataset and so know how to save and restore
  functions which reference the Dataset's variant Tensor.
  """
  def __init__(self, variant_tensor, resource_creator):
    """Record that `variant_tensor` is associated with `resource_creator`.
    Args:
      variant_tensor: The variant-dtype Tensor associated with the Dataset. This
        Tensor will be a captured input to functions which use the Dataset, and
        is used by saving code to identify the corresponding _VariantTracker.
      resource_creator: A zero-argument function which creates a new
        variant-dtype Tensor. This function will be included in SavedModels and
        run to re-create the Dataset's variant Tensor on restore.
    """
    super(_VariantTracker, self).__init__(device="CPU")
    # These attribute names follow the `tracking.CapturableResource`
    # convention, which the saving machinery reads during export.
    self._resource_handle = variant_tensor
    self._create_resource = resource_creator
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
"""Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
"""
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
def _padded_shape_to_tensor(padded_shape, input_component_shape):
  """Converts `padded_shape` to a `tf.Tensor` representing that shape.
  Args:
    padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
      sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
    input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
      be compatible.
  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.
  Raises:
    ValueError: If `padded_shape` is not a shape or not compatible with
      `input_component_shape`.
    TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
  """
  try:
    # Try to convert the `padded_shape` to a `tf.TensorShape`
    padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
    # We will return the "canonical" tensor representation, which uses
    # `-1` in place of `None`.
    ret = ops.convert_to_tensor(
        [dim if dim is not None else -1
         for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
  except (TypeError, ValueError):
    # The argument was not trivially convertible to a
    # `tf.TensorShape`, so fall back on the conversion to tensor
    # machinery.
    ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
    # The fallback result must still be a rank-1 vector of int64 values.
    # `six.reraise` keeps the original conversion failure's traceback
    # attached to the more descriptive error raised here.
    if ret.shape.dims is not None and len(ret.shape.dims) != 1:
      six.reraise(ValueError, ValueError(
          "Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
          "shape was %s." % (padded_shape, ret.shape)), sys.exc_info()[2])
    if ret.dtype != dtypes.int64:
      six.reraise(
          TypeError,
          TypeError(
              "Padded shape %s must be a 1-D tensor of tf.int64 values, but "
              "its element type was %s." % (padded_shape, ret.dtype.name)),
          sys.exc_info()[2])
    # Recover a static shape from the tensor for the compatibility check.
    padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)
  # Finally, validate against the corresponding input component shape.
  if not _is_padded_shape_compatible_with(padded_shape_as_shape,
                                          input_component_shape):
    raise ValueError("The padded shape %s is not compatible with the "
                     "corresponding input component shape %s."
                     % (padded_shape_as_shape, input_component_shape))
  return ret
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a tensor.

  Args:
    value: The padding value.
    output_type: Its expected dtype.

  Returns:
    A scalar `Tensor`.

  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  """
  padding_tensor = ops.convert_to_tensor(value, name="padding_value")
  # The padding value must be a scalar of exactly the component's dtype.
  if not padding_tensor.shape.is_compatible_with(tensor_shape.TensorShape([])):
    raise ValueError(
        "Padding value should be a scalar, but is not: %s" % padding_tensor)
  if padding_tensor.dtype != output_type:
    raise TypeError(
        "Padding value tensor (%s) does not match output type: %s" %
        (padding_tensor, output_type))
  return padding_tensor
def _default_padding(input_dataset):
  """Returns default padding tensors in a structure matching `input_dataset`.

  Args:
    input_dataset: A `Dataset` whose legacy output types determine the
      default padding values.

  Returns:
    A nested structure (matching the output types of `input_dataset`) of
    scalar default padding values: the empty string for string components and
    zero for numeric components.

  Raises:
    TypeError: if any component has dtype `variant`, for which there is no
      meaningful default padding value.
  """
  def make_zero(t):
    # t: a `tf.DType` for one component of the dataset's elements.
    if t.base_dtype == dtypes.string:
      return ""
    elif t.base_dtype == dtypes.variant:
      # There is no sensible "zero" for a variant-dtype component, so the
      # caller must supply `padding_values` explicitly.
      # NOTE: the original message here referenced a nonexistent `base_type`
      # attribute and printed "variant == variant"; reworded for clarity.
      raise TypeError(
          "Unable to create default padding for field of type 'variant' "
          "(base dtype %s). Please provide an explicit padding value for "
          "this component." % t.base_dtype)
    else:
      return np.zeros_like(t.as_numpy_dtype())
  return nest.map_structure(
      make_zero, get_legacy_output_types(input_dataset))
class PaddedBatchDataset(UnaryDataset):
  """A `Dataset` that batches and pads contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
               drop_remainder):
    """See `Dataset.padded_batch()` for details.

    Args:
      input_dataset: The input dataset.
      batch_size: A scalar convertible to `tf.int64`, the batch size.
      padded_shapes: A nested structure of padded shapes, matching the
        structure of the input elements.
      padding_values: A nested structure of padding values, or `None` to use
        type-appropriate defaults (empty string for strings, zero otherwise).
      drop_remainder: A scalar convertible to `tf.bool`, whether to drop a
        final partial batch.

    Raises:
      TypeError: if the input contains sparse tensors, or a padding value does
        not match its component's type.
      ValueError: if a padded shape is invalid or incompatible with the
        corresponding input component shape.
    """
    self._input_dataset = input_dataset
    if sparse.any_sparse(get_legacy_output_classes(input_dataset)):
      # TODO(b/63669786): support batching of sparse tensors
      raise TypeError(
          "Batching of padded sparse tensors is not currently supported")
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    padding_values = (
        padding_values
        if padding_values is not None else _default_padding(input_dataset))
    input_shapes = get_legacy_output_shapes(input_dataset)
    flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)
    # Convert each padded shape to a canonical 1-D int64 tensor, validating it
    # against the corresponding input component shape.
    flat_padded_shapes_as_tensors = []
    for input_component_shape, padded_shape in zip(
        nest.flatten(input_shapes), flat_padded_shapes):
      flat_padded_shapes_as_tensors.append(
          _padded_shape_to_tensor(padded_shape, input_component_shape))
    self._padded_shapes = nest.pack_sequence_as(input_shapes,
                                                flat_padded_shapes_as_tensors)
    self._padding_values = nest.map_structure_up_to(
        input_shapes, _padding_value_to_tensor, padding_values,
        get_legacy_output_types(input_dataset))
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")

    def _padded_shape_to_batch_shape(s):
      # The batch dimension is statically known only when the remainder is
      # statically known to be dropped.
      return tensor_shape.TensorShape([
          tensor_util.constant_value(self._batch_size)
          if smart_cond.smart_constant_value(self._drop_remainder) else None
      ]).concatenate(tensor_util.constant_value_as_shape(s))
    output_shapes = nest.map_structure(
        _padded_shape_to_batch_shape, self._padded_shapes)
    self._structure = structure.convert_legacy_structure(
        get_legacy_output_types(self._input_dataset), output_shapes,
        get_legacy_output_classes(self._input_dataset))
    # pylint: disable=protected-access
    # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
    if smart_cond.smart_constant_value(self._drop_remainder) is False:
      variant_tensor = gen_dataset_ops.padded_batch_dataset(
          input_dataset._variant_tensor,
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          output_shapes=structure.get_flat_tensor_shapes(self._structure))
    else:
      variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(
          input_dataset._variant_tensor,
          batch_size=self._batch_size,
          padded_shapes=[
              ops.convert_to_tensor(s, dtype=dtypes.int64)
              for s in nest.flatten(self._padded_shapes)
          ],
          padding_values=nest.flatten(self._padding_values),
          drop_remainder=self._drop_remainder,
          output_shapes=structure.get_flat_tensor_shapes(self._structure))
    # pylint: enable=protected-access
    super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._structure
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
class MapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input."""
  def __init__(self,
               input_dataset,
               map_func,
               use_inter_op_parallelism=True,
               preserve_cardinality=False,
               use_legacy_function=False):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    self._preserve_cardinality = preserve_cardinality
    # Trace `map_func` against the input dataset's element structure.
    self._map_func = StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    variant_tensor = gen_dataset_ops.map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        preserve_cardinality=self._preserve_cardinality,
        **self._flat_structure)
    super(MapDataset, self).__init__(input_dataset, variant_tensor)
  def _functions(self):
    return [self._map_func]
  @property
  def element_spec(self):
    # The output element structure is whatever `map_func` returns.
    return self._map_func.output_structure
  def _transformation_name(self):
    return "Dataset.map()"
class ParallelMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over elements in its input in parallel."""
  def __init__(self,
               input_dataset,
               map_func,
               num_parallel_calls,
               use_inter_op_parallelism=True,
               preserve_cardinality=False,
               use_legacy_function=False):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism
    # Trace `map_func` against the input dataset's element structure.
    self._map_func = StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    # NOTE: this kernel takes an int32 `num_parallel_calls` (unlike
    # `parallel_interleave_dataset_v2`, which takes int64).
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")
    self._preserve_cardinality = preserve_cardinality
    variant_tensor = gen_dataset_ops.parallel_map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        num_parallel_calls=self._num_parallel_calls,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        preserve_cardinality=self._preserve_cardinality,
        **self._flat_structure)
    super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)
  def _functions(self):
    return [self._map_func]
  @property
  def element_spec(self):
    # The output element structure is whatever `map_func` returns.
    return self._map_func.output_structure
  def _transformation_name(self):
    return "Dataset.map()"
class FlatMapDataset(UnaryDataset):
  """A `Dataset` that maps a function over its input and flattens the result."""
  def __init__(self, input_dataset, map_func):
    """See `Dataset.flat_map()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must return a dataset; the resulting elements are the
    # elements of the datasets it returns.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    variant_tensor = gen_dataset_ops.flat_map_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        **self._flat_structure)
    super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)
  def _functions(self):
    return [self._map_func]
  @property
  def element_spec(self):
    return self._structure
  def _transformation_name(self):
    return "Dataset.flat_map()"
class InterleaveDataset(UnaryDataset):
  """A `Dataset` that interleaves the result of transformed inputs."""
  def __init__(self, input_dataset, map_func, cycle_length, block_length):
    """See `Dataset.interleave()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must return a dataset; the resulting elements are the
    # elements of the datasets it returns, interleaved.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    # cycle_length: how many input elements are processed concurrently;
    # block_length: how many consecutive elements to take from each.
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    variant_tensor = gen_dataset_ops.interleave_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,  # pylint: disable=protected-access
        self._cycle_length,
        self._block_length,
        f=self._map_func.function,
        **self._flat_structure)
    super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)
  def _functions(self):
    return [self._map_func]
  @property
  def element_spec(self):
    return self._structure
  def _transformation_name(self):
    return "Dataset.interleave()"
class ParallelInterleaveDataset(UnaryDataset):
  """A `Dataset` that maps a function over its input and interleaves the result."""
  def __init__(self, input_dataset, map_func, cycle_length, block_length,
               num_parallel_calls):
    """See `Dataset.interleave()` for details."""
    self._input_dataset = input_dataset
    self._map_func = StructuredFunctionWrapper(
        map_func, self._transformation_name(), dataset=input_dataset)
    # `map_func` must return a dataset; the resulting elements are the
    # elements of the datasets it returns, interleaved.
    if not isinstance(self._map_func.output_structure, DatasetSpec):
      raise TypeError(
          "`map_func` must return a `Dataset` object. Got {}".format(
              type(self._map_func.output_structure)))
    self._structure = self._map_func.output_structure._element_spec  # pylint: disable=protected-access
    # cycle_length: how many input elements are processed concurrently;
    # block_length: how many consecutive elements to take from each.
    self._cycle_length = ops.convert_to_tensor(
        cycle_length, dtype=dtypes.int64, name="cycle_length")
    self._block_length = ops.convert_to_tensor(
        block_length, dtype=dtypes.int64, name="block_length")
    # NOTE: this kernel takes an int64 `num_parallel_calls` (unlike
    # `parallel_map_dataset`, which takes int32).
    self._num_parallel_calls = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
    variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,  # pylint: disable=protected-access
        self._cycle_length,
        self._block_length,
        self._num_parallel_calls,
        f=self._map_func.function,
        **self._flat_structure)
    super(ParallelInterleaveDataset, self).__init__(input_dataset,
                                                    variant_tensor)
  def _functions(self):
    return [self._map_func]
  @property
  def element_spec(self):
    return self._structure
  def _transformation_name(self):
    return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that filters its input according to a predicate function."""

  def __init__(self, input_dataset, predicate, use_legacy_function=False):
    """See `Dataset.filter()` for details.

    Args:
      input_dataset: The dataset whose elements are tested by `predicate`.
      predicate: A function mapping a dataset element to a scalar boolean
        tensor; elements for which it returns `False` are dropped.
      use_legacy_function: Whether to trace `predicate` using the legacy
        (graph-mode) function machinery.

    Raises:
      ValueError: If `predicate` does not return a scalar boolean tensor.
    """
    self._input_dataset = input_dataset
    wrapped_func = StructuredFunctionWrapper(
        predicate,
        self._transformation_name(),
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    scalar_bool = tensor_spec.TensorSpec([], dtypes.bool)
    if not wrapped_func.output_structure.is_compatible_with(scalar_bool):
      raise ValueError(
          ("`predicate` return type must be convertible to a scalar "
           "boolean tensor. Was {}.").format(wrapped_func.output_structure))
    self._predicate = wrapped_func
    variant = gen_dataset_ops.filter_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        other_arguments=self._predicate.function.captured_inputs,
        predicate=self._predicate.function,
        **self._flat_structure)
    super(FilterDataset, self).__init__(input_dataset, variant)

  def _functions(self):
    """Returns the wrapped predicate function."""
    return [self._predicate]

  def _transformation_name(self):
    return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that asynchronously prefetches its input."""

  def __init__(self, input_dataset, buffer_size, slack_period=None):
    """See `Dataset.prefetch()` for details.

    Args:
      input_dataset: The input dataset.
      buffer_size: See `Dataset.prefetch()` for details.
      slack_period: (Optional.) An integer. If non-zero, determines the number
        of GetNext calls before injecting slack into the execution. This may
        reduce CPU contention at the start of a step. Note that a tensorflow
        user should not have to set this manually; enable this behavior
        automatically via `tf.data.Options.experimental_slack` instead. Defaults
        to None.
    """
    self._input_dataset = input_dataset
    # -1 is the sentinel value requesting an auto-tuned buffer size.
    buffer_size = -1 if buffer_size is None else buffer_size
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    variant = gen_dataset_ops.prefetch_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        buffer_size=self._buffer_size,
        slack_period=slack_period,
        **self._flat_structure)
    super(PrefetchDataset, self).__init__(input_dataset, variant)
class WindowDataset(UnaryDataset):
  """A dataset that creates window datasets from the input elements."""

  def __init__(self, input_dataset, size, shift, stride, drop_remainder):
    """See `window_dataset()` for more details.

    Args:
      input_dataset: The dataset whose elements are grouped into windows.
      size: Number of elements per window.
      shift: Offset between the first elements of consecutive windows.
      stride: Step between elements within a single window.
      drop_remainder: Whether to drop a trailing, partially-filled window.
    """
    self._input_dataset = input_dataset
    self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
    self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
    self._stride = ops.convert_to_tensor(
        stride, dtype=dtypes.int64, name="stride")
    self._drop_remainder = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")
    # Each output component becomes a (nested) dataset of windows, so every
    # component spec of the input is wrapped in a `DatasetSpec`.
    output_classes = get_legacy_output_classes(input_dataset)
    flat_specs = [
        DatasetSpec(  # pylint: disable=g-complex-comprehension
            structure.convert_legacy_structure(
                output_type, output_shape, output_class))
        for output_class, output_shape, output_type in zip(
            nest.flatten(output_classes),
            nest.flatten(get_legacy_output_shapes(input_dataset)),
            nest.flatten(get_legacy_output_types(input_dataset)))
    ]
    self._structure = nest.pack_sequence_as(output_classes, flat_specs)
    variant = gen_dataset_ops.window_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._size,
        self._shift,
        self._stride,
        self._drop_remainder,
        **self._flat_structure)
    super(WindowDataset, self).__init__(input_dataset, variant)

  @property
  def element_spec(self):
    return self._structure
class _OptionsDataset(UnaryUnchangedStructureDataset):
  """An identity `Dataset` that stores options."""

  def __init__(self, input_dataset, options):
    """Attaches `options` to `input_dataset`, merging with existing options.

    Args:
      input_dataset: The dataset the options apply to.
      options: The `tf.data.Options` to record on this dataset.
    """
    self._input_dataset = input_dataset
    # Merge the new options on top of any options the input already carries.
    upstream_options = input_dataset.options()
    if upstream_options:
      self._options = upstream_options.merge(options)
    else:
      self._options = options
    # This transformation is a pure identity: reuse the input's variant.
    variant = input_dataset._variant_tensor  # pylint: disable=protected-access
    super(_OptionsDataset, self).__init__(input_dataset, variant)

  def options(self):
    """Returns the (merged) options recorded on this dataset."""
    return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and models performance."""

  def __init__(self, input_dataset, algorithm, cpu_budget):
    """Wraps `input_dataset` with performance modeling (autotuning).

    Args:
      input_dataset: The dataset to model.
      algorithm: The `AutotuneAlgorithm` to use for optimization.
      cpu_budget: CPU budget made available to the autotuner.
    """
    self._input_dataset = input_dataset
    # TODO(jsimsa): This check is introduced for forward compatibility and can
    # be removed after 7/24/2019. At that point, all servers are expected to
    # recognize the `algorithm` attribute.
    op_kwargs = dict(cpu_budget=cpu_budget, **self._flat_structure)
    if algorithm != AutotuneAlgorithm.HILL_CLIMB:
      # Only forward `algorithm` when it differs from the default, so older
      # servers that do not recognize the attribute keep working.
      op_kwargs["algorithm"] = algorithm
    variant = gen_dataset_ops.model_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        **op_kwargs)
    super(_ModelDataset, self).__init__(input_dataset, variant)
class _OptimizeDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and applies optimizations."""

  def __init__(self, input_dataset, optimizations, optimization_configs=None):
    """Wraps `input_dataset`, applying the named static optimizations.

    Args:
      input_dataset: The dataset to optimize.
      optimizations: (Optional.) Names of the graph rewrites to apply;
        `None` means no rewrites.
      optimization_configs: (Optional.) Configuration strings for the
        rewrites; `None` means no configuration.
    """
    self._input_dataset = input_dataset
    optimizations = [] if optimizations is None else optimizations
    optimization_configs = ([] if optimization_configs is None
                            else optimization_configs)
    self._optimizations = ops.convert_to_tensor(
        optimizations, dtype=dtypes.string, name="optimizations")
    variant = gen_dataset_ops.optimize_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._optimizations,
        optimization_configs=optimization_configs,
        **self._flat_structure)
    super(_OptimizeDataset, self).__init__(input_dataset, variant)
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, and sets a stats aggregator."""

  def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
    """Associates a stats aggregator with `input_dataset`.

    Args:
      input_dataset: The dataset whose statistics are collected.
      aggregator: The stats aggregator that receives the statistics.
      prefix: Prefix prepended to the names of recorded statistics.
      counter_prefix: Prefix used for the statistics counters.
    """
    self._input_dataset = input_dataset
    self._stats_aggregator = aggregator
    self._prefix = prefix
    self._counter_prefix = counter_prefix
    variant = ged_ops.set_stats_aggregator_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._stats_aggregator._resource,  # pylint: disable=protected-access
        self._prefix,
        self._counter_prefix,
        **self._flat_structure)
    super(_SetStatsAggregatorDataset, self).__init__(input_dataset, variant)
class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, overriding intra-op parallelism."""

  def __init__(self, input_dataset, max_intra_op_parallelism):
    """Caps intra-op parallelism for the pipeline rooted at `input_dataset`.

    Args:
      input_dataset: The dataset whose execution is being configured.
      max_intra_op_parallelism: Maximum intra-op parallelism to use when
        executing ops in this pipeline.
    """
    self._input_dataset = input_dataset
    self._max_intra_op_parallelism = ops.convert_to_tensor(
        max_intra_op_parallelism,
        dtype=dtypes.int64,
        name="max_intra_op_parallelism")
    variant = ged_ops.max_intra_op_parallelism_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._max_intra_op_parallelism,
        **self._flat_structure)
    super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset, variant)
class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):
  """A `Dataset` that acts as an identity, setting a private threadpool."""

  def __init__(self, input_dataset, num_threads):
    """Executes the pipeline rooted at `input_dataset` on a private pool.

    Args:
      input_dataset: The dataset whose execution is being configured.
      num_threads: Size of the private thread pool for this pipeline.
    """
    self._input_dataset = input_dataset
    self._num_threads = ops.convert_to_tensor(
        num_threads, dtype=dtypes.int64, name="num_threads")
    variant = ged_ops.private_thread_pool_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._num_threads,
        **self._flat_structure)
    super(_PrivateThreadPoolDataset, self).__init__(input_dataset, variant)
class _RestructuredDataset(UnaryDataset):
  """An internal helper for changing the structure and shape of a dataset."""

  def __init__(self, dataset, structure):
    """Overrides the element structure reported for `dataset`.

    Args:
      dataset: The dataset whose structure metadata is replaced.
      structure: The structure to report via `element_spec`.
    """
    self._input_dataset = dataset
    self._structure = structure
    # No new op is created: the input's variant is reused and only the
    # Python-side structure metadata is overridden.
    variant = self._input_dataset._variant_tensor  # pylint: disable=protected-access
    super(_RestructuredDataset, self).__init__(dataset, variant)

  @property
  def element_spec(self):
    return self._structure
class _UnbatchDataset(UnaryDataset):
  """A dataset that splits the elements of its input into multiple elements."""

  def __init__(self, input_dataset):
    """See `unbatch()` for more details.

    Args:
      input_dataset: The dataset whose leading (batch) dimension is removed.

    Raises:
      ValueError: If any component is a scalar, or if the components do not
        agree on their leading (batch) dimension.
    """
    flat_shapes = input_dataset._flat_shapes  # pylint: disable=protected-access
    if any(s.ndims == 0 for s in flat_shapes):
      raise ValueError("Cannot unbatch an input with scalar components.")
    # All components must share the same (possibly unknown) batch dimension.
    batch_dim = tensor_shape.Dimension(None)
    for shape in flat_shapes:
      try:
        batch_dim = batch_dim.merge_with(shape[0])
      except ValueError:
        raise ValueError("Cannot unbatch an input whose components have "
                         "different batch sizes.")
    self._input_dataset = input_dataset
    self._structure = nest.map_structure(
        lambda spec: spec._unbatch(),  # pylint: disable=protected-access
        get_structure(input_dataset))
    variant = ged_ops.unbatch_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        **self._flat_structure)
    super(_UnbatchDataset, self).__init__(input_dataset, variant)

  @property
  def element_spec(self):
    return self._structure
| 39.491671 | 116 | 0.691716 |
59a74e4aa51be938b4e534e27299d8552f823e87 | 2,681 | py | Python | dimod/roof_duality/fix_variables.py | hsadeghidw/dimod | c6b4adc18c22f39fd5a79e6775bbcab84b1a5489 | [
"Apache-2.0"
] | 101 | 2017-08-30T20:08:46.000Z | 2022-03-23T23:40:32.000Z | dimod/roof_duality/fix_variables.py | hsadeghidw/dimod | c6b4adc18c22f39fd5a79e6775bbcab84b1a5489 | [
"Apache-2.0"
] | 862 | 2017-09-27T19:56:43.000Z | 2022-03-30T18:00:43.000Z | dimod/roof_duality/fix_variables.py | hsadeghidw/dimod | c6b4adc18c22f39fd5a79e6775bbcab84b1a5489 | [
"Apache-2.0"
] | 78 | 2017-09-13T23:01:10.000Z | 2022-02-22T02:40:40.000Z | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dimod.vartypes import Vartype
def fix_variables(bqm, sampling_mode=True):
"""Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
"""
try:
from dwave.preprocessing import roof_duality
except ImportError:
raise TypeError("you must install dwave-preprocessing to use this function") from None
warnings.warn("fix_variables() is deprecated and will be removed in dimod 0.11.0, "
"please install dwave-preprocessing and use dwave.preprocessing.roof_duality "
"instead", DeprecationWarning, stacklevel=2)
_, mapping = roof_duality(bqm, strict=sampling_mode)
return mapping
| 43.95082 | 98 | 0.718762 |
f8b49333eb7c846d37b8fa11e9204154d89ac18e | 1,942 | py | Python | pkg/get_face.py | 610yilingliu/FakeDonaldTrump | d5550cc9a7c649f0bcf6a4a1b4c26a5b389a4959 | [
"MIT"
] | 1 | 2021-03-10T05:05:45.000Z | 2021-03-10T05:05:45.000Z | pkg/get_face.py | 610yilingliu/FakeDonaldTrump | d5550cc9a7c649f0bcf6a4a1b4c26a5b389a4959 | [
"MIT"
] | null | null | null | pkg/get_face.py | 610yilingliu/FakeDonaldTrump | d5550cc9a7c649f0bcf6a4a1b4c26a5b389a4959 | [
"MIT"
] | null | null | null | import os
import dlib
import cv2
import face_recognition
import numpy as np
def cut_image(path):
"""
:type path: string, path of a specific image
:ytype cutted image (numpy)
"""
if not os.path.exists(path):
print(path + ' not exists')
return
detector = dlib.get_frontal_face_detector()
image = cv2.imread(path, cv2.IMREAD_COLOR)
dets = detector(image, 1)
if not dets:
return
for idx, face in enumerate(dets):
l = face.left()
r = face.right()
u = face.top()
d = face.bottom()
if r - l < 60 or d - u < 60:
return
# enlarger the selection
extend_d = int((d - u) * 0.02)
extend_u = int((d - u) * 0.18)
extend_lr = int((r - l) * 0.1)
new_u = u - extend_u
new_d = d + extend_d
new_l = l - extend_lr
new_r = r + extend_lr
if new_u < 0 or new_d > image.shape[0] or new_l < 0 or new_d > image.shape[1]:
return
cropped = image[new_u:new_d, l:r]
regularized = cv2.resize(cropped, (64, 64))
yield regularized, idx
def photo_cleaner(photo, valid_photo):
valid_image = face_recognition.load_image_file(valid_photo)
valid_encoding = face_recognition.face_encodings(valid_image)[0]
image = face_recognition.load_image_file(photo)
image_encoding = face_recognition.face_encodings(image)
if image_encoding:
image_encoding = image_encoding[0]
else:
os.remove(photo)
return
results = face_recognition.compare_faces([valid_encoding], image_encoding)
if results[0] == False:
os.remove(photo)
if __name__ == '__main__':
pics = "./trump_photos"
for f in os.listdir(pics):
p = pics + '/' + f
for pic, idx in cut_image(p):
path = './trump/' + str(idx) + '_' + f
cv2.imwrite(path, pic)
photo_cleaner(path, 'trump.png') | 31.322581 | 86 | 0.593203 |
88e58565986111474acc5c4d679b96cc85dd4d63 | 672 | py | Python | SimpleBlog/posts/migrations/0001_initial.py | Ashish94720/Blog | d84a0395098bcf33faec4cfbdc012b0b88627925 | [
"bzip2-1.0.6"
] | 2 | 2019-07-08T22:51:25.000Z | 2019-07-08T22:51:51.000Z | SimpleBlog/posts/migrations/0001_initial.py | Ashish94720/Blog | d84a0395098bcf33faec4cfbdc012b0b88627925 | [
"bzip2-1.0.6"
] | null | null | null | SimpleBlog/posts/migrations/0001_initial.py | Ashish94720/Blog | d84a0395098bcf33faec4cfbdc012b0b88627925 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2.2 on 2019-06-20 17:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('content', models.TextField()),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
| 26.88 | 114 | 0.568452 |
8a7b131a2c2ab883f68d2616ffcd0d8f2ace48bb | 12,044 | py | Python | models/Unet_2D/U_net_Sample_Generator.py | LongxiZhou/DLPE-method | ed20abc91e27423c7ff677a009cfd99314730217 | [
"BSD-3-Clause"
] | null | null | null | models/Unet_2D/U_net_Sample_Generator.py | LongxiZhou/DLPE-method | ed20abc91e27423c7ff677a009cfd99314730217 | [
"BSD-3-Clause"
] | null | null | null | models/Unet_2D/U_net_Sample_Generator.py | LongxiZhou/DLPE-method | ed20abc91e27423c7ff677a009cfd99314730217 | [
"BSD-3-Clause"
] | 1 | 2021-08-22T14:29:58.000Z | 2021-08-22T14:29:58.000Z | import Functions
import numpy as np
import os
def slicing(raw_array, patient_id, time_point, direction, single=True):
# raw_array has shape [512, 512, 512, 2] for [x, y, z, -],
data = raw_array[:, :, :, 0]
mask = raw_array[:, :, :, 1]
# patient_id looks like "xgfy-A000012", time_point looks like "2012-02-19", direction looks like 'X'
if single:
save_dict = Functions.get_father_dict() + '/samples_for_2D_segmentator/single_slice/' + patient_id + '/' + time_point + '/' + direction + '/'
else:
save_dict = Functions.get_father_dict() + '/samples_for_2D_segmentator/five_slices/' + patient_id + '/' + time_point + '/' + direction + '/'
print('patient_id, time_point, direction are:', patient_id, time_point, direction)
if direction == 'X':
for x in range(8, 504, 1):
current_slice_mask = mask[x, :, :]
if np.sum(current_slice_mask) < 5:
continue
else:
if single:
sample = np.zeros([512, 512, 2], 'float32')
sample[:, :, 0] = data[x, :, :]
sample[:, :, 1] = current_slice_mask
Functions.save_np_array(save_dict, str(x), sample)
else:
current_slice_data = data[x, :, :]
pre_five_data = data[x - 8, :, :] # previous 5 mm is 8 slices on x-axis
pre_two_data = data[x - 3, :, :] # previous 2 mm is 3 slices on x-axis
post_five_data = data[x + 8, :, :]
post_two_data = data[x + 3, :, :]
sample = np.zeros([512, 512, 6], 'float32')
sample[:, :, 0] = pre_five_data
sample[:, :, 1] = pre_two_data
sample[:, :, 2] = current_slice_data
sample[:, :, 3] = post_two_data
sample[:, :, 4] = post_five_data
sample[:, :, 5] = current_slice_mask
Functions.save_np_array(save_dict, str(x), sample)
if direction == 'Y':
for y in range(8, 504, 1):
current_slice_mask = mask[:, y, :]
if np.sum(current_slice_mask) < 5:
continue
else:
if single:
sample = np.zeros([512, 512, 2], 'float32')
sample[:, :, 0] = data[:, y, :]
sample[:, :, 1] = current_slice_mask
Functions.save_np_array(save_dict, str(y), sample)
else:
current_slice_data = data[:, y, :]
pre_five_data = data[:, y - 8, :] # previous 5 mm is 8 slices on y-axis
pre_two_data = data[:, y - 3, :] # previous 2 mm is 3 slices on y-axis
post_five_data = data[:, y + 8, :]
post_two_data = data[:, y + 3, :]
sample = np.zeros([512, 512, 6], 'float32')
sample[:, :, 0] = pre_five_data
sample[:, :, 1] = pre_two_data
sample[:, :, 2] = current_slice_data
sample[:, :, 3] = post_two_data
sample[:, :, 4] = post_five_data
sample[:, :, 5] = current_slice_mask
Functions.save_np_array(save_dict, str(y), sample)
if direction == 'Z':
for z in range(5, 507, 1):
current_slice_mask = mask[:, :, z]
if np.sum(current_slice_mask) < 5:
continue
else:
if single:
sample = np.zeros([512, 512, 2], 'float32')
sample[:, :, 0] = data[:, :, z]
sample[:, :, 1] = current_slice_mask
Functions.save_np_array(save_dict, str(z), sample)
else:
current_slice_data = data[:, :, z]
pre_five_data = data[:, :, z - 5] # previous 5 mm is 5 slices on z-axis
pre_two_data = data[:, :, z - 2] # previous 2 mm is 2 slices on z-axis
post_five_data = data[:, :, z + 5]
post_two_data = data[:, :, z + 2]
sample = np.zeros([512, 512, 6], 'float32')
sample[:, :, 0] = pre_five_data
sample[:, :, 1] = pre_two_data
sample[:, :, 2] = current_slice_data
sample[:, :, 3] = post_two_data
sample[:, :, 4] = post_five_data
sample[:, :, 5] = current_slice_mask
Functions.save_np_array(save_dict, str(z), sample)
def slicing_for_zhongxiao(raw_array, patient_id, time_point, aug, direction, single, base_dir):
# raw_array has shape [512, 512, 512, 2] for [x, y, z, -],
data = raw_array[:, :, :, 0]
mask = raw_array[:, :, :, 1]
# patient_id looks like "xgfy-A000012", time_point looks like "2012-02-19", direction looks like 'X'
if single:
save_dict = os.path.join(base_dir,'single_slice/')
else:
save_dict = os.path.join(base_dir,'five_slices/')
print('patient_id, time_point, direction are:', patient_id, time_point, direction)
name_prefix = patient_id + '_' + time_point + '_'+ ("original" if not aug else "aug-%s"%(aug)) + '_' + direction + '_'
if direction == 'X':
for x in range(8, 504, 1):
current_slice_mask = mask[x, :, :]
if np.sum(current_slice_mask) < 5:
continue
else:
if single:
sample = np.zeros([512, 512, 2], 'float32')
sample[:, :, 0] = data[x, :, :]
sample[:, :, 1] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(x), sample)
else:
current_slice_data = data[x, :, :]
pre_five_data = data[x - 8, :, :] # previous 5 mm is 8 slices on x-axis
pre_two_data = data[x - 3, :, :] # previous 2 mm is 3 slices on x-axis
post_five_data = data[x + 8, :, :]
post_two_data = data[x + 3, :, :]
sample = np.zeros([512, 512, 6], 'float32')
sample[:, :, 0] = pre_five_data
sample[:, :, 1] = pre_two_data
sample[:, :, 2] = current_slice_data
sample[:, :, 3] = post_two_data
sample[:, :, 4] = post_five_data
sample[:, :, 5] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(x), sample)
if direction == 'Y':
for y in range(8, 504, 1):
current_slice_mask = mask[:, y, :]
if np.sum(current_slice_mask) < 5:
continue
else:
if single:
sample = np.zeros([512, 512, 2], 'float32')
sample[:, :, 0] = data[:, y, :]
sample[:, :, 1] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(y), sample)
else:
current_slice_data = data[:, y, :]
pre_five_data = data[:, y - 8, :] # previous 5 mm is 8 slices on y-axis
pre_two_data = data[:, y - 3, :] # previous 2 mm is 3 slices on y-axis
post_five_data = data[:, y + 8, :]
post_two_data = data[:, y + 3, :]
sample = np.zeros([512, 512, 6], 'float32')
sample[:, :, 0] = pre_five_data
sample[:, :, 1] = pre_two_data
sample[:, :, 2] = current_slice_data
sample[:, :, 3] = post_two_data
sample[:, :, 4] = post_five_data
sample[:, :, 5] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(y), sample)
if direction == 'Z':
for z in range(5, 507, 1):
current_slice_mask = mask[:, :, z]
if np.sum(current_slice_mask) < 5:
continue
else:
if single:
sample = np.zeros([512, 512, 2], 'float32')
sample[:, :, 0] = data[:, :, z]
sample[:, :, 1] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(z), sample)
else:
current_slice_data = data[:, :, z]
pre_five_data = data[:, :, z - 5] # previous 5 mm is 5 slices on z-axis
pre_two_data = data[:, :, z - 2] # previous 2 mm is 2 slices on z-axis
post_five_data = data[:, :, z + 5]
post_two_data = data[:, :, z + 2]
sample = np.zeros([512, 512, 6], 'float32')
sample[:, :, 0] = pre_five_data
sample[:, :, 1] = pre_two_data
sample[:, :, 2] = current_slice_data
sample[:, :, 3] = post_two_data
sample[:, :, 4] = post_five_data
sample[:, :, 5] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(z), sample)
def slicing_three(raw_array, patient_id, time_point, direction,base_dir):
# raw_array has shape [512, 512, 512, 2] for [x, y, z, -],
data = raw_array[:, :, :, 0]
mask = raw_array[:, :, :, 1]
# patient_id looks like "xgfy-A000012", time_point looks like "2012-02-19", direction looks like 'X'
save_dict = os.path.join('three_slice/')
print('patient_id, time_point, direction are:', patient_id, time_point, direction)
name_prefix = patient_id + '_' + time_point.replace('_', '-') + '_' + direction + '_'
if direction == 'X':
for x in range(8, 504, 1):
current_slice_mask = mask[x, :, :]
if np.sum(current_slice_mask) < 5:
continue
else:
current_slice_data = data[x, :, :]
pre_one_data = data[x - 2, :, :] # previous 1 mm is 2 slices on x-axis
post_one_data = data[x + 2, :, :]
sample = np.zeros([512, 512, 4], 'float32')
sample[:, :, 0] = pre_one_data
sample[:, :, 1] = current_slice_data
sample[:, :, 2] = post_one_data
sample[:, :, 3] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(x), sample)
if direction == 'Y':
for y in range(8, 504, 1):
current_slice_mask = mask[:, y, :]
if np.sum(current_slice_mask) < 5:
continue
else:
current_slice_data = data[:, y, :]
pre_one_data = data[:, y - 2, :] # previous 1 mm is 2 slices on x-axis
post_one_data = data[:, y + 2, :]
sample = np.zeros([512, 512, 4], 'float32')
sample[:, :, 0] = pre_one_data
sample[:, :, 1] = current_slice_data
sample[:, :, 2] = post_one_data
sample[:, :, 3] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(y), sample)
if direction == 'Z':
for z in range(8, 504, 1):
current_slice_mask = mask[:, :, z]
if np.sum(current_slice_mask) < 5:
continue
else:
current_slice_data = data[:, :, z]
pre_one_data = data[:, :, z - 1] # previous 1 mm is 1 slices on x-axis
post_one_data = data[:, :, z + 1]
sample = np.zeros([512, 512, 4], 'float32')
sample[:, :, 0] = pre_one_data
sample[:, :, 1] = current_slice_data
sample[:, :, 2] = post_one_data
sample[:, :, 3] = current_slice_mask
Functions.save_np_array(save_dict, name_prefix + str(z), sample)
| 48.369478 | 149 | 0.47642 |
ceab0f53d728c9b6377cf4f3f79875773e146a57 | 5,694 | py | Python | encoder/model.py | iclementine/Real-Time-Voice-Cloning | 658d31c241d615e58dbaca283fff36f7013e372e | [
"MIT"
] | null | null | null | encoder/model.py | iclementine/Real-Time-Voice-Cloning | 658d31c241d615e58dbaca283fff36f7013e372e | [
"MIT"
] | null | null | null | encoder/model.py | iclementine/Real-Time-Voice-Cloning | 658d31c241d615e58dbaca283fff36f7013e372e | [
"MIT"
] | null | null | null | from encoder.params_model import *
from encoder.params_data import *
# 通过这种方式去除了层次性,其实也是不错的
from scipy.interpolate import interp1d
from sklearn.metrics import roc_curve
from torch.nn.utils import clip_grad_norm_
from scipy.optimize import brentq
from torch import nn
import numpy as np
import torch
class SpeakerEncoder(nn.Module):
# 实在是有点丑陋, 虽然这是一个 model parallel 的模型,但是把设备作为参数还是有点丑陋啊
# 而且所有的形状相关的东西竟然都不在这里面, 而是弄成了 hparams
# 让我们暂且 hack 一下,直接改成高效的 GPU 实现而暂不改动接口
def __init__(self, device, loss_device):
super().__init__()
self.loss_device = loss_device
# Network defition
self.lstm = nn.LSTM(input_size=mel_n_channels,
hidden_size=model_hidden_size,
num_layers=model_num_layers,
batch_first=True).to(device)
self.linear = nn.Linear(in_features=model_hidden_size,
out_features=model_embedding_size).to(device)
self.relu = torch.nn.ReLU().to(device)
# Cosine similarity scaling (with fixed initial parameter values)
self.similarity_weight = nn.Parameter(torch.tensor([10.]).to(device))
self.similarity_bias = nn.Parameter(torch.tensor([-5.]).to(device))
# Loss
self.loss_fn = nn.CrossEntropyLoss().to(device)
def do_gradient_ops(self):
# Gradient scale
self.similarity_weight.grad *= 0.01
self.similarity_bias.grad *= 0.01
# Gradient clipping
clip_grad_norm_(self.parameters(), 3, norm_type=2)
def forward(self, utterances, hidden_init=None):
"""
Computes the embeddings of a batch of utterance spectrograms.
:param utterances: batch of mel-scale filterbanks of same duration as a tensor of shape
(batch_size, n_frames, n_channels)
:param hidden_init: initial hidden state of the LSTM as a tensor of shape (num_layers,
batch_size, hidden_size). Will default to a tensor of zeros if None.
:return: the embeddings as a tensor of shape (batch_size, embedding_size)
"""
# Pass the input through the LSTM layers and retrieve all outputs, the final hidden state
# and the final cell state.
out, (hidden, cell) = self.lstm(utterances, hidden_init)
# We take only the hidden state of the last layer
embeds_raw = self.relu(self.linear(hidden[-1]))
# L2-normalize it
embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
return embeds
def similarity_matrix(self, embeds):
"""
Computes the similarity matrix according the section 2.1 of GE2E.
:param embeds: the embeddings as a tensor of shape (speakers_per_batch,
utterances_per_speaker, embedding_size)
:return: the similarity matrix as a tensor of shape (speakers_per_batch,
utterances_per_speaker, speakers_per_batch)
"""
speakers_per_batch, utterances_per_speaker, embed_dim = embeds.shape
# Inclusive centroids (1 per speaker). Cloning is needed for reverse differentiation
centroids_incl = torch.mean(embeds, dim=1)
centroids_incl_norm = torch.norm(centroids_incl, dim=-1, keepdim=True)
normalized_centroids_incl = centroids_incl / centroids_incl_norm
# Exclusive centroids (1 per utterance)
centroids_excl = (torch.sum(embeds, dim=1, keepdim=True) - embeds)
centroids_excl /= (utterances_per_speaker - 1)
centroids_excl_norm = torch.norm(centroids_excl, dim=2, keepdim=True)
normalized_centroids_excl = centroids_excl / centroids_excl_norm
p1 = torch.matmul(embeds.reshape(-1, embed_dim), normalized_centroids_incl.transpose(1, 0)) # (NM, N)
#print(p1.shape)
p2 = torch.bmm(embeds.reshape(-1, 1, embed_dim), normalized_centroids_excl.reshape(-1, embed_dim, 1)).squeeze(-1) # (NM, 1)
#print(p2.shape)
index = torch.repeat_interleave(torch.arange(speakers_per_batch), utterances_per_speaker).unsqueeze(-1).to(p1.device)
p = torch.scatter(p1, 1, index, p2)
p = p * self.similarity_weight + self.similarity_bias # neg
return p
def loss(self, embeds):
"""
Computes the softmax loss according the section 2.1 of GE2E.
:param embeds: the embeddings as a tensor of shape (speakers_per_batch,
utterances_per_speaker, embedding_size)
:return: the loss and the EER for this batch of embeddings.
"""
speakers_per_batch, utterances_per_speaker = embeds.shape[:2]
# Loss
sim_matrix = self.similarity_matrix(embeds)
sim_matrix = sim_matrix.reshape((speakers_per_batch * utterances_per_speaker,
speakers_per_batch))
target = torch.repeat_interleave(torch.arange(speakers_per_batch), utterances_per_speaker).to(sim_matrix.device)
loss = self.loss_fn(sim_matrix, target)
# EER (not backpropagated)
with torch.no_grad():
ground_truth = target.data.cpu().numpy()
inv_argmax = lambda i: np.eye(1, speakers_per_batch, i, dtype=np.int)[0]
labels = np.array([inv_argmax(i) for i in ground_truth])
preds = sim_matrix.detach().cpu().numpy()
# Snippet from https://yangcha.github.io/EER-ROC/
fpr, tpr, thresholds = roc_curve(labels.flatten(), preds.flatten())
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
return loss, eer | 44.834646 | 131 | 0.646294 |
a5b11ed662a2fa702fee96dc580ffbda0cb81025 | 98 | py | Python | sample/WRS2018/script/T3-DoubleArmV7S.py | roto5296/choreonoid | ffe12df8db71e32aea18833afb80dffc42c373d0 | [
"MIT"
] | 91 | 2015-01-29T11:03:42.000Z | 2022-02-13T07:34:04.000Z | sample/WRS2018/script/T3-DoubleArmV7S.py | roto5296/choreonoid | ffe12df8db71e32aea18833afb80dffc42c373d0 | [
"MIT"
] | 213 | 2015-01-26T06:21:15.000Z | 2020-07-23T05:51:30.000Z | sample/WRS2018/script/T3-DoubleArmV7S.py | roto5296/choreonoid | ffe12df8db71e32aea18833afb80dffc42c373d0 | [
"MIT"
] | 71 | 2015-01-06T02:32:05.000Z | 2020-12-01T03:42:25.000Z | import WRSUtil
WRSUtil.loadProject(
"MultiSceneViews", "T3", "AISTSimulator", "DoubleArmV7S")
| 24.5 | 61 | 0.744898 |
0c2a394fb37c98984b46faf3674c76c106788f6c | 514 | py | Python | test/python/compiler/__init__.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,599 | 2018-07-10T10:59:12.000Z | 2022-03-31T23:56:25.000Z | test/python/compiler/__init__.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 5,244 | 2018-07-10T06:20:13.000Z | 2022-03-31T22:18:48.000Z | test/python/compiler/__init__.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,409 | 2018-07-10T02:16:12.000Z | 2022-03-31T09:01:32.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for the compiler."""
| 36.714286 | 77 | 0.749027 |
303068ef39701851493fcc01180d4d177d554523 | 3,679 | py | Python | slack_invite/slack_invite/settings/base.py | sanchagrins/umuc-cs-slack | 648e709905b153ad17a3df8bd826a784edd5c11b | [
"MIT"
] | null | null | null | slack_invite/slack_invite/settings/base.py | sanchagrins/umuc-cs-slack | 648e709905b153ad17a3df8bd826a784edd5c11b | [
"MIT"
] | 13 | 2017-08-18T01:19:49.000Z | 2017-11-16T02:24:07.000Z | slack_invite/slack_invite/settings/base.py | umuc-cs/umuc-cs-slack | 648e709905b153ad17a3df8bd826a784edd5c11b | [
"MIT"
] | null | null | null | """
Django settings for slack_invite project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import random
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
def get_env_variable(var_name):
    """Return the value of environment variable *var_name*.

    If the variable is unset, fall back to generating a random 50-character
    secret (suitable for ``SECRET_KEY``) instead of raising
    ``ImproperlyConfigured`` — this keeps development servers bootable
    without configuration.  NOTE(review): the fallback value changes on
    every process start, which invalidates signed cookies/sessions; set the
    variable explicitly in production.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        # Hoisted out of the loop: one SystemRandom (os.urandom-backed,
        # cryptographically secure) instead of one instance per character.
        rng = random.SystemRandom()
        charset = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        return ''.join(rng.choice(charset) for _ in range(50))
# Read from the environment; falls back to a random per-process key (see
# get_env_variable above).
SECRET_KEY = get_env_variable('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG and ALLOWED_HOSTS are commented out here; since this
# module is settings/base.py they are presumably defined in the
# environment-specific settings modules that import it — verify before deploy.
#DEBUG = True
#ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap3',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'slack_invite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'slack_invite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file in the project root; fine for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Collected here by `collectstatic`; served from STATICFILES_DIRS in dev.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
| 26.467626 | 127 | 0.698559 |
31227badc44624f1c834598c8fa2639b7c404186 | 29,081 | py | Python | SSAH.py | SincereJoy/SSAH_CVPR2018 | 82ada6db1048e51e14cd1c98469b837c78b7584e | [
"MIT"
] | null | null | null | SSAH.py | SincereJoy/SSAH_CVPR2018 | 82ada6db1048e51e14cd1c98469b837c78b7584e | [
"MIT"
] | null | null | null | SSAH.py | SincereJoy/SSAH_CVPR2018 | 82ada6db1048e51e14cd1c98469b837c78b7584e | [
"MIT"
] | null | null | null | from setting import *
from tnet import *
import tensorflow as tf
from ops import *
from utils.calc_hammingranking import calc_map
import os
import scipy.io as sio
from tqdm import tqdm
class SSAH(object):
    """Self-Supervised Adversarial Hashing (CVPR 2018) trainer.

    Builds three hash networks (label/self-supervised, image, text) and two
    modality discriminators, then alternately optimizes them so that all
    modalities map into a shared binary hash space for cross-modal retrieval.

    NOTE(review): this class reads many module-level globals (train_L,
    query_x, Sim, alpha, beta, gamma, eta, delta, Epoch, k_lab_net, ...)
    brought in via ``from setting import *`` / ``from tnet import *``; it is
    only usable together with those configuration modules.
    """

    def __init__(self, sess):
        """Capture the dataset splits, hyper-parameters and TF session.

        All values on the right-hand side below are module-level globals
        supplied by the `setting` module.
        """
        self.train_L = train_L
        self.train_X = train_x
        self.train_Y = train_y
        self.query_L = query_L
        self.query_X = query_x
        self.query_Y = query_y
        self.retrieval_L = retrieval_L
        self.retrieval_X = retrieval_x
        self.retrieval_Y = retrieval_y
        self.lr_lab = lr_lab
        self.lr_img = lr_img
        self.lr_txt = lr_txt
        self.lr_dis = lr_dis
        self.Sim = Sim
        self.meanpix = mean
        self.lab_net = lab_net
        self.img_net = img_net
        self.txt_net = txt_net
        self.dis_net_IL = dis_net_IL
        self.dis_net_TL = dis_net_TL
        self.mse_loss = mse_criterion
        self.sce_loss = sce_criterion
        self.image_size = image_size
        self.numClass = numClass
        self.dimText = dimText
        self.dimLab = dimLab
        self.phase = phase
        self.checkpoint_dir = checkpoint_dir
        self.dataset_dir = dataset_dir
        self.bit = bit
        self.num_train = num_train
        self.batch_size = batch_size
        self.SEMANTIC_EMBED = SEMANTIC_EMBED
        self.build_model()
        self.saver = tf.compat.v1.train.Saver()
        self.sess = sess

    def build_model(self):
        """Create placeholders, the three hash networks, both discriminators
        and the adversarial / pairwise / quantization / label losses."""
        self.ph = {}
        self.ph['label_input'] = tf.compat.v1.placeholder(tf.float32, (None, 1, self.numClass, 1), name='label_input')
        self.ph['image_input'] = tf.compat.v1.placeholder(tf.float32, [None, self.image_size, self.image_size, 3], name='image_input')
        self.ph['text_input'] = tf.compat.v1.placeholder(tf.float32, [None, 1, self.dimText, 1], name='text_input')
        self.ph['lr_hash'] = tf.compat.v1.placeholder('float32', (), name='lr_hash')
        self.ph['lr_lab'] = tf.compat.v1.placeholder('float32', (), name='lr_lab')
        self.ph['lr_img'] = tf.compat.v1.placeholder('float32', (), name='lr_img')
        self.ph['lr_txt'] = tf.compat.v1.placeholder('float32', (), name='lr_txt')
        self.ph['lr_dis'] = tf.compat.v1.placeholder('float32', (), name='lr_discriminator')
        self.ph['keep_prob'] = tf.compat.v1.placeholder('float32', (), name='keep_prob')
        self.ph['Sim'] = tf.compat.v1.placeholder('float32', [self.num_train, self.batch_size], name='Sim')
        self.ph['F'] = tf.compat.v1.placeholder('float32', [None, self.bit], name='F')
        self.ph['G'] = tf.compat.v1.placeholder('float32', [None, self.bit], name='G')
        self.ph['H'] = tf.compat.v1.placeholder('float32', [None, self.bit], name='H')
        self.ph['L_batch'] = tf.compat.v1.placeholder('float32', [None, self.numClass], name='L_batch')
        self.ph['B_batch'] = tf.compat.v1.placeholder('float32', [None, self.bit], name='b_batch')
        self.ph['I_fea'] = tf.compat.v1.placeholder('float32', [None, self.SEMANTIC_EMBED], name='I_fea')
        self.ph['T_fea'] = tf.compat.v1.placeholder('float32', [None, self.SEMANTIC_EMBED], name='T_fea')
        self.ph['L_fea'] = tf.compat.v1.placeholder('float32', [None, self.SEMANTIC_EMBED], name='L_fea')
        self.ph['L_fea_batch'] = tf.compat.v1.placeholder('float32', [None, 1, self.SEMANTIC_EMBED, 1], name='L_fea_batch')
        self.ph['I_fea_batch'] = tf.compat.v1.placeholder('float32', [None, 1, self.SEMANTIC_EMBED, 1], name='I_fea_batch')
        self.ph['T_fea_batch'] = tf.compat.v1.placeholder('float32', [None, 1, self.SEMANTIC_EMBED, 1], name='T_fea_batch')
        # construct label network
        self.Hsh_L, self.Fea_L, self.Lab_L = self.lab_net(self.ph['label_input'], self.bit, self.dimLab)
        # construct image network
        self.Hsh_I, self.Fea_I, self.Lab_I = self.img_net(self.ph['image_input'], self.bit, self.numClass)
        # construct text network
        self.Hsh_T, self.Fea_T, self.Lab_T = self.txt_net(self.ph['text_input'], self.dimText, self.bit, self.numClass)
        # construct two discriminator networks (image-vs-label, text-vs-label)
        self.isfrom_IL = self.dis_net_IL(self.ph['I_fea_batch'], self.ph['keep_prob'], reuse=False, name="disnet_IL")
        self.isfrom_L1 = self.dis_net_IL(self.ph['L_fea_batch'], self.ph['keep_prob'], reuse=True, name="disnet_IL")
        self.isfrom_TL = self.dis_net_TL(self.ph['T_fea_batch'], self.ph['keep_prob'], reuse=False, name="disnet_TL")
        self.isfrom_L2 = self.dis_net_TL(self.ph['L_fea_batch'], self.ph['keep_prob'], reuse=True, name="disnet_TL")
        # loss_D: discriminators should output 0 for image/text features and
        # 1 for label features.
        Loss_adver_IL = self.sce_loss(logits=self.isfrom_IL, labels=tf.zeros_like(self.isfrom_IL))
        Loss_adver_TL = self.sce_loss(logits=self.isfrom_TL, labels=tf.zeros_like(self.isfrom_TL))
        Loss_adver_L1 = self.sce_loss(logits=self.isfrom_L1, labels=tf.ones_like(self.isfrom_L1))
        Loss_adver_L2 = self.sce_loss(logits=self.isfrom_L2, labels=tf.ones_like(self.isfrom_L2))
        self.Loss_D = tf.compat.v1.div(Loss_adver_IL + Loss_adver_TL + Loss_adver_L1 + Loss_adver_L2, 4.0)
        # train lab_net: pairwise (feature + hash) + quantization + label losses
        theta_L_1 = 1.0 / 2 * tf.matmul(self.ph['L_fea'], tf.transpose(self.Fea_L))
        Loss_pair_Fea_L = self.mse_loss(tf.multiply(self.ph['Sim'], theta_L_1), tf.math.log(1.0 + tf.exp(theta_L_1)))
        theta_L_2 = 1.0 / 2 * tf.matmul(self.ph['H'], tf.transpose(self.Hsh_L))
        Loss_pair_Hsh_L = self.mse_loss(tf.multiply(self.ph['Sim'], theta_L_2), tf.math.log(1.0 + tf.exp(theta_L_2)))
        Loss_quant_L = self.mse_loss(self.ph['B_batch'], self.Hsh_L)
        Loss_label_L = self.mse_loss(self.ph['L_batch'], self.Lab_L)
        self.loss_l = alpha * Loss_pair_Fea_L + gamma * Loss_pair_Hsh_L + beta * Loss_quant_L + eta * Loss_label_L
        # train img_net combined with lab_net (extra adversarial term tries to
        # fool the image/label discriminator)
        theta_I_1 = 1.0 / 2 * tf.matmul(self.ph['L_fea'], tf.transpose(self.Fea_I))
        Loss_pair_Fea_I = self.mse_loss(tf.multiply(self.ph['Sim'], theta_I_1), tf.math.log(1.0 + tf.exp(theta_I_1)))
        theta_I_2 = 1.0 / 2 * tf.matmul(self.ph['H'], tf.transpose(self.Hsh_I))
        Loss_pair_Hsh_I = self.mse_loss(tf.multiply(self.ph['Sim'], theta_I_2), tf.math.log(1.0 + tf.exp(theta_I_2)))
        Loss_quant_I = self.mse_loss(self.ph['B_batch'], self.Hsh_I)
        Loss_label_I = self.mse_loss(self.ph['L_batch'], self.Lab_I)
        Loss_adver_I = self.sce_loss(logits=self.isfrom_IL, labels=tf.ones_like(self.isfrom_IL))
        self.loss_i = alpha * Loss_pair_Fea_I + gamma * Loss_pair_Hsh_I + beta * Loss_quant_I + eta * Loss_label_I + delta * Loss_adver_I
        # train txt_net combined with lab_net (symmetric to the image branch)
        theta_T_1 = 1.0 / 2 * tf.matmul(self.ph['L_fea'], tf.transpose(self.Fea_T))
        Loss_pair_Fea_T = self.mse_loss(tf.multiply(self.ph['Sim'], theta_T_1), tf.math.log(1.0 + tf.exp(theta_T_1)))
        theta_T_2 = 1.0 / 2 * tf.matmul(self.ph['H'], tf.transpose(self.Hsh_T))
        Loss_pair_Hsh_T = self.mse_loss(tf.multiply(self.ph['Sim'], theta_T_2), tf.math.log(1.0 + tf.exp(theta_T_2)))
        Loss_quant_T = self.mse_loss(self.ph['B_batch'], self.Hsh_T)
        Loss_label_T = self.mse_loss(self.ph['L_batch'], self.Lab_T)
        Loss_adver_T = self.sce_loss(logits=self.isfrom_TL, labels=tf.ones_like(self.isfrom_TL))
        self.loss_t = alpha * Loss_pair_Fea_T + gamma * Loss_pair_Hsh_T + beta * Loss_quant_T + eta * Loss_label_T + delta * Loss_adver_T

    def train(self):
        """Alternating optimization: label net -> discriminators -> image
        net -> text net, for `Epoch` outer iterations, with early stopping on
        non-decreasing losses inside each phase."""
        optimizer = tf.compat.v1.train.AdamOptimizer(self.ph['lr_hash'])
        dis_optim = tf.compat.v1.train.AdamOptimizer(self.ph['lr_dis'])
        gradient_l = optimizer.compute_gradients(self.loss_l)
        self.train_lab = optimizer.apply_gradients(gradient_l)
        gradient_i = optimizer.compute_gradients(self.loss_i)
        self.train_img = optimizer.apply_gradients(gradient_i)
        gradient_t = optimizer.compute_gradients(self.loss_t)
        self.train_txt = optimizer.apply_gradients(gradient_t)
        gradient_D = dis_optim.compute_gradients(self.Loss_D)
        self.train_dis = dis_optim.apply_gradients(gradient_D)
        init_op = tf.compat.v1.global_variables_initializer()
        self.sess.run(init_op)
        # `var` holds the evolving hash codes (F/G/H), predicted labels,
        # semantic features and learning-rate schedules shared across phases.
        var = {}
        var['lr_lab'] = self.lr_lab
        var['lr_dis'] = self.lr_dis
        var['lr_img'] = self.lr_img
        var['lr_txt'] = self.lr_txt
        var['batch_size'] = batch_size
        var['F'] = np.random.randn(self.num_train, self.bit)
        var['G'] = np.random.randn(self.num_train, self.bit)
        var['H'] = np.random.randn(self.num_train, self.bit)
        var['LABEL_L'] = np.random.randn(self.num_train, self.numClass)
        var['LABEL_I'] = np.random.randn(self.num_train, self.numClass)
        var['LABEL_T'] = np.random.randn(self.num_train, self.numClass)
        var['feat_I'] = np.random.randn(self.num_train, self.SEMANTIC_EMBED)
        var['feat_T'] = np.random.randn(self.num_train, self.SEMANTIC_EMBED)
        var['feat_L'] = np.random.randn(self.num_train, self.SEMANTIC_EMBED)
        var['B'] = np.sign(var['H'] + var['G'] + var['F'])
        # Iterations
        for epoch in range(Epoch):
            results = {}
            results['loss_labNet'] = []
            results['loss_imgNet'] = []
            results['loss_txtNet'] = []
            results['Loss_D'] = []
            results['mapl2l'] = []
            results['mapi2i'] = []
            results['mapt2t'] = []
            if epoch % 1 == 0:
                print('++++++++Start train lab_net++++++++')
                for idx in range(2):
                    lr_lab_Up = var['lr_lab'][epoch:]
                    lr_lab = lr_lab_Up[idx]
                    for train_labNet_k in range(int(k_lab_net/(idx+1))):
                        # Train lab_net
                        var['H'], var['LABEL_L'], var['feat_L'] = self.train_lab_net(var, lr_lab)
                        var['B'] = np.sign(var['H'])
                        train_labNet_loss = self.calc_labnet_loss(var['H'], var['LABEL_L'], var['feat_L'], Sim)
                        results['loss_labNet'].append(train_labNet_loss)
                        print('---------------------------------------------------------------')
                        print('...epoch: %3d, loss_labNet: %3.3f' % (epoch, train_labNet_loss))
                        print('---------------------------------------------------------------')
                        # stop this phase as soon as the loss stops decreasing
                        if train_labNet_k > 1 and (results['loss_labNet'][-1] - results['loss_labNet'][-2]) >= 0:
                            break
            # Train domain discriminator
            if epoch % 1 == 0:
                print('++++++++Start train dis_net++++++++')
                for idx in range(2):
                    lr_dis_Up = var['lr_dis'][epoch:]
                    lr_dis = lr_dis_Up[idx]
                    for train_disNet_k in range(k_dis_net):
                        IsFrom_, IsFrom, Loss_D = self.train_dis_net(lr_dis)
                        erro, acc = self.calc_isfrom_acc(IsFrom_, IsFrom)
                        results['Loss_D'].append(Loss_D)
                        print('----------------------------------------')
                        print('..epoch:{0}, Loss_D:{1}, acc:{2}'.format(epoch, Loss_D, acc))
                        print('----------------------------------------')
                        if train_disNet_k > 1 and (results['Loss_D'][-1] - results['Loss_D'][-2]) <= 0:
                            break
            print('++++++++Starting Train img_net++++++++')
            for idx in range(3):
                lr_img_Up = var['lr_img'][epoch:]
                lr_img = lr_img_Up[idx]
                for train_imgNet_k in range(int(k_img_net/(idx+1))):
                    # Train img_net
                    var['F'], var['LABEL_I'], var['feat_I'] = self.train_img_net(var, lr_img)
                    B_i = np.sign(var['F'])
                    if train_imgNet_k % 2 == 0:
                        train_imgNet_loss = self.calc_loss(B_i, var['F'], var['H'], var['H'], Sim, var['LABEL_I'],
                                                           train_L, alpha, beta, gamma, eta)
                        results['loss_imgNet'].append(train_imgNet_loss)
                        print('---------------------------------------------------------------')
                        print('...epoch: %3d, loss_imgNet: %3.3f' % (epoch, train_imgNet_loss))
                        print('---------------------------------------------------------------')
                    if train_imgNet_k > 2 and (results['loss_imgNet'][-1] - results['loss_imgNet'][-2]) >= 0:
                        break
            print('++++++++Starting Train txt_net++++++++')
            for idx in range(3):
                lr_txt_Up = var['lr_txt'][epoch:]
                lr_txt = lr_txt_Up[idx]
                for train_txtNet_k in range(int(k_txt_net / (idx + 1))):
                    var['G'], var['LABEL_T'], var['feat_T'] = self.train_txt_net(var, lr_txt)
                    B_t = np.sign(var['G'])
                    if train_txtNet_k % 2 == 0:
                        train_txtNet_loss = self.calc_loss(B_t, var['H'], var['G'], var['H'], Sim, var['LABEL_T'], train_L, alpha, beta, gamma, eta)
                        results['loss_txtNet'].append(train_txtNet_loss)
                        print('---------------------------------------------------------------')
                        print('...epoch: %3d, Loss_txtNet: %s' % (epoch, train_txtNet_loss))
                        print('---------------------------------------------------------------')
                    if train_txtNet_k > 2 and (results['loss_txtNet'][-1] - results['loss_txtNet'][-2]) >= 0:
                        break
            var['B'] = np.sign(var['H'] + var['G'] + var['F'])
            print("********test************")
            self.test(self.phase)
            if np.mod(epoch, save_freq) == 0:
                self.save(self.checkpoint_dir, epoch)

    def test(self, phase):
        """Encode query/retrieval sets, report cross-modal MAP scores and
        dump the hash codes to a .mat file."""
        test = {}
        print('==========================================================')
        print('  ====                 Test map in all              ====')
        print('==========================================================')
        if phase == 'test' and self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        test['qBX'] = self.generate_code(self.query_X, self.bit, "image")
        test['qBY'] = self.generate_code(self.query_Y, self.bit, "text")
        test['rBX'] = self.generate_code(self.retrieval_X, self.bit, "image")
        test['rBY'] = self.generate_code(self.retrieval_Y, self.bit, "text")
        test['mapi2t'] = calc_map(test['qBX'], test['rBY'], self.query_L, self.retrieval_L)
        test['mapt2i'] = calc_map(test['qBY'], test['rBX'], self.query_L, self.retrieval_L)
        test['mapi2i'] = calc_map(test['qBX'], test['rBX'], self.query_L, self.retrieval_L)
        test['mapt2t'] = calc_map(test['qBY'], test['rBY'], self.query_L, self.retrieval_L)
        print('==================================================')
        print('...test map: map(i->t): %3.3f, map(t->i): %3.3f' % (test['mapi2t'], test['mapt2i']))
        print('...test map: map(t->t): %3.3f, map(i->i): %3.3f' % (test['mapt2t'], test['mapi2i']))
        print('==================================================')
        # Save hash code
        # NOTE(review): DATA_DIR, netStr and L are module-level globals from
        # the `setting` import — confirm they are defined there.
        datasetStr = DATA_DIR.split('/')[-1]
        dataset_bit_net = datasetStr + str(bit) + netStr
        savePath = '/'.join([os.getcwd(), 'Savecode', dataset_bit_net + '.mat'])
        if os.path.exists(savePath):
            os.remove(savePath)
        # Fix: save to the .mat path that was just cleared above; the original
        # passed `dataset_bit_net` (no directory, no extension), so the removal
        # and the save targeted different files.  The Savecode directory is
        # assumed to exist.
        sio.savemat(savePath, {'Qi': test['qBX'], 'Qt': test['qBY'],
                               'Di': test['rBX'], 'Dt': test['rBY'],
                               'retrieval_L': L['retrieval'], 'query_L': L['query']})

    def train_lab_net(self, var, lr_lab):
        """One pass over the training set updating the label (self-supervised)
        network; returns updated hash codes, predicted labels and features."""
        print('update label_net')
        H = var['H']
        Feat_L = var['feat_L']
        LABEL_L = var['LABEL_L']
        batch_size = var['batch_size']
        num_train = self.train_L.shape[0]
        for iter in tqdm(range(int(num_train / batch_size))):
            index = np.random.permutation(num_train)
            ind = index[0: batch_size]
            sample_L = self.train_L[ind, :]
            label = self.train_L[ind, :].astype(np.float32)
            label = label.reshape([label.shape[0], 1, label.shape[1], 1])
            S = calc_neighbor(self.train_L, sample_L)
            result = self.sess.run([self.Hsh_L, self.Lab_L, self.Fea_L], feed_dict={self.ph['label_input']: label})
            Hsh_L = result[0]
            Lab_L = result[1]
            Fea_L = result[2]
            H[ind, :] = Hsh_L
            Feat_L[ind, :] = Fea_L
            LABEL_L[ind, :] = Lab_L
            self.train_lab.run(feed_dict={self.ph['Sim']: S,
                                          self.ph['H']: var['H'],
                                          self.ph['L_batch']: self.train_L[ind, :],
                                          self.ph['lr_hash']: lr_lab,
                                          self.ph['L_fea']: Feat_L,
                                          self.ph['label_input']: label,
                                          self.ph['B_batch']: np.sign(Hsh_L),
                                          self.ph['keep_prob']: 1.0})
        return H, LABEL_L, Feat_L

    def train_dis_net(self, lr):
        """One pass updating the two discriminators; returns the stacked
        predictions, their targets and the last discriminator loss."""
        print('update dis_net')
        for iter in range(int(num_train / batch_size)):
            index = np.random.permutation(num_train)
            ind = index[0: batch_size]
            image = self.train_X[ind].astype(np.float64)
            image = image - self.meanpix.astype(np.float64)
            text = self.train_Y[ind, :].astype(np.float32)
            text = text.reshape([text.shape[0], 1, text.shape[1], 1])
            label = train_L[ind, :].astype(np.float32)
            label = label.reshape([label.shape[0], 1, label.shape[1], 1])
            result = self.sess.run([self.Fea_I, self.Fea_T, self.Fea_L], feed_dict={self.ph['image_input']: image,
                                                                                    self.ph['text_input']: text,
                                                                                    self.ph['label_input']: label})
            Fea_I = result[0]
            Fea_T = result[1]
            Fea_L = result[2]
            self.train_dis.run(feed_dict={self.ph['L_fea_batch']: Fea_L.reshape([Fea_L.shape[0], 1, Fea_L.shape[1], 1]),
                                          self.ph['I_fea_batch']: Fea_I.reshape([Fea_I.shape[0], 1, Fea_I.shape[1], 1]),
                                          self.ph['T_fea_batch']: Fea_T.reshape([Fea_T.shape[0], 1, Fea_T.shape[1], 1]),
                                          self.ph['lr_dis']: lr,
                                          self.ph['keep_prob']: 1.0})
            isfrom_IL = self.isfrom_IL.eval(feed_dict={self.ph['I_fea_batch']: Fea_I.reshape([Fea_I.shape[0], 1, Fea_I.shape[1], 1]),
                                                       self.ph['keep_prob']: 1.0})
            isfrom_L1 = self.isfrom_L1.eval(feed_dict={self.ph['L_fea_batch']: Fea_L.reshape([Fea_L.shape[0], 1, Fea_L.shape[1], 1]),
                                                       self.ph['keep_prob']: 1.0})
            isfrom_TL = self.isfrom_TL.eval(feed_dict={self.ph['T_fea_batch']: Fea_T.reshape([Fea_T.shape[0], 1, Fea_T.shape[1], 1]),
                                                       self.ph['keep_prob']: 1.0})
            Loss_Dis = self.Loss_D.eval(feed_dict={self.ph['L_fea_batch']: Fea_L.reshape([Fea_L.shape[0], 1, Fea_L.shape[1], 1]),
                                                   self.ph['I_fea_batch']: Fea_I.reshape([Fea_I.shape[0], 1, Fea_I.shape[1], 1]),
                                                   self.ph['T_fea_batch']: Fea_T.reshape([Fea_T.shape[0], 1, Fea_T.shape[1], 1]),
                                                   self.ph['lr_dis']: lr,
                                                   self.ph['keep_prob']: 1.0})
            if iter % 5 == 0:
                print('...discriminator_Loss_D: {0}'.format(Loss_Dis))
        return np.hstack((isfrom_IL, isfrom_L1, isfrom_TL)), np.hstack((np.zeros_like(isfrom_IL), np.ones_like(isfrom_L1), np.zeros_like(isfrom_TL))), Loss_Dis

    def train_img_net(self, var, lr_img):
        """One pass over the training set updating the image network."""
        print('update image_net')
        F = var['F']
        LABEL_I = var['LABEL_I']
        Feat_I = var['feat_I']
        batch_size = var['batch_size']
        num_train = self.train_X.shape[0]
        for iter in tqdm(range(int(num_train / batch_size))):
            index = np.random.permutation(num_train)
            ind = index[0: batch_size]
            sample_L = train_L[ind, :]
            label = train_L[ind, :].astype(np.float32)
            label = label.reshape([label.shape[0], 1, label.shape[1], 1])
            image = self.train_X[ind, :, :, :].astype(np.float64)
            image = image - self.meanpix.astype(np.float64)
            S = calc_neighbor(train_L, sample_L)
            result = self.sess.run([self.Hsh_I, self.Fea_I, self.Lab_I], feed_dict={self.ph['image_input']: image,
                                                                                    self.ph['label_input']: label})
            Hsh_I = result[0]
            Fea_I = result[1]
            Lab_I = result[2]
            F[ind, :] = Hsh_I
            Feat_I[ind, :] = Fea_I
            LABEL_I[ind, :] = Lab_I
            self.train_img.run(feed_dict={self.ph['Sim']: S,
                                          self.ph['H']: var['H'],
                                          self.ph['B_batch']: np.sign(Hsh_I),
                                          self.ph['L_batch']: self.train_L[ind, :],
                                          self.ph['L_fea']: var['feat_L'],
                                          self.ph['lr_hash']: lr_img,
                                          self.ph['I_fea_batch']: var['feat_I'].reshape([var['feat_I'].shape[0], 1, var['feat_I'].shape[1], 1]),
                                          self.ph['image_input']: image,
                                          self.ph['label_input']: label,
                                          self.ph['keep_prob']: 1.0})
        return F, LABEL_I, Feat_I

    def train_txt_net(self, var, lr_txt):
        """One pass over the training set updating the text network."""
        print('update text_net')
        G = var['G']
        Feat_T = var['feat_T']
        LABEL_T = var['LABEL_T']
        batch_size = var['batch_size']
        num_train = self.train_Y.shape[0]
        for iter in tqdm(range(int(num_train / batch_size))):
            index = np.random.permutation(num_train)
            ind = index[0: batch_size]
            sample_L = train_L[ind, :]
            label = train_L[ind, :].astype(np.float32)
            label = label.reshape([label.shape[0], 1, label.shape[1], 1])
            text = self.train_Y[ind, :].astype(np.float32)
            text = text.reshape([text.shape[0], 1, text.shape[1], 1])
            S = calc_neighbor(train_L, sample_L)
            result = self.sess.run([self.Hsh_T, self.Fea_T, self.Lab_T],feed_dict={self.ph['text_input']: text,
                                                                                   self.ph['label_input']: label})
            Hsh_T = result[0]
            Fea_T = result[1]
            Lab_T = result[2]
            G[ind, :] = Hsh_T
            Feat_T[ind, :] = Fea_T
            LABEL_T[ind,:] = Lab_T
            self.train_txt.run(feed_dict={self.ph['text_input']: text,
                                          self.ph['Sim']: S,
                                          self.ph['H']: var['H'],
                                          self.ph['B_batch']: np.sign(Hsh_T),
                                          self.ph['L_batch']: self.train_L[ind, :],
                                          self.ph['L_fea']: var['feat_L'],
                                          self.ph['lr_hash']: lr_txt,
                                          self.ph['T_fea_batch']: Fea_T.reshape([Fea_T.shape[0], 1, Fea_T.shape[1], 1]),
                                          self.ph['label_input']: label,
                                          self.ph['keep_prob']: 1.0})
        return G, LABEL_T, Feat_T

    def generate_code(self, Modal, bit, generate):
        """Encode a whole modality ("label", "image" or "text") into binary
        hash codes of length `bit`, batch by batch."""
        batch_size = 128
        if generate=="label":
            num_data = Modal.shape[0]
            index = np.linspace(0, num_data - 1, num_data).astype(int)
            B = np.zeros([num_data, bit], dtype=np.float32)
            # Fix: wrap the division in int() — `num_data / batch_size` is a
            # float in Python 3 and range() would raise TypeError (the image
            # and text branches below already did this correctly).
            for iter in tqdm(range(int(num_data / batch_size) + 1)):
                ind = index[iter * batch_size: min((iter + 1) * batch_size, num_data)]
                label = Modal[ind, :].astype(np.float32)
                label = label.reshape([label.shape[0], 1, label.shape[1], 1])
                Hsh_L = self.Hsh_L.eval(feed_dict={self.ph['label_input']: label})
                B[ind, :] = Hsh_L
        elif generate=="image":
            num_data = len(Modal)
            index = np.linspace(0, num_data - 1, num_data).astype(int)
            B = np.zeros([num_data, bit], dtype=np.float32)
            for iter in tqdm(range(int(num_data / batch_size) + 1)):
                ind = index[iter * batch_size: min((iter + 1) * batch_size, num_data)]
                mean_pixel = np.repeat(self.meanpix[:, :, :, np.newaxis], len(ind), axis=3)
                image = Modal[ind,:,:,:].astype(np.float64)
                image = image - mean_pixel.astype(np.float64).transpose(3, 0, 1, 2)
                Hsh_I = self.Hsh_I.eval(feed_dict={self.ph['image_input']: image})
                B[ind, :] = Hsh_I
        else:
            num_data = Modal.shape[0]
            index = np.linspace(0, num_data - 1, num_data).astype(int)
            B = np.zeros([num_data, bit], dtype=np.float32)
            for iter in tqdm(range(int(num_data / batch_size) + 1)):
                ind = index[iter * batch_size: min((iter + 1) * batch_size, num_data)]
                text = Modal[ind, :].astype(np.float32)
                text = text.reshape([text.shape[0], 1, text.shape[1], 1])
                Hsh_T = self.Hsh_T.eval(feed_dict={self.ph['text_input']: text})
                B[ind, :] = Hsh_T
        B = np.sign(B)
        return B

    def calc_labnet_loss(self, H, label_, feature, SIM):
        """NumPy-side mirror of the label-network loss, for logging."""
        term1 = np.sum(np.power((label_ - self.train_L), 2))
        theta_2 = np.matmul(H, np.transpose(H)) / 2
        term2 = np.sum(np.log(1 + np.exp(theta_2)) - SIM * theta_2)
        theta_3 = np.matmul(feature, np.transpose(feature)) / 2
        term3 = np.sum(np.log(1 + np.exp(theta_3)) - SIM * theta_3)
        loss = alpha * term1 + gamma * term2 + beta * term3# + gama4 * term4 + gama5 * term5
        print('label:',term1)
        print('pairwise_hash:',term2)
        print('pairwise_feat:',term3)
        return loss

    def calc_loss(self, B, F, G, H, Sim, label_, label, alpha, beta, gamma, eta):
        """NumPy-side mirror of the image/text-network loss, for logging."""
        theta = np.matmul(F, np.transpose(G)) / 2
        term1 = np.sum(np.log(1 + np.exp(theta)) - Sim * theta)
        term2 = np.sum(np.power(B-F, 2) + np.power(B-G, 2))
        term3 = np.sum(np.power(H-F, 2) + np.power(H-G, 2))
        term4 = np.sum(np.power((label_ - label), 2))
        loss = alpha * term1 + beta * term2 + gamma * term3 + eta * term4
        print('pairwise:', term1)
        print('quantization:', term2)
        print('hash_feature:', term3)
        print('labe_predict:', term4)
        return loss

    def calc_isfrom_acc(self, train_isfrom_, Train_ISFROM):
        """Count discriminator errors and accuracy by thresholding at 0.5."""
        erro = Train_ISFROM.shape[0] - np.sum(np.equal(np.sign(train_isfrom_ - 0.5), np.sign(Train_ISFROM - 0.5)).astype(int))
        acc = np.divide(np.sum(np.equal(np.sign(train_isfrom_ - 0.5), np.sign(Train_ISFROM - 0.5)).astype('float32')), Train_ISFROM.shape[0])
        return erro, acc

    def save(self, checkpoint_dir, step):
        """Write a checkpoint (creating the directory if needed)."""
        model_name = "SSAH"
        model_dir = "%s_%s" % (self.dataset_dir, self.bit)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)

    def load(self, checkpoint_dir):
        """Restore the latest checkpoint; return True on success."""
        print(" [*] Reading checkpoint...")
        model_dir = "%s_%s" % (self.dataset_dir, self.bit)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        ckpt = tf.compat.v1.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            return True
        else:
            return False
a98479cfc3073d382f4b4ad2d78f6a71972d070c | 785 | py | Python | sensors/watchExtensionsV1beta1HorizontalPodAutoscalerListForAllNamespaces.py | blinkops/stackstorm-kubernetes | 3b4a15d42f603f3e700efaf534169e2ec361f5d2 | [
"Apache-2.0"
] | 20 | 2016-12-24T01:35:41.000Z | 2022-03-06T08:32:16.000Z | sensors/watchExtensionsV1beta1HorizontalPodAutoscalerListForAllNamespaces.py | blinkops/stackstorm-kubernetes | 3b4a15d42f603f3e700efaf534169e2ec361f5d2 | [
"Apache-2.0"
] | 16 | 2017-05-02T19:38:57.000Z | 2021-06-17T08:31:17.000Z | sensors/watchExtensionsV1beta1HorizontalPodAutoscalerListForAllNamespaces.py | blinkops/stackstorm-kubernetes | 3b4a15d42f603f3e700efaf534169e2ec361f5d2 | [
"Apache-2.0"
] | 18 | 2017-06-20T00:44:12.000Z | 2022-03-30T08:41:42.000Z | from os import sys, path
if __name__ == '__main__' and __package__ is None:
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from sensor_base import SensorBase
class watchExtensionsV1beta1HorizontalPodAutoscalerListForAllNamespaces(SensorBase):
    """Sensor that watches the Kubernetes extensions/v1beta1 horizontal pod
    autoscaler list across all namespaces and emits the configured trigger."""

    def __init__(
            self,
            sensor_service,
            config=None,
            extension="/apis/extensions/v1beta1/watch/horizontalpodautoscalers",
            trigger_ref="kubernetes.horizontalpodautoscalers"):
        # Fix: `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed, because self.__class__ is then the subclass.
        # Name the class explicitly (works on both Python 2 and 3).
        super(watchExtensionsV1beta1HorizontalPodAutoscalerListForAllNamespaces, self).__init__(
            sensor_service=sensor_service,
            config=config,
            extension=extension,
            trigger_ref=trigger_ref)
2f7d0998a0435b80b8c05dba70ee22b8de2ce185 | 29,502 | py | Python | tests/test_modeling_gpt2.py | TiffanyHsuuuu/adapter | 9352bc33af5c7aea0ea4675c05ea5c3b049aa95c | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_gpt2.py | TiffanyHsuuuu/adapter | 9352bc33af5c7aea0ea4675c05ea5c3b049aa95c | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_gpt2.py | TiffanyHsuuuu/adapter | 9352bc33af5c7aea0ea4675c05ea5c3b049aa95c | [
"Apache-2.0"
] | 1 | 2022-02-13T14:33:41.000Z | 2022-02-13T14:33:41.000Z | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2Config,
GPT2DoubleHeadsModel,
GPT2ForSequenceClassification,
GPT2LMHeadModel,
GPT2Model,
GPT2ModelWithHeads,
GPT2Tokenizer,
)
class GPT2ModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
    def get_large_model_config(self):
        """Return the full-size pretrained GPT-2 config (fetched from the
        model hub, so this requires network access on first call)."""
        return GPT2Config.from_pretrained("gpt2")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = GPT2Config(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
use_cache=not gradient_checkpointing,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
gradient_checkpointing=gradient_checkpointing,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def prepare_config_and_inputs_for_decoder(self):
        """Extend :meth:`prepare_config_and_inputs` with random encoder outputs for decoder tests.

        Note that ``mc_token_ids`` is intentionally dropped from the returned
        tuple; encoder hidden states and attention mask are appended instead.
        """
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        # Binary (0/1) attention mask over the encoder sequence.
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Forward GPT2Model with progressively fewer optional inputs and check output shapes."""
        model = GPT2Model(config=config)
        model.to(torch_device)
        model.eval()
        # The first two calls only check that optional inputs are accepted;
        # shape assertions run on the minimal (input_ids only) call.
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check that decoding one step with cached ``past_key_values`` matches a full forward pass."""
        model = GPT2Model(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)
        # config.use_cache defaults the cache on, so the explicit use_cache=True
        # call must match; use_cache=False drops exactly one output (the cache).
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)
        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_gpt2_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check cached decoding equivalence when half of the sequence is masked out.

        The masked-out half of ``input_ids`` is also randomly perturbed to prove
        that masked positions do not influence the output.
        """
        model = GPT2Model(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        # the newly appended token is attended to (mask value 1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_gpt2_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Check cached decoding equivalence when appending three tokens at once (multi-token step)."""
        model = GPT2Model(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True)
        output, past = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past
        )["last_hidden_state"]
        # with past, the model only returns hidden states for the new tokens
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check GPT2LMHeadModel loss/logits shapes when labels are supplied (labels = inputs)."""
        model = GPT2LMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Run a forward + backward pass (used with gradient checkpointing enabled in the config)."""
        model = GPT2LMHeadModel(config)
        model.to(torch_device)
        # NOTE(review): no model.eval() here — presumably intentional so the
        # backward pass runs in training mode; confirm against upstream tests.
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_double_lm_head_model(
        self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
    ):
        """Check GPT2DoubleHeadsModel output shapes on multiple-choice shaped inputs.

        Inputs are tiled to ``(batch, num_choices, seq_length)`` by repeating
        the single-sequence tensors along a new choices dimension.
        """
        model = GPT2DoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "mc_token_ids": mc_token_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
            "labels": multiple_choice_inputs_ids,
        }
        result = model(**inputs)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_gpt2_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
):
config.num_labels = self.num_labels
model = GPT2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
print(config.num_labels, sequence_labels.size())
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class GPT2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Unit tests for the GPT-2 model family, driven by :class:`GPT2ModelTester`."""
    all_model_classes = (
        (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2ForSequenceClassification, GPT2ModelWithHeads)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()
    all_parallelizable_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()
    test_missing_keys = False
    test_model_parallel = True
    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Augment the common inputs with multiple-choice labels for GPT2DoubleHeadsModel."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "GPT2DoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                # input_ids/token_type_ids are reshaped to the same
                # (batch, num_choices, seq) layout by aliasing the labels tensor
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = GPT2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_gpt2_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model(*config_and_inputs)
    def test_gpt2_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs)
    def test_gpt2_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs)
    def test_gpt2_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs)
    def test_gpt2_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_gpt2_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_gpt2_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs)
    def test_gpt2_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs(gradient_checkpointing=True)
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        """Left-padded batch generation must match per-sentence generation."""
        model = GPT2LMHeadModel.from_pretrained("gpt2")
        model.to(torch_device)
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        # all-zero token types except an arbitrary id (500) on the last position,
        # to exercise the token_type_ids code path in generate()
        token_type_ids = torch.cat(
            [
                input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0),
                input_ids.new_full((input_ids.shape[0], 1), 500),
            ],
            dim=-1,
        )
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )
        outputs_tt = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
            token_type_ids=token_type_ids,
        )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a mess. I'm not sure if he's going",
            "Today, I'm going to be doing a lot of research on this. I",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertTrue(batch_out_sentence_tt != batch_out_sentence)  # token_type_ids should change output
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_batch_generation_2heads(self):
        """Same batch-generation check as above, but through GPT2DoubleHeadsModel."""
        model = GPT2DoubleHeadsModel.from_pretrained("gpt2")
        model.to(torch_device)
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        tokenizer.padding_side = "left"
        # This tokenizer has no pad token, so we have to set it in some way
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        token_type_ids = torch.cat(
            [
                input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0),
                input_ids.new_full((input_ids.shape[0], 1), 500),
            ],
            dim=-1,
        )
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )
        outputs_tt = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
            token_type_ids=token_type_ids,
        )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a mess. I'm not sure if he's going",
            "Today, I'm going to be doing a lot of research on this. I",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertTrue(batch_out_sentence_tt != batch_out_sentence)  # token_type_ids should change output
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GPT2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class GPT2ModelLanguageGenerationTest(unittest.TestCase):
    """Integration tests exercising text generation with the pretrained ``gpt2`` checkpoint."""
    @slow
    def test_lm_generate_gpt2(self):
        """Greedy generation must reproduce a fixed token sequence, with and without checkpointing."""
        for checkpointing in [True, False]:
            model = GPT2LMHeadModel.from_pretrained("gpt2", gradient_checkpointing=checkpointing)
            model.to(torch_device)
            input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog
            expected_output_ids = [
                464,
                3290,
                373,
                1043,
                287,
                257,
                2214,
                1474,
                262,
                16246,
                286,
                2688,
                290,
                2688,
                27262,
                13,
                198,
                198,
                464,
                3290,
            ] # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog
            output_ids = model.generate(input_ids, do_sample=False)
            self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
    @slow
    def test_gpt2_sample(self):
        """Seeded sampling reproduces a fixed string; token_type_ids must alter sampled outputs."""
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        model = GPT2LMHeadModel.from_pretrained("gpt2")
        model.to(torch_device)
        # fixed seed makes the sampled continuation deterministic
        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        token_type_ids = tokenized.token_type_ids.to(torch_device)
        output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5)
        output_seq_tt = model.generate(
            input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5
        )
        output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True)
        output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and if you don't know anything about the state of play during your holiday"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
        self.assertTrue(
            all([output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs))])
        ) # token_type_ids should change output
    @slow
    def test_gpt2_sample_max_time(self):
        """``max_time`` must bound generation duration within a 1.5x tolerance across search modes."""
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        model = GPT2LMHeadModel.from_pretrained("gpt2")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        MAX_TIME = 0.5
        # sampling
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=True, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))
        # greedy
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))
        # beam search
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, num_beams=2, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))
        # beam sampling
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=True, num_beams=2, max_time=MAX_TIME, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
        self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))
        # max_time=None: no time limit, so generation should exceed the bound
        start = datetime.datetime.now()
        model.generate(input_ids, do_sample=False, max_time=None, max_length=256)
        duration = datetime.datetime.now() - start
        self.assertGreater(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))
| 42.448921 | 118 | 0.679649 |
302f2c50f87c0b7ba6d3a744e385f0cf1aa755bf | 4,255 | py | Python | fare/tests/test_metrics.py | caitlinkuhlman/fare | e29a8360c7de12d4efb8de083c046d962157d95c | [
"BSD-3-Clause"
] | 4 | 2019-05-15T21:03:22.000Z | 2021-03-22T20:24:53.000Z | fare/tests/test_metrics.py | caitlinkuhlman/fare | e29a8360c7de12d4efb8de083c046d962157d95c | [
"BSD-3-Clause"
] | null | null | null | fare/tests/test_metrics.py | caitlinkuhlman/fare | e29a8360c7de12d4efb8de083c046d962157d95c | [
"BSD-3-Clause"
] | null | null | null | """Testing for metrics module"""
import pytest
import numpy as np
import matplotlib.pyplot as plt
import itertools
from sklearn import manifold, datasets
#from sklearn.utils.testing import assert_equal, assert_almost_equal
# TODO should these functions go in a base file?
from fare.metrics import _pairs
from fare.metrics import _merge_cal
from fare.metrics import _merge_eq
from fare.metrics import _merge_parity
from fare.metrics import _count_inversions
from fare.metrics import rank_equality
from fare.metrics import rank_calibration
from fare.metrics import rank_parity
def _eq_np64(var, value):
"""
Helper function to check the value and type of rank method outputs.
"""
if (var != value): # Values are not equal
assert False
if type(var) is not np.float64: # Not proper type (np.float64)
assert False
return True
#return (var == value) and type(var) is np.float64
@pytest.mark.rank_equality
@pytest.mark.rank_calibration
@pytest.mark.rank_parity
def test_metric_output_type_1():
    """
    Type checking of outputs: every rank metric should return np.float64 pairs.
    """
    y_true = [7, -0.5, 2, 3]
    y_pred = [2.5, 0.0, 2, 8]
    groups = [1, 1, 1, 0]
    # Rank equality
    error0,error1 = rank_equality(y_true, y_pred, groups)
    assert type(error0) is np.float64
    assert type(error1) is np.float64
    # Rank calibration
    error0,error1 = rank_calibration(y_true, y_pred, groups)
    assert type(error0) is np.float64
    assert type(error1) is np.float64
    # Rank parity (note: takes only predictions and groups)
    error0,error1 = rank_parity(y_pred,groups)
    assert type(error0) is np.float64
    assert type(error1) is np.float64
def test_scenario_1():
    """Perfect prediction: equality/calibration errors are zero; parity tracks group placement."""
    y_true = [1,2,3,4,5,6,7,8]
    y_pred = [1,2,3,4,5,6,7,8]
    groups = [1,1,1,1,1,0,0,0] # More 1 groups than 0 groups
    error0,error1 = rank_equality(y_true, y_pred, groups)
    assert _eq_np64(error0, 0.0) and _eq_np64(error1, 0.0)
    # Should run with no error
    error0,error1 = rank_calibration(y_true, y_pred, groups)
    assert _eq_np64(error0, 0.0) and _eq_np64(error1, 0.0)
    # Should run with no error
    error0,error1 = rank_parity(y_pred,groups)
    # TODO this is not giving expected
    assert _eq_np64(error0, 0.0) # and _eq_np64(error1, 0.1)
    assert error0 < error1 # group1 always favored
    # group 0 entirely ahead of group 1 in the ranking
    groups = [0,0,0,1,1,1,1,1]
    error0,error1 = rank_parity(y_pred,groups)
    assert _eq_np64(error0, 1.0) and _eq_np64(error1, 0.0)
    # degenerate case: only group 0 present
    groups = [0,0,0,0,0,0,0,0]
    error0,error1 = rank_parity(y_pred,groups)
    # TODO error0 and error1 types are not proper
    #assert _eq_np64(error0, 1.0) and _eq_np64(error1, 0.0)
    assert error0 > error1 # group0 always favored
    # degenerate case: only group 1 present
    groups = [1,1,1,1,1,1,1,1]
    error0,error1 = rank_parity(y_pred,groups)
    # TODO error0 and error1 types are not proper
    #assert _eq_np64(error0, 0.0) and _eq_np64(error1, 1.0)
    assert error0 < error1 # group1 always favored
def test_scenario_2():
    """Fully reversed prediction: every pair is inverted, calibration error is maximal."""
    y_true = [0,1,2,3,4,5]
    y_pred = [5,4,3,2,1,0]
    groups = [0,0,1,0,1,1]
    # Equality
    error0,error1 = rank_equality(y_true, y_pred, groups)
    assert _eq_np64(error0, 0.8888888888888888)
    assert _eq_np64(error1, 0.1111111111111111)
    # Should run with no error
    # Calibration
    error0,error1 = rank_calibration(y_true, y_pred, groups)
    assert _eq_np64(error0, 1.0) and _eq_np64(error1, 1.0)
    # Should run with no error
    # Parity
    error0,error1 = rank_parity(y_pred,groups)
    assert _eq_np64(error0, 0.1111111111111111)
    assert _eq_np64(error1, 0.8888888888888888)
    # parity has same values as equality because all pairs are inverted.
    # The order is opposite because y_pred orders the groups in reverse.
def test_scenario_3():
    """ Example from paper: expected metric values for a small 4-item, 2-group ranking. """
    y_true = [1,2,3,4]
    y_pred = [1,3,4,2]
    groups =[0,1,0,1]
    # Equality
    error0,error1 = rank_equality(y_true, y_pred, groups)
    assert _eq_np64(error0, 0.25) and _eq_np64(error1, 0.0)
    # Calibration
    error0,error1 = rank_calibration(y_true, y_pred, groups)
    assert _eq_np64(error0, 0.2) and _eq_np64(error1, 0.4)
    # Parity
    error0,error1 = rank_parity(y_pred,groups)
    assert _eq_np64(error0, 0.5) and _eq_np64(error1, 0.5)
| 31.286765 | 72 | 0.687897 |
a33966a465608dab11bd17d380af09429f3f50b5 | 141 | py | Python | CaseConfigParser.py | redoules/Ivory | c464902f02936db2b4e63c543ad064443e34b0eb | [
"MIT"
] | null | null | null | CaseConfigParser.py | redoules/Ivory | c464902f02936db2b4e63c543ad064443e34b0eb | [
"MIT"
] | null | null | null | CaseConfigParser.py | redoules/Ivory | c464902f02936db2b4e63c543ad064443e34b0eb | [
"MIT"
] | null | null | null | from configparser import ConfigParser
class CaseConfigParser(ConfigParser):
    """A ConfigParser that preserves the case of option names.

    The stock ConfigParser lower-cases every option key through
    ``optionxform``; overriding it with the identity keeps keys exactly
    as they appear in the source file.
    """

    def optionxform(self, optionstr):
        # Identity transform: option names stay case-sensitive.
        return optionstr
| 20.142857 | 37 | 0.77305 |
badaa68da426f71a64ff04922638607ac245d659 | 4,547 | py | Python | catalyst_rl/contrib/dl/callbacks/telegram_logger.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 46 | 2020-03-27T20:12:32.000Z | 2021-11-21T19:08:51.000Z | catalyst_rl/contrib/dl/callbacks/telegram_logger.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 2 | 2020-04-06T10:43:04.000Z | 2020-07-01T18:26:10.000Z | catalyst_rl/contrib/dl/callbacks/telegram_logger.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 5 | 2020-04-17T14:09:53.000Z | 2021-05-10T08:58:29.000Z | from typing import List # isort:skip
import logging
import os
from urllib.parse import quote_plus
from urllib.request import Request, urlopen
from catalyst_rl import utils
from catalyst_rl.core import _State, Callback, CallbackNode, CallbackOrder
class TelegramLogger(Callback):
    """
    Logger callback, translates ``state.metric_manager`` to telegram channel

    Messages are sent through the Telegram Bot HTTP API; send failures are
    logged as warnings and never interrupt training.
    """
    def __init__(
        self,
        token: str = None,
        chat_id: str = None,
        metric_names: List[str] = None,
        log_on_stage_start: bool = True,
        log_on_loader_start: bool = True,
        log_on_loader_end: bool = True,
        log_on_stage_end: bool = True,
        log_on_exception: bool = True,
    ):
        """
        Args:
            token (str): telegram bot's token,
                see https://core.telegram.org/bots
                (falls back to the ``CATALYST_TELEGRAM_TOKEN`` env var)
            chat_id (str): Chat unique identifier
                (falls back to the ``CATALYST_TELEGRAM_CHAT_ID`` env var)
            metric_names: List of metric names to log.
                if none - logs everything.
            log_on_stage_start (bool): send notification on stage start
            log_on_loader_start (bool): send notification on loader start
            log_on_loader_end (bool): send notification on loader end
            log_on_stage_end (bool): send notification on stage end
            log_on_exception (bool): send notification on exception
        """
        super().__init__(order=CallbackOrder.Logging, node=CallbackNode.Master)
        # @TODO: replace this logic with global catalyst_rl config at ~/.catalyst_rl
        self._token = token or os.environ.get("CATALYST_TELEGRAM_TOKEN", None)
        self._chat_id = (
            chat_id or os.environ.get("CATALYST_TELEGRAM_CHAT_ID", None)
        )
        assert self._token is not None and self._chat_id is not None
        self._base_url = (
            f"https://api.telegram.org/bot{self._token}/sendMessage"
        )
        self.log_on_stage_start = log_on_stage_start
        self.log_on_loader_start = log_on_loader_start
        self.log_on_loader_end = log_on_loader_end
        self.log_on_stage_end = log_on_stage_end
        self.log_on_exception = log_on_exception
        self.metrics_to_log = metric_names
    def _send_text(self, text: str):
        """Send ``text`` to the configured chat; warn (never raise) on any failure."""
        try:
            # NOTE(review): urlopen has no explicit timeout here — a stalled
            # connection could block the training loop; consider adding one.
            url = (
                f"{self._base_url}?"
                f"chat_id={self._chat_id}&"
                f"disable_web_page_preview=1&"
                f"text={quote_plus(text, safe='')}"
            )
            request = Request(url)
            urlopen(request)
        except Exception as e:
            logging.getLogger(__name__).warning(f"telegram.send.error:{e}")
    def on_stage_start(self, state: _State):
        """Notify about starting a new stage"""
        if self.log_on_stage_start:
            text = f"{state.stage_name} stage was started"
            self._send_text(text)
    def on_loader_start(self, state: _State):
        """Notify about starting running the new loader"""
        if self.log_on_loader_start:
            text = (
                f"{state.loader_name} {state.global_epoch} epoch has started"
            )
            self._send_text(text)
    def on_loader_end(self, state: _State):
        """Translate ``state.metric_manager`` to telegram channel"""
        if self.log_on_loader_end:
            metrics = state.loader_metrics
            # default: log every metric, in sorted order for stable messages
            if self.metrics_to_log is None:
                metrics_to_log = sorted(list(metrics.keys()))
            else:
                metrics_to_log = self.metrics_to_log
            rows: List[str] = [
                f"{state.loader_name} {state.global_epoch} epoch was finished:"
            ]
            for name in metrics_to_log:
                if name in metrics:
                    rows.append(utils.format_metric(name, metrics[name]))
            text = "\n".join(rows)
            self._send_text(text)
    def on_stage_end(self, state: _State):
        """Notify about finishing a stage"""
        if self.log_on_stage_end:
            text = f"{state.stage_name} stage was finished"
            self._send_text(text)
    def on_exception(self, state: _State):
        """Notify about raised Exception"""
        if self.log_on_exception:
            exception = state.exception
            # KeyboardInterrupt is a normal user stop — no notification for it
            if utils.is_exception(exception) and not isinstance(
                exception, KeyboardInterrupt
            ):
                text = (
                    f"`{type(exception).__name__}` exception was raised:\n"
                    f"{exception}"
                )
                self._send_text(text)
| 34.976923 | 84 | 0.598417 |
bdb00537691b0182389f586de8cf73f7902135da | 10,218 | py | Python | setup_scripts/group_stream_vectors.py | dankovacek/hysets_validation | debbd410ed12fa043091c0c60cc217b688ea941b | [
"MIT"
] | null | null | null | setup_scripts/group_stream_vectors.py | dankovacek/hysets_validation | debbd410ed12fa043091c0c60cc217b688ea941b | [
"MIT"
] | null | null | null | setup_scripts/group_stream_vectors.py | dankovacek/hysets_validation | debbd410ed12fa043091c0c60cc217b688ea941b | [
"MIT"
] | null | null | null |
import os
from multiprocessing import Pool
import pandas as pd
# import rioxarray as rxr
import geopandas as gpd
import fiona
from shapely.geometry import Polygon
from shapely.ops import linemerge
import zipfile
# Repository root: two directory levels above this script.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
processed_data_dir = os.path.join(BASE_DIR, 'processed_data')
# All grouped vector outputs are written under this folder.
vector_save_path = os.path.join(processed_data_dir, 'grouped_hydrographic_features')
if not os.path.exists(vector_save_path):
    os.mkdir(vector_save_path)
#
# Using the regrouped hydrologic regions, (process_hydrologic_regions.py),
# group the stream vectors for dem processing
#
def fill_holes(data):
    """
    Fill the interior holes ("gaps") of a single-row polygon GeoDataFrame.

    Each interior ring is turned into its own polygon, appended to ``data``,
    and the whole set is dissolved into one geometry. Returns the (shapely)
    geometry of the dissolved result, or the original geometry when there
    are no interior rings.

    NOTE: assumes ``data`` has exactly one row (only index [0] is read).
    """
    interior_gaps = data.interiors.values.tolist()[0]
    group_name = data.index.values[0]
    if interior_gaps is not None:
        print(f' ...{len(interior_gaps)} gaps found in {group_name} groupings.')
        gap_list = [Polygon(ring) for ring in interior_gaps]
        data_gaps = gpd.GeoDataFrame(geometry=gap_list, crs=data.crs)
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the supported replacement and behaves the same here.
        appended_set = pd.concat([data, data_gaps])
        # dissolve on a constant key to merge everything into one geometry
        appended_set['group'] = 0
        merged_polygon = appended_set.dissolve(by='group')
        return merged_polygon.geometry.values[0]
    else:
        print(f' ...no gaps found in {group_name}')
        return data.geometry.values[0]
# nhn_path = '/media/danbot/Samsung_T5/geospatial_data/WSC_data/NHN_feature_data/'
# Hard-coded local data root — adjust per machine.
nhn_path = '/home/danbot/Documents/code/hysets_validation/source_data/NHN_feature_data/'
nhn_feature_path = os.path.join(nhn_path, 'BC_NHN_features/')
seak_path = os.path.join(nhn_path, 'SEAK_features')
bc_groups_path = os.path.join(processed_data_dir, 'merged_basin_groups/')
bc_groups = gpd.read_file(bc_groups_path + 'BC_transborder_final_regions_4326.geojson')
# Reproject from WGS84 to EPSG:3005 — presumably BC Albers (NAD83); confirm.
bc_groups = bc_groups.to_crs(3005)
# 1. get the list of coastal + island regions
coast_groups = [
    '08A', '08B', '08C', '08D',
    '08E', '08F', '08G', '08M',
    '09M'
]
coast_islands = ['08O', '08H']
seak_groups = ['08A', '08B', '08C', '08D']
# Map of BC region code -> SE Alaska HUC8 basin IDs that border it.
seak_dict = {
    '08A': [19010405, 19010404, 19010403, 19010406],
    '08B': [19010301, 19010302, 19010303, 19010304,
        19010206, 19010204, 19010212, 19010211],
    '08C': [19010210, 19010208, 19010207, 19010205],
    '08D': [19010103, 19010209, 19010104, 19010102],
}
# 2. retrieve the polygons associated with the 'region' boundary.
# 3. retrieve littoral / shoreline layers and merge them
# 4. split the region polygon using the line created in step 3.
# 5. discard the sea surface polygon
# 6. save new polygon and use to trim DEM in dem_basin_mapper.py
# collection of individual linestrings for splitting in a
# list and add the polygon lines to it.
# line_split_collection.append(polygon.boundary)
# merged_lines = shapely.ops.linemerge(line_split_collection)
# border_lines = shapely.ops.unary_union(merged_lines)
# decomposition = shapely.ops.polygonize(border_lines)
# load and merge the SEAK files into one gdf
seak_streams_path = os.path.join(nhn_path, 'SEAK_WBDHU8_polygons.geojson')
SEAK_polygons = gpd.read_file(seak_streams_path)
SEAK_polygons = SEAK_polygons.to_crs(3005)
SEAK_files = os.listdir(seak_path)
def retrieve_and_group_layers(feature_path, files, target_crs, target_layer):
    """Collect every shapefile layer matching *target_layer* from the zipped
    feature archives under *feature_path*, reprojected to *target_crs*.

    Returns a list of GeoDataFrames, one per matching layer, in archive order.
    Archives with no matching layer are reported and skipped.
    """
    frames = []
    seen_crs = []
    print(f' ...checking features at {feature_path} for layer {target_layer}.')
    for file in files:
        archive_path = os.path.join(feature_path, file)
        members = zipfile.ZipFile(archive_path).namelist()
        matches = [m for m in members if (target_layer in m) & (m.endswith('.shp'))]
        if not matches:
            print(f'no target layers found in {file}')
            continue
        for member in matches:
            # geopandas understands the "<zip path>!<member>" addressing scheme
            frame = gpd.read_file(archive_path + f'!{member}')
            crs = frame.crs
            print(f' crs={crs}')
            if crs not in seen_crs:
                seen_crs.append(crs)
                print(f' new crs found: {crs}')
            # reproject before collecting so all frames share one CRS
            frames.append(frame.to_crs(target_crs))
    return frames
# Merge NHN (and, where applicable, SEAK/USGS) hydrographic layers per basin
# group and write one output file per group/layer combination.
all_crs = []
# bc_groups = bc_groups[bc_groups['group_name'] == '08H'].copy()
# print(bc_groups)
target_crs = 3005
bc_groups = bc_groups.to_crs(target_crs)
bc_groups = bc_groups[bc_groups['group_name'].isin(['08B', '08C', '08D'])]
for i, row in bc_groups.iterrows():
    grp_code = row['group_name']
    sda_codes = row['WSCSDAs']
    # Fall back to the group code when no WSC sub-drainage codes are listed.
    if sda_codes == None:
        sda_codes = [row['group_code'].lower()]
        grp_code = row['group_code']
    else:
        sda_codes = [e.lower() for e in row['WSCSDAs'].split(',')]
    print(f'Starting stream vector merge on {grp_code}: {sda_codes}')
    # NHN archive names encode the sub-drainage code in their third '_' field.
    nhn_files = [e for e in os.listdir(nhn_feature_path) if e.split('_')[2][:3] in sda_codes]
    # there is one sub-sub basin region polygon that has
    # a corrupt archive and needs to be filtered out
    bad_zip_file_link = 'https://ftp.maps.canada.ca/pub/nrcan_rncan/vector/geobase_nhn_rhn/shp_en/08/nhn_rhn_08nec00_shp_en.zip'
    bad_zip_file = bad_zip_file_link.split('/')[-1]
    # skip the bad file:
    nhn_files_trimmed = [f for f in nhn_files if f != bad_zip_file]
    # NOTE(review): seak_included is never read afterwards — appears vestigial.
    seak_included = False
    for target_layer in ['WATERBODY', 'ISLAND', 'NLFLOW', 'LITTORAL',]:
        df_list = []
        group_stream_layers = []
        print(f' Starting merge of {target_layer} features.')
        output_folder = os.path.join(vector_save_path, f'{grp_code}/{target_layer}/')
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # use geojson for littoral and island (polygons)
        # use .shp for stream network (NLFLOW layer)
        output_filename = f'{grp_code}_{target_layer}_{target_crs}.geojson'
        if target_layer in ['NLFLOW']:
            output_filename = f'{grp_code}_{target_layer}_{target_crs}.shp'
        output_filepath = os.path.join(output_folder, output_filename)
        # Skip work for outputs that already exist (see the final else branch).
        if not os.path.exists(output_filepath):
            nhn_dfs = retrieve_and_group_layers(nhn_feature_path, nhn_files_trimmed, target_crs, target_layer)
            if len(nhn_dfs) == 0:
                continue
            else:
                nhn_gdf = gpd.GeoDataFrame(pd.concat(nhn_dfs, ignore_index=True), crs=target_crs)
                print(f' {len(nhn_gdf)} NHN items found.')
            # nhn_gdf['intersects_group_polygon'] = gpd.sjoin(gdf, row, how='inner', predicate='contains')
            # gdf = gdf[gdf['intersects_group_polygon']].copy()
            # print(nhn_gdf.head())
            if nhn_gdf.empty:
                continue
            else:
                df_list.append(nhn_gdf)
            # Stream networks of transborder groups also need the USGS
            # (Southeast Alaska) flowlines merged in.
            if (target_layer == 'NLFLOW') & (grp_code in seak_dict.keys()):
                huc_codes = [str(e) for e in seak_dict[grp_code]]
                print('')
                print(f' ...searching for USGS vector info for {grp_code}.')
                group_seak_files = []
                for h in huc_codes:
                    files = [f for f in SEAK_files if h in f]
                    if len(files) > 0:
                        group_seak_files += files
                # there should be as many files as there are codes,
                # otherwise a file is missing.
                assert len(group_seak_files) == len(seak_dict[grp_code])
                # get the southeast alaska hydrographic feature files
                seak_dfs = retrieve_and_group_layers(seak_path, group_seak_files, target_crs, 'NHDFlowline')
                seak_gdf = gpd.GeoDataFrame(pd.concat(seak_dfs, ignore_index=True), crs=target_crs)
                # seak_gdf = seak_gdf.iloc[:5000]
                # seak_gdf = gpd.GeoDataFrame(pd.concat([gdf,seak_layer], ignore_index=True), crs=target_crs)
                print(f' {len(seak_gdf)} SEAK items found.')
                if not seak_gdf.empty:
                    df_list.append(seak_gdf)
            if len(df_list) > 0:
                gdf = gpd.GeoDataFrame(pd.concat(df_list, ignore_index=True), crs=target_crs)
                # filter out geometries that lie outside of the group polygon
                # n_objects_before = len(gdf)
                # if target_layer == 'NLFLOW':
                #     print(' ...finding lines intersecting region polygon.')
                # n_objects_after = len(gdf)
                # print(f' {n_objects_before - n_objects_after} objects removed as non-intersecting.')
                gdf['geom_type'] = gdf.geometry.geom_type
                gdf['group_name'] = grp_code
                if target_layer == 'LITTORAL':
                    # cut out very small polygons (< 1km^2)
                    min_area = 1E6
                    gdf = gdf.to_crs(3005)
                    # Stitch shoreline segments; closed rings become islands.
                    merged = linemerge(gdf.geometry.values)
                    merged_gdf = gpd.GeoDataFrame(geometry=[merged], crs=gdf.crs).explode(index_parts=False)
                    merged_gdf['is_ring'] = merged_gdf.geometry.is_ring
                    islands = merged_gdf[merged_gdf['is_ring']]
                    islands.geometry = [Polygon(e) for e in islands.geometry]
                    islands['area'] = islands.geometry.area
                    # NOTE(review): `islands` is filtered but never written or
                    # merged back into gdf — looks unfinished; confirm intent.
                    islands = islands[islands['area'] >= min_area]
                # file extension must be .shp for whiteboxtools StreamFill function.
                if target_layer in ['ISLAND', 'LITTORAL']:
                    print(f' ...dissolving {target_layer} in {grp_code}.')
                    dissolved_regions = gdf.dissolve(by='group_name', dropna=True, aggfunc='sum')
                    # fill holes and gaps in merged polygons
                    dissolved_regions.to_file(output_filepath, driver='GeoJSON')
                else:
                    gdf.to_file(output_filepath)
                fname = output_filepath.split('/')[-1]
                print(f' ...saved {fname}')
        else:
            fpath = output_filepath.split('/')[-1]
            print(f'file {fpath} exists')
| 41.036145 | 128 | 0.613819 |
62c6b8309bc232ee86e1510ba1108c7590bf3da3 | 6,314 | py | Python | mysite_env/Lib/site-packages/pip/_internal/locations.py | Hongyil1/Django-learning-project | 13d4a5731f81a538e91d8fc7fad0587245056aea | [
"Apache-2.0"
] | null | null | null | mysite_env/Lib/site-packages/pip/_internal/locations.py | Hongyil1/Django-learning-project | 13d4a5731f81a538e91d8fc7fad0587245056aea | [
"Apache-2.0"
] | null | null | null | mysite_env/Lib/site-packages/pip/_internal/locations.py | Hongyil1/Django-learning-project | 13d4a5731f81a538e91d8fc7fad0587245056aea | [
"Apache-2.0"
] | null | null | null | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import platform
import site
import sys
import sysconfig
from distutils import sysconfig as distutils_sysconfig
from distutils.command.install import SCHEME_KEYS, install # type: ignore
from pip._internal.compat import WINDOWS, expanduser
from pip._internal.utils import appdirs
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
# Marker dropped into pip-managed build/source directories so later runs can
# tell the directory was created by pip and is safe to delete.
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
    """Drop pip's delete-marker file into *directory*.

    The marker tells future pip runs that the directory's contents were
    placed there by pip and may be removed.
    """
    marker_path = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    with open(marker_path, 'w') as fp:
        fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
    """Return True if we're running inside a virtualenv, False otherwise.

    Detects both legacy virtualenv (which sets ``sys.real_prefix``) and
    PEP 405 venvs (where ``sys.prefix`` differs from ``sys.base_prefix``).
    """
    legacy_virtualenv = hasattr(sys, 'real_prefix')
    pep405_venv = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    return legacy_virtualenv or pep405_venv
def virtualenv_no_global():
    """
    Return True if in a venv and no system site packages.
    """
    # virtualenv advertises "no global site-packages" by dropping a marker
    # file next to its patched site.py; mirror that lookup here.
    marker = os.path.join(
        os.path.dirname(os.path.abspath(site.__file__)),
        'no-global-site-packages.txt',
    )
    if running_under_virtualenv() and os.path.isfile(marker):
        return True
# Module-level path setup: computed once at import time.
# Where pip checks out editable/source packages ("src" directory).
if running_under_virtualenv():
    src_prefix = os.path.join(sys.prefix, 'src')
else:
    # FIXME: keep src in cwd for now (it is not a temporary folder)
    try:
        src_prefix = os.path.join(os.getcwd(), 'src')
    except OSError:
        # In case the current working directory has been renamed or deleted
        sys.exit(
            "The folder you are executing pip from can no longer be found."
        )
# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_path("purelib")
# This is because of a bug in PyPy's sysconfig module, see
# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths
# for more information.
if platform.python_implementation().lower() == "pypy":
    site_packages = distutils_sysconfig.get_python_lib()
try:
    # Use getusersitepackages if this is present, as it ensures that the
    # value is initialised properly.
    user_site = site.getusersitepackages()
except AttributeError:
    user_site = site.USER_SITE
user_dir = expanduser('~')
# Script/binary directories and config-file names are platform dependent.
if WINDOWS:
    bin_py = os.path.join(sys.prefix, 'Scripts')
    bin_user = os.path.join(user_site, 'Scripts')
    # buildout uses 'bin' on Windows too?
    if not os.path.exists(bin_py):
        bin_py = os.path.join(sys.prefix, 'bin')
        bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.ini'
    legacy_storage_dir = os.path.join(user_dir, 'pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
else:
    bin_py = os.path.join(sys.prefix, 'bin')
    bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.conf'
    legacy_storage_dir = os.path.join(user_dir, '.pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
    bin_py = '/usr/local/bin'
# Candidate configuration files, in increasing order of user specificity.
site_config_files = [
    os.path.join(path, config_basename)
    for path in appdirs.site_config_dirs('pip')
]
venv_config_file = os.path.join(sys.prefix, config_basename)
new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename)
def distutils_scheme(dist_name, user=False, home=None, root=None,
                     isolated=False, prefix=None):
    """
    Return a distutils install scheme.

    Builds a distutils ``install`` command for *dist_name* (honouring any
    distutils config files) and maps each of SCHEME_KEYS to the resolved
    ``install_<key>`` path. ``user``, ``home``, ``root`` and ``prefix``
    mirror the corresponding ``pip install`` options; ``isolated`` suppresses
    the user's own distutils configuration (``--no-user-cfg``).
    """
    from distutils.dist import Distribution
    scheme = {}
    if isolated:
        extra_dist_args = {"script_args": ["--no-user-cfg"]}
    else:
        extra_dist_args = {}
    dist_args = {'name': dist_name}
    dist_args.update(extra_dist_args)
    d = Distribution(dist_args)
    d.parse_config_files()
    i = d.get_command_obj('install', create=True)
    # NOTE: setting user or home has the side-effect of creating the home dir
    # or user base dir for installations during finalize_options()
    # ideally, we'd prefer a scheme class that has no side-effects.
    assert not (user and prefix), "user={} prefix={}".format(user, prefix)
    i.user = user or i.user
    if user:
        i.prefix = ""
    i.prefix = prefix or i.prefix
    i.home = home or i.home
    i.root = root or i.root
    i.finalize_options()
    for key in SCHEME_KEYS:
        scheme[key] = getattr(i, 'install_' + key)
    # install_lib specified in setup.cfg should install *everything*
    # into there (i.e. it takes precedence over both purelib and
    # platlib). Note, i.install_lib is *always* set after
    # finalize_options(); we only want to override here if the user
    # has explicitly requested it hence going back to the config
    if 'install_lib' in d.get_option_dict('install'):
        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
    if running_under_virtualenv():
        # Headers live under the venv's own include tree, keyed by dist name.
        scheme['headers'] = os.path.join(
            sys.prefix,
            'include',
            'site',
            'python' + sys.version[:3],
            dist_name,
        )
    if root is not None:
        # Re-root the headers path (strip any drive letter on Windows).
        path_no_drive = os.path.splitdrive(
            os.path.abspath(scheme["headers"]))[1]
        scheme["headers"] = os.path.join(
            root,
            path_no_drive[1:],
        )
    return scheme
| 32.379487 | 79 | 0.680868 |
9b5e9abcbf445aaaff1fd9aea7db8931a121d4c7 | 2,917 | py | Python | batchcreate.py | grecoe/amlsdummy | f83ade8f75bf972f574834eae2535cfda6e2711b | [
"MIT"
] | 1 | 2020-03-20T16:38:39.000Z | 2020-03-20T16:38:39.000Z | batchcreate.py | grecoe/amlsdummy | f83ade8f75bf972f574834eae2535cfda6e2711b | [
"MIT"
] | 1 | 2020-02-28T18:03:42.000Z | 2020-02-28T18:03:42.000Z | batchcreate.py | grecoe/amlsdummy | f83ade8f75bf972f574834eae2535cfda6e2711b | [
"MIT"
] | 2 | 2020-02-12T03:30:20.000Z | 2020-02-28T00:20:06.000Z | '''
Program Code: Create Azure Machine Learning BATCH Scoring Service
NOTE: THIS PATH IS INCOMPLETE
'''
import os
import sys
import json
from scripts.azure_utils import get_auth
from contexts.btchcontext import BatchScoringContext
from scripts.argument_utils import ExperimentType, loadConfiguration
from scripts.general_utils import JobType, JobLog
# Driver script: builds an AMLS batch-scoring pipeline step by step, logging
# each phase through JobLog and always dumping the log at the end.
job_log = JobLog(JobType.batch_scoring)
try :
    '''
    Get the program arguments and user authentication into the context
    '''
    job_log.startStep("Setup")
    programargs = loadConfiguration(ExperimentType.batch_scoring,sys.argv[1:])
    userAuth = get_auth()
    program_context = BatchScoringContext(programargs, userAuth, job_log)
    job_log.endStep("Setup")
    '''
    Get or create an AMLS workspace. If the settings identify an existing
    workspace, that workspace is retrieved.
    '''
    job_log.startStep("Workspace")
    program_context.generateWorkspace()
    job_log.endStep("Workspace")
    '''
    Because this example is using Azure Storage for both input and output
    we need to ensure that the containers in the storage account exists.
    '''
    job_log.startStep("Storage Containers")
    program_context.generateStorageContainers()
    job_log.endStep("Storage Containers")
    '''
    Upload the data file that will be processed by the batch service. In a real
    service this file would just be a pointer to data we expect to be there and
    not a full file on it's own.
    '''
    job_log.startStep("File Uploads")
    program_context.uploadDataFiles()
    job_log.endStep("File Uploads")
    '''
    Get or create batch compute. Batch compute is managed by AMLS itself. The
    nodes are brought online when a batch service starts.
    '''
    job_log.startStep("Batch Compute")
    program_context.generateCompute()
    job_log.endStep("Batch Compute")
    '''
    Create the datasets that the pipeline requires. Two are created, one for
    input and one for output.
    '''
    job_log.startStep("Data References")
    program_context.createPipelineDataReferences()
    job_log.endStep("Data References")
    '''
    Create the pipeline that will process data in a batch state. Internally,
    the pipeline will be scheduled to run every hour.
    '''
    job_log.startStep("Pipeline Creation")
    program_context.createPipeline()
    job_log.endStep("Pipeline Creation")
    '''
    Add in final details and dump the log
    '''
    job_log.addInfo("Pipeline Status: {}".format(program_context.publishedPipeline.status))
    job_log.addInfo("Pipeline Endpoint: {}".format(program_context.publishedPipeline.endpoint))
except Exception as ex:
    # Record and surface any failure; the log below still gets written.
    job_log.addInfo("An error occured executing this path")
    job_log.addInfo(str(ex))
    print("An error occured executing this path: {}".format(ex))
job_log.dumpLog()
c117880dcd45ef147bf2e915661ba2bfed4127b0 | 788 | py | Python | PyMOTW/source/urllib.robotparser/urllib_robotparser_longlived.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | PyMOTW/source/urllib.robotparser/urllib_robotparser_longlived.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2020-07-18T03:52:03.000Z | 2020-07-18T04:18:01.000Z | PyMOTW/source/urllib.robotparser/urllib_robotparser_longlived.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 2 | 2021-03-06T04:28:32.000Z | 2021-03-06T04:59:17.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
from urllib import robotparser
import time
# User agent whose permissions are queried against robots.txt.
AGENT_NAME = 'PyMOTW'
parser = robotparser.RobotFileParser()
# Using the local copy
parser.set_url('file:robots.txt')
parser.read()
parser.modified()
PATHS = [
    '/',
    '/PyMOTW/',
    '/admin/',
    '/downloads/PyMOTW-1.92.tar.gz',
]
for path in PATHS:
    # Age of the parsed robots.txt data, in whole seconds.
    age = int(time.time() - parser.mtime())
    print('age:', age, end=' ')
    if age > 1:
        # Data considered stale: re-read robots.txt and refresh the mtime.
        print('rereading robots.txt')
        parser.read()
        parser.modified()
    else:
        print()
    print('{!r:>6} : {}'.format(
        parser.can_fetch(AGENT_NAME, path), path))
    # Simulate a delay in processing
    time.sleep(1)
print()
| 19.219512 | 55 | 0.610406 |
68c29f393a8e116254b6aed9acb9c05df3283a79 | 13,155 | py | Python | HillClimbing_FieldScanning.py | ayshahilal/Hill-Climbing-on-Field-Scanning | 55e07b3700e9521cdc50ec42fcd8adb5eb1c8c51 | [
"Apache-2.0"
] | null | null | null | HillClimbing_FieldScanning.py | ayshahilal/Hill-Climbing-on-Field-Scanning | 55e07b3700e9521cdc50ec42fcd8adb5eb1c8c51 | [
"Apache-2.0"
] | null | null | null | HillClimbing_FieldScanning.py | ayshahilal/Hill-Climbing-on-Field-Scanning | 55e07b3700e9521cdc50ec42fcd8adb5eb1c8c51 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import random
import pygame
import matplotlib.pyplot as plt
# Candidate start coordinates; one pair is selected by the user at runtime.
x_start1 = 8
y_start1 = 0
x_start2 = 4
y_start2 = 8
x_start3 = 4
y_start3 = 4
P = 200  # population size: candidate paths evaluated per generation
G = 500  # number of generations (hill-climbing iterations)
u = 81  # N*N: number of moves per path on the 9x9 grid
# Plots each drone's route on screen.
def plotting(drone_path, x_start, y_start, d):
    """Trace each drone's route from its direction codes and plot all routes.

    Parameters:
        drone_path: (d, u) array of direction codes (1-8), one row per drone.
        x_start, y_start: shared start coordinates; each route is closed by
            appending the start point at the end.
        d: number of drones.

    Relies on module globals ``u`` (moves per path) and ``G`` (generation
    count, used only in the plot title). Blocks until the window is closed.
    """
    the_xs = np.zeros((d, u + 1))  # +1 column closes the loop back to start
    the_ys = np.zeros((d, u + 1))
    # Replay every drone's moves to recover the visited coordinates.
    for k in range(0, d):
        x = x_start
        y = y_start
        for i in range(0, u):
            the_xs[k][i] = x
            the_ys[k][i] = y
            select = drone_path[k][i]
            # Advance to the next cell according to the direction code.
            x, y = direction_finder(x, y, select)
        # Return to the start point so the plotted route is a closed tour.
        the_xs[k][u] = x_start
        the_ys[k][u] = y_start
    # One line per drone. The original hard-coded chained `if d==1 ... d==4`
    # checks; a loop does the same and also generalizes to any drone count.
    for k in range(d):
        plt.plot(the_xs[k, :], the_ys[k, :])
    plt.title('Jenerasyon Sayisi: {}'.format(G))
    plt.xlim([0, 8])
    plt.ylim([0, 8])
    plt.show()
def plot_w(G, watch_mu, a):
    """Plot fitness (w) against generation index for drone ``a`` (0-based).

    ``G`` here is the sequence of generation indices (the parameter shadows
    the module-level generation count) and ``watch_mu`` holds the best
    fitness seen at each generation. Blocks until the plot window is closed.
    """
    plt.title('{}. drone icin Jenerasyon-Fitness degeri degisimi'.format(a+1))
    plt.plot(G, watch_mu)
    plt.xlabel("Jenerasyon sayisi (G)")
    plt.ylabel("Fitness degeri (w)")
    #plt.xlim([0, G])
    plt.show()
# Displays the scanned area for a drone.
def GUI(drone_path, x_start, y_start, d):
    """Render drone ``d``'s scanned cells in a pygame window.

    ``d`` is the 1-based drone index: the route in ``drone_path[d-1]`` is
    replayed via draw() and its visited cells are painted in that drone's
    colour. Blocks until the user closes the window.
    """
    matrix = np.zeros((9, 9))
    # Colour palette (one accent colour per drone index).
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    GREEN = (170, 255, 30)
    RED = (255, 0, 0)
    renk = (154,73,99)
    renk2 = (100, 0, 255)
    # Cell width/height and the margin between cells, in pixels.
    WIDTH = 20
    HEIGHT = 20
    MARGIN = 5
    # Replay the selected drone's direction codes; draw() returns a 9x9 grid
    # with 1 in every visited cell.
    if d == 1:
        matrix1 = draw(drone_path[0], x_start, y_start)
    if d == 2:
        matrix2 = draw(drone_path[1], x_start, y_start)
    if d == 3:
        matrix3 = draw(drone_path[2], x_start, y_start)
    if d == 4:
        matrix4 = draw(drone_path[3], x_start, y_start)
    # Initialize pygame
    pygame.init()
    # Set the HEIGHT and WIDTH of the screen
    WINDOW_SIZE = [230, 230]
    screen = pygame.display.set_mode(WINDOW_SIZE)
    # Set title of screen
    if d == 1:
        pygame.display.set_caption("DRONE 1 AREA")
    if d == 2:
        pygame.display.set_caption("DRONE 2 AREA")
    if d == 3:
        pygame.display.set_caption("DRONE 3 AREA")
    if d == 4:
        pygame.display.set_caption("DRONE 4 AREA")
    # Loop until the user clicks the close button.
    done = False
    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    # -------- Main Program Loop -----------
    while not done:
        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                done = True  # Flag that we are done so we exit this loop
        # Set the screen background
        screen.fill(BLACK)
        # Pick this drone's colour and coverage grid.
        if d == 1:
            temp_color = GREEN
            matrix = matrix1
        if d == 2:
            temp_color = renk
            matrix = matrix2
        if d == 3:
            temp_color = RED
            matrix = matrix3
        if d == 4:
            temp_color = renk2
            matrix = matrix4
        # Draw the grid: visited cells in the drone colour, others white.
        for row in range(9):
            for column in range(9):
                color = WHITE
                if matrix[row][column] == 1:
                    color = temp_color
                pygame.draw.rect(screen,
                                 color,
                                 [(MARGIN + WIDTH) * column + MARGIN,
                                  (MARGIN + HEIGHT) * row + MARGIN,
                                  WIDTH,
                                  HEIGHT])
        # Limit to 60 frames per second
        clock.tick(60)
        # Go ahead and update the screen with what we've drawn.
        pygame.display.flip()
    # Be IDLE friendly. If you forget this line, the program will 'hang'
    # on exit.
    pygame.quit()
# Replays a route's direction codes from the given start point and marks every visited cell with 1.
def draw(B, x_start, y_start):
    """Mark every cell visited by a route.

    Replays the ``u`` direction codes in ``B`` starting from
    (x_start, y_start) and returns a 9x9 array holding 1 for each visited
    cell and 0 elsewhere. Uses the module global ``u`` (route length).
    """
    visited = np.zeros((9, 9))
    x, y = x_start, y_start
    # The start cell counts as visited.
    visited[x][y] = 1
    for i in range(u):
        # Step to the next cell and mark it.
        x, y = direction_finder(x, y, B[i])
        visited[x][y] = 1
    return visited
# Moves the drone along each of the P copied-and-mutated (~mu) candidate routes.
def generate_random_path(individuals, x_start, y_start):
    """Simulate every candidate route, repairing out-of-grid moves in place.

    Each of the P candidate routes in ``individuals`` is walked from
    (x_start, y_start); any move that would leave the 9x9 grid is replaced
    (in ``individuals``) by a random legal direction. Two fitness scores are
    computed per route:

      * fitness1: fraction of the 81 cells visited (coverage, higher better).
      * fitness2: 100 / turning cost, so smoother routes score higher.

    Returns (fitness1, fitness2, individuals). Uses module globals ``P``
    (population size) and ``u`` (route length).
    """
    fitness1 = np.zeros(P)
    fitness2 = np.zeros(P)
    # Score each of the P routes independently.
    for i in range(0, P):
        x = x_start
        y = y_start
        # Fresh coverage grid for every route.
        matrix = np.zeros((9, 9))
        # The start cell counts as visited.
        matrix[x][y] = 1
        # Walk the u direction codes of route i.
        for j in range(0, u):
            # Direction of this move.
            select = individuals[i][j]
            flag = 0
            # If the move would leave the grid, keep drawing random
            # replacement directions until a legal one comes up.
            while is_there_an_obstacle(x, y, select):
                select = random.randint(1, 8)
                flag = 1
            # Apply the (possibly repaired) move and mark the cell.
            x, y = direction_finder(x, y, select)
            matrix[x][y] = 1
            # Persist the repaired direction back into the candidate.
            if flag == 1:
                individuals[i][j] = select
        # Coverage score: visited cells / total cells.
        f1 = sum(map(sum, matrix)) / 81
        fitness1[i] = f1
        route = individuals[i]
        # Smoothness score: inverse of the turning cost (minimized cost).
        f2 = 100/cost(route)
        fitness2[i] = f2
    return fitness1, fitness2, individuals
# Picks the best of the P candidate routes using the fitness functions.
def find_best(B, best_w, f1, f2, mu, individuals):
    """Select the best of the P candidates and adapt the mutation rate.

    Parameters:
        B: current best route (sequence of u direction codes 1-8).
        best_w: best fitness value seen so far.
        f1, f2: per-candidate fitness arrays (coverage and smoothness).
        mu: current mutation rate.
        individuals: (P, u) array of candidate routes.

    Returns:
        (B, mu, best_w, index): possibly-updated best route, adapted mutation
        rate, best fitness so far, and the index of this generation's winner.

    Uses module globals ``P`` (population size) and ``u`` (route length).
    """
    mu_start = 0.01
    mu_dec = 0.99   # decay factor applied to mu on improvement
    mu_inc = 1.01   # growth factor applied to mu on stagnation
    tk_max = 5      # stagnation limit before a random restart
    tk = 1
    # Find the candidate with the highest mean of the two fitness scores.
    w = -1
    index = 0  # defensive default; fitness values are non-negative, so the
               # loop always assigns it when P > 0
    for i in range(P):
        score = (f1[i] + f2[i]) / 2
        if score > w:
            w = score
            index = i
    if w > best_w:
        # Improvement: adopt the winner and cool the mutation rate down.
        tk = 0
        best_w = w
        B = individuals[index]
        if mu <= mu_start:
            mu = mu * mu_dec
        else:
            mu = mu_start  # once improvement resumes, restart from mu_start
    else:
        # Stagnation: heat the mutation rate up.
        tk += 1
        mu = mu * mu_inc
        # NOTE(review): tk is re-initialized to 1 on every call, so with
        # tk_max = 5 this restart branch is unreachable as written; kept
        # (and fixed) for when tk is made persistent across generations.
        if tk == tk_max:
            tk = 0
            mu = mu_start
            # BUG FIX: the original assigned B = np.zeros(u) and then called
            # B.append(...), which raises AttributeError on an ndarray.
            # Build the random restart route as a plain list instead,
            # matching how B is created in the main loop.
            B = [random.randint(1, 8) for _ in range(u)]
    return B, mu, best_w, index
# Higher cost means worse turning behaviour; the objective is to minimize cost.
def cost(route):
    """Return the turning cost of a route (lower is better).

    Walks consecutive pairs of direction codes: a full reversal (codes
    differing by exactly 4, i.e. opposite headings) costs 4, any other
    transition costs 1.

    Generalized from the original hard-coded ``range(0, 80)`` to
    ``len(route) - 1`` so routes of any length are scored over all of their
    consecutive pairs; behavior is identical for the standard 81-step routes.
    """
    total = 0
    for i in range(len(route) - 1):
        if abs(route[i] - route[i + 1]) == 4:
            total += 4  # reversal: heavily penalized
        else:
            total += 1  # ordinary move or gentle turn
    return total
# Prevents the drone from leaving the field.
def is_there_an_obstacle(x, y, direction):
    """Return True when moving from (x, y) in ``direction`` leaves the 9x9 grid."""
    # Coordinates after taking one step in the given direction.
    nx, ny = direction_finder(x, y, direction)
    inside = 0 <= nx <= 8 and 0 <= ny <= 8
    return not inside
# Returns the updated x and y after one move.
def direction_finder(x, y, direction):
    """Return the (x, y) coordinates after one move in ``direction``.

    Directions 1-8 are the eight grid neighbours; any other value yields
    None, matching the original implementation's implicit fall-through.
    """
    offsets = {
        1: (-1, 0),
        2: (-1, -1),
        3: (0, -1),
        4: (1, -1),
        5: (1, 0),
        6: (1, 1),
        7: (0, 1),
        8: (-1, 1),
    }
    step = offsets.get(direction)
    if step is None:
        return None
    dx, dy = step
    return x + dx, y + dy
# Copies the given route P times and mutates ~mu of the entries.
def create_P_path(B, mu):
    """Clone route ``B`` into P candidates and mutate ~mu of their entries.

    Every position of every copy is independently replaced with a fresh
    random direction code (1-8) with probability ``mu``. Uses module globals
    ``P`` (population size) and ``u`` (route length). Returns a (P, u)
    float array of candidate routes.
    """
    # P identical float copies of the base route.
    clones = np.tile(np.asarray(B, dtype=float), (P, 1))
    # Random replacement directions, drawn in the same row-major order as
    # the original nested loops (keeps the RNG call sequence identical).
    candidates = np.array(
        [[random.randint(1, 8) for _ in range(u)] for _ in range(P)],
        dtype=float,
    )
    # Bernoulli(mu) gate deciding which entries get mutated.
    gate = np.random.random_sample((P, u))
    mask = gate < mu
    clones[mask] = candidates[mask]
    return clones
if __name__ == "__main__":
    # Candidate start points; one pair is chosen below by 1-based index.
    x_s = [x_start1, x_start2, x_start3]
    y_s = [y_start1, y_start2, y_start3]
    d = int(input("Kac drone'la tarama yapilsin?(1-2-4)"))
    drone_path = np.zeros((d, u))
    x = int(input("Bir baslangic noktasi secin (1-2-3)"))
    watch_w = np.zeros(G)
    # Generation indices for the fitness-vs-generation plot.
    # BUG FIX: the original bound this to the name `list`, shadowing the
    # builtin `list` for the rest of the program.
    generations = list(range(0, G))
    if d > 4 or d < 1 or x > 3 or x < 1:
        print("Yanlis secim yaptiniz")
        exit()
    # Run an independent hill climb for each drone.
    for a in range(0, d):
        mu = 0.01  # mutation rate
        best_w = 0
        # Random initial route of u direction codes (1-8).
        B = [random.randint(1, 8) for _ in range(u)]
        for k in range(0, G):
            # Copy B into P candidates with ~mu of the entries mutated.
            individuals = create_P_path(B, mu)
            # Simulate each candidate (repairing illegal moves) and score it.
            fitness1, fitness2, individuals = generate_random_path(individuals, x_s[x-1], y_s[x-1])
            # Keep the best candidate and adapt the mutation rate.
            B, mu, best_w, index = find_best(B, best_w, fitness1, fitness2, mu, individuals)
            # Track the best fitness seen so far for plotting.
            watch_w[k] = best_w
        # Record this drone's best route.
        drone_path[a] = B
        #print("Fitness Function: w={} index={} mu={}".format(best_w, index, mu))
        print("Best Route is", B)
        # Plot fitness (w) versus generation for this drone.
        plot_w(generations, watch_w, a)
    print("----{} ITERASYON YAPILDI----".format(G))
    # Draw the final routes.
    plotting(drone_path, x_s[x-1], y_s[x-1], d)
    # Show the scanned area window for each drone (1-based index).
    for i in range(1, d+1):
        GUI(drone_path, x_s[x-1], y_s[x-1], i)
72801e9b73bc75367190ab6f85d583bde96fd0a8 | 3,650 | py | Python | testing/testing_module.py | ajbradberry96/SeniorDesignAntiNN | c88cbdf699e68102cb705d8a7000616922cb73ee | [
"MIT"
] | null | null | null | testing/testing_module.py | ajbradberry96/SeniorDesignAntiNN | c88cbdf699e68102cb705d8a7000616922cb73ee | [
"MIT"
] | 12 | 2019-12-16T21:30:53.000Z | 2022-03-11T23:40:28.000Z | testing/testing_module.py | ajbradberry96/SeniorDesignAntiNN | c88cbdf699e68102cb705d8a7000616922cb73ee | [
"MIT"
] | null | null | null | import tensorflow as tf
import pandas as pd
from testing import forward_model
from testing import detect_adversarial
from testing import image_processing
import matplotlib
import seaborn
from matplotlib import pyplot as plt
from urllib.request import urlretrieve
import PIL
import os
# Keep TensorFlow quiet except for real errors.
tf.logging.set_verbosity(tf.logging.ERROR)
sess = tf.InteractiveSession()
img_path = "media/dataset/"
# Disabled: end-to-end adversarial-detection pass over the image dataset.
#forward_model.init(sess)
#file_arr = os.listdir(img_path)
#pil_imgs = [PIL.Image.open(img_path+x) for x in file_arr]
#detect_adversarial.detect_test(pil_imgs,file_arr,separate_advs=False)
# Load per-image cosine-similarity scores from cosine_data.csv, grouped by
# distortion type, separating normal images from adversarial ("adv") ones,
# then compare the two populations with bar, box and KDE plots.
distort_ops = ["warp","colorshift","saturate","noise","average"]
dist_dict = {x:[] for x in distort_ops}
for op in distort_ops:
    dist_dict["adv"+op] = []
with open("cosine_data.csv","r") as f:
    header = f.readline()
    for line in f:
        # Expected row format: <image_name_with_op>,<cosine_score>,...
        line_parts = line.strip().split(",")
        for op in distort_ops:
            if op in line_parts[0]:
                if "adv" in line_parts[0]:
                    dist_dict["adv"+op].append(float(line_parts[1]))
                else:
                    dist_dict[op].append(float(line_parts[1]))
# Paired bars: mean score for normal (blue) vs adversarial (red) per op.
xoff=0
bar_xs = []
bar_heights = []
bar_names = []
for op in distort_ops:
    bar_xs.append(xoff)
    bar_heights.append(sum(dist_dict[op])/len(dist_dict[op]))
    bar_names.append(op)
    xoff+=0.5
    bar_xs.append(xoff)
    bar_heights.append(sum(dist_dict["adv"+op])/len(dist_dict["adv"+op]))
    bar_names.append("adv " + op)
    xoff+=1
plt.figure()
plt.bar(bar_xs, bar_heights, 0.4, color=["b","r"]*len(distort_ops), tick_label=bar_names)
# Box plot over the same normal/adversarial value sets, in bar order.
val_sets = []
for op in distort_ops:
    val_sets.append(dist_dict[op])
    val_sets.append(dist_dict["adv" + op])
plt.figure()
plt.boxplot(val_sets, labels=bar_names)
# Per-op KDE overlays: normal in blue, adversarial in red.
for i in range(len(val_sets)//2):
    plt.figure()
    plt.title(bar_names[i*2])
    seaborn.kdeplot(val_sets[i*2],color="b")
    print(distort_ops[i])
    print(val_sets[i*2+1])
    seaborn.kdeplot(val_sets[i*2+1],color="r")
plt.show()
# Fit a logistic-regression detector on per-image feature vectors (one mean
# cosine score per distortion op) and plot its ROC curve on the training set.
from sklearn.linear_model import LogisticRegression
#distort_ops = ["warp","colorshift","saturate","noise","average"]
distort_ops = ["warp","colorshift","saturate","noise"]
#distort_ops = ["average"]
X_data_dict_norm = {}
X_data_dict_adv = {}
with open("cosine_data.csv","r") as f:
    header = f.readline()
    for line in f:
        line_parts = line.strip().split(",")
        for i, op in enumerate(distort_ops):
            if op in line_parts[0]:
                use_dict = X_data_dict_adv if "adv" in line_parts[0] else X_data_dict_norm
                # Base image name = row name with the op suffix stripped.
                base_name = line_parts[0].split("_" + op)[0]
                if base_name not in use_dict:
                    use_dict[base_name] = [0] * len(distort_ops)
                # NOTE(review): the 1/3 factor presumably averages over three
                # repetitions of each op per image — confirm against the CSV.
                use_dict[base_name][i] += float(line_parts[1]) * (1/3)
# Label normal images 0 and adversarial images 1.
X_data = []
Y_data = []
for entry in X_data_dict_norm:
    X_data.append(X_data_dict_norm[entry])
    Y_data.append(0)
for entry in X_data_dict_adv:
    X_data.append(X_data_dict_adv[entry])
    Y_data.append(1)
lr = LogisticRegression()
lr.fit(X_data, Y_data)
# P(adversarial) on the training data itself (no held-out split here).
predictions = []
for entry in X_data:
    predictions.append(lr.predict_proba([entry])[0][1])
print(predictions)
#AVG_data = [x[0] for x in X_data]
#dat_frame = pd.DataFrame({"X":AVG_data,"Y":Y_data})
#plt.figure()
#seaborn.regplot("X","Y",dat_frame,logistic=True)
import sklearn.metrics as metrics
fpr, tpr, threshold = metrics.roc_curve(Y_data, predictions)
roc_auc = metrics.auc(fpr, tpr)
plt.figure()
plt.title('Receiver Operating Characteristic (Multiple)')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# NOTE(review): stray no-op expression below (an unbound method reference,
# never called) — appears to be leftover scratch code.
LogisticRegression().fit
"""
Questions to ask
Which distortions are the best discriminators?
AUC curve
PCA of images"""
b88b864188d20f952e3f59fdea617b91e22e7535 | 896 | py | Python | src/util/openpose.py | peabody124/hmr | 8a1c4bf81340b1489077ed3f9057fcf54e4b63ae | [
"MIT"
] | null | null | null | src/util/openpose.py | peabody124/hmr | 8a1c4bf81340b1489077ed3f9057fcf54e4b63ae | [
"MIT"
] | null | null | null | src/util/openpose.py | peabody124/hmr | 8a1c4bf81340b1489077ed3f9057fcf54e4b63ae | [
"MIT"
] | null | null | null | """
Script to convert openpose output into bbox
"""
import json
import numpy as np
def read_json(json_path):
    """Load an OpenPose JSON file and return one (J, 3) array per person.

    Each row of a returned array is (x, y, confidence) for one joint.
    """
    with open(json_path) as fh:
        detections = json.load(fh)
    return [
        np.array(person['pose_keypoints_2d']).reshape(-1, 3)
        for person in detections['people']
    ]
def get_bbox(json_path, vis_thr=0.2):
    """Compute a person-centric crop from an OpenPose detection file.

    Picks the detection with the highest mean keypoint confidence, then
    derives a square-ish bounding description from its visible joints.

    Parameters
    ----------
    json_path : str
        Path to an OpenPose output JSON file.
    vis_thr : float
        Confidence threshold above which a keypoint counts as visible.

    Returns
    -------
    (scale, center) : (float, np.ndarray)
        Scale factor normalizing the person to ~150 px, and the (x, y)
        midpoint of the visible keypoints.

    Raises
    ------
    ValueError
        If all visible keypoints coincide (zero person height), which
        would otherwise divide by zero.
    """
    kps = read_json(json_path)
    # Pick the most confident detection.
    scores = [np.mean(kp[kp[:, 2] > vis_thr, 2]) for kp in kps]
    kp = kps[np.argmax(scores)]
    vis = kp[:, 2] > vis_thr
    vis_kp = kp[vis, :2]
    min_pt = np.min(vis_kp, axis=0)
    max_pt = np.max(vis_kp, axis=0)
    person_height = np.linalg.norm(max_pt - min_pt)
    if person_height == 0:
        # Bug fix: the original dropped into an interactive ipdb debugger
        # here, which hangs any non-interactive run. Fail loudly instead.
        raise ValueError(
            'Degenerate detection in %s: all visible keypoints coincide '
            '(person height is 0).' % json_path)
    center = (min_pt + max_pt) / 2.
    scale = 150. / person_height
    return scale, center
| 24.888889 | 65 | 0.607143 |
6f14df7c8690cb3d31340b7852ef65f620cd0211 | 1,992 | py | Python | labgraph/graphs/tests/test_config.py | Yunusbcr/labgraph | a00ae7098b7b0e0eda8ce2e7e62dae86854616fb | [
"MIT"
] | 124 | 2021-07-14T21:25:59.000Z | 2022-03-08T20:40:16.000Z | labgraph/graphs/tests/test_config.py | Yunusbcr/labgraph | a00ae7098b7b0e0eda8ce2e7e62dae86854616fb | [
"MIT"
] | 46 | 2021-07-16T18:41:11.000Z | 2022-03-31T20:53:00.000Z | labgraph/graphs/tests/test_config.py | Yunusbcr/labgraph | a00ae7098b7b0e0eda8ce2e7e62dae86854616fb | [
"MIT"
] | 22 | 2021-07-16T18:34:56.000Z | 2022-03-31T15:12:06.000Z | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from enum import Enum, IntEnum
import pytest
from ...util import LabgraphError
from ..config import Config
from ..node import Node
class MyIntEnum(IntEnum):
    """Integer-valued enum fixture used to exercise config parsing."""

    A = 0
    B = 1
class MyStrEnum(str, Enum):
    """String-valued enum fixture used to exercise config parsing."""

    A = "0"
    B = "1"
class MyConfig(Config):
    """Config fixture declaring one field of every supported scalar type."""

    int_field: int
    str_field: str
    bool_field: bool
    float_field: float
    int_enum_field: MyIntEnum
    str_enum_field: MyStrEnum
class MyNode(Node):
    """Node fixture whose setup() reads config so tests can probe configuration state."""

    config: MyConfig

    def setup(self) -> None:
        # Accessing .config is expected to raise if configure() was never called.
        self.config
def test_config_from_args() -> None:
    """
    Test that we can build a config from command-line arguments.
    """
    expected_config = {
        "int_field": 5,
        "str_field": "hello",
        "float_field": 0.5,
        "int_enum_field": MyIntEnum.B,
        "str_enum_field": MyStrEnum.A,
        "bool_field": True,
    }
    argv = [
        "--int-field", "5",
        "--str-field", "hello",
        "--float-field", "0.5",
        "--int-enum-field", "B",
        "--str-enum-field", "A",
        "--bool-field",
    ]
    parsed = MyConfig.fromargs(args=argv)
    assert parsed.asdict() == expected_config
def test_node_config() -> None:
    """
    Test that we can provide config to a node.
    """
    node = MyNode()
    cfg = MyConfig(
        int_field=5,
        str_field="hello",
        float_field=0.5,
        int_enum_field=MyIntEnum.B,
        str_enum_field=MyStrEnum.A,
        bool_field=True,
    )
    node.configure(cfg)
    node.setup()
def test_node_no_config() -> None:
    """
    Test that accessing config on an unconfigured node throws a descriptive exception.
    """
    unconfigured = MyNode()
    with pytest.raises(LabgraphError) as err:
        unconfigured.setup()
    expected = (
        "Configuration not set. Call MyNode.configure() to set the configuration."
    )
    assert expected in str(err.value)
| 20.121212 | 86 | 0.576305 |
96777e5a44dcc75daf26d338b0f9e0ea3ecd2241 | 1,847 | py | Python | selfdrive/car/nissan/values.py | wocsor-com/openpilot | d1372c4890fd45d785789d94d5ed39bc9c8b80b7 | [
"MIT"
] | 3 | 2020-07-20T10:42:55.000Z | 2021-03-25T21:24:38.000Z | selfdrive/car/nissan/values.py | wocsor-com/openpilot | d1372c4890fd45d785789d94d5ed39bc9c8b80b7 | [
"MIT"
] | 4 | 2020-04-12T21:34:03.000Z | 2020-04-15T22:22:15.000Z | selfdrive/car/nissan/values.py | wocsor-com/openpilot | d1372c4890fd45d785789d94d5ed39bc9c8b80b7 | [
"MIT"
] | 3 | 2019-03-06T20:58:50.000Z | 2019-05-21T01:01:37.000Z | from selfdrive.car import dbc_dict
STEER_THRESHOLD = 1.0
class CAR:
XTRAIL = "NISSAN X-TRAIL 2017"
LEAF = "NISSAN LEAF 2018"
FINGERPRINTS = {
CAR.XTRAIL: [
{
2: 5, 42: 6, 346: 6, 347: 5, 348: 8, 349: 7, 361: 8, 386: 8, 389: 8, 397: 8, 398: 8, 403: 8, 520: 2, 523: 6, 548: 8, 645: 8, 658: 8, 665: 8, 666: 8, 674: 2, 682: 8, 683: 8, 689: 8, 723: 8, 758: 3, 768: 2, 783: 3, 851: 8, 855: 8, 1041: 8, 1055: 2, 1104: 4, 1105: 6, 1107: 4, 1108: 8, 1111: 4, 1227: 8, 1228: 8, 1247: 4, 1266: 8, 1273: 7, 1342: 1, 1376: 6, 1401: 8, 1474: 2, 1497: 3, 1821: 8, 1823: 8, 1837:8, 2015: 8, 2016: 8, 2024: 8
},
{
2: 5, 42: 6, 346: 6, 347: 5, 348: 8, 349: 7, 361: 8, 386: 8, 389: 8, 397: 8, 398: 8, 403: 8, 520: 2, 523: 6, 527: 1, 548: 8, 637: 4, 645: 8, 658: 8, 665: 8, 666: 8, 674: 2, 682: 8, 683: 8, 689: 8, 723: 8, 758: 3, 768: 6, 783: 3, 851: 8, 855: 8, 1041: 8, 1055: 2, 1104: 4, 1105: 6, 1107: 4, 1108: 8, 1111: 4, 1227: 8, 1228: 8, 1247: 4, 1266: 8, 1273: 7, 1342: 1, 1376: 6, 1401: 8, 1474: 8, 1497: 3,1534: 6, 1792: 8, 1821: 8, 1823: 8, 1837: 8, 1872: 8, 1937: 8, 1953: 8, 1968: 8, 2015: 8, 2016: 8, 2024: 8
},
],
CAR.LEAF: [
{
2: 5, 42: 6, 264: 3, 361: 8, 372: 8, 384: 8, 389: 8, 403: 8, 459: 7, 460: 4, 470: 8, 520: 1, 569: 8, 581: 8, 634: 7, 640: 8, 644: 8, 645: 8, 646: 5, 658: 8, 682: 8, 683: 8, 689: 8, 724: 6, 758: 3, 761: 2, 783: 3, 852: 8, 853: 8, 856: 8, 861: 8, 944: 1, 976: 6, 1008: 7, 1011: 7, 1057: 3, 1227: 8, 1228: 8, 1261: 5, 1342: 1, 1354: 8, 1361: 8, 1459: 8, 1477: 8, 1497: 3, 1549: 8, 1573: 6, 1821: 8, 1837: 8, 1856: 8, 1859: 8, 1861: 8, 1864: 8, 1874: 8, 1888: 8, 1891: 8, 1893: 8, 1906: 8, 1947: 8, 1949: 8, 1979: 8, 1981: 8, 2016: 8, 2017: 8, 2021: 8
},
],
}
DBC = {
CAR.XTRAIL: dbc_dict('nissan_x_trail_2017', None),
CAR.LEAF: dbc_dict('nissan_leaf_2018', None),
}
| 61.566667 | 553 | 0.532756 |
3468713eb8cb8e07ec297d2f18c3f83a44c410ad | 15,037 | py | Python | foundation/office.py | futursolo/furtherland | 33ead7d4e651ed3154c8047e3bdc4bb2871e4468 | [
"Apache-2.0"
] | null | null | null | foundation/office.py | futursolo/furtherland | 33ead7d4e651ed3154c8047e3bdc4bb2871e4468 | [
"Apache-2.0"
] | null | null | null | foundation/office.py | futursolo/furtherland | 33ead7d4e651ed3154c8047e3bdc4bb2871e4468 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Futur Solo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tornado.web import *
from tornado.gen import *
from foundation.place import PlacesOfInterest, slug_validation, visitor_only
from collections import OrderedDict
import pyotp
import json
import bcrypt
import datetime
import time
class ManagementOffice(PlacesOfInterest):
    """Shared helpers for the admin ("management") request handlers."""

    def management_url(self, path, include_host=None, **kwargs):
        """Build a static-asset URL under the management/ directory."""
        return RequestHandler.static_url(
            self, "management/" + path,
            include_host=include_host, **kwargs)

    def management_render(self, page):
        """Render a template from the management/ template directory."""
        # Expose the URL helper to templates before rendering.
        self.render_list["management_url"] = self.management_url
        self.render("management/" + page, nutrition=False)
class CheckinOffice(ManagementOffice):
    """Sign-in page: renders the form and validates username/password/TOTP."""

    @coroutine
    @visitor_only
    def get(self):
        # Surface (then clear) the outcome of the previous attempt so the
        # template can show "wrong password" / "wrong code" feedback once.
        self.render_list["checkin_status"] = self.get_scookie(
            "checkin_status", arg_type="hash", default="ok")
        self.clear_cookie("checkin_status")
        self.management_render("checkin.htm")

    @coroutine
    @visitor_only
    def post(self):
        username = self.get_arg("username", arg_type="username")
        password = self.get_arg("password", arg_type="hash")
        two = self.get_arg("two", arg_type="number")
        remember = self.get_arg("remember", arg_type="boolean")

        if not (username and password and two):
            self._reject("password")
            return

        user = yield self.get_user(username=username)
        # To generate a stored password hash:
        #   bcrypt.hashpw(
        #       hashlib.sha256(password.encode()).hexdigest().encode(),
        #       bcrypt.gensalt())
        if not user:
            self._reject("password")
            return

        # checkpw is bcrypt's constant-time comparison; the previous
        # hashpw(...) != stored comparison leaked timing information.
        if not bcrypt.checkpw(password.encode("utf-8"),
                              user["password"].encode()):
            self._reject("password")
            return

        # Accept the current 30s TOTP window plus one window either side
        # (clock-skew tolerance); valid_window=1 replicates the previous
        # manual now / now-30s / now+30s checks.
        if not pyotp.TOTP(user["otp_key"]).verify(two, valid_window=1):
            self._reject("two")
            return

        expires_days = 180 if remember else None
        # agent_auth binds this device id to the current password hash so
        # a password change invalidates every remembered session.
        device_id = self.get_random(32)
        agent_auth = self.hash((device_id + user["password"]), "sha256")
        self.set_scookie("user_id", user["_id"], expires_days=expires_days,
                         httponly=True)
        self.set_scookie("device_id", device_id, expires_days=expires_days,
                         httponly=True)
        self.set_scookie("agent_auth", agent_auth,
                         expires_days=expires_days, httponly=True)
        self.redirect(self.next_url)

    def _reject(self, reason):
        """Record why checkin failed ("password" or "two") and bounce back to the form."""
        self.set_scookie("checkin_status", reason, expires_days=None)
        self.redirect("/management/checkin")
class CheckoutOffice(ManagementOffice):
    """Sign-out handler: drops every session cookie, then redirects."""

    @authenticated
    def get(self):
        for cookie_name in ("user_id", "device_id", "agent_auth"):
            self.clear_cookie(cookie_name)
        self.redirect(self.next_url)
class MainOffice(ManagementOffice):
    """Admin dashboard page for a section slug (and optional sub-slug)."""

    @coroutine
    @authenticated
    def get(self, slug, sub_slug=""):
        # Both path pieces must be hash-safe; sub_slug is only checked
        # when present (short-circuit mirrors the validation order).
        if not self.value_validation("hash", slug) or (
                sub_slug and not self.value_validation("hash", sub_slug)):
            raise HTTPError(404)
        self.render_list["slug"] = slug
        self.render_list["sub_slug"] = sub_slug
        self.management_render("office.htm")
class ActionOffice(ManagementOffice):
    """AJAX dispatcher for the admin panel.

    POST {"action": <name>} routes to the same-named coroutine below; every
    handler replies with a JSON payload via self.finish().

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump -- verify block nesting against the upstream repository.
    """

    @coroutine
    @authenticated
    def post(self):
        # Dispatch on the "action" argument to a same-named method.
        # NOTE(review): if "action" is missing (None), hasattr(self, None)
        # raises TypeError rather than the intended HTTPError(500).
        action = self.get_arg("action", default=None, arg_type="link")
        if hasattr(self, action):
            yield getattr(self, action)()
        else:
            raise HTTPError(500)

    @coroutine
    def load_public(self):
        """Return all uploaded public files, newest first, as a JSON list."""
        book = self.memories.select("Publics")
        book.find({"type": "file"}).sort([["time", False]])
        book.length(0, force_dict=True)
        yield book.do()
        result = book.result()
        self.finish(json.dumps(list(result.values())))

    @coroutine
    def save_public(self):
        """Store uploaded files under static/public/files/<timestamp>/ and index them."""
        public_path = os.path.join(
            os.path.join(
                self.settings["static_path"], "public"), "files")
        url_base = "/spirit/public/files"
        if self.request.files:
            for f in self.request.files["files[]"]:
                book = self.memories.select("Publics")
                # Each upload gets its own timestamped directory.
                current_time = int(time.time())
                current_path = os.path.join(public_path, str(
                    current_time))
                current_url = os.path.join(url_base, str(
                    current_time))
                if not os.path.exists(current_path):
                    os.makedirs(current_path)
                filename = f["filename"]
                current_file_path = os.path.join(
                    current_path, filename)
                current_file_url = os.path.join(
                    current_url, filename)
                with open(current_file_path, "wb") as file:
                    file.write(f["body"])
                # Index record for the stored file.
                file_info = OrderedDict()
                file_info["time"] = current_time
                file_info["type"] = "file"
                file_info["content_type"] = None
                file_info["filename"] = filename
                file_info["filepath"] = current_file_path
                file_info["fileurl"] = current_file_url
                file_info["email_md5"] = None
                file_info["_id"] = yield self.issue_id("Publics")
                book.add(file_info)
                yield book.do()
        else:
            raise HTTPError(500)
        self.finish(json.dumps({"status": True}))

    @coroutine
    def count(self):
        """Return site-wide counters as JSON."""
        info = yield self.get_count()
        self.finish(json.dumps(info))

    @coroutine
    def save_working(self):
        """Create, edit, or erase a writing/page depending on working_method."""
        working_type = self.get_arg("working_type", arg_type="hash")
        if working_type == "writing":
            book = self.memories.select("Writings")
        elif working_type == "page":
            book = self.memories.select("Pages")
        else:
            raise HTTPError(500)
        working_method = self.get_arg("working_method", arg_type="hash")
        working_id = self.get_arg("working_id", arg_type="number")

        def make_working():
            # Assemble a writing/page record from the request arguments.
            working = {}
            working["title"] = self.get_arg("working_title", arg_type="origin")
            working["content"] = self.get_arg("working_content",
                                              arg_type="origin")
            working["time"] = self.get_arg("working_time", arg_type="number")
            working["publish"] = self.get_arg("working_publish",
                                              arg_type="boolean")
            working["slug"] = self.get_arg("working_slug", arg_type="slug")
            working["author"] = self.current_user["_id"]
            if not working["slug"]:
                raise HTTPError(500)
            return working

        def check_slug(slug):
            # NOTE(review): the yield makes this a generator that is never
            # iterated by its callers ("if not check_slug(...)"), so the
            # slug-uniqueness guard is effectively inert -- it likely needs
            # @coroutine and a "yield" at the call sites.
            book.find({"slug": slug})
            yield book.do()
            slug_result = book.result()
            if slug_result and (
                    slug_result is not False and
                    slug_result["_id"] != working_id):
                self.finish(json.dumps({"succeed": False, "reason": "slug"}))
                return False
            return True

        if working_method == "new":
            working = make_working()
            if not check_slug(working["slug"]):
                return
            if working_type == "writing":
                working_id = yield self.issue_id("Writings")
            elif working_type == "page":
                working_id = yield self.issue_id("Pages")
            else:
                raise HTTPError(500)
            working["_id"] = working_id
            book.add(working)
        elif working_method == "edit":
            working = make_working()
            if not check_slug(working["slug"]):
                return
            book.set({"_id": working_id}, working)
        elif working_method == "erase":
            @coroutine
            def erase_reply(working_id):
                # Cascade delete: drop replies attached to the erased writing.
                book = self.memories.select("Replies")
                book.erase({"writing_id": working_id})
                yield book.do()
            if working_type == "writing":
                yield erase_reply(working_id)
            book.erase({"_id": working_id})
        else:
            raise HTTPError(500)
        yield book.do()
        self.finish(json.dumps({
            "succeed": True,
            "id": working_id,
        }))

    @coroutine
    def load_working(self):
        """Return one writing or page record by id as JSON."""
        working_type = self.get_arg("type", arg_type="hash")
        working_id = self.get_arg("id", arg_type="number")
        if working_type == "writing":
            book = self.memories.select("Writings")
        elif working_type == "page":
            book = self.memories.select("Pages")
        else:
            raise HTTPError(500)
        book.find({"_id": working_id})
        yield book.do()
        working = book.result()
        self.finish(json.dumps(working))

    @coroutine
    def load_crda(self):
        """List writings, pages, or replies (replies are joined to their writing)."""
        # NOTE(review): "type" shadows the builtin.
        type = self.get_arg("type", arg_type="hash")
        if type == "writings":
            book = self.memories.select("Writings")
            book.find({}, ["content"])
        elif type == "pages":
            book = self.memories.select("Pages")
            book.find({}, ["content"])
        elif type == "replies":
            book = self.memories.select("Replies")
            book.find({})
            writing_list = []
        else:
            raise HTTPError(500)
        book.sort([["time", False]])
        book.length(0, True)
        yield book.do()
        content_list = book.result()
        if type == "replies":
            # First pass: normalize ids and collect referenced writings.
            for key in content_list:
                content_list[key]["_id"] = int(
                    content_list[key]["_id"])
                if content_list[key]["writing_id"] not in writing_list:
                    writing_list.append(content_list[key]["writing_id"])
            writing_list = yield self.get_writing(writing_list=writing_list)
            # Second pass: attach the writing, dropping orphaned replies.
            # NOTE(review): deleting from content_list while iterating it
            # raises RuntimeError on Python 3 -- iterate over a copy of keys.
            for key in content_list:
                if content_list[key]["writing_id"] not in writing_list.keys():
                    del content_list[key]
                    continue
                content_list[key]["writing"] = writing_list[
                    content_list[key]["writing_id"]]
        self.finish(json.dumps(list(content_list.values())))

    @coroutine
    def save_reply(self):
        """Moderate a reply: toggle permit, erase it, or edit its fields."""
        reply_id = self.get_arg("reply", arg_type="number")
        reply_method = self.get_arg("method", arg_type="hash")
        book = self.memories.select("Replies")
        if reply_method == "permit":
            permit = self.get_arg("permit", arg_type="boolean")
            if permit is None:
                raise HTTPError(500)
            # Verify the reply exists before flipping its permit flag.
            book.find({"_id": reply_id})
            yield book.do()
            reply = book.result()
            if not reply:
                raise HTTPError(500)
            book.set({"_id": reply_id}, {"permit": permit})
            yield book.do()
            self.finish(json.dumps({"status": True}))
        elif reply_method == "erase":
            book.erase({"_id": reply_id})
            yield book.do()
            self.finish(json.dumps({"status": True}))
        elif reply_method == "edit":
            reply_name = self.get_arg("name", arg_type="origin")
            reply_homepage = self.get_arg("homepage", arg_type="origin")
            reply_email = self.get_arg("email", arg_type="mail_address")
            reply_content = self.get_arg("content", arg_type="origin")
            if not (reply_id and reply_name and reply_homepage and
                    reply_email and reply_content):
                raise HTTPError(500)
            reply = {}
            reply["name"] = reply_name
            reply["homepage"] = reply_homepage
            reply["email"] = reply_email
            reply["content"] = reply_content
            book.set({"_id": reply_id}, reply)
            yield book.do()
            self.finish(json.dumps({"status": True}))

    @coroutine
    def load_configuration(self):
        """Return every site configuration record as JSON."""
        book = self.memories.select("Configs")
        book.find({})
        book.length(0, True)
        yield book.do()
        configs = book.result()
        self.finish(json.dumps(configs))

    @coroutine
    def save_configuration(self):
        """Persist changed site settings; every field is required."""
        post_config = OrderedDict()
        post_config["site_name"] = self.get_arg("site_name", arg_type="origin")
        post_config["site_description"] = self.get_arg(
            "site_description", arg_type="origin")
        post_config["site_keywords"] = self.get_arg(
            "site_keywords", arg_type="origin")
        post_config["site_url"] = self.get_arg("site_url", arg_type="link")
        post_config["nutrition_type"] = self.get_arg(
            "nutrition_type", arg_type="hash")
        post_config["trace_code"] = self.get_arg(
            "trace_code", arg_type="origin")
        for key in post_config:
            if not post_config[key]:
                raise HTTPError(500)
        book = self.memories.select("Configs")
        book.find({}).length(0, force_dict=True)
        yield book.do()
        origin_config = book.result()
        # Only write back the keys whose value actually changed.
        for key in post_config:
            if origin_config[key] != post_config[key]:
                book.set({"_id": key}, {"value": post_config[key]})
                yield book.do()
        self.finish(json.dumps({"status": True}))
| 36.059952 | 79 | 0.566004 |
32971a4d8f7c8d3007e29f91b5752dc9afb9d82a | 17,292 | py | Python | Python/empire/betsiamites/python/lexer/tokenizer/specialize_token.py | Tombmyst/Empire | f28782787c5fa9127e353549b73ec90d3c82c003 | [
"Apache-2.0"
] | null | null | null | Python/empire/betsiamites/python/lexer/tokenizer/specialize_token.py | Tombmyst/Empire | f28782787c5fa9127e353549b73ec90d3c82c003 | [
"Apache-2.0"
] | null | null | null | Python/empire/betsiamites/python/lexer/tokenizer/specialize_token.py | Tombmyst/Empire | f28782787c5fa9127e353549b73ec90d3c82c003 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from empire.python.typings import *
from keyword import kwlist
from empire.betsiamites.python.lexer.tokenizer.code_token import CodeToken
from empire.betsiamites.python.lexer.token_types import *
from empire.betsiamites.python.lexer.tokenizer.operators import OPERATORS_MAPPING
from empire.betsiamites.python.lexer.tokenizer.magics import MAGIC_MAPPING
from empire.betsiamites.python.lexer.tokenizer.type_annotations import TYPE_ANNOTATION_MAPPING
from empire.data_structures import Stack
from empire.util.log import *
# Alias for the token-type lists consumed by the expectation-stack helpers.
# NOTE(review): Final[...] is meant for annotations, not value-level aliases;
# this works only because annotations are lazy under __future__ annotations.
AcceptedTokenType = Final[List[TokenType]]
class SpecializeToken:
    """
    Takes a token (a word) and determines what it is.

    Stateless except for the class-level expectation stack, which records
    what kind of token the grammar context says should come next (e.g. a
    CLASS_DECL name right after the 'class' keyword).

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump -- verify block nesting against the upstream repository.
    """

    # Python keywords, minus the ones this lexer treats as literals/operators.
    _KEYWORDS: Final[List[str]] = [kw for kw in kwlist if kw.lower() not in ['none', 'true', 'false', 'and', 'or', 'is', 'in', 'not', 'lambda']]
    _OPERATORS: Final[List[str]] = ['and', 'or', 'is', 'in', 'not', '=', '+=', '-=', '*=', '/=', '**=', '//=', '+', '-', '*', '/', '**', '//', '==', '!=', '<>', '<', '<=', '>', '>=', '&', '&=', '|',
                                    '|=', '^', '^=', '~', '~=', '<<', '<<=', '>>', '>>=', '@', '@=', ':', '.', ',', '[', ']', '(', ')', '{', '}', 'lambda', '->', 'del']
    # Expectations that a ')' / ':' / ']' respectively may discharge.
    _EXPECTED_FOLLOWING_TOKEN_TYPES_TO_POP_ON_PAREN_CLOSE: AcceptedTokenType = [
        TokenTypes.PARENT_CLASS,
        TokenTypes.FUNCTION_PARAMETER_DECL,
        TokenTypes.FUNCTION_PARAMETER_DEFINITION
    ]
    _EXPECTED_FOLLOWING_TOKEN_TYPES_TO_POP_ON_COLON: AcceptedTokenType = [
        TokenTypes.CLASS_DECL,
        TokenTypes.FUNCTION_DECL
    ]
    _EXPECTED_FOLLOWING_TOKEN_TYPES_TO_POP_ON_SUBSCRIPT_CLOSE: AcceptedTokenType = [
        TokenTypes.TYPE_ANNOTATION
    ]

    # Shared (class-level) stack of what the next token is expected to be.
    expected_following_token_type: Stack = Stack()

    @staticmethod
    def specialize(token: CodeToken):
        """Classify one token, trying keyword, then operator, then identifier."""
        if SpecializeToken._keyword(token):
            return
        elif SpecializeToken._operator(token):
            return
        elif token.python_type in [PythonTokenTypes.NAME, PythonTokenTypes.NUMBER, PythonTokenTypes.STRING]:
            SpecializeToken._identifier(token)
            return

    @staticmethod
    def _keyword(token: CodeToken) -> bool:
        """Mark keyword tokens and push any expectation they imply; True if handled."""
        if token.string in SpecializeToken._KEYWORDS:
            token.token_type = TokenTypes.KEYWORD
            SpecializeToken._expected_following_keyword(token)
            return True
        return False

    @staticmethod
    def _operator(token: CodeToken) -> bool:
        """Map operator tokens via OPERATORS_MAPPING; True if handled."""
        if token.string in SpecializeToken._OPERATORS:
            mapped_type: Union[TokenType, List[TokenType]] = OPERATORS_MAPPING[token.string]
            if type(mapped_type) == TokenType:
                token.token_type = mapped_type
            else:
                # Ambiguous operators ('@', '*') map to a list; disambiguate.
                token.token_type = SpecializeToken._determine_operator(token, mapped_type)
            SpecializeToken._expected_following_operator(token)
            return True
        return False

    @staticmethod
    def _identifier(token: CodeToken) -> bool:
        """Classify names/numbers/strings, honoring any pending expectation."""
        if token.string == 'self':  # ------------------------------------------------------------------------- SELF
            token.token_type = TokenTypes.SELF_KW
            return True
        elif token.string in MAGIC_MAPPING.keys():  # --------------------------------------------------------- OVERLOADS
            token.token_type = MAGIC_MAPPING[token.string]
            return True
        elif SpecializeToken.expected_following_token_type.last(raise_on_empty=False) == TokenTypes.TYPE_ANNOTATION:  # -------------- TYPE ANNOT
            SpecializeToken._process_type_annotation(token)
            return True
        elif SpecializeToken.expected_following_token_type.last(raise_on_empty=False) == TokenTypes.FUNCTION_PARAMETER_DEFAULT_VALUE:  # PARAM DEF VALUE
            token.token_type = TokenTypes.FUNCTION_PARAMETER_DEFAULT_VALUE
            # Record the default value on the parameter's declaration token.
            param_decl_token: CodeToken = token.navigate_to_token(reach_first_of_types=TokenTypes.FUNCTION_PARAMETER_DECL)
            if param_decl_token:
                param_decl_token.possible_literal_values.add(token.literal_value)
            else:
                pass
                # Log.warn('Unable to reach FUNCTION PARAMETER DECL', __file__, get_function_name(), current_token=token)
            SpecializeToken.expected_following_token_type.pop(raise_on_empty=False)
            return True
        elif len(SpecializeToken.expected_following_token_type) > 0:  # WARNING: ensure to not put anything below that should have priority over the expected following token type
            token.token_type = SpecializeToken.expected_following_token_type.pop(raise_on_empty=False)
            return True
        else:
            SpecializeToken._determine_identifier(token)
        return False

    @staticmethod
    def _expected_following_keyword(token: CodeToken):
        """Push the expectation implied by a keyword (import/def/class/...)."""
        keyword_value: str = token.string
        if keyword_value == 'from':
            SpecializeToken.expected_following_token_type.push(TokenTypes.PACKAGE_IMPORT)
        elif keyword_value == 'import':
            # "import x" at line start imports a package; after "from" it
            # names a module member.
            if token.is_first:
                SpecializeToken.expected_following_token_type.push(TokenTypes.PACKAGE_IMPORT)
            else:
                SpecializeToken.expected_following_token_type.push(TokenTypes.MODULE_IMPORT)
        elif keyword_value == 'def':
            SpecializeToken.expected_following_token_type.push(TokenTypes.FUNCTION_DECL)
        elif keyword_value == 'class':
            SpecializeToken.expected_following_token_type.push(TokenTypes.CLASS_DECL)

    @staticmethod
    def _expected_following_operator(token: CodeToken):
        """Dispatch per-operator expectation handling; no-op operators pass."""
        operator_value: str = token.string
        token_type: TokenType = token.token_type
        if operator_value == 'lambda':  # ------------------------------------- lambda
            SpecializeToken.expected_following_token_type.push(TokenTypes.FUNCTION_PARAMETER_DECL)
        elif token_type == TokenTypes.OPERATOR_ASSIGN:  # --------------------- =
            SpecializeToken._expected_following_equal(token)
        elif token_type == TokenTypes.OPERATOR_RETURN_TYPE:  # ---------------- ->
            SpecializeToken._expected_following_return_type(token)
        elif token_type == TokenTypes.OPERATOR_DECORATOR:  # ------------------ @
            SpecializeToken._expected_following_decorator(token)
        elif token_type == TokenTypes.OPERATOR_PAREN_OPEN:  # ----------------- (
            SpecializeToken._expected_following_paren_open(token)
        elif token_type == TokenTypes.OPERATOR_PAREN_CLOSE:  # ---------------- )
            SpecializeToken._expected_following_paren_close(token)
        elif token_type == TokenTypes.OPERATOR_SUBSCRIPT_OPEN:  # ------------- [
            SpecializeToken._expected_following_subscript_open(token)
        elif token_type == TokenTypes.OPERATOR_SUBSCRIPT_CLOSE:  # ------------ ]
            SpecializeToken._expected_following_subscript_close(token)
        elif token_type == TokenTypes.OPERATOR_EXPONENT:  # ------------------- **
            pass
        elif token_type == TokenTypes.OPERATOR_BIT_NOT:  # -------------------- ~
            pass
        elif token_type == TokenTypes.OPERATOR_POSITIVE:  # ------------------- +
            pass
        elif token_type == TokenTypes.OPERATOR_NEGATIVE:  # ------------------- -
            pass
        elif token_type == TokenTypes.OPERATOR_BIT_AND:  # -------------------- &
            pass
        elif token_type == TokenTypes.OPERATOR_BIT_OR:  # --------------------- |
            pass
        elif token_type == TokenTypes.OPERATOR_COLON:  # ---------------------- :
            SpecializeToken._expected_following_colon(token)
        elif token_type == TokenTypes.OPERATOR_COMMA:  # ---------------------- ,
            SpecializeToken._expected_following_comma(token)
        elif token_type == TokenTypes.OPERATOR_MEMBER_ACCESS:  # -------------- .
            SpecializeToken._expected_following_member_access(token)
        else:
            pass
            # print('UNPARSED TOKEN: {}'.format(token_type))

    @staticmethod
    def _expected_following_equal(token: CodeToken):
        """After '=' inside a parameter list, expect a default value next."""
        if token.precedent_token.token_type == TokenTypes.FUNCTION_PARAMETER or \
                token.navigate_to_token(reach_first_of_types=TokenTypes.FUNCTION_PARAMETER, stop_navigating_at_types=TokenTypes.OPERATOR_PAREN_OPEN):
            if token.precedent_token.token_type != TokenTypes.FUNCTION_PARAMETER_DEFINITION:
                SpecializeToken.expected_following_token_type.push(TokenTypes.FUNCTION_PARAMETER_DEFAULT_VALUE)

    @staticmethod
    def _expected_following_return_type(token: CodeToken):
        """After '->', expect a return-type annotation."""
        SpecializeToken.expected_following_token_type.push(TokenTypes.RETURN_TYPE_ANNOTATION)

    @staticmethod
    def _expected_following_decorator(token: CodeToken):
        """After a decorator '@', expect the decorator name."""
        SpecializeToken.expected_following_token_type.push(TokenTypes.DECORATOR)

    @staticmethod
    def _expected_following_paren_open(token: CodeToken):
        """Classify '(' by what precedes it: class/def header, special, or a call."""
        if token.precedent_token.token_type == TokenTypes.CLASS_DECL:
            SpecializeToken.expected_following_token_type.push(TokenTypes.PARENT_CLASS)
        elif token.precedent_token.token_type == TokenTypes.FUNCTION_DECL or token.precedent_token.token_type.category == TokenTypes.MAGIC_FUNCTION:
            SpecializeToken.expected_following_token_type.push(TokenTypes.FUNCTION_PARAMETER_DECL)
        elif token.precedent_token.token_type == TokenTypes.SPECIALS_TBD_FUNCTION:
            # list/tuple/set/dict followed by '(' is a constructor call.
            if token.precedent_token.string == 'list':
                token.precedent_token.token_type = TokenTypes.SPECIALS_LIST_FUNCTION
            elif token.precedent_token.string == 'tuple':
                token.precedent_token.token_type = TokenTypes.SPECIALS_TUPLE_FUNCTION
            elif token.precedent_token.string == 'set':
                token.precedent_token.token_type = TokenTypes.SPECIALS_SET_FUNCTION
            elif token.precedent_token.string == 'dict':
                token.precedent_token.token_type = TokenTypes.SPECIALS_DICTIONARY_FUNCTION
        elif token.precedent_token.token_type == TokenTypes.EITHER_VAR_OR_FUNC_CALL:
            # A name directly followed by '(' must be a call.
            token.precedent_token.token_type = TokenTypes.FUNCTION_CALL
            SpecializeToken.expected_following_token_type.push(TokenTypes.FUNCTION_PARAMETER_DEFINITION)

    @staticmethod
    def _expected_following_paren_close(token: CodeToken):
        """')' discharges a matching pending paren-scoped expectation."""
        SpecializeToken._expected_following_pop(token, SpecializeToken._EXPECTED_FOLLOWING_TOKEN_TYPES_TO_POP_ON_PAREN_CLOSE)

    @staticmethod
    def _expected_following_subscript_open(token: CodeToken):
        """'[' after a typing name opens a nested type annotation (e.g. List[int])."""
        if token.precedent_token.token_type.category == TokenTypes.TYPINGS:
            SpecializeToken.expected_following_token_type.push(TokenTypes.TYPE_ANNOTATION)

    @staticmethod
    def _expected_following_subscript_close(token: CodeToken):
        """']' discharges a pending type-annotation expectation."""
        SpecializeToken._expected_following_pop(token, SpecializeToken._EXPECTED_FOLLOWING_TOKEN_TYPES_TO_POP_ON_SUBSCRIPT_CLOSE)

    @staticmethod
    def _expected_following_colon(token: CodeToken):
        """':' ends a class/def header or starts a variable/parameter annotation."""
        SpecializeToken._expected_following_pop(token, SpecializeToken._EXPECTED_FOLLOWING_TOKEN_TYPES_TO_POP_ON_COLON)
        if token.precedent_token.token_type in [TokenTypes.FUNCTION_PARAMETER_DECL, TokenTypes.CLASS_VARIABLE_DECL, TokenTypes.VARIABLE_DECL]:
            SpecializeToken.expected_following_token_type.push(TokenTypes.TYPE_ANNOTATION)
            return
        elif token.precedent_token.token_type == TokenTypes.EITHER_VAR_OR_FUNC_CALL:
            # "name: ..." resolves the ambiguous name to a variable.
            SpecializeToken.expected_following_token_type.push(TokenTypes.TYPE_ANNOTATION)
            token.precedent_token.token_type = TokenTypes.UNKNOWN_VAR
            return

    @staticmethod
    def _expected_following_member_access(token: CodeToken):
        """'.' inside a package path keeps expecting package segments."""
        if token.precedent_token.token_type in [TokenTypes.PACKAGE_IMPORT]:
            SpecializeToken.expected_following_token_type.push(TokenTypes.PACKAGE_IMPORT)

    @staticmethod
    def _expected_following_comma(token: CodeToken):
        """',' continues a type-annotation list or a multi-name import."""
        if token.precedent_token.token_type.category == TokenTypes.TYPINGS:
            SpecializeToken.expected_following_token_type.push(TokenTypes.TYPE_ANNOTATION)
        elif token.precedent_token.token_type in [TokenTypes.MODULE_IMPORT, TokenTypes.PACKAGE_IMPORT]:
            SpecializeToken.expected_following_token_type.push(token.precedent_token.token_type)

    @staticmethod
    def _expected_following_pop(token: CodeToken, accepted_token_types: AcceptedTokenType):
        """Pop the top expectation if it is one of accepted_token_types."""
        current_expected: TokenType = SpecializeToken.expected_following_token_type.last(raise_on_empty=False)
        if current_expected in accepted_token_types:
            SpecializeToken.expected_following_token_type.pop(raise_on_empty=False)

    @staticmethod
    def _determine_operator(token: CodeToken, mapped_token_list: List[TokenType]) -> TokenType:
        """Disambiguate context-dependent operators ('@' and '*').

        NOTE(review): mapped_token_list is unused, and unhandled strings fall
        through returning None -- confirm against the operator mapping table.
        """
        if token.string == '@':
            # Line-initial '@' is a decorator; infix '@' is matrix multiply.
            if token.is_first:
                return TokenTypes.OPERATOR_DECORATOR
            else:
                return TokenTypes.OPERATOR_MATRIX_MULTIPLY
        elif token.string == '*':
            # "from x import *" -- the star closes the pending import expectation.
            if token.precedent_token.string == 'import':
                SpecializeToken.expected_following_token_type.pop(raise_on_empty=False)
                return TokenTypes.IMPORT_ALL_STAR

    @staticmethod
    def _process_type_annotation(token: CodeToken):
        """Resolve a name appearing where a type annotation is expected."""
        SpecializeToken.expected_following_token_type.pop(raise_on_empty=False)
        if token.string not in TYPE_ANNOTATION_MAPPING:
            token.token_type = TokenTypes.TYPINGS_USER_DEFINED
        else:
            token.token_type = TYPE_ANNOTATION_MAPPING[token.string]
            # Union's members carry the useful typing info, not Union itself.
            if token.string == 'Union':
                return
        # Propagate the typing onto the declared identifier, when reachable.
        identifier: CodeToken = token.navigate_to_token(reach_first_of_types=[TokenTypes.FUNCTION_PARAMETER_DECL, TokenTypes.CLASS_VARIABLE_DECL, TokenTypes.VARIABLE_DECL])
        if identifier:
            if token.token_type != TokenTypes.TYPINGS_USER_DEFINED:
                if token.token_type == TokenTypes.TYPINGS_OPTIONAL:
                    # Optional[X] implies None is a possible value.
                    identifier.possible_typings.add(TokenTypes.TYPINGS_NONE)
                else:
                    identifier.possible_typings.add(token.token_type)
            else:
                # User-defined types get a dedicated dynamic TokenType.
                identifier.possible_typings.add(TokenType.user_defined_token_type(token.string))
                token.token_type = TokenType.user_defined_token_type(token.string)
                token.token_type.category = TokenTypes.TYPINGS

    @staticmethod
    def _determine_identifier(token: CodeToken):
        """Classify literals and bare names; propagate literal values/typings."""
        if token.python_type == PythonTokenTypes.NUMBER:
            if token.string.endswith('j'):
                token.token_type = TokenTypes.LITERAL_IMAGINARY_NUMBER
            elif '.' in token.string:
                token.token_type = TokenTypes.LITERAL_FLOAT_NUMBER
            else:
                token.token_type = TokenTypes.LITERAL_INT_NUMBER
        elif token.python_type == PythonTokenTypes.STRING:
            # NOTE(review): the triple-quote branch is unreachable -- a
            # '''...''' or \"\"\"...\"\"\" literal also starts with a single
            # quote char, so the first two branches always match first.
            if token.string.startswith("'") or token.string.endswith("'"):
                token.token_type = TokenTypes.LITERAL_SQ_STRING
            elif token.string.startswith('"') or token.string.endswith('"'):
                token.token_type = TokenTypes.LITERAL_DQ_STRING
            elif token.string.startswith('"""') or token.string.endswith('"""') or \
                    token.string.startswith("'''") or token.string.endswith("'''"):
                token.token_type = TokenTypes.LITERAL_TQ_STRING
            else:
                Log.info('Unsupported strings format', __file__, get_function_name(), token=token)
                token.token_type = TokenTypes.LITERAL_STRING
        elif token.python_type == PythonTokenTypes.NAME:
            if token.string == 'True':
                token.token_type = TokenTypes.LITERAL_TRUE
            elif token.string == 'False':
                token.token_type = TokenTypes.LITERAL_FALSE
            elif token.string == 'None':
                token.token_type = TokenTypes.LITERAL_NONE
            elif token.string in ['list', 'dict', 'tuple', 'set']:
                # Resolved later to a constructor call by paren handling.
                token.token_type = TokenTypes.SPECIALS_TBD_FUNCTION
            else:
                token.token_type = TokenTypes.EITHER_VAR_OR_FUNC_CALL  # To be defined further
                return
        else:
            Log.warn('Unknown identifier: {}'.format(token.string))
            return
        # Attach this literal's value/typing to the variable it is assigned to.
        token_to_set_value: Union[CodeToken, None] = token.navigate_to_token(reach_first_of_categories=[TokenTypes.VARIABLE, TokenTypes.CLASS_VARIABLE, TokenTypes.FUNCTION_PARAMETER, TokenTypes.INSTANCE_VARIABLE], reach_first_of_types=[TokenTypes.EITHER_VAR_OR_FUNC_CALL, TokenTypes.UNKNOWN_VAR], return_none_on_stops=True)
        if token_to_set_value:
            token_to_set_value.possible_literal_values.add(token.literal_value)
            if token.literal_value is None:
                token_to_set_value.possible_typings.add(None)
            else:
                the_type: TokenType = LITERAL_MAPPING_TO_TOKEN_TYPE.get(type(token.literal_value), TokenTypes.TYPINGS_ANY)
                if the_type == TokenTypes.TYPINGS_ANY:
                    Log.info('Adding "ANY" as typing', __file__, get_function_name(), literal_type=type(token.literal_value))
                token_to_set_value.possible_typings.add(the_type)
| 54.895238 | 323 | 0.672797 |
6b83eeaa96fd3504e570d0f48d3e035fcc7cb8d6 | 684 | py | Python | footy/engine/PredictionEngine.py | dallinb/footy | d6879481a85b4a84023805bf29bd7dff32afa67f | [
"BSD-3-Clause"
] | 2 | 2020-08-27T17:59:13.000Z | 2021-10-10T02:26:20.000Z | footy/engine/PredictionEngine.py | FootyStats/footy | d6879481a85b4a84023805bf29bd7dff32afa67f | [
"BSD-3-Clause"
] | 32 | 2020-08-24T15:01:57.000Z | 2022-03-12T00:47:02.000Z | footy/engine/PredictionEngine.py | dallinb/footy | d6879481a85b4a84023805bf29bd7dff32afa67f | [
"BSD-3-Clause"
] | null | null | null | """Prediction Engine - Engine to predict the result of future fixtures."""
# calculate the results for fixtures
from footy.domain import Competition
class PredictionEngine:
    """Prediction Engine - Engine to predict the result of future fixtures."""

    def __init__(self, competition):
        """Store the competition to be predicted and an empty results cache."""
        self._results = {}
        self._competition = competition

    def predict_results(self, competition):
        """
        Generate the predictions for fixtures within a competition.

        Return
        -------
        Competition
            Enriched competition with most recent predictions.
        """
        # NOTE(review): placeholder logic -- this returns the imported
        # `Competition` name itself rather than an enriched instance; confirm
        # intended behavior before relying on the return value.
        return Competition
| 28.5 | 78 | 0.656433 |
a5be6fede88d1a5abafa62bc000bf45b8f3128da | 738 | py | Python | setup.py | franciszhangkk/VSR-master | bfa5262ae2b3e1c4e50e97a47cc80550c1607f02 | [
"MIT"
] | null | null | null | setup.py | franciszhangkk/VSR-master | bfa5262ae2b3e1c4e50e97a47cc80550c1607f02 | [
"MIT"
] | null | null | null | setup.py | franciszhangkk/VSR-master | bfa5262ae2b3e1c4e50e97a47cc80550c1607f02 | [
"MIT"
] | null | null | null | from setuptools import find_packages
from setuptools import setup
# Exact (==) version pins for the training environment's dependencies.
REQUIRED_PACKAGES = [
    "absl-py==0.2.2",
    "astor==0.6.2",
    "bleach==1.5.0",
    "gast==0.2.0",
    "grpcio==1.12.1",
    "h5py==2.8.0",
    "html5lib==0.9999999",
    "Keras==2.2.0",
    "Keras-Applications==1.0.2",
    "Keras-Preprocessing==1.0.1",
    "Markdown==2.6.11",
    "numpy==1.14.5",
    "protobuf==3.6.0",
    "PyYAML==3.12",
    "six==1.11.0",
    "tensorboard==1.8.0",
    "tensorflow==1.8.0",
    "termcolor==1.1.0",
    "Werkzeug==0.14.1"
]
# Build the distribution metadata for the trainer package.
setup(
    name='trainer',
    version='0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='Video Super resolution neural network'
) | 22.363636 | 55 | 0.590786 |
6866219807b65f8b8e19fe0b32726cd88041b368 | 11,997 | py | Python | salt/modules/boto3_sns.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | 1 | 2020-04-09T03:25:10.000Z | 2020-04-09T03:25:10.000Z | salt/modules/boto3_sns.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | null | null | null | salt/modules/boto3_sns.py | springborland/salt | bee85e477d57e9a171884e54fefb9a59d0835ed0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Connection module for Amazon SNS
:configuration: This module accepts explicit sns credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sns.keyid: GKTADJGHEIQSXMKKRBJ08H
sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sns.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto3
"""
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.utils.versions
log = logging.getLogger(__name__)
# Import third party libs
# pylint: disable=unused-import
try:
import botocore
import boto3
import jmespath
logging.getLogger("boto3").setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=unused-import
def __virtual__():
    """
    Only load if boto libraries exist.
    """
    has_boto_reqs = salt.utils.versions.check_boto_reqs()
    if has_boto_reqs is True:
        # Inject the shared boto3 connection helpers (e.g. _get_conn) into
        # this module's namespace for the "sns" service.
        __utils__["boto3.assign_funcs"](__name__, "sns")
    return has_boto_reqs
def list_topics(region=None, key=None, keyid=None, profile=None):
    """
    Return a mapping of topic short name -> topic ARN for every topic owned
    by the requester, following pagination until exhausted.

    CLI example::

        salt myminion boto3_sns.list_topics
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    topics_by_name = {}
    page_token = ""
    # An absent "NextToken" in a response marks the final page.
    while page_token is not None:
        page = conn.list_topics(NextToken=page_token)
        page_token = page.get("NextToken", None)
        for arn in jmespath.search("Topics[*].TopicArn", page):
            # The short name is the last ':'-separated component of the ARN.
            topics_by_name[arn.split(":")[-1]] = arn
    return topics_by_name
def describe_topic(name, region=None, key=None, keyid=None, profile=None):
    """
    Return details about a specific SNS topic, identified by short name or ARN.

    CLI example::

        salt my_favorite_client boto3_sns.describe_topic a_sns_topic_of_my_choice
    """
    ret = {}
    all_topics = list_topics(region=region, key=key, keyid=keyid, profile=profile)
    for short_name, arn in all_topics.items():
        if name not in (short_name, arn):
            continue
        # Match on either the short name or the full ARN.
        ret = {
            "TopicArn": arn,
            "Subscriptions": list_subscriptions_by_topic(
                arn, region=region, key=key, keyid=keyid, profile=profile
            ),
            "Attributes": get_topic_attributes(
                arn, region=region, key=key, keyid=keyid, profile=profile
            ),
        }
    return ret
def topic_exists(name, region=None, key=None, keyid=None, profile=None):
    """
    Check to see if an SNS topic exists.

    The ``name`` may be either the topic's short name or its full ARN.

    CLI example::

        salt myminion boto3_sns.topic_exists mytopic region=us-east-1
    """
    topics = list_topics(region=region, key=key, keyid=keyid, profile=profile)
    # `topics` maps short names to ARNs; accept a match on either.  Note that
    # Python 3 dict views cannot be concatenated with `+` (the previous
    # `topics.values() + topics.keys()` raised TypeError), so test each view
    # explicitly instead.
    return name in topics or name in topics.values()
def create_topic(Name, region=None, key=None, keyid=None, profile=None):
    """
    Create an SNS topic and return its ARN, or None on failure.

    CLI example::

        salt myminion boto3_sns.create_topic mytopic region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        created = conn.create_topic(Name=Name)
        arn = created["TopicArn"]
    except botocore.exceptions.ClientError as e:
        log.error("Failed to create SNS topic %s: %s", Name, e)
        return None
    except KeyError:
        # Response did not carry a TopicArn.
        log.error("Failed to create SNS topic %s", Name)
        return None
    log.info("SNS topic %s created with ARN %s", Name, arn)
    return arn
def delete_topic(TopicArn, region=None, key=None, keyid=None, profile=None):
    """
    Delete an SNS topic.

    Returns True on success, False if the delete call fails.

    CLI example::

        salt myminion boto3_sns.delete_topic mytopic region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.delete_topic(TopicArn=TopicArn)
        log.info("SNS topic %s deleted", TopicArn)
        return True
    except botocore.exceptions.ClientError as e:
        # The error path previously referenced an undefined `name`, which
        # raised NameError and masked the real AWS failure; log the actual
        # TopicArn instead.
        log.error("Failed to delete SNS topic %s: %s", TopicArn, e)
        return False
def get_topic_attributes(TopicArn, region=None, key=None, keyid=None, profile=None):
    """
    Return all of the properties of a topic, or None on failure.  Topic
    properties returned might differ based on the authorization of the user.

    CLI example::

        salt myminion boto3_sns.get_topic_attributes someTopic region=us-west-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        response = conn.get_topic_attributes(TopicArn=TopicArn)
    except botocore.exceptions.ClientError as e:
        log.error("Failed to garner attributes for SNS topic %s: %s", TopicArn, e)
        return None
    return response.get("Attributes")
def set_topic_attributes(
    TopicArn,
    AttributeName,
    AttributeValue,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Set an attribute of a topic to a new value.

    Returns True on success, False if the update call fails.

    CLI example::

        salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.set_topic_attributes(
            TopicArn=TopicArn,
            AttributeName=AttributeName,
            AttributeValue=AttributeValue,
        )
        log.debug(
            "Set attribute %s=%s on SNS topic %s",
            AttributeName,
            AttributeValue,
            TopicArn,
        )
        return True
    except botocore.exceptions.ClientError as e:
        log.error(
            "Failed to set attribute %s=%s for SNS topic %s: %s",
            AttributeName,
            AttributeValue,
            TopicArn,
            e,
        )
        return False
def list_subscriptions_by_topic(
    TopicArn, region=None, key=None, keyid=None, profile=None
):
    """
    Return the list of subscriptions attached to a specific topic, following
    pagination, or None if the listing fails.

    CLI example::

        salt myminion boto3_sns.list_subscriptions_by_topic mytopic region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    subscriptions = []
    page_token = ""
    try:
        # An absent "NextToken" in a response marks the final page.
        while page_token is not None:
            page = conn.list_subscriptions_by_topic(
                TopicArn=TopicArn, NextToken=page_token
            )
            page_token = page.get("NextToken", None)
            subscriptions.extend(page.get("Subscriptions", []))
    except botocore.exceptions.ClientError as e:
        log.error("Failed to list subscriptions for SNS topic %s: %s", TopicArn, e)
        return None
    return subscriptions
def list_subscriptions(region=None, key=None, keyid=None, profile=None):
    """
    Return every SNS subscription visible to the requester, following
    pagination, or None if the listing fails.

    CLI example::

        salt myminion boto3_sns.list_subscriptions region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    subscriptions = []
    page_token = ""
    try:
        # An absent "NextToken" in a response marks the final page.
        while page_token is not None:
            page = conn.list_subscriptions(NextToken=page_token)
            page_token = page.get("NextToken", None)
            subscriptions.extend(page.get("Subscriptions", []))
    except botocore.exceptions.ClientError as e:
        log.error("Failed to list SNS subscriptions: %s", e)
        return None
    return subscriptions
def get_subscription_attributes(
    SubscriptionArn, region=None, key=None, keyid=None, profile=None
):
    """
    Return all of the properties of a subscription, or None on failure.

    CLI example::

        salt myminion boto3_sns.get_subscription_attributes somesubscription region=us-west-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.get_subscription_attributes(SubscriptionArn=SubscriptionArn)[
            "Attributes"
        ]
    except botocore.exceptions.ClientError as e:
        log.error(
            "Failed to list attributes for SNS subscription %s: %s", SubscriptionArn, e
        )
        return None
    except KeyError:
        # Response did not carry an Attributes section.
        log.error("Failed to list attributes for SNS subscription %s", SubscriptionArn)
        return None
def set_subscription_attributes(
    SubscriptionArn,
    AttributeName,
    AttributeValue,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Set an attribute of a subscription to a new value.

    Returns True on success, False if the update call fails.

    CLI example::

        salt myminion boto3_sns.set_subscription_attributes someSubscription RawMessageDelivery jsonStringValue
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.set_subscription_attributes(
            SubscriptionArn=SubscriptionArn,
            AttributeName=AttributeName,
            AttributeValue=AttributeValue,
        )
        log.debug(
            "Set attribute %s=%s on SNS subscription %s",
            AttributeName,
            AttributeValue,
            SubscriptionArn,
        )
        return True
    except botocore.exceptions.ClientError as e:
        log.error(
            "Failed to set attribute %s=%s for SNS subscription %s: %s",
            AttributeName,
            AttributeValue,
            SubscriptionArn,
            e,
        )
        return False
def subscribe(
    TopicArn, Protocol, Endpoint, region=None, key=None, keyid=None, profile=None
):
    """
    Subscribe an endpoint to a topic and return the new SubscriptionArn,
    or None on failure.

    CLI example::

        salt myminion boto3_sns.subscribe mytopic https https://www.example.com/sns-endpoint
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        ret = conn.subscribe(TopicArn=TopicArn, Protocol=Protocol, Endpoint=Endpoint)
        sub_arn = ret["SubscriptionArn"]
    except botocore.exceptions.ClientError as e:
        log.error("Failed to create subscription to SNS topic %s: %s", TopicArn, e)
        return None
    except KeyError:
        # Response did not carry a SubscriptionArn.
        log.error("Failed to create subscription to SNS topic %s", TopicArn)
        return None
    log.info(
        "Subscribed %s %s to topic %s with SubscriptionArn %s",
        Protocol,
        Endpoint,
        TopicArn,
        sub_arn,
    )
    return sub_arn
def unsubscribe(SubscriptionArn, region=None, key=None, keyid=None, profile=None):
    """
    Unsubscribe a specific SubscriptionArn of a topic.

    CLI Example:

    .. code-block:: bash

        salt myminion boto3_sns.unsubscribe my_subscription_arn region=us-east-1
    """
    all_subs = list_subscriptions(region=region, key=key, keyid=keyid, profile=profile)
    matches = [s for s in all_subs if s.get("SubscriptionArn") == SubscriptionArn]
    if not matches:
        log.error("Subscription ARN %s not found", SubscriptionArn)
        return False
    # Remember the parent topic so the success log can reference it.
    topic_arn = matches[0]["TopicArn"]
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.unsubscribe(SubscriptionArn=SubscriptionArn)
    except botocore.exceptions.ClientError as e:
        log.error("Failed to delete subscription %s: %s", SubscriptionArn, e)
        return False
    log.info("Deleted subscription %s from SNS topic %s", SubscriptionArn, topic_arn)
    return True
| 30.067669 | 111 | 0.650663 |
7d899aa026577bad9dffcc0cf3a7c4b4a11d1aec | 10,981 | py | Python | netcam_aioeos/topology/eos_check_ipaddrs.py | jeremyschulman/netcam-aioeos | ae8b46bcef1bbd86441342a9a282e404d597d662 | [
"Apache-2.0"
] | null | null | null | netcam_aioeos/topology/eos_check_ipaddrs.py | jeremyschulman/netcam-aioeos | ae8b46bcef1bbd86441342a9a282e404d597d662 | [
"Apache-2.0"
] | null | null | null | netcam_aioeos/topology/eos_check_ipaddrs.py | jeremyschulman/netcam-aioeos | ae8b46bcef1bbd86441342a9a282e404d597d662 | [
"Apache-2.0"
] | 1 | 2022-01-04T19:55:12.000Z | 2022-01-04T19:55:12.000Z | # Copyright 2021 Jeremy Schulman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import TYPE_CHECKING
from typing import Generator, Sequence
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
from netcad.topology.checks.check_ipaddrs import (
IpInterfacesCheckCollection,
IpInterfaceCheck,
IpInterfaceCheckExclusiveList,
)
from netcad.device import Device
from netcad.netcam import any_failures
from netcad.checks import check_result_types as trt
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
if TYPE_CHECKING:
from netcam_aioeos.eos_dut import EOSDeviceUnderTest
# -----------------------------------------------------------------------------
# Exports
# -----------------------------------------------------------------------------
__all__ = ["eos_test_ipaddrs"]
# -----------------------------------------------------------------------------
#
# CODE BEGINS
#
# -----------------------------------------------------------------------------
async def eos_test_ipaddrs(
    self, testcases: IpInterfacesCheckCollection
) -> trt.CheckResultsCollection:
    """
    This check executor validates the IP addresses used on the device against
    those that are defined in the design.

    Returns the accumulated per-interface check results plus the exclusive
    list check result.
    """
    dut: EOSDeviceUnderTest = self
    device = dut.device
    cli_rsp = await dut.eapi.cli("show ip interface brief")
    dev_ips_data = cli_rsp["interfaces"]
    results = list()
    if_names = list()
    for check in testcases.checks:
        if_name = check.check_id()
        if_names.append(if_name)
        # An interface missing from the device output is an immediate failure.
        if not (if_ip_data := dev_ips_data.get(if_name)):
            results.append(
                trt.CheckFailNoExists(device=device, check=check, field="if_ipaddr")
            )
            continue
        one_results = await eos_test_one_interface(
            dut, device=device, check=check, msrd_data=if_ip_data
        )
        results.extend(one_results)
    # only include device interfaces that have an assigned IP address; this
    # conditional is checked by examining the interface IP address mask length
    # against zero.
    results.extend(
        eos_test_exclusive_list(
            device=device,
            expd_if_names=if_names,
            msrd_if_names=[
                if_ip_data["name"]
                for if_ip_data in dev_ips_data.values()
                if if_ip_data["interfaceAddress"]["ipAddr"]["maskLen"] != 0
            ],
        )
    )
    return results
# -----------------------------------------------------------------------------
async def eos_test_one_interface(
    dut: "EOSDeviceUnderTest",
    device: Device,
    check: IpInterfaceCheck,
    msrd_data: dict,
) -> trt.CheckResultsCollection:
    """
    This function validates a specific interface use of an IP address against
    the design expectations.  A pass result is appended only when no failures
    were recorded.
    """
    results = list()
    # get the interface name being tested
    if_name = check.check_id()
    # -------------------------------------------------------------------------
    # if there is any error accessing the expected interface IP address
    # information, then yield a failure and return.
    # -------------------------------------------------------------------------
    try:
        msrd_if_addr = msrd_data["interfaceAddress"]["ipAddr"]
        msrd_if_ipaddr = f"{msrd_if_addr['address']}/{msrd_if_addr['maskLen']}"
    except KeyError:
        results.append(
            trt.CheckFailFieldMismatch(
                device=device,
                check=check,
                field="measurement",
                measurement=msrd_data,
            )
        )
        return results
    # -------------------------------------------------------------------------
    # Ensure the IP interface value matches.
    # -------------------------------------------------------------------------
    expd_if_ipaddr = check.expected_results.if_ipaddr
    # if the IP address is marked as "is_reserved" it means that an external
    # entity configured the IP address, and this check will only record the
    # value as an INFO check result.
    if expd_if_ipaddr == "is_reserved":
        results.append(
            trt.CheckInfoLog(
                device=device,
                check=check,
                field="if_ipaddr",
                measurement=msrd_if_ipaddr,
            )
        )
    elif msrd_if_ipaddr != expd_if_ipaddr:
        results.append(
            trt.CheckFailFieldMismatch(
                device=device,
                check=check,
                field="if_ipaddr",
                measurement=msrd_if_ipaddr,
            )
        )
    # -------------------------------------------------------------------------
    # Ensure the IP interface is "up".
    # TODO: should check if the interface is enabled before presuming this
    # up condition check.
    # -------------------------------------------------------------------------
    # check to see if the interface is disabled before we check to see if the IP
    # address is in the up condition.
    dut_interfaces = dut.device_info["interfaces"]
    dut_iface = dut_interfaces[if_name]
    iface_enabled = dut_iface["enabled"] is True
    if iface_enabled and (if_oper := msrd_data["lineProtocolStatus"]) != "up":
        # if the interface is an SVI, then we need to check to see if _all_ of
        # the associated physical interfaces are either disabled or in a
        # reserved condition.
        if if_name.startswith("Vlan"):
            svi_res = await _check_vlan_assoc_interface(
                dut, check, if_name=if_name, msrd_ipifaddr_oper=if_oper
            )
            results.extend(svi_res)
        else:
            results.append(
                trt.CheckFailFieldMismatch(
                    device=device,
                    check=check,
                    field="if_oper",
                    expected="up",
                    measurement=if_oper,
                    error=f"interface for IP {expd_if_ipaddr} is not up: {if_oper}",
                )
            )
    if not any_failures(results):
        results.append(
            trt.CheckPassResult(device=device, check=check, measurement=msrd_data)
        )
    return results
def eos_test_exclusive_list(
    device: Device, expd_if_names: Sequence[str], msrd_if_names: Sequence[str]
) -> Generator:
    """
    Yield a single check result flagging any IP interfaces present on the
    device but not expected per the design.
    """
    # Missing interfaces are already reported by the per-interface checks, so
    # only surplus device interfaces need to be flagged here.
    tc = IpInterfaceCheckExclusiveList()
    unexpected = set(msrd_if_names) - set(expd_if_names)
    if unexpected:
        yield trt.CheckFailExtraMembers(
            device=device,
            check=tc,
            field="ip-interfaces",
            expected=sorted(expd_if_names),
            extras=sorted(unexpected),
        )
    else:
        yield trt.CheckPassResult(device=device, check=tc, measurement="exists")
async def _check_vlan_assoc_interface(
    dut: "EOSDeviceUnderTest", check, if_name: str, msrd_ipifaddr_oper
) -> trt.CheckResultsCollection:
    """
    This coroutine is used to check whether or not a VLAN SVI ip address is not
    "up" due to the fact that the underlying interfaces are either disabled or
    in a "reserved" design; meaning we do not care if they are up or down. If
    the SVI is down because of this condition, the test case will "pass", and an
    information record is yielded to inform the User.

    Parameters
    ----------
    dut:
        The device under test

    check:
        The specific test case

    if_name:
        The specific VLAN SVI name, "Vlan12" for example

    msrd_ipifaddr_oper:
        The measured operational state of the IP interface

    Returns
    -------
    netcad test case results; one or more depending on the condition of SVI
    interfaces.
    """
    # e.g. "Vlan12" -> "12"
    vlan_id = if_name.split("Vlan")[-1]
    cli_res = await dut.eapi.cli(f"show vlan id {vlan_id} configured-ports")
    vlan_cfgd_ifnames = set(cli_res["vlans"][vlan_id]["interfaces"])
    disrd_ifnames = set()
    dut_ifs = dut.device_info["interfaces"]
    results = list()
    # Collect the member interfaces that are disabled or reserved by design.
    for check_ifname in vlan_cfgd_ifnames:
        dut_iface = dut_ifs[check_ifname]
        if (dut_iface["enabled"] is False) or (
            "is_reserved" in dut_iface["profile_flags"]
        ):
            disrd_ifnames.add(check_ifname)
    if disrd_ifnames == vlan_cfgd_ifnames:
        # All member interfaces are disabled/reserved: record an INFO result
        # and mark the check as passing.
        results.append(
            trt.CheckInfoLog(
                device=dut.device,
                check=check,
                field="if_oper",
                measurement=dict(
                    if_oper=msrd_ipifaddr_oper,
                    interfaces=list(vlan_cfgd_ifnames),
                    message="interfaces are either disabled or in reserved state",
                ),
            )
        )
        results.append(
            trt.CheckPassResult(device=dut.device, check=check, measurement="exists")
        )
        return results
    # Otherwise the SVI being down is a genuine failure.
    results.append(
        trt.CheckFailFieldMismatch(
            device=dut.device,
            check=check,
            field="if_oper",
            expected="up",
            measurement=msrd_ipifaddr_oper,
            error=f"interface for IP {check.expected_results.if_ipaddr} is not up: {msrd_ipifaddr_oper}",
        )
    )
    return results
| 32.58457 | 105 | 0.556416 |
5ed7184e105cf171c3b238567e871221f4f2dcd0 | 3,032 | py | Python | tests/test_upgrade.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | null | null | null | tests/test_upgrade.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | null | null | null | tests/test_upgrade.py | riddopic/opta | 25fa6435fdc7e2ea9c7963ed74100fffb0743063 | [
"Apache-2.0"
] | null | null | null | import pytest
import requests
import requests_mock
from pytest_mock import MockFixture
from opta.upgrade import (
LATEST_VERSION_FILE_URL,
_get_latest_version,
check_version_upgrade,
)
TEST_LATEST_VERSION = "1.11.1"
TEST_OLD_VERSION = "1.9.6"
class TestGetLatestVersion:
    """Tests for the _get_latest_version() helper."""

    def test_returns_version_number_from_file(self) -> None:
        """A successful fetch returns the version string (trailing newline stripped)."""
        with requests_mock.Mocker() as m:
            m.register_uri(
                "GET", LATEST_VERSION_FILE_URL, text=f"{TEST_LATEST_VERSION}\n"
            )
            assert _get_latest_version() == TEST_LATEST_VERSION

    def test_raises_exception_if_connection_error(self) -> None:
        """A network-level failure propagates as an exception."""
        with requests_mock.Mocker() as m:
            m.register_uri(
                "GET", LATEST_VERSION_FILE_URL, exc=requests.exceptions.ConnectTimeout
            )
            with pytest.raises(Exception):
                _get_latest_version()

    def test_raises_exception_if_error_response(self) -> None:
        """4xx and 5xx responses both raise an exception."""
        with requests_mock.Mocker() as m:
            m.register_uri(
                "GET", LATEST_VERSION_FILE_URL, status_code=404, text="Not Found"
            )
            with pytest.raises(Exception):
                _get_latest_version()
            m.register_uri(
                "GET", LATEST_VERSION_FILE_URL, status_code=500, text="Server error"
            )
            with pytest.raises(Exception):
                _get_latest_version()
class TestCheckVersionUpgrade:
    """Tests for the check_version_upgrade() entry point."""

    def test_does_not_check_if_should_check_false(self, mocker: MockFixture) -> None:
        """No version lookup happens when the throttle check says skip."""
        mocked_should_check = mocker.patch(
            "opta.upgrade._should_check_for_version_upgrade", return_value=False
        )
        mocked_get_latest_version = mocker.patch("opta.upgrade._get_latest_version")
        check_version_upgrade()
        mocked_should_check.assert_called_once()
        mocked_get_latest_version.assert_not_called()

    def test_logs_update_instructions_if_newer_version_available(
        self, mocker: MockFixture
    ) -> None:
        """A newer available version produces a warning naming both versions."""
        mocker.patch("opta.upgrade._should_check_for_version_upgrade", return_value=True)
        mocker.patch("opta.upgrade._get_latest_version", return_value=TEST_LATEST_VERSION)
        mocker.patch("opta.upgrade.VERSION", TEST_OLD_VERSION)
        mocked_logger_warning = mocker.patch("opta.upgrade.logger.warning")
        check_version_upgrade()
        mocked_logger_warning.assert_called_once()
        # The warning message must mention both the running and latest versions.
        warning_message: str = mocked_logger_warning.call_args.args[0]
        assert warning_message.find(TEST_OLD_VERSION) > -1
        assert warning_message.find(TEST_LATEST_VERSION) > -1

    def test_handles_get_latest_version_exceptions(self, mocker: MockFixture) -> None:
        """A lookup failure is swallowed rather than raised to the caller."""
        mocker.patch("opta.upgrade._should_check_for_version_upgrade", return_value=True)
        mocked_get_latest_version = mocker.patch(
            "opta.upgrade._get_latest_version",
            side_effect=requests.exceptions.ConnectTimeout,
        )
        check_version_upgrade()
        mocked_get_latest_version.assert_called_once()
| 38.871795 | 90 | 0.692282 |
bbb48a071c1fbc2e2975dac4fefd85533ad9c757 | 1,287 | py | Python | test/dataset/test_speech_synthesis_dataset.py | csukuangfj/lhotse | 9b12055ca75718914c5457b33e498d1c8e8b86d8 | [
"Apache-2.0"
] | 1 | 2020-10-02T02:42:25.000Z | 2020-10-02T02:42:25.000Z | test/dataset/test_speech_synthesis_dataset.py | csukuangfj/lhotse | 9b12055ca75718914c5457b33e498d1c8e8b86d8 | [
"Apache-2.0"
] | 2 | 2020-11-05T11:44:17.000Z | 2021-04-08T11:38:48.000Z | test/dataset/test_speech_synthesis_dataset.py | csukuangfj/lhotse | 9b12055ca75718914c5457b33e498d1c8e8b86d8 | [
"Apache-2.0"
] | null | null | null | import pytest
import torch
from lhotse import CutSet
from lhotse.dataset.signal_transforms import GlobalMVN
from lhotse.dataset.speech_synthesis import SpeechSynthesisDataset
@pytest.fixture
def cut_set():
    """Load the LJSpeech cut manifest fixture used by the dataset tests."""
    return CutSet.from_json('test/fixtures/ljspeech/cuts.json')
@pytest.mark.parametrize('transform', [None, GlobalMVN, [GlobalMVN]])
def test_speech_synthesis_dataset(cut_set, transform):
    """Check output shapes/types of SpeechSynthesisDataset under each transform variant."""
    if isinstance(transform, list):
        transform = [transform[0].from_cuts(cut_set)]
    elif isinstance(transform, GlobalMVN):
        # NOTE(review): `transform` here is the GlobalMVN *class*, not an
        # instance, so isinstance(...) is False and this branch appears
        # unreachable -- the bare-class case falls through to None below.
        # Confirm whether `transform.from_cuts(cut_set)` was intended.
        transform = transform(cut_set)
    else:
        transform = None
    dataset = SpeechSynthesisDataset(cut_set, feature_transforms=transform)
    example = dataset[cut_set]
    assert example['audio'].shape[1] > 0
    assert example['features'].shape[1] > 0
    assert example['tokens'].shape[1] > 0
    assert example['audio'].ndim == 2
    assert example['features'].ndim == 3
    assert example['tokens'].ndim == 2
    assert isinstance(example['audio_lens'], torch.IntTensor)
    assert isinstance(example['features_lens'], torch.IntTensor)
    assert isinstance(example['tokens_lens'], torch.IntTensor)
    assert example['audio_lens'].ndim == 1
    assert example['features_lens'].ndim == 1
    assert example['tokens_lens'].ndim == 1
| 31.390244 | 75 | 0.722611 |
a6c8fd85c43b35ec024ad4065053a7685a6bf9ee | 666 | py | Python | src/oci/osub_organization_subscription/models/__init__.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/osub_organization_subscription/models/__init__.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/osub_organization_subscription/models/__init__.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .currency import Currency
from .subscription_summary import SubscriptionSummary
# Maps type names to classes for osub_organization_subscription services.
# Class-name -> class lookup used to deserialize service responses.
osub_organization_subscription_type_mapping = dict(
    Currency=Currency,
    SubscriptionSummary=SubscriptionSummary,
)
| 44.4 | 245 | 0.795796 |
b71c4f1dd28d50bf22557a50bdf6bdf616f918ea | 34,861 | py | Python | src/python/pants/backend/jvm/tasks/coursier_resolve.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 1 | 2021-05-05T18:58:28.000Z | 2021-05-05T18:58:28.000Z | src/python/pants/backend/jvm/tasks/coursier_resolve.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/tasks/coursier_resolve.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import itertools
import json
import os
from collections import defaultdict
from urllib import parse
from pants.backend.jvm.ivy_utils import IvyUtils
from pants.backend.jvm.subsystems.jar_dependency_management import (
JarDependencyManagement,
PinnedJarArtifactSet,
)
from pants.backend.jvm.subsystems.resolve_subsystem import JvmResolveSubsystem
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.coursier.coursier_subsystem import CoursierSubsystem
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.backend.jvm.tasks.resolve_shared import JvmResolverBase
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import FingerprintStrategy
from pants.base.workunit import WorkUnitLabel
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.java import util
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.util.contextutil import temporary_file
from pants.util.dirutil import safe_mkdir
from pants.util.fileutil import safe_hardlink_or_copy
class CoursierResultNotFound(Exception):
    """Raised when an expected coursier resolve result cannot be located."""
class CoursierMixin(JvmResolverBase):
"""Experimental 3rdparty resolver using coursier.
TODO(wisechengyi):
1. Add relative url support
"""
RESULT_FILENAME = "result"
    @classmethod
    def implementation_version(cls):
        # Bump the version tuple to invalidate previously cached resolve
        # results when this mixin's behavior changes.
        return super().implementation_version() + [("CoursierMixin", 2)]
    @classmethod
    def subsystem_dependencies(cls):
        """Declare the subsystems this resolver needs, beyond the base task's."""
        return super().subsystem_dependencies() + (
            CoursierSubsystem,
            DistributionLocator,
            JarDependencyManagement,
        )
    @classmethod
    def register_options(cls, register):
        """Register the coursier-resolve specific command-line options."""
        super().register_options(register)
        register(
            "--allow-global-excludes",
            type=bool,
            advanced=False,
            fingerprint=True,
            default=True,
            help="Whether global excludes are allowed.",
        )
        register(
            "--report",
            type=bool,
            advanced=False,
            default=False,
            help="Show the resolve output. This would also force a resolve even if the resolve task is validated.",
        )
    @staticmethod
    def _compute_jars_to_resolve_and_pin(raw_jars, artifact_set, manager):
        """This method provides settled lists of jar dependencies and coordinates based on conflict
        management.

        :param raw_jars: a collection of `JarDependencies`
        :param artifact_set: PinnedJarArtifactSet; may be None, in which case an empty set is used
        :param manager: JarDependencyManagement
        :return: (list of settled `JarDependency`, set of pinned `M2Coordinate`)
        """
        if artifact_set is None:
            artifact_set = PinnedJarArtifactSet()
        # Track pinned coordinates with no matching direct dependency; matches
        # are removed from this set as they are encountered below.
        untouched_pinned_artifact = {M2Coordinate.create(x) for x in artifact_set}
        jar_list = list(raw_jars)
        for i, dep in enumerate(jar_list):
            direct_coord = M2Coordinate.create(dep)
            # Portion to manage pinned jars in case of conflict
            if direct_coord in artifact_set:
                managed_coord = artifact_set[direct_coord]
                untouched_pinned_artifact.remove(managed_coord)
                if direct_coord.rev != managed_coord.rev:
                    # It may be necessary to actually change the version number of the jar we want to resolve
                    # here, because overrides do not apply directly (they are exclusively transitive). This is
                    # actually a good thing, because it gives us more control over what happens.
                    coord = manager.resolve_version_conflict(
                        managed_coord, direct_coord, force=dep.force
                    )
                    # Once a version is settled, we force it anyway
                    jar_list[i] = dep.copy(rev=coord.rev, force=True)
        return jar_list, untouched_pinned_artifact
def resolve(self, targets, compile_classpath, sources, javadoc, executor):
    """This is the core function for coursier resolve.

    Validation strategy:

    1. All targets are going through the `invalidated` to get fingerprinted in the target level.
       No cache is fetched at this stage because it is disabled.
    2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
       are fingerprinted together, because each run of 3rdparty resolve is context sensitive.

    Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation
    of each coursier run happened within that context.

    Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
    Currently it is disabled due to absolute paths in the coursier results.

    :param targets: a collection of targets to do 3rdparty resolve against
    :param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
    :param sources: if True, fetch sources for 3rdparty
    :param javadoc: if True, fetch javadoc for 3rdparty
    :param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
    :return: n/a
    """
    manager = JarDependencyManagement.global_instance()

    # Targets are grouped by the artifact set (pinned versions) that governs them;
    # each group is resolved independently below.
    jar_targets = manager.targets_by_artifact_set(targets)

    executor = executor or SubprocessExecutor(DistributionLocator.cached())
    if not isinstance(executor, Executor):
        raise ValueError(
            "The executor argument must be an Executor instance, given {} of type {}".format(
                executor, type(executor)
            )
        )

    for artifact_set, target_subset in jar_targets.items():
        # TODO(wisechengyi): this is the only place we are using IvyUtil method, which isn't specific to ivy really.
        raw_jar_deps, global_excludes = IvyUtils.calculate_classpath(target_subset)

        # ['sources'] * False = [], ['sources'] * True = ['sources']
        confs_for_fingerprint = ["sources"] * sources + ["javadoc"] * javadoc
        fp_strategy = CoursierResolveFingerprintStrategy(confs_for_fingerprint)

        compile_classpath.add_excludes_for_targets(target_subset)

        with self.invalidated(
            target_subset,
            invalidate_dependents=False,
            silent=False,
            fingerprint_strategy=fp_strategy,
        ) as invalidation_check:

            if not invalidation_check.all_vts:
                continue

            # The whole subset is fingerprinted together because a resolve is
            # context sensitive (see the docstring's validation strategy).
            resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)

            vt_set_results_dir = self._prepare_vts_results_dir(resolve_vts)
            pants_jar_base_dir = self._prepare_workdir()
            coursier_cache_dir = CoursierSubsystem.global_instance().get_options().cache_dir

            # If a report is requested, do not proceed with loading validated result.
            if not self.get_options().report:
                # Check each individual target without context first
                # If the individuals are valid, check them as a VersionedTargetSet
                # The order of 'or' statement matters, because checking for cache is more expensive.
                if resolve_vts.valid or (
                    self.artifact_cache_reads_enabled()
                    and len(self.check_artifact_cache([resolve_vts])[0])
                    == len(resolve_vts.targets)
                ):
                    # Load up from the results dir
                    success = self._load_from_results_dir(
                        compile_classpath,
                        vt_set_results_dir,
                        coursier_cache_dir,
                        invalidation_check,
                        pants_jar_base_dir,
                    )
                    if success:
                        resolve_vts.update()
                        # NOTE(review): this returns from resolve() entirely instead of
                        # continuing with the next artifact_set; that is only correct
                        # when there is a single artifact set — confirm intended.
                        return

            jars_to_resolve, pinned_coords = self._compute_jars_to_resolve_and_pin(
                raw_jar_deps, artifact_set, manager
            )

            results = self._get_result_from_coursier(
                jars_to_resolve,
                global_excludes,
                pinned_coords,
                coursier_cache_dir,
                sources,
                javadoc,
                executor,
            )

            for conf, result_list in results.items():
                for result in result_list:
                    self._load_json_result(
                        conf,
                        compile_classpath,
                        coursier_cache_dir,
                        invalidation_check,
                        pants_jar_base_dir,
                        result,
                        self._override_classifiers_for_conf(conf),
                    )

            self._populate_results_dir(vt_set_results_dir, results)
            resolve_vts.update()

            if self.artifact_cache_writes_enabled():
                self.update_artifact_cache([(resolve_vts, [vt_set_results_dir])])
def _override_classifiers_for_conf(self, conf):
# TODO Encapsulate this in the result from coursier instead of here.
# https://github.com/coursier/coursier/issues/803
if conf == "src_doc":
return ["sources", "javadoc"]
else:
return None
def _prepare_vts_results_dir(self, vts):
    """Create (if needed) and return the results dir keyed by the vts cache key."""
    results_dir = os.path.join(self.versioned_workdir, "results", vts.cache_key.hash)
    safe_mkdir(results_dir)
    return results_dir
def _prepare_workdir(self):
    """Prepare the location in our task workdir to store all the hardlinks to
    coursier cache dir."""
    hardlink_base = os.path.join(self.versioned_workdir, "cache")
    safe_mkdir(hardlink_base)
    return hardlink_base
def _get_result_from_coursier(
    self,
    jars_to_resolve,
    global_excludes,
    pinned_coords,
    coursier_cache_path,
    sources,
    javadoc,
    executor,
):
    """Calling coursier and return the result per invocation.

    If coursier was called once for classifier '' and once for classifier 'tests', then the
    return value would be: {'default': [<first coursier output>, <second coursier output>]}

    :param jars_to_resolve: List of `JarDependency`s to resolve
    :param global_excludes: List of `M2Coordinate`s to exclude globally
    :param pinned_coords: List of `M2Coordinate`s that need to be pinned.
    :param coursier_cache_path: path to where coursier cache is stored.
    :param sources: if True, also fetch 'sources' classifier artifacts.
    :param javadoc: if True, also fetch 'javadoc' classifier artifacts.
    :param executor: An instance of `pants.java.executor.Executor`

    :return: The aggregation of results by conf from coursier. Each coursier call could return
    the following:
        {
          "conflict_resolution": {
            "org:name:version" (requested): "org:name:version" (reconciled)
          },
          "dependencies": [
            {
              "coord": "orgA:nameA:versionA",
              "file": <path>,
              "dependencies": [ // coodinates for its transitive dependencies
                <orgX:nameX:versionX>,
                <orgY:nameY:versionY>,
              ]
            },
            {
              "coord": "orgB:nameB:jar:classifier:versionB",
              "file": <path>,
              "dependencies": [ // coodinates for its transitive dependencies
                <orgX:nameX:versionX>,
                <orgZ:nameZ:versionZ>,
              ]
            },
            ... // more about orgX:nameX:versionX, orgY:nameY:versionY, orgZ:nameZ:versionZ
          ]
        }
    Hence the aggregation of the results will be in the following format, for example when
    default classifier and sources are fetched:
        {
          'default': [<result from coursier call with default conf with classifier X>,
                      <result from coursier call with default conf with classifier Y>],
          'src_doc': [<result from coursier call with --sources and/or --javadoc>],
        }
    """
    # Prepare coursier args
    coursier_subsystem_instance = CoursierSubsystem.global_instance()
    coursier_jar = coursier_subsystem_instance.select()

    repos = coursier_subsystem_instance.get_options().repos
    # make [repoX, repoY] -> ['-r', repoX, '-r', repoY]
    repo_args = list(itertools.chain(*list(zip(["-r"] * len(repos), repos))))
    artifact_types_arg = [
        "-A",
        ",".join(coursier_subsystem_instance.get_options().artifact_types),
    ]
    advanced_options = coursier_subsystem_instance.get_options().fetch_options
    # Base argument list shared by every coursier invocation in this resolve.
    common_args = (
        [
            "fetch",
            # Print the resolution tree
            "-t",
            "--cache",
            coursier_cache_path,
        ]
        + repo_args
        + artifact_types_arg
        + advanced_options
    )

    coursier_work_temp_dir = os.path.join(self.versioned_workdir, "tmp")
    safe_mkdir(coursier_work_temp_dir)

    # The default ('' classifier) conf is always fetched; sources/javadoc are
    # fetched by a second invocation sharing the same common args.
    results_by_conf = self._get_default_conf_results(
        common_args,
        coursier_jar,
        global_excludes,
        jars_to_resolve,
        coursier_work_temp_dir,
        pinned_coords,
        executor,
    )

    if sources or javadoc:
        non_default_conf_results = self._get_non_default_conf_results(
            common_args,
            coursier_jar,
            global_excludes,
            jars_to_resolve,
            coursier_work_temp_dir,
            pinned_coords,
            sources,
            javadoc,
            executor,
        )
        results_by_conf.update(non_default_conf_results)

    return results_by_conf
def _get_default_conf_results(
    self,
    common_args,
    coursier_jar,
    global_excludes,
    jars_to_resolve,
    coursier_work_temp_dir,
    pinned_coords,
    executor,
):
    """Run coursier once for the default conf and return {'default': [<json result>]}.

    :param common_args: base args shared by every coursier invocation.
    :param coursier_jar: path to the coursier CLI jar.
    :param global_excludes: `M2Coordinate`s excluded from the whole resolve.
    :param jars_to_resolve: settled `JarDependency`s to fetch.
    :param coursier_work_temp_dir: scratch dir for coursier's JSON output file.
    :param pinned_coords: coordinates force-pinned with -V.
    :param executor: `pants.java.executor.Executor` used to spawn the JVM.
    """
    # Variable to store coursier result each run.
    results = defaultdict(list)

    # cleanup=False: the temp file must outlive the context manager — coursier
    # writes its JSON output there and _call_coursier reads it back afterwards.
    with temporary_file(coursier_work_temp_dir, cleanup=False) as f:
        output_fn = f.name

    cmd_args = self._construct_cmd_args(
        jars_to_resolve,
        common_args,
        global_excludes if self.get_options().allow_global_excludes else [],
        pinned_coords,
        coursier_work_temp_dir,
        output_fn,
    )

    results["default"].append(self._call_coursier(cmd_args, coursier_jar, output_fn, executor))

    return results
def _get_non_default_conf_results(
    self,
    common_args,
    coursier_jar,
    global_excludes,
    jars_to_resolve,
    coursier_work_temp_dir,
    pinned_coords,
    sources,
    javadoc,
    executor,
):
    """Run coursier once for sources and/or javadoc and return {'src_doc': [<json result>]}.

    :param sources: if True, fetch 'sources' classifier artifacts (--sources).
    :param javadoc: if True, fetch 'javadoc' classifier artifacts (--javadoc).
    :raises TaskError: if neither sources nor javadoc is requested.
    (remaining params as in `_get_default_conf_results`)
    """
    # To prevent improper api usage during development. User should not see this anyway.
    if not sources and not javadoc:
        raise TaskError("sources or javadoc has to be True.")

    # cleanup=False: coursier writes its JSON output to this file and
    # _call_coursier reads it back after the process exits.
    with temporary_file(coursier_work_temp_dir, cleanup=False) as f:
        output_fn = f.name

    results = defaultdict(list)
    new_pinned_coords = []
    new_jars_to_resolve = []
    special_args = []

    # NOTE: an earlier version re-checked `not sources and not javadoc` here and
    # copied the inputs through unchanged; that branch was unreachable given the
    # guard above, so it has been removed.
    if sources:
        special_args.append("--sources")
        new_pinned_coords.extend(c.copy(classifier="sources") for c in pinned_coords)
        new_jars_to_resolve.extend(c.copy(classifier="sources") for c in jars_to_resolve)

    if javadoc:
        special_args.append("--javadoc")
        new_pinned_coords.extend(c.copy(classifier="javadoc") for c in pinned_coords)
        new_jars_to_resolve.extend(c.copy(classifier="javadoc") for c in jars_to_resolve)

    cmd_args = self._construct_cmd_args(
        new_jars_to_resolve,
        common_args,
        global_excludes if self.get_options().allow_global_excludes else [],
        new_pinned_coords,
        coursier_work_temp_dir,
        output_fn,
    )
    cmd_args.extend(special_args)

    # sources and/or javadoc share the same conf
    results["src_doc"] = [self._call_coursier(cmd_args, coursier_jar, output_fn, executor)]
    return results
def _call_coursier(self, cmd_args, coursier_jar, output_fn, executor):
    """Execute the coursier CLI and return its parsed JSON output.

    :param cmd_args: full argument list for the coursier invocation.
    :param coursier_jar: path to the coursier CLI jar.
    :param output_fn: file coursier writes its --json-output-file result to.
    :param executor: `pants.java.executor.Executor` used to spawn the JVM.
    :return: dict parsed from coursier's JSON output file.
    :raises TaskError: if the coursier process exits non-zero.
    """
    runner = executor.runner(
        classpath=[coursier_jar],
        main="coursier.cli.Coursier",
        jvm_options=self.get_options().jvm_options,
        args=cmd_args,
    )

    # When a report was requested the run is surfaced as compiler-style output,
    # otherwise it is labeled as plain tool output.
    labels = [WorkUnitLabel.COMPILER] if self.get_options().report else [WorkUnitLabel.TOOL]

    return_code = util.execute_runner(runner, self.context.new_workunit, "coursier", labels)

    if return_code:
        raise TaskError(f"The coursier process exited non-zero: {return_code}")

    with open(output_fn, "r") as f:
        return json.loads(f.read())
@staticmethod
def _construct_cmd_args(
    jars, common_args, global_excludes, pinned_coords, coursier_workdir, json_output_path
):
    """Build the full argument list for one coursier invocation.

    :param jars: `JarDependency`s to resolve; each must have a rev.
    :param common_args: base args shared by every coursier invocation.
    :param global_excludes: coordinates excluded from the whole resolve (-E).
    :param pinned_coords: coordinates force-pinned with -V.
    :param coursier_workdir: scratch dir used for the local-exclude file.
    :param json_output_path: file coursier should write its JSON result to.
    :return: list of command line argument strings.
    :raises TaskError: if any jar is missing a rev (unsupported by coursier).
    """
    # Make a copy, so there is no side effect or others using `common_args`
    cmd_args = list(common_args)

    cmd_args.extend(["--json-output-file", json_output_path])

    # Dealing with intransitivity and forced versions.
    for j in jars:
        if not j.rev:
            raise TaskError(
                'Undefined revs for jars unsupported by Coursier. "{}"'.format(
                    repr(j.coordinate).replace("M2Coordinate", "jar")
                )
            )

        # Module spec syntax: org:name:rev[,classifier=...][,url=...]
        module = j.coordinate.simple_coord
        if j.coordinate.classifier:
            module += f",classifier={j.coordinate.classifier}"

        if j.get_url():
            jar_url = j.get_url()
            module += f",url={parse.quote_plus(jar_url)}"

        # --intransitive applies to the module argument that follows it.
        if j.intransitive:
            cmd_args.append("--intransitive")

        cmd_args.append(module)

        # Force requires specifying the coord again with -V
        if j.force:
            cmd_args.append("-V")
            cmd_args.append(j.coordinate.simple_coord)

    # Force pinned coordinates
    for m2coord in pinned_coords:
        cmd_args.append("-V")
        cmd_args.append(m2coord.simple_coord)

    # Local exclusions
    local_exclude_args = []
    for jar in jars:
        for ex in jar.excludes:
            # `--` means exclude. See --local-exclude-file in `coursier fetch --help`
            # If ex.name does not exist, that means the whole org needs to be excluded.
            ex_arg = f"{jar.org}:{jar.name}--{ex.org}:{ex.name or '*'}"
            local_exclude_args.append(ex_arg)

    if local_exclude_args:
        # cleanup=False: coursier reads this file after the context manager exits.
        with temporary_file(coursier_workdir, cleanup=False) as f:
            exclude_file = f.name
            with open(exclude_file, "w") as ex_f:
                ex_f.write("\n".join(local_exclude_args))

            cmd_args.append("--local-exclude-file")
            cmd_args.append(exclude_file)

    for ex in global_excludes:
        cmd_args.append("-E")
        cmd_args.append(f"{ex.org}:{ex.name or '*'}")

    return cmd_args
def _load_json_result(
    self,
    conf,
    compile_classpath,
    coursier_cache_path,
    invalidation_check,
    pants_jar_path_base,
    result,
    override_classifiers=None,
):
    """Given a coursier run result, load it into compile_classpath by target.

    :param conf: the conf this result was fetched for (e.g. 'default', 'src_doc').
    :param compile_classpath: `ClasspathProducts` that will be modified
    :param coursier_cache_path: cache location that is managed by coursier
    :param invalidation_check: InvalidationCheck
    :param pants_jar_path_base: location under pants workdir that contains all the hardlinks to coursier cache
    :param result: result dict converted from the json produced by one coursier run
    :param override_classifiers: if given, these classifiers replace each jar's own.
    :return: n/a
    """
    # Parse the coursier result
    flattened_resolution = self._extract_dependencies_by_root(result)

    coord_to_resolved_jars = self._map_coord_to_resolved_jars(
        result, coursier_cache_path, pants_jar_path_base
    )

    # Construct a map from org:name to the reconciled org:name:version coordinate
    # This is used when there is won't be a conflict_resolution entry because the conflict
    # was resolved in pants.
    org_name_to_org_name_rev = {}
    for coord in coord_to_resolved_jars.keys():
        org_name_to_org_name_rev[f"{coord.org}:{coord.name}"] = coord

    jars_per_target = []

    for vt in invalidation_check.all_vts:
        t = vt.target
        jars_to_digest = []
        if isinstance(t, JarLibrary):

            def get_transitive_resolved_jars(my_coord, resolved_jars):
                # Collect the resolved jar for my_coord plus the resolved jars of
                # all its (already flattened) transitive dependencies.
                transitive_jar_path_for_coord = []
                coord_str = str(my_coord)
                if coord_str in flattened_resolution and my_coord in resolved_jars:
                    transitive_jar_path_for_coord.append(resolved_jars[my_coord])

                    for c in flattened_resolution[coord_str]:
                        j = resolved_jars.get(self.to_m2_coord(c))
                        if j:
                            transitive_jar_path_for_coord.append(j)

                return transitive_jar_path_for_coord

            for jar in t.jar_dependencies:
                # if there are override classifiers, then force use of those.
                coord_candidates = []
                if override_classifiers:
                    coord_candidates = [
                        jar.coordinate.copy(classifier=c) for c in override_classifiers
                    ]
                else:
                    coord_candidates = [jar.coordinate]

                # if conflict resolution entries, then update versions to the resolved ones.
                if jar.coordinate.simple_coord in result["conflict_resolution"]:
                    parsed_conflict = self.to_m2_coord(
                        result["conflict_resolution"][jar.coordinate.simple_coord]
                    )
                    coord_candidates = [
                        c.copy(rev=parsed_conflict.rev) for c in coord_candidates
                    ]
                elif f"{jar.coordinate.org}:{jar.coordinate.name}" in org_name_to_org_name_rev:
                    parsed_conflict = org_name_to_org_name_rev[
                        f"{jar.coordinate.org}:{jar.coordinate.name}"
                    ]
                    coord_candidates = [
                        c.copy(rev=parsed_conflict.rev) for c in coord_candidates
                    ]

                for coord in coord_candidates:
                    transitive_resolved_jars = get_transitive_resolved_jars(
                        coord, coord_to_resolved_jars
                    )
                    if transitive_resolved_jars:
                        # NOTE(review): this inner `jar` shadows the outer
                        # `t.jar_dependencies` loop variable; harmless as written,
                        # but fragile if code is ever added after this loop.
                        for jar in transitive_resolved_jars:
                            jars_to_digest.append(jar)

        jars_per_target.append((t, jars_to_digest))

    for target, jars_to_add in self.add_directory_digests_for_jars(jars_per_target):
        if override_classifiers is not None:
            # Register each jar under its (overridden) classifier instead of the conf.
            for jar in jars_to_add:
                compile_classpath.add_jars_for_targets(
                    [target], jar.coordinate.classifier, [jar]
                )
        else:
            compile_classpath.add_jars_for_targets([target], conf, jars_to_add)
def _populate_results_dir(self, vts_results_dir, results):
    """Persist the aggregated coursier results as JSON in the vts results dir."""
    result_path = os.path.join(vts_results_dir, self.RESULT_FILENAME)
    with open(result_path, "w") as out:
        json.dump(results, out)
def _load_from_results_dir(
    self,
    compile_classpath,
    vts_results_dir,
    coursier_cache_path,
    invalidation_check,
    pants_jar_path_base,
):
    """Given vts_results_dir, load the results which can be from multiple runs of coursier
    into compile_classpath.

    :param compile_classpath: `ClasspathProducts` to populate.
    :param vts_results_dir: results dir written by `_populate_results_dir`.
    :param coursier_cache_path: cache location that is managed by coursier.
    :param invalidation_check: InvalidationCheck covering the targets to load.
    :param pants_jar_path_base: dir holding the hardlinks into the coursier cache.
    :return: True if success; False if any of the classpath is not valid anymore.
    """
    result_file_path = os.path.join(vts_results_dir, self.RESULT_FILENAME)
    if not os.path.exists(result_file_path):
        # Previously this returned bare None; callers treat the result as a
        # boolean, so make the failure explicit per the documented contract.
        return False

    with open(result_file_path, "r") as f:
        results = json.load(f)
        for conf, result_list in results.items():
            for result in result_list:
                try:
                    self._load_json_result(
                        conf,
                        compile_classpath,
                        coursier_cache_path,
                        invalidation_check,
                        pants_jar_path_base,
                        result,
                        self._override_classifiers_for_conf(conf),
                    )
                except CoursierResultNotFound:
                    # A recorded jar vanished from the coursier cache; the whole
                    # result set must be re-resolved.
                    return False

    return True
@classmethod
def _extract_dependencies_by_root(cls, result):
"""Only extracts the transitive dependencies for the given coursier resolve. Note the
"dependencies" field is already transitive.
Example:
{
"conflict_resolution": {},
"dependencies": [
{
"coord": "a",
"dependencies": ["b", "c"]
"file": ...
},
{
"coord": "b",
"dependencies": []
"file": ...
},
{
"coord": "c",
"dependencies": []
"file": ...
}
]
}
Should return { "a": ["b", "c"], "b": [], "c": [] }
:param result: coursier result like the example.
:return: a simplified view with the top artifact as the roots.
"""
flat_result = defaultdict(list)
for artifact in result["dependencies"]:
flat_result[artifact["coord"]].extend(artifact["dependencies"])
return flat_result
@classmethod
def _map_coord_to_resolved_jars(cls, result, coursier_cache_path, pants_jar_path_base):
    """Map resolved files to each org:name:version.

    Example:
      {
        "conflict_resolution": {},
        "dependencies": [
          {
            "coord": "a",
            "dependencies": ["b", "c"],
            "file": "a.jar"
          },
          {
            "coord": "b",
            "dependencies": [],
            "file": "b.jar"
          },
          {
            "coord": "c",
            "dependencies": [],
            "file": "c.jar"
          },
          {
            "coord": "a:sources",
            "dependencies": ["b", "c"],
            "file": "a-sources.jar"
          },
        ]
      }

    Should return:
      {
        M2Coordinate("a", ...): ResolvedJar(classifier='', path/cache_path="a.jar"),
        M2Coordinate("a", ..., classifier="sources"): ResolvedJar(classifier='sources', path/cache_path="a-sources.jar"),
        M2Coordinate("b", ...): ResolvedJar(classifier='', path/cache_path="b.jar"),
        M2Coordinate("c", ...): ResolvedJar(classifier='', path/cache_path="c.jar"),
      }

    :param result: coursier json output
    :param coursier_cache_path: coursier cache location
    :param pants_jar_path_base: location under pants workdir to store the hardlink to the coursier cache
    :return: a map from maven coordinate to a resolved jar.
    :raises CoursierResultNotFound: if a recorded jar file no longer exists on disk.
    """
    coord_to_resolved_jars = dict()

    for dep in result["dependencies"]:
        coord = dep["coord"]
        jar_path = dep.get("file", None)
        if not jar_path:
            # NB: Not all coordinates will have associated files.
            # This is fine. Some coordinates will just have dependencies.
            continue

        if not os.path.exists(jar_path):
            raise CoursierResultNotFound(f"Jar path not found: {jar_path}")

        pants_path = cls._get_path_to_jar(coursier_cache_path, pants_jar_path_base, jar_path)

        if not os.path.exists(pants_path):
            safe_mkdir(os.path.dirname(pants_path))
            # Hardlink (or copy across filesystems) so the workdir does not
            # duplicate the jar bytes held by the coursier cache.
            safe_hardlink_or_copy(jar_path, pants_path)

        coord = cls.to_m2_coord(coord)
        resolved_jar = ResolvedJar(coord, cache_path=jar_path, pants_path=pants_path)
        coord_to_resolved_jars[coord] = resolved_jar
    return coord_to_resolved_jars
@classmethod
def to_m2_coord(cls, coord_str):
    """Parse a coursier coordinate string into an `M2Coordinate`."""
    parsed = M2Coordinate.from_string(coord_str)
    return parsed
@classmethod
def _get_path_to_jar(cls, coursier_cache_path, pants_jar_path_base, jar_path):
"""Create the path to the jar that will live in .pants.d.
:param coursier_cache_path: coursier cache location
:param pants_jar_path_base: location under pants workdir to store the hardlink to the coursier cache
:param jar_path: path of the jar
:return:
"""
if os.path.abspath(coursier_cache_path) not in os.path.abspath(jar_path):
# Appending the string 'absolute' to the jar_path and joining that is a hack to work around
# python's os.path.join behavior of throwing away all components that come before an
# absolute path. See https://docs.python.org/3.3/library/os.path.html#os.path.join
return os.path.join(pants_jar_path_base, os.path.normpath("absolute/" + jar_path))
else:
return os.path.join(
pants_jar_path_base, "relative", os.path.relpath(jar_path, coursier_cache_path)
)
class CoursierResolve(CoursierMixin, NailgunTask):
    """Resolves 3rdparty jar dependencies with coursier and publishes them into
    the 'compile_classpath' product."""

    @classmethod
    def subsystem_dependencies(cls):
        return super().subsystem_dependencies() + (JvmResolveSubsystem,)

    @classmethod
    def product_types(cls):
        return ["compile_classpath", "resolve_sources_signal", "resolve_javadocs_signal"]

    @classmethod
    def prepare(cls, options, round_manager):
        super().prepare(options, round_manager)
        # Codegen may inject extra resolvable deps, so make sure we have a product dependency
        # on relevant codegen tasks, if any.
        round_manager.optional_data("java")
        round_manager.optional_data("scala")

    @classmethod
    def register_options(cls, register):
        super().register_options(register)

    @classmethod
    def implementation_version(cls):
        return super().implementation_version() + [("CoursierResolve", 2)]

    def execute(self):
        """Resolve the configured targets' 3rdparty jars into 'compile_classpath'."""
        # A different resolver (e.g. ivy) may be selected globally; then this task is a no-op.
        if JvmResolveSubsystem.global_instance().get_options().resolver != "coursier":
            return

        classpath_products = self.context.products.get_data(
            "compile_classpath",
            init_func=ClasspathProducts.init_func(self.get_options().pants_workdir),
        )
        executor = self.create_java_executor()
        self.resolve(
            self.context.targets(),
            classpath_products,
            sources=self.context.products.is_required_data("resolve_sources_signal"),
            javadoc=self.context.products.is_required_data("resolve_javadocs_signal"),
            executor=executor,
        )

    def check_artifact_cache_for(self, invalidation_check):
        # Coursier resolution is an output dependent on the entire target set, and is not
        # divisible by target, so it can only be cached keyed by the entire target set.
        return [VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)]
class CoursierResolveFingerprintStrategy(FingerprintStrategy):
    """Fingerprints targets for coursier resolves, mixing in the requested confs."""

    def __init__(self, confs):
        super().__init__()
        # Sorted so that the fingerprint is stable regardless of conf ordering.
        self._confs = sorted(confs or [])

    def compute_fingerprint(self, target):
        """Return a hex digest for *target*, or None if it does not affect the resolve."""
        elements = []
        if isinstance(target, JarLibrary):
            managed_set = JarDependencyManagement.global_instance().for_target(target)
            if managed_set:
                elements.append(str(managed_set.id))
            elements.append(target.payload.fingerprint())
        elif isinstance(target, JvmTarget) and target.payload.excludes:
            elements.append(target.payload.fingerprint(field_keys=("excludes",)))

        if not elements:
            return None

        hasher = hashlib.sha1()
        hasher.update(target.payload.fingerprint().encode())
        for conf in self._confs:
            hasher.update(conf.encode())
        for element in elements:
            hasher.update(element.encode())
        # Just in case so we do not collide with ivy cache
        hasher.update(b"coursier")
        return hasher.hexdigest()

    def __hash__(self):
        return hash((type(self), "-".join(self._confs)))

    def __eq__(self, other):
        return type(self) == type(other) and self._confs == other._confs
| 39.257883 | 129 | 0.596856 |
8069146a6f993fabd8ba7f3f5047135357ea2464 | 3,188 | py | Python | kino/skills/samhangsi/generator.py | DongjunLee/kino-bot | 226aa009008e30d76e9253fe47fbe862766bdc83 | [
"MIT"
] | 109 | 2017-04-28T05:42:05.000Z | 2020-06-23T03:27:54.000Z | kino/skills/samhangsi/generator.py | DongjunLee/kino-bot | 226aa009008e30d76e9253fe47fbe862766bdc83 | [
"MIT"
] | 32 | 2016-09-18T16:35:04.000Z | 2019-05-04T16:16:21.000Z | kino/skills/samhangsi/generator.py | DongjunLee/stalker-bot | 226aa009008e30d76e9253fe47fbe862766bdc83 | [
"MIT"
] | 17 | 2017-05-15T14:19:24.000Z | 2020-05-13T15:35:11.000Z | # -- coding: utf-8 -*-
import argparse
from hbconfig import Config
import numpy as np
import tensorflow as tf
from .data_loader import TextLoader
from .model import CharRNN
class SamhangSiGenerator:
    """Generates a samhangsi (Korean acrostic poem) with a trained char-RNN.

    Each character of the input word seeds one sampled line; the lines are then
    combined so that every character of the word starts its own line.
    """

    # Number of characters sampled after each seed character.
    SENTENCE_LENGTH = 20

    def __init__(self):
        self._set_data()
        self._make_estimator()

    def _set_data(self):
        """Load the vocabulary from the training data and build forward/reverse maps."""
        data_loader = TextLoader(Config.data.data_dir)
        Config.data.vocab_size = data_loader.vocab_size

        def get_rev_vocab(vocab):
            if vocab is None:
                return None
            return {idx: key for key, idx in vocab.items()}

        # vocab: char -> id; rev_vocab: id -> char.
        self.vocab = data_loader.vocab
        self.rev_vocab = get_rev_vocab(data_loader.vocab)

    def _make_estimator(self):
        """Build the tf.estimator.Estimator that wraps the CharRNN model_fn."""
        params = tf.contrib.training.HParams(**Config.model.to_dict())
        run_config = tf.contrib.learn.RunConfig(model_dir=Config.train.model_dir)

        char_rnn = CharRNN()
        self.estimator = tf.estimator.Estimator(
            model_fn=char_rnn.model_fn,
            model_dir=Config.train.model_dir,
            params=params,
            config=run_config,
        )

    def generate(self, word):
        """Return the acrostic text for *word*: one sampled sentence per character."""
        result = ""
        for char in word:
            result += self._generate_sentence(char)

        return self._combine_sentence(result, word)

    def _generate_sentence(self, char):
        """Sample SENTENCE_LENGTH characters starting from the seed *char*.

        :raises ValueError: if *char* is not in the training vocabulary.
        """
        if char not in self.vocab:
            raise ValueError(f"'{char}' is not trained. (can use char in vocab)")

        sample = self.vocab[char]
        sentence = [sample]

        for _ in range(self.SENTENCE_LENGTH):
            # Feed the previously sampled id as a 1x1 batch.
            X = np.zeros((1, 1), dtype=np.int32)
            X[0, 0] = sample

            predict_input_fn = tf.estimator.inputs.numpy_input_fn(
                x={"input_data": X}, num_epochs=1, shuffle=False
            )

            # NOTE(review): a fresh predict() call per character re-runs the
            # estimator every step — functional but slow; the only state carried
            # forward is the sampled id itself.
            result = self.estimator.predict(input_fn=predict_input_fn)
            probs = next(result)["probs"]

            def weighted_pick(weights):
                # Sample an index proportionally to its weight (inverse-CDF sampling).
                t = np.cumsum(weights)
                s = np.sum(weights)
                return int(np.searchsorted(t, np.random.rand(1) * s))

            sample = weighted_pick(probs)
            sentence.append(sample)

        # Map sampled ids back to characters; unknown ids become "".
        sentence = list(map(lambda sample: self.rev_vocab.get(sample, ""), sentence))
        sentence = "".join(sentence)
        return sentence

    def _combine_sentence(self, result, word):
        """Insert line breaks so each character of *word* begins a new line.

        NOTE(review): only the first occurrence of each character is replaced
        after flattening newlines — if a seed character also appears earlier in
        the sampled text, the break can land in the wrong place.
        """
        print("word: " + word)
        result = result.replace("\n", " ")
        for char in word[1:]:
            result = result.replace(char, "\n" + char, 1)
        return result
def main(word):
    """Generate a samhangsi for *word* and print it to stdout."""
    generator = SamhangSiGenerator()
    print(generator.generate(word))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--config", type=str, default="config", help="config file name")
    parser.add_argument(
        "--word", type=str, default="삼행시", help="Input Korean word (ex. 삼행시)"
    )
    args = parser.parse_args()

    # Load the named config into the global hbconfig singleton.
    Config(args.config)
    # Generation feeds one character at a time, so batch/sequence length are forced to 1.
    Config.model.batch_size = 1
    Config.model.seq_length = 1

    print("Config: ", Config)

    main(args.word)
| 28.212389 | 88 | 0.611669 |
6e92eabcf1dd675414052aca95fb15b3313eb44a | 384 | py | Python | 0232.implement_queue_using_stacks/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 5 | 2020-05-23T02:18:26.000Z | 2021-07-05T05:36:01.000Z | 0232.implement_queue_using_stacks/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 1 | 2020-06-10T07:17:24.000Z | 2020-07-20T02:21:24.000Z | 0232.implement_queue_using_stacks/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 1 | 2019-04-23T13:01:50.000Z | 2019-04-23T13:01:50.000Z | class MyQueue:
def __init__(self):
    # Backing store for the FIFO queue; the front of the queue is index 0.
    self.queue = []
def push(self, x):
    """Append *x* to the back of the queue."""
    items = self.queue
    items.append(x)
def pop(self):
    """Remove and return the front element, or -1 if the queue is empty."""
    if self.empty():
        return -1
    return self.queue.pop(0)
def peek(self):
    """Return the front element without removing it, or -1 if the queue is empty."""
    return -1 if self.empty() else self.queue[0]
def empty(self):
    """Return True when the queue holds no elements."""
    return len(self.queue) == 0
| 19.2 | 40 | 0.518229 |
dbd19b89b40e55a67b09ee61067ce0abddaeb8a0 | 9,035 | py | Python | tests/svd_test.py | mariogeiger/jax | 7098088f4eb15cf750398889e4341dbc15cda1b3 | [
"Apache-2.0"
] | null | null | null | tests/svd_test.py | mariogeiger/jax | 7098088f4eb15cf750398889e4341dbc15cda1b3 | [
"Apache-2.0"
] | 6 | 2022-01-03T23:12:33.000Z | 2022-02-14T23:13:52.000Z | tests/svd_test.py | mariogeiger/jax | 7098088f4eb15cf750398889e4341dbc15cda1b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Tests for the library of QDWH-based singular value decomposition."""
import functools
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
import scipy.linalg as osp_linalg
from jax._src.lax import svd
from jax._src import test_util as jtu
from absl.testing import absltest
from absl.testing import parameterized
config.parse_flags_with_absl()

# Whether 64-bit mode is enabled; the tolerances below are loosened for 32-bit.
_JAX_ENABLE_X64 = config.x64_enabled

# Input matrix data type for SvdTest.
_SVD_TEST_DTYPE = np.float64 if _JAX_ENABLE_X64 else np.float32

# Machine epsilon used by SvdTest.
_SVD_TEST_EPS = jnp.finfo(_SVD_TEST_DTYPE).eps

# SvdTest relative tolerance.
_SVD_RTOL = 1E-6 if _JAX_ENABLE_X64 else 1E-2

# Largest log10 condition number exercised by the parameterized tests below.
_MAX_LOG_CONDITION_NUM = 9 if _JAX_ENABLE_X64 else 4
@jtu.with_config(jax_numpy_rank_promotion='allow')
class SvdTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
    { # pylint:disable=g-complex-comprehension
        'testcase_name': '_m={}_by_n={}_log_cond={}_full_matrices={}'.format(
            m, n, log_cond, full_matrices),
        'm': m, 'n': n, 'log_cond': log_cond, 'full_matrices': full_matrices}
    for m, n in zip([2, 8, 10, 20], [4, 6, 10, 18])
    for log_cond in np.linspace(1, _MAX_LOG_CONDITION_NUM, 4)
    for full_matrices in [True, False]))
def testSvdWithRectangularInput(self, m, n, log_cond, full_matrices):
  """Tests SVD with rectangular input."""
  with jax.default_matmul_precision('float32'):
    # Build a complex m-by-n test matrix whose singular values are rescaled to
    # span [1, 10**log_cond], i.e. with a controlled condition number.
    a = np.random.uniform(
        low=0.3, high=0.9, size=(m, n)).astype(_SVD_TEST_DTYPE)
    u, s, v = osp_linalg.svd(a, full_matrices=False)
    cond = 10**log_cond
    s = jnp.linspace(cond, 1, min(m, n))
    a = (u * s) @ v
    a = a + 1j * a

    osp_linalg_fn = functools.partial(
        osp_linalg.svd, full_matrices=full_matrices)
    actual_u, actual_s, actual_v = svd.svd(a, full_matrices=full_matrices)

    k = min(m, n)
    # Orientation determines which unitarity product is the identity and its
    # expected size (which also depends on full_matrices).
    if m > n:
      unitary_u = jnp.real(actual_u.T.conj() @ actual_u)
      unitary_v = jnp.real(actual_v.T.conj() @ actual_v)
      unitary_u_size = m if full_matrices else k
      unitary_v_size = k
    else:
      unitary_u = jnp.real(actual_u @ actual_u.T.conj())
      unitary_v = jnp.real(actual_v @ actual_v.T.conj())
      unitary_u_size = k
      unitary_v_size = n if full_matrices else k

    _, expected_s, _ = osp_linalg_fn(a)

    svd_fn = lambda a: svd.svd(a, full_matrices=full_matrices)
    args_maker = lambda: [a]

    with self.subTest('Test JIT compatibility'):
      self._CompileAndCheck(svd_fn, args_maker)

    with self.subTest('Test unitary u.'):
      self.assertAllClose(np.eye(unitary_u_size), unitary_u, rtol=_SVD_RTOL,
                          atol=2E-3)

    with self.subTest('Test unitary v.'):
      self.assertAllClose(np.eye(unitary_v_size), unitary_v, rtol=_SVD_RTOL,
                          atol=2E-3)

    with self.subTest('Test s.'):
      # Singular values are compared against scipy's; only the real part of the
      # returned s is checked.
      self.assertAllClose(
          expected_s, jnp.real(actual_s), rtol=_SVD_RTOL, atol=1E-6)
@parameterized.named_parameters(jtu.cases_from_list(
    {'testcase_name': f'_m={m}_by_n={n}', 'm': m, 'n': n}
    for m, n in zip([50, 6], [3, 60])))
def testSvdWithSkinnyTallInput(self, m, n):
  """Tests SVD with skinny and tall input."""
  # Check the relative reconstruction error of the factorization of a random
  # skinny/tall matrix.
  with jax.default_matmul_precision('float32'):
    np.random.seed(1235)
    matrix = np.random.randn(m, n).astype(_SVD_TEST_DTYPE)
    left, sigma, right = svd.svd(matrix, full_matrices=False, hermitian=False)
    reconstructed = (left * sigma) @ right
    relative_diff = np.linalg.norm(matrix - reconstructed) / np.linalg.norm(matrix)
    np.testing.assert_almost_equal(relative_diff, 1E-6, decimal=6)
@parameterized.named_parameters(jtu.cases_from_list(
    { # pylint:disable=g-complex-comprehension
        'testcase_name': f'_m={m}_r={r}_log_cond={log_cond}',
        'm': m, 'r': r, 'log_cond': log_cond}
    for m, r in zip([8, 8, 8, 10], [3, 5, 7, 9])
    for log_cond in np.linspace(1, 3, 3)))
def testSvdWithOnRankDeficientInput(self, m, r, log_cond):
  """Tests SVD with rank-deficient input."""
  with jax.default_matmul_precision('float32'):
    a = jnp.triu(jnp.ones((m, m))).astype(_SVD_TEST_DTYPE)

    # Generates a rank-deficient input: the smallest m - r singular values are
    # zeroed out, leaving a rank-r matrix with condition number 10**log_cond.
    u, s, v = jnp.linalg.svd(a, full_matrices=False)
    cond = 10**log_cond
    s = jnp.linspace(cond, 1, m)
    s = s.at[r:m].set(jnp.zeros((m-r,)))
    a = (u * s) @ v

  with jax.default_matmul_precision('float32'):
    u, s, v = svd.svd(a, full_matrices=False, hermitian=False)
    diff = np.linalg.norm(a - (u * s) @ v)
    # NOTE(review): this asserts the reconstruction error is approximately 1E-4
    # to 2 decimal places, not that it is below a bound — confirm intended.
    np.testing.assert_almost_equal(diff, 1E-4, decimal=2)
@parameterized.named_parameters(jtu.cases_from_list(
    { # pylint:disable=g-complex-comprehension
        'testcase_name': '_m={}_by_n={}_log_cond={}_full_matrices={}'.format(
            m, n, log_cond, full_matrices),
        'm': m, 'n': n, 'log_cond': log_cond, 'full_matrices': full_matrices}
    for m, n in zip([2, 8, 10, 20], [4, 6, 10, 18])
    for log_cond in np.linspace(1, _MAX_LOG_CONDITION_NUM, 4)
    for full_matrices in [True, False]))
def testSingularValues(self, m, n, log_cond, full_matrices):
  """Tests singular values."""
  with jax.default_matmul_precision('float32'):
    # Build a complex matrix with prescribed singular values spanning
    # [1, 10**log_cond].
    a = np.random.uniform(
        low=0.3, high=0.9, size=(m, n)).astype(_SVD_TEST_DTYPE)
    u, s, v = osp_linalg.svd(a, full_matrices=False)
    cond = 10**log_cond
    s = np.linspace(cond, 1, min(m, n))
    a = (u * s) @ v
    a = a + 1j * a

    # Only computes singular values.
    compute_uv = False

    osp_linalg_fn = functools.partial(
        osp_linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv)
    actual_s = svd.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)

    expected_s = osp_linalg_fn(a)

    svd_fn = lambda a: svd.svd(a, full_matrices=full_matrices)
    args_maker = lambda: [a]

    with self.subTest('Test JIT compatibility'):
      self._CompileAndCheck(svd_fn, args_maker)

    with self.subTest('Test s.'):
      self.assertAllClose(expected_s, actual_s, rtol=_SVD_RTOL, atol=1E-6)

    with self.subTest('Test non-increasing order.'):
      # Computes `actual_diff[i] = s[i+1] - s[i]`; every difference must be
      # strictly negative for the singular values to be sorted descending.
      actual_diff = jnp.diff(actual_s, append=0)
      np.testing.assert_array_less(actual_diff, np.zeros_like(actual_diff))
@parameterized.named_parameters([
{'testcase_name': f'_m={m}_by_n={n}_full_matrices={full_matrices}_' # pylint:disable=g-complex-comprehension
f'compute_uv={compute_uv}_dtype={dtype}',
'm': m, 'n': n, 'full_matrices': full_matrices, # pylint:disable=undefined-variable
'compute_uv': compute_uv, 'dtype': dtype} # pylint:disable=undefined-variable
for m, n in zip([2, 4, 8], [4, 4, 6])
for full_matrices in [True, False]
for compute_uv in [True, False]
for dtype in jtu.dtypes.floating + jtu.dtypes.complex
])
def testSvdOnZero(self, m, n, full_matrices, compute_uv, dtype):
"""Tests SVD on matrix of all zeros."""
osp_fun = functools.partial(osp_linalg.svd, full_matrices=full_matrices,
compute_uv=compute_uv)
lax_fun = functools.partial(svd.svd, full_matrices=full_matrices,
compute_uv=compute_uv)
args_maker_svd = lambda: [jnp.zeros((m, n), dtype=dtype)]
self._CheckAgainstNumpy(osp_fun, lax_fun, args_maker_svd)
self._CompileAndCheck(lax_fun, args_maker_svd)
@parameterized.named_parameters([
{'testcase_name': f'_m={m}_by_n={n}_r={r}_c={c}_dtype={dtype}',
'm': m, 'n': n, 'r': r, 'c': c, 'dtype': dtype}
for m, n, r, c in zip([2, 4, 8], [4, 4, 6], [1, 0, 1], [1, 0, 1])
for dtype in jtu.dtypes.floating
])
def testSvdOnTinyElement(self, m, n, r, c, dtype):
"""Tests SVD on matrix of zeros and close-to-zero entries."""
a = jnp.zeros((m, n), dtype=dtype)
tiny_element = jnp.finfo(a).tiny
a = a.at[r, c].set(tiny_element)
@jax.jit
def lax_fun(a):
return svd.svd(a, full_matrices=False, compute_uv=False, hermitian=False)
actual_s = lax_fun(a)
k = min(m, n)
expected_s = np.zeros((k,), dtype=dtype)
expected_s[0] = tiny_element
self.assertAllClose(expected_s, jnp.real(actual_s), rtol=_SVD_RTOL,
atol=1E-6)
if __name__ == '__main__':
  # Entry point: run the test suite under absltest with JAX's test loader.
  absltest.main(testLoader=jtu.JaxTestLoader())
| 38.776824 | 115 | 0.651467 |
01604b6339719cd95e51c95e01a3907fedc3d41b | 13,569 | py | Python | testing/test_pdb.py | emabdeen/pytest | e71a71b1fe8fe6fb341b5d273e77052bd9490da9 | [
"MIT"
] | 1 | 2018-02-26T09:45:53.000Z | 2018-02-26T09:45:53.000Z | testing/test_pdb.py | emabdeen/pytest | e71a71b1fe8fe6fb341b5d273e77052bd9490da9 | [
"MIT"
] | null | null | null | testing/test_pdb.py | emabdeen/pytest | e71a71b1fe8fe6fb341b5d273e77052bd9490da9 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import sys
import platform
import _pytest._code
import pytest
def runpdb_and_get_report(testdir, source):
    """Run *source* in-process under ``--pdb`` and return the call-phase report."""
    testfile = testdir.makepyfile(source)
    outcome = testdir.runpytest_inprocess("--pdb", testfile)
    reports = outcome.reprec.getreports("pytest_runtest_logreport")
    # A single test produces the setup/call/teardown report triple.
    assert len(reports) == 3, reports
    return reports[1]
@pytest.fixture
def custom_pdb_calls():
    """Install a dummy debugger class on ``_pytest`` and return its call log."""
    calls = []
    class _CustomPdb(object):
        # Every debugger entry point simply records that it was invoked.
        def __init__(self, *args, **kwargs):
            calls.append("init")
        def reset(self):
            calls.append("reset")
        def interaction(self, *args):
            calls.append("interaction")
    # Expose the stub where ``--pdbcls=_pytest:_CustomPdb`` will look it up.
    _pytest._CustomPdb = _CustomPdb
    return calls
class TestPDB(object):
    """Integration tests for ``--pdb``, ``--pdbcls`` and ``pytest.set_trace``.

    Most tests spawn a real pytest child process (pexpect-based
    ``testdir.spawn_pytest``) and drive the resulting ``(Pdb)`` prompt
    interactively, asserting on the terminal conversation.
    """
    @pytest.fixture
    def pdblist(self, request):
        """Replace the debugging plugin's ``post_mortem`` with a recorder.

        Returns the list that accumulates the arguments of every
        post-mortem invocation, so tests can assert when it was entered.
        """
        monkeypatch = request.getfixturevalue("monkeypatch")
        pdblist = []
        def mypdb(*args):
            pdblist.append(args)
        plugin = request.config.pluginmanager.getplugin('debugging')
        monkeypatch.setattr(plugin, 'post_mortem', mypdb)
        return pdblist
    def test_pdb_on_fail(self, testdir, pdblist):
        """A failing test triggers exactly one post-mortem session."""
        rep = runpdb_and_get_report(testdir, """
            def test_func():
                assert 0
        """)
        assert rep.failed
        assert len(pdblist) == 1
        # The recorded traceback must end in the failing test function.
        tb = _pytest._code.Traceback(pdblist[0][0])
        assert tb[-1].name == "test_func"
    def test_pdb_on_xfail(self, testdir, pdblist):
        """An xfailing test must not enter the debugger."""
        rep = runpdb_and_get_report(testdir, """
            import pytest
            @pytest.mark.xfail
            def test_func():
                assert 0
        """)
        assert "xfail" in rep.keywords
        assert not pdblist
    def test_pdb_on_skip(self, testdir, pdblist):
        """A skipped test must not enter the debugger."""
        rep = runpdb_and_get_report(testdir, """
            import pytest
            def test_func():
                pytest.skip("hello")
        """)
        assert rep.skipped
        assert len(pdblist) == 0
    def test_pdb_on_BdbQuit(self, testdir, pdblist):
        """Raising BdbQuit fails the test without re-entering post-mortem."""
        rep = runpdb_and_get_report(testdir, """
            import bdb
            def test_func():
                raise bdb.BdbQuit
        """)
        assert rep.failed
        assert len(pdblist) == 0
    def test_pdb_interaction(self, testdir):
        """--pdb drops into an interactive prompt showing the failing frame."""
        p1 = testdir.makepyfile("""
            def test_1():
                i = 0
                assert i == 1
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*i = 0")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        # The traceback must not be printed a second time after leaving pdb.
        assert "def test_1" not in rest
        self.flush(child)
    @staticmethod
    def flush(child):
        """Wait for the spawned child process to finish (no-op on macOS)."""
        if platform.system() == 'Darwin':
            return
        if child.isalive():
            child.wait()
    def test_pdb_unittest_postmortem(self, testdir):
        """Instance state set by the test is still visible at the prompt."""
        p1 = testdir.makepyfile("""
            import unittest
            class Blub(unittest.TestCase):
                def tearDown(self):
                    self.filename = None
                def test_false(self):
                    self.filename = 'debug' + '.me'
                    assert 0
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect('(Pdb)')
        child.sendline('p self.filename')
        child.sendeof()
        rest = child.read().decode("utf8")
        # tearDown would reset the attribute, so seeing 'debug.me' shows that
        # post-mortem happened with the test's state intact.
        assert 'debug.me' in rest
        self.flush(child)
    def test_pdb_unittest_skip(self, testdir):
        """Test for issue #2137"""
        p1 = testdir.makepyfile("""
            import unittest
            @unittest.skipIf(True, 'Skipping also with pdb active')
            class MyTestCase(unittest.TestCase):
                def test_one(self):
                    assert 0
        """)
        child = testdir.spawn_pytest("-rs --pdb %s" % p1)
        child.expect('Skipping also with pdb active')
        child.expect('1 skipped in')
        child.sendeof()
        self.flush(child)
    def test_pdb_print_captured_stdout(self, testdir):
        """Captured stdout of the failing test is shown before the prompt."""
        p1 = testdir.makepyfile("""
            def test_1():
                print("get\\x20rekt")
                assert False
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("captured stdout")
        child.expect("get rekt")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        # The captured output must not be repeated after the session ends.
        assert "get rekt" not in rest
        self.flush(child)
    def test_pdb_print_captured_stderr(self, testdir):
        """Captured stderr of the failing test is shown before the prompt."""
        p1 = testdir.makepyfile("""
            def test_1():
                import sys
                sys.stderr.write("get\\x20rekt")
                assert False
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("captured stderr")
        child.expect("get rekt")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "get rekt" not in rest
        self.flush(child)
    def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):
        """No 'captured ...' sections appear when nothing was captured."""
        p1 = testdir.makepyfile("""
            def test_1():
                assert False
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("(Pdb)")
        output = child.before.decode("utf8")
        child.sendeof()
        assert "captured stdout" not in output
        assert "captured stderr" not in output
        self.flush(child)
    def test_pdb_interaction_exception(self, testdir):
        """Names from the failing frame can be evaluated at the prompt."""
        p1 = testdir.makepyfile("""
            import pytest
            def globalfunc():
                pass
            def test_1():
                pytest.raises(ValueError, globalfunc)
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*pytest.raises.*globalfunc")
        child.expect("(Pdb)")
        child.sendline("globalfunc")
        child.expect(".*function")
        child.sendeof()
        child.expect("1 failed")
        self.flush(child)
    def test_pdb_interaction_on_collection_issue181(self, testdir):
        """--pdb also enters the debugger on collection errors (issue #181)."""
        p1 = testdir.makepyfile("""
            import pytest
            xxx
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        # child.expect(".*import pytest.*")
        child.expect("(Pdb)")
        child.sendeof()
        child.expect("1 error")
        self.flush(child)
    def test_pdb_interaction_on_internal_error(self, testdir):
        """--pdb also enters the debugger on internal errors raised by hooks."""
        testdir.makeconftest("""
            def pytest_runtest_protocol():
                0/0
        """)
        p1 = testdir.makepyfile("def test_func(): pass")
        child = testdir.spawn_pytest("--pdb %s" % p1)
        # child.expect(".*import pytest.*")
        child.expect("(Pdb)")
        child.sendeof()
        self.flush(child)
    def test_pdb_interaction_capturing_simple(self, testdir):
        """pytest.set_trace() suspends capturing and shows earlier output."""
        p1 = testdir.makepyfile("""
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf-8")
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest  # out is captured
        self.flush(child)
    def test_pdb_set_trace_interception(self, testdir):
        """Plain pdb.set_trace() is intercepted while capturing is active."""
        p1 = testdir.makepyfile("""
            import pdb
            def test_1():
                pdb.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "reading from stdin while output" not in rest
        self.flush(child)
    def test_pdb_and_capsys(self, testdir):
        """capsys.readouterr() can be called from inside the debugger prompt."""
        p1 = testdir.makepyfile("""
            import pytest
            def test_1(capsys):
                print ("hello1")
                pytest.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.send("capsys.readouterr()\n")
        child.expect("hello1")
        child.sendeof()
        child.read()
        self.flush(child)
    def test_set_trace_capturing_afterwards(self, testdir):
        """Capturing is restored for tests that run after a set_trace() session."""
        p1 = testdir.makepyfile("""
            import pdb
            def test_1():
                pdb.set_trace()
            def test_2():
                print ("hello")
                assert 0
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.send("c\n")
        child.expect("test_2")
        child.expect("Captured")
        child.expect("hello")
        child.sendeof()
        child.read()
        self.flush(child)
    def test_pdb_interaction_doctest(self, testdir):
        """--pdb enters the debugger on doctest failures as well."""
        p1 = testdir.makepyfile("""
            import pytest
            def function_1():
                '''
                >>> i = 0
                >>> assert i == 1
                '''
        """)
        child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
        child.expect("(Pdb)")
        child.sendline('i')
        child.expect("0")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        self.flush(child)
    def test_pdb_interaction_capturing_twice(self, testdir):
        """Two set_trace() calls in one test both surface the captured output."""
        p1 = testdir.makepyfile("""
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
                print ("hello18")
                pytest.set_trace()
                x = 4
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("(Pdb)")
        child.sendline('c')
        child.expect("x = 4")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest  # out is captured
        assert "hello18" in rest  # out is captured
        self.flush(child)
    def test_pdb_used_outside_test(self, testdir):
        """set_trace() also works in a plain script run outside of pytest."""
        p1 = testdir.makepyfile("""
            import pytest
            pytest.set_trace()
            x = 5
        """)
        child = testdir.spawn("%s %s" % (sys.executable, p1))
        child.expect("x = 5")
        child.sendeof()
        self.flush(child)
    def test_pdb_used_in_generate_tests(self, testdir):
        """set_trace() works inside pytest_generate_tests during collection."""
        p1 = testdir.makepyfile("""
            import pytest
            def pytest_generate_tests(metafunc):
                pytest.set_trace()
                x = 5
            def test_foo(a):
                pass
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("x = 5")
        child.sendeof()
        self.flush(child)
    def test_pdb_collection_failure_is_shown(self, testdir):
        """The collection error itself is printed, not hidden by --pdb."""
        p1 = testdir.makepyfile("xxx")
        result = testdir.runpytest_subprocess("--pdb", p1)
        result.stdout.fnmatch_lines([
            "*NameError*xxx*",
            "*1 error*",
        ])
    def test_enter_pdb_hook_is_called(self, testdir):
        """The pytest_enter_pdb hook fires and receives the config object."""
        testdir.makeconftest("""
            def pytest_enter_pdb(config):
                assert config.testing_verification == 'configured'
                print 'enter_pdb_hook'
            def pytest_configure(config):
                config.testing_verification = 'configured'
        """)
        p1 = testdir.makepyfile("""
            import pytest
            def test_foo():
                pytest.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("enter_pdb_hook")
        child.send('c\n')
        child.sendeof()
        self.flush(child)
    def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
        """--pdb with --pdbcls drives the custom debugger class."""
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_inprocess(
            "--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
        result.stdout.fnmatch_lines([
            "*NameError*xxx*",
            "*1 error*",
        ])
        assert custom_pdb_calls == ["init", "reset", "interaction"]
    def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
        """--pdbcls alone (without --pdb) must not invoke the debugger."""
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_inprocess(
            "--pdbcls=_pytest:_CustomPdb", p1)
        result.stdout.fnmatch_lines([
            "*NameError*xxx*",
            "*1 error*",
        ])
        assert custom_pdb_calls == []
    def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
        """pytest.set_trace() honors a custom --pdbcls implementation."""
        testdir.makepyfile(custom_pdb="""
            class CustomPdb(object):
                def set_trace(*args, **kwargs):
                    print 'custom set_trace>'
        """)
        p1 = testdir.makepyfile("""
            import pytest
            def test_foo():
                pytest.set_trace()
        """)
        # Make the custom_pdb module importable by the spawned child process.
        monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir))
        child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
        child.expect('custom set_trace>')
        self.flush(child)
| 31.050343 | 82 | 0.539096 |
020ee12201b9c9cd9f39693d121fef8cd1264c61 | 1,260 | py | Python | tests/test_hw1.py | vcu-aldaniels/root_homework1 | a6271280376b0f9aae6f4e6341f89cae703ab0cb | [
"Apache-2.0"
] | null | null | null | tests/test_hw1.py | vcu-aldaniels/root_homework1 | a6271280376b0f9aae6f4e6341f89cae703ab0cb | [
"Apache-2.0"
] | null | null | null | tests/test_hw1.py | vcu-aldaniels/root_homework1 | a6271280376b0f9aae6f4e6341f89cae703ab0cb | [
"Apache-2.0"
] | null | null | null | """Test cases are important."""
from homework1 import hw1
def test_return_number_3():
    """Make sure the return of the function is 3, integer"""
    expected = 3
    assert hw1.return_number_3() == expected
def test_return_string_vcu():
    """Make sure the return of the function is the string vcu"""
    # The original asserted the function object itself, which is always
    # truthy, so the test could never fail. Call the function and check the
    # documented value instead.
    assert hw1.return_string_vcu() == "vcu"
def test_return_lowercased_string():
    """Make sure the return of the function is the lowercased version of the parameter."""
    cases = [
        ("HI THERE MOM", "hi there mom"),
        ("vcu.edu", "vcu.edu"),
        ("We Wish You a Merry Monday", "we wish you a merry monday"),
    ]
    for raw, expected in cases:
        assert hw1.return_lowercased_string(raw) == expected
def test_return_without_starting_ending_whitespace():
    """Make sure the function strips the whitespace """
    result = hw1.return_without_starting_ending_whitespace("  asdfasdf  ")
    assert result == "asdfasdf"
def test_return_addition():
    """ Make sure the function returns the sum of the two parameters. """
    cases = [
        (1, 2, 3),
        (100, 200, 300),
        (43, 43, 86),
        (50, 25, 75),
        (19, -19, 0),
    ]
    for left, right, expected in cases:
        assert hw1.return_addition(left, right) == expected
| 30.731707 | 90 | 0.692063 |
a169a142c2813569f87dc5eb11dfcca7dc2f9361 | 539 | py | Python | sdk/SpellCheck/spell_check_client/__init__.py | WMRamadan/bing-search-sdk-for-python | 276d9cd6963c939081b3dec91bdd9aded42b3b35 | [
"MIT"
] | 12 | 2021-03-11T20:24:12.000Z | 2022-02-10T22:55:03.000Z | sdk/SpellCheck/spell_check_client/__init__.py | WMRamadan/bing-search-sdk-for-python | 276d9cd6963c939081b3dec91bdd9aded42b3b35 | [
"MIT"
] | null | null | null | sdk/SpellCheck/spell_check_client/__init__.py | WMRamadan/bing-search-sdk-for-python | 276d9cd6963c939081b3dec91bdd9aded42b3b35 | [
"MIT"
] | 10 | 2021-03-09T17:02:48.000Z | 2022-02-12T18:40:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6320, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# Re-export the generated client as the package's public API.
from ._spell_check_client import SpellCheckClient
__all__ = ['SpellCheckClient']
# Optionally apply hand-written customizations from a sibling ``_patch``
# module; a plain generated package ships without it, so the ImportError
# is expected and deliberately ignored.
try:
    from ._patch import patch_sdk # type: ignore
    patch_sdk()
except ImportError:
    pass
| 35.933333 | 102 | 0.539889 |
574ede8c2d7b6644979c0387e7212e2b4b08bb47 | 2,403 | py | Python | node_server/__init__.py | sommersoft/RosiePi_Node_Server | 2cb46ee8e7e9f1a42f76adc83ea9d17dc4f61335 | [
"MIT"
] | null | null | null | node_server/__init__.py | sommersoft/RosiePi_Node_Server | 2cb46ee8e7e9f1a42f76adc83ea9d17dc4f61335 | [
"MIT"
] | 1 | 2020-06-06T16:03:37.000Z | 2020-06-06T16:03:37.000Z | node_server/__init__.py | sommersoft/RosiePi_Node_Server | 2cb46ee8e7e9f1a42f76adc83ea9d17dc4f61335 | [
"MIT"
] | 1 | 2020-06-06T15:50:58.000Z | 2020-06-06T15:50:58.000Z | # The MIT License (MIT)
#
# Copyright (c) 2019 Michael Schroeder
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
from flask import Flask, jsonify, request
from . import api, verify_sig
def create_app(test_config=None):
    """Application factory for the node server.

    Args:
        test_config: optional mapping of config overrides (used by tests).
            When None, ``instance/config.py`` is loaded if it exists.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev'
    )
    if test_config is None:
        # Load deployment overrides from the instance folder, if present.
        app.config.from_pyfile('config.py', silent=True)
    else:
        app.config.from_mapping(test_config)
    # Ensure the instance folder exists; ignore "already exists" errors.
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    @app.before_request
    def check_http_sig():
        # Every request must carry a verifiable HTTP signature: 500 when the
        # server-side signing config is missing, 401 when verification fails.
        verified = verify_sig.VerifySig()
        if not verified.config.sections() or not verified.node_sig_key:
            raise api.InvalidUsage('Authorization could not be verified',
                                   status_code=500)
        if not verified.verify_signature(request):
            raise api.InvalidUsage('Authorization failed.',
                                   status_code=401)
    @app.errorhandler(api.InvalidUsage)
    def handle_invalid_usage(error):
        # Render InvalidUsage exceptions as JSON with their status code.
        response = jsonify(error.to_dict())
        response.status_code = error.status_code
        return response
    app.add_url_rule('/status', view_func=api.NodeStatus.as_view('status'))
    app.add_url_rule('/run-test', view_func=api.RunTest.as_view('run-test'))
    return app
| 36.409091 | 79 | 0.708697 |
303b1b497be89beee20b296f5e5f046d483e102f | 906 | py | Python | Base_Awesome_Collect/save_local.py | Anant/awesome-transform | a11daa9a821da8d0add74a205b1af370b38d6799 | [
"Apache-2.0"
] | 1 | 2018-08-28T10:09:16.000Z | 2018-08-28T10:09:16.000Z | Base_Awesome_Collect/save_local.py | Anant/awesome-transform | a11daa9a821da8d0add74a205b1af370b38d6799 | [
"Apache-2.0"
] | 2 | 2018-03-20T22:27:55.000Z | 2018-03-20T23:00:01.000Z | Base_Awesome_Collect/save_local.py | Anant/awesome-transform | a11daa9a821da8d0add74a205b1af370b38d6799 | [
"Apache-2.0"
] | null | null | null | from app import db
from app.model import Document
from sqlalchemy.orm import Session
import createDb
# Create a db session
# NOTE: a single module-level session is shared by every helper below.
sess = Session(db)
def pr_insert_document(docId, url):
    # Store processing state in sql
    # Insert a new Document row unless one with this url already exists.
    sqldoc = Document(docId, url)
    if documentProcessed(url) == 0:
        sess.add(sqldoc)
        sess.commit()
    else:
        # NOTE(review): tick() is invoked on the freshly created, never-added
        # object rather than on the row already stored in the database, and
        # no commit follows -- verify this is the intended behavior.
        sqldoc.tick()
def documentProcessed(url):
    """Return 1 if a Document row exists for *url*, else 0.

    Database errors are reported on stdout and treated as "not processed"
    so callers fall back to inserting the document (best-effort behavior).
    """
    try:
        sqldoc = sess.query(Document).filter(Document.url == url).first()
    except Exception as e:
        print("Error while retrieving document from db ", str(e))
        return 0
    # `.first()` returns None when no row matches; compare with `is None`
    # (PEP 8) instead of the original `== None`.
    return 0 if sqldoc is None else 1
def initialiseDB():
    """Create the database when a probe query shows it is not present yet."""
    try:
        # Any successful query means the schema already exists.
        sess.query(Document).first()
        print("Db already present")
    except Exception as e:
        createDb.initialiseDB()
        print("Error while retrieving document from db ", str(e))
648e548465530a01b746313eeb666f815eed931c | 11,166 | py | Python | examples/Ni__eam__born_exp_rose/01_preconditioning_3.5NN/data/Ni__eam__born_exp_rose.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 4 | 2018-01-18T19:59:56.000Z | 2020-08-25T11:56:52.000Z | examples/Ni__eam__born_exp_rose/01_preconditioning_3.5NN/data/Ni__eam__born_exp_rose.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 1 | 2018-04-22T23:02:13.000Z | 2018-04-22T23:02:13.000Z | examples/Ni__eam__born_exp_rose/01_preconditioning_3.5NN/data/Ni__eam__born_exp_rose.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | [
"MIT"
] | 1 | 2019-09-14T07:04:42.000Z | 2019-09-14T07:04:42.000Z | import os
from collections import OrderedDict
from pypospack.qoi import QoiDatabase
import pypospack.utils
#------------------------------------------------------------------------------
# CONFIGURATION SECTION FOR PYPOSMAT PARETO FITTING
#------------------------------------------------------------------------------
# <---------------- SAMPLING CONFIGURATION
sampling = OrderedDict()
sampling['n_iterations'] = 10
sampling['mc_seed'] = None
# <---------------- INITIAL DEFAULT CONFIGURATION
for i in range(sampling['n_iterations']):
sampling[i] = OrderedDict()
sampling[i]['type'] = 'kde'
sampling[i]['n_samples'] = 20
# <---------------- OVERRIDE DEFAULT CONFIGURATION, FOR I=0
sampling[0]['type'] = 'from_file'
sampling[0]['file']= 'data/pyposmat.kde.0.out'
#-----------------------------------------------------------------------------
# DEFINE POTENTIAL FORMALISM
#-----------------------------------------------------------------------------
potential_formalism = OrderedDict()
potential_formalism['potential_type'] = 'eam'
potential_formalism['symbols'] = ['Ni']
potential_formalism['setfl_filename'] = None
potential_formalism['pair_type'] = 'bornmayer'
potential_formalism['density_type'] = 'eam_dens_exp'
potential_formalism['embedding_type'] = 'eam_embed_eos_rose'
# <---------------- THESE ARE NECESSARY FOR DETERMINING THE SETFL FILE
potential_formalism['N_r'] = 10000
potential_formalism['r_max'] = 10.0
potential_formalism['r_cut'] = 10.0
potential_formalism['N_rho'] = 10000
potential_formalism['rho_max'] = 1000.0
potential_formalism['a0'] = 3.52
potential_formalism['lattice_type'] = 'fcc'
# <---------------- INITIAL PARAMETER DEFINITION
a0 = 3.52
re = 1/(2**0.5)*a0
parameter_distribution = OrderedDict()
parameter_distribution['p_NiNi_phi0'] = [
'uniform',{
'a':+0.05,
'b':+4.00}]
parameter_distribution['p_NiNi_gamma'] = [
'uniform',{
'a':2.00,
'b':7.00}]
parameter_distribution['p_NiNi_r0'] = [
'equals',re]
parameter_distribution['d_Ni_rho0'] = [
'uniform',{
'a':1.0,
'b':4.0}]
parameter_distribution['d_Ni_beta'] = [
'uniform',{
'a':2.0000,
'b':7.0000}]
parameter_distribution['d_Ni_r0'] = [
'equals',re]
# parameters - Rose Equation of State
parameter_distribution['e_Ni_latticetype'] = [
'equals','fcc']
parameter_distribution['e_Ni_ecoh'] = [
'equals',-4.45]
parameter_distribution['e_Ni_B'] = [
'equals',188.]
parameter_distribution['e_Ni_a0'] = [
'equals',a0]
#------------------------------------------------------------------------------
# PARAMETER CONSTRAINTS
#------------------------------------------------------------------------------
parameter_constraints = OrderedDict()
parameter_constraints['p_NiNi_phi0 > 0'] = 'p_NiNi_phi0 > 0.'
parameter_constraints['p_NiNi_gamma > 0'] = 'p_NiNi_gamma > 0.'
parameter_constraints['d_Ni_rho0 > 0'] = 'd_Ni_rho0 > 0.'
parameter_constraints['d_Ni_beta > 0'] = 'd_Ni_beta > 0.'
#------------------------------------------------------------------------------
# STRUCTURE DATABASE DEFINITION
#------------------------------------------------------------------------------
pypospack_root_dir = [v.strip() for v in os.environ['PYTHONPATH'].split(':') if v.endswith('pypospack')][0]
structure_db = OrderedDict()
structure_db['structure_directory'] = os.path.join(pypospack_root_dir,'data/Ni_structure_db')
structure_db['structures'] = OrderedDict()
structure_db['structures']['Ni_fcc'] = 'Ni_fcc_100_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_bcc'] = 'Ni_bcc_100_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_sc'] = 'Ni_sc_100_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_hcp'] = 'Ni_hcp_ortho.vasp'
structure_db['structures']['Ni_dia'] = 'Ni_dia_100_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_fcc_100_unit'] = 'Ni_fcc_100_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_fcc_110_unit'] = 'Ni_fcc_110_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_fcc_111_unit'] = 'Ni_fcc_111_unit.gga.relaxed.vasp'
structure_db['structures']['Ni_fcc_100_s'] = 'Ni_fcc_100_surf.vasp'
structure_db['structures']['Ni_fcc_110_s'] = 'Ni_fcc_110_surf.vasp'
structure_db['structures']['Ni_fcc_111_s'] = 'Ni_fcc_111_surf.vasp'
structure_db['structures']['Ni_fcc_isf'] = 'Ni_fcc_isf.vasp'
structure_db['structures']['Ni_fcc_esf'] = 'Ni_fcc_esf.vasp'
structure_db['structures']['Ni_fcc_vac'] = 'Ni_fcc_sc_333_vac.vasp'
structure_db['structures']['Ni_fcc_o_int'] = 'Ni_fcc_sc_333_o_int.vasp'
structure_db['structures']['Ni_fcc_i_int'] = 'Ni_fcc_sc_333_t_int.vasp'
#------------------------------------------------------------------------------
# FITTING DATABASE
#------------------------------------------------------------------------------
qoi_db = QoiDatabase()
qoi_db.add_qoi(
qoi_name='Ni_fcc.E_coh',
qoi_type='Ecoh_min_all',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=-4.45)
qoi_db.add_qoi(
qoi_name='Ni_fcc.a0',
qoi_type='a11_min_all',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=3.52)
qoi_db.add_qoi(
qoi_name='Ni_fcc.c11',
qoi_type='c11',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=261.)
qoi_db.add_qoi(
qoi_name='Ni_fcc.c12',
qoi_type='c12',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=151.)
qoi_db.add_qoi(
qoi_name='Ni_fcc.c44',
qoi_type='c44',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=132.)
qoi_db.add_qoi(
qoi_name='Ni_fcc.B',
qoi_type='bulk_modulus',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=188.)
qoi_db.add_qoi(
qoi_name='Ni_fcc.G',
qoi_type='shear_modulus',
structures=OrderedDict([('ideal','Ni_fcc')]),
target=101.)
qoi_db.add_qoi(
qoi_name='Ni_fcc.vac',
qoi_type='E_formation',
structures=OrderedDict(
[
('defect','Ni_fcc_vac'),
('ideal','Ni_fcc')
]
),
target=1.6)
qoi_db.add_qoi(
qoi_name='Ni_fcc.100s',
qoi_type='E_surface',
structures=OrderedDict(
[
('slab','Ni_fcc_100_s'),
('ideal','Ni_fcc_100_unit')
]
),
target=1.51e-1)
qoi_db.add_qoi(
qoi_name='Ni_fcc.110s',
qoi_type='E_surface',
structures=OrderedDict(
[
('slab','Ni_fcc_110_s'),
('ideal','Ni_fcc_110_unit')
]
),
target=1.48e-1)
qoi_db.add_qoi(
qoi_name='Ni_fcc.111s',
qoi_type='E_surface',
structures=OrderedDict(
[
('slab','Ni_fcc_111_s'),
('ideal','Ni_fcc_111_unit')
]
),
target=1.25e-1)
qoi_db.add_qoi(
qoi_name='Ni_fcc.isf',
qoi_type='E_stacking_fault',
structures=OrderedDict([
('defect','Ni_fcc_isf'),
('ideal','Ni_fcc_111_unit')]),
target=1.45e-02)
qoi_db.add_qoi(
qoi_name='E_Ni_fcc_hcp',
qoi_type='phase_order',
structures=OrderedDict([
('low','Ni_fcc'),
('high','Ni_hcp')]),
target=0.024)
qoi_db.add_qoi(
qoi_name='E_Ni_fcc_bcc',
qoi_type='phase_order',
structures=OrderedDict([
('low','Ni_fcc'),
('high','Ni_bcc')]),
target=0.092)
qoi_db.add_qoi(
qoi_name='E_Ni_fcc_sc',
qoi_type='phase_order',
structures=OrderedDict([
('low','Ni_fcc'),
('high','Ni_sc')]),
target=0.600)
qoi_db.add_qoi(
qoi_name='E_Ni_fcc_dia',
qoi_type='phase_order',
structures=OrderedDict([
('low','Ni_fcc'),
('high','Ni_dia')]),
target=1.27)
#------------------------------------------------------------------------------
# QOI CONSTRAINTS
# QOI constraints are performed in the order they are iterated through in
# in the dictionary.
#
# If you want to implement new constraints, they should be implemented in
# pypospack.pyposmat.data.DataAnalyzer
# filter_by_qoi_err:
# key - the qoi_name as in the qoi_db
# value - the maximum allowable absolute error
# filter_by_pareto:
# filters out dominated points if set to True
#------------------------------------------------------------------------------
qoi_constraints = OrderedDict()
qoi_constraints['qoi_constraints']=OrderedDict()
#qoi_constraints['qoi_constraints']['Ni_fcc.E_coh.abserr'] = ['<',2]
#qoi_constraints['qoi_constraints']['Ni_fcc.a0.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.a0']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.c11.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.c11']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.c12.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.c12']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.c44.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.c44']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.B.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.c11']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.G.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.c12']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.vac.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.c12']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.110s.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.110s']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.100s.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.100s']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.111s.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.111s']['target'])]
#qoi_constraints['qoi_constraints']['Ni_fcc.isf.abserr'] = ['<',1.00 * abs(qoi_db.qois['Ni_fcc.isf']['target'])]
qoi_constraints['qoi_constraints']['E_Ni_fcc_bcc'] = ['>',0.]
qoi_constraints['qoi_constraints']['E_Ni_fcc_sc'] = ['>',0.]
qoi_constraints['qoi_constraints']['E_Ni_fcc_hcp'] = ['>',0.]
qoi_constraints['qoi_constraints']['E_Ni_fcc_dia'] = ['>',0.]
qoi_constraints['filter_by_pareto_membership'] = True
qoi_constraints['filter_by_cost_function'] = OrderedDict([
('weighting_scheme_type', 'scaled_by_qoi_target'),
('loss_function_type', 'abs_error'),
('cost_function_type', 'weighted_sum'),
('pct_to_keep', 0.95),
('n_potentials_min', 5),
('n_potentials_max', 20)
])
if __name__ == '__main__':
from pypospack.pyposmat.data import PyposmatConfigurationFile
pyposmat_filename_in = 'pyposmat.config.in'
configuration = PyposmatConfigurationFile()
configuration.qois = qoi_db.qois
configuration.qoi_constraints = qoi_constraints
configuration.structures = structure_db
configuration.potential = potential_formalism
configuration.sampling_type = sampling
configuration.sampling_distribution = parameter_distribution
configuration.sampling_constraints = parameter_constraints
configuration.write(filename=pyposmat_filename_in)
| 40.456522 | 114 | 0.591617 |
144dbc378cec0957df521a475d4b8eef8b528c65 | 1,159 | py | Python | day9.py | mayitbeegh/advent-of-code-2020 | 1b35bdad595db3ce72712e5e81163ae7f48c344c | [
"MIT"
] | 3 | 2020-12-07T22:20:32.000Z | 2020-12-07T22:21:56.000Z | day9.py | mayitbeegh/advent-of-code-2020 | 1b35bdad595db3ce72712e5e81163ae7f48c344c | [
"MIT"
] | null | null | null | day9.py | mayitbeegh/advent-of-code-2020 | 1b35bdad595db3ce72712e5e81163ae7f48c344c | [
"MIT"
] | null | null | null | from collections import deque
def part_one(inputs, length):
    """Return the first value that is not a two-term sum of the previous *length* values."""
    values = [int(line) for line in inputs]
    # Sliding window of the most recent `length` values (the "preamble").
    window = deque(values[:length], maxlen=length)
    for candidate in values[length:]:
        if not check_valid(window, candidate):
            return candidate
        window.append(candidate)
def check_valid(preamble, number):
    """Return True if *number* is the sum of two distinct entries of *preamble*.

    Single pass with a set of previously-seen values (O(n)) instead of the
    original O(n^2) scan over all index pairs. Duplicate values are handled
    correctly because each value is only matched against *earlier* entries.
    """
    seen = set()
    for value in preamble:
        if number - value in seen:
            return True
        seen.add(value)
    return False
def part_two(inputs, length):
    """Find the encryption weakness: the first contiguous run of numbers
    summing to the invalid number from part_one, returning the sum of the
    run's minimum and maximum.

    Parameters
    ----------
    inputs : iterable of str
        the puzzle input, one integer per line
    length : int
        preamble length forwarded to part_one

    Returns
    -------
    int or None
        min + max of the first qualifying run, or None if none exists

    Notes
    -----
    Preserves the original's requirement that a run span at least three
    numbers (inner loop starts at i+2).  NOTE(review): the puzzle statement
    allows runs of two -- confirm before relaxing.
    """
    numbers = list(map(int, inputs))
    invalid_number = part_one(inputs, length)
    # Grow each candidate window incrementally instead of re-summing the
    # slice on every step: O(n^2) instead of the original O(n^3).
    for i in range(len(numbers) - 2):
        running_total = numbers[i] + numbers[i + 1]
        for j in range(i + 2, len(numbers)):
            running_total += numbers[j]
            if running_total == invalid_number:
                window = numbers[i:j + 1]
                return min(window) + max(window)
# Worked example from the day 9 puzzle statement, used as a regression
# check for both parts before running on the real input.
test_inputs = """35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576""".split('\n')
# Known answers for the sample data with a preamble of length 5.
assert part_one(test_inputs, 5) == 127
assert part_two(test_inputs, 5) == 62
# Solve the real puzzle input, which uses a preamble of length 25.
with open('day9.input') as f:
    inputs = f.read().splitlines()
    print(part_one(inputs, 25))
    print(part_two(inputs, 25))
| 21.072727 | 64 | 0.612597 |
60afc46e189d02665921594ec4519ed356d9db71 | 191 | py | Python | mowgli_etl/pipeline/swow/swow_constants.py | tetherless-world/mowgli | 28c19eba41e03e053ae4addff56a313d926e18d7 | [
"MIT"
] | 4 | 2021-01-15T15:36:23.000Z | 2021-09-01T06:52:05.000Z | mowgli_etl/pipeline/swow/swow_constants.py | tetherless-world/mowgli | 28c19eba41e03e053ae4addff56a313d926e18d7 | [
"MIT"
] | 63 | 2020-05-04T13:48:04.000Z | 2020-06-06T02:32:58.000Z | mowgli_etl/pipeline/swow/swow_constants.py | tetherless-world/mowgli-etl | 28c19eba41e03e053ae4addff56a313d926e18d7 | [
"MIT"
] | null | null | null | from mowgli_etl.paths import DATA_DIR
# Namespace prefix used for identifiers produced by the SWOW pipeline.
SWOW_NAMESPACE = 'swow'
# Identifier naming the SWOW datasource within the ETL framework.
SWOW_DATASOURCE_ID = 'swow'
# Bundled SWOW English word-association archive (presumably the
# "Small World of Words" R100 release -- confirm against pipeline docs).
SWOW_ARCHIVE_PATH = DATA_DIR / 'swow' / 'SWOW-EN.R100.csv.bz2'
# Argument key under which the extracted SWOW CSV file is passed around.
SWOW_CSV_FILE_KEY = 'swow_csv_file'
| 23.875 | 62 | 0.774869 |
a4503a33be696fb2c8b9e8fe5cd946ff7b2f1d88 | 1,184 | py | Python | bionir_pipeline/common/clausie/clausiewrapper.py | neo2100/BioNIR_Pipeline | ba1995e18b14956053bf43d76b1702f8ff5d18bf | [
"BSD-2-Clause"
] | null | null | null | bionir_pipeline/common/clausie/clausiewrapper.py | neo2100/BioNIR_Pipeline | ba1995e18b14956053bf43d76b1702f8ff5d18bf | [
"BSD-2-Clause"
] | null | null | null | bionir_pipeline/common/clausie/clausiewrapper.py | neo2100/BioNIR_Pipeline | ba1995e18b14956053bf43d76b1702f8ff5d18bf | [
"BSD-2-Clause"
] | null | null | null | import os
from subprocess import DEVNULL, Popen
from sys import stderr
class ClausIEWrapper:
    """Static wrapper that shells out to the bundled ClausIE Java tool."""

    @staticmethod
    def run_clausie(input_filename, output_filename, verbose=False):
        """Run ClausIE over *input_filename*, writing clauses to *output_filename*.

        Parameters
        ----------
        input_filename : str
            path of the input file handed to ClausIE (``-f``).
        output_filename : str
            path ClausIE writes its output to (``-o``); returned on success.
        verbose : bool
            when True, echo the command line, let the Java process inherit
            this process's stderr for its output, and print the result file.

        Returns
        -------
        str
            ``output_filename``.
        """
        # Class path is built relative to this module so the bundled jars are
        # found regardless of the current working directory.
        source_dir = os.path.dirname(os.path.abspath(__file__))
        jars = '{0}/clausie:{0}/clausie/build:{0}/clausie/clausie_lib/stanford-parser.jar:{0}/clausie/clausie_lib/stanford-parser-2.0.4-models.jar:{0}/clausie/clausie_lib/jopt-simple-4.4.jar'.format(source_dir)
        # NOTE(review): shell=True with interpolated filenames is a shell
        # injection risk if the filenames can come from untrusted input;
        # consider an argument list with shell=False.
        command = 'java -cp "' + jars + '" de.mpii.clausie.ClausIE -f {} -l -o {}'.format(input_filename, output_filename)
        if verbose:
            print('ClausIE command: {}'.format(command))
            java_process = Popen(command, stdout=stderr, shell=True)
        else:
            # DEVNULL instead of open(os.devnull, 'w'): the original opened
            # a devnull handle it never closed (file-descriptor leak).
            java_process = Popen(command, stdout=stderr, stderr=DEVNULL, shell=True)
        java_process.wait()
        # NOTE(review): assert is stripped under ``python -O``; a RuntimeError
        # would be safer, but the AssertionError type is kept for callers.
        assert not java_process.returncode, 'ERROR: ClausIE exited with a non-zero code status.'
        if verbose:
            # The with-statement closes the file; the original also called
            # close() redundantly inside the block.
            with open(output_filename, 'r') as out_file:
                out_contents = out_file.read()
            print(out_contents)
        return output_filename
| 34.823529 | 210 | 0.652872 |
da3d5e31746e52f61af5cdc2f788fdb6940aeea9 | 123 | py | Python | apps/projects/admin.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/projects/admin.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/projects/admin.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import ProposedResourceStatus

# Expose ProposedResourceStatus in the Django admin with the default
# ModelAdmin options (no custom admin class supplied).
admin.site.register(ProposedResourceStatus)
| 17.571429 | 43 | 0.853659 |
921b0b9abe44981d8d6ba390f014684a33b8f278 | 3,293 | py | Python | train_joint_morph_id.py | iwbn/age-joint-loss-tensorflow | 0968acd48ae8e87577e7b64e56a25b960589a759 | [
"BSD-2-Clause"
] | 17 | 2018-11-25T21:02:17.000Z | 2021-05-03T11:39:16.000Z | train_joint_morph_id.py | iwbn/age-joint-loss-tensorflow | 0968acd48ae8e87577e7b64e56a25b960589a759 | [
"BSD-2-Clause"
] | 1 | 2019-01-22T10:47:24.000Z | 2019-01-23T07:12:19.000Z | train_joint_morph_id.py | iwbn/age-joint-loss-tensorflow | 0968acd48ae8e87577e7b64e56a25b960589a759 | [
"BSD-2-Clause"
] | 1 | 2019-12-25T02:03:05.000Z | 2019-12-25T02:03:05.000Z | from model.joint import AgeModelMorph
import tensorflow as tf
import sys, os
flags = tf.app.flags
FLAGS = flags.FLAGS
# Experiment hyper-parameters, overridable from the command line.
flags.DEFINE_integer('fold_number', 0, 'Morph fold number.')
flags.DEFINE_string('log_dir', "path-to-log", 'Log is saved to this directory.')
# Two comma-separated loss weights, parsed in train(): triplet, cross-entropy.
flags.DEFINE_string('loss_weights', "1.0,0.01", 'triplet,xentropy')
flags.DEFINE_float('max_iter', 30000, 'max step')
# NOTE(review): 'seed' is defined but not referenced in this file.
flags.DEFINE_float('seed', 0, 'random seed.')
flags.DEFINE_float('learning_rate', 5e-4, 'Initial learning rate.')
flags.DEFINE_float('weight_decay', 0., 'Lambda value for l2 decay.')
flags.DEFINE_integer('batch_size', 64, 'batch_size.')
flags.DEFINE_bool('use_pretrain', True, 'use pretrain.')
flags.DEFINE_integer('gpu', 2, 'GPU to use.')
# Pin TensorFlow to the single GPU selected by --gpu before any CUDA
# context is created.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % FLAGS.gpu
print (os.environ["CUDA_VISIBLE_DEVICES"])
def train():
    """Train the joint age model on the Morph dataset.

    Restores from the latest checkpoint in FLAGS.log_dir when one exists
    (otherwise optionally loads pretrained weights), pushes the command-line
    options into the model, then runs the training loop while writing
    train/val summaries and periodic checkpoints.
    """
    model = AgeModelMorph()
    # Grow GPU memory on demand instead of claiming it all up front.
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
    val_writer = tf.summary.FileWriter(FLAGS.log_dir + '/val')
    saver = tf.train.Saver()
    # Resume from checkpoint if present; otherwise fall back to pretrained
    # weights when --use_pretrain is set.
    ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)
    if ckpt:
        saver.restore(sess, ckpt.model_checkpoint_path)
    elif FLAGS.use_pretrain:
        model.load_pretrained(sess)
    # Copy command-line options into the model's in-graph option variables.
    model.set_option_variable(model.batch_size, FLAGS.batch_size, sess)
    model.set_option_variable(model.max_iter, FLAGS.max_iter, sess)
    model.set_option_variable(model.fold_num, FLAGS.fold_number, sess)
    model.set_option_variable(model.l2_decay, FLAGS.weight_decay, sess)
    model.set_option_variable(model.exponential_decay, 0.99, sess)
    model.set_option_variable(model.learning_rate, FLAGS.learning_rate, sess)
    # --loss_weights is "triplet,xentropy"; split and assign each weight.
    model.set_option_variable(model.triplet_weight, float(FLAGS.loss_weights.split(',')[0]), sess)
    model.set_option_variable(model.xentropy_weight, float(FLAGS.loss_weights.split(',')[1]), sess)
    model.prepare_data(sess)
    train_iter, val_iter = model.make_train_iterators()
    sess.run([train_iter.initializer, val_iter.initializer])
    train_next, val_next = train_iter.get_next(), val_iter.get_next()
    # Main loop: the model's end_of_training_policy op decides termination
    # (presumably from max_iter -- confirm in model.joint).
    while not sess.run(model.end_of_training_policy):
        x, y = sess.run(train_next)
        [_, summaries, step] = sess.run([model.train_op,
                                         model.summaries, model.global_step],
                                        feed_dict={model.imgs: x, model.ages: y})
        # Train summaries every 5 steps.
        if step % 5 == 0:
            train_writer.add_summary(summaries, step)
        # Validation pass every 2 steps; note `step` is re-read here.
        if step % 2 == 0:
            x, y = sess.run(val_next)
            [summaries, step] = sess.run([model.summaries, model.global_step],
                                         feed_dict={model.imgs: x, model.ages: y,
                                                    model.phase: "val", model.is_training: False})
            val_writer.add_summary(summaries, step)
            print ("step: %d"%step)
        # Checkpoint every 500 steps.
        if step % 500 == 0:
            saver.save(sess, FLAGS.log_dir + '/ckpt' , step)
    # Final checkpoint at the last global step.
    saver.save(sess, FLAGS.log_dir + '/ckpt', sess.run(model.global_step))
    sess.close()
train() | 42.217949 | 103 | 0.668995 |
c0c49d2390ef5ecc80379373c0b5bcc9178956cd | 36,733 | py | Python | helper_functions.py | goldsmdn/SteaneCode | dc9a2a2f54b93d44d1249c7ff1acbb0188e46c2e | [
"Apache-2.0"
] | 1 | 2021-12-21T14:52:12.000Z | 2021-12-21T14:52:12.000Z | helper_functions.py | goldsmdn/SteaneCode | dc9a2a2f54b93d44d1249c7ff1acbb0188e46c2e | [
"Apache-2.0"
] | 62 | 2021-04-02T15:33:02.000Z | 2021-09-01T08:47:29.000Z | helper_functions.py | goldsmdn/SteaneCode | dc9a2a2f54b93d44d1249c7ff1acbb0188e46c2e | [
"Apache-2.0"
] | 1 | 2022-02-11T15:32:55.000Z | 2022-02-11T15:32:55.000Z | #helper_functions.py
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import (
pauli_error,
depolarizing_error,
)
from statistics import stdev
from math import sqrt
from datetime import datetime
SPACE = ' '
def string_reverse(input_string):
"""Reverses a string.
Parameters
----------
input_string : str
Holds the string to be reversed
Returns
----------
reversed_string : str
The reversed string
"""
reversed_string = input_string[::-1]
return(reversed_string)
def find_parity(counts):
"""Finds the parity of the output bit string held in the counts dictionary.
Parameters
----------
counts : dictionary
Holds the observed output bit strings
Returns
----------
parity_count : dict
A dictionary holding the parity count for each observed output bit string.
"""
#initialise dictionary to hold counts
parity_count = {str(i) : 0 for i in range(2)}
for key, value in counts.items():
#split out data part of key
data = key.split()[1]
parity = calculate_parity(data)
old_count = parity_count[str(parity)]
new_count = old_count + value
parity_count[str(parity)] = new_count
return(parity_count)
def calculate_parity(bit_string):
    """Calculate the parity of a sequence of bit characters.

    Parameters
    ----------
    bit_string : str or list of str
        sequence whose '1' entries are counted; other entries are ignored

    Returns
    -------
    parity : int
        0 for an even number of '1's, 1 for an odd number
    """
    parity = 0
    for bit in bit_string:
        # Each '1' toggles the parity; any other character leaves it alone.
        if bit == '1':
            parity ^= 1
    return parity
def count_valid_output_strings(counts, codewords, data_location = 0,
                               post_selection = False, simple = False,
                               single = False, single_bit = 0):
    """Finds the number of valid and invalid output bit strings
    in a given location in a dictionary representing
    the counts for each output bit string.
    Various algorithms for determining validaty are supported,
    including post selection, where a bit is only valid if it is the codewords,
    simple decoding based on the parity of three bits and
    looking at a single bit only.
    Parameters
    ----------
    counts : dictionary
        holds the observed populations for each
        combination of qubit
    codewords : list
        holds allowed codewords
    data_location : int
        location of the data string
    post_selection : bool
        if true then only strings in logical zero are invalid.
        Strings outside the codespace are counted separately.
    simple : bool
        looks only at the parity of bits with exactly two non-zero columns in the parity matrix
    single : bool
        look at single bit only
    single_bit : int
        single bit to validate against
    Returns
    -------
    count_valid : int
        Number of valid bit strings
    count_invalid : int
        Number of invalid bit strings
    count_outside_codeword : int
        Number of strings outside codespace.
    Notes
    -----
    This code was originally designed to handle the codewords
    in a list of lists, but will also work fine
    with a list of strings.
    """
    # The three decoding modes are mutually exclusive: reject any flag
    # combination that would make the classification ambiguous.
    if single:
        if len(codewords) != 1:
            raise ValueError('Only send a one bit codeword with calculation using a single bit')
        if simple:
            raise ValueError('Validity calculation not designed for both simple algorithm and single_bit')
        if post_selection:
            raise ValueError('Validity calculation not designed for both post_selection and single_bit')
    if simple:
        if post_selection:
            raise ValueError('Validity calculation not designed for both post_selection and simple')
        if len(codewords) != 1:
            raise ValueError('Only send a one bit codeword with simple calculation')
    count_valid = 0
    count_invalid = 0
    count_outside_codeword = 0
    for key, value in counts.items():
        #split out data part of key
        # data_location == 0 means the key is the data string itself;
        # otherwise the key holds several space-separated fields.
        if data_location == 0:
            data = key
        else:
            data = key.split()[data_location]
        #need to reverse the data string showing the relevant qubits as
        #the codewords and the data have a different format
        reversed_data_string = string_reverse(data)
        # Classify the string and add its observation count to the
        # appropriate running total.
        valid, invalid, outside_codeword = compute_string_validity(value = value, codewords = codewords,
                                                    reversed_data_string = reversed_data_string,
                                                    post_selection = post_selection,
                                                    simple = simple,
                                                    single = single,
                                                    single_bit = single_bit
                                                    )
        count_valid = count_valid + valid
        count_invalid = count_invalid + invalid
        count_outside_codeword = count_outside_codeword + outside_codeword
    return(count_valid, count_invalid, count_outside_codeword)
def compute_string_validity(value, codewords, reversed_data_string, post_selection = False,
                            simple = False, single = False, single_bit = 0):
    """Categorises a string as valid, invalid or outside the codeword and based on this assigns
    the number of counts of that string to the values returned.
    Various algorithms for determining validaty are supported,
    including post selection, where a bit is only valid if it is the codewords,
    simple decoding based on the parity of three bits and
    looking at a single bit only.
    Parameters
    ----------
    value : int
        number of strings for this data string
    codewords : list
        holds allowed codewords
    reversed_data_string : str
        string holding element to be processed
    post_selection : bool
        if true then only strings in logical zero are invalid.
        Strings outside the codespace are counted separately.
    simple : bool
        looks only at the parity of bits with exactly two non-zero columns in the parity matrix
    single : bool
        look at single bit only
    single_bit : int
        single bit to validate against
    Returns
    -------
    valid : int
        value if the bit string is valid
    invalid : int
        value if the bit string is invalid
    outside_codeword : int
        value if the bit string is outside the codespace
    Notes
    -----
    This code was originally designed to handle the codewords
    in a list of lists, but will also work fine
    with a list of strings.
    """
    if simple:
        if post_selection:
            raise Exception('simple and post selection algorithm are exclusive')
    # Exactly one of the three counters receives `value`; the others stay 0.
    valid = 0
    invalid = 0
    outside_codeword = 0
    if post_selection:
        # Post selection: logical zero is valid, logical one (the bit-flipped
        # codewords) is invalid, everything else is outside the codespace.
        logical_zero = codewords
        logical_one = flip_code_words(codewords)
        if reversed_data_string in logical_zero:
            valid = value
        elif reversed_data_string in logical_one:
            invalid = value
        else:
            outside_codeword = outside_codeword + value
    elif simple:
        # Simple decoding: take the parity of the bits whose parity-matrix
        # columns have exactly two non-zero entries and compare with the
        # single-entry codeword list.
        simple_parity_bits = calculate_simple_parity_bits()
        # Leading '' entry is harmless: calculate_parity only reacts to '1'.
        bit_string = ['']
        for bit_location in simple_parity_bits:
            bit_string.append(reversed_data_string[bit_location])
        parity = str(calculate_parity(bit_string))
        if parity in codewords:
            valid = value
        else:
            invalid = value
    elif single:
        # Single-bit decoding: compare one designated bit with the codeword.
        if reversed_data_string[single_bit] in codewords:
            valid = value
        else:
            invalid = value
    else:
        # Default: the whole string must be one of the allowed codewords.
        if reversed_data_string in codewords:
            valid = value
        else:
            invalid = value
    return(valid, invalid, outside_codeword)
def calculate_simple_parity_bits():
    """Returns a list of qubits with exactly two non zero rows in the parity matrix.

    Returns
    -------
    simple_parity_bits : list
        positional indices whose column total (as reported by
        calculate_parity_matrix_totals) equals 2
    """
    column_totals = calculate_parity_matrix_totals()
    return [index for index, total in enumerate(column_totals) if total == 2]
def find_individual_ancilla_values(ancilla_values, data_qubits,
                                   ancilla_qubits, label_string = ''):
    """Returns the count of individual ancilla bit strings as a dictionary.

    Parameters
    ----------
    ancilla_values : dict
        holds the counts for each combination of ancilla bit strings.
    data_qubits : int
        number of data qubits used as an offset to calculate
        the ancilla number
    ancilla_qubits : int
        number of ancilla qubits
    label_string : str
        first part of label

    Returns
    -------
    individual_ancilla_values : dict
        dictionary containing the count of each individual ancilla qubit
    """
    # One entry per ancilla qubit, numbered directly after the data qubits.
    individual_ancilla_values = {
        label_string + str(qubit): 0
        for qubit in range(data_qubits + 1, data_qubits + 1 + ancilla_qubits)
    }
    for bit_string, occurrences in ancilla_values.items():
        for position, bit in enumerate(bit_string[:ancilla_qubits]):
            if bit == '1':
                # Qiskit's bit order is reversed relative to the paper,
                # hence the (ancilla_qubits - position) offset.
                label = label_string + str(data_qubits + ancilla_qubits - position)
                individual_ancilla_values[label] += occurrences
    return individual_ancilla_values
def find_ancilla_values(counts, ancilla_qubits, ancilla_location = 0):
    """Returns a dictionary with a count of each possible ancilla bit string.

    Parameters
    ----------
    counts : dictionary
        counts for each possible output bit string
    ancilla_qubits : int
        number of ancilla qubits
    ancilla_location : int
        designates which space-separated field of the key holds the ancilla

    Returns
    -------
    ancilla_values : dict
        dictionary containing the count of each possible ancilla bit string
    """
    # Zero-initialise every possible ancilla bit pattern, zero padded to
    # the full ancilla width.
    pattern = '0' + str(ancilla_qubits) + 'b'
    ancilla_values = {format(i, pattern): 0 for i in range(2 ** ancilla_qubits)}
    # Accumulate the observed counts per ancilla pattern.
    for bit_string, occurrences in counts.items():
        ancilla = bit_string.split()[ancilla_location]
        ancilla_values[ancilla] += occurrences
    return ancilla_values
def strings_AND_bitwise(string1, string2):
    """Returns the bitwise XOR of two equal length bit strings.

    NOTE: despite its historical name, this function has always computed
    XOR, not AND: the result bit is '1' exactly when the input bits are
    ('0', '1') or ('1', '0').  correct_qubit relies on this XOR behaviour
    to flip the data bit selected by the ancilla mask, so the name is kept
    for interface compatibility and only the documentation is corrected.

    Parameters
    ----------
    string1 : str
        First string
    string2 : str
        Second string

    Returns
    -------
    string_out : str
        bitwise XOR of the two input strings; character pairs other than
        ('0','1')/('1','0') yield '0', as in the original implementation

    Raises
    ------
    Exception
        if the two strings differ in length
    """
    if len(string1) != len(string2):
        raise Exception('When taking the logical AND of two strings they must both have the same length')
    # Single-pass join avoids the quadratic cost of repeated string
    # concatenation in the original loop.
    return ''.join(
        '1' if (a, b) in (('0', '1'), ('1', '0')) else '0'
        for a, b in zip(string1, string2)
    )
def string_ancilla_mask(location, length):
    """Returns a bit string with a '1' in a given position and '0' elsewhere.

    The mask is indexed from the right-hand end of the string: location 1
    gives '0...01' and location == length gives '10...0'.

    Parameters
    ----------
    location : int
        location of the bit which should be set to '1' in the mask (1-indexed)
    length : int
        length of string in the mask

    Returns
    -------
    string : str
        ancilla bit mask string in required format

    Raises
    ------
    Exception
        if the arguments are not integers or are out of range

    Notes
    -----
    Two defects in the original implementation are fixed here:

    * the argument checks used ``return Exception(...)`` instead of
      ``raise``, so invalid arguments silently returned an exception
      object as if it were a mask;
    * the shift loop used the hard-coded slice ``string[1:7]``, which
      silently truncated masks longer than seven bits.  The mask is now
      built directly, so any length is handled correctly.
    """
    if not isinstance(location, int):
        raise Exception('Location of string must an integer when calculating ancilla mask')
    if not isinstance(length, int):
        raise Exception('Length of string must an integer when calculating ancilla mask')
    if location < 1:
        raise Exception('Location of string must be strictly positive when calculating ancilla mask')
    if length < 1:
        raise Exception('String length must be greater than 1 when calculating ancilla mask')
    if length < location:
        raise Exception('Location must be less than string length when calculating ancilla mask')
    # '1' followed by (location - 1) zeros, left-padded with '0' to the
    # requested length: equivalent to shifting the set bit (location - 1)
    # places up from the right-hand end.
    return ('1' + '0' * (location - 1)).rjust(length, '0')
def correct_qubit(data_in, ancilla, data_qubits):
    """Apply the bit-flip correction selected by the ancilla to a data string.

    Parameters
    ----------
    data_in : str
        input data bit string
    ancilla : str
        three bit ancilla logical Z code
    data_qubits : int
        length of bit string

    Returns
    -------
    data_out : str
        corrected data bit string

    Notes
    -----
    The ancilla bit string is reversed relative to the data-qubit numbering
    shown on the Qiskit diagrams, so it is reversed before being read as the
    (1-indexed) number of the qubit to flip.  Only bit flip errors are
    corrected, not phase errors.
    """
    if ancilla == '000':
        # Syndrome 000 means no error was detected.
        return data_in
    qubit_number = int(string_reverse(ancilla), 2)
    flip_mask = string_ancilla_mask(qubit_number, data_qubits)
    return strings_AND_bitwise(data_in, flip_mask)
def flip_code_words(codewords_in):
    """Build the logical-one codewords by flipping every bit of each
    logical-zero codeword.

    Parameters
    ----------
    codewords_in : list
        logical codewords in seven bit Steane code data qubit
        for the logical zero

    Returns
    -------
    codewords_out : list
        bit flipped input codewords

    Raises
    ------
    Exception
        if any codeword contains a character other than '0' or '1'
    """
    codewords_out = []
    for word in codewords_in:
        flipped_bits = []
        for bit in word:
            if bit == '1':
                flipped_bits.append('0')
            elif bit == '0':
                flipped_bits.append('1')
            else:
                raise Exception('Not able to interpret bit in codewords')
        codewords_out.append(''.join(flipped_bits))
    return codewords_out
def get_noise(p_meas, single_qubit_error,
              two_qubit_error, single_qubit_gate_set,
              two_qubit_gate_set, all = True,
              noisy_qubit_list = None,
              decohere = False,
              dummy_gate_set = None,
              dummy_gate_error = 0
              ):
    """Returns a noise model

    Parameters
    ----------
    p_meas : float
        probability of X error on measurement
    single_qubit_error : float
        probability of a depolarizing error on a single qubit gate
    two_qubit_error : float
        probability of a depolarizing error on a two qubit gate
    single_qubit_gate_set : list
        list of all single qubit gate types relevant for noise
    two_qubit_gate_set : list
        list of all two qubit gate types relevant for noise
    all : bool
        apply two gate noise to all qubits
        (NOTE: parameter name shadows the builtin ``all``; kept for
        interface compatibility)
    noisy_qubit_list : list of list
        list of list of noisy qubits on which errors are applied
    decohere : bool
        Add extra noise to represent de-coherence
    dummy_gate_set : list
        Set of dummy gates on which the de-coherence error is applied. Normally ['id'].
    dummy_gate_error : float
        error to apply to dummy gate which is set up to model de-coherence at certain stages in the circuit.

    Returns
    -------
    noise_model : dict
        noise model to be used

    Notes
    -----
    Can apply noise selectively to qubits in noisy_qubit_list. This is a list of lists.
    BUG FIX: the original used mutable default arguments ([]) for
    noisy_qubit_list and dummy_gate_set; a None sentinel is used instead
    and converted below, preserving the original call behaviour.
    """
    if noisy_qubit_list is None:
        noisy_qubit_list = []
    if dummy_gate_set is None:
        dummy_gate_set = []
    error_meas = pauli_error([('X', p_meas), ('I', 1 - p_meas)])
    error_gate1 = depolarizing_error(single_qubit_error, 1)
    error_gate2 = depolarizing_error(two_qubit_error, 1)
    # Two-qubit error is the tensor product of two single-qubit
    # depolarizing channels (not a true two-qubit depolarizing channel).
    error_gate3 = error_gate2.tensor(error_gate2)
    if decohere:
        if 'id' in single_qubit_gate_set:
            raise ValueError('Do not include gate id in the single_qubit_gate_set as used for decoherent errors')
        error_decohere = depolarizing_error(dummy_gate_error, 1)
    noise_model = NoiseModel()
    if all:
        if noisy_qubit_list != []:
            raise ValueError('Errors are applied to all qubits but a list of qubits with errors is given')
        noise_model.add_all_qubit_quantum_error(error_meas, 'measure')
        # measurement error is applied to measurements
        noise_model.add_all_qubit_quantum_error(error_gate1,
                                                single_qubit_gate_set)
        # single qubit gate errors
        noise_model.add_all_qubit_quantum_error(error_gate3,
                                                two_qubit_gate_set)
        # two qubit gate error is applied to two qubit gates
        if decohere:
            noise_model.add_all_qubit_quantum_error(error_decohere,
                                                    dummy_gate_set)
            # decoherence error is applied to dummy gates
    else:
        if noisy_qubit_list == []:
            raise ValueError('A list of qubits must be supplied if errors are not to be applied to all qubits')
        #read through list of list of error gates
        for gate_list in noisy_qubit_list:
            for gate_index1 in gate_list:
                noise_model.add_quantum_error(error_meas, 'measure',
                                              [gate_index1]
                                              )
                # measurement error is applied to measurements
                noise_model.add_quantum_error(error_gate1,
                                              single_qubit_gate_set,
                                              [gate_index1]
                                              )
                if decohere:
                    noise_model.add_quantum_error(error_decohere ,
                                                  dummy_gate_set,
                                                  [gate_index1]
                                                  )
                    # decoherence error is applied to dummy gates
                # single qubit gate errors
                # Two-qubit errors on every ordered pair of distinct
                # qubits within the same sub-list.
                for gate_index2 in gate_list:
                    if gate_index1 != gate_index2:
                        noise_model.add_quantum_error(error_gate3,
                                                      two_qubit_gate_set,
                                                      [gate_index1,
                                                       gate_index2]
                                                      )
    return noise_model
def mean_of_list(list_in):
    """Returns the arithmetic mean of a list.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    mean : float
        result of calculation
    """
    total = sum(list_in)
    return total / len(list_in)
def calculate_standard_error(list_in):
    """ Calculates the standard error of a list of numbers

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    standard_deviation : float
        standard deviation estimated from sample (0 for a single point)
    standard_error : float
        standard error estimated from sample (0 for a single point)

    Raises
    ------
    ValueError
        if the list is empty

    Notes
    -----
    BUG FIX: the empty-list branch originally raised a plain string that was
    intended to be an f-string ('f The number of iterations must be positive
    {iterations} used') and referenced an undefined name; the message is
    replaced with a correct one.
    """
    if len(list_in) > 1:
        standard_deviation = stdev(list_in)
        # Standard error of the mean: s / sqrt(n).
        standard_error = standard_deviation / sqrt(len(list_in))
    elif len(list_in) == 1:
        # A single observation carries no spread information; warn and
        # fall back to zero as the original did.
        standard_deviation = 0
        standard_error = 0
        print('Unable to carry out standard error calcuation with one point. ')
        print('Standard error of 0 used.')
    else:
        raise ValueError('The list of data points must not be empty when calculating the standard error')
    return(standard_deviation, standard_error)
def convert_codewords(codewords):
    """ Changes the codewords list of lists to a list of strings.

    Parameters
    ----------
    codewords : list
        allowed codewords for logical zero

    Returns
    -------
    list_of_strings : list
        a list of strings

    Notes
    -----
    No longer needed at present as codeword is a list of strings
    but retained in case needed in future.
    """
    return [''.join(str(element) for element in word) for word in codewords]
def summarise_logical_counts(counts, logical_zero_strings, logical_one_strings,
                             data1_location, data2_location, simple = False):
    """Simplifies bit strings for logical operations
    to show each qubit as 0, 1, or E instead of the full bit string.
    0. means qubit is the logical zero
    1. means qubit is the logical one
    E. means qubit is outside code space
    (note: look_up_data marks out-of-codespace strings with 'E';
    the nine numeric keys '00'..'22' are still pre-initialised for
    backward compatibility, and 'E' keys are added on demand)

    Parameters
    ----------
    counts : dict
        results of computation
    logical_zero_strings : list
        list of strings in logical zero
    logical_one_strings : list
        list of strings in logical one
    data1_location : int
        where in the counts bit string data1 is held
    data2_location : int
        where in the counts bit string data2 is held
    simple : bool
        use simple decoding based on bit parity

    Returns
    -------
    new_counts : dict
        simplified results
    """
    #set up dictionary to hold answer
    if type(logical_zero_strings) != list:
        raise Exception('logical_zero_strings should be a list')
    if type(logical_one_strings) != list:
        raise Exception('logical_one_strings should be a list')
    validate_integer(data1_location)
    validate_integer(data2_location)
    if simple:
        if len(logical_zero_strings) != 1:
            raise Exception('with simple decoding logical zero should be a list with one entry')
        # BUG FIX: the original re-checked logical_zero_strings here, so an
        # invalid logical_one_strings list was never caught.
        if len(logical_one_strings) != 1:
            raise Exception('with simple decoding logical one should be a list with one entry')
        simple_parity_bits = calculate_simple_parity_bits()
    new_counts = {str(i) + str(j):0 for i in range(3) for j in range(3)}
    for key, value in counts.items():
        #split out the data parts of key
        data1 = key.split()[data1_location]
        data2 = key.split()[data2_location]
        #need to reverse the string from qiskit format
        reverse1 = string_reverse(data1)
        reverse2 = string_reverse(data2)
        if simple:
            #string is calculated from parity
            bit_string1 = ['']
            bit_string2 = ['']
            for bit_location in simple_parity_bits:
                bit_string1.append(reverse1[bit_location])
                bit_string2.append(reverse2[bit_location])
            new_data1 = str(calculate_parity(bit_string1))
            new_data2 = str(calculate_parity(bit_string2))
        else:
            new_data1 = look_up_data(reverse1, logical_zero_strings, logical_one_strings)
            new_data2 = look_up_data(reverse2, logical_zero_strings, logical_one_strings)
        new_key = new_data1 + new_data2
        # Keys containing 'E' are not pre-initialised; create them lazily.
        if new_counts.get(new_key) == None:
            new_counts.update({new_key: value})
        else:
            new_counts[new_key] = new_counts[new_key] + value
    return(new_counts)
def look_up_data(input_string, logical_zero, logical_one):
    """Classify a bit string as logical zero ('0'), logical one ('1'),
    or outside the code base ('E').

    Parameters
    ----------
    input_string : str
        data for analysis
    logical_zero : list
        list of strings representing a logical zero
    logical_one : list
        list of strings representing a logical one

    Returns
    -------
    output_string : str
        result of look-up
    """
    if input_string in logical_zero:
        return '0'
    if input_string in logical_one:
        return '1'
    return 'E'
def print_time():
    """Prints the current wall-clock time in HH:MM:SS format."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print("Current Time =", stamp)
    return
def validate_integer(number):
    """Checks if a number is an integer.

    Parameters
    ----------
    number: int
        number to be validated

    Raises
    ------
    ValueError
        if *number* is not exactly of type int (the strict type check
        deliberately rejects bool and other int subclasses, matching the
        original behaviour)
    """
    if type(number) is not int:
        raise ValueError(f'The number {number} entered is not an integer')
def process_FT_results(counts, codewords, data_meas_strings = ['0'],
anc_zero = '0', anc_one = '1',
verbose = False, data_qubits = 7,
ancilla_start = 0, data_meas_start = 0, data_start = 0,
ancilla_types = 2, ancilla_qubits = 0, ancilla_meas_repeats = 1,
data_meas_qubits = 0, data_meas_repeats = 0,
post_selection = False, simple = False,
):
"""Process results from fault tolerant processing.
Parameters
----------
counts : dictionary
results for analysis
codewords : list
list of valid data codewords
data_meas_strings: string
allowed strings for the data measurement bits
anc_zero : string
allowed strings for the ancilla zero
anc_one : string
allowed strings for the ancilla one
verbose : bool
if true enables printing
data_qubits : int
Length of data bit string. Usually seven
ancilla_start : int
starting place for ancilla (if any)
data_meas_start : int
starting place for data measurement qubits (if any)
data_start : int
starting place for data string
ancilla_types : int
number of different ancilla types. Normally 2 (X and Z) or 0
ancilla_qubits : int
number of strings for each ancilla qubits. Normally 0, 1 or 3
ancilla_meas_repeats : int
number of times ancilla measurements are repeated. Normally 3 or 1
data_meas_qubits : int
number of distinct data measurement qubits. Normally 7, 1 or 0
data_meas_repeats: int
number of times data measurements are repeated. Normally 3 or 1.
post_select: bool
if true then only strings in logical zero are invalid
simple : bool
if true then simple decoding based on three bits shall be used.
Returns
-------
error_rate : float
error rate calculated
rejected : int
strings rejected for validation
accepted : int
strings accepted for validation
valid : int
strings validated and found to be in the code space
invalid : int
strings validated and found to not be in the code space
Notes
-----
This function takes the output string, splits it, and determines if it passes
data and ancilla checks. If so the data keyword is validated.
"""
anc_meas_strings = [anc_zero, anc_one]
validate_integer(ancilla_start)
validate_integer(data_meas_start)
validate_integer(data_start)
validate_integer(ancilla_types)
validate_integer(ancilla_qubits)
validate_integer(ancilla_meas_repeats)
validate_integer(data_meas_qubits)
validate_integer(data_meas_repeats)
total_keys = ancilla_types * ancilla_qubits * ancilla_meas_repeats
total_keys = total_keys + (data_meas_qubits * data_meas_repeats) + 1
count_valid = 0
count_invalid = 0
count_outside_codeword = 0
ancilla_rejected = 0
ancilla_accepted = 0
data_rejected = 0
data_accepted = 0
rejected = 0
accepted = 0
for string, value in counts.items():
qubit_strings = []
data_syndrome_strings = []
data_OK = False
for i in range(total_keys):
qubit_strings.append(string.split()[i])
data_string = qubit_strings[data_start]
for i in range(data_meas_start, data_meas_start + data_meas_repeats):
#need to reverse strings because Qiskit reverses them
data_syndrome_strings.append(string_reverse(qubit_strings[i]))
if data_meas_repeats == 3:
if data_syndrome_strings[2] in data_meas_strings:
if data_syndrome_strings[1] in data_meas_strings:
if data_syndrome_strings[0] in data_meas_strings:
data_OK = True
elif data_meas_repeats == 0:
data_OK = True
else:
raise Exception('At present only 3 or zero data measurements are coded for')
if data_OK:
data_accepted = data_accepted + value
if ancilla_qubits == 0:
#no ancilla
ancilla_accepted = data_accepted
ancilla_rejected = 0
ancilla_OK = True
corrected_data_string = data_string
elif ancilla_qubits == 1:
#simple case without fault tolerance. No check on ancilla possible
ancilla_OK = True
ancilla_accepted = data_accepted
ancilla_rejected = 0
if ancilla_meas_repeats != 1:
raise Exception('can not handle multiple measurements on one ancilla qubit')
ancilla = qubit_strings[ancilla_start]
corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
elif ancilla_qubits == 3:
#complex case with fault tolerance
count_ancilla_OK = 0
X = ['' for i in range(ancilla_qubits)]
for i in range(ancilla_types):
for j in range(ancilla_meas_repeats):
first = i * (ancilla_qubits * ancilla_meas_repeats) + j * ancilla_meas_repeats
second = first + 1
third = second + 1
if qubit_strings[third] == qubit_strings[second]:
if qubit_strings[second] == qubit_strings[first]:
if qubit_strings[first] in anc_meas_strings:
count_ancilla_OK = count_ancilla_OK + 1
if i == 0:
#only interested in X values
if qubit_strings[first] in anc_zero:
X[j] = '0'
elif qubit_strings[first] in anc_one:
X[j] = '1'
else:
raise Exception('Error in processing strings for i, j, k = {i}, {j}, {k}')
if count_ancilla_OK == ancilla_qubits * ancilla_types:
ancilla_OK = True
ancilla_accepted = ancilla_accepted + value
#always first three ancilla with Steane code
ancilla = X[0] + X[1] + X[2]
corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
else:
ancilla_OK = False
ancilla_rejected = ancilla_rejected + value
else:
raise Exception('Can only process ancilla strings of 0, 1 or 3 qubits')
if ancilla_OK:
#need to reverse string because of Qisit convention
reversed_data_string = string_reverse(corrected_data_string)
valid, invalid, outside_codeword = compute_string_validity(value,
codewords,
reversed_data_string,
post_selection = post_selection,
simple = simple,
)
count_valid = count_valid + valid
count_invalid = count_invalid + invalid
count_outside_codeword = count_outside_codeword + outside_codeword
else:
data_rejected = data_rejected + value
if ancilla_accepted != 0:
# calculate on ancilla_accepted because this always holds the amounts to be validated
error_rate = count_invalid / ancilla_accepted
else:
error_rate = 0
print('Error rate not defined as no strings accepted')
rejected = data_rejected + ancilla_rejected
accepted = ancilla_accepted
if verbose:
print(f'At the data validation stage')
print(f'There are {data_rejected} strings rejected and {data_accepted} strings submitted for processing')
print(f'Making {data_rejected + data_accepted} in total submitted for data processing')
print()
print(f'At the ancilla validation stage')
print(f'There are {ancilla_rejected} strings rejected and {ancilla_accepted} strings submitted for validation')
print(f'Making {ancilla_rejected + ancilla_accepted} in total submitted to check against ancilla')
print()
print(f'Of these {ancilla_accepted} strings validated there are {count_valid} valid strings and {count_invalid} invalid_strings')
if post_selection:
print(f'There were {count_outside_codeword} strings that were neither logical one or logical zero')
print(f'The error rate is {error_rate:.4f}')
return(error_rate, rejected, accepted, count_valid, count_invalid)
def get_parity_check_matrix():
    """Return the Steane-code parity check matrix as a list of 7-bit strings."""
    return [
        '0001111',
        '0110011',
        '1010101',
    ]
def get_codewords():
    """Return the valid codewords of the logical zero state.

    Returns
    -------
    codewords : list
        The eight 7-bit strings making up the logical-zero codeword set.
    """
    return [
        '0000000',
        '1010101',
        '0110011',
        '1100110',
        '0001111',
        '1011010',
        '0111100',
        '1101001',
    ]
def calculate_parity_matrix_totals():
    """Count the non-zero entries in each *column* of the parity check matrix.

    Note: the original docstring said "each row", but the computation is per
    column index, i.e. per qubit: ``parity_matrix_totals[i]`` is the number of
    parity rows whose i-th bit is set.

    Returns
    -------
    parity_matrix_totals : list
        One integer per qubit column of the parity check matrix.
    """
    parity_check_matrix = get_parity_check_matrix()
    n = len(parity_check_matrix[0])
    # Sum the bits column-wise across all parity rows.
    parity_matrix_totals = [
        sum(int(row[index]) for row in parity_check_matrix)
        for index in range(n)
    ]
    return parity_matrix_totals
cf44c411bebc7cddfb123d9d1492d7bfc758f934 | 3,957 | py | Python | src/utils/sensor_listener.py | WhiteHyun/smart-locker | a5b87309741cf9ae4134dda8936edcb258068301 | [
"MIT"
] | null | null | null | src/utils/sensor_listener.py | WhiteHyun/smart-locker | a5b87309741cf9ae4134dda8936edcb258068301 | [
"MIT"
] | 27 | 2021-02-12T15:40:24.000Z | 2021-05-24T07:32:54.000Z | src/utils/sensor_listener.py | WhiteHyun/smart-locker | a5b87309741cf9ae4134dda8936edcb258068301 | [
"MIT"
] | 1 | 2021-10-05T13:45:36.000Z | 2021-10-05T13:45:36.000Z | import serial
from multiprocessing import *
if __name__ == "__main__" or __name__ == "sensor_listener":
from util import dict2Query, connect_arduino
from sql import SQL
else:
from .util import dict2Query, connect_arduino
from .sql import SQL
class SensorListener:
    """Listens for sensor readings coming from the Arduinos of a locker cabinet.

    Attributes
    ----------
    sql : SQL
        DB connection wrapper used to store readings.
    seri : list
        Open serial connections, one per sensor Arduino.
    arduino_number : list
        Arduino numbers (ARDNum) matching ``seri`` by index.
    sync_sensor : dict
        Maps sensor-set keys (presumably "<arduino number><set id>" per the
        lookup in __listen_sensor) to carrier keys (CRRMngKey).
    """
    def __init__(self, LCKMngKey):
        """
        Parameters
        ----------
        LCKMngKey : str
            Locker management key identifying this cabinet.

        Examples
        ---------
        >>> SensorListener(LCKMngKey="H001234")
        """
        # NOTE(review): DB credentials are hard-coded; consider moving to config.
        self.sql = SQL("root", "", "10.80.76.63", "SML")
        self.LCKMngKey = LCKMngKey
        self.seri, self.arduino_number = self.__get_serial_connection()
        self.sync_sensor = self.__set_sensor_number()
    def __get_serial_connection(self):
        '''
        Look up the sensor-type ('S') Arduinos of this locker in the DB, open a
        serial connection for each port and return (connections, arduino_numbers).

        NOTE(review): on any DB/serial failure the exception is only printed and
        the method implicitly returns None, which makes the tuple-unpacking in
        __init__ raise TypeError — confirm whether that is intended.
        '''
        try:
            sql = SQL("root", "", "10.80.76.63", "SML")
            result = sql.processDB(
                f"SELECT Port, ARDNum FROM ARDInfo WHERE LCKMngKey='{self.LCKMngKey}' AND ARDKind='S' ORDER BY ARDNum;")
            seri = []
            ardNum = []
            for portDict in result:
                seri.append(connect_arduino(f"/dev/{portDict['Port']}"))
                ardNum.append(portDict['ARDNum'])
            return seri, ardNum
        except Exception as e:
            print(e)
    def __set_sensor_number(self):
        """
        Build a {SyncSensor: CRRMngKey} dict for this locker's active ('N'
        state) carriers, skipping carriers without a sensor assignment.
        """
        sync_sensor = dict()
        sql_data = self.sql.processDB(
            f"SELECT CRRMngKey, SyncSensor FROM CRRInfo WHERE State = 'N' AND LCKMngKey = '{self.LCKMngKey}'")
        for dataset in sql_data:
            if dataset["SyncSensor"] is not None:
                sync_sensor[dataset["SyncSensor"]] = dataset["CRRMngKey"]
        return sync_sensor
    def __listen_sensor(self, seri, ardNum):
        """Read lines from one serial connection forever and store finished
        sensor records in the DB.

        Protocol (as implemented below): a line starting with "dataset" marks
        the beginning of a new record (its last character selects the sensor
        set); subsequent lines carry one value each, tagged by their first
        letter (F=FSR, S=SSO, L=LIG, H=HAL, V=VIB) with the value at res[2:].
        """
        # -1 marks "no reading yet" for each sensor field.
        dataset = {"CRRMngKey": None, "FSR": -1,
                   "LIG": -1, "SSO": -1, "HAL": -1, "VIB": -1}
        while True:
            if seri.readable():
                try:
                    res = seri.readline().decode()
                    # Strip the trailing "\r\n" sent by the Arduino.
                    res = res[:-2]
                    if not res:
                        continue
                    if res[:-1] == "dataset":
                        # Flush the previous record before starting a new one.
                        if dataset["CRRMngKey"] is not None:
                            sql_query = dict2Query("SensorValue", dataset)
                            self.sql.processDB(sql_query)
                        dataset = {"CRRMngKey": None, "FSR": -1,
                                   "LIG": -1, "SSO": -1, "HAL": -1, "VIB": -1}
                        dataset["CRRMngKey"] = self.sync_sensor.get(
                            str(ardNum) + res[-1:])
                    elif res[0] == "F":
                        dataset["FSR"] = res[2:]
                    elif res[0] == "S":
                        dataset["SSO"] = res[2:]
                    elif res[0] == "L":
                        dataset["LIG"] = res[2:]
                    elif res[0] == "H":
                        dataset["HAL"] = res[2:]
                    elif res[0] == "V":
                        dataset["VIB"] = res[2:]
                except Exception as e:
                    print(f'sensor exception : {e}')
    def listen(self):
        """Spawn one listener process per open serial connection."""
        for i in range(len(self.seri)):
            t = Process(target=self.__listen_sensor, args=(
                self.seri[i], self.arduino_number[i]))
            t.start()
# test = sensorListener(0,"COM6","H001234")
# print (test.SyncSensor)
# test.listen()
| 32.975 | 120 | 0.488501 |
31332732bbdc48abc071aabf79f399dcd51ecadf | 10,087 | bzl | Python | research/seq_flow_lite/tf_ops/repo.bzl | NasTul/models | 4fabd84c5c5e2b34a1b95431788f7801de036421 | [
"Apache-2.0"
] | 82,518 | 2016-02-05T12:07:23.000Z | 2022-03-31T23:09:47.000Z | research/seq_flow_lite/tf_ops/repo.bzl | NasTul/models | 4fabd84c5c5e2b34a1b95431788f7801de036421 | [
"Apache-2.0"
] | 9,021 | 2016-03-08T01:02:05.000Z | 2022-03-31T08:06:35.000Z | research/seq_flow_lite/tf_ops/repo.bzl | NasTul/models | 4fabd84c5c5e2b34a1b95431788f7801de036421 | [
"Apache-2.0"
] | 54,341 | 2016-02-06T17:19:55.000Z | 2022-03-31T10:27:44.000Z | """Reverb custom external dependencies."""
# Sanitize a dependency so that it works correctly from code that includes
# reverb as a submodule.
def clean_dep(dep):
    """Return *dep* as a fully qualified label string resolved relative to this repo."""
    return str(Label(dep))
def get_python_path(ctx):
    """Return the Python interpreter path from $PYTHON_BIN_PATH; fail if unset."""
    path = ctx.os.environ.get("PYTHON_BIN_PATH")
    if not path:
        fail(
            "Could not get environment variable PYTHON_BIN_PATH. " +
            "Check your .bazelrc file.",
        )
    return path
def _find_tf_include_path(repo_ctx):
    """Ask the configured Python for tf.sysconfig.get_include(); fail on error."""
    exec_result = repo_ctx.execute(
        [
            get_python_path(repo_ctx),
            "-c",
            "import tensorflow as tf; import sys; " +
            "sys.stdout.write(tf.sysconfig.get_include())",
        ],
        quiet = True,
    )
    if exec_result.return_code != 0:
        fail("Could not locate tensorflow installation path:\n{}"
             .format(exec_result.stderr))
    # Take the last stdout line (presumably to skip any banner/warning output).
    return exec_result.stdout.splitlines()[-1]
def _find_tf_lib_path(repo_ctx):
    """Ask the configured Python for tf.sysconfig.get_lib(); fail on error."""
    exec_result = repo_ctx.execute(
        [
            get_python_path(repo_ctx),
            "-c",
            "import tensorflow as tf; import sys; " +
            "sys.stdout.write(tf.sysconfig.get_lib())",
        ],
        quiet = True,
    )
    if exec_result.return_code != 0:
        fail("Could not locate tensorflow installation path:\n{}"
             .format(exec_result.stderr))
    return exec_result.stdout.splitlines()[-1]
def _find_numpy_include_path(repo_ctx):
    """Ask the configured Python for numpy.get_include(); fail on error."""
    exec_result = repo_ctx.execute(
        [
            get_python_path(repo_ctx),
            "-c",
            "import numpy; import sys; " +
            "sys.stdout.write(numpy.get_include())",
        ],
        quiet = True,
    )
    if exec_result.return_code != 0:
        fail("Could not locate numpy includes path:\n{}"
             .format(exec_result.stderr))
    return exec_result.stdout.splitlines()[-1]
def _find_python_include_path(repo_ctx):
    """Ask the configured Python for its C include directory; fail on error."""
    exec_result = repo_ctx.execute(
        [
            get_python_path(repo_ctx),
            "-c",
            "from distutils import sysconfig; import sys; " +
            "sys.stdout.write(sysconfig.get_python_inc())",
        ],
        quiet = True,
    )
    if exec_result.return_code != 0:
        fail("Could not locate python includes path:\n{}"
             .format(exec_result.stderr))
    return exec_result.stdout.splitlines()[-1]
def _find_python_solib_path(repo_ctx):
    """Locate libpythonX.Y.so via pythonX.Y-config; return struct(dir, basename)."""
    exec_result = repo_ctx.execute(
        [
            get_python_path(repo_ctx),
            "-c",
            "import sys; vi = sys.version_info; " +
            "sys.stdout.write('python{}.{}'.format(vi.major, vi.minor))",
        ],
    )
    if exec_result.return_code != 0:
        fail("Could not locate python shared library path:\n{}"
             .format(exec_result.stderr))
    version = exec_result.stdout.splitlines()[-1]
    basename = "lib{}.so".format(version)
    # pythonX.Y-config --configdir points at the directory holding the solib.
    exec_result = repo_ctx.execute(
        ["{}-config".format(version), "--configdir"],
        quiet = True,
    )
    if exec_result.return_code != 0:
        fail("Could not locate python shared library path:\n{}"
             .format(exec_result.stderr))
    solib_dir = exec_result.stdout.splitlines()[-1]
    full_path = repo_ctx.path("{}/{}".format(solib_dir, basename))
    if not full_path.exists:
        fail("Unable to find python shared library file:\n{}/{}"
             .format(solib_dir, basename))
    return struct(dir = solib_dir, basename = basename)
def _eigen_archive_repo_impl(repo_ctx):
    """Expose the Eigen headers bundled with the installed TF as @.../:includes."""
    tf_include_path = _find_tf_include_path(repo_ctx)
    repo_ctx.symlink(tf_include_path, "tf_includes")
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "includes",
    hdrs = glob(["tf_includes/Eigen/**/*.h",
                 "tf_includes/Eigen/**",
                 "tf_includes/unsupported/Eigen/**/*.h",
                 "tf_includes/unsupported/Eigen/**"]),
    # https://groups.google.com/forum/#!topic/bazel-discuss/HyyuuqTxKok
    includes = ["tf_includes"],
    visibility = ["//visibility:public"],
)
""",
        executable = False,
    )
def _nsync_includes_repo_impl(repo_ctx):
    """Expose the nsync public headers shipped under TF's external/ directory."""
    tf_include_path = _find_tf_include_path(repo_ctx)
    repo_ctx.symlink(tf_include_path + "/external", "nsync_includes")
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "includes",
    hdrs = glob(["nsync_includes/nsync/public/*.h"]),
    includes = ["nsync_includes"],
    visibility = ["//visibility:public"],
)
""",
        executable = False,
    )
def _zlib_includes_repo_impl(repo_ctx):
    """Expose the zlib headers shipped under TF's external/zlib directory."""
    tf_include_path = _find_tf_include_path(repo_ctx)
    repo_ctx.symlink(
        tf_include_path + "/external/zlib",
        "zlib",
    )
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "includes",
    hdrs = glob(["zlib/**/*.h"]),
    includes = ["zlib"],
    visibility = ["//visibility:public"],
)
""",
        executable = False,
    )
def _snappy_includes_repo_impl(repo_ctx):
    """Expose the snappy headers shipped under TF's external/snappy directory."""
    tf_include_path = _find_tf_include_path(repo_ctx)
    repo_ctx.symlink(
        tf_include_path + "/external/snappy",
        "snappy",
    )
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "includes",
    hdrs = glob(["snappy/*.h"]),
    includes = ["snappy"],
    visibility = ["//visibility:public"],
)
""",
        executable = False,
    )
def _protobuf_includes_repo_impl(repo_ctx):
    """Expose TF's bundled protobuf headers using the checked-in protobuf.BUILD."""
    tf_include_path = _find_tf_include_path(repo_ctx)
    repo_ctx.symlink(tf_include_path, "tf_includes")
    repo_ctx.symlink(Label("//third_party:protobuf.BUILD"), "BUILD")
def _tensorflow_includes_repo_impl(repo_ctx):
    """Expose the installed TF headers (minus absl) plus a filegroup of its protos."""
    tf_include_path = _find_tf_include_path(repo_ctx)
    repo_ctx.symlink(tf_include_path, "tensorflow_includes")
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "includes",
    hdrs = glob(
        [
            "tensorflow_includes/**/*.h",
            "tensorflow_includes/third_party/eigen3/**",
        ],
        exclude = ["tensorflow_includes/absl/**/*.h"],
    ),
    includes = ["tensorflow_includes"],
    deps = [
        "@eigen_archive//:eigen3",
        "@protobuf_archive//:includes",
        "@zlib_includes//:includes",
        "@snappy_includes//:includes",
    ],
    visibility = ["//visibility:public"],
)
filegroup(
    name = "protos",
    srcs = glob(["tensorflow_includes/**/*.proto"]),
    visibility = ["//visibility:public"],
)
""",
        executable = False,
    )
def _tensorflow_solib_repo_impl(repo_ctx):
    """Expose the installed libtensorflow_framework.so.2 as a linkable library."""
    tf_lib_path = _find_tf_lib_path(repo_ctx)
    repo_ctx.symlink(tf_lib_path, "tensorflow_solib")
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "framework_lib",
    srcs = ["tensorflow_solib/libtensorflow_framework.so.2"],
    deps = ["@python_includes", "@python_includes//:numpy_includes"],
    visibility = ["//visibility:public"],
)
""",
    )
def _python_includes_repo_impl(repo_ctx):
    """Expose the Python and numpy C headers plus libpythonX.Y.so for linking."""
    python_include_path = _find_python_include_path(repo_ctx)
    python_solib = _find_python_solib_path(repo_ctx)
    repo_ctx.symlink(python_include_path, "python_includes")
    numpy_include_path = _find_numpy_include_path(repo_ctx)
    repo_ctx.symlink(numpy_include_path, "numpy_includes")
    repo_ctx.symlink(
        "{}/{}".format(python_solib.dir, python_solib.basename),
        python_solib.basename,
    )
    # Note, "@python_includes" is a misnomer since we include the
    # libpythonX.Y.so in the srcs, so we can get access to python's various
    # symbols at link time.
    repo_ctx.file(
        "BUILD",
        content = """
cc_library(
    name = "python_includes",
    hdrs = glob(["python_includes/**/*.h"]),
    srcs = ["{}"],
    includes = ["python_includes"],
    visibility = ["//visibility:public"],
)
cc_library(
    name = "numpy_includes",
    hdrs = glob(["numpy_includes/**/*.h"]),
    includes = ["numpy_includes"],
    visibility = ["//visibility:public"],
)
""".format(python_solib.basename),
        executable = False,
    )
def cc_tf_configure():
    """Autoconf pre-installed tensorflow repo.

    Instantiates one external repository per implementation function above so
    C++ targets can depend on the locally installed TensorFlow.
    NOTE(review): _eigen_archive_repo_impl is defined above but not registered
    here — confirm the @eigen_archive repository is created elsewhere.
    """
    make_nsync_repo = repository_rule(
        implementation = _nsync_includes_repo_impl,
    )
    make_nsync_repo(name = "nsync_includes")
    make_zlib_repo = repository_rule(
        implementation = _zlib_includes_repo_impl,
    )
    make_zlib_repo(name = "zlib_includes")
    make_snappy_repo = repository_rule(
        implementation = _snappy_includes_repo_impl,
    )
    make_snappy_repo(name = "snappy_includes")
    make_protobuf_repo = repository_rule(
        implementation = _protobuf_includes_repo_impl,
    )
    make_protobuf_repo(name = "protobuf_archive")
    make_tfinc_repo = repository_rule(
        implementation = _tensorflow_includes_repo_impl,
    )
    make_tfinc_repo(name = "tensorflow_includes")
    make_tflib_repo = repository_rule(
        implementation = _tensorflow_solib_repo_impl,
    )
    make_tflib_repo(name = "tensorflow_solib")
    make_python_inc_repo = repository_rule(
        implementation = _python_includes_repo_impl,
    )
    make_python_inc_repo(name = "python_includes")
def _reverb_protoc_archive(ctx):
    """Download a pinned protoc release and expose bin/protoc as :protoc_bin.

    $REVERB_PROTOC_VERSION overrides the pinned version; the sha256 check is
    then disabled because the override's checksum is unknown.
    """
    version = ctx.attr.version
    sha256 = ctx.attr.sha256
    override_version = ctx.os.environ.get("REVERB_PROTOC_VERSION")
    if override_version:
        sha256 = ""
        version = override_version
    urls = [
        "https://github.com/protocolbuffers/protobuf/releases/download/v%s/protoc-%s-linux-x86_64.zip" % (version, version),
    ]
    ctx.download_and_extract(
        url = urls,
        sha256 = sha256,
    )
    ctx.file(
        "BUILD",
        content = """
filegroup(
    name = "protoc_bin",
    srcs = ["bin/protoc"],
    visibility = ["//visibility:public"],
)
""",
        executable = False,
    )
# Repository rule wrapping _reverb_protoc_archive; requires a pinned version
# and its sha256 checksum.
reverb_protoc_archive = repository_rule(
    implementation = _reverb_protoc_archive,
    attrs = {
        "version": attr.string(mandatory = True),
        "sha256": attr.string(mandatory = True),
    },
)
def reverb_protoc_deps(version, sha256):
    """Instantiate the @protobuf_protoc repository pinned to *version*/*sha256*."""
    reverb_protoc_archive(name = "protobuf_protoc", version = version, sha256 = sha256)
| 30.020833 | 124 | 0.628234 |
e4d30d50c157e9f6875a2690c69f79d1e83ff1c3 | 2,799 | py | Python | tests/integration/sugar/test_reliable_submission.py | mDuo13/xrpl-py | 70f927dcd2dbb8644b3e210b0a8de2a214e71e3d | [
"0BSD"
] | null | null | null | tests/integration/sugar/test_reliable_submission.py | mDuo13/xrpl-py | 70f927dcd2dbb8644b3e210b0a8de2a214e71e3d | [
"0BSD"
] | null | null | null | tests/integration/sugar/test_reliable_submission.py | mDuo13/xrpl-py | 70f927dcd2dbb8644b3e210b0a8de2a214e71e3d | [
"0BSD"
] | null | null | null | from unittest import TestCase
from tests.integration.it_utils import JSON_RPC_CLIENT
from tests.integration.reusable_values import DESTINATION as DESTINATION_WALLET
from tests.integration.reusable_values import FEE, WALLET
from xrpl.account import get_next_valid_seq_number
from xrpl.models.transactions import AccountSet, Payment
from xrpl.transaction import (
LastLedgerSequenceExpiredException,
send_reliable_submission,
)
# Addresses derived from the shared integration-test wallets.
ACCOUNT = WALLET.classic_address
DESTINATION = DESTINATION_WALLET.classic_address
# AccountSet flag numbers used by the tests below — see the XRPL AccountSet
# flag tables for the asf* names these map to.
CLEAR_FLAG = 3
DOMAIN = "6578616D706C652E636F6D".lower()  # hex encoding of "example.com"
EMAIL_HASH = "10000000002000000000300000000012"
MESSAGE_KEY = "03AB40A0490F9B7ED8DF29D246BF2D6269820A0EE7742ACDD457BEA7C7D0931EDB"
SET_FLAG = 8
TRANSFER_RATE = 0
TICK_SIZE = 10
class TestReliableSubmission(TestCase):
    """Integration tests for send_reliable_submission against a live JSON-RPC node."""
    def test_simple(self):
        """An AccountSet transaction submits reliably and gets validated."""
        WALLET.next_sequence_num = get_next_valid_seq_number(ACCOUNT, JSON_RPC_CLIENT)
        account_set = AccountSet(
            account=ACCOUNT,
            fee=FEE,
            sequence=WALLET.next_sequence_num,
            set_flag=SET_FLAG,
            # Ten ledgers of headroom so the transaction can be validated.
            last_ledger_sequence=WALLET.next_sequence_num + 10,
        )
        response = send_reliable_submission(account_set, WALLET, JSON_RPC_CLIENT)
        self.assertTrue(response.result["validated"])
        self.assertEqual(response.result["meta"]["TransactionResult"], "tesSUCCESS")
        self.assertTrue(response.is_successful())
    def test_payment(self):
        """A Payment transaction submits reliably and gets validated."""
        WALLET.next_sequence_num = get_next_valid_seq_number(ACCOUNT, JSON_RPC_CLIENT)
        payment_dict = {
            "account": ACCOUNT,
            "sequence": WALLET.next_sequence_num,
            "last_ledger_sequence": WALLET.next_sequence_num + 10,
            "fee": "10000",
            "amount": "10",
            "destination": DESTINATION,
        }
        payment_transaction = Payment.from_dict(payment_dict)
        response = send_reliable_submission(
            payment_transaction, WALLET, JSON_RPC_CLIENT
        )
        self.assertTrue(response.result["validated"])
        self.assertEqual(response.result["meta"]["TransactionResult"], "tesSUCCESS")
        self.assertTrue(response.is_successful())
    def test_last_ledger_expiration(self):
        """Submission raises once last_ledger_sequence passes before validation."""
        WALLET.next_sequence_num = get_next_valid_seq_number(ACCOUNT, JSON_RPC_CLIENT)
        payment_dict = {
            "account": ACCOUNT,
            "sequence": WALLET.next_sequence_num,
            # Only one ledger of headroom: the transaction expires first.
            "last_ledger_sequence": WALLET.next_sequence_num + 1,
            "fee": "10000",
            "amount": "100",
            "destination": DESTINATION,
        }
        payment_transaction = Payment.from_dict(payment_dict)
        with self.assertRaises(LastLedgerSequenceExpiredException):
            send_reliable_submission(payment_transaction, WALLET, JSON_RPC_CLIENT)
| 39.422535 | 86 | 0.709182 |
476ff451ee1083b3aca8ffc87f863b3c931ece50 | 1,250 | py | Python | 1_1.py | tanny2015/Code_189 | 3cbd57ed7e3d7da69800696be6481620362dce76 | [
"MIT"
] | null | null | null | 1_1.py | tanny2015/Code_189 | 3cbd57ed7e3d7da69800696be6481620362dce76 | [
"MIT"
] | null | null | null | 1_1.py | tanny2015/Code_189 | 3cbd57ed7e3d7da69800696be6481620362dce76 | [
"MIT"
] | null | null | null |
#Solution 1
def add_and_check(key, hmap):
    """Record one occurrence of *key* (a single character) in *hmap*.

    Returns True while the character is still unique, False once it has
    been seen more than once.
    """
    hmap[key] = hmap.get(key, 0) + 1
    return hmap[key] <= 1
def check_duplicate(str):
    """Return True when every character in the string is unique, else False.

    A status message is printed either way, mirroring the original behaviour.
    The parameter name shadows the builtin ``str``; it is kept unchanged for
    backward compatibility with existing callers.
    """
    if len(set(str)) == len(str):
        print("all unique! \n")
        return True
    # Fixed typo in the original message ("unique1").
    print("not all unique! \n")
    return False
#check_duplicate("hkhkjhkjhkj")
#check_duplicate("hkdqe")
class Solution(object):
    """CtCI 1.1: determine whether a string has all unique characters."""
    def add_and_check(self, key, hmap):
        """Record one occurrence of *key* in *hmap*.

        Returns True while *key* has been seen at most once, False as soon
        as it becomes a duplicate. Kept for backward compatibility even
        though isUnique no longer depends on it.
        """
        hmap[key] = hmap.get(key, 0) + 1
        return hmap[key] <= 1
    def isUnique(self, astr):
        """Return True iff every character of *astr* occurs exactly once.

        Prints a status message either way, like the original implementation.
        """
        if len(set(astr)) == len(astr):
            print("all unique! \n")
            return True
        # Fixed typo in the original message ("unique1").
        print("not all unique! \n")
        return False
| 17.361111 | 55 | 0.4616 |
22c0a5476b98c5b62f5242cd1e07000f787e03b2 | 1,826 | py | Python | py/scripts/pointing_offsets.py | ameisner/ci_reduce | 17d1e339c72ab115bceaf67e3d73b399e527b49c | [
"BSD-3-Clause"
] | null | null | null | py/scripts/pointing_offsets.py | ameisner/ci_reduce | 17d1e339c72ab115bceaf67e3d73b399e527b49c | [
"BSD-3-Clause"
] | 13 | 2019-02-08T02:11:01.000Z | 2019-09-10T02:11:28.000Z | py/scripts/pointing_offsets.py | ameisner/ci_reduce | 17d1e339c72ab115bceaf67e3d73b399e527b49c | [
"BSD-3-Clause"
] | 2 | 2019-12-23T15:25:32.000Z | 2020-12-22T13:43:52.000Z | #!/usr/bin/env python
import argparse
import gfa_reduce.gfa_red as gfa_red
import astropy.io.fits as fits
import numpy as np
import os
basedir = '/exposures/desi'
def pointing_offsets(fm, fname_in):
    """Print RA/Dec pointing offsets (arcsec) between the header target and the field model.

    Parameters
    ----------
    fm : object or None
        Fitted field model with ``.ra`` and ``.dec`` attributes (degrees).
    fname_in : str
        Raw GFA FITS file; TARGTRA/TARGTDEC are read from its GFA extension.
    """
    if fm is None:
        # Bug fix: the original only printed and then crashed on fm.ra below.
        print('did not obtain a good FieldModel???')
        return
    h = fits.getheader(fname_in, extname='GFA')
    target_ra = h['TARGTRA']
    target_dec = h['TARGTDEC']
    print('cos(Dec) factor = ', np.cos(target_dec/(180.0/np.pi)))
    # RA offset includes the cos(Dec) factor so both offsets are true angles.
    dra_true_asec = 3600.0*(target_ra - fm.ra)*np.cos(target_dec/(180.0/np.pi))
    ddec_true_asec = 3600.0*(target_dec - fm.dec)
    print('RA POINTING OFFSET : ', '{:.2f}'.format(dra_true_asec), ' asec')
    print('DEC POINTING OFFSET : ', '{:.2f}'.format(ddec_true_asec), ' asec')
def _get_raw_filename(expid, night):
    """Return the path basedir/NIGHT/EXPID/gfa-EXPID.fits.fz for a raw acquisition image.

    Acquisition images are named gfa-????????.fits.fz, as opposed to the
    guide-????????-0000.fits.fz naming used elsewhere.
    """
    expid_str = str(expid).zfill(8)
    night_str = str(night).zfill(8)
    return os.path.join(basedir, night_str, expid_str,
                        'gfa-' + expid_str + '.fits.fz')
if __name__ == "__main__":
    # eventually should add a parameter for what angular
    # search radius to use, think default is 1.5 arcmin
    descr = 'compute GFA pointing offsets'
    parser = argparse.ArgumentParser(description=descr)
    parser.add_argument('expid', type=int, nargs=1)
    parser.add_argument('night', type=str, nargs=1)
    args = parser.parse_args()
    fname_in = _get_raw_filename(args.expid[0], args.night[0])
    print(fname_in)
    if not os.path.exists(fname_in):
        print('raw gfa-????????.fits.fz file not found??')
        # NOTE(review): assert(False) is stripped under `python -O`;
        # sys.exit(1) would be more robust here.
        assert(False)
    fm = gfa_red.acquire_field(fname_in)
    pointing_offsets(fm, fname_in)
| 28.984127 | 86 | 0.662103 |
a0ba6658f36d285887c34a73705d73c6178607d1 | 2,191 | py | Python | functions/heroku.py | Zslez/AttentiAlBot | 2c2ea5c8bfefe5f38a5f98ab2f13e59de748f355 | [
"MIT"
] | 5 | 2021-08-13T02:03:06.000Z | 2022-03-09T01:00:16.000Z | functions/heroku.py | Zslez/AttentiAlBot | 2c2ea5c8bfefe5f38a5f98ab2f13e59de748f355 | [
"MIT"
] | null | null | null | functions/heroku.py | Zslez/AttentiAlBot | 2c2ea5c8bfefe5f38a5f98ab2f13e59de748f355 | [
"MIT"
] | null | null | null | # letteralmente io che aggiro i limiti del server che mi hosta il Bot, così da tenerlo attivo 24/7 per sempre
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import heroku3
import os
import globals
__all__ = [
    'change_heroku',
    'hkey',
    'hkey2',
    'hname'
]
# Heroku API keys/app name/credentials come from the environment on the hosted
# deployment; when globals.name is truthy (presumably a local run without
# these env vars — TODO confirm) they are left as None.
hkey = None if globals.name else os.environ['HKEY']
hkey2 = None if globals.name else os.environ['HKEY2']
hname = None if globals.name else os.environ['HNAME']
hpass = None if globals.name else os.environ['HPASS']
hemail = None if globals.name else os.environ['HEMAIL']
def change_heroku(ctx):
    """Fail the worker dyno over to the sibling Heroku app when quota runs low.

    When get_remaining_time reports fewer than 100 hours, the worker is scaled
    up on the *other* app (attentialbot <-> attentialbot2) and scaled down on
    the current one.
    """
    res = get_remaining_time(hemail, hpass)
    # True when hname has a suffix beyond 'attentialbot' (i.e. we are app #2).
    string = bool(hname.replace('attentialbot', ''))
    if res < 100:
        # Start the worker on the sibling app...
        api = heroku3.from_key(hkey2)
        app = api.app(['attentialbot2', 'attentialbot'][string])
        app.process_formation()['worker'].scale(1)
        # ...and stop it on the current app.
        api = heroku3.from_key(hkey)
        app = api.app(hname)
        app.process_formation()['worker'].scale(0)
def get_remaining_time(email, passw):
    """Scrape the Heroku billing dashboard for the account's free-quota hours.

    Logs in with headless Chrome, dismisses the cookie banner and the MFA
    prompt, then reads the leading number of the 'account-quota-usage'
    element (presumably the remaining free dyno hours, given the
    ``res < 100`` check in change_heroku — TODO confirm).

    Parameters
    ----------
    email, passw : str
        Heroku account credentials.

    Returns
    -------
    float
        The first numeric token of the quota-usage element.
    """
    options = Options()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    with webdriver.Chrome(options = options) as driver:
        driver.get('https://dashboard.heroku.com/account/billing')
        wait10 = WebDriverWait(driver, 10).until
        # Reject the cookie consent banner so it does not block the login form.
        wait10(EC.presence_of_element_located((By.ID, 'onetrust-reject-all-handler'))).click()
        driver.find_element_by_id('email').send_keys(email)
        driver.find_element_by_id('password').send_keys(passw)
        driver.find_element_by_xpath('//*[@id="login"]/form/button').click()
        # Skip the "set up MFA later" interstitial.
        wait10(EC.presence_of_element_located((By.XPATH, '//*[@id="mfa-later"]/button'))).click()
        wait10(EC.presence_of_element_located((By.CLASS_NAME, 'account-quota-usage')))
        result = driver.find_element_by_class_name('account-quota-usage').find_element_by_class_name('gray')
        return float(result.text.split()[0])
4ff9b453191e9ec651fd1b4c1161e2675fb0f8a8 | 636 | py | Python | src/odontology/register/migrations/0013_auto_20160221_2224.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 2 | 2016-06-23T15:35:29.000Z | 2022-01-11T00:55:21.000Z | src/odontology/register/migrations/0013_auto_20160221_2224.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 27 | 2016-06-24T12:28:01.000Z | 2022-01-13T00:37:25.000Z | src/odontology/register/migrations/0013_auto_20160221_2224.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-22 01:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the misspelled Benefit field
    # 'social_worl' -> 'social_work' and makes 'primary_entity' optional
    # (blank/null CharField). Applied migrations should not be edited.
    dependencies = [
        ('register', '0012_benefit_detailbenefit'),
    ]
    operations = [
        migrations.RenameField(
            model_name='benefit',
            old_name='social_worl',
            new_name='social_work',
        ),
        migrations.AlterField(
            model_name='benefit',
            name='primary_entity',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
    ]
| 24.461538 | 74 | 0.603774 |
f6223e14293dc84baace4fd48863dae2158880e2 | 8,069 | py | Python | src/sqlfluff/rules/L010.py | tinder-albertyue/sqlfluff | 992bd8d02465c9a57f29d58d5c328e8f548f6cc9 | [
"MIT"
] | 1 | 2021-12-29T18:34:20.000Z | 2021-12-29T18:34:20.000Z | src/sqlfluff/rules/L010.py | tinder-albertyue/sqlfluff | 992bd8d02465c9a57f29d58d5c328e8f548f6cc9 | [
"MIT"
] | null | null | null | src/sqlfluff/rules/L010.py | tinder-albertyue/sqlfluff | 992bd8d02465c9a57f29d58d5c328e8f548f6cc9 | [
"MIT"
] | null | null | null | """Implementation of Rule L010."""
import regex
from typing import Tuple, List
from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix, RuleContext
from sqlfluff.core.rules.config_info import get_config_info
from sqlfluff.core.rules.doc_decorators import (
document_fix_compatible,
document_configuration,
)
@document_fix_compatible
@document_configuration
class Rule_L010(BaseRule):
    """Inconsistent capitalisation of keywords.
    | **Anti-pattern**
    | In this example, 'select 'is in lower-case whereas 'FROM' is in upper-case.
    .. code-block:: sql
        select
            a
        FROM foo
    | **Best practice**
    | Make all keywords either in upper-case or in lower-case
    .. code-block:: sql
        SELECT
            a
        FROM foo
        -- Also good
        select
            a
        from foo
    """
    # Binary operators behave like keywords too.
    _target_elems: List[Tuple[str, str]] = [
        ("type", "keyword"),
        ("type", "binary_operator"),
        ("type", "date_part"),
    ]
    config_keywords = ["capitalisation_policy"]
    # Human readable target elem for description
    _description_elem = "Keywords"
    def _eval(self, context: RuleContext) -> LintResult:
        """Inconsistent capitalisation of keywords.
        We use the `memory` feature here to keep track of cases known to be
        INconsistent with what we've seen so far as well as the top choice
        for what the possible case is.
        """
        # Skip if not an element of the specified type/name
        if not self.matches_target_tuples(context.segment, self._target_elems):
            return LintResult(memory=context.memory)
        # Get the capitalisation policy configuration.
        try:
            cap_policy = self.cap_policy
            cap_policy_opts = self.cap_policy_opts
        except AttributeError:
            # First-time only, read the settings from configuration. This is
            # very slow.
            cap_policy, cap_policy_opts = self._init_capitalisation_policy()
        memory = context.memory
        # Cases already eliminated by earlier segments in this file.
        refuted_cases = memory.get("refuted_cases", set())
        # Which cases are definitely inconsistent with the segment?
        if context.segment.raw[0] != context.segment.raw[0].upper():
            refuted_cases.update(["upper", "capitalise", "pascal"])
            if context.segment.raw != context.segment.raw.lower():
                refuted_cases.update(["lower"])
        else:
            refuted_cases.update(["lower"])
            if context.segment.raw != context.segment.raw.upper():
                refuted_cases.update(["upper"])
            if context.segment.raw != context.segment.raw.capitalize():
                refuted_cases.update(["capitalise"])
            if not context.segment.raw.isalnum():
                refuted_cases.update(["pascal"])
        # Update the memory
        memory["refuted_cases"] = refuted_cases
        self.logger.debug(
            f"Refuted cases after segment '{context.segment.raw}': {refuted_cases}"
        )
        # Skip if no inconsistencies, otherwise compute a concrete policy
        # to convert to.
        if cap_policy == "consistent":
            possible_cases = [c for c in cap_policy_opts if c not in refuted_cases]
            self.logger.debug(
                f"Possible cases after segment '{context.segment.raw}': {possible_cases}"
            )
            if possible_cases:
                # Save the latest possible case and skip
                memory["latest_possible_case"] = possible_cases[0]
                self.logger.debug(
                    f"Consistent capitalization, returning with memory: {memory}"
                )
                return LintResult(memory=memory)
            else:
                concrete_policy = memory.get("latest_possible_case", "upper")
                self.logger.debug(
                    f"Getting concrete policy '{concrete_policy}' from memory"
                )
        else:
            if cap_policy not in refuted_cases:
                # Skip
                self.logger.debug(
                    f"Consistent capitalization {cap_policy}, returning with "
                    f"memory: {memory}"
                )
                return LintResult(memory=memory)
            else:
                concrete_policy = cap_policy
                self.logger.debug(
                    f"Setting concrete policy '{concrete_policy}' from cap_policy"
                )
        # Set the fixed to same as initial in case any of below don't match
        fixed_raw = context.segment.raw
        # We need to change the segment to match the concrete policy
        if concrete_policy in ["upper", "lower", "capitalise"]:
            if concrete_policy == "upper":
                fixed_raw = fixed_raw.upper()
            elif concrete_policy == "lower":
                fixed_raw = fixed_raw.lower()
            elif concrete_policy == "capitalise":
                fixed_raw = fixed_raw.capitalize()
        elif concrete_policy == "pascal":
            # For Pascal we set the first letter in each "word" to uppercase
            # We do not lowercase other letters to allow for PascalCase style
            # words. This does mean we allow all UPPERCASE and also don't
            # correct Pascalcase to PascalCase, but there's only so much we can
            # do. We do correct underscore_words to Underscore_Words.
            fixed_raw = regex.sub(
                "([^a-zA-Z0-9]+|^)([a-zA-Z0-9])([a-zA-Z0-9]*)",
                lambda match: match.group(1) + match.group(2).upper() + match.group(3),
                context.segment.raw,
            )
        if fixed_raw == context.segment.raw:
            # No need to fix
            self.logger.debug(
                f"Capitalisation of segment '{context.segment.raw}' already OK with policy "
                f"'{concrete_policy}', returning with memory {memory}"
            )
            return LintResult(memory=memory)
        else:
            # build description based on the policy in use
            consistency = "consistently " if cap_policy == "consistent" else ""
            # concrete_policy is always one of the four options, so `policy`
            # is always bound below.
            if concrete_policy in ["upper", "lower"]:
                policy = f"{concrete_policy} case."
            elif concrete_policy == "capitalise":
                policy = "capitalised."
            elif concrete_policy == "pascal":
                policy = "pascal case."
            # Return the fixed segment
            self.logger.debug(
                f"INCONSISTENT Capitalisation of segment '{context.segment.raw}', fixing to "
                f"'{fixed_raw}' and returning with memory {memory}"
            )
            return LintResult(
                anchor=context.segment,
                fixes=[self._get_fix(context.segment, fixed_raw)],
                memory=memory,
                description=f"{self._description_elem} must be {consistency}{policy}",
            )
    def _get_fix(self, segment, fixed_raw) -> LintFix:
        """Given a segment found to have a fix, returns a LintFix for it.
        May be overridden by subclasses, which is useful when the parse tree
        structure varies from this simple base case.
        """
        return LintFix.replace(segment, [segment.edit(fixed_raw)])
    def _init_capitalisation_policy(self):
        """Called first time rule is evaluated to fetch & cache the policy.
        Returns the (cap_policy, cap_policy_opts) pair that was cached on self.
        """
        cap_policy_name = next(
            k for k in self.config_keywords if k.endswith("capitalisation_policy")
        )
        self.cap_policy = getattr(self, cap_policy_name)
        # All concrete options (everything except the meta-option "consistent").
        self.cap_policy_opts = [
            opt
            for opt in get_config_info()[cap_policy_name]["validation"]
            if opt != "consistent"
        ]
        self.logger.debug(
            f"Selected '{cap_policy_name}': '{self.cap_policy}' from options "
            f"{self.cap_policy_opts}"
        )
        cap_policy = self.cap_policy
        cap_policy_opts = self.cap_policy_opts
        return cap_policy, cap_policy_opts
| 38.42381 | 93 | 0.588921 |
3d78c8c7fd96e551792f553b5408e534347a298f | 1,238 | py | Python | geoq/accounts/utils.py | kaydoh/geoq | 6f10818d0cc3cef4ba8113e8b047d27e79b2f8b0 | [
"MIT"
] | 471 | 2015-01-05T15:16:26.000Z | 2022-03-28T05:06:11.000Z | geoq/accounts/utils.py | kaydoh/geoq | 6f10818d0cc3cef4ba8113e8b047d27e79b2f8b0 | [
"MIT"
] | 109 | 2015-01-06T20:00:58.000Z | 2022-03-11T23:17:53.000Z | geoq/accounts/utils.py | kaydoh/geoq | 6f10818d0cc3cef4ba8113e8b047d27e79b2f8b0 | [
"MIT"
] | 100 | 2015-01-05T15:16:39.000Z | 2021-12-01T12:13:13.000Z | # -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.conf import settings
import requests
import collections
def get_openbadges_ids(user):
url = 'http://backpack.openbadges.org/displayer/convert/email'
payload = {'email': user.userprofile.openbadge_id}
userid = groupid = -1
try:
r = requests.post(url, data=payload, timeout=5)
if r.status_code == 200:
userid = r.json()['userId']
# now try to get the user's groups and see if one is for GeoQ
gurl = 'http://backpack.openbadges.org/displayer/%d/groups.json' % userid
r2 = requests.get(gurl, timeout=5)
groups = r2.json()['groups']
for g in groups:
if g['name'].lower().startswith("geoq"):
groupid = g['groupId']
break
except requests.exceptions.ConnectionError as e: # This is the correct syntax
r = "No response"
ids = collections.namedtuple('IDs', ['userid','groupid'])
return ids(userid, groupid)
| 28.790698 | 104 | 0.61874 |
864664decd50a36a55223c676e2cef1671a8abe0 | 663 | py | Python | mamotif/cli/intergrate.py | shao-lab/MAmotif | eae1bfefc74934f49565371b06971a9781d00528 | [
"BSD-3-Clause"
] | 3 | 2019-02-09T02:15:38.000Z | 2020-11-14T07:35:42.000Z | mamotif/cli/intergrate.py | shao-lab/MAmotif | eae1bfefc74934f49565371b06971a9781d00528 | [
"BSD-3-Clause"
] | null | null | null | mamotif/cli/intergrate.py | shao-lab/MAmotif | eae1bfefc74934f49565371b06971a9781d00528 | [
"BSD-3-Clause"
] | null | null | null | """
mamotif.cli.integrate
---------------------
Integrate MAnorm and MotifScan results to run MAmoitf.
"""
from manorm.logging import setup_logger as setup_manorm_logger
from motifscan.logging import setup_logger as setup_motifscan_logger
from mamotif.integration import run_integration
def run(args):
setup_manorm_logger(args.verbose)
setup_motifscan_logger(args.verbose)
run_integration(
f_manorm=args.f_manorm, f_motifscan=args.f_motifscan,
negative=args.negative, genome=args.genome, split=args.split,
upstream=args.upstream, downstream=args.downstream,
correction=args.correction, output_dir=args.output_dir)
| 30.136364 | 69 | 0.757164 |
bd30628f941e974fb6d3ca704d16c7fde928a187 | 19,430 | py | Python | mkt/webapps/indexers.py | eviljeff/zamboni | c446a9fc75513c9eef3ff7b1f0e23bbab29f0e68 | [
"BSD-3-Clause"
] | null | null | null | mkt/webapps/indexers.py | eviljeff/zamboni | c446a9fc75513c9eef3ff7b1f0e23bbab29f0e68 | [
"BSD-3-Clause"
] | null | null | null | mkt/webapps/indexers.py | eviljeff/zamboni | c446a9fc75513c9eef3ff7b1f0e23bbab29f0e68 | [
"BSD-3-Clause"
] | null | null | null | from operator import attrgetter
from django.core.urlresolvers import reverse
from django.db.models import Min
import commonware.log
from elasticsearch_dsl import F
from elasticsearch_dsl.filter import Bool
import mkt
from mkt.constants import APP_FEATURES
from mkt.constants.applications import DEVICE_GAIA
from mkt.prices.models import AddonPremium
from mkt.search.indexers import BaseIndexer
from mkt.search.utils import Search
from mkt.translations.models import attach_trans_dict
log = commonware.log.getLogger('z.addons')
class WebappIndexer(BaseIndexer):
"""Fields we don't need to expose in the results, only used for filtering
or sorting."""
hidden_fields = (
'*.raw',
'*_sort',
'popularity_*',
'trending_*',
'boost',
'owners',
'features',
# 'name' and 'description', as well as the locale variants, are only
# used for filtering. The fields that are used by the API are
# 'name_translations' and 'description_translations'.
'name',
'description',
'name_l10n_*',
'description_l10n_*',
)
"""
Bunch of ES stuff for Webapp include mappings, indexing, search.
"""
@classmethod
def search(cls, using=None):
"""
Returns a `Search` object.
We override this to use our patched version which adds statsd timing.
"""
return (Search(
using=using or cls.get_es(), index=cls.get_index(),
doc_type=cls.get_mapping_type_name())
.extra(_source={'exclude': cls.hidden_fields}))
@classmethod
def get_mapping_type_name(cls):
"""
Returns mapping type name which is used as the key in ES_INDEXES to
determine which index to use.
We override this because Webapp is a proxy model to Addon.
"""
return 'webapp'
@classmethod
def get_model(cls):
from mkt.webapps.models import Webapp
return Webapp
@classmethod
def get_mapping(cls):
doc_type = cls.get_mapping_type_name()
mapping = {
doc_type: {
# Disable _all field to reduce index size.
'_all': {'enabled': False},
'properties': {
# App fields.
'id': {'type': 'long'},
'app_slug': {'type': 'string'},
'app_type': {'type': 'byte'},
'author': {
'type': 'string',
'analyzer': 'default_icu',
'fields': {
# For exact matches. The simple analyzer allows
# for case-insensitive matching.
'raw': {'type': 'string',
'analyzer': 'exact_lowercase'},
},
},
'banner_regions': cls.string_not_indexed(),
'bayesian_rating': {'type': 'float', 'doc_values': True},
'category': cls.string_not_analyzed(),
'content_descriptors': cls.string_not_indexed(),
'content_ratings': {
'type': 'object',
'dynamic': 'true',
},
'created': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
'current_version': cls.string_not_indexed(),
'default_locale': cls.string_not_indexed(),
'description': {'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100},
'device': {'type': 'byte'},
# The date this app was added to the escalation queue.
'escalation_date': {'format': 'dateOptionalTime',
'type': 'date', 'doc_values': True},
'features': {
'type': 'object',
'properties': dict(
('has_%s' % f.lower(), {'type': 'boolean'})
for f in APP_FEATURES)
},
'file_size': {'type': 'long'},
'has_public_stats': {'type': 'boolean'},
'icon_hash': cls.string_not_indexed(),
'interactive_elements': cls.string_not_indexed(),
'installs_allowed_from': cls.string_not_analyzed(),
'is_disabled': {'type': 'boolean'},
'is_escalated': {'type': 'boolean'},
'is_offline': {'type': 'boolean'},
'is_priority': {'type': 'boolean'},
'is_rereviewed': {'type': 'boolean'},
'last_updated': {'format': 'dateOptionalTime',
'type': 'date'},
'latest_version': {
'type': 'object',
'properties': {
'status': {'type': 'byte'},
'is_privileged': {'type': 'boolean'},
'has_editor_comment': {'type': 'boolean'},
'has_info_request': {'type': 'boolean'},
'nomination_date': {'type': 'date',
'format': 'dateOptionalTime'},
'created_date': {'type': 'date',
'format': 'dateOptionalTime'},
},
},
'manifest_url': cls.string_not_analyzed(),
'modified': {'format': 'dateOptionalTime',
'type': 'date'},
# Name for searching. This is a list of all the localized
# names for the app. We add "position_offset_gap" to work
# around the fact that ES stores the same list of tokens as
# if this were a single string. The offset gap adds 100
# positions between each name and ensures one string from
# one name and one string from another name won't both
# match with a phrase match query.
'name': {
'type': 'string',
'analyzer': 'default_icu',
'position_offset_gap': 100,
# For exact matches. Referenced as `name.raw`.
'fields': {
'raw': cls.string_not_analyzed(
position_offset_gap=100)
},
},
# Name for sorting.
'name_sort': cls.string_not_analyzed(doc_values=True),
# Name for suggestions.
'name_suggest': {'type': 'completion', 'payloads': True},
'owners': {'type': 'long'},
'package_path': cls.string_not_indexed(),
'premium_type': {'type': 'byte'},
'previews': {
'type': 'object',
'dynamic': 'true',
},
'price_tier': cls.string_not_indexed(),
'ratings': {
'type': 'object',
'properties': {
'average': {'type': 'float'},
'count': {'type': 'short'},
}
},
'region_exclusions': {'type': 'short'},
'reviewed': {'format': 'dateOptionalTime', 'type': 'date',
'doc_values': True},
# The date this app was added to the re-review queue.
'rereview_date': {'format': 'dateOptionalTime',
'type': 'date', 'doc_values': True},
'status': {'type': 'byte'},
'supported_locales': cls.string_not_analyzed(),
'tags': {'type': 'string', 'analyzer': 'simple'},
'upsell': {
'type': 'object',
'properties': {
'id': {'type': 'long'},
'app_slug': cls.string_not_indexed(),
'icon_url': cls.string_not_indexed(),
'name': cls.string_not_indexed(),
'region_exclusions': {'type': 'short'},
}
},
'uses_flash': {'type': 'boolean'},
'versions': {
'type': 'object',
'properties': {
'version': cls.string_not_indexed(),
'resource_uri': cls.string_not_indexed(),
}
},
}
}
}
# Attach boost field, because we are going to need search by relevancy.
cls.attach_boost_mapping(mapping)
# Attach popularity and trending.
cls.attach_trending_and_popularity_mappings(mapping)
# Add fields that we expect to return all translations.
cls.attach_translation_mappings(
mapping, ('banner_message', 'description', 'homepage',
'name', 'release_notes', 'support_email',
'support_url'))
# Add language-specific analyzers.
cls.attach_language_specific_analyzers(
mapping, ('name', 'description'))
return mapping
@classmethod
def extract_document(cls, pk=None, obj=None):
"""Extracts the ElasticSearch index document for this instance."""
from mkt.webapps.models import (AppFeatures, attach_devices,
attach_prices, attach_tags,
attach_translations, RatingDescriptors,
RatingInteractives)
if obj is None:
obj = cls.get_model().objects.no_cache().get(pk=pk)
# Attach everything we need to index apps.
for transform in (attach_devices, attach_prices, attach_tags,
attach_translations):
transform([obj])
latest_version = obj.latest_version
version = obj.current_version
geodata = obj.geodata
features = (version.features.to_dict()
if version else AppFeatures().to_dict())
try:
status = latest_version.statuses[0][1] if latest_version else None
except IndexError:
status = None
attrs = ('app_slug', 'bayesian_rating', 'created', 'default_locale',
'icon_hash', 'id', 'is_disabled', 'is_offline', 'file_size',
'last_updated', 'modified', 'premium_type', 'status',
'uses_flash')
d = dict(zip(attrs, attrgetter(*attrs)(obj)))
d['app_type'] = obj.app_type_id
d['author'] = obj.developer_name
d['banner_regions'] = geodata.banner_regions_slugs()
d['category'] = obj.categories if obj.categories else []
d['content_ratings'] = (obj.get_content_ratings_by_body(es=True) or
None)
try:
d['content_descriptors'] = obj.rating_descriptors.to_keys()
except RatingDescriptors.DoesNotExist:
d['content_descriptors'] = []
d['current_version'] = version.version if version else None
d['device'] = getattr(obj, 'device_ids', [])
d['features'] = features
d['has_public_stats'] = obj.public_stats
try:
d['interactive_elements'] = obj.rating_interactives.to_keys()
except RatingInteractives.DoesNotExist:
d['interactive_elements'] = []
d['installs_allowed_from'] = (
version.manifest.get('installs_allowed_from', ['*'])
if version else ['*'])
d['is_priority'] = obj.priority_review
is_escalated = obj.escalationqueue_set.exists()
d['is_escalated'] = is_escalated
d['escalation_date'] = (obj.escalationqueue_set.get().created
if is_escalated else None)
is_rereviewed = obj.rereviewqueue_set.exists()
d['is_rereviewed'] = is_rereviewed
d['rereview_date'] = (obj.rereviewqueue_set.get().created
if is_rereviewed else None)
if latest_version:
d['latest_version'] = {
'status': status,
'is_privileged': latest_version.is_privileged,
'has_editor_comment': latest_version.has_editor_comment,
'has_info_request': latest_version.has_info_request,
'nomination_date': latest_version.nomination,
'created_date': latest_version.created,
}
else:
d['latest_version'] = {
'status': None,
'is_privileged': None,
'has_editor_comment': None,
'has_info_request': None,
'nomination_date': None,
'created_date': None,
}
d['manifest_url'] = obj.get_manifest_url()
d['package_path'] = obj.get_package_path()
d['name_sort'] = unicode(obj.name).lower()
d['owners'] = [au.user.id for au in
obj.addonuser_set.filter(role=mkt.AUTHOR_ROLE_OWNER)]
d['previews'] = [{'filetype': p.filetype, 'modified': p.modified,
'id': p.id, 'sizes': p.sizes}
for p in obj.previews.all()]
try:
p = obj.addonpremium.price
d['price_tier'] = p.name
except AddonPremium.DoesNotExist:
d['price_tier'] = None
d['ratings'] = {
'average': obj.average_rating,
'count': obj.total_reviews,
}
d['region_exclusions'] = obj.get_excluded_region_ids()
d['reviewed'] = obj.versions.filter(
deleted=False).aggregate(Min('reviewed')).get('reviewed__min')
# The default locale of the app is considered "supported" by default.
supported_locales = [obj.default_locale]
other_locales = (filter(None, version.supported_locales.split(','))
if version else [])
if other_locales:
supported_locales.extend(other_locales)
d['supported_locales'] = list(set(supported_locales))
d['tags'] = getattr(obj, 'tag_list', [])
if obj.upsell and obj.upsell.premium.is_published():
upsell_obj = obj.upsell.premium
d['upsell'] = {
'id': upsell_obj.id,
'app_slug': upsell_obj.app_slug,
'icon_url': upsell_obj.get_icon_url(128),
# TODO: Store all localizations of upsell.name.
'name': unicode(upsell_obj.name),
'region_exclusions': upsell_obj.get_excluded_region_ids()
}
d['versions'] = [dict(version=v.version,
resource_uri=reverse_version(v))
for v in obj.versions.all()]
# Handle localized fields.
# This adds both the field used for search and the one with
# all translations for the API.
for field in ('description', 'name'):
d.update(cls.extract_field_translations(
obj, field, include_field_for_search=True))
# This adds only the field with all the translations for the API, we
# don't need to search on those.
for field in ('homepage', 'support_email', 'support_url'):
d.update(cls.extract_field_translations(obj, field))
if version:
attach_trans_dict(version._meta.model, [version])
d.update(cls.extract_field_translations(
version, 'release_notes', db_field='releasenotes_id'))
else:
d['release_notes_translations'] = None
attach_trans_dict(geodata._meta.model, [geodata])
d.update(cls.extract_field_translations(geodata, 'banner_message'))
# Add boost, popularity, trending values.
d.update(cls.extract_popularity_trending_boost(obj))
# If the app is compatible with Firefox OS, push suggestion data in the
# index - This will be used by RocketbarView API, which is specific to
# Firefox OS.
if DEVICE_GAIA.id in d['device'] and obj.is_published():
d['name_suggest'] = {
'input': d['name'],
'output': unicode(obj.id), # We only care about the payload.
'weight': int(d['boost']),
'payload': {
'default_locale': d['default_locale'],
'icon_hash': d['icon_hash'],
'id': d['id'],
'manifest_url': d['manifest_url'],
'modified': d['modified'],
'name_translations': d['name_translations'],
'slug': d['app_slug'],
}
}
for field in ('name', 'description'):
d.update(cls.extract_field_analyzed_translations(obj, field))
return d
@classmethod
def get_indexable(cls):
"""Returns the queryset of ids of all things to be indexed."""
from mkt.webapps.models import Webapp
return Webapp.with_deleted.all()
@classmethod
def run_indexing(cls, ids, ES=None, index=None, **kw):
"""Override run_indexing to use app transformers."""
from mkt.webapps.models import Webapp
log.info('Indexing %s webapps' % len(ids))
qs = Webapp.with_deleted.no_cache().filter(id__in=ids)
ES = ES or cls.get_es()
docs = []
for obj in qs:
try:
docs.append(cls.extract_document(obj.id, obj=obj))
except Exception as e:
log.error('Failed to index webapp {0}: {1}'
.format(obj.id, repr(e)),
# Trying to chase down a cache-machine problem.
exc_info="marketplace:" in str(e))
cls.bulk_index(docs, es=ES, index=index or cls.get_index())
@classmethod
def filter_by_apps(cls, app_ids, queryset=None):
"""
Filters the given queryset by the given app IDs.
This uses a `should` filter, which is equivalent to an "OR".
"""
queryset = queryset or cls.search()
app_ids = list(set(app_ids)) # De-dupe.
queryset = queryset.filter(Bool(should=[F('terms', id=app_ids)]))
return queryset[0:len(app_ids)]
def reverse_version(version):
"""
The try/except AttributeError allows this to be used where the input is
ambiguous, and could be either an already-reversed URL or a Version object.
"""
if version:
try:
return reverse('version-detail', kwargs={'pk': version.pk})
except AttributeError:
return version
return
| 41.875 | 79 | 0.506125 |
72ce6419993ed6fd2a5a602017ffcb245eb6b11c | 1,472 | py | Python | convert.py | fogleman/Punchcard | 9ca59fb696beb6b6f19767bc429e9cef49eb30ff | [
"MIT"
] | 98 | 2015-01-05T19:21:38.000Z | 2022-03-08T20:26:01.000Z | convert.py | fogleman/Punchcard | 9ca59fb696beb6b6f19767bc429e9cef49eb30ff | [
"MIT"
] | 2 | 2016-11-17T15:06:13.000Z | 2018-06-19T18:55:02.000Z | convert.py | fogleman/Punchcard | 9ca59fb696beb6b6f19767bc429e9cef49eb30ff | [
"MIT"
] | 9 | 2015-01-06T01:25:47.000Z | 2021-05-22T19:43:03.000Z | '''
This utility script converts (row, col, value) records like this:
2,6,9
2,7,23
2,8,74
...
6,20,76
6,21,27
6,22,0
Into a tabular format like this:
,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22
2,9,23,74,225,351,434,513,666,710,890,776,610,435,166,100,46,1
3,12,29,53,166,250,369,370,428,549,625,618,516,386,179,101,51,5
4,9,30,79,214,350,460,478,568,677,743,700,448,473,207,138,42,2
5,9,16,84,171,294,342,435,470,552,594,642,518,350,182,95,54,2
6,13,27,93,224,402,568,693,560,527,374,364,223,139,89,76,27,0
The tabular format CSV can then be used with punchcard.py
'''
import csv
def process(path):
with open(path, 'rb') as fp:
reader = csv.reader(fp)
csv_rows = list(reader)
rows = set()
cols = set()
lookup = {}
int_rows = all(x[0].isdigit() for x in csv_rows[1:])
int_cols = all(x[1].isdigit() for x in csv_rows[1:])
for row, col, value in csv_rows[1:]:
if int_rows:
row = int(row)
if int_cols:
col = int(col)
rows.add(row)
cols.add(col)
lookup[(row, col)] = value
rows = sorted(rows)
cols = sorted(cols)
result = [[''] + cols]
for row in rows:
data = [lookup.get((row, col), 0) for col in cols]
result.append([row] + data)
with open(path, 'wb') as fp:
writer = csv.writer(fp)
writer.writerows(result)
if __name__ == '__main__':
import sys
process(sys.argv[1])
| 26.285714 | 65 | 0.589674 |
c9a55f81a0386ea9802fbfbaf46dcaa582f9ea21 | 136 | py | Python | Python3/1020.py | Di-Ca-N/URI-Online-Judge | 160797b534fe8c70e719b1ea41690157dbdbb52e | [
"MIT"
] | null | null | null | Python3/1020.py | Di-Ca-N/URI-Online-Judge | 160797b534fe8c70e719b1ea41690157dbdbb52e | [
"MIT"
] | null | null | null | Python3/1020.py | Di-Ca-N/URI-Online-Judge | 160797b534fe8c70e719b1ea41690157dbdbb52e | [
"MIT"
] | null | null | null | n = int(input())
anos = n//365
meses = n%365//30
dias = n%365%30
print('''{} ano(s)
{} mes(es)
{} dia(s)'''.format(anos, meses, dias))
| 15.111111 | 39 | 0.551471 |
ca3aba4c1388d7f175b57cb35d4ede65f9aafa14 | 535 | py | Python | adventofcode/2020/13/2.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | 2 | 2018-01-18T11:01:36.000Z | 2021-12-20T18:14:48.000Z | adventofcode/2020/13/2.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | null | null | null | adventofcode/2020/13/2.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import fileinput
lines = [l.strip() for l in fileinput.input()]
ids = list(filter(lambda p: p[1] != 'x', enumerate(lines[1].split(','))))
ids = [(int(p[1]), p[0]) for p in ids]
def inv(a, m):
prod = 1
for _ in range(m - 2): prod *= a
return prod % m
M = 1
for i in ids: M *= i[0]
# Convert to
# x = a1modm x = a2modm ...
# and use Chinese remainder theorem
x = 0
for p in ids[1:]:
m, a = p
md = M // m
a *= -1
while a < 0:
a += m
x += a * md * inv(md, m)
print(x % M) | 19.107143 | 73 | 0.521495 |
cef76faded51740bfa2c72130d73a780e09e7784 | 17,485 | py | Python | pybind/nos/v7_1_0/interface/port_channel/ipv6/ipv6_config/address/ipv6_address/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/port_channel/ipv6/ipv6_config/address/ipv6_address/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/nos/v7_1_0/interface/port_channel/ipv6/ipv6_config/address/ipv6_address/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import eui_config
class ipv6_address(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel/ipv6/ipv6-config/address/ipv6-address. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__address','__secondary','__eui_config','__anycast',)
_yang_name = 'ipv6-address'
_rest_name = 'ipv6-address'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__eui_config = YANGDynClass(base=eui_config.eui_config, is_container='container', presence=False, yang_name="eui-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)
self.__secondary = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="secondary", rest_name="secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Secondary ipv6 address on an interface'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__anycast = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel', u'ipv6', u'ipv6-config', u'address', u'ipv6-address']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel', u'ipv6', u'address', u'ipv6-address']
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/address (union)
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/address (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """address must be of a type compatible with union""",
'defined-type': "brocade-ipv6-config:union",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)""",
})
self.__address = t
if hasattr(self, '_set'):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN;; IPv6 prefix format: xxxx:xxxx/ml, xxxx:xxxx::/ml, xxxx::xx/128'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='union', is_config=True)
def _get_secondary(self):
"""
Getter method for secondary, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/secondary (empty)
"""
return self.__secondary
def _set_secondary(self, v, load=False):
"""
Setter method for secondary, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/secondary (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_secondary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_secondary() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="secondary", rest_name="secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Secondary ipv6 address on an interface'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """secondary must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="secondary", rest_name="secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Secondary ipv6 address on an interface'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__secondary = t
if hasattr(self, '_set'):
self._set()
def _unset_secondary(self):
self.__secondary = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="secondary", rest_name="secondary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Secondary ipv6 address on an interface'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
def _get_eui_config(self):
"""
Getter method for eui_config, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/eui_config (container)
"""
return self.__eui_config
def _set_eui_config(self, v, load=False):
"""
Setter method for eui_config, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/eui_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_eui_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_eui_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=eui_config.eui_config, is_container='container', presence=False, yang_name="eui-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """eui_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=eui_config.eui_config, is_container='container', presence=False, yang_name="eui-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)""",
})
self.__eui_config = t
if hasattr(self, '_set'):
self._set()
def _unset_eui_config(self):
self.__eui_config = YANGDynClass(base=eui_config.eui_config, is_container='container', presence=False, yang_name="eui-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='container', is_config=True)
def _get_anycast(self):
"""
Getter method for anycast, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/anycast (empty)
"""
return self.__anycast
def _set_anycast(self, v, load=False):
"""
Setter method for anycast, mapped from YANG variable /interface/port_channel/ipv6/ipv6_config/address/ipv6_address/anycast (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """anycast must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__anycast = t
if hasattr(self, '_set'):
self._set()
def _unset_anycast(self):
self.__anycast = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="anycast", rest_name="anycast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address as anycast'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
address = __builtin__.property(_get_address, _set_address)
secondary = __builtin__.property(_get_secondary, _set_secondary)
eui_config = __builtin__.property(_get_eui_config, _set_eui_config)
anycast = __builtin__.property(_get_anycast, _set_anycast)
_pyangbind_elements = {'address': address, 'secondary': secondary, 'eui_config': eui_config, 'anycast': anycast, }
| 74.722222 | 1,071 | 0.689677 |
d0dda897eb68c789b1e40fbcdd5c8bed246c3935 | 26,745 | py | Python | localstack/utils/bootstrap.py | kubawach/localstack | 641b890e9328ad3d010570766109c646d1f10a3a | [
"Apache-2.0"
] | 1 | 2021-06-08T12:59:41.000Z | 2021-06-08T12:59:41.000Z | localstack/utils/bootstrap.py | kubawach/localstack | 641b890e9328ad3d010570766109c646d1f10a3a | [
"Apache-2.0"
] | 1 | 2021-03-01T13:55:42.000Z | 2021-03-01T13:55:42.000Z | localstack/utils/bootstrap.py | kubawach/localstack | 641b890e9328ad3d010570766109c646d1f10a3a | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import json
import time
import select
import pkgutil
import logging
import warnings
import threading
import traceback
import subprocess
import six
import shutil
import pip as pip_mod
from datetime import datetime
from concurrent.futures._base import Future
from localstack import constants, config
from localstack.utils.analytics.profiler import log_duration
# set up logger
LOG = logging.getLogger(os.path.basename(__file__))
# maps plugin scope ("services", "commands") to flags which indicate whether plugins have been loaded
PLUGINS_LOADED = {}
# predefined list of plugin modules, to speed up the plugin loading at startup
# note: make sure to load localstack_ext before localstack
PLUGIN_MODULES = ['localstack_ext', 'localstack']
# marker for extended/ignored libs in requirements.txt
IGNORED_LIB_MARKER = '#extended-lib'
BASIC_LIB_MARKER = '#basic-lib'
# whether or not to manually fix permissions on /var/run/docker.sock (currently disabled)
DO_CHMOD_DOCKER_SOCK = False
# log format strings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
# plugin scopes
PLUGIN_SCOPE_SERVICES = 'services'
PLUGIN_SCOPE_COMMANDS = 'commands'
# maps from API names to list of other API names that they depend on
API_DEPENDENCIES = {
'dynamodb': ['dynamodbstreams'],
'dynamodbstreams': ['kinesis'],
'es': ['elasticsearch'],
'lambda': ['logs', 'cloudwatch'],
'kinesis': ['dynamodb'],
'firehose': ['kinesis']
}
# composites define an abstract name like "serverless" that maps to a set of services
API_COMPOSITES = {
'serverless': ['cloudformation', 'cloudwatch', 'iam', 'sts', 'lambda', 'dynamodb', 'apigateway', 's3'],
'cognito': ['cognito-idp', 'cognito-identity']
}
# environment variable that indicates that we're executing in
# the context of the script that starts the Docker container
ENV_SCRIPT_STARTING_DOCKER = 'LS_SCRIPT_STARTING_DOCKER'
def bootstrap_installation():
try:
from localstack.services import infra
assert infra
except Exception:
install_dependencies()
def install_dependencies():
# determine requirements
root_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
reqs_file = os.path.join(root_folder, 'requirements.txt')
reqs_copy_file = os.path.join(root_folder, 'localstack', 'requirements.copy.txt')
if not os.path.exists(reqs_copy_file):
shutil.copy(reqs_file, reqs_copy_file)
with open(reqs_copy_file) as f:
requirements = f.read()
install_requires = []
for line in re.split('\n', requirements):
if line and line[0] != '#':
if BASIC_LIB_MARKER not in line and IGNORED_LIB_MARKER not in line:
line = line.split(' #')[0].strip()
install_requires.append(line)
LOG.info('Lazily installing missing pip dependencies, this could take a while: %s' %
', '.join(install_requires))
args = ['install'] + install_requires
return run_pip_main(args)
def run_pip_main(args):
if hasattr(pip_mod, 'main'):
return pip_mod.main(args)
import pip._internal
if hasattr(pip._internal, 'main'):
return pip._internal.main(args)
import pip._internal.main
return pip._internal.main.main(args)
@log_duration()
def load_plugin_from_path(file_path, scope=None):
if os.path.exists(file_path):
module = re.sub(r'(^|.+/)([^/]+)/plugins.py', r'\2', file_path)
method_name = 'register_localstack_plugins'
scope = scope or PLUGIN_SCOPE_SERVICES
if scope == PLUGIN_SCOPE_COMMANDS:
method_name = 'register_localstack_commands'
try:
namespace = {}
exec('from %s.plugins import %s' % (module, method_name), namespace)
method_to_execute = namespace[method_name]
except Exception as e:
if (not re.match(r'.*cannot import name .*%s.*' % method_name, str(e)) and
('No module named' not in str(e))):
LOG.debug('Unable to load plugins from module %s: %s' % (module, e))
return
try:
LOG.debug('Loading plugins - scope "%s", module "%s": %s' % (scope, module, method_to_execute))
return method_to_execute()
except Exception as e:
if not os.environ.get(ENV_SCRIPT_STARTING_DOCKER):
LOG.warning('Unable to load plugins from file %s: %s' % (file_path, e))
def should_load_module(module, scope):
if module == 'localstack_ext' and not os.environ.get('LOCALSTACK_API_KEY'):
return False
return True
@log_duration()
def load_plugins(scope=None):
scope = scope or PLUGIN_SCOPE_SERVICES
if PLUGINS_LOADED.get(scope):
return PLUGINS_LOADED[scope]
t1 = now_utc()
is_infra_process = os.environ.get(constants.LOCALSTACK_INFRA_PROCESS) in ['1', 'true'] or '--host' in sys.argv
log_level = logging.WARNING if scope == PLUGIN_SCOPE_COMMANDS and not is_infra_process else None
setup_logging(log_level=log_level)
loaded_files = []
result = []
# Use a predefined list of plugin modules for now, to speed up the plugin loading at startup
# search_modules = pkgutil.iter_modules()
search_modules = PLUGIN_MODULES
for module in search_modules:
if not should_load_module(module, scope):
continue
file_path = None
if isinstance(module, six.string_types):
loader = pkgutil.get_loader(module)
if loader:
path = getattr(loader, 'path', '') or getattr(loader, 'filename', '')
if '__init__.py' in path:
path = os.path.dirname(path)
file_path = os.path.join(path, 'plugins.py')
elif six.PY3 and not isinstance(module, tuple):
file_path = os.path.join(module.module_finder.path, module.name, 'plugins.py')
elif six.PY3 or isinstance(module[0], pkgutil.ImpImporter):
if hasattr(module[0], 'path'):
file_path = os.path.join(module[0].path, module[1], 'plugins.py')
if file_path and file_path not in loaded_files:
plugin_config = load_plugin_from_path(file_path, scope=scope)
if plugin_config:
result.append(plugin_config)
loaded_files.append(file_path)
# set global flag
PLUGINS_LOADED[scope] = result
# debug plugin loading time
load_time = now_utc() - t1
if load_time > 5:
LOG.debug('Plugin loading took %s sec' % load_time)
return result
def docker_container_running(container_name):
container_names = get_docker_container_names()
return container_name in container_names
def get_docker_image_details(image_name=None):
image_name = image_name or get_docker_image_to_start()
try:
result = run('%s inspect %s' % (config.DOCKER_CMD, image_name), print_error=False)
result = json.loads(to_str(result))
assert len(result)
except Exception:
return {}
if len(result) > 1:
LOG.warning('Found multiple images (%s) named "%s"' % (len(result), image_name))
result = result[0]
result = {
'id': result['Id'].replace('sha256:', '')[:12],
'tag': (result.get('RepoTags') or ['latest'])[0].split(':')[-1],
'created': result['Created'].split('.')[0]
}
return result
def get_docker_container_names():
cmd = "%s ps --format '{{.Names}}'" % config.DOCKER_CMD
try:
output = to_str(run(cmd))
container_names = re.split(r'\s+', output.strip().replace('\n', ' '))
return container_names
except Exception as e:
LOG.info('Unable to list Docker containers via "%s": %s' % (cmd, e))
return []
def get_main_container_ip():
container_name = get_main_container_name()
cmd = ("%s inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' %s" %
(config.DOCKER_CMD, container_name))
return run(cmd).strip()
def get_main_container_name():
cmd = "%s inspect -f '{{ .Name }}' %s" % (config.DOCKER_CMD, config.HOSTNAME)
try:
return run(cmd, print_error=False).strip().lstrip('/')
except Exception:
return config.MAIN_CONTAINER_NAME
def get_server_version():
docker_cmd = config.DOCKER_CMD
try:
# try to extract from existing running container
container_name = get_main_container_name()
version = run('%s exec -it %s bin/localstack --version' % (docker_cmd, container_name), print_error=False)
version = version.strip().split('\n')[-1]
return version
except Exception:
try:
# try to extract by starting a new container
img_name = get_docker_image_to_start()
version = run('%s run --entrypoint= -it %s bin/localstack --version' % (docker_cmd, img_name))
version = version.strip().split('\n')[-1]
return version
except Exception:
# fall back to default constant
return constants.VERSION
def setup_logging(log_level=None):
""" Determine and set log level """
if PLUGINS_LOADED.get('_logging_'):
return
PLUGINS_LOADED['_logging_'] = True
# log level set by DEBUG env variable
log_level = log_level or (logging.DEBUG if config.DEBUG else logging.INFO)
# overriding the log level if LS_LOG has been set
if config.LS_LOG:
LS_LOG = str(config.LS_LOG).upper()
LS_LOG = 'WARNING' if LS_LOG == 'WARN' else LS_LOG
log_level = getattr(logging, LS_LOG)
logging.getLogger('').setLevel(log_level)
logging.getLogger('localstack').setLevel(log_level)
logging.basicConfig(level=log_level, format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT)
# set up werkzeug logger
class WerkzeugLogFilter(logging.Filter):
def filter(self, record):
return record.name != 'werkzeug'
root_handlers = logging.getLogger().handlers
if len(root_handlers) > 0:
root_handlers[0].addFilter(WerkzeugLogFilter())
if config.DEBUG:
format = '%(asctime)s:API: %(message)s'
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(format))
logging.getLogger('werkzeug').addHandler(handler)
# disable some logs and warnings
warnings.filterwarnings('ignore')
logging.captureWarnings(True)
logging.getLogger('asyncio').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
logging.getLogger('docker').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
# --------------
# INFRA STARTUP
# --------------
def canonicalize_api_names(apis=None):
""" Finalize the list of API names by
(1) resolving and adding dependencies (e.g., "dynamodbstreams" requires "kinesis"),
(2) resolving and adding composites (e.g., "serverless" describes an ensemble
including "iam", "lambda", "dynamodb", "apigateway", "s3", "sns", and "logs"), and
(3) removing duplicates from the list. """
# TODO: cache the result, as the code below is a relatively expensive operation!
apis = apis or list(config.SERVICE_PORTS.keys())
def contains(apis, api):
for a in apis:
if a == api:
return True
# resolve composites
for comp, deps in API_COMPOSITES.items():
if contains(apis, comp):
apis.extend(deps)
config.SERVICE_PORTS.pop(comp)
# resolve dependencies
for i, api in enumerate(apis):
for dep in API_DEPENDENCIES.get(api, []):
if not contains(apis, dep):
apis.append(dep)
# remove duplicates and composite names
apis = list(set([a for a in apis if a not in API_COMPOSITES.keys()]))
# make sure we have port mappings for each API
for api in apis:
if api not in config.SERVICE_PORTS:
config.SERVICE_PORTS[api] = config.DEFAULT_SERVICE_PORTS.get(api)
config.populate_configs(config.SERVICE_PORTS)
return apis
def is_api_enabled(api):
apis = canonicalize_api_names()
for a in apis:
if a == api or a.startswith('%s:' % api):
return True
def start_infra_locally():
bootstrap_installation()
from localstack.services import infra
return infra.start_infra()
def validate_localstack_config(name):
dirname = os.getcwd()
compose_file_name = name if os.path.isabs(name) else os.path.join(dirname, name)
# validating docker-compose file
cmd = "docker-compose -f '%s' config" % (compose_file_name)
try:
run(cmd)
except Exception as e:
LOG.warning('Looks like the docker-compose file is not valid: %s' % e)
return False
# validating docker-compose variable
import yaml
with open(compose_file_name) as file:
compose_content = yaml.full_load(file)
localstack_service = [service for service in compose_content['services']
if compose_content['services'][service]['image'] in constants.OFFICIAL_IMAGES]
if len(localstack_service) > 0:
localstack_service = localstack_service[0]
else:
raise Exception('No official docker image found. Please use one of this image: %s'
% (constants.OFFICIAL_IMAGES))
network_mode = compose_content['services'][localstack_service].get('network_mode')
image_name = compose_content['services'][localstack_service]['image']
container_name = compose_content['services'][localstack_service].get('container_name') or ''
docker_ports = (port.split(':')[0] for port in compose_content['services'][localstack_service].get('ports', []))
docker_env = dict((env.split('=')[0], env.split('=')[1])
for env in compose_content['services'][localstack_service]['environment'])
# docker-compose file validation cases
if (docker_env.get('LAMBDA_REMOTE_DOCKER') in constants.FALSE_STRINGS and
docker_env.get('HOST_TMP_FOLDER') in ['${TMPDIR}', None, '']):
LOG.warning('Make sure to properly set the "HOST_TMP_FOLDER" environment variable for the '
'LocalStack container when using "LAMBDA_REMOTE_DOCKER=false"')
if docker_env.get('PORT_WEB_UI') not in ['${PORT_WEB_UI- }', None, ''] and image_name == 'localstack/localstack':
LOG.warning('"PORT_WEB_UI" Web UI is now deprecated, '
'and requires to use the "localstack/localstack-full" image.')
if ('localstack_main' not in container_name) and not docker_env.get('MAIN_CONTAINER_NAME'):
LOG.warning('Please use "container_name: localstack_main" or add "MAIN_CONTAINER_NAME" in "environment".')
if docker_env.get('EDGE_PORT') and docker_env.get('EDGE_PORT') not in docker_ports:
LOG.warning('Using a custom edge port which is not exposed. '
'You may have to add the entry to the "ports" section of the docker-compose file.')
if network_mode != 'bridge':
LOG.warning('Network mode is not set to bridge which may cause networking issues in lambda containers. '
'Consider adding "network_mode: bridge" to you docker-compose file.')
return True
class PortMappings(object):
""" Maps source to target port ranges for Docker port mappings. """
class HashableList(list):
def __hash__(self):
result = 0
for i in self:
result += hash(i)
return result
def __init__(self):
self.mappings = {}
def add(self, port, mapped=None):
mapped = mapped or port
if isinstance(port, list):
for i in range(port[1] - port[0] + 1):
self.add(port[0] + i, mapped[0] + i)
return
if port is None or int(port) <= 0:
raise Exception('Unable to add mapping for invalid port: %s' % port)
if self.contains(port):
return
for from_range, to_range in self.mappings.items():
if not self.in_expanded_range(port, from_range):
continue
if not self.in_expanded_range(mapped, to_range):
continue
self.expand_range(port, from_range)
self.expand_range(mapped, to_range)
return
self.mappings[self.HashableList([port, port])] = [mapped, mapped]
def to_str(self):
def entry(k, v):
if k[0] == k[1] and v[0] == v[1]:
return '-p %s:%s' % (k[0], v[0])
return '-p %s-%s:%s-%s' % (k[0], k[1], v[0], v[1])
return ' '.join([entry(k, v) for k, v in self.mappings.items()])
def contains(self, port):
for from_range, to_range in self.mappings.items():
if self.in_range(port, from_range):
return True
def in_range(self, port, range):
return port >= range[0] and port <= range[1]
def in_expanded_range(self, port, range):
return port >= range[0] - 1 and port <= range[1] + 1
def expand_range(self, port, range):
if self.in_range(port, range):
return
if port == range[0] - 1:
range[0] = port
elif port == range[1] + 1:
range[1] = port
else:
raise Exception('Unable to add port %s to existing range %s' % (port, range))
def get_docker_image_to_start():
image_name = os.environ.get('IMAGE_NAME')
if not image_name:
image_name = constants.DOCKER_IMAGE_NAME
if os.environ.get('USE_LIGHT_IMAGE') in constants.FALSE_STRINGS:
image_name = constants.DOCKER_IMAGE_NAME_FULL
return image_name
def extract_port_flags(user_flags, port_mappings):
regex = r'-p\s+([0-9]+)(\-([0-9]+))?:([0-9]+)(\-([0-9]+))?'
matches = re.match('.*%s' % regex, user_flags)
start = end = 0
if matches:
for match in re.findall(regex, user_flags):
start = int(match[0])
end = int(match[2] or match[0])
start_target = int(match[3] or start)
end_target = int(match[5] or end)
port_mappings.add([start, end], [start_target, end_target])
user_flags = re.sub(regex, r'', user_flags)
return user_flags
def start_infra_in_docker():
container_name = config.MAIN_CONTAINER_NAME
if docker_container_running(container_name):
raise Exception('LocalStack container named "%s" is already running' % container_name)
os.environ[ENV_SCRIPT_STARTING_DOCKER] = '1'
# load plugins before starting the docker container
plugin_configs = load_plugins()
# prepare APIs
canonicalize_api_names()
entrypoint = os.environ.get('ENTRYPOINT', '')
cmd = os.environ.get('CMD', '')
user_flags = config.DOCKER_FLAGS
image_name = get_docker_image_to_start()
service_ports = config.SERVICE_PORTS
force_noninteractive = os.environ.get('FORCE_NONINTERACTIVE', '')
# get run params
plugin_run_params = ' '.join([
entry.get('docker', {}).get('run_flags', '') for entry in plugin_configs])
# container for port mappings
port_mappings = PortMappings()
# get port ranges defined via DOCKER_FLAGS (if any)
user_flags = extract_port_flags(user_flags, port_mappings)
plugin_run_params = extract_port_flags(plugin_run_params, port_mappings)
# construct default port mappings
if service_ports.get('edge') == 0:
service_ports.pop('edge')
service_ports.pop('dashboard', None)
for port in service_ports.values():
port_mappings.add(port)
env_str = ''
for env_var in config.CONFIG_ENV_VARS:
value = os.environ.get(env_var, None)
if value is not None:
env_str += '-e %s="%s" ' % (env_var, value)
data_dir_mount = ''
data_dir = os.environ.get('DATA_DIR', None)
if data_dir is not None:
container_data_dir = '/tmp/localstack_data'
data_dir_mount = '-v "%s:%s"' % (data_dir, container_data_dir)
env_str += '-e DATA_DIR="%s" ' % container_data_dir
interactive = '' if force_noninteractive or in_ci() else '-it '
# append space if parameter is set
user_flags = '%s ' % user_flags if user_flags else user_flags
entrypoint = '%s ' % entrypoint if entrypoint else entrypoint
plugin_run_params = '%s ' % plugin_run_params if plugin_run_params else plugin_run_params
if config.START_WEB:
for port in [config.PORT_WEB_UI, config.PORT_WEB_UI_SSL]:
port_mappings.add(port)
docker_cmd = ('%s run %s%s%s%s%s' +
'--rm --privileged ' +
'--name %s ' +
'%s %s ' +
'-v "%s:/tmp/localstack" -v "%s:%s" ' +
'-e DOCKER_HOST="unix://%s" ' +
'-e HOST_TMP_FOLDER="%s" "%s" %s') % (
config.DOCKER_CMD, interactive, entrypoint, env_str, user_flags, plugin_run_params,
container_name, port_mappings.to_str(), data_dir_mount,
config.TMP_FOLDER, config.DOCKER_SOCK, config.DOCKER_SOCK, config.DOCKER_SOCK,
config.HOST_TMP_FOLDER, image_name, cmd
)
mkdir(config.TMP_FOLDER)
try:
run('chmod -R 777 "%s"' % config.TMP_FOLDER, print_error=False)
except Exception:
pass
class ShellRunnerThread(threading.Thread):
def __init__(self, cmd):
threading.Thread.__init__(self)
self.daemon = True
self.cmd = cmd
def run(self):
self.process = run(self.cmd, asynchronous=True)
print(docker_cmd)
t = ShellRunnerThread(docker_cmd)
t.start()
time.sleep(2)
if DO_CHMOD_DOCKER_SOCK:
# fix permissions on /var/run/docker.sock
for i in range(0, 100):
if docker_container_running(container_name):
break
time.sleep(2)
run('%s exec -u root "%s" chmod 777 /var/run/docker.sock' % (config.DOCKER_CMD, container_name))
t.process.wait()
sys.exit(t.process.returncode)
# ---------------
# UTIL FUNCTIONS
# ---------------
def now_utc():
epoch = datetime.utcfromtimestamp(0)
return (datetime.utcnow() - epoch).total_seconds()
def to_str(obj, errors='strict'):
return obj.decode('utf-8', errors) if isinstance(obj, six.binary_type) else obj
def in_ci():
""" Whether or not we are running in a CI environment """
for key in ('CI', 'TRAVIS'):
if os.environ.get(key, '') not in [False, '', '0', 'false']:
return True
return False
class FuncThread(threading.Thread):
""" Helper class to run a Python function in a background thread. """
def __init__(self, func, params=None, quiet=False):
threading.Thread.__init__(self)
self.daemon = True
self.params = params
self.func = func
self.quiet = quiet
self.result_future = Future()
def run(self):
result = None
try:
result = self.func(self.params)
except Exception as e:
result = e
if not self.quiet:
LOG.warning('Thread run method %s(%s) failed: %s %s' %
(self.func, self.params, e, traceback.format_exc()))
finally:
try:
self.result_future.set_result(result)
except Exception:
# this can happen as InvalidStateError on shutdown, if the task is already canceled
pass
def stop(self, quiet=False):
if not quiet and not self.quiet:
LOG.warning('Not implemented: FuncThread.stop(..)')
def run(cmd, print_error=True, asynchronous=False, stdin=False, stderr=subprocess.STDOUT,
outfile=None, env_vars=None, inherit_cwd=False, inherit_env=True, tty=False):
env_dict = os.environ.copy() if inherit_env else {}
if env_vars:
env_dict.update(env_vars)
env_dict = dict([(k, to_str(str(v))) for k, v in env_dict.items()])
if tty:
asynchronous = True
stdin = True
try:
cwd = os.getcwd() if inherit_cwd else None
if not asynchronous:
if stdin:
return subprocess.check_output(cmd, shell=True, stderr=stderr, env=env_dict,
stdin=subprocess.PIPE, cwd=cwd)
output = subprocess.check_output(cmd, shell=True, stderr=stderr, env=env_dict, cwd=cwd)
return output.decode(config.DEFAULT_ENCODING)
stdin_arg = subprocess.PIPE if stdin else None
stdout_arg = open(outfile, 'ab') if isinstance(outfile, six.string_types) else outfile
stderr_arg = stderr
if tty:
# Note: leave the "pty" import here (not supported in Windows)
import pty
master_fd, slave_fd = pty.openpty()
stdin_arg = slave_fd
stdout_arg = stderr_arg = None
# start the actual sub process
kwargs = {}
if is_linux() or is_mac_os():
kwargs['preexec_fn'] = os.setsid
process = subprocess.Popen(cmd, shell=True, stdin=stdin_arg, bufsize=-1,
stderr=stderr_arg, stdout=stdout_arg, env=env_dict, cwd=cwd, **kwargs)
if tty:
# based on: https://stackoverflow.com/questions/41542960
def pipe_streams(*args):
while process.poll() is None:
r, w, e = select.select([sys.stdin, master_fd], [], [])
if sys.stdin in r:
d = os.read(sys.stdin.fileno(), 10240)
os.write(master_fd, d)
elif master_fd in r:
o = os.read(master_fd, 10240)
if o:
os.write(sys.stdout.fileno(), o)
FuncThread(pipe_streams).start()
return process
except subprocess.CalledProcessError as e:
if print_error:
print("ERROR: '%s': exit code %s; output: %s" % (cmd, e.returncode, e.output))
sys.stdout.flush()
raise e
def is_mac_os():
return 'Darwin' in get_uname()
def is_linux():
return 'Linux' in get_uname()
def get_uname():
try:
return to_str(subprocess.check_output('uname -a', shell=True))
except Exception:
return ''
def mkdir(folder):
if not os.path.exists(folder):
try:
os.makedirs(folder)
except OSError as err:
# Ignore rare 'File exists' race conditions.
if err.errno != 17:
raise
| 35.899329 | 117 | 0.631744 |
f3e0fef01b18881c0ff76fe35ae244f16c41ace6 | 6,334 | py | Python | mail.py | hidenorly/mailpy | 1e8d574d4df39178e5dc9593dfecc26810524116 | [
"Apache-2.0"
] | null | null | null | mail.py | hidenorly/mailpy | 1e8d574d4df39178e5dc9593dfecc26810524116 | [
"Apache-2.0"
] | null | null | null | mail.py | hidenorly/mailpy | 1e8d574d4df39178e5dc9593dfecc26810524116 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
#
# Copyright (C) 2016 hidenorly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.header import Header
from email import charset
from email import encoders
from email.Utils import formatdate
from os.path import expanduser
import os
from optparse import OptionParser, OptionValueError
import sys
import codecs
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
sys.stdout = codecs.getwriter('utf_8')(sys.stdout)
charset.add_charset('utf-8', charset.SHORTEST, None, 'utf-8')
cset = 'utf-8'
homeDir = expanduser("~")
def readStdin():
UTF8Reader = codecs.getreader('utf8')
sys.stdin = UTF8Reader(sys.stdin)
buf = ""
for line in sys.stdin:
buf = buf + line
return buf
def loadMailRC():
mailrc = {'smtp':"", 'port':"", 'useTLS':False, 'from_addr': "", 'userId':"", 'password':""}
f = open(homeDir + "/.mailrc")
for line in f:
line = line.strip()
posE=line.find('=')
if line.find('smtp=')!=-1:
pos = line.find('smtp://')
pos2 = line.rfind(':')
if pos2==-1:
pos2 = len(line)
mailrc['port'] = 25
else:
mailrc['port'] = line[pos2+1:len(line)]
if pos!=-1:
mailrc['smtp'] = line[pos+7:pos2]
elif line.find('smtp-auth-user=')!=-1:
mailrc['userId'] = line[posE+1:len(line)]
elif line.find('smtp-auth-password=')!=-1:
mailrc['password'] = line[posE+1:len(line)]
elif line.find('from=')!=-1:
mailrc['from_addr'] = line[posE+1:len(line)]
elif line.find('set smtp-use-starttls')!=-1:
mailrc['useTLS'] = True
f.close
return mailrc
def create_message(from_addr, to_addr, cc_addr, bcc_addr, subject, body, contentType, attachments, relatedAttachments, encoding):
related = None
msg = None
# Create Msg & handle body
if attachments or relatedAttachments:
msg = MIMEMultipart()
if relatedAttachments:
related = MIMEMultipart('related')
alt = MIMEMultipart('alternative')
alt.attach( MIMEText(body, contentType, encoding) )
related.attach(alt)
if attachments and not relatedAttachments:
msg.attach( MIMEText(body, contentType, encoding) )
if not msg:
msg = MIMEText(body, contentType, encoding)
# handle header
msg['Subject'] = Header(subject, encoding)
msg['From'] = from_addr
msg['To'] = to_addr
if cc_addr:
msg['Cc'] = cc_addr
if bcc_addr:
msg['Bcc'] = bcc_addr
msg['Date'] = formatdate(localtime=True)
# handle attachments
if relatedAttachments:
for f in relatedAttachments:
filename = os.path.basename(f)
extPos = filename.rfind('.')
isImage = False
if extPos != -1:
ext = filename[extPos+1:len(filename)]
if ext=='gif' or ext=='png' or ext=='jpg' or ext=='jpeg':
isImage = True
if isImage:
img = MIMEImage(open(f,"rb").read(), ext, name=filename)
img['Content-ID'] = '<%s>' % filename
related.attach(img)
else:
part = MIMEBase('application', "octet-stream")
part.set_payload(open(f,"rb").read() )
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="{0}"'.format(os.path.basename(f)))
part['Content-ID'] = '<%s>' % filename
related.attach(part)
msg.attach(related)
if attachments:
for f in attachments:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="{0}"'.format(os.path.basename(f)))
msg.attach(part)
return msg
def getAuthenticatedSMTP(smtpHost, smtpPort, userId, password, useTLS):
    """Open an SMTP connection, optionally upgrading to TLS and logging in.

    Returns the connected ``smtplib.SMTP`` instance; the caller is
    responsible for closing it.
    """
    connection = smtplib.SMTP(smtpHost, smtpPort)
    if useTLS:
        # Identify ourselves, then switch the socket to TLS.
        connection.ehlo()
        connection.starttls()
    if userId and password:
        # Re-identify on the (possibly upgraded) channel before AUTH.
        connection.ehlo()
        connection.login(userId, password)
    return connection
def send(s, from_addr, to_addr, msg):
    # Deliver the fully-built message over the given SMTP connection.
    # ``to_addr`` is wrapped in a list because sendmail expects a sequence
    # of recipient addresses.
    s.sendmail(from_addr, [to_addr], msg.as_string())
def getAttachments(attachments):
    """Normalize an attachment option value.

    The ``append``-type optparse options may deliver a single
    comma-separated string (e.g. ``["a.txt,b.txt"]``); split that into
    individual paths.  Anything else (``None`` or an already-expanded
    list) is returned unchanged.
    """
    if (attachments is not None and isinstance(attachments, list)
            and len(attachments) == 1 and attachments[0].find(",") != -1):
        return attachments[0].split(",")
    return attachments
if __name__ == '__main__':
    # Command-line mail sender: body comes from stdin, recipient is the
    # last positional argument, server settings come from ~/.mailrc.
    parser = OptionParser()
    parser.add_option("-f", "--from", action="store", type="string", dest="from_addr", help="Specify From:")
    parser.add_option("-s", "--subject", action="store", type="string", dest="subject", help="Specify Subject:")
    parser.add_option("-c", "--cc", action="store", type="string", dest="sendAsCC", help="Specify Cc:")
    parser.add_option("-b", "--bcc", action="store", type="string", dest="sendAsBCC", help="Specify Bcc:")
    parser.add_option("-a", "--attach", action="append", type="string", dest="attachments", default=None, help="Specify attachment file(,)")
    parser.add_option("-t", "--type", action="store", type="string", dest="contentType", default='plain', help="Specify plain or html")
    parser.add_option("-r", "--relatedAttach", action="append", type="string", dest="relatedAttachments", default=None, help="Specify attachment files for html(,)")
    (options, args) = parser.parse_args()
    if not args:
        # parser.error() exits the process, so print the help text first.
        # (The original called error() before print_help()/exit(), which
        # left those lines unreachable.)
        parser.print_help()
        parser.error("requires To: as last argument")
    to_addr = args[0]
    mailrc = loadMailRC()
    # Command-line From: overrides the .mailrc default.
    from_addr = options.from_addr if options.from_addr else mailrc['from_addr']
    body = readStdin()
    attachments = getAttachments(options.attachments)
    relatedAttachments = getAttachments(options.relatedAttachments)
    msg = create_message(from_addr, to_addr, options.sendAsCC, options.sendAsBCC, options.subject, body, options.contentType, attachments, relatedAttachments, 'utf-8')  # 'ISO-2022-JP')
    s = getAuthenticatedSMTP(mailrc['smtp'], mailrc['port'], mailrc['userId'], mailrc['password'], mailrc['useTLS'])
    send(s, from_addr, to_addr, msg)
    # Bug fix: the original had bare "s.close" (an attribute access, not a
    # call), so the connection was never terminated cleanly.  quit() sends
    # SMTP QUIT and closes the socket.
    s.quit()
| 31.989899 | 180 | 0.696085 |
e44bbdef0e31d2a9e9daf422e2e00a2846f4b454 | 56,113 | py | Python | doc/conf.py | john-veillette/mne-python | 2b68c6ecb7cb735c00fa393898a8c0996b4cd0b5 | [
"BSD-3-Clause"
] | 1 | 2021-12-21T16:16:40.000Z | 2021-12-21T16:16:40.000Z | doc/conf.py | john-veillette/mne-python | 2b68c6ecb7cb735c00fa393898a8c0996b4cd0b5 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | john-veillette/mne-python | 2b68c6ecb7cb735c00fa393898a8c0996b4cd0b5 | [
"BSD-3-Clause"
] | 1 | 2021-07-22T17:57:33.000Z | 2021-07-22T17:57:33.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import gc
import os
import subprocess
import sys
import time
import warnings
from datetime import datetime, timezone
import numpy as np
import matplotlib
import sphinx
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import mne
from mne.tests.test_docstring_parameters import error_ignores
from mne.utils import (linkcode_resolve, # noqa, analysis:ignore
_assert_no_instances, sizeof_fmt, run_subprocess)
from mne.viz import Brain # noqa
matplotlib.use('agg')
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
# -- Project information -----------------------------------------------------
project = 'MNE'
td = datetime.now(tz=timezone.utc)
# We need to triage which date type we use so that incremental builds work
# (Sphinx looks at variable changes and rewrites all files if some change)
copyright = (
f'2012–{td.year}, MNE Developers. Last updated <time datetime="{td.isoformat()}" class="localized">{td.strftime("%Y-%m-%d %H:%M %Z")}</time>\n' # noqa: E501
'<script type="text/javascript">$(function () { $("time.localized").each(function () { var el = $(this); el.text(new Date(el.attr("datetime")).toLocaleString([], {dateStyle: "medium", timeStyle: "long"})); }); } )</script>') # noqa: E501
if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true':
copyright = f'2012–{td.year}, MNE Developers. Last updated locally.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = mne.__version__
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'numpydoc',
'sphinx_gallery.gen_gallery',
'gen_commands',
'gh_substitutions',
'mne_substitutions',
'newcontrib_substitutions',
'gen_names',
'sphinx_bootstrap_divs',
'sphinxcontrib.bibtex',
'sphinx_copybutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']
# The suffix of source filenames.
source_suffix = '.rst'
# The main toctree document.
master_doc = 'index'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# -- Sphinx-Copybutton configuration -----------------------------------------
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
# -- Intersphinx configuration -----------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://scipy.github.io/devdocs', None),
'matplotlib': ('https://matplotlib.org/stable', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'numba': ('https://numba.pydata.org/numba-doc/latest', None),
'joblib': ('https://joblib.readthedocs.io/en/latest', None),
'nibabel': ('https://nipy.org/nibabel', None),
'nilearn': ('http://nilearn.github.io', None),
'nitime': ('https://nipy.org/nitime/', None),
'surfer': ('https://pysurfer.github.io/', None),
'mne_bids': ('https://mne.tools/mne-bids/stable', None),
'mne-connectivity': ('https://mne.tools/mne-connectivity/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'seaborn': ('https://seaborn.pydata.org/', None),
'statsmodels': ('https://www.statsmodels.org/dev', None),
'patsy': ('https://patsy.readthedocs.io/en/latest', None),
'pyvista': ('https://docs.pyvista.org', None),
'imageio': ('https://imageio.readthedocs.io/en/latest', None),
'mne_realtime': ('https://mne.tools/mne-realtime', None),
'picard': ('https://pierreablin.github.io/picard/', None),
'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None),
'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None),
'dipy': ('https://dipy.org/documentation/latest/',
'https://dipy.org/documentation/latest/objects.inv/'),
'pooch': ('https://www.fatiando.org/pooch/latest/', None),
}
# NumPyDoc configuration -----------------------------------------------------
# Define what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
numpydoc_xref_param_type = True
numpydoc_xref_aliases = {
# Python
'file-like': ':term:`file-like <python:file object>`',
'path-like': ':term:`path-like`',
'array-like': ':term:`array-like`',
# Matplotlib
'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
'color': ':doc:`color <matplotlib:api/colors_api>`',
'Axes': 'matplotlib.axes.Axes',
'Figure': 'matplotlib.figure.Figure',
'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
# sklearn
'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
# joblib
'joblib.Parallel': 'joblib.Parallel',
# nibabel
'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
'SpatialImage': 'nibabel.spatialimages.SpatialImage',
# MNE
'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
'SourceMorph': 'mne.SourceMorph',
'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
'AverageTFR': 'mne.time_frequency.AverageTFR',
'EpochsTFR': 'mne.time_frequency.EpochsTFR',
'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
'DigMontage': 'mne.channels.DigMontage',
'VectorSourceEstimate': 'mne.VectorSourceEstimate',
'VolSourceEstimate': 'mne.VolSourceEstimate',
'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
'MixedSourceEstimate': 'mne.MixedSourceEstimate',
'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
'ConductorModel': 'mne.bem.ConductorModel',
'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
'InverseOperator': 'mne.minimum_norm.InverseOperator',
'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
'SourceMorph': 'mne.SourceMorph',
'Xdawn': 'mne.preprocessing.Xdawn',
'Report': 'mne.Report', 'Forward': 'mne.Forward',
'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
'Vectorizer': 'mne.decoding.Vectorizer',
'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
'TemporalFilter': 'mne.decoding.TemporalFilter',
'SSD': 'mne.decoding.SSD',
'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
'PSDEstimator': 'mne.decoding.PSDEstimator',
'LinearModel': 'mne.decoding.LinearModel',
'FilterEstimator': 'mne.decoding.FilterEstimator',
'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
'Beamformer': 'mne.beamformer.Beamformer',
'Transform': 'mne.transforms.Transform',
'Coregistration': 'mne.coreg.Coregistration',
# dipy
'dipy.align.AffineMap': 'dipy.align.imaffine.AffineMap',
'dipy.align.DiffeomorphicMap': 'dipy.align.imwarp.DiffeomorphicMap',
}
numpydoc_xref_ignore = {
# words
'instance', 'instances', 'of', 'default', 'shape', 'or',
'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
'dtype', 'object', 'self.verbose',
# shapes
'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg',
'n_moments', 'n_patterns', 'n_new_events',
# Undocumented (on purpose)
'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
'RawPersyst', 'RawNihon', 'RawNedf', 'RawHitachi',
# sklearn subclasses
'mapping', 'to', 'any',
# unlinkable
'CoregistrationUI',
'IntracranialElectrodeLocator',
# We need to fix these: "PyVista renderer" and "PyVista surface"
'PyVista', 'renderer', 'surface',
}
numpydoc_validate = True
numpydoc_validation_checks = {'all'} | set(error_ignores)
numpydoc_validation_exclude = { # set of regex
# dict subclasses
r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys',
r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values',
# list subclasses
r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove',
r'\.sort',
# we currently don't document these properly (probably okay)
r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__',
r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__',
# copied from sklearn
r'mne\.utils\.deprecated',
# deprecations
r'mne\.connectivity\.degree', r'mne\.connectivity\.seed_target_indices',
r'mne\.viz\.plot_sensors_connectivity',
r'mne\.viz\.plot_connectivity_circle',
}
# -- Sphinx-gallery configuration --------------------------------------------
class Resetter(object):
    """Simple class to make the str(obj) static for Sphinx build env hash.

    Registered in ``sphinx_gallery_conf['reset_modules']`` (see below), so
    it is called with each example script to restore matplotlib state,
    force garbage collection, and assert that no 3D-rendering objects leak
    between examples.
    """
    def __init__(self):
        # Build start time, used by the optional memory/timing printout at
        # the end of __call__.
        self.t0 = time.time()
    def __repr__(self):
        # Deterministic repr (no memory address) so str(obj) stays constant
        # and Sphinx's build-environment hash does not change between runs.
        return f'<{self.__class__.__name__}>'
    def __call__(self, gallery_conf, fname):
        """Reset state around the gallery example script ``fname``."""
        import matplotlib.pyplot as plt
        # The 3D stack is optional: fall back to None for any piece that is
        # not installed so the corresponding leak assertion can be skipped.
        try:
            from pyvista import Plotter  # noqa
        except ImportError:
            Plotter = None  # noqa
        try:
            from pyvistaqt import BackgroundPlotter  # noqa
        except ImportError:
            BackgroundPlotter = None  # noqa
        try:
            from vtk import vtkPolyData  # noqa
        except ImportError:
            vtkPolyData = None  # noqa
        from mne.viz.backends.renderer import backend
        _Renderer = backend._Renderer if backend is not None else None
        # Re-apply the strict warning filters (examples may change them).
        reset_warnings(gallery_conf, fname)
        # in case users have interactive mode turned on in matplotlibrc,
        # turn it off here (otherwise the build can be very slow)
        plt.ioff()
        plt.rcParams['animation.embed_limit'] = 30.
        gc.collect()
        when = 'mne/conf.py:Resetter.__call__'
        # Fail the build if renderer/plotter instances survived the example
        # (i.e., a reference leak), unless explicitly disabled via env var.
        if os.getenv('MNE_SKIP_INSTANCE_ASSERTIONS', 'false') not in \
                ('true', '1'):
            _assert_no_instances(Brain, when)  # calls gc.collect()
            if Plotter is not None:
                _assert_no_instances(Plotter, when)
            if BackgroundPlotter is not None:
                _assert_no_instances(BackgroundPlotter, when)
            if vtkPolyData is not None:
                _assert_no_instances(vtkPolyData, when)
            _assert_no_instances(_Renderer, when)
        # This will overwrite some Sphinx printing but it's useful
        # for memory timestamps
        if os.getenv('SG_STAMP_STARTS', '').lower() == 'true':
            import psutil
            process = psutil.Process(os.getpid())
            mem = sizeof_fmt(process.memory_info().rss)
            print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22))
examples_dirs = ['../tutorials', '../examples']
gallery_dirs = ['auto_tutorials', 'auto_examples']
os.environ['_MNE_BUILDING_DOC'] = 'true'
scrapers = ('matplotlib',)
try:
mne.viz.set_3d_backend(mne.viz.get_3d_backend())
except Exception:
report_scraper = None
else:
backend = mne.viz.get_3d_backend()
if backend in ('notebook', 'pyvistaqt'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
pyvista.OFF_SCREEN = False
scrapers += (
mne.gui._LocateScraper(),
mne.viz._brain._BrainScraper(),
'pyvista',
)
report_scraper = mne.report._ReportScraper()
scrapers += (report_scraper,)
del backend
compress_images = ('images', 'thumbnails')
# let's make things easier on Windows users
# (on Linux and macOS it's easy enough to require this)
if sys.platform.startswith('win'):
try:
subprocess.check_call(['optipng', '--version'])
except Exception:
compress_images = ()
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': dict(mne=None),
'examples_dirs': examples_dirs,
'subsection_order': ExplicitOrder(['../examples/io/',
'../examples/simulation/',
'../examples/preprocessing/',
'../examples/visualization/',
'../examples/time_frequency/',
'../examples/stats/',
'../examples/decoding/',
'../examples/connectivity/',
'../examples/forward/',
'../examples/inverse/',
'../examples/realtime/',
'../examples/datasets/',
'../tutorials/intro/',
'../tutorials/io/',
'../tutorials/raw/',
'../tutorials/preprocessing/',
'../tutorials/epochs/',
'../tutorials/evoked/',
'../tutorials/time-freq/',
'../tutorials/forward/',
'../tutorials/inverse/',
'../tutorials/stats-sensor-space/',
'../tutorials/stats-source-space/',
'../tutorials/machine-learning/',
'../tutorials/clinical/',
'../tutorials/simulation/',
'../tutorials/sample-datasets/',
'../tutorials/misc/']),
'gallery_dirs': gallery_dirs,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'backreferences_dir': 'generated',
'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning
'thumbnail_size': (160, 112),
'remove_config_comments': True,
'min_reported_time': 1.,
'abort_on_example_error': False,
'reset_modules': ('matplotlib', Resetter()), # called w/each script
'image_scrapers': scrapers,
'show_memory': not sys.platform.startswith('win'),
'line_numbers': False, # messes with style
'within_subsection_order': FileNameSortKey,
'capture_repr': ('_repr_html_',),
'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
'matplotlib_animations': True,
'compress_images': compress_images,
'filename_pattern': '^((?!sgskip).)*$',
}
# Files were renamed from plot_* with:
# find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa
def append_attr_meth_examples(app, what, name, obj, options, lines):
    """Append SG examples backreferences to method and attr docstrings.

    Signature matches Sphinx's ``autodoc-process-docstring`` event
    (presumably connected in ``setup()`` elsewhere in this file -- confirm
    there).  Mutates ``lines`` (the docstring, one string per element) in
    place; only attributes and methods with a non-empty sphinx-gallery
    backreferences file get the extra rubric.
    """
    # NumpyDoc nicely embeds method and attribute docstrings for us, but it
    # does not respect the autodoc templates that would otherwise insert
    # the .. include:: lines, so we need to do it.
    # Eventually this could perhaps live in SG.
    if what in ('attribute', 'method'):
        # Size of generated/<full.dotted.name>.examples; > 0 means at least
        # one gallery example references this object.
        size = os.path.getsize(os.path.join(
            os.path.dirname(__file__), 'generated', '%s.examples' % (name,)))
        if size > 0:
            lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
# -- Other extension configuration -------------------------------------------
linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36') # noqa: E501
linkcheck_ignore = [ # will be compiled to regex
r'https://datashare.is.ed.ac.uk/handle/10283/2189\?show=full', # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)')))
'https://doi.org/10.1002/mds.870120629', # Read timed out.
'https://doi.org/10.1088/0031-9155/32/1/004', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/40/3/001', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/51/7/008', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/0967-3334/22/4/305', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1088/1741-2552/aacfe4', # noqa Read timed out. (read timeout=15)
'https://doi.org/10.1093/sleep/18.7.557', # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557
'https://doi.org/10.1162/089976699300016719', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242
'https://doi.org/10.1162/jocn.1993.5.2.162', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095
'https://doi.org/10.1162/neco.1995.7.6.1129', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909
'https://doi.org/10.1162/jocn_a_00405', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980
'https://doi.org/10.1167/15.6.4', # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4
'https://doi.org/10.7488/ds/1556', # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach', # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)')))
'https://www.nyu.edu/', # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)')))
'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://hal.archives-ouvertes.fr/hal-01848442.*', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
'http://www.cs.ucl.ac.uk/staff/d.barber/brml.*', # noqa Sometimes: Read timed out
'https://compumedicsneuroscan.com/scan-acquire-configuration-files.*', # noqa SSL certificate error as of 2021/09/28
'https://chrisholdgraf.com', # noqa Max retries exceeded sometimes
'https://www.dtu.dk/english/service/phonebook/person.*', # noqa Too slow
'https://speakerdeck.com/dengemann/eeg-sensor-covariance-using-cross-validation', # noqa Too slow
'https://doi.org/10.1002/hbm.10024', # noqa Too slow sometimes
]
linkcheck_anchors = False # saves a bit of time
linkcheck_timeout = 15 # some can be quite slow
# autodoc / autosummary
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# sphinxcontrib-bibtex
bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''
# -- Nitpicky ----------------------------------------------------------------
nitpicky = True
nitpick_ignore = [
("py:class", "None. Remove all items from D."),
("py:class", "a set-like object providing a view on D's items"),
("py:class", "a set-like object providing a view on D's keys"),
("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501
("py:class", "None. Update D from dict/iterable E and F."),
("py:class", "an object providing a view on D's values"),
("py:class", "a shallow copy of D"),
("py:class", "(k, v), remove and return some (key, value) pair as a"),
("py:class", "_FuncT"), # type hint used in @verbose decorator
("py:class", "mne.utils._logging._FuncT"),
]
for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label',
'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report',
'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate',
'VolSourceEstimate', 'VolVectorSourceEstimate',
'channels.DigMontage', 'channels.Layout', 'coreg.Coregistration',
'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator',
'decoding.GeneralizingEstimator', 'decoding.LinearModel',
'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD',
'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator',
'decoding.TemporalFilter', 'decoding.TimeDelayingRidge',
'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter',
'decoding.Vectorizer',
'preprocessing.ICA', 'preprocessing.Xdawn',
'simulation.SourceSimulator',
'time_frequency.CrossSpectralDensity',
'utils.deprecated',
'viz.ClickableImage'):
nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__'))
suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'icon_links': [
dict(name='GitHub',
url='https://github.com/mne-tools/mne-python',
icon='fab fa-github-square'),
dict(name='Twitter',
url='https://twitter.com/mne_python',
icon='fab fa-twitter-square'),
dict(name='Discourse',
url='https://mne.discourse.group/',
icon='fab fa-discourse'),
dict(name='Discord',
url='https://discord.gg/rKfvxTuATa',
icon='fab fa-discord')
],
'icon_links_label': 'Quick Links', # for screen reader
'use_edit_page_button': False,
'navigation_with_keys': False,
'show_toc_level': 1,
'navbar_end': ['version-switcher', 'navbar-icon-links'],
'footer_items': ['copyright'],
'google_analytics_id': 'UA-37225609-1',
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'style.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
'contributing.html',
'documentation.html',
'getting_started.html',
'install_mne_python.html',
]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['search-field.html', 'sidebar-quicklinks.html'],
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# accommodate different logo shapes (width values in rem)
xs = '2'
sm = '2.5'
md = '3'
lg = '4.5'
xl = '5'
xxl = '6'
# variables to pass to HTML templating engine
html_context = {
'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))),
'versions_dropdown': {
'dev': 'v1.0 (devel)',
'stable': 'v0.24 (stable)',
'0.23': 'v0.23',
'0.22': 'v0.22',
'0.21': 'v0.21',
'0.20': 'v0.20',
'0.19': 'v0.19',
'0.18': 'v0.18',
'0.17': 'v0.17',
'0.16': 'v0.16',
'0.15': 'v0.15',
'0.14': 'v0.14',
'0.13': 'v0.13',
'0.12': 'v0.12',
'0.11': 'v0.11',
},
'funders': [
dict(img='nih.png', size='3', title='National Institutes of Health'),
dict(img='nsf.png', size='3.5',
title='US National Science Foundation'),
dict(img='erc.svg', size='3.5', title='European Research Council'),
dict(img='doe.svg', size='3', title='US Department of Energy'),
dict(img='anr.svg', size='4.5',
title='Agence Nationale de la Recherche'),
dict(img='cds.png', size='2.25',
title='Paris-Saclay Center for Data Science'),
dict(img='google.svg', size='2.25', title='Google'),
dict(img='amazon.svg', size='2.5', title='Amazon'),
dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'),
],
'institutions': [
dict(name='Massachusetts General Hospital',
img='MGH.svg',
url='https://www.massgeneral.org/',
size=sm),
dict(name='Athinoula A. Martinos Center for Biomedical Imaging',
img='Martinos.png',
url='https://martinos.org/',
size=md),
dict(name='Harvard Medical School',
img='Harvard.png',
url='https://hms.harvard.edu/',
size=sm),
dict(name='Massachusetts Institute of Technology',
img='MIT.svg',
url='https://web.mit.edu/',
size=md),
dict(name='New York University',
img='NYU.png',
url='https://www.nyu.edu/',
size=xs),
dict(name='Commissariat à l´énergie atomique et aux énergies alternatives', # noqa E501
img='CEA.png',
url='http://www.cea.fr/',
size=md),
dict(name='Aalto-yliopiston perustieteiden korkeakoulu',
img='Aalto.svg',
url='https://sci.aalto.fi/',
size=md),
dict(name='Télécom ParisTech',
img='Telecom_Paris_Tech.svg',
url='https://www.telecom-paris.fr/',
size=md),
dict(name='University of Washington',
img='Washington.png',
url='https://www.washington.edu/',
size=md),
dict(name='Institut du Cerveau et de la Moelle épinière',
img='ICM.jpg',
url='https://icm-institute.org/',
size=md),
dict(name='Boston University',
img='BU.svg',
url='https://www.bu.edu/',
size=lg),
dict(name='Institut national de la santé et de la recherche médicale',
img='Inserm.svg',
url='https://www.inserm.fr/',
size=xl),
dict(name='Forschungszentrum Jülich',
img='Julich.svg',
url='https://www.fz-juelich.de/',
size=xl),
dict(name='Technische Universität Ilmenau',
img='Ilmenau.gif',
url='https://www.tu-ilmenau.de/',
size=xxl),
dict(name='Berkeley Institute for Data Science',
img='BIDS.png',
url='https://bids.berkeley.edu/',
size=lg),
dict(name='Institut national de recherche en informatique et en automatique', # noqa E501
img='inria.png',
url='https://www.inria.fr/',
size=xl),
dict(name='Aarhus Universitet',
img='Aarhus.png',
url='https://www.au.dk/',
size=xl),
dict(name='Karl-Franzens-Universität Graz',
img='Graz.jpg',
url='https://www.uni-graz.at/',
size=md),
dict(name='SWPS Uniwersytet Humanistycznospołeczny',
img='SWPS.svg',
url='https://www.swps.pl/',
size=xl),
dict(name='Max-Planck-Institut für Bildungsforschung',
img='MPIB.svg',
url='https://www.mpib-berlin.mpg.de/',
size=xxl),
dict(name='Macquarie University',
img='Macquarie.png',
url='https://www.mq.edu.au/',
size=lg),
dict(name='Children’s Hospital of Philadelphia Research Institute',
img='CHOP.svg',
url='https://imaging.research.chop.edu/',
size=xxl),
],
# \u00AD is an optional hyphen (not rendered unless needed)
'carousel': [
dict(title='Source Estimation',
text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.', # noqa E501
url='auto_tutorials/inverse/30_mne_dspm_loreta.html',
img='sphx_glr_30_mne_dspm_loreta_008.gif',
alt='dSPM'),
dict(title='Machine Learning',
text='Advanced decoding models including time general\u00ADiza\u00ADtion.', # noqa E501
url='auto_tutorials/machine-learning/50_decoding.html',
img='sphx_glr_50_decoding_006.png',
alt='Decoding'),
dict(title='Encoding Models',
text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.', # noqa E501
url='auto_tutorials/machine-learning/30_strf.html',
img='sphx_glr_30_strf_001.png',
alt='STRF'),
dict(title='Statistics',
text='Parametric and non-parametric, permutation tests and clustering.', # noqa E501
url='auto_tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.html', # noqa E501
img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png',
alt='Clusters'),
dict(title='Connectivity',
text='All-to-all spectral and effective connec\u00ADtivity measures.', # noqa E501
url='https://mne.tools/mne-connectivity/stable/auto_examples/mne_inverse_label_connectivity.html', # noqa E501
img='https://mne.tools/mne-connectivity/stable/_images/sphx_glr_mne_inverse_label_connectivity_001.png', # noqa E501
alt='Connectivity'),
dict(title='Data Visualization',
text='Explore your data from multiple perspectives.',
url='auto_tutorials/evoked/20_visualize_evoked.html',
img='sphx_glr_20_visualize_evoked_007.png',
alt='Visualization'),
]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = []
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'
_np_print_defaults = np.get_printoptions()
# -- Warnings management -----------------------------------------------------
def reset_warnings(gallery_conf, fname):
    """Ensure we are future compatible and ignore silly warnings.

    Turns all warnings into errors, then whitelists known-noisy ones.
    Order matters: ``warnings.filterwarnings`` inserts each new filter at
    the *front* of the filter list, so every call after the blanket
    ``'error'`` filter takes precedence over it.

    Parameters
    ----------
    gallery_conf : dict | None
        Unused; the two-argument signature matches a sphinx-gallery reset
        hook (presumably registered in ``sphinx_gallery_conf`` elsewhere in
        this file -- confirm against the full conf.py).
    fname : str | None
        Unused; see above.
    """
    # In principle, our examples should produce no warnings.
    # Here we cause warnings to become errors, with a few exceptions.
    # This list should be considered alongside
    # setup.cfg -> [tool:pytest] -> filterwarnings
    # remove tweaks from other module imports or example runs
    warnings.resetwarnings()
    # restrict
    warnings.filterwarnings('error')
    # allow these, but show them
    warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
    warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
    warnings.filterwarnings('always', '.*cannot make axes width small.*')
    warnings.filterwarnings('always', '.*Axes that are not compatible.*')
    warnings.filterwarnings('always', '.*FastICA did not converge.*')
    # ECoG BIDS spec violations:
    warnings.filterwarnings('always', '.*Fiducial point nasion not found.*')
    warnings.filterwarnings('always', '.*DigMontage is only a subset of.*')
    warnings.filterwarnings(  # xhemi morph (should probably update sample)
        'always', '.*does not exist, creating it and saving it.*')
    warnings.filterwarnings('default', module='sphinx')  # internal warnings
    warnings.filterwarnings(
        'always', '.*converting a masked element to nan.*')  # matplotlib?
    # allow these warnings, but don't show them
    warnings.filterwarnings(
        'ignore', '.*OpenSSL\\.rand is deprecated.*')
    warnings.filterwarnings('ignore', '.*is currently using agg.*')
    warnings.filterwarnings(  # SciPy-related warning (maybe 1.2.0 will fix it)
        'ignore', '.*the matrix subclass is not the recommended.*')
    warnings.filterwarnings(  # some joblib warning
        'ignore', '.*semaphore_tracker: process died unexpectedly.*')
    warnings.filterwarnings(  # needed until SciPy 1.2.0 is released
        'ignore', '.*will be interpreted as an array index.*', module='scipy')
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=90)  # quantities
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=14)  # mne-connectivity
    warnings.filterwarnings(
        'ignore', '.*invalid escape sequence.*', lineno=281)  # mne-conn
    warnings.filterwarnings(
        'ignore', '.*"is not" with a literal.*', module='nilearn')
    warnings.filterwarnings(  # scikit-learn FastICA whiten=True deprecation
        'ignore', r'.*From version 1\.3 whiten.*')
    # Broad DeprecationWarning suppressions keyed on a message substring;
    # each entry notes the third-party module that emits it.
    for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
                'Using or importing the ABCs from',  # internal modules on 3.7
                r"it will be an error for 'np\.bool_'",  # ndimage
                "DocumenterBridge requires a state object",  # sphinx dev
                "'U' mode is deprecated",  # sphinx io
                r"joblib is deprecated in 0\.21",  # nilearn
                'The usage of `cmp` is deprecated and will',  # sklearn/pytest
                'scipy.* is deprecated and will be removed in',  # dipy
                r'Converting `np\.character` to a dtype is deprecated',  # vtk
                r'sphinx\.util\.smartypants is deprecated',
                'is a deprecated alias for the builtin',  # NumPy
                'the old name will be removed',  # Jinja, via sphinx
                r'Passing a schema to Validator\.iter_errors',  # jsonschema
                "default value of type 'dict' in an Any trait will",  # traits
                'rcParams is deprecated',  # PyVista rcParams -> global_theme
                'to mean no clipping',
                ):
        warnings.filterwarnings(  # deal with other modules having bad imports
            'ignore', message=".*%s.*" % key, category=DeprecationWarning)
    warnings.filterwarnings(  # deal with bootstrap-theme bug
        'ignore', message=".*modify script_files in the theme.*",
        category=Warning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'sklearn\.externals\.joblib is deprecated.*',
        category=FutureWarning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'Fetchers from the nilea.*', category=FutureWarning)
    warnings.filterwarnings(  # deal with other modules having bad imports
        'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
    warnings.filterwarnings(  # realtime
        'ignore', message=".*unclosed file.*", category=ResourceWarning)
    warnings.filterwarnings('ignore', message='Exception ignored in.*')
    # allow this ImportWarning, but don't show it
    warnings.filterwarnings(
        'ignore', message="can't resolve package from", category=ImportWarning)
    warnings.filterwarnings(
        'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
    warnings.filterwarnings(
        'ignore', message=r'numpy\.ndarray size changed.*',
        category=RuntimeWarning)
    # In case we use np.set_printoptions in any tutorials, we only
    # want it to affect those:
    np.set_printoptions(**_np_print_defaults)
reset_warnings(None, None)  # apply immediately so the doc build itself starts strict
# -- Fontawesome support -----------------------------------------------------
# here the "fab" and "fas" refer to "brand" and "solid" (determines which font
# file to look in). "fw" indicates fixed width.
brand_icons = ('apple', 'linux', 'windows', 'discourse', 'python')
fixed_icons = (
    # homepage:
    'book', 'code-branch', 'newspaper', 'question-circle', 'quote-left',
    # contrib guide:
    'bug', 'comment', 'hand-sparkles', 'magic', 'pencil-alt', 'remove-format',
    'universal-access', 'discourse', 'python'
)
other_icons = ('hand-paper', 'question', 'rocket', 'server')
# Map each icon name to its CSS class tuple: the font-family class first
# ('fab' for brand glyphs, 'fas' for solid ones), then 'fa-fw' when the
# icon should render fixed-width.  Duplicated names (discourse, python)
# collapse to a single entry, keeping their first-seen position.
icons = {
    name: (('fab',) if name in brand_icons else ('fas',)) +
          (('fa-fw',) if name in fixed_icons else ())
    for name in brand_icons + fixed_icons + other_icons
}
# Build the reST prolog: one |name| substitution per icon ...
prolog = ''
for name, css_classes in icons.items():
    prolog += f'''
.. |{name}| raw:: html
<i class="{' '.join(css_classes)} fa-{name}"></i>
'''
# ... plus one hand-written stacked-icon substitution.
prolog += '''
.. |fix-bug| raw:: html
<span class="fa-stack small-stack">
<i class="fas fa-bug fa-stack-1x"></i>
<i class="fas fa-ban fa-stack-2x"></i>
</span>
'''
# -- Dependency info ----------------------------------------------------------
# Look up the minimum supported Python version from the installed mne
# package metadata and expose it to reST as |min_python_version|.
try:
    from importlib.metadata import metadata  # new in Python 3.8
    min_py = metadata('mne')['Requires-Python']
except ModuleNotFoundError:
    # NOTE(review): importlib.metadata.PackageNotFoundError is a subclass of
    # ModuleNotFoundError, so this fallback also runs on Python >= 3.8 when
    # mne itself is not installed -- confirm that is intended.
    from pkg_resources import get_distribution
    info = get_distribution('mne').get_metadata_lines('PKG-INFO')
    # NOTE(review): min_py stays undefined if no Requires-Python line is
    # found, which would raise NameError below -- assumed never to happen
    # for a well-formed mne install.
    for line in info:
        if line.strip().startswith('Requires-Python'):
            min_py = line.split(':')[1]
# Strip the comparison operator, e.g. '>=3.7' -> '3.7'.
min_py = min_py.lstrip(' =<>')
prolog += f'\n.. |min_python_version| replace:: {min_py}\n'
# -- website redirects --------------------------------------------------------
# Static list created 2021/04/13 based on what we needed to redirect,
# since we don't need to add redirects for examples added after this date.
# File names (not HTML names) of tutorials/examples that used to carry a
# "plot_" prefix; make_redirects() emits a plot_<name>.html -> <name>.html
# stub page for each one it finds on disk.
needed_plot_redirects = {
    # tutorials
    '10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py',
    '10_preprocessing_overview.py', '10_raw_overview.py',
    '10_reading_meg_data.py', '15_handling_bad_channels.py',
    '20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py',
    '20_rejecting_bad_data.py', '20_visualize_epochs.py',
    '20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py',
    '30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py',
    '35_artifact_correction_regression.py', '40_artifact_correction_ica.py',
    '40_autogenerate_metadata.py', '40_sensor_locations.py',
    '40_visualize_raw.py', '45_projectors_background.py',
    '50_artifact_correction_ssp.py', '50_configure_mne.py',
    '50_epochs_to_data_frame.py', '55_setting_eeg_reference.py',
    '59_head_positions.py', '60_make_fixed_length_epochs.py',
    '60_maxwell_filtering_sss.py', '70_fnirs_processing.py',
    # examples
    '3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py',
    'cluster_stats_evoked.py', 'compute_csd.py',
    'compute_mne_inverse_epochs_in_label.py',
    'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py',
    'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py',
    'custom_inverse_solver.py',
    'decoding_csp_eeg.py', 'decoding_csp_timefreq.py',
    'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py',
    'decoding_time_generalization_conditions.py',
    'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py',
    'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py',
    'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py',
    'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py',
    'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py',
    'fdr_stats_evoked.py', 'find_ref_artifacts.py',
    'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py',
    'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py',
    'interpolate_bad_channels.py', 'label_activation_from_stc.py',
    'label_from_stc.py', 'label_source_activations.py',
    'left_cerebellum_volume_source.py', 'limo_data.py',
    'linear_model_patterns.py', 'linear_regression_raw.py',
    'meg_sensors.py', 'mixed_norm_inverse.py',
    'mixed_source_space_inverse.py',
    'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py',
    'mne_inverse_envelope_correlation.py',
    'mne_inverse_envelope_correlation_volume.py',
    'mne_inverse_psi_visual.py',
    'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py',
    'movement_detection.py', 'multidict_reweighted_tfmxne.py',
    'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py',
    'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py',
    'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py',
    'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py',
    'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py',
    'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py',
    'sensor_noise_level.py',
    'sensor_permutation_test.py', 'sensor_regression.py',
    'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py',
    'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py',
    'source_label_time_frequency.py', 'source_power_spectrum.py',
    'source_power_spectrum_opm.py', 'source_simulator.py',
    'source_space_morphing.py', 'source_space_snr.py',
    'source_space_time_frequency.py', 'ssd_spatial_filters.py',
    'ssp_projs_sensitivity_map.py', 'temporal_whitening.py',
    'time_frequency_erds.py', 'time_frequency_global_field_power.py',
    'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py',
    'topo_compare_conditions.py', 'topo_customized.py',
    'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py',
    'xhemi.py',
}
# Short path aliases used below to keep the redirect-table lines readable.
ex = 'auto_examples'
co = 'connectivity'
mne_conn = 'https://mne.tools/mne-connectivity/stable'
tu = 'auto_tutorials'
di = 'discussions'
sm = 'source-modeling'
fw = 'forward'
nv = 'inverse'
sn = 'stats-sensor-space'
sr = 'stats-source-space'
sd = 'sample-datasets'
ml = 'machine-learning'
tf = 'time-freq'
si = 'simulation'
custom_redirects = {
    # Custom redirects (one HTML path to another, relative to outdir)
    # can be added here as fr->to key->value mappings
    f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html',
    f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html',
    f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html',  # noqa E501
    f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html',
    f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html',
    f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html',
    f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html',
    f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html',
    f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html',  # noqa E501
    f'{tu}/{sm}/plot_source_alignment.html': f'{tu}/{fw}/20_source_alignment.html',  # noqa E501
    f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html',
    f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html',
    f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html',  # noqa E501
    f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html',  # noqa E501
    f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html',  # noqa E501
    f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html',  # noqa E501
    f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html',
    f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html',  # noqa E501
    f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html',  # noqa E501
    f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html',
    f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html',  # noqa E501
    f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html',
    f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html',
    f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html',  # noqa E501
    f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html',  # noqa E501
    f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html',
    f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html',  # noqa E501
    f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html',
    f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html',  # noqa E501
    f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html',
    f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': f'{tu}/{sn}/40_cluster_1samp_time_freq.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html',  # noqa E501
    f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html',  # noqa E501
    f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sr}/70_cluster_rmANOVA_time_freq.html',  # noqa E501
    f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html',  # noqa E501
    f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html',
    f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html',  # noqa E501
    f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html',
    f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html',
    f'{ex}/{co}/mne_inverse_label_connectivity.html': f'{mne_conn}/{ex}/mne_inverse_label_connectivity.html',  # noqa E501
    f'{ex}/{co}/cwt_sensor_connectivity.html': f'{mne_conn}/{ex}/cwt_sensor_connectivity.html',  # noqa E501
    f'{ex}/{co}/mixed_source_space_connectivity.html': f'{mne_conn}/{ex}/mixed_source_space_connectivity.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_coherence_epochs.html': f'{mne_conn}/{ex}/mne_inverse_coherence_epochs.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_connectivity_spectrum.html': f'{mne_conn}/{ex}/mne_inverse_connectivity_spectrum.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_envelope_correlation_volume.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation_volume.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_envelope_correlation.html': f'{mne_conn}/{ex}/mne_inverse_envelope_correlation.html',  # noqa E501
    f'{ex}/{co}/mne_inverse_psi_visual.html': f'{mne_conn}/{ex}/mne_inverse_psi_visual.html',  # noqa E501
    f'{ex}/{co}/sensor_connectivity.html': f'{mne_conn}/{ex}/sensor_connectivity.html',  # noqa E501
}
def make_redirects(app, exception):
    """Make HTML redirects.

    Connected to the ``build-finished`` Sphinx event (see ``setup`` below);
    writes small HTML stub pages that forward old page URLs to their new
    locations.  Only runs after a *successful* standalone-HTML build.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        The Sphinx application.
    exception : Exception | None
        The exception raised during the build, if any.
    """
    # https://www.sphinx-doc.org/en/master/extdev/appapi.html
    # Adapted from sphinxcontrib/redirects (BSD-2-Clause)
    if not (isinstance(app.builder,
                       sphinx.builders.html.StandaloneHTMLBuilder) and
            exception is None):
        return
    logger = sphinx.util.logging.getLogger('mne')
    # Stub page: meta-refresh plus JS redirect to the {to} target.
    TEMPLATE = """\
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="1; url={to}">
<script type="text/javascript">
window.location.href = "{to}"
</script>
<title>Page Redirection</title>
</head>
<body>
If you are not redirected automatically, follow this <a href='{to}'>link</a>.
</body>
</html>"""  # noqa: E501
    sphinx_gallery_conf = app.config['sphinx_gallery_conf']
    for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'],
                                sphinx_gallery_conf['gallery_dirs']):
        root = os.path.abspath(os.path.join(app.srcdir, src_dir))
        # Source files (relative to root) that need a plot_* redirect stub.
        fnames = [os.path.join(os.path.relpath(dirpath, root), fname)
                  for dirpath, _, fnames in os.walk(root)
                  for fname in fnames
                  if fname in needed_plot_redirects]
        # plot_ redirects
        for fname in fnames:
            dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname))
            to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html'
            fr_fname = f'plot_{to_fname}'
            to_path = os.path.join(dirname, to_fname)
            fr_path = os.path.join(dirname, fr_fname)
            # Redirect target must already have been built.
            assert os.path.isfile(to_path), (fname, to_path)
            with open(fr_path, 'w') as fid:
                fid.write(TEMPLATE.format(to=to_fname))
        logger.info(
            f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}')
    # custom redirects
    for fr, to in custom_redirects.items():
        if not to.startswith('http'):
            assert os.path.isfile(os.path.join(app.outdir, to)), to
            # handle links to sibling folders
            path_parts = to.split('/')
            assert tu in path_parts, path_parts  # need to refactor otherwise
            path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):]
            to = os.path.join(*path_parts)
        assert to.endswith('html'), to
        fr_path = os.path.join(app.outdir, fr)
        assert fr_path.endswith('html'), fr_path
        # allow overwrite if existing file is just a redirect
        if os.path.isfile(fr_path):
            with open(fr_path, 'r') as fid:
                # Skip to line 9 of TEMPLATE, which is the
                # <title>Page Redirection</title> line.
                for _ in range(8):
                    next(fid)
                line = fid.readline()
            assert 'Page Redirection' in line, line
        # handle folders that no longer exist
        if fr_path.split('/')[-2] in (
                'misc', 'discussions', 'source-modeling', 'sample-datasets',
                'connectivity'):
            os.makedirs(os.path.dirname(fr_path), exist_ok=True)
        with open(fr_path, 'w') as fid:
            fid.write(TEMPLATE.format(to=to))
    logger.info(
        f'Added {len(custom_redirects):3d} HTML custom redirects')
def make_version(app, exception):
    """Write the current git commit hash to ``_version.txt``.

    Connected to the ``build-finished`` Sphinx event (see ``setup`` below);
    runs only after a successful standalone-HTML build.  Failure to obtain
    the hash is logged but never fails the build.
    """
    is_html_build = isinstance(
        app.builder, sphinx.builders.html.StandaloneHTMLBuilder)
    if exception is not None or not is_html_build:
        return
    logger = sphinx.util.logging.getLogger('mne')
    try:
        sha, _ = run_subprocess(['git', 'rev-parse', 'HEAD'], verbose=False)
    except Exception as exc:
        # Best effort only: e.g. building outside a git checkout.
        logger.warning(f'Failed to write _version.txt: {exc}')
        return
    version_path = os.path.join(app.outdir, '_version.txt')
    with open(version_path, 'w') as fid:
        fid.write(sha)
    logger.info(f'Added "{sha.rstrip()}" > _version.txt')
# -- Connect our handlers to the main Sphinx app ---------------------------
def setup(app):
    """Set up the Sphinx app (standard conf.py extension entry point)."""
    app.connect('autodoc-process-docstring', append_attr_meth_examples)
    if report_scraper is not None:
        # NOTE(review): rst_prolog is only installed when the report scraper
        # is available -- confirm the |icon| / |min_python_version|
        # substitutions are not needed in scraper-less builds.
        report_scraper.app = app
        app.config.rst_prolog = prolog
        app.connect('builder-inited', report_scraper.copyfiles)
    logger = sphinx.util.logging.getLogger('mne')
    logger.info(
        f'Building documentation for MNE {release} ({mne.__file__})')
    # Post-build hooks: redirect stubs first, then the version stamp.
    app.connect('build-finished', make_redirects)
    app.connect('build-finished', make_version)
| 47.634126 | 300 | 0.637588 |
4c697a25893d5c5e940ceaf6bf5d418ca5da5178 | 14,412 | py | Python | sdk/python/pulumi_azure_nextgen/datalakestore/v20161101/get_account.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/datalakestore/v20161101/get_account.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/datalakestore/v20161101/get_account.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
]
@pulumi.output_type
class GetAccountResult:
    """
    Data Lake Store account information.

    Instances are produced by :func:`get_account`; every field is exposed
    read-only through a ``@pulumi.getter`` property below.
    """
    def __init__(__self__, account_id=None, creation_time=None, current_tier=None, default_group=None, encryption_config=None, encryption_provisioning_state=None, encryption_state=None, endpoint=None, firewall_allow_azure_ips=None, firewall_rules=None, firewall_state=None, identity=None, last_modified_time=None, location=None, name=None, new_tier=None, provisioning_state=None, state=None, tags=None, trusted_id_provider_state=None, trusted_id_providers=None, type=None, virtual_network_rules=None):
        # (pulumi attribute name, received value, expected Python type), in
        # declaration order.  This table replaces ~70 lines of copy-pasted
        # "check then pulumi.set" boilerplate with one loop; behavior is
        # unchanged, including the exact TypeError messages and the fact
        # that falsy values (None, '', [], {}) skip the isinstance check.
        arg_specs = (
            ('account_id', account_id, str),
            ('creation_time', creation_time, str),
            ('current_tier', current_tier, str),
            ('default_group', default_group, str),
            ('encryption_config', encryption_config, dict),
            ('encryption_provisioning_state', encryption_provisioning_state, str),
            ('encryption_state', encryption_state, str),
            ('endpoint', endpoint, str),
            ('firewall_allow_azure_ips', firewall_allow_azure_ips, str),
            ('firewall_rules', firewall_rules, list),
            ('firewall_state', firewall_state, str),
            ('identity', identity, dict),
            ('last_modified_time', last_modified_time, str),
            ('location', location, str),
            ('name', name, str),
            ('new_tier', new_tier, str),
            ('provisioning_state', provisioning_state, str),
            ('state', state, str),
            ('tags', tags, dict),
            ('trusted_id_provider_state', trusted_id_provider_state, str),
            ('trusted_id_providers', trusted_id_providers, list),
            ('type', type, str),
            ('virtual_network_rules', virtual_network_rules, list),
        )
        for arg_name, value, expected_type in arg_specs:
            if value and not isinstance(value, expected_type):
                raise TypeError("Expected argument '%s' to be a %s"
                                % (arg_name, expected_type.__name__))
            pulumi.set(__self__, arg_name, value)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> str:
        """
        The unique identifier associated with this Data Lake Store account.
        """
        return pulumi.get(self, "account_id")

    @property
    @pulumi.getter(name="creationTime")
    def creation_time(self) -> str:
        """
        The account creation time.
        """
        return pulumi.get(self, "creation_time")

    @property
    @pulumi.getter(name="currentTier")
    def current_tier(self) -> str:
        """
        The commitment tier in use for the current month.
        """
        return pulumi.get(self, "current_tier")

    @property
    @pulumi.getter(name="defaultGroup")
    def default_group(self) -> str:
        """
        The default owner group for all new folders and files created in the Data Lake Store account.
        """
        return pulumi.get(self, "default_group")

    @property
    @pulumi.getter(name="encryptionConfig")
    def encryption_config(self) -> 'outputs.EncryptionConfigResponse':
        """
        The Key Vault encryption configuration.
        """
        return pulumi.get(self, "encryption_config")

    @property
    @pulumi.getter(name="encryptionProvisioningState")
    def encryption_provisioning_state(self) -> str:
        """
        The current state of encryption provisioning for this Data Lake Store account.
        """
        return pulumi.get(self, "encryption_provisioning_state")

    @property
    @pulumi.getter(name="encryptionState")
    def encryption_state(self) -> str:
        """
        The current state of encryption for this Data Lake Store account.
        """
        return pulumi.get(self, "encryption_state")

    @property
    @pulumi.getter
    def endpoint(self) -> str:
        """
        The full CName endpoint for this account.
        """
        return pulumi.get(self, "endpoint")

    @property
    @pulumi.getter(name="firewallAllowAzureIps")
    def firewall_allow_azure_ips(self) -> str:
        """
        The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced.
        """
        return pulumi.get(self, "firewall_allow_azure_ips")

    @property
    @pulumi.getter(name="firewallRules")
    def firewall_rules(self) -> Sequence['outputs.FirewallRuleResponse']:
        """
        The list of firewall rules associated with this Data Lake Store account.
        """
        return pulumi.get(self, "firewall_rules")

    @property
    @pulumi.getter(name="firewallState")
    def firewall_state(self) -> str:
        """
        The current state of the IP address firewall for this Data Lake Store account.
        """
        return pulumi.get(self, "firewall_state")

    @property
    @pulumi.getter
    def identity(self) -> 'outputs.EncryptionIdentityResponse':
        """
        The Key Vault encryption identity, if any.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter(name="lastModifiedTime")
    def last_modified_time(self) -> str:
        """
        The account last modified time.
        """
        return pulumi.get(self, "last_modified_time")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="newTier")
    def new_tier(self) -> str:
        """
        The commitment tier to use for next month.
        """
        return pulumi.get(self, "new_tier")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning status of the Data Lake Store account.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The state of the Data Lake Store account.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="trustedIdProviderState")
    def trusted_id_provider_state(self) -> str:
        """
        The current state of the trusted identity provider feature for this Data Lake Store account.
        """
        return pulumi.get(self, "trusted_id_provider_state")

    @property
    @pulumi.getter(name="trustedIdProviders")
    def trusted_id_providers(self) -> Sequence['outputs.TrustedIdProviderResponse']:
        """
        The list of trusted identity providers associated with this Data Lake Store account.
        """
        return pulumi.get(self, "trusted_id_providers")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualNetworkRules")
    def virtual_network_rules(self) -> Sequence['outputs.VirtualNetworkRuleResponse']:
        """
        The list of virtual network rules associated with this Data Lake Store account.
        """
        return pulumi.get(self, "virtual_network_rules")
class AwaitableGetAccountResult(GetAccountResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Immediately produce a plain ``GetAccountResult``.

        The unreachable ``yield`` makes this method a generator, which is
        what allows ``await``-ing instances of this class; no value is
        actually yielded before the result is returned.
        """
        if False:
            yield self
        field_names = (
            'account_id', 'creation_time', 'current_tier', 'default_group',
            'encryption_config', 'encryption_provisioning_state',
            'encryption_state', 'endpoint', 'firewall_allow_azure_ips',
            'firewall_rules', 'firewall_state', 'identity',
            'last_modified_time', 'location', 'name', 'new_tier',
            'provisioning_state', 'state', 'tags',
            'trusted_id_provider_state', 'trusted_id_providers', 'type',
            'virtual_network_rules')
        return GetAccountResult(
            **{field: getattr(self, field) for field in field_names})
def get_account(account_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
    """
    Use this data source to access information about an existing resource.

    :param str account_name: The name of the Data Lake Store account.
    :param str resource_group_name: The name of the Azure resource group.
    """
    invoke_args = {
        'accountName': account_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke(
        'azure-nextgen:datalakestore/v20161101:getAccount',
        invoke_args, opts=opts, typ=GetAccountResult).value
    # Copy every output property of the raw result onto the awaitable
    # wrapper, field by field.
    field_names = (
        'account_id', 'creation_time', 'current_tier', 'default_group',
        'encryption_config', 'encryption_provisioning_state',
        'encryption_state', 'endpoint', 'firewall_allow_azure_ips',
        'firewall_rules', 'firewall_state', 'identity', 'last_modified_time',
        'location', 'name', 'new_tier', 'provisioning_state', 'state',
        'tags', 'trusted_id_provider_state', 'trusted_id_providers', 'type',
        'virtual_network_rules')
    return AwaitableGetAccountResult(
        **{field: getattr(result, field) for field in field_names})
| 40.943182 | 501 | 0.672981 |
5b0b8c1d2901c6d121df3ce1be9e57eeb0c76faf | 338 | py | Python | examples/docs_snippets/docs_snippets/concepts/assets/multi_component_asset_key.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets/concepts/assets/multi_component_asset_key.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets/concepts/assets/multi_component_asset_key.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | 1 | 2019-09-11T03:02:27.000Z | 2019-09-11T03:02:27.000Z | # pylint: disable=redefined-outer-name
# start_marker
from dagster import AssetIn, asset
@asset(namespace=["one", "two", "three"])
def upstream_asset():
    """Produce a constant list; the namespace components prefix this asset's key."""
    return [1, 2, 3]
@asset(ins={"upstream_asset": AssetIn(namespace=["one", "two", "three"])})
def downstream_asset(upstream_asset):
    """Consume the namespaced upstream asset and append one element."""
    return upstream_asset + [4]
# end_marker
| 19.882353 | 74 | 0.698225 |
a76154bde212337880ee06fe58ef61c6ad1979ff | 3,502 | py | Python | utils.py | zyan97/Bert-Chinese-Text-Classification-Pytorch | c5d67c15e29eda33db561cb76d9eee73dfc2caa2 | [
"MIT"
] | null | null | null | utils.py | zyan97/Bert-Chinese-Text-Classification-Pytorch | c5d67c15e29eda33db561cb76d9eee73dfc2caa2 | [
"MIT"
] | null | null | null | utils.py | zyan97/Bert-Chinese-Text-Classification-Pytorch | c5d67c15e29eda33db561cb76d9eee73dfc2caa2 | [
"MIT"
] | null | null | null | # coding: UTF-8
import torch
from tqdm import tqdm
import time
from datetime import timedelta
PAD, CLS = '[PAD]', '[CLS]' # padding符号, bert中综合信息符号
def load_dataset(config, path, pad_size=32):
    """Read a ``text<TAB>label`` file and tokenize every line.

    Returns a dict with two keys:
      - ``'sentences'``: the raw text of each sample, in file order
      - ``'contents'``: ``(token_ids, label, seq_len, mask)`` tuples, where
        ``[CLS]`` is prepended and sequences are padded/truncated to
        ``pad_size`` (``mask`` is 1 for real tokens, 0 for padding).
    """
    raw_texts = []
    samples = []
    with open(path, 'r', encoding='UTF-8') as handle:
        for raw_line in tqdm(handle):
            stripped = raw_line.strip()
            if not stripped:
                continue
            text, label = stripped.split('\t')
            raw_texts.append(text)
            tokens = [CLS] + config.tokenizer.tokenize(text)
            seq_len = len(tokens)
            mask = []
            ids = config.tokenizer.convert_tokens_to_ids(tokens)
            if pad_size:
                if len(tokens) < pad_size:
                    pad_amount = pad_size - len(tokens)
                    mask = [1] * len(ids) + [0] * pad_amount
                    ids = ids + [0] * pad_amount
                else:
                    mask = [1] * pad_size
                    ids = ids[:pad_size]
                    seq_len = pad_size
            samples.append((ids, int(label), seq_len, mask))
    return {'sentences': raw_texts, 'contents': samples}
def build_dataset(config):
    """Load the train/dev/test splits configured on ``config`` via load_dataset."""
    split_paths = {
        'train': config.train_path,
        'dev': config.dev_path,
        'test': config.test_path,
    }
    return {
        split: load_dataset(config, split_path, config.pad_size)
        for split, split_path in split_paths.items()
    }
class DatasetIterater(object):
    """Iterate over (token_ids, label, seq_len, mask) samples in mini-batches,
    converting each batch to ``torch.LongTensor``s on ``device``.

    Each iteration yields ``((x, seq_len, mask), y)``.
    """
    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # True when a smaller, final "residue" batch is needed.
        # Fix: the original tested `len(batches) % self.n_batches`, which
        # raised ZeroDivisionError on an empty dataset and silently dropped
        # the tail whenever len(batches) was a multiple of n_batches
        # (e.g. 3 samples with batch_size 2 -> n_batches 1, 3 % 1 == 0).
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        # Stack each field of the batch into a LongTensor on the target device.
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
        # Pre-padding sequence length (capped at pad_size upstream).
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        mask = torch.LongTensor([_[3] for _ in datas]).to(self.device)
        return (x, seq_len, mask), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final, smaller batch made of the leftover samples.
            batches = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            return self._to_tensor(batches)
        elif self.index >= self.n_batches:
            # Reset so the iterator can be reused for the next epoch.
            self.index = 0
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(batches)

    def __iter__(self):
        return self

    def __len__(self):
        # Number of batches per epoch, counting the residue batch if any.
        return self.n_batches + 1 if self.residue else self.n_batches
def build_iterator(dataset, config):
    """Wrap ``dataset`` (a list of sample tuples) in a DatasetIterater using
    the batch size and device from ``config``.

    The local variable is renamed so the builtin ``iter`` is not shadowed.
    """
    iterator = DatasetIterater(dataset, config.batch_size, config.device)
    return iterator
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since ``start_time``, rounded to
    whole seconds, as a ``timedelta``."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
| 32.12844 | 100 | 0.589377 |
c620286f7f15e6524b8259a1093b0d409b4559fa | 1,395 | py | Python | airflow/migrations/versions/c8ffec048a3b_add_fields_to_dag.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/migrations/versions/c8ffec048a3b_add_fields_to_dag.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210 | 2021-07-17T00:25:52.000Z | 2021-12-29T00:44:48.000Z | airflow/migrations/versions/c8ffec048a3b_add_fields_to_dag.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add fields to dag
Revision ID: c8ffec048a3b
Revises: 41f5f12752f8
Create Date: 2018-12-23 21:55:46.463634
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'c8ffec048a3b'       # this migration's identifier
down_revision = '41f5f12752f8'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():  # noqa: D103
    """Add nullable ``description`` and ``default_view`` columns to ``dag``."""
    new_columns = (
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('default_view', sa.String(25), nullable=True),
    )
    for column in new_columns:
        op.add_column('dag', column)
def downgrade():  # noqa: D103
    """Drop the columns added by ``upgrade``."""
    for column_name in ('description', 'default_view'):
        op.drop_column('dag', column_name)
| 31 | 81 | 0.746237 |
07957484b18fee08a9cf6f4a595a3caf31b2996b | 9,714 | py | Python | malaya_speech/train/loss.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | malaya_speech/train/loss.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | malaya_speech/train/loss.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | 1 | 2021-08-19T02:34:41.000Z | 2021-08-19T02:34:41.000Z | import tensorflow as tf
from .utils import check_params
def calculate_3d_loss(y_gt, y_pred, loss_fn):
    """Calculate 3d loss, normally it's mel-spectrogram loss.

    When training replicated across multiple GPUs the ground truth and
    prediction can disagree on the time axis (axis 1), so the longer tensor is
    sliced down to the shorter one before the loss is computed.
    """
    y_gt_T = tf.shape(y_gt)[1]
    y_pred_T = tf.shape(y_pred)[1]

    def slice_gt():
        return tf.slice(y_gt, [0, 0, 0], [-1, y_pred_T, -1])

    def keep_gt():
        return y_gt

    def slice_pred():
        return tf.slice(y_pred, [0, 0, 0], [-1, y_gt_T, -1])

    def keep_pred():
        return y_pred

    # Trim whichever side is longer so both share the same time length.
    y_gt = tf.cond(tf.greater(y_gt_T, y_pred_T), slice_gt, keep_gt)
    # Fix: the original defined the y_pred slicing branches but never applied
    # them, so an over-long prediction reached loss_fn unsliced — unlike
    # calculate_2d_loss, which trims both sides.
    y_pred = tf.cond(tf.greater(y_pred_T, y_gt_T), slice_pred, keep_pred)

    loss = loss_fn(y_gt, y_pred)
    return loss
def calculate_2d_loss(y_gt, y_pred, loss_fn):
    """Calculate 2d loss, normally it's durations/f0s/energys loss.

    Lengths along axis 1 can differ when training on multiple GPUs, so the
    longer of the two tensors is sliced down before computing the loss.
    """
    len_gt = tf.shape(y_gt)[1]
    len_pred = tf.shape(y_pred)[1]

    # Both tf.cond branches are built at graph-construction time, before the
    # Python names are rebound, so the lambdas see the original tensors.
    y_gt = tf.cond(
        tf.greater(len_gt, len_pred),
        lambda: tf.slice(y_gt, [0, 0], [-1, len_pred]),
        lambda: y_gt,
    )
    y_pred = tf.cond(
        tf.greater(len_pred, len_gt),
        lambda: tf.slice(y_pred, [0, 0], [-1, len_gt]),
        lambda: y_pred,
    )

    loss = loss_fn(y_gt, y_pred)
    return loss
# https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/optimizers/automatic_loss_scaler.py#L11
class AutomaticLossScaler(object):
    """Dispatch wrapper selecting a loss-scaling strategy ('backoff' or
    'logmax') for mixed-precision training."""

    SUPPORTED_ALGOS = ['backoff', 'logmax']

    def __init__(self, algorithm = 'Backoff', params = None):
        name = algorithm.lower().strip()
        if name == 'backoff':
            self.scaler = BackoffScaler(params)
        elif name == 'logmax':
            self.scaler = LogMaxScaler(params)  # ppf(.999)
        else:
            raise ValueError('Unknown scaling algorithm: {}'.format(name))

    def update_op(self, has_nan, amax):
        # Delegate the per-step scale-update graph op to the chosen strategy.
        return self.scaler.update_op(has_nan, amax)

    @property
    def loss_scale(self):
        return self.scaler.loss_scale

    @staticmethod
    def check_grads(grads_and_vars):
        """Return (has_nan, amax) graph nodes summarising all gradients."""
        nan_checks = []
        max_checks = []
        for grad, _ in grads_and_vars:
            if grad is None:
                continue
            values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
            nan_checks.append(tf.reduce_any(tf.is_nan(values)))
            max_checks.append(tf.reduce_max(tf.abs(values)))
        has_nan = tf.reduce_any(nan_checks)
        amax = tf.reduce_max(max_checks)
        return has_nan, amax
# https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/optimizers/automatic_loss_scaler.py#L50
class BackoffScaler(object):
    """Dynamic loss scaler: shrink the scale on gradient overflow and grow it
    again after ``step_window`` clean iterations (NVIDIA "backoff" scheme)."""

    def __init__(self, params):
        # `params` may be None; every key is optional with a default below.
        if params is None:
            params = {}
        check_params(
            config = params,
            required_dict = {},
            optional_dict = {
                'scale_min': float,
                'scale_max': float,
                'step_factor': float,
                'step_window': int,
            },
        )
        self.scale_min = params.get('scale_min', 1.0)
        self.scale_max = params.get('scale_max', 2.0 ** 24)
        self.step_factor = params.get('step_factor', 2.0)
        self.step_window = params.get('step_window', 2000)

        # Global step counter, and the step at which the last overflow happened.
        self.iteration = tf.Variable(
            initial_value = 0, trainable = False, dtype = tf.int64
        )
        self.last_overflow_iteration = tf.Variable(
            initial_value = -1, trainable = False, dtype = tf.int64
        )
        # Start at the maximum scale; overflows back it off from there.
        self.scale = tf.Variable(
            initial_value = self.scale_max, trainable = False
        )

    def update_op(self, has_nan, amax):
        """Build the graph op that updates the loss scale for one step.

        `has_nan` / `amax` are the nodes produced by
        AutomaticLossScaler.check_grads.
        """
        def overflow_case():
            # Overflow: shrink the scale and remember when it happened.
            new_scale_val = tf.clip_by_value(
                self.scale / self.step_factor, self.scale_min, self.scale_max
            )
            scale_assign = tf.assign(self.scale, new_scale_val)
            overflow_iter_assign = tf.assign(
                self.last_overflow_iteration, self.iteration
            )
            with tf.control_dependencies([scale_assign, overflow_iter_assign]):
                return tf.identity(self.scale)

        def scale_case():
            # No overflow: every `step_window` clean steps, grow the scale.
            since_overflow = self.iteration - self.last_overflow_iteration
            should_update = tf.equal(since_overflow % self.step_window, 0)

            def scale_update_fn():
                new_scale_val = tf.clip_by_value(
                    self.scale * self.step_factor,
                    self.scale_min,
                    self.scale_max,
                )
                return tf.assign(self.scale, new_scale_val)

            return tf.cond(should_update, scale_update_fn, lambda: self.scale)

        iter_update = tf.assign_add(self.iteration, 1)
        overflow = tf.logical_or(has_nan, tf.is_inf(amax))
        update_op = tf.cond(overflow, overflow_case, scale_case)
        # Ensure the scale update runs before the iteration counter advances.
        with tf.control_dependencies([update_op]):
            return tf.identity(iter_update)

    @property
    def loss_scale(self):
        # Current scale as a tf.Variable (read inside the training graph).
        return self.scale
# https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/optimizers/automatic_loss_scaler.py#L113
class LogMaxScaler(object):
    """Loss scaler that tracks running statistics of log2(max|grad|) and sets
    the scale so that an overflow sits `overflow_std_dev` standard deviations
    above the running mean."""

    def __init__(self, params):
        if params is None:
            params = {}
        check_params(
            config = params,
            required_dict = {},
            optional_dict = {
                'scale_min': float,
                'scale_max': float,
                'log_max': float,
                'beta1': float,
                'beta2': float,
                'overflow_std_dev': float,
            },
        )
        self.scale_min = params.get('scale_min', 1.0)
        self.scale_max = params.get('scale_max', 2.0 ** 24)
        self.log_max = params.get('log_max', 16.0)
        # beta1/beta2: EMA decay rates for the fast mean and the slow
        # mean/second-moment estimates (Adam-style bias correction below).
        self.beta1 = params.get('beta1', 0.99)
        self.beta2 = params.get('beta2', 0.999)
        # Std-devs of headroom to keep below the overflow point.
        self.overflow_std_dev = params.get('overflow_std_dev', 3.09)

        self.iteration = tf.Variable(
            initial_value = 0, trainable = False, dtype = tf.int64
        )
        self.scale = tf.Variable(initial_value = 1.0, trainable = False)
        # Running EMAs of x = log2(amax) and x^2, plus their bias-correction
        # accumulators (products of beta^t).
        self.x_hat = tf.Variable(
            initial_value = 0, trainable = False, dtype = tf.float32
        )
        self.slow_x_hat = tf.Variable(
            initial_value = 0, trainable = False, dtype = tf.float32
        )
        self.xsquared_hat = tf.Variable(
            initial_value = 0, trainable = False, dtype = tf.float32
        )
        self.b1_correction = tf.Variable(
            initial_value = 1.0, trainable = False, dtype = tf.float32
        )
        self.b2_correction = tf.Variable(
            initial_value = 1.0, trainable = False, dtype = tf.float32
        )

    # NB: assumes that `amax` is already has been downscaled
    def update_op(self, has_nan, amax):
        """Build the graph op updating the running stats and the loss scale."""
        is_nonfinite = tf.logical_or(has_nan, tf.is_inf(amax))
        # x = log2 of the max gradient magnitude; non-finite steps are treated
        # as the worst case, 2 ** log_max.
        x = tf.cond(
            is_nonfinite,
            lambda: tf.pow(2.0, self.log_max),
            lambda: tf.log(amax) / tf.log(tf.constant(2.0)),
        )

        # Fast EMA of x with bias correction (mirrors Adam's first moment).
        x_hat_assn = tf.assign(
            self.x_hat, self.beta1 * self.x_hat + (1 - self.beta1) * x
        )
        b1_corr_assn = tf.assign(
            self.b1_correction, self.b1_correction * self.beta1
        )
        with tf.control_dependencies([x_hat_assn, b1_corr_assn]):
            mu = self.x_hat.read_value() / (1 - self.b1_correction.read_value())

        # Slow EMAs of x and x^2 give a variance estimate for x.
        slow_x_hat_assn = tf.assign(
            self.slow_x_hat, self.beta2 * self.slow_x_hat + (1 - self.beta2) * x
        )
        xsquared_hat_assn = tf.assign(
            self.xsquared_hat,
            self.beta2 * self.xsquared_hat + (1 - self.beta2) * (x * x),
        )
        b2_corr_assn = tf.assign(
            self.b2_correction, self.b2_correction * self.beta2
        )
        with tf.control_dependencies(
            [slow_x_hat_assn, xsquared_hat_assn, b2_corr_assn]
        ):
            e_xsquared = self.xsquared_hat.read_value() / (
                1 - self.b2_correction.read_value()
            )
            slow_mu = self.slow_x_hat.read_value() / (
                1 - self.b2_correction.read_value()
            )

        # Var[x] = E[x^2] - E[x]^2, clamped at zero against rounding error.
        sigma2 = e_xsquared - (slow_mu * slow_mu)
        sigma = tf.sqrt(tf.maximum(sigma2, tf.constant(0.0)))

        # Pick the scale so that mu + overflow_std_dev * sigma lands at 16
        # (~= log2 of the float16 max).
        log_cutoff = sigma * self.overflow_std_dev + mu
        log_difference = 16 - log_cutoff
        proposed_scale = tf.pow(2.0, log_difference)
        scale_update = tf.assign(
            self.scale,
            tf.clip_by_value(proposed_scale, self.scale_min, self.scale_max),
        )
        iter_update = tf.assign_add(self.iteration, 1)
        # Apply the scale update before advancing the iteration counter.
        with tf.control_dependencies([scale_update]):
            return tf.identity(iter_update)

    @property
    def loss_scale(self):
        return self.scale
| 33.612457 | 105 | 0.574635 |
07ebf9a52eb74250204d6e5118a4569dd1867c76 | 22,978 | py | Python | test/functional/test_framework/util.py | donPabloNow/digiwage | 87491caf8563779b1bb69866e102cb8a1439b427 | [
"MIT"
] | 14 | 2018-03-19T23:28:42.000Z | 2022-03-11T08:58:01.000Z | test/functional/test_framework/util.py | donPabloNow/digiwage | 87491caf8563779b1bb69866e102cb8a1439b427 | [
"MIT"
] | 4 | 2018-03-30T13:55:22.000Z | 2022-01-30T21:17:25.000Z | test/functional/test_framework/util.py | donPabloNow/digiwage | 87491caf8563779b1bb69866e102cb8a1439b427 | [
"MIT"
] | 22 | 2018-04-08T07:41:41.000Z | 2022-03-11T03:29:25.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert that `fee` matches the expected fee for a `tx_size`-byte
    transaction at `fee_per_kB`, within the wallet's estimation slack."""
    expected = round(tx_size * fee_per_kB / 1000, 8)
    if fee < expected:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(expected)))
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 20) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(expected)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to the first."""
    values = (thing1, thing2) + args
    if any(value != thing1 for value in values):
        raise AssertionError("not(%s)" % " == ".join(str(value) for value in values))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if not thing1 > thing2:
        raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if not thing1 >= thing2:
        raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises `exc`; the message is not checked."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises `exc`.

    If `message` is not None, it must be a substring of the exception's
    JSONRPC error message (`e.error['message']`), so non-None messages only
    make sense for JSONRPCException-like exception classes.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        # Checked first so RPC failures are steered to the dedicated helper.
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute a process and asserts the process return code and output.

    Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
    and verifies that the return code and output are as expected. Throws AssertionError if
    no CalledProcessError was raised or if the return code and output are not as expected.

    Args:
        returncode (int): the process return code.
        output (string): [a substring of] the process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    raised = try_rpc(code, message, fun, *args, **kwds)
    if not raised:
        raise AssertionError("No exception raised")
def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.

    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
        return True
    except Exception as e:
        # Any non-JSONRPC exception means the call failed in an unexpected way.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        # The call succeeded, so no JSONRPC error was raised.
        return False
def assert_is_hex_string(string):
    """Raise AssertionError if `string` cannot be parsed as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        msg = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(msg)
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless `string` looks like a lowercase hex hash of
    `length` characters (pass a falsy `length` to skip the length check)."""
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        # Skip items that do not satisfy every to_match key/value pair.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        if should_not_find:
            num_matched += 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by `hex_string`."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex encoding of a bytes-like object as a str."""
    return byte_str.hex()
def hash256(byte_str):
    """Return the double-SHA256 of `byte_str`, byte-reversed (bitcoin display order)."""
    first_pass = hashlib.sha256(byte_str).digest()
    second_pass = hashlib.sha256(first_pass).digest()
    return second_pass[::-1]
def hex_str_to_bytes(hex_str):
    """Decode a strict (no-whitespace) hex string into the bytes it represents."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """Return the base64 encoding of a UTF-8 string, as an ASCII str."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def satoshi_round(amount):
    """Round `amount` down to 8 decimal places (1 satoshi), returning a Decimal."""
    step = Decimal('0.00000001')
    return Decimal(amount).quantize(step, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll `predicate` every 0.5s until it returns True, optionally holding
    `lock` while evaluating it. When neither limit is given, a 60 second
    timeout is used. Raises (via assert_greater_than) when a limit is hit."""
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    deadline = time.time() + timeout
    attempt = 0
    while attempt < attempts and time.time() < deadline:
        if lock:
            with lock:
                satisfied = predicate()
        else:
            satisfied = predicate()
        if satisfied:
            return
        attempt += 1
        time.sleep(0.5)

    # Report which limit was exhausted.
    assert_greater_than(attempts, attempt)
    assert_greater_than(deadline, time.time())
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    # Must be initialized with a unique integer for each process
    # so that parallel test runs derive disjoint p2p/rpc port ranges.
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.

    """
    kwargs = {}
    if timeout is not None:
        kwargs['timeout'] = timeout

    proxy = AuthServiceProxy(url, **kwargs)
    proxy.url = url  # store URL on proxy for info

    if coveragedir:
        coverage_logfile = coverage.get_filename(coveragedir, node_number)
    else:
        coverage_logfile = None

    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Deterministic P2P listen port for node `n`, offset by this process's PortSeed."""
    assert n <= MAX_NODES
    offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + n + offset
def rpc_port(n):
    """Deterministic RPC port for node `n` (one PORT_RANGE above the p2p ports)."""
    offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + PORT_RANGE + n + offset
def rpc_url(datadir, i, rpchost=None):
    """Build the http://user:pass@host:port URL for node `i`'s RPC interface.

    `rpchost` may be "host", or "host:port" to override both defaults.
    """
    user, password = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (user, password, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create node `n`'s datadir under `dirname` and write a regtest
    digiwage.conf holding its RPC credentials and port assignments."""
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    config_lines = [
        "regtest=1",
        "rpcuser=" + rpc_u,
        "rpcpassword=" + rpc_p,
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
        "listenonion=0",
        "spendzeroconfchange=1",
    ]
    with open(os.path.join(datadir, "digiwage.conf"), 'w', encoding='utf8') as f:
        f.write("\n".join(config_lines) + "\n")
    return datadir
def rpc_auth_pair(n):
    """Return the (rpcuser, rpcpassword) pair used for node `n`.

    NOTE(review): the U+FFFD replacement character in both literals looks
    like mojibake from a lost non-ASCII character used upstream to exercise
    unicode credential handling -- confirm against the original source. It is
    harmless as long as every reader/writer of digiwage.conf agrees on it.
    """
    return 'rpcuser�' + str(n), 'rpcpass�' + str(n)
def get_datadir_path(dirname, n):
    """Return the path of node `n`'s data directory under `dirname`."""
    return os.path.join(dirname, "node{}".format(n))
def get_auth_cookie(datadir):
    """Return (user, password) for RPC auth: taken from rpcuser/rpcpassword in
    digiwage.conf when present, otherwise from the regtest .cookie file
    (the cookie, when it exists, overrides the conf values)."""
    user = None
    password = None
    conf_path = os.path.join(datadir, "digiwage.conf")
    if os.path.isfile(conf_path):
        with open(conf_path, 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    user = line.split("=")[1].strip("\n")
                elif line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=")[1].strip("\n")
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        with open(cookie_path, 'r') as f:
            parts = f.read().split(':')
            user = parts[0]
            password = parts[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        logger.debug("Deleting leftover cookie file")
        os.remove(cookie_path)
def get_bip9_status(node, key):
    """Return the bip9_softforks entry for `key` from the node's getblockchaininfo."""
    return node.getblockchaininfo()['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the same mock time `t` on every node."""
    for each_node in nodes:
        each_node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of `from_connection` whose subversion marks it as
    test node `node_num`, then wait (up to 5s) for the disconnect to finish."""
    tag = "testnode%d" % node_num
    peer_addrs = [peer['addr'] for peer in from_connection.getpeerinfo() if tag in peer['subver']]
    for addr in peer_addrs:
        try:
            from_connection.disconnectnode(addr)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            # This avoids a race condition if we're mass-disconnecting peers.
            if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                raise

    # wait to disconnect
    wait_until(lambda: not any(tag in peer['subver'] for peer in from_connection.getpeerinfo()), timeout=5)
def connect_nodes(from_connection, node_num):
    """Open an outbound connection from `from_connection` to node `node_num`
    and block until the version handshake completes."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    # Connect nodes[a] -> nodes[b] and nodes[b] -> nodes[a] so the link is
    # symmetric regardless of which side initiates.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def connect_nodes_clique(nodes):
    """Connect every pair of distinct nodes bidirectionally (full mesh).

    Fix: the inner loop previously started at `a`, so each node also tried to
    connect to itself; self-connections are pointless here, so the pair
    (a, a) is now skipped.
    """
    l = len(nodes)
    for a in range(l):
        for b in range(a + 1, l):
            connect_nodes_bi(nodes, a, b)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.

    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    # NOTE(review): this unconditional sleep looks like a band-aid for flaky
    # syncs -- confirm it is still needed before removing it.
    time.sleep(5)
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # Each node blocks (up to `wait` seconds, in ms) until it reaches maxheight.
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            # Same height but differing hashes: the nodes are on forks.
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same best block
    """
    remaining = timeout
    while remaining > 0:
        hashes = [node.getbestblockhash() for node in rpc_connections]
        if hashes.count(hashes[0]) == len(hashes):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    remaining = timeout
    while remaining > 0:
        reference = set(rpc_connections[0].getrawmempool())
        if all(set(conn.getrawmempool()) == reference for conn in rpc_connections[1:]):
            # if flush_scheduler:
            #     for r in rpc_connections:
            #         r.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for idx, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return idx
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert confirmations_required >= 0
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and utxo:
        candidate = utxo.pop()
        total_in += candidate["amount"]
        inputs.append({"txid": candidate["txid"], "vout": candidate["vout"], "address": candidate["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    raw_tx = from_node.createrawtransaction(inputs, outputs)
    signed = from_node.signrawtransaction(raw_tx)
    txid = from_node.sendrawtransaction(signed["hex"], True)

    return (txid, signed["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Mine and split until `node` owns at least `count` confirmed utxos,
    then return the resulting listunspent entries."""
    # Mine roughly count/2 coinbases plus 101 blocks so they mature.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Split one utxo into two (fee deducted, value halved) per iteration.
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = float(satoshi_round(send_value / 2))
        outputs[addr2] = float(satoshi_round(send_value / 2))
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)

    # Mine until every splitting transaction is confirmed.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)

    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 zero-value OP_RETURN outputs of 512 data bytes each,
    prefixed with the output-count byte the caller splices into its tx."""
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 <512 bytes of 0x01>.
    script_pubkey = "6a4d0200" + "01" * 512
    # Each serialized txout: 8-byte value (zero) + compact-size script length
    # (fd0402 = 516) + the script itself.
    one_txout = "0000000000000000" + "fd0402" + script_pubkey
    return "81" + one_txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Spend vout 0 of ``coinbase`` to ``to_address`` and return the
    fully signed transaction as a hex string."""
    raw = node.createrawtransaction(
        [{"txid": coinbase, "vout": 0}],
        {to_address: amount},
    )
    signed = node.signrawtransaction(raw)
    # The wallet must be able to sign completely; otherwise the test is broken.
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Broadcast ``num`` large transactions and return their txids.

    Each transaction spends one utxo (popped from ``utxos``) back to a
    fresh address of ``node``, with the pre-built OP_RETURN outputs in
    ``txouts`` spliced into the raw hex to inflate its size.
    """
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = float(satoshi_round(change))
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice txouts into the serialized tx: hex chars 0..92 (46 bytes)
        # cover version + the single input; the byte at hex offset 92..94
        # is the output count, which txouts replaces with its own ("81").
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # SIGHASH "NONE": outputs are not covered by the signature, so the
        # splice above does not invalidate it.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 ~66k transactions and mine it.

    When ``utxos`` is supplied it is consumed in place; if it is None or
    holds fewer than 14 entries it is (re)filled from the node's wallet.
    """
    # 14 transactions of ~66k each come close to the 1MB block limit.
    num = 14
    if utxos is None:
        utxos = []
    if len(utxos) < num:
        # Not enough pre-supplied utxos: refill from the wallet in place.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, gen_return_txouts(), utxos, num, fee=fee)
    node.generate(1)
### DIGIWAGE specific utils ###

# Denominations (presumably zerocoin, per the vZC prefix — confirm).
vZC_DENOMS = [1, 5, 10, 50, 100, 500, 1000, 5000]
# Default transaction fee used throughout the functional tests.
DEFAULT_FEE = 0.01
# Epoch timestamps used to toggle sporks in tests; the deactivation time
# is far in the future (~year 2099), i.e. effectively "never".
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
"""Return Decimal from float for equality checks against rpc outputs"""
return Decimal("{:0.8f}".format(x))
| 38.043046 | 142 | 0.6501 |
853352959a7a13afe97c9dde1ff7127d059db19e | 4,365 | py | Python | test/global_test/test_global_version_match.py | gfer151/efs-utils | 1a374d7f60b1f48df22e6ae1dd218fabb2f80538 | [
"MIT"
] | null | null | null | test/global_test/test_global_version_match.py | gfer151/efs-utils | 1a374d7f60b1f48df22e6ae1dd218fabb2f80538 | [
"MIT"
] | null | null | null | test/global_test/test_global_version_match.py | gfer151/efs-utils | 1a374d7f60b1f48df22e6ae1dd218fabb2f80538 | [
"MIT"
] | null | null | null | #
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import os
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
# RPM spec file whose Version/Release tags must match config.ini.
SPEC_FILE = 'dist/amazon-efs-utils.spec'
# Other files embedding a VERSION/Version string to cross-check.
# ('build-deb.sh' was previously listed twice; the duplicate only re-ran
# the identical check and has been removed.)
NON_SPEC_FILE_LIST = ['build-deb.sh', 'src/watchdog/__init__.py', 'src/mount_efs/__init__.py',
                      'dist/amazon-efs-utils.control']
# Repo-root config holding the canonical version/release values.
GLOBAL_CONFIG = 'config.ini'
def test_spec_file_version_release_match():
    """The spec file's Version/Release tags must equal the global config."""
    expected_version = get_global_value('version')
    expected_release = get_global_value('release')
    actual_version = get_version_for_file(SPEC_FILE)
    actual_release = get_release_for_file(SPEC_FILE)
    assert actual_version == expected_version, \
        'version in {} is {}, does not match global version {}'.format(SPEC_FILE, actual_version, expected_version)
    assert actual_release == expected_release, \
        'release in {} is {}, does not match global release {}'.format(SPEC_FILE, actual_release, expected_release)
def test_non_spec_file_version_release_match():
    """Every non-spec file must embed the combined version-release string."""
    expected = get_expected_version_release()
    for path in NON_SPEC_FILE_LIST:
        found = get_version_for_file(path)
        assert found == expected, 'version-release in {} is {}, does not match global version {}'\
            .format(path, found, expected)
def test_changelog_version_match():
    """The newest %changelog entry in the spec must carry the current version."""
    expected = get_expected_version_release()
    found = get_version_for_changelog(SPEC_FILE)
    assert found is not None and found == expected, \
        'version in {} is {}, does not match expected_version_release {}, you need to add changelog in the spec file'\
        .format(SPEC_FILE, found, expected)
def get_expected_version_release():
    """Return '<version>-<release>' as configured in the global config."""
    return '{}-{}'.format(get_global_value('version'), get_global_value('release'))
def get_version_for_changelog(file_path):
    """Return the version-release from the newest changelog entry of an RPM
    spec file, or None if there is none.

    The value is the last whitespace-separated token of the first '*'
    header line that follows '%changelog'.
    """
    root = uppath(os.path.abspath(__file__), 3)
    in_changelog = False
    with open(os.path.join(root, file_path)) as fp:
        for line in fp:
            if line.startswith('%changelog'):
                in_changelog = True
            if in_changelog and line.startswith('*'):
                return line.split(' ')[-1].strip()
    return None
def get_version_for_file(file_path):
    """Return the version string declared in repo-relative ``file_path``.

    Understands both shell/python style ``VERSION='x.y'`` lines and
    RPM/deb style ``Version: x.y`` lines; returns None when neither occurs.
    """
    root = uppath(os.path.abspath(__file__), 3)
    with open(os.path.join(root, file_path)) as fp:
        for line in fp:
            if line.startswith('VERSION'):
                return line.split('=')[1].strip().replace("'", '')
            if line.startswith('Version'):
                return line.split(':')[1].strip()
    return None
def get_release_for_file(file_path):
    """Return the Release tag from repo-relative ``file_path`` with any RPM
    macro suffix (%{...}) stripped, or None if there is no Release line."""
    root = uppath(os.path.abspath(__file__), 3)
    with open(os.path.join(root, file_path)) as fp:
        for line in fp:
            if line.startswith('Release'):
                return line.split(':')[1].strip().split('%')[0]
    return None
def get_global_value(key):
    """Read ``key`` from the [global] section of config.ini at the repo root."""
    # The repo root is three directories above this test file.
    mount_helper_root_folder = uppath(os.path.abspath(__file__), 3)
    config_file = os.path.join(mount_helper_root_folder, GLOBAL_CONFIG)
    cp = read_config(config_file)
    value = str(cp.get('global', key))
    return value
# Given: path : file path
#        n : the number of parent level we want to reach
# Returns: parent path of certain level n
# Example: uppath('/usr/lib/java', 1) -> '/usr/lib'
#          uppath('/usr/lib/java', 2) -> '/usr'
def uppath(path, n):
    """Return the n-th parent directory of ``path`` (string-based)."""
    components = path.split(os.sep)
    # Dropping the trailing n components mirrors the original [:-n] slice,
    # including the n == 0 edge case (everything removed -> '').
    del components[len(components) - n:]
    return os.sep.join(components)
def read_config(config_file):
    """Parse ``config_file`` and return the parser, under Python 2 or 3.

    On Python 2 the try/except import at the top of this module bound
    ``ConfigParser`` to the *module*, so ``SafeConfigParser()`` works.
    On Python 3 the fallback bound it to the *class*, making the
    attribute access raise AttributeError, in which case the class is
    instantiated directly.
    """
    try:
        p = ConfigParser.SafeConfigParser()
    except AttributeError:
        p = ConfigParser()
    p.read(config_file)
    return p
| 36.375 | 130 | 0.707446 |
8cf575f3bfe1f448e5f1ee27e878312239c14078 | 2,854 | py | Python | topdown/topdown_classifier.py | microsoft/NTT | e23a63791c3abd95cc21df60b705f3e1417acf23 | [
"MIT"
] | 4 | 2021-07-19T15:24:37.000Z | 2022-02-28T01:28:11.000Z | topdown/topdown_classifier.py | microsoft/NTT | e23a63791c3abd95cc21df60b705f3e1417acf23 | [
"MIT"
] | null | null | null | topdown/topdown_classifier.py | microsoft/NTT | e23a63791c3abd95cc21df60b705f3e1417acf23 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------------
# Copyright (c) 2021 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------------------------------
import torchvision
import torch
import torch.nn as nn
class TopdownClassifier(nn.Module):
    """Binary (2-class) classifier: pretrained VGG16 with a frozen
    convolutional backbone and a rebuilt classification head."""

    def __init__(self, device, dropout=0.0, hidden_size=0):
        """
        :param device: torch device the model is moved to.
        :param dropout: extra dropout probability inserted before the head.
        :param hidden_size: if > 0, adds an extra Linear bottleneck of this
            size before the final 2-way output layer.
        """
        super(TopdownClassifier, self).__init__()
        self.device = device
        # Define topdown model
        self.vgg16 = torchvision.models.vgg16(pretrained=True)
        # freeze convolution weights (only the classifier head trains)
        for param in self.vgg16.features.parameters():
            param.requires_grad = False
        # in_features of the original final Linear (4096 for stock VGG16)
        self.num_features = self.vgg16.classifier[6].in_features
        # Keep all classifier children except the last two (the final
        # Dropout + Linear pair), which are replaced below.
        self.features = list(
            self.vgg16.classifier.children())[
            :-2]
        self.features.extend([nn.Dropout(dropout)])
        if hidden_size > 0:
            # Optional bottleneck layer before the output layer.
            self.features.extend([nn.Linear(self.num_features, hidden_size)])
            self.num_features = hidden_size
        # Add our layer with 2 outputs
        self.features.extend([nn.Linear(self.num_features, 2)])
        self.vgg16.classifier = nn.Sequential(
            *self.features)  # Replace the model classifier
        self.model = self.vgg16.to(device)

    def forward(self, x):
        # Returns raw 2-class logits (no softmax; CrossEntropyLoss expects logits).
        return self.model(x)

    def loss_function(self, x, y):
        # x: logits; y: class indices, cast to long as CrossEntropyLoss requires.
        return nn.CrossEntropyLoss()(x, y.long())

    def correct_predictions(self, model_output, labels):
        # Count argmax predictions that match the integer labels.
        _, predictions = torch.max(model_output.data, 1)
        return (predictions == labels).sum().item()

    def load_state_dict(self, f):
        # Delegate to the wrapped model so checkpoints saved from
        # self.model load transparently through this class.
        self.model.load_state_dict(f)
| 43.907692 | 100 | 0.649965 |
ef9928f1e5b8339ff86f2983e8949fa7568653e6 | 179 | py | Python | Resume_Project/custom_storage.py | kannuprieya/resume-builder | a53f9286233e90c9d749db51abe2ac7c0ad99fb2 | [
"MIT"
] | 2 | 2020-09-03T17:37:37.000Z | 2021-03-19T09:27:01.000Z | Resume_Project/custom_storage.py | kannuprieya/resume-builder | a53f9286233e90c9d749db51abe2ac7c0ad99fb2 | [
"MIT"
] | null | null | null | Resume_Project/custom_storage.py | kannuprieya/resume-builder | a53f9286233e90c9d749db51abe2ac7c0ad99fb2 | [
"MIT"
] | 1 | 2021-11-25T16:24:22.000Z | 2021-11-25T16:24:22.000Z | from storages.backends.s3boto3 import S3Boto3Storage
class PublicMediaStorage(S3Boto3Storage):
    """Django storage backend serving user media from S3 with public access."""
    location = 'media'  # S3 key prefix for uploaded files
    default_acl = 'public-read'  # uploaded objects are world-readable
    file_overwrite = False  # never clobber existing objects; rename on collision
0e9b27f54ab1d8d830c3f33f199ca61919fe7109 | 3,328 | py | Python | wltoys-v202-micropython/server.py | nobotro/mkimax | 557f5e570e2fb373f88f5f0f7254c48e90106de0 | [
"MIT"
] | 2 | 2020-10-15T18:12:08.000Z | 2021-11-12T08:57:56.000Z | wltoys-v202-micropython/server.py | nobotro/mkimax | 557f5e570e2fb373f88f5f0f7254c48e90106de0 | [
"MIT"
] | null | null | null | wltoys-v202-micropython/server.py | nobotro/mkimax | 557f5e570e2fb373f88f5f0f7254c48e90106de0 | [
"MIT"
] | 1 | 2020-04-10T08:43:04.000Z | 2020-04-10T08:43:04.000Z | import machine
import socket
import utime
import network
import rc
import time
machine.freq(160000000)
steer_key=None
gear=1
do_steer=False
#steer_directio ,[0-127] steer right, [128-255] steer left
#speed_direction ,[0-127] going forward, [128-255] going backward
steering_step=30
steering_trim=124
always_exec_keys=['Key.up','Key.down','Key.left','Key.right']
state=[0,0] #[speed_value,steer_value,steer trim]
def get_next_steer():
    """Ramp the steering value one step toward its side's limit.

    Values 0-127 steer right (capped at 127); 128-255 steer left
    (capped at 255).
    """
    global steer_key, do_steer, state, gear, steering_step
    limit = 127 if state[1] < 128 else 255
    state[1] = min(state[1] + steering_step, limit)
def driver():
    """Main control loop: bring up a WiFi access point, receive keyboard
    events over UDP (port 1327) and translate them into v202 RC commands.

    Datagram protocol (from the sender): "'w'" etc. for a key press,
    "!'w'" for its release, "Key.up"/"Key.down"/"Key.left"/"Key.right"
    for gear and trim adjustment.
    """
    global steer_key, do_steer, state, gear, steering_trim, always_exec_keys
    ap = network.WLAN(network.AP_IF)  # create access-point interface
    ap.active(True)  # activate the interface
    ap.config(essid='mkimax', authmode=3, password='dubarazarodubara')  # set the ESSID of the access point
    UDP_IP = "0.0.0.0"
    UDP_PORT = 1327
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((UDP_IP, UDP_PORT))
    # (Disabled) optional hand-off to station mode: would join an existing
    # WiFi network using credentials received in the first datagram.
    # data, addr = sock.recvfrom(1024)
    # data=data.decode().split(';')
    # print(str(data))
    # if data[0]=="1":
    #     sock.close()
    #     ap.active(False)
    #     wlan = network.WLAN(network.STA_IF)
    #     wlan.active(True)
    #     wlan.scan()
    #     wlan.connect(data[1], data[2])
    #     while not wlan.isconnected():
    #         pass
    #     wlan.config("mac")
    #     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    #     sock.sendto("hello".encode(),(data[3],int(data[4])))
    # Non-blocking socket: the v202 RC link must be serviced every pass.
    sock.setblocking(0)
    prev_data = None
    rc.setup()
    rc.bind()
    while True:
        # do v202 stuffs in loop
        rc.process(state[1], state[0], steering_trim)
        # if steer key pressed, steering step by step
        if do_steer:
            get_next_steer()
        data = None
        try:
            data, addr = sock.recvfrom(1024)
            data = data.decode()
            if data not in always_exec_keys:
                # Drop key-repeat datagrams for ordinary keys.
                if data == prev_data:
                    continue
                else:
                    prev_data = data
        except:
            # NOTE(review): bare except assumed to mean "no datagram pending"
            # on the non-blocking socket; it also hides decode errors.
            pass
        if not data:
            continue
        key = data
        print(key)
        # NOTE(review): key tokens include quote characters ("'d'") while
        # steer_key stores a bare 'd', so this comparison never matches —
        # presumably meant to skip repeats; verify.
        if key == steer_key:
            continue
        elif key == "'w'":
            # Forward: speed command 0-127, scaled by gear.
            state[0] = 30 * gear
        elif key == "'s'":
            # Backward: speed command 128-255.
            state[0] = 128 + (30 * gear)
        elif key == "'d'":
            # Start steering right from neutral (0).
            steer_key = 'd'
            state[1] = 0
            do_steer = True
        elif key == "'a'":
            # Start steering left from neutral (128).
            steer_key = 'a'
            state[1] = 128
            do_steer = True
        elif key == "!'w'":
            state[0] = 0
        elif key == "!'s'":
            state[0] = 0
        elif key == "!'d'":
            if steer_key == 'd':
                do_steer = False
                state[1] = 0
        elif key == "!'a'":
            if steer_key == 'a':
                do_steer = False
                state[1] = 0
        elif key == "Key.up":
            if gear < 4:
                gear += 1
                # to change gear in runtime
                if state[0] != 0:
                    state[0] += 30
        elif key == "Key.down":
            if gear > 1:
                gear -= 1
                # to change gear in runtime
                if state[0] != 0:
                    state[0] -= 30
        elif key == "Key.left":
            if steering_trim >= 10:
                steering_trim -= 2
        elif key == "Key.right":
            if steering_trim <= 245:
                steering_trim += 2
56810de10dce0f582e67f12cc50ad10eac2c82d2 | 2,814 | py | Python | userbot/modules/salam.py | dadkuy/Man-Userbot | 89720af8cacf40443a4a18106bcb2bf7a646acd3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/salam.py | dadkuy/Man-Userbot | 89720af8cacf40443a4a18106bcb2bf7a646acd3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/salam.py | dadkuy/Man-Userbot | 89720af8cacf40443a4a18106bcb2bf7a646acd3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | from time import sleep
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, man_cmd
@man_cmd(pattern="p(?: |$)(.*)")
async def _(event):
await event.client.send_message(
event.chat_id,
"**Assalamualaikum Om Tante**",
reply_to=event.reply_to_msg_id,
)
await event.delete()
@man_cmd(pattern="pe(?: |$)(.*)")
async def _(event):
await event.client.send_message(
event.chat_id,
"**Assalamualaikum Warahmatullahi Wabarakatuh**",
reply_to=event.reply_to_msg_id,
)
await event.delete()
@man_cmd(pattern="P(?: |$)(.*)")
async def _(event):
me = await event.client.get_me()
xx = await edit_or_reply(event, f"**Haii Salken Saya {me.first_name}**")
sleep(2)
await xx.edit("**Assalamualaikum...**")
@man_cmd(pattern="l(?: |$)(.*)")
async def _(event):
await event.client.send_message(
event.chat_id, "**Wa'alaikumsalam**", reply_to=event.reply_to_msg_id
)
await event.delete()
@man_cmd(pattern="a(?: |$)(.*)")
async def _(event):
me = await event.client.get_me()
xx = await edit_or_reply(event, f"**Haii Salken Saya {me.first_name}**")
sleep(2)
await xx.edit("**Assalamualaikum**")
@man_cmd(pattern="j(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, "**JAKA SEMBUNG BAWA GOLOK**")
sleep(3)
await xx.edit("**NIMBRUNG GOBLOKK!!!🔥**")
@man_cmd(pattern="k(?: |$)(.*)")
async def _(event):
me = await event.client.get_me()
xx = await edit_or_reply(event, f"**Hallo KIMAAKK SAYA {me.first_name}**")
sleep(2)
await xx.edit("**LU SEMUA NGENTOT 🔥**")
@man_cmd(pattern="ass(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, "**Salam Dulu Biar Sopan**")
sleep(2)
await xx.edit("**السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ**")
CMD_HELP.update(
{
"salam": f"**Plugin : **`salam`\
\n\n • **Syntax :** `{cmd}p`\
\n • **Function : **Assalamualaikum Dulu Biar Sopan..\
\n\n • **Syntax :** `{cmd}pe`\
\n • **Function : **salam Kenal dan salam\
\n\n • **Syntax :** `{cmd}l`\
\n • **Function : **Untuk Menjawab salam\
\n\n • **Syntax :** `{cmd}ass`\
\n • **Function : **Salam Bahas arab\
\n\n • **Syntax :** `{cmd}semangat`\
\n • **Function : **Memberikan Semangat.\
\n\n • **Syntax :** `{cmd}ywc`\
\n • **Function : **nMenampilkan Sama sama\
\n\n • **Syntax :** `{cmd}sayang`\
\n • **Function : **Kata I Love You.\
\n\n • **Syntax :** `{cmd}k`\
\n • **Function : **LU SEMUA NGENTOT 🔥\
\n\n • **Syntax :** `{cmd}j`\
\n • **Function : **NIMBRUNG GOBLOKK!!!🔥\
"
}
)
| 28.714286 | 78 | 0.556148 |
c2511f59edbcb7d41a2664baf6ff455ca5672ccb | 2,015 | py | Python | data_input.py | iwtw/tf_tools | 22185cbac7e365f5cbd8ab021159de77893fb0a7 | [
"MIT"
] | null | null | null | data_input.py | iwtw/tf_tools | 22185cbac7e365f5cbd8ab021159de77893fb0a7 | [
"MIT"
] | null | null | null | data_input.py | iwtw/tf_tools | 22185cbac7e365f5cbd8ab021159de77893fb0a7 | [
"MIT"
] | null | null | null | import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def parse_single_data(file_queue):
reader = tf.TFRecordReader()
key, value = reader.read(file_queue)
features = {
'label': tf.FixedLenFeature([], tf.int64),
#'img_raw': tf.FixedLenFeature([], tf.string)
'img_raw': tf.FixedLenFeature([], tf.string)
}
example = tf.parse_single_example(value, features=features)
image = tf.image.decode_image(example['img_raw'], 3)
print(tf.shape(image))
#image = tf.image.decode_image(example['image_raw'], 3)
#image = tf.decode_raw( example['img_raw'] , uint8 )
#image.set_shape([None, None, 3])
image.set_shape([None,None,3])
label = tf.cast(example['label'], tf.int32)
return image, label
def preprocessing(image, image_size, is_training ):
    """Image preprocessing hook.

    Currently a no-op passthrough: every transform below is disabled.
    Kept so callers have a stable hook to re-enable preprocessing later;
    ``image_size`` and ``is_training`` are presently unused.
    """
    # Disabled: resize to target size.
    # image = tf.image.resize_images(image, image_size, tf.image.ResizeMethod.BILINEAR)
    # Disabled: training-time augmentation.
    #if is_training:
    #image = tf.image.random_flip_left_right(image)
    # image = tf.cast( image , tf.float32 )
    # Disabled: per-image standardization variant.
    #Ex = tf.reduce_mean(image)
    #Ex2 = tf.reduce_mean(image**2)
    #variance = Ex2 - Ex**2
    #image_ = ( image - tf.reduce_mean( image ) ) / tf.sqrt(variance)
    # Disabled: [-1, 1] scaling variant.
    # image_ = image /127.5 - 1.
    #image_ = image
    return image
def get_batch(file_queue, image_size, batch_size, n_threads, min_after_dequeue, is_training ):
    """Build a shuffled input batch using ``n_threads`` parallel readers.

    Returns (image_batch, label_batch) with shapes
    [batch_size, image_size[0], image_size[1], 3] and [batch_size].
    """
    t_list = []
    for i in range(n_threads):
        # One independent reader per thread; shuffle_batch_join merges them.
        image, label = parse_single_data(file_queue)
        #image = preprocessing(image, image_size, is_training = is_training)
        # Fix the static spatial shape so batching is well-defined.
        image.set_shape([image_size[0], image_size[1], 3])
        t_list.append([image,label])
    #batch images
    # Capacity per the recommended formula: min_after_dequeue plus
    # headroom for the producer threads.
    capacity = min_after_dequeue + (n_threads + 5) * batch_size
    image_batch, label_batch = tf.train.shuffle_batch_join(
        t_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue,#enqueue_many=True,
        name='data')
    return image_batch, label_batch
| 34.152542 | 95 | 0.648139 |
85b6c0e584286986885c0a50f7b19d458d93dd7c | 3,711 | py | Python | ansible_mitogen/loaders.py | Rosa-Luxemburgstiftung-Berlin/mitogen | a564d8a268afe89ab73423f23fb9191c786f511f | [
"BSD-3-Clause"
] | null | null | null | ansible_mitogen/loaders.py | Rosa-Luxemburgstiftung-Berlin/mitogen | a564d8a268afe89ab73423f23fb9191c786f511f | [
"BSD-3-Clause"
] | null | null | null | ansible_mitogen/loaders.py | Rosa-Luxemburgstiftung-Berlin/mitogen | a564d8a268afe89ab73423f23fb9191c786f511f | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Stable names for PluginLoader instances across Ansible versions.
"""
from __future__ import absolute_import
import ansible_mitogen.utils
__all__ = [
'action_loader',
'connection_loader',
'module_loader',
'module_utils_loader',
'shell_loader',
'strategy_loader',
]
ANSIBLE_VERSION_MIN = (2, 10)
ANSIBLE_VERSION_MAX = (2, 11)
NEW_VERSION_MSG = (
"Your Ansible version (%s) is too recent. The most recent version\n"
"supported by Mitogen for Ansible is %s.x. Please check the Mitogen\n"
"release notes to see if a new version is available, otherwise\n"
"subscribe to the corresponding GitHub issue to be notified when\n"
"support becomes available.\n"
"\n"
" https://mitogen.rtfd.io/en/latest/changelog.html\n"
" https://github.com/mitogen-hq/mitogen/issues/\n"
)
OLD_VERSION_MSG = (
"Your version of Ansible (%s) is too old. The oldest version supported by "
"Mitogen for Ansible is %s."
)
def assert_supported_release():
    """
    Throw AnsibleError with a descriptive message in case of being loaded into
    an unsupported Ansible release.
    """
    # Bind the `ansible` name locally: the module-level imports here do
    # not import ansible.errors, so raising below would otherwise fail
    # with a NameError instead of the intended AnsibleError.
    import ansible.errors

    v = ansible_mitogen.utils.ansible_version
    if v[:2] < ANSIBLE_VERSION_MIN:
        raise ansible.errors.AnsibleError(
            OLD_VERSION_MSG % (v, ANSIBLE_VERSION_MIN)
        )
    if v[:2] > ANSIBLE_VERSION_MAX:
        raise ansible.errors.AnsibleError(
            NEW_VERSION_MSG % (v, ANSIBLE_VERSION_MAX)
        )
# this is the first file our strategy plugins import, so we need to check this here
# in prior Ansible versions, connection_loader.get_with_context didn't exist, so if a user
# is trying to load an old Ansible version, we'll fail and error gracefully
assert_supported_release()

from ansible.plugins.loader import action_loader
from ansible.plugins.loader import connection_loader
from ansible.plugins.loader import module_loader
from ansible.plugins.loader import module_utils_loader
from ansible.plugins.loader import shell_loader
from ansible.plugins.loader import strategy_loader

# These are original, unwrapped implementations
# (saved before any monkey-patching so wrappers can delegate to them)
action_loader__get = action_loader.get
connection_loader__get = connection_loader.get_with_context
| 37.484848 | 90 | 0.759634 |
cd62ef24e0c5729d3a29d8725f5b101e032c1c93 | 1,398 | py | Python | validation_utils/convert_ground_truth_labels/convert_ground_truth_labels.py | codestorm04/keras-inceptionV4 | c97a549508ff6bf863d3506263618d056e62edf4 | [
"Apache-2.0"
] | 495 | 2017-01-26T10:42:46.000Z | 2022-03-18T13:45:44.000Z | validation_utils/convert_ground_truth_labels/convert_ground_truth_labels.py | codestorm04/keras-inceptionV4 | c97a549508ff6bf863d3506263618d056e62edf4 | [
"Apache-2.0"
] | 28 | 2017-01-26T10:04:46.000Z | 2021-04-28T02:40:19.000Z | validation_utils/convert_ground_truth_labels/convert_ground_truth_labels.py | codestorm04/keras-inceptionV4 | c97a549508ff6bf863d3506263618d056e62edf4 | [
"Apache-2.0"
] | 213 | 2017-01-26T16:58:30.000Z | 2022-02-28T23:20:06.000Z | '''
Copyright 2017 Kent Sommer
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# NOTE(review): eval() of a file is a code-execution risk; only run with a
# trusted correct_classes.txt (expected to contain a dict literal).
classes = eval(open('correct_classes.txt', 'r').read())

correct_classes = {}  # class name -> new (correct) label id
old_classes = {}      # old label id -> class name
correct_labels = []

# Parse "index uid name" rows; names use '_' for spaces in the raw file.
lines = open('old_classes_raw.txt').read().splitlines()
for line in lines:
    _, uid, name = line.split()
    name = name.replace("_", " ")
    name = name.lower()
    uid = int(uid)
    old_classes[uid] = name

# Invert the correct mapping: first comma-separated synonym -> uid.
# NOTE(review): .iteritems() is Python 2 only; use .items() under Python 3.
for key, value in classes.iteritems():
    uid = key
    name = value.split(",")[0]
    name = name.lower()
    correct_classes[name] = uid

# Translate each ground-truth label via the shared class name.
lines = open('val_ground_truth_labels.txt').read().splitlines()
for line in lines:
    key = int(line)
    name = old_classes[key]
    new_label = correct_classes[name]
    print("Old label was: ", key, ". New label is: ", new_label)
    correct_labels.append(new_label)

print("Total labels = ", len(correct_labels))
# Write the converted labels, one per line.
f = open('../GTL.txt', 'w')
for label in correct_labels:
    f.write(str(label) + '\n')
f.close()
| 26.884615 | 72 | 0.719599 |
f79192db1b2cf94bc217c9833bc401bb525e9034 | 2,260 | py | Python | test/python/WMCore_t/Cache_t/GenericDataCache_t.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 1 | 2015-02-05T13:43:46.000Z | 2015-02-05T13:43:46.000Z | test/python/WMCore_t/Cache_t/GenericDataCache_t.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 1 | 2016-10-13T14:57:35.000Z | 2016-10-13T14:57:35.000Z | test/python/WMCore_t/Cache_t/GenericDataCache_t.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | null | null | null | """
_WMConfigCache_t_
Test class for the WMConfigCache
"""
from __future__ import print_function, division
import unittest
import time
from WMCore.Cache.GenericDataCache import GenericDataCache, CacheExistException, \
CacheWithWrongStructException, MemoryCacheStruct
class Foo(object):
    # Trivial marker type used to verify initCacheValue handling below.
    pass
class GenericDataCacheTest(unittest.TestCase):
    """Unit tests for GenericDataCache / MemoryCacheStruct."""

    def testBasic(self):
        """
        _testBasic_

        Registration, duplicate/invalid registration, and expiry refresh.
        """
        # 1s expiry; the update function ignores its kwarg and returns "now".
        mc = MemoryCacheStruct(1, lambda x: int(time.time()), kwargs={'x': 1})
        # Before the first getData the cache is empty and never updated.
        self.assertIsNone(mc.data)
        self.assertEqual(mc.lastUpdated, -1)
        GenericDataCache.registerCache("test", mc)
        # Registering the same name twice must fail.
        with self.assertRaises(CacheExistException):
            GenericDataCache.registerCache("test", mc)
        # Only MemoryCacheStruct-shaped objects may be registered.
        with self.assertRaises(CacheWithWrongStructException):
            GenericDataCache.registerCache("test2", {'a': 1})
        mc2 = GenericDataCache.getCacheData('test')
        before = mc2.getData()
        # Sleep past the 1s expiry so the next read refreshes the value.
        time.sleep(2)
        after = mc2.getData()
        self.assertFalse(before == after)
        self.assertFalse(mc2.lastUpdated == -1)
        return

    def testBasicInit(self):
        """
        _testBasicInit_

        Test init values
        """
        # initCacheValue is stored as-is before any update runs.
        mc = MemoryCacheStruct(0, lambda x: x, initCacheValue=Foo())
        self.assertIsInstance(mc.data, Foo)
        mc1 = MemoryCacheStruct(0, lambda x: sum(x), [], kwargs={'x': [1, 2]})
        self.assertEqual(mc1.data, [])
        after = mc1.getData()
        self.assertEqual(after, 3)
        mc2 = MemoryCacheStruct(0, lambda x: x, {}, kwargs={'x': {'one': 1, 'two': 2}})
        self.assertEqual(mc2.data, {})
        after = mc2.getData()
        # NOTE(review): assertItemsEqual is Python 2 only; under Python 3
        # this would be assertCountEqual — confirm target interpreter.
        self.assertItemsEqual(after.keys(), ['one', 'two'])
        return

    def testExists(self):
        """
        Tests whether a given cache already exists
        """
        mc = MemoryCacheStruct(1, lambda x: int(time.time()), kwargs={'x': 1})
        self.assertFalse(GenericDataCache.cacheExists("tCache"))
        GenericDataCache.registerCache("tCache", mc)
        self.assertTrue(GenericDataCache.cacheExists("tCache"))
        self.assertFalse(GenericDataCache.cacheExists("tCache2"))
if __name__ == "__main__":
unittest.main()
| 28.974359 | 85 | 0.623009 |
aa23dc39b552850b14e360de12b1c8df193e0c13 | 913 | py | Python | day-3-crossed-wires/one.py | dylannakahodo/advent-of-code-2019 | fe5939c656b9b6a93acfb638bace069c1bf60da8 | [
"MIT"
] | null | null | null | day-3-crossed-wires/one.py | dylannakahodo/advent-of-code-2019 | fe5939c656b9b6a93acfb638bace069c1bf60da8 | [
"MIT"
] | null | null | null | day-3-crossed-wires/one.py | dylannakahodo/advent-of-code-2019 | fe5939c656b9b6a93acfb638bace069c1bf60da8 | [
"MIT"
] | null | null | null | def manhattan_distance(point):
return abs(point[0]) + abs(point[1])
def parse_wire_positions(wire_path):
    """Trace a wire path and return the set of grid points it occupies.

    ``wire_path`` is a list of direction vectors such as ["R8", "U5"]:
    a letter (R/L/U/D) followed by a step count.  The origin itself is
    not included in the result.
    """
    x, y = 0, 0
    wire_positions = set()
    directions = {"R": (1, 0), "L": (-1, 0), "U": (0, 1), "D": (0, -1)}
    for vector in wire_path:
        # The direction is fixed per segment; look it up once instead of
        # on every step of the inner loop.
        dx, dy = directions[vector[0]]
        for _ in range(int(vector[1:])):
            x += dx
            y += dy
            wire_positions.add((x, y))
    return wire_positions
if __name__ == "__main__":
with open('input.txt') as f:
wire_1_path = f.readline().strip().split(',')
wire_2_path = f.readline().strip().split(',')
wire_1_points = parse_wire_positions(wire_1_path)
wire_2_points = parse_wire_positions(wire_2_path)
intersections = wire_1_points.intersection(wire_2_points)
closest = min(map(manhattan_distance,intersections))
print(closest) | 29.451613 | 67 | 0.607886 |
bb1e293722135570e046cf8380a29e352207ed24 | 2,700 | py | Python | utils/test.py | moshe-kabala/BRemesher | b0ad8282cd3fa0f94aa6f862a25cb3c78b4b0cdd | [
"MIT"
] | 1 | 2019-11-15T09:11:03.000Z | 2019-11-15T09:11:03.000Z | utils/test.py | moshe-kabala/BRemesher | b0ad8282cd3fa0f94aa6f862a25cb3c78b4b0cdd | [
"MIT"
] | null | null | null | utils/test.py | moshe-kabala/BRemesher | b0ad8282cd3fa0f94aa6f862a25cb3c78b4b0cdd | [
"MIT"
] | 1 | 2019-11-25T13:59:26.000Z | 2019-11-25T13:59:26.000Z | import bpy
import bmesh
from .. utils.applay_map import applay_map
from .. algorithms.map_vertex import map_vertex_by_face
# task
# show the vertex color

# Name (and bpy.data.materials lookup key) of the vertex-color material.
VERTEX_COLOR_MAT_NAME = "VERTEX_COLOR_MAT_NAME"
def clear_all():
    """Select and delete every object in the current Blender scene."""
    bpy.ops.object.select_all(action="DESELECT")
    bpy.ops.object.select_all()
    bpy.ops.object.delete(use_global=False, confirm=False)
def set_environment():
    """
    Prepare the scene: intended to create the material and switch the
    viewport shading mode.  Currently a stub — both steps are disabled.
    """
    # bpy.data.screens["Scripting"].shading.type = 'MATERIAL'
    # create material
    pass
def get_vertex_mat():
    """Return the shared vertex-color material, creating it on first use.

    The material routes the 'Col' vertex-color attribute straight into
    the material output, so vertex colors show up in material shading.
    """
    # Use the constant instead of the duplicated string literal so a
    # rename stays consistent with the creation below.
    mat = bpy.data.materials.get(VERTEX_COLOR_MAT_NAME)
    if mat is None:
        # create material
        mat = bpy.data.materials.new(name=VERTEX_COLOR_MAT_NAME)
        mat.use_nodes = True
        matnodes = mat.node_tree.nodes
        # new attribute node reading the 'Col' vertex-color layer
        node = matnodes.new('ShaderNodeAttribute')
        node.attribute_name = "Col"
        # Wire the attribute into Material Output input 0 (Surface — the
        # original name 'disp'/"displacement" was misleading).
        surface = matnodes['Material Output'].inputs[0]
        mat.node_tree.links.new(surface, node.outputs[0])
    return mat
def assign_mat(obj, mat):
    """Attach ``mat`` to ``obj``'s first material slot, creating one if needed."""
    # Assign it to object
    if obj.data.materials:
        # assign to 1st material slot
        obj.data.materials[0] = mat
    else:
        # no slots
        obj.data.materials.append(mat)
def preper_obj(obj, save_edges=False, smooth=0):
    """Densify ``obj`` and attach the shared vertex-color material.

    Applies a level-3 subdivision-surface modifier, runs a dyntopo
    optimize pass in sculpt mode, enables smooth shading and assigns the
    vertex-color material.

    save_edges: use SIMPLE (non-rounding) subdivision so hard edges keep
    their shape.  ``smooth`` is currently unused.
    """
    obj.modifiers.new(name="sub", type='SUBSURF')
    obj.modifiers["sub"].levels = 3
    if save_edges:
        # SIMPLE subdivision adds geometry without smoothing corners.
        obj.modifiers["sub"].subdivision_type = 'SIMPLE'
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="sub")
    # Rebuild topology via sculpt-mode dynamic-topology optimize.
    bpy.ops.sculpt.sculptmode_toggle()
    bpy.ops.sculpt.dynamic_topology_toggle()
    bpy.ops.sculpt.optimize()
    bpy.ops.sculpt.sculptmode_toggle()
    # The flat-shading alternative sat behind a dead `if True:` branch;
    # smooth shading is always used.
    bpy.ops.object.shade_smooth()
    # assign the material created by get_vertex_mat()
    mat = get_vertex_mat()
    assign_mat(obj, mat)
    return obj
def add_monky(location=(0, 0, 0)):
    """Add a Suzanne (monkey) mesh at ``location``, prepared via preper_obj."""
    bpy.ops.mesh.primitive_monkey_add(
        size=2, enter_editmode=False, location=location)
    monk = bpy.context.object
    preper_obj(monk)
    return monk
def add_cube(location=(0, 0, 0)):
    """Add a cube at ``location``, prepared with save_edges=True so its
    corners survive the subdivision."""
    bpy.ops.mesh.primitive_cube_add(
        size=2, enter_editmode=False, location=location)
    cube = bpy.context.object
    preper_obj(cube, True)
    return cube
def test_map(obj):
    """Compute a per-face vertex mapping for *obj* and apply it.

    NOTE(review): `applay_map` is neither defined nor imported in the visible
    part of this module -- confirm it is imported near the top alongside
    `map_vertex_by_face`. Also note `min`/`max` shadow the builtins here.
    """
    bm = bmesh.new()  # create an empty BMesh
    bm.from_mesh(obj.data)  # fill it in from a Mesh
    # Map each vertex; returns the mapping plus its value range.
    map_data, min, max = map_vertex_by_face(bm)
    # Apply the computed map to the object (e.g. as vertex colors).
    applay_map(obj, map_data, min, max)
    bm.free()
| 24.770642 | 66 | 0.669259 |
2f34194bfc45474fd58fa7235552cb574062b17d | 3,987 | py | Python | salt/states/selinux.py | jkur/salt | 3e62675550f9869d550d7787800270e632955d2f | [
"Apache-2.0"
] | 3 | 2016-09-03T06:26:42.000Z | 2019-06-30T13:04:53.000Z | salt/states/selinux.py | jkur/salt | 3e62675550f9869d550d7787800270e632955d2f | [
"Apache-2.0"
] | 16 | 2015-11-18T00:44:03.000Z | 2018-10-29T20:48:27.000Z | salt/states/selinux.py | jkur/salt | 3e62675550f9869d550d7787800270e632955d2f | [
"Apache-2.0"
] | 3 | 2021-02-23T08:12:48.000Z | 2021-02-23T08:13:13.000Z | # -*- coding: utf-8 -*-
'''
Management of SELinux rules
===========================
If SELinux is available for the running system, the mode can be managed and
booleans can be set.
.. code-block:: yaml
enforcing:
selinux.mode
samba_create_home_dirs:
selinux.boolean:
- value: True
- persist: True
.. note::
    Use of these states requires that the :mod:`selinux <salt.modules.selinux>`
execution module is available.
'''
def __virtual__():
    '''
    Only make this state available if the selinux module is available.
    '''
    if 'selinux.getenforce' in __salt__:
        return 'selinux'
    return False
def _refine_mode(mode):
'''
Return a mode value that is predictable
'''
mode = str(mode).lower()
if any([mode.startswith('e'),
mode == '1',
mode == 'on']):
return 'Enforcing'
if any([mode.startswith('p'),
mode == '0',
mode == 'off']):
return 'Permissive'
return 'unknown'
def _refine_value(value):
'''
Return a yes/no value, or None if the input is invalid
'''
value = str(value).lower()
if value in ('1', 'on', 'yes', 'true'):
return 'on'
if value in ('0', 'off', 'no', 'false'):
return 'off'
return None
def mode(name):
    '''
    Verifies the mode SELinux is running in, can be set to enforcing or
    permissive

    name
        The mode to run SELinux in, permissive or enforcing
    '''
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {}}
    target = _refine_mode(name)
    # Reject anything that doesn't normalize to Enforcing/Permissive.
    if target == 'unknown':
        ret['comment'] = '{0} is not an accepted mode'.format(name)
        return ret
    current = __salt__['selinux.getenforce']()
    if current == target:
        # Already in the requested mode -- nothing to do.
        ret['result'] = True
        ret['comment'] = 'SELinux is already in {0} mode'.format(target)
        return ret
    if __opts__['test']:
        # Dry run: report the pending change without applying it.
        ret['result'] = None
        ret['comment'] = 'SELinux mode is set to be changed to {0}'.format(
            target)
        return ret
    if __salt__['selinux.setenforce'](target) == target:
        ret['result'] = True
        ret['comment'] = 'SELinux has been set to {0} mode'.format(target)
        return ret
    ret['comment'] = 'Failed to set SELinux to {0} mode'.format(target)
    return ret
def boolean(name, value, persist=False):
    '''
    Set up an SELinux boolean
    name
        The name of the boolean to set
    value
        The value to set on the boolean
    persist
        Defaults to False, set persist to true to make the boolean apply on a
        reboot
    '''
    # Standard salt state return structure.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    bools = __salt__['selinux.list_sebool']()
    if name not in bools:
        ret['comment'] = 'Boolean {0} is not available'.format(name)
        ret['result'] = False
        return ret
    # Normalize the requested value to 'on'/'off'; None means invalid input.
    rvalue = _refine_value(value)
    if rvalue is None:
        ret['comment'] = '{0} is not a valid value for the ' \
                         'boolean'.format(value)
        ret['result'] = False
        return ret
    # state: the current runtime value matches; default: the persisted
    # (boot-time) value matches.
    state = bools[name]['State'] == rvalue
    default = bools[name]['Default'] == rvalue
    if persist:
        # With persist, both runtime and boot-time values must match.
        if state and default:
            ret['comment'] = 'Boolean is in the correct state'
            return ret
    else:
        if state:
            ret['comment'] = 'Boolean is in the correct state'
            return ret
    if __opts__['test']:
        # Dry run: report the pending change without applying it.
        ret['result'] = None
        ret['comment'] = 'Boolean {0} is set to be changed to {1}'.format(
            name, rvalue)
        return ret
    if __salt__['selinux.setsebool'](name, rvalue, persist):
        ret['comment'] = 'Boolean {0} has been set to {1}'.format(name, rvalue)
        return ret
    ret['comment'] = 'Failed to set the boolean {0} to {1}'.format(name, rvalue)
    return ret
| 27.122449 | 80 | 0.556057 |
ffef35b7acc2769495a91e412fa2373552a2f71e | 10,771 | py | Python | python/paddle/fluid/trainer_desc.py | laipaang/Paddle | d7f35434b761707a8479b75636546a624399369a | [
"Apache-2.0"
] | 1 | 2020-06-24T14:53:24.000Z | 2020-06-24T14:53:24.000Z | python/paddle/fluid/trainer_desc.py | MaJun-cn/Paddle | 0ec3a42e9740a5f5066053bb49a923d538eba24a | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/trainer_desc.py | MaJun-cn/Paddle | 0ec3a42e9740a5f5066053bb49a923d538eba24a | [
"Apache-2.0"
] | 4 | 2020-07-27T13:24:03.000Z | 2020-08-06T08:20:32.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of trainers."""
import sys
import os
__all__ = ['TrainerDesc', 'MultiTrainer', 'DistMultiTrainer', 'PipelineTrainer']
class TrainerDesc(object):
    '''
    Set proto from python to c++.
    Can be initialized from train_desc.

    Thin Python wrapper around the TrainerDesc protobuf: the _set_* methods
    copy configuration values into self.proto_desc, which the C++ trainer
    consumes via _desc().
    '''
    def __init__(self):
        '''
        self.proto_desc = data_feed_pb2.DataFeedDesc()
        with open(proto_file, 'r') as f:
            text_format.Parse(f.read(), self.proto_desc)
        '''
        # Workaround for relative import in protobuf under python3
        # TODO: should be fixed
        cur_path = os.path.dirname(__file__)
        if cur_path not in sys.path:
            sys.path.append(cur_path)
        if cur_path + "/proto" not in sys.path:
            sys.path.append(cur_path + "/proto")
        from proto import trainer_desc_pb2
        self.proto_desc = trainer_desc_pb2.TrainerDesc()
        import multiprocessing as mp
        # set default thread num == cpu count
        self.proto_desc.thread_num = mp.cpu_count()
        # These are attached later via the corresponding _set_* methods.
        self._fleet_desc = None
        self._device_worker = None
        self._program = None
        self._infer = False
    def _set_fetch_var_and_info(self, fetch_vars, fetch_info, print_period):
        # Register variables to fetch during training plus their display
        # labels; fetch_info must be parallel to fetch_vars.
        for i, v in enumerate(fetch_vars):
            self.proto_desc.fetch_config.fetch_var_names.extend([v.name])
            self.proto_desc.fetch_config.fetch_var_str_format.extend(
                [fetch_info[i]])
        self.proto_desc.fetch_config.print_period = print_period
    def _set_debug(self, debug):
        self.proto_desc.debug = debug
    def _set_thread(self, thread_num):
        self.proto_desc.thread_num = thread_num
    def _set_device_worker(self, device_worker):
        self._device_worker = device_worker
    def _set_infer(self, infer):
        self._infer = infer
    def _set_fleet_desc(self, fleet_desc):
        self._fleet_desc = fleet_desc
    def _gen_trainer_desc(self):
        # Subclasses fill in proto_desc.class_name and the worker desc.
        pass
    def _set_program(self, program):
        self._program = program
    def _set_use_cvm(self, use_cvm=False):
        self.proto_desc.use_cvm = use_cvm
    def _set_no_cvm(self, no_cvm=False):
        self.proto_desc.no_cvm = no_cvm
    def _set_scale_datanorm(self, scale_datanorm=-1):
        self.proto_desc.scale_datanorm = scale_datanorm
    def _set_dump_slot(self, dump_slot):
        self.proto_desc.dump_slot = dump_slot
    def _set_mpi_rank(self, mpi_rank):
        self.proto_desc.mpi_rank = mpi_rank
    def _set_mpi_size(self, mpi_size):
        self.proto_desc.mpi_size = mpi_size
    def _set_dump_fields(self, dump_fields):
        for field in dump_fields:
            self.proto_desc.dump_fields.append(field)
    def _set_dump_fields_path(self, path):
        self.proto_desc.dump_fields_path = path
    def _set_dump_file_num(self, dump_file_num):
        self.proto_desc.dump_file_num = dump_file_num
    def _set_dump_converter(self, converter):
        self.proto_desc.dump_converter = converter
    def _set_enable_random_dump(self, enable_random_dump):
        self.proto_desc.enable_random_dump = enable_random_dump
    def _set_dump_interval(self, dump_interval):
        self.proto_desc.dump_interval = dump_interval
    def _set_random_with_lineid(self, random_with_lineid):
        self.proto_desc.random_with_lineid = random_with_lineid
    def _set_dump_param(self, dump_param):
        for param in dump_param:
            self.proto_desc.dump_param.append(param)
    def _set_thread_barrier(self, thread_barrier):
        self.proto_desc.thread_barrier = thread_barrier
    def _set_check_nan_var_names(self, check_nan_var_names):
        for var in check_nan_var_names:
            self.proto_desc.check_nan_var_names.append(var)
    def _set_loss_names(self, loss_names):
        for loss in loss_names:
            self.proto_desc.loss_names.append(loss)
    def _set_adjust_ins_weight(self, config_dict):
        # Copy instance-weight-adjustment settings; missing keys fall back to
        # the documented defaults.
        self.proto_desc.adjust_ins_weight_config.need_adjust = \
            config_dict.get("need_adjust", False)
        self.proto_desc.adjust_ins_weight_config.nid_slot = \
            config_dict.get("nid_slot", "")
        self.proto_desc.adjust_ins_weight_config.nid_adjw_threshold = \
            config_dict.get("nid_adjw_threshold", 0.0)
        self.proto_desc.adjust_ins_weight_config.nid_adjw_ratio = \
            config_dict.get("nid_adjw_ratio", 0.0)
        self.proto_desc.adjust_ins_weight_config.ins_weight_slot = \
            config_dict.get("ins_weight_slot", "")
    def _set_copy_table_config(self, config_dict):
        # Copy table-to-table replication settings into the proto. Scalars
        # are promoted to single-element lists; every src list must be the
        # same length as its dest list.
        config = self.proto_desc.copy_table_config
        config.need_copy = config_dict.get("need_copy", False)
        config.batch_num = config_dict.get("batch_num", 100)
        src_sparse_tables = config_dict.get("src_sparse_tables", [])
        if not isinstance(src_sparse_tables, list):
            src_sparse_tables = [src_sparse_tables]
        dest_sparse_tables = config_dict.get("dest_sparse_tables", [])
        if not isinstance(dest_sparse_tables, list):
            dest_sparse_tables = [dest_sparse_tables]
        if len(src_sparse_tables) != len(dest_sparse_tables):
            raise ValueError(
                "len(src_sparse_tables) != len(dest_sparse_tables)," \
                " %s vs %s" % (len(src_sparse_tables), \
                len(dest_sparse_tables)))
        for i in src_sparse_tables:
            config.src_sparse_tables.append(i)
        for i in dest_sparse_tables:
            config.dest_sparse_tables.append(i)
        src_dense_tables = config_dict.get("src_dense_tables", [])
        if not isinstance(src_dense_tables, list):
            src_dense_tables = [src_dense_tables]
        dest_dense_tables = config_dict.get("dest_dense_tables", [])
        if not isinstance(dest_dense_tables, list):
            dest_dense_tables = [dest_dense_tables]
        if len(src_dense_tables) != len(dest_dense_tables):
            raise ValueError(
                "len(src_dense_tables) != len(dest_dense_tables)," \
                " %s vs %s" % (len(src_dense_tables), \
                len(dest_dense_tables)))
        for i in src_dense_tables:
            config.src_dense_tables.append(i)
        for i in dest_dense_tables:
            config.dest_dense_tables.append(i)
        # user can also specify dense variables to copy,
        # instead of copy dense table
        src_var_list = config_dict.get("src_var_list", [])
        if not isinstance(src_var_list, list):
            src_var_list = [src_var_list]
        dest_var_list = config_dict.get("dest_var_list", [])
        if not isinstance(dest_var_list, list):
            dest_var_list = [dest_var_list]
        if len(src_var_list) != len(dest_var_list):
            raise ValueError(
                "len(src_var_list) != len(dest_var_list), %s vs" \
                " %s" % (len(src_var_list), len(dest_var_list)))
        for i in src_var_list:
            config.src_var_list.append(i)
        for i in dest_var_list:
            config.dest_var_list.append(i)
        # Each dependency key must map to exactly one value.
        dependency_map = config_dict.get("dependency_map", {})
        for key in dependency_map:
            m = config.table_denpendency_map.add()
            m.key = key
            values = dependency_map[key]
            if not isinstance(values, list):
                values = [values]
            if len(values) != 1:
                raise ValueError("dependency len %s != 1" % len(values))
            for value in values:
                m.values.append(value)
        config.dense_pull_after_copy = \
            config_dict.get("dense_pull_after_copy", True)
        config.enable_dependency = \
            config_dict.get("enable_dependency", False)
        config.sparse_copy_by_feasign = \
            config_dict.get("sparse_copy_by_feasign", True)
    def _desc(self):
        # NOTE(review): text_format is imported but unused here (it is used
        # in __str__); SerializeToString needs no import.
        from google.protobuf import text_format
        return self.proto_desc.SerializeToString()
    def __str__(self):
        from google.protobuf import text_format
        return text_format.MessageToString(self.proto_desc)
class MultiTrainer(TrainerDesc):
    '''
    Implement of MultiTrainer.
    Can be init from TrainerDesc.
    '''
    def __init__(self):
        super(MultiTrainer, self).__init__()
    def _set_program(self, program):
        # Record the program via the base class, then locally.
        super(MultiTrainer, self)._set_program(program)
        self._program = program
    def _gen_trainer_desc(self):
        """Fill in the proto with this trainer's class name and worker desc."""
        super(MultiTrainer, self)._gen_trainer_desc()
        self.proto_desc.class_name = "MultiTrainer"
        worker = self._device_worker
        worker._set_infer(self._infer)
        worker._set_program(self._program)
        worker._gen_worker_desc(self.proto_desc)
class DistMultiTrainer(TrainerDesc):
    """
    Implement of DistMultiTrainer.
    It's for Distributed training.
    """
    def __init__(self):
        super(DistMultiTrainer, self).__init__()
    def _set_program(self, program):
        # Record the program via the base class, then locally.
        super(DistMultiTrainer, self)._set_program(program)
        self._program = program
    def _gen_trainer_desc(self):
        """Fill in the proto; raises RuntimeError if no program was set."""
        super(DistMultiTrainer, self)._gen_trainer_desc()
        self.proto_desc.class_name = "DistMultiTrainer"
        # Idiomatic identity check (was `== None`).
        if self._program is None:
            raise RuntimeError("None Program")
        self._device_worker._set_infer(self._infer)
        self._device_worker._set_program(self._program)
        self._device_worker._gen_worker_desc(self.proto_desc)
class PipelineTrainer(TrainerDesc):
    """
    Implement of PipelineTrainer.
    It's for Pipeline.
    """
    def __init__(self):
        super(PipelineTrainer, self).__init__()
    def _set_program(self, program):
        # Record the program via the base class, then locally.
        super(PipelineTrainer, self)._set_program(program)
        self._program = program
    def _gen_trainer_desc(self):
        """Fill in the proto; raises RuntimeError if no program was set."""
        super(PipelineTrainer, self)._gen_trainer_desc()
        self.proto_desc.class_name = "PipelineTrainer"
        # Idiomatic identity check (was `== None`).
        if self._program is None:
            raise RuntimeError("None Program")
        self._device_worker._set_infer(self._infer)
        self._device_worker._set_program(self._program)
        self._device_worker._gen_worker_desc(self.proto_desc)
| 36.511864 | 80 | 0.665119 |
4d65b0f527c13ef24db4017fabe075ec894385e1 | 2,778 | py | Python | boa/code/ast_preprocess.py | skyinglyh1/neo-boa | 90fcd8edf627f1e18191b30f6ec10609b4d1d330 | [
"MIT"
] | null | null | null | boa/code/ast_preprocess.py | skyinglyh1/neo-boa | 90fcd8edf627f1e18191b30f6ec10609b4d1d330 | [
"MIT"
] | null | null | null | boa/code/ast_preprocess.py | skyinglyh1/neo-boa | 90fcd8edf627f1e18191b30f6ec10609b4d1d330 | [
"MIT"
] | null | null | null | from bytecode import Bytecode
import ast
from ast import NodeTransformer, NodeVisitor
import inspect
import pdb
import dis
class RewriteDicts(NodeTransformer):
    """Replaces non-empty dict literals with empty ones, collecting the
    originals (tagged with their target variable name) in ``updated_dicts``.

    NOTE(review): state is kept on the *class*, shared across instances;
    preprocess_method_body() resets it after each use -- confirm no
    concurrent use.
    """
    last_store_name = None
    updated_dicts = []
    def visit_Dict(self, node):
        # Only named assignments (`x = {...}` on one line) are accepted;
        # nested dicts and anonymous dict literals are rejected.
        if len(node.keys):
            if self.last_store_name and self.last_store_name.id and self.last_store_name.lineno == node.lineno:
                for item in node.values:
                    if isinstance(item, ast.Dict):
                        raise Exception("Cannot use dictionaries inside of dictionaries")
                node.name = self.last_store_name.id
                self.updated_dicts.append(node)
                self.last_store_name = None
            else:
                raise Exception("Dictionary names must be declared")
        return ast.Dict(keys=[], values=[], lineno=node.lineno)
    def visit_Name(self, node):
        # Track the most recent Name in Store context so a dict literal on
        # the same line can be tied to its assignment target.
        if isinstance(node.ctx, ast.Store):
            self.last_store_name = node
        else:
            self.last_store_name = None
        return node
class ABI(ast.NodeVisitor):
    """Builds a rough ABI listing from a contract module.

    Operation names are collected from ``operation == '<name>'`` comparisons
    inside ``Main``; each FunctionDef whose name was collected gets an ABI
    entry with untyped parameters.

    NOTE(review): a FunctionDef visited *before* ``Main`` cannot yet appear
    in Funclist -- confirm the assumption that ``Main`` precedes (or that
    order does not matter for) the dispatched functions.
    """
    def visit_Module(self, node):
        self.Funclist = []
        self.AbiFunclist = []
        self.FuncMap = []
        self.home_module_in = False
        self.generic_visit(node)
        #self.ABI_result= {"functions":self.AbiFunclist}
        self.ABI_result= {}
    def visit_FunctionDef(self, node):
        args =[]
        if node.name in self.Funclist:
            # construct args list first; parameter types are unknown here
            for arg in node.args.args:
                args.append({"name": arg.arg, "type":""})
            self.AbiFunclist.append({"name":node.name, "parameters":args})
        self.checking_abilist = False
        if node.name == 'Main':
            # Only comparisons inside Main contribute to Funclist.
            self.checking_abilist = True
            self.home_module_in = True
        self.generic_visit(node)
        self.checking_abilist = False
    def visit_Compare(self, node):
        # Match exactly `operation == '<string literal>'` while inside Main.
        if self.checking_abilist == True and type(node.left).__name__ == 'Name' and node.left.id == 'operation' and len(node.ops) == 1 and type(node.ops[0]).__name__ == 'Eq' and len(node.comparators) == 1 and type(node.comparators[0]).__name__ == 'Str':
            self.Funclist.append(node.comparators[0].s)
        self.generic_visit(node)
def preprocess_method_body(source_code_obj, MethodName):
    """Parse *source_code_obj*, hoist its dict literals out via RewriteDicts,
    and return (first bytecode instruction arg, collected dict nodes).

    MethodName is accepted for interface compatibility but not used here.
    """
    src = inspect.getsource(source_code_obj)
    tree = ast.parse(src)
    dict_rewriter = RewriteDicts()
    tree = dict_rewriter.visit(tree)
    ast.fix_missing_locations(tree)
    compiled = compile(tree, filename='<ast>', mode='exec')
    bc = Bytecode.from_code(compiled)
    collected = dict_rewriter.updated_dicts
    # Reset the transformer's class-level state for the next call.
    RewriteDicts.updated_dicts = []
    RewriteDicts.last_store_name = None
    return bc[0].arg, collected
| 31.568182 | 253 | 0.62671 |
153f4f4a292d532c3c8d80b51dda90d9c65d4052 | 16,526 | py | Python | ak8s/client.py | maternity/ak8s | 142fcb0049ed1a84508826bf0b231e9acd9841aa | [
"Apache-2.0"
] | null | null | null | ak8s/client.py | maternity/ak8s | 142fcb0049ed1a84508826bf0b231e9acd9841aa | [
"Apache-2.0"
] | null | null | null | ak8s/client.py | maternity/ak8s | 142fcb0049ed1a84508826bf0b231e9acd9841aa | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Kai Groner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import asyncio
import json
import logging
import os
from pathlib import Path
import ssl
from urllib.parse import urljoin
import aiohttp
import yaml
from .apis import APIRegistry
from .models import ModelBase
async def main():
    """Demo entry point: load an API spec and print pod phase changes.

    Watches the pod list across all namespaces and prints one line per
    watch event.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('spec', metavar='SPEC')
    args = parser.parse_args()
    registry = APIRegistry()
    registry.load_spec(args.spec)
    async with AK8sClient(registry=registry) as ak8s:
        apis = ak8s.bind_api_group(registry.apis)
        # Stream watch events for every pod in every namespace.
        async for ev, obj in apis.core_v1.watch_pod_list_for_all_namespaces(
                includeUninitialized=True):
            print(ev, f'{obj.metadata.name:55} {obj.metadata.namespace:20} {obj.status.phase}')
class AK8sClient:
    """Asynchronous Kubernetes API client.

    Credentials come from explicit keyword arguments, an in-cluster service
    account, or ~/.kube/config (in that order of precedence). Use as an
    async context manager; API operations are dispatched via op(),
    stream_op(), and watch().
    """
    def __init__(
            self, url=None, *,
            registry,
            **kw):
        # If neither cert-pair nor token credentials were passed explicitly,
        # fall back to service-account config, then kubeconfig.
        if not ('ca_file' in kw and (
                ('client_cert_file' in kw and 'client_key_file' in kw) or
                'token' in kw)):
            sa_conf = self._read_serviceaccount()
            kc_conf = self._read_kubeconfig()
            if sa_conf:
                kw = {**sa_conf, **kw}
            elif kc_conf:
                kw = {**kc_conf, **kw}
        url = kw.pop('url', url)
        ca_file = kw.pop('ca_file')
        token = kw.pop('token', None)
        client_cert_file = kw.pop('client_cert_file', None)
        client_key_file = kw.pop('client_key_file', None)
        # Anything left in kw was not a recognized option.
        for k in kw:
            raise TypeError(
                f'AK8sClient() got an unexpected keyword argument {k!r}')
        sslcontext = ssl.create_default_context(cafile=ca_file)
        if client_cert_file is not None and client_key_file is not None:
            sslcontext.load_cert_chain(client_cert_file, client_key_file)
        elif token is not None:
            # Validate the token against the bearer-token grammar:
            # 1*( ALPHA / DIGIT / "-" / "." / "_" / "~" / "+" / "/" ) *"="
            assert all( c.isalnum() or c in '-._~+/=' for c in token )
        else:
            raise TypeError(
                'AK8sClient() requires client_cert_file and '
                'client_key_file or token to be provided.')
        self._url = url
        self._token = token
        self._sslcontext = sslcontext
        self._session = None
        self._models = registry.models_by_gvk
        self._logger = logging.getLogger(self.__class__.__qualname__)
        #TODO: service account config
        # > Accessing the API from a Pod
        # >
        # > When accessing the API from a pod, locating and
        # > authenticating to the apiserver are somewhat different.
        # >
        # > The recommended way to locate the apiserver within the pod
        # > is with the kubernetes DNS name, which resolves to a
        # > Service IP which in turn will be routed to an apiserver.
        # >
        # > The recommended way to authenticate to the apiserver is
        # > with a service account credential. By kube-system, a pod is
        # > associated with a service account, and a credential (token)
        # > for that service account is placed into the filesystem tree
        # > of each container in that pod, at
        # > /var/run/secrets/kubernetes.io/serviceaccount/token.
        # >
        # > If available, a certificate bundle is placed into the
        # > filesystem tree of each container at
        # > /var/run/secrets/kubernetes.io/serviceaccount/ca.crt, and
        # > should be used to verify the serving certificate of the
        # > apiserver.
        # >
        # > Finally, the default namespace to be used for namespaced
        # > API operations is placed in a file at
        # > /var/run/secrets/kubernetes.io/serviceaccount/namespace in
        # > each container.
    @classmethod
    def _read_kubeconfig(cls, kubeconfig=None, context=None):
        # Resolve credentials from a kubeconfig file ($KUBECONFIG or
        # ~/.kube/config). Returns None when no default file exists.
        if kubeconfig is None:
            kubeconfig = os.environ.get('KUBECONFIG')
        if kubeconfig is None:
            kubeconfig = Path.home()/'.kube/config'
            if not kubeconfig.is_file():
                return
        kubeconfig = Path(kubeconfig)
        with kubeconfig.open() as fh:
            doc = yaml.safe_load(fh)
        if context is None:
            context = doc['current-context']
        # for/else: raise if the named entry is missing.
        for d in doc['contexts']:
            if d['name'] == context:
                ctx = d['context']
                break
        else:
            raise RuntimeError(f'Context {context} was not found in {kubeconfig}')
        for d in doc['clusters']:
            if d['name'] == ctx['cluster']:
                cluster = d['cluster']
                break
        else:
            raise RuntimeError(f'Cluster {ctx["cluster"]} was not found in {kubeconfig}')
        for d in doc['users']:
            if d['name'] == ctx['user']:
                user = d['user']
                break
        else:
            raise RuntimeError(f'User {ctx["user"]} was not found in {kubeconfig}')
        # Paths in kubeconfig are relative to the kubeconfig file itself.
        return dict(
            url=cluster['server'],
            ca_file=kubeconfig.parent/cluster['certificate-authority'],
            client_cert_file=kubeconfig.parent/user['client-certificate'],
            client_key_file=kubeconfig.parent/user['client-key'])
    @classmethod
    def _read_serviceaccount(cls):
        # In-cluster credentials mounted into the pod; None when not a pod.
        sa_dir = Path('/var/run/secrets/kubernetes.io/serviceaccount')
        if not sa_dir.is_dir():
            return
        token_file = sa_dir/'token'
        ca_file = sa_dir/'ca.crt'
        #ns_file = sa_dir/'namespace'
        return dict(
            url='https://kubernetes.default',
            ca_file=ca_file,
            token=token_file.read_text())
    async def __aenter__(self):
        self._session = await aiohttp.ClientSession(
            conn_timeout=60).__aenter__()
        return self
    async def __aexit__(self, *exc):
        await self._session.__aexit__(*exc)
        self._session = None
    def bind_api_group(self, api_group):
        """Wrap *api_group* so its calls dispatch through this client."""
        return AK8sClientAPIGroupBinding(self, api_group)
    def _set_authorization(self, headers):
        # Bearer-token auth only; cert auth is handled by the SSL context.
        if self._token is not None:
            headers.update(authorization=f'Bearer {self._token}')
    async def op(self, op):
        """Execute a single (non-streaming) API operation and decode it."""
        body, headers = None, {}
        self._set_authorization(headers)
        if op.body is not None:
            body = ak8s_payload(op.body)
        if 'application/json' in op.produces:
            headers['accept'] = 'application/json'
        url = urljoin(self._url, op.uri)
        self._logger.debug('%(method)s %(path)s', dict(method=op.method, path=op.uri))
        async with self._session.request(
                op.method, url,
                headers=headers,
                data=body,
                ssl_context=self._sslcontext) as resp:
            try:
                resp.raise_for_status()
            except aiohttp.ClientResponseError as e:
                # Attach the decoded Status object to the error; surface
                # NotFound as a dedicated exception type.
                if resp.content_type == 'application/json':
                    e.detail = self._load_model(await resp.json())
                    #from pprint import pprint
                    #pprint(e.detail)
                    if e.detail.reason == 'NotFound':
                        raise AK8sNotFound(e.detail) from None
                raise
            if resp.content_type == 'application/json':
                return self._load_model(await resp.json())
            if resp.content_type == 'text/plain':
                # NOTE(review): resp.text() is a coroutine returned
                # un-awaited, and the response closes on exit -- confirm
                # callers can still read it (likely should be `await`ed).
                return resp.text()
            raise NotImplementedError(f'What do with resp={resp} to op={op}')
        # NOTE(review): unreachable -- every path above returns or raises.
        self._logger.debug('end %(method)s %(path)s', dict(method=op.method, path=op.uri))
    async def stream_op(self, op):
        """Execute a streaming operation, yielding (event, object) pairs for
        watch streams or raw lines for text/plain responses."""
        headers = {}
        self._set_authorization(headers)
        if 'application/json;stream=watch' in op.produces:
            headers['accept'] = 'application/json;stream=watch'
        url = urljoin(self._url, op.uri)
        self._logger.debug('%(method)s %(path)s', dict(method=op.method, path=op.uri))
        async with self._session.request(
                op.method, url,
                headers=headers,
                timeout=None,
                ssl_context=self._sslcontext) as resp:
            try:
                resp.raise_for_status()
            except aiohttp.ClientResponseError as e:
                if resp.content_type == 'application/json':
                    e.detail = self._load_model(await resp.json())
                    #from pprint import pprint
                    #pprint(e.detail)
                    if e.detail.reason == 'NotFound':
                        raise AK8sNotFound(e.detail) from None
                raise
            if resp.content_type == 'application/json':
                # Watch stream: one JSON event per line.
                async for line in resp.content:
                    ev = json.loads(line)
                    type_ = ev['type']
                    data = ev.get('object')
                    if data is not None:
                        obj = self._load_model(data)
                        yield type_, obj
                    else:
                        yield type_, None
                self._logger.debug('end %(method)s %(path)s',
                        dict(method=op.method, path=op.uri))
                return
            elif resp.content_type == 'text/plain':
                # async yield from, where are you?
                async for line in resp.content:
                    yield line
                self._logger.debug('end %(method)s %(path)s',
                        dict(method=op.method, path=op.uri))
                return
            raise NotImplementedError(f'What do with resp={resp} to op={op}')
    async def watch(self, op):
        """Long-running watch with automatic retry/resume across timeouts,
        payload errors, and 'resource version too old' restarts."""
        if not op.stream:
            raise ValueError(f'Cannot watch {op}')
        # The k8s documentation on concurrency and consistency has a bunch of
        # caveats about treating resourceVersion opaquely.
        # From https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
        # """
        # Clients should not assume that the resource version has meaning
        # across namespaces, different kinds of resources, or different
        # servers.
        # """
        # In order to use the watch APIs, it seems we have to violate this
        # somewhat.  Although list endpoints use list types that have a
        # collective resourceVersion, when watch=True that endpoint returns
        # watch events for each record.  The initial batch of events has no
        # particular order, so we need to select the greatest one to continue
        # from when the current request times out or ends.  This means we are
        # potentially comparing resources across namespaces.  The document does
        # hint that versions are used for ordering, but it makes no guarantee
        # that they can be compared, numerically, lexicographically, or
        # otherwise.  For the moment, versions are just an etcd serial, but
        # this potentially breaks if sharding is implemented, or possibly with
        # federation (if different regions have different etcd clusters).  I
        # suspect k8s will address this at some point, and likely some API
        # changes will result.  It's notable that `kubectl get -w` does not
        # implement retries, so it may be that k8s does not think these
        # continuations are supportable at all.
        #
        # And I just figured out that the openapi spec (swagger 2.0) is what I
        # should be using instaed of the 1.2 spec.  We'll use the 1.2 spec for
        # now, but the 2.0 spec should provide a bit more structure, as well as
        # useful names for api groups (from tags).
        last_version = op.args.get('resourceVersion') or None
        too_old_version = None
        while True:
            if last_version:
                # Don't resume from a version the server already rejected.
                if too_old_version and last_version == too_old_version:
                    last_version = None
            op = op.replace(resourceVersion=last_version)
            try:
                async for ev, obj in self.stream_op(op):
                    if ev == 'ERROR':
                        if obj.status == 'Failure' and obj.reason == 'Gone':
                            # too old resource version
                            # In this situation, either there is a newer
                            # version or there isn't.  If there isn't a newer
                            # version, then the error seems to be bogus.  If
                            # there is a newer version, then the error means we
                            # might miss continuity.  Either way, we should
                            # resume from whatever is current.
                            too_old_version = last_version
                            self._logger.debug(
                                    'Restarting %r because version '
                                    '%r is too old',
                                    op.uri, last_version)
                            break
                        self._logger.error('Watch %r: %s', op.uri, obj.message)
                    else:
                        # Track the greatest version seen so retries resume
                        # from the right point.
                        if not last_version or int(obj.metadata.resourceVersion) > int(last_version):
                            last_version = obj.metadata.resourceVersion
                        # I think the first event will always be 'ADDED', but
                        # I'm not sure.
                        if ev == 'ADDED' and too_old_version and int(obj.metadata.resourceVersion) <= int(too_old_version):
                            # If the resource version is too old and we had to
                            # resume without, then discard the repeat events.
                            continue
                        yield ev, obj
            except asyncio.TimeoutError:
                pass  # retry
            except aiohttp.ClientPayloadError as err:
                self._logger.exception('Watch %r', op.uri)
                await asyncio.sleep(1)  # retry
    def _model_for_kind(self, data):
        # Look up the model class by (group, version, kind); core resources
        # have an empty group ('v1' has no '/').
        group, _, version = data['apiVersion'].rpartition('/')
        kind = data['kind']
        return self._models[group, version, kind]
    def _load_model(self, data):
        # Decode a raw API dict into its registered model instance.
        model = self._model_for_kind(data)
        return model._project(data)
class AK8sClientAPIGroupBinding:
    """Lazily binds an API group's members to an AK8sClient: callables become
    AK8sClientAPIBinding, sub-groups become nested group bindings."""
    def __init__(self, ak8s, api_group):
        self._ak8s = ak8s
        self._api_group = api_group
    def __getattr__(self, name):
        member = getattr(self._api_group, name)
        if callable(member):
            return AK8sClientAPIBinding(self._ak8s, member)
        return self.__class__(self._ak8s, member)
    def __dir__(self):
        yield from dir(self._api_group)
class AK8sClientAPIBinding:
    __slots__ = ('_ak8s', '_api', '_method')
    def __init__(self, ak8s, api, method=None):
        self._ak8s = ak8s
        self._api = api
        self._method = method
    def __call__(self, *a, **kw):
        # Build the operation, then dispatch it: an explicitly selected
        # client method wins, otherwise stream ops go to stream_op.
        op = self._api(*a, **kw)
        if self._method is not None:
            runner = getattr(self._ak8s, self._method)
        else:
            runner = self._ak8s.stream_op if op.stream else self._ak8s.op
        return runner(op)
    @property
    def __doc__(self):
        # Mirror the wrapped API callable's documentation.
        return self._api.__doc__
    @property
    def __signature__(self):
        return self._api.__signature__
    def __getattr__(self, name):
        # `binding.watch(...)`-style selection of a specific client method.
        if hasattr(self._ak8s, name):
            return self.__class__(self._ak8s, self._api, name)
        raise AttributeError(name)
class AK8sNotFound(Exception):
    """Raised when the API server responds with a NotFound status."""
    def __init__(self, detail):
        # *detail* is the decoded Status object from the server.
        self.detail = detail
    def __str__(self):
        return self.detail.message
def ak8s_payload(obj, content_type='application/json'):
    """Wrap a model instance's raw data as an aiohttp JSON payload."""
    if not isinstance(obj, ModelBase):
        raise TypeError(f'unsupported type for ak8s_payload: {obj.__class__}')
    return aiohttp.JsonPayload(obj._data, content_type=content_type)
if __name__ == '__main__':
    # Script entry point: run the demo watcher; exit 1 on Ctrl-C.
    try:
        exit(asyncio.get_event_loop().run_until_complete(main()) or 0)
    except KeyboardInterrupt as e:
        exit(1)
| 37.30474 | 123 | 0.577333 |
ffa04e565b0d937b80158062b38f6c5b4ef60d0e | 573 | py | Python | app/log_setup.py | immortel32/Sword_Sorcery_Story_Generator | 7978dfc335813362b2d94c455b970f58421123c8 | [
"MIT"
] | 2 | 2021-04-01T00:50:22.000Z | 2021-04-01T02:18:45.000Z | app/log_setup.py | immortel32/Sword_Sorcery_Story_Generator | 7978dfc335813362b2d94c455b970f58421123c8 | [
"MIT"
] | 1 | 2021-04-01T21:39:44.000Z | 2021-04-01T21:39:44.000Z | app/log_setup.py | immortel32/Sword_Sorcery_Story_Generator | 7978dfc335813362b2d94c455b970f58421123c8 | [
"MIT"
] | 1 | 2021-04-01T01:03:33.000Z | 2021-04-01T01:03:33.000Z | import logging
from logging.handlers import TimedRotatingFileHandler
log = logging.getLogger()
handler = TimedRotatingFileHandler("logs/ss_story_generator.log", "midnight", 1, 7)
fmt = "%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
formatter = logging.Formatter(fmt=fmt, datefmt="%m/%d/%Y %H:%M:%S")
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
# console = logging.StreamHandler()
# console.setFormatter(formatter)
# console.setLevel(logging.INFO)
# logger.addHandler(console)
log.addHandler(handler)
log.setLevel(logging.INFO)
| 31.833333 | 83 | 0.769634 |
349cf7eae9d540ca3fc86866a35386a5c7c47611 | 10,088 | py | Python | exporters/writers/s3_writer.py | scrapinghub/exporters | b14f70530826bbbd6163d9e56e74345e762a9189 | [
"BSD-3-Clause"
] | 41 | 2016-06-16T15:29:39.000Z | 2021-08-06T03:29:13.000Z | exporters/writers/s3_writer.py | bbotella/fluxo | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | [
"BSD-3-Clause"
] | 52 | 2016-06-20T12:46:57.000Z | 2018-02-08T12:22:03.000Z | exporters/writers/s3_writer.py | bbotella/fluxo | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | [
"BSD-3-Clause"
] | 10 | 2016-06-23T08:49:36.000Z | 2018-01-13T10:12:10.000Z | import os
from collections import Counter
from contextlib import closing, contextmanager
import six
from exporters.default_retries import retry_long
from exporters.progress_callback import BotoDownloadProgress
from exporters.utils import CHUNK_SIZE, split_file, calculate_multipart_etag, get_bucket_name, \
get_boto_connection
from exporters.writers.base_writer import InconsistentWriteState
from exporters.writers.filebase_base_writer import FilebaseBaseWriter
DEFAULT_BUCKET_REGION = 'us-east-1'
@contextmanager
def multipart_upload(bucket, key_name, **kwargs):
    """Context manager yielding a boto multipart upload for *key_name*.

    Completes the upload on normal exit; on any exception (bare except is
    deliberate, to also catch KeyboardInterrupt) cancels the upload so no
    orphaned parts remain, then re-raises.
    """
    mp = bucket.initiate_multipart_upload(key_name, **kwargs)
    try:
        yield mp
        mp.complete_upload()
    except:
        mp.cancel_upload()
        raise
def should_use_multipart_upload(path, bucket):
    """Return True when *path* should be uploaded to *bucket* in parts.

    Multipart is used only for files larger than CHUNK_SIZE, and only when
    the bucket ACL is readable (reading it requires the permissions needed
    for complete_upload).
    """
    from boto.exception import S3ResponseError
    # We need to check if we have READ permissions on this bucket, as they are
    # needed to perform the complete_upload operation.
    try:
        acl = bucket.get_acl()
        for grant in acl.acl.grants:
            if grant.permission == 'READ':
                break
        # NOTE(review): if no READ grant is found the loop simply falls
        # through and the size check below still runs -- confirm whether a
        # missing READ grant should return False here.
    except S3ResponseError:
        return False
    return os.path.getsize(path) > CHUNK_SIZE
class S3Writer(FilebaseBaseWriter):
    """
    Writes items to S3 bucket. It is a File Based writer, so it has filebase
    option available

        - bucket (str)
            Name of the bucket to write the items to.

        - aws_access_key_id (str)
            Public acces key to the s3 bucket.

        - aws_secret_access_key (str)
            Secret access key to the s3 bucket.

        - filebase (str)
            Base path to store the items in the bucket.

        - aws_region (str)
            AWS region to connect to.

        - save_metadata (bool)
            Save key's items count as metadata. Default: True

        - filebase
            Path to store the exported files
    """

    supported_options = {
        'bucket': {'type': six.string_types},
        'aws_access_key_id': {
            'type': six.string_types,
            'env_fallback': 'EXPORTERS_S3WRITER_AWS_LOGIN'
        },
        'aws_secret_access_key': {
            'type': six.string_types,
            'env_fallback': 'EXPORTERS_S3WRITER_AWS_SECRET'
        },
        'aws_region': {'type': six.string_types, 'default': None},
        'host': {'type': six.string_types, 'default': None},
        'save_pointer': {'type': six.string_types, 'default': None},
        'save_metadata': {'type': bool, 'default': True, 'required': False}
    }

    def __init__(self, options, *args, **kwargs):
        super(S3Writer, self).__init__(options, *args, **kwargs)
        access_key = self.read_option('aws_access_key_id')
        secret_key = self.read_option('aws_secret_access_key')
        self.aws_region = self.read_option('aws_region')
        self.host = self.read_option('host')
        bucket_name = get_bucket_name(self.read_option('bucket'))
        self.logger.info('Starting S3Writer for bucket: %s' % bucket_name)
        if self.aws_region is None:
            # No region configured - ask AWS where the bucket lives.
            self.aws_region = self._get_bucket_location(access_key, secret_key,
                                                        bucket_name)
        self.conn = get_boto_connection(access_key, secret_key, self.aws_region,
                                        bucket_name, self.host)
        self.bucket = self.conn.get_bucket(bucket_name, validate=False)
        self.save_metadata = self.read_option('save_metadata')
        self.set_metadata('files_counter', Counter())
        self.set_metadata('keys_written', [])

    def _get_bucket_location(self, access_key, secret_key, bucket):
        """Return the region *bucket* lives in, falling back to the default
        region when the lookup fails (e.g. missing GetBucketLocation
        permission)."""
        try:
            conn = get_boto_connection(access_key, secret_key, bucketname=bucket, host=self.host)
            return conn.get_bucket(bucket).get_location() or DEFAULT_BUCKET_REGION
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; any boto/permission
            # failure still falls back to the default region.
            return DEFAULT_BUCKET_REGION

    def _update_metadata(self, dump_path, key_name):
        """Record the name/size/record-count of an uploaded key for the
        later consistency check."""
        buffer_info = self.write_buffer.get_metadata(dump_path)
        key_info = {
            'key_name': key_name,
            'size': buffer_info['size'],
            'number_of_records': buffer_info['number_of_records']
        }
        keys_written = self.get_metadata('keys_written')
        keys_written.append(key_info)
        self.set_metadata('keys_written', keys_written)

    def _get_total_count(self, dump_path):
        """Number of records stored in the dump file (0 when unknown)."""
        return self.write_buffer.get_metadata_for_file(dump_path, 'number_of_records') or 0

    def _ensure_proper_key_permissions(self, key):
        """Grant the bucket owner full control, tolerating missing ACP
        permissions."""
        from boto.exception import S3ResponseError
        try:
            key.set_acl('bucket-owner-full-control')
        except S3ResponseError:
            self.logger.warning('We have no READ_ACP/WRITE_ACP permissions')

    def _create_key_metadata(self, dump_path, md5=None):
        """Build the metadata dict (record count + md5) stored on the S3
        key. Computes the md5 from the file when one isn't supplied."""
        from boto.utils import compute_md5
        metadata = {}
        metadata['total'] = self._get_total_count(dump_path)
        if md5:
            metadata['md5'] = md5
        else:
            # NOTE(review): the dump is opened in text mode ('r'); if dumps
            # can be binary on Python 3 this should likely be 'rb' - confirm.
            with open(dump_path, 'r') as f:
                metadata['md5'] = compute_md5(f)
        return metadata

    def _save_metadata_for_key(self, key, dump_path, md5=None):
        """Attach record-count/md5 metadata to *key*, tolerating missing
        ACP permissions."""
        from boto.exception import S3ResponseError
        metadata = self._create_key_metadata(dump_path, md5)
        try:
            for k, v in metadata.items():
                key.set_metadata(k, v)
        except S3ResponseError:
            self.logger.warning(
                'We have no READ_ACP/WRITE_ACP permissions, '
                'so we could not add metadata info')

    def _upload_small_file(self, dump_path, key_name):
        """Upload *dump_path* in a single PUT request."""
        with closing(self.bucket.new_key(key_name)) as key, open(dump_path, 'r') as f:
            buffer_info = self.write_buffer.get_metadata(dump_path)
            md5 = key.get_md5_from_hexdigest(buffer_info['file_hash'])
            if self.save_metadata:
                self._save_metadata_for_key(key, dump_path, md5)
            progress = BotoDownloadProgress(self.logger)
            key.set_contents_from_file(f, cb=progress, md5=md5)
            self._ensure_proper_key_permissions(key)

    @retry_long
    def _upload_chunk(self, mp, chunk):
        """Upload one part of a multipart upload (retried on failure)."""
        mp.upload_part_from_file(chunk.bytes, part_num=chunk.number)

    def _upload_large_file(self, dump_path, key_name):
        """Upload *dump_path* with the S3 multipart API, chunk by chunk."""
        self.logger.debug('Using multipart S3 uploader')
        md5 = None
        if self.save_metadata:
            md5 = calculate_multipart_etag(dump_path, CHUNK_SIZE)
        metadata = self._create_key_metadata(dump_path, md5=md5)
        with multipart_upload(self.bucket, key_name, metadata=metadata) as mp:
            for chunk in split_file(dump_path):
                self._upload_chunk(mp, chunk)
                self.logger.debug(
                    'Uploaded chunk number {}'.format(chunk.number))

    def _write_s3_key(self, dump_path, key_name):
        """Upload the dump to ``s3://bucket/key_name``, choosing plain vs
        multipart upload based on file size and bucket permissions."""
        destination = 's3://{}/{}'.format(self.bucket.name, key_name)
        self.logger.info('Start uploading {} to {}'.format(dump_path, destination))
        if should_use_multipart_upload(dump_path, self.bucket):
            self._upload_large_file(dump_path, key_name)
        else:
            self._upload_small_file(dump_path, key_name)
        self.last_written_file = destination
        self.logger.info('Saved {}'.format(destination))

    def write(self, dump_path, group_key=None, file_name=None):
        """Write a dump file to the bucket under the configured filebase."""
        if group_key is None:
            group_key = []
        filebase_path, file_name = self.create_filebase_name(group_key, file_name=file_name)
        key_name = filebase_path + '/' + file_name
        self._write_s3_key(dump_path, key_name)
        self._update_metadata(dump_path, key_name)
        self.get_metadata('files_counter')[filebase_path] += 1

    @retry_long
    def _write_s3_pointer(self, save_pointer, filebase):
        """Store *filebase* as the content of the pointer key."""
        with closing(self.bucket.new_key(save_pointer)) as key:
            key.set_contents_from_string(filebase)

    def _update_last_pointer(self):
        """Refresh the pointer key with the current filebase directory."""
        save_pointer = self.read_option('save_pointer')
        self._write_s3_pointer(save_pointer, self.filebase.dirname_template + '/')

    def close(self):
        """
        Called to clean all possible tmp files created during the process.
        """
        if self.read_option('save_pointer'):
            self._update_last_pointer()
        super(S3Writer, self).close()

    def get_file_suffix(self, path, prefix):
        """Suffix for the next file under *path* - a running per-path
        counter."""
        number_of_keys = self.get_metadata('files_counter').get(path, 0)
        suffix = '{}'.format(str(number_of_keys))
        return suffix

    def _check_write_consistency(self):
        """Verify every written key exists with the expected size (and
        record count when metadata was saved), raising
        InconsistentWriteState otherwise. Keys that can't be read (missing
        permissions) are skipped with a warning."""
        from boto.exception import S3ResponseError
        for key_info in self.get_metadata('keys_written'):
            try:
                key = self.bucket.get_key(key_info['key_name'])
                if not key:
                    raise InconsistentWriteState('Key {} not found in bucket'.format(
                        key_info['key_name']))
                if str(key.content_length) != str(key_info['size']):
                    raise InconsistentWriteState(
                        'Key {} has unexpected size. (expected {} - got {})'.format(
                            key_info['key_name'], key_info['size'], key.content_length))
                if self.save_metadata:
                    if str(key.get_metadata('total')) != str(key_info['number_of_records']):
                        raise InconsistentWriteState(
                            'Unexpected number of records for key {}. ('
                            'expected {} - got {})'.format(key_info['key_name'],
                                                           key_info['number_of_records'],
                                                           key.get_metadata('total')))
            except S3ResponseError:
                self.logger.warning(
                    'Skipping consistency check for key {}. Probably due to lack of '
                    'read permissions'.format(key_info['key_name']))
        self.logger.info('Consistency check passed')
| 41.344262 | 97 | 0.625595 |
172db0b45da2a9f20403a7fba1a9b23762a3ff48 | 2,099 | py | Python | ade25/widgets/browser/view.py | ade25/ade25.widgets | 272cf1c74a3b97f4e25161c50f178ebe3c1a70d1 | [
"MIT"
] | null | null | null | ade25/widgets/browser/view.py | ade25/ade25.widgets | 272cf1c74a3b97f4e25161c50f178ebe3c1a70d1 | [
"MIT"
] | null | null | null | ade25/widgets/browser/view.py | ade25/ade25.widgets | 272cf1c74a3b97f4e25161c50f178ebe3c1a70d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Module providing views for the folderish content page type"""
import uuid as uuid_tool
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
class ContentWidgetView(BrowserView):
    """ Default widget view

    Renders the provided template and view by the widget in question
    """

    def __call__(self,
                 widget_type='base',
                 identifier=None,
                 data_set=None,
                 widget_mode='view',
                 **kw):
        """Store the requested widget parameters and render the view."""
        self.params = {
            'widget_name': identifier,
            'widget_type': widget_type,
            'widget_mode': widget_mode,
            'widget_data': data_set,
        }
        return self.render()

    def render(self):
        return self.index()

    @property
    def edit_mode(self):
        """True when the widget is rendered for inline editing."""
        return self.params['widget_mode'] == 'edit'

    @property
    def record(self):
        """The raw data set passed in for this widget."""
        return self.params['widget_data']

    def widget_uid(self):
        """Return the widget's stored id, or a fresh UUID4 when the record
        has no usable ``id`` entry (missing key or record is None)."""
        try:
            widget_id = self.record['id']
        except (KeyError, TypeError):
            widget_id = str(uuid_tool.uuid4())
        return widget_id

    def rendered_widget(self):
        """Render the traversable view matching the widget type, or the
        base widget view when no type was supplied."""
        context = aq_inner(self.context)
        if self.params['widget_type']:
            view_name = '@@content-widget-{0}'.format(
                self.params['widget_type']
            )
            try:
                rendered_widget = context.restrictedTraverse(view_name)(
                    widget_mode=self.params['widget_mode'],
                    widget_data=self.params['widget_data']
                )
            except Exception:
                # Narrowed from a bare ``except:``.
                # NOTE(review): this fallback re-runs the *same* view (the
                # '@@content-widget-error' name below is commented out), so
                # a deterministic failure will simply raise on the second
                # attempt - confirm whether an error view should be wired
                # in here instead.
                # view_name = '@@content-widget-error'
                rendered_widget = context.restrictedTraverse(view_name)(
                    widget_mode=self.params['widget_mode'],
                    widget_data=self.params['widget_data']
                )
        else:
            view_name = '@@content-widget-base'
            rendered_widget = context.restrictedTraverse(view_name)()
        return rendered_widget
99bfae0752f83419bb6938894647e2ff2e83597e | 3,486 | py | Python | tests/intr/models/ci/base/test_system.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 3 | 2022-02-17T18:07:07.000Z | 2022-03-19T10:22:38.000Z | tests/intr/models/ci/base/test_system.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 58 | 2022-02-14T14:41:22.000Z | 2022-03-31T10:54:28.000Z | tests/intr/models/ci/base/test_system.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 6 | 2022-02-14T19:21:26.000Z | 2022-03-29T09:31:31.000Z | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from cibyl.config import AppConfig
from cibyl.models.ci.base.system import JobsSystem, System
from cibyl.models.ci.zuul.system import ZuulSystem
from cibyl.orchestrator import Orchestrator
class TestAPI(TestCase):
    """Test that the System API reflects the configuration environment."""

    def setUp(self):
        # Environment with one jenkins and one zuul system.
        self.config = {
            'environments': {
                'env3': {
                    'system3': {
                        'system_type': 'jenkins',
                        'sources': {}},
                    'system4': {
                        'system_type': 'zuul',
                        'sources': {}}
                }}}
        # Same systems, declared in the opposite order.
        self.config_reverse = {
            'environments': {
                'env3': {
                    'system4': {
                        'system_type': 'zuul',
                        'sources': {}},
                    'system3': {
                        'system_type': 'jenkins',
                        'sources': {}}
                }}}
        # Single-system environments.
        self.config_jenkins = {
            'environments': {
                'env3': {
                    'system3': {
                        'system_type': 'jenkins',
                        'sources': {}}}}}
        self.config_zuul = {
            'environments': {
                'env3': {
                    'system3': {
                        'system_type': 'zuul',
                        'sources': {}}}}}
        self.orchestrator = Orchestrator()

    def _check_api_for_config(self, config, expected_api):
        """Build CI environments from *config* and assert the resulting
        ``System.API`` equals *expected_api*."""
        self.orchestrator.config = AppConfig(data=config)
        self.orchestrator.create_ci_environments()
        self.assertEqual(System.API, expected_api)

    def test_system_api_zuul_jenkins(self):
        """Creating a jenkins and a zuul system yields the combined
        (zuul) API."""
        self._check_api_for_config(self.config, ZuulSystem.API)

    def test_system_api_zuul_jenkins_reverse_order(self):
        """System declaration order must not affect the combined API."""
        self._check_api_for_config(self.config_reverse, ZuulSystem.API)

    def test_system_api_zuul(self):
        """A zuul-only environment exposes the zuul API."""
        self._check_api_for_config(self.config_zuul, ZuulSystem.API)

    def test_system_api_jenkins(self):
        """A jenkins-only environment exposes the plain jobs API."""
        self._check_api_for_config(self.config_jenkins, JobsSystem.API)
| 37.891304 | 78 | 0.572289 |
3bcb6fe4c99310496b3914d6156441fe240855c2 | 1,628 | py | Python | idao/dataloader.py | bu4er88/IDAO_2021 | 687f7caa666ab8f9189e1728f3bc74f2985bfdb8 | [
"Apache-2.0"
] | 7 | 2021-03-11T15:25:01.000Z | 2021-03-16T23:31:37.000Z | idao/dataloader.py | bu4er88/IDAO_2021 | 687f7caa666ab8f9189e1728f3bc74f2985bfdb8 | [
"Apache-2.0"
] | 4 | 2021-03-15T10:39:21.000Z | 2021-03-16T22:03:32.000Z | idao/dataloader.py | bu4er88/IDAO_2021 | 687f7caa666ab8f9189e1728f3bc74f2985bfdb8 | [
"Apache-2.0"
] | 11 | 2021-03-12T08:17:02.000Z | 2021-07-19T13:56:34.000Z | import numpy as np
import torch
import os
import pathlib as path
from PIL import Image
from torchvision.datasets import DatasetFolder
from torch.utils.data import Dataset
class IDAODataset(DatasetFolder):
    """``DatasetFolder`` variant that additionally yields the particle
    energy (keV) and a file-name-derived index for every sample."""

    def name_to_energy(self, name):
        """Extract the energy encoded in *name* (the ``_``-separated token
        right before ``keV``) as a float tensor, or ``-1.0`` when the name
        carries no parsable energy marker."""
        try:
            names = os.path.split(name)[-1].split("_")
            idx = [i for i, v in enumerate(names) if v == "keV"][0]
            return torch.tensor(float(names[idx - 1]))
        except (IndexError, ValueError):
            # IndexError: no "keV" token in the file name; ValueError: the
            # field before it isn't numeric. Either way return a sentinel.
            # (Narrowed from ``except Exception as e`` with an unused ``e``.)
            return torch.tensor(-1.0)

    def name_to_index(self, name):
        """Return the file name without directory and extension."""
        return os.path.split(name)[-1].split('.')[0]

    def __getitem__(self, index: int):
        """Return ``(sample, target, energy, index)`` for sample *index*."""
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target, self.name_to_energy(path), self.name_to_index(path)
class InferenceDataset(Dataset):
    """Dataset over every ``*.png`` file found directly inside *main_dir*.

    Each item is ``(transform(loader(path)), file_name)``.
    """

    def __init__(self, main_dir, transform, loader=None):
        """
        :param main_dir: directory scanned (non-recursively) for PNG files.
        :param transform: callable applied to every loaded image.
        :param loader: callable that opens a path and returns the image.
        """
        # Bug fix: the original assigned ``self.img_loaderj = img_loader``
        # (a typo'd attribute nothing reads, pointing at the module-level
        # loader instead of the one passed in); the loader actually used is
        # stored below.
        self.main_dir = path.Path(main_dir)
        self.transform = transform
        self.all_imgs = list(self.main_dir.glob("*.png"))
        self.loader = loader

    def __len__(self):
        """Number of PNG files discovered at construction time."""
        return len(self.all_imgs)

    def __getitem__(self, idx):
        """Return ``(transformed_image, file_name)`` for image *idx*."""
        img_loc = self.all_imgs[idx]
        image = self.loader(img_loc)
        tensor_image = self.transform(image)
        return tensor_image, img_loc.name
def img_loader(path: str):
    """Open the image at *path* and return its pixel data as a numpy array.

    The context manager guarantees the underlying file handle is closed;
    ``np.array`` copies the pixel data, so the array stays valid afterwards.
    """
    with Image.open(path) as image_file:
        return np.array(image_file)
| 29.6 | 82 | 0.641278 |
52a9d36ca6e9c2571f134b7be552631e181816d3 | 5,214 | py | Python | test/test_issue084.py | MarkHuisjes/RDFOWLtyping | c5849501bd76ee60e3ea02632990e3b8eaee6365 | [
"BSD-3-Clause"
] | 1 | 2017-04-05T16:11:54.000Z | 2017-04-05T16:11:54.000Z | test/test_issue084.py | MarkHuisjes/RDFOWLtyping | c5849501bd76ee60e3ea02632990e3b8eaee6365 | [
"BSD-3-Clause"
] | null | null | null | test/test_issue084.py | MarkHuisjes/RDFOWLtyping | c5849501bd76ee60e3ea02632990e3b8eaee6365 | [
"BSD-3-Clause"
] | null | null | null | from codecs import getreader
try:
from io import BytesIO, StringIO
except ImportError:
from StringIO import StringIO
BytesIO = StringIO
from rdflib import URIRef, Literal
from rdflib.graph import Graph
rdf = u"""@prefix skos:
<http://www.w3.org/2004/02/skos/core#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix : <http://www.test.org/#> .
:world rdf:type skos:Concept;
skos:prefLabel "World"@en.
:africa rdf:type skos:Concept;
skos:prefLabel "Africa"@en;
skos:broaderTransitive :world.
:CI rdf:type skos:Concept;
skos:prefLabel "C\u00f4te d'Ivoire"@fr;
skos:broaderTransitive :africa.
"""
rdf_utf8 = rdf.encode('utf-8')
rdf_reader = getreader('utf-8')(BytesIO(rdf.encode('utf-8')))
def _check_ci_label(g):
    """Shared assertion: the parsed graph must contain the French prefLabel
    "Côte d'Ivoire" for the :CI concept."""
    v = g.value(
        subject=URIRef("http://www.test.org/#CI"),
        predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
    assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')


def _parse_n3_and_check(**kwargs):
    """Parse N3 input (given as ``data=`` or ``source=``) into a fresh Graph
    and run the shared label assertion. Deduplicates the five identical
    test bodies below."""
    g = Graph()
    g.parse(format='n3', **kwargs)
    _check_ci_label(g)


def test_a():
    """Test reading N3 from a unicode objects as data"""
    _parse_n3_and_check(data=rdf)


def test_b():
    """Test reading N3 from a utf8 encoded string as data"""
    _parse_n3_and_check(data=rdf_utf8)


def test_c():
    """Test reading N3 from a codecs.StreamReader, outputting unicode"""
    # rdf_reader.seek(0)
    _parse_n3_and_check(source=rdf_reader)


def test_d():
    """Test reading N3 from a StringIO over the unicode object"""
    _parse_n3_and_check(source=StringIO(rdf))


def test_e():
    """Test reading N3 from a BytesIO over the string object"""
    _parse_n3_and_check(source=BytesIO(rdf_utf8))
# this is unicode
rdfxml=u"""<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:skos="http://www.w3.org/2004/02/skos/core#"
>
<rdf:Description rdf:about="http://www.test.org/#CI">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#Concept"/>
<skos:prefLabel xml:lang="fr">C\u00f4te d\'Ivoire</skos:prefLabel>
<skos:broaderTransitive rdf:resource="http://www.test.org/#africa"/>
</rdf:Description>
</rdf:RDF>
"""
# this is a str
rdfxml_utf8 = rdfxml.encode('utf-8')
rdfxml_reader = getreader('utf-8')(BytesIO(rdfxml.encode('utf-8')))
def _skip_on_jython():
    """Raise nose's SkipTest when running on Jython, which cannot handle
    the unicode in these fixtures (Jython 2.5)."""
    import platform
    if platform.system() == 'Java':
        from nose import SkipTest
        raise SkipTest('unicode issue for Jython2.5')


def _parse_xml_and_check(**kwargs):
    """Parse RDF/XML input (``data=`` or ``source=``) into a fresh Graph
    and assert the French prefLabel for :CI is present."""
    g = Graph()
    g.parse(format='xml', **kwargs)
    v = g.value(
        subject=URIRef("http://www.test.org/#CI"),
        predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
    assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')


def test_xml_a():
    """Test reading XML from a unicode object as data"""
    _skip_on_jython()
    _parse_xml_and_check(data=rdfxml)


def test_xml_b():
    """Test reading XML from a utf8 encoded string object as data"""
    _skip_on_jython()
    _parse_xml_and_check(data=rdfxml_utf8)


# Graph.parse does not currently support reading XML from a
# codecs.StreamReader, nor from a BytesIO created over a unicode object,
# so the original test_xml_c / test_xml_d remain unimplemented.


def test_xml_e():
    """Test reading XML from a BytesIO created from utf8 encoded string"""
    _skip_on_jython()
    _parse_xml_and_check(source=BytesIO(rdfxml_utf8))
| 37.782609 | 127 | 0.662064 |
bf0830cf74113cda7b93f32dba91ae83b8e09e25 | 965 | py | Python | tests/test_echo.py | pbst/angr | a67010c8ef20166b32a14feb4611fdbbfb1f9ab3 | [
"BSD-2-Clause"
] | 2 | 2019-12-20T13:42:57.000Z | 2021-07-07T09:34:46.000Z | tests/test_echo.py | pbst/angr | a67010c8ef20166b32a14feb4611fdbbfb1f9ab3 | [
"BSD-2-Clause"
] | 2 | 2018-11-13T16:19:16.000Z | 2018-12-10T15:45:53.000Z | tests/test_echo.py | pbst/angr | a67010c8ef20166b32a14feb4611fdbbfb1f9ab3 | [
"BSD-2-Clause"
] | null | null | null | import angr
import logging
l = logging.getLogger("angr.tests")
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
target_arches = {
#'i386',
'x86_64',
#'ppc',
#'armel',
#'mips',
}
def run_echo_haha(arch):
    """Symbolically execute ``echo haha`` for *arch* and check its output."""
    # Load without SimProcedures so the real library code is executed.
    p = angr.Project(os.path.join(test_location, arch, 'echo'), use_sim_procedures=False)
    # STRICT_PAGE_ACCESS makes out-of-bounds memory accesses fail loudly
    # instead of being silently tolerated.
    s = p.factory.full_init_state(mode='symbolic_approximating', args=['echo', 'haha'], add_options={angr.options.STRICT_PAGE_ACCESS})
    pg = p.factory.simulation_manager(s)
    # Step until the single active path either forks or terminates.
    pg.run(until=lambda lpg: len(lpg.active) != 1)
    # Exactly one path should have run to completion, with none left active.
    assert len(pg.deadended) == 1
    assert len(pg.active) == 0
    # Need to dump by path because the program closes stdout
    assert pg.deadended[0].posix.stdout.concretize() == [b'haha\n']
def test_echo_haha():
    """Nose-style generator test: run the echo check for every target arch."""
    for arch in target_arches:
        yield run_echo_haha, arch
if __name__ == "__main__":
    # Allow running the generator test directly as a script.
    for r,a in test_echo_haha():
        r(a)
| 27.571429 | 134 | 0.66943 |
0f8f74b68c8a0b624bce25aac5565a54b5d07692 | 653 | py | Python | pentest-scripts/python-web-penetration-testing-cookbook/Chapter_01_Code/6_screenshot_by_port.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | 6 | 2021-12-07T21:02:12.000Z | 2022-03-03T12:08:14.000Z | pentest-scripts/python-web-penetration-testing-cookbook/Chapter_01_Code/6_screenshot_by_port.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | null | null | null | pentest-scripts/python-web-penetration-testing-cookbook/Chapter_01_Code/6_screenshot_by_port.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | 1 | 2022-01-15T23:57:36.000Z | 2022-01-15T23:57:36.000Z | import screenshot
import requests
portList = [80,443,2082,2083,2086,2087,2095,2096,8080,8880,8443,9998,4643,9001,4489]
IP = '127.0.0.1'
http = 'http://'
https = 'https://'
def testAndSave(protocol, portNumber):
    """Probe ``protocol + IP + ':' + portNumber`` and, when the server
    answers with HTTP 200, save a screenshot of the page as ``<port>.png``."""
    url = protocol + IP + ':' + str(portNumber)
    try:
        r = requests.get(url, timeout=1)
        if r.status_code == 200:
            # print() as a function is valid on both Python 2 and 3
            # (the original used the Python-2-only print statement).
            print('Found site on ' + url)
            s = screenshot.Screenshot()
            image = s.get_image(url)
            image.save(str(portNumber) + '.png')
    except Exception:
        # Best-effort scanning: closed ports, timeouts and screenshot
        # failures are expected and skipped. Narrowed from a bare
        # ``except:`` so Ctrl+C (KeyboardInterrupt) can still stop the scan.
        pass


# Try each candidate port over both plain HTTP and HTTPS.
for port in portList:
    testAndSave(http, port)
    testAndSave(https, port)
| 24.185185 | 85 | 0.568147 |
8693b20679fcdf377cabf8f33c452d3086f6b573 | 17,841 | py | Python | piccolo/columns/base.py | cheesycod/piccolo | 254750cdd2f40f118a200074f97e93c7dae4461c | [
"MIT"
] | null | null | null | piccolo/columns/base.py | cheesycod/piccolo | 254750cdd2f40f118a200074f97e93c7dae4461c | [
"MIT"
] | null | null | null | piccolo/columns/base.py | cheesycod/piccolo | 254750cdd2f40f118a200074f97e93c7dae4461c | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABCMeta, abstractmethod
import copy
from dataclasses import dataclass, field
import datetime
import decimal
from enum import Enum
import inspect
import typing as t
from piccolo.columns.operators.comparison import (
ComparisonOperator,
Equal,
GreaterEqualThan,
GreaterThan,
ILike,
In,
IsNotNull,
IsNull,
LessEqualThan,
LessThan,
Like,
NotEqual,
NotIn,
NotLike,
)
from piccolo.columns.combination import Where
from piccolo.columns.defaults.base import Default
from piccolo.columns.reference import LazyTableReference
from piccolo.columns.indexes import IndexMethod
from piccolo.querystring import QueryString
from piccolo.utils.warnings import colored_warning
if t.TYPE_CHECKING: # pragma: no cover
from piccolo.columns.column_types import ForeignKey
from piccolo.table import Table
class OnDelete(str, Enum):
    """SQL ``ON DELETE`` referential actions for foreign key constraints."""

    cascade = "CASCADE"
    restrict = "RESTRICT"
    no_action = "NO ACTION"
    set_null = "SET NULL"
    set_default = "SET DEFAULT"

    def __str__(self):
        # Render as e.g. ``OnDelete.cascade`` - matches how the option is
        # spelled in Python source (useful for generated migrations).
        return "{}.{}".format(type(self).__name__, self.name)

    def __repr__(self):
        return str(self)
class OnUpdate(str, Enum):
    """SQL ``ON UPDATE`` referential actions for foreign key constraints."""

    cascade = "CASCADE"
    restrict = "RESTRICT"
    no_action = "NO ACTION"
    set_null = "SET NULL"
    set_default = "SET DEFAULT"

    def __str__(self):
        # Render as e.g. ``OnUpdate.cascade`` - matches how the option is
        # spelled in Python source (useful for generated migrations).
        return "{}.{}".format(type(self).__name__, self.name)

    def __repr__(self):
        return str(self)
@dataclass
class ForeignKeyMeta:
    """Metadata attached to a ForeignKey column: the table it references
    and the referential actions to apply on delete/update."""

    # Either a Table subclass, or a lazy reference resolved on first use.
    references: t.Union[t.Type[Table], LazyTableReference]
    on_delete: OnDelete
    on_update: OnUpdate
    # Columns mirrored from the referenced table (populated elsewhere).
    proxy_columns: t.List[Column] = field(default_factory=list)
    @property
    def resolved_references(self) -> t.Type[Table]:
        """
        Evaluates the ``references`` attribute if it's a LazyTableReference,
        raising a ``ValueError`` if it fails, otherwise returns a ``Table``
        subclass.
        """
        from piccolo.table import Table
        if isinstance(self.references, LazyTableReference):
            return self.references.resolve()
        elif inspect.isclass(self.references) and issubclass(
            self.references, Table
        ):
            return self.references
        else:
            raise ValueError(
                "The references attribute is neither a Table sublclass or a "
                "LazyTableReference instance."
            )
    def copy(self) -> ForeignKeyMeta:
        """Return a copy with its own independent ``proxy_columns`` list."""
        kwargs = self.__dict__.copy()
        kwargs.update(proxy_columns=self.proxy_columns.copy())
        return self.__class__(**kwargs)
    def __copy__(self) -> ForeignKeyMeta:
        # Delegate so copy.copy() and .copy() stay in sync.
        return self.copy()
    def __deepcopy__(self, memo) -> ForeignKeyMeta:
        """
        We override deepcopy, as it's too slow if it has to recreate
        everything.
        """
        return self.copy()
@dataclass
class ColumnMeta:
    """
    We store as many attributes in ColumnMeta as possible, to help avoid name
    clashes with user defined attributes.
    """

    # General attributes:
    null: bool = False
    primary: bool = False
    key: bool = False
    unique: bool = False
    index: bool = False
    index_method: IndexMethod = IndexMethod.btree
    required: bool = False
    help_text: t.Optional[str] = None
    # Either an Enum subclass, or a sequence of (label, value) pairs.
    choices: t.Optional[t.Union[t.Type[Enum], tuple, list]] = None
    # Used for representing the table in migrations and the playground.
    params: t.Dict[str, t.Any] = field(default_factory=dict)
    # Set by the Table Metaclass:
    _name: t.Optional[str] = None
    _table: t.Optional[t.Type[Table]] = None
    # Used by Foreign Keys:
    call_chain: t.List["ForeignKey"] = field(default_factory=lambda: [])
    table_alias: t.Optional[str] = None

    @property
    def name(self) -> str:
        if not self._name:
            raise ValueError(
                "`_name` isn't defined - the Table Metaclass should set it."
            )
        return self._name

    @name.setter
    def name(self, value: str):
        self._name = value

    @property
    def table(self) -> t.Type[Table]:
        if not self._table:
            raise ValueError(
                "`_table` isn't defined - the Table Metaclass should set it."
            )
        return self._table

    @property
    def engine_type(self) -> str:
        """Engine type string of the parent table's database (e.g.
        'postgres' / 'sqlite')."""
        engine = self.table._meta.db
        if engine:
            return engine.engine_type
        else:
            raise ValueError("The table has no engine defined.")

    @property
    def get_choices(self) -> t.Optional[t.List[t.Tuple[str, t.Any]]]:
        """
        Returns choices as a list of (label, value) tuples. Enum member
        names are turned into labels ("my_choice" -> "My Choice"). Returns
        None when no choices were configured or they have an unsupported
        type. Custom fields should add type checking to keys as well.
        """
        # Bug fix: the original tested ``isinstance(self.choices, t.Enum)``,
        # but the ``typing`` module has no ``Enum`` attribute, so this always
        # raised AttributeError. ``choices`` holds an Enum *class*, so check
        # with issubclass against ``enum.Enum``. (The return annotation was
        # also the invalid ``Optional(list)``.)
        if inspect.isclass(self.choices) and issubclass(self.choices, Enum):
            return [
                (choice.name.replace("_", " ").title(), choice.value)
                for choice in self.choices
            ]
        elif isinstance(self.choices, tuple):
            return list(self.choices)  # Guarantee a list
        elif isinstance(self.choices, list):
            return self.choices
        else:
            return None

    def get_full_name(self, just_alias=False) -> str:
        """
        Returns the full column name, taking into account joins.
        """
        column_name = self.name
        if not self.call_chain:
            # No joins involved - a plain "tablename.column" reference.
            return f"{self.table._meta.tablename}.{column_name}"
        # Joined access: build a "$"-separated name for the result column,
        # and reference the column via the last join's table alias.
        column_name = (
            "$".join([i._meta.name for i in self.call_chain])
            + f"${column_name}"
        )
        alias = f"{self.call_chain[-1]._meta.table_alias}.{self.name}"
        if just_alias:
            return alias
        else:
            return f'{alias} AS "{column_name}"'

    def copy(self) -> ColumnMeta:
        """Return a copy with independent ``params`` and ``call_chain``."""
        kwargs = self.__dict__.copy()
        kwargs.update(
            params=self.params.copy(), call_chain=self.call_chain.copy(),
        )
        return self.__class__(**kwargs)

    def __copy__(self) -> ColumnMeta:
        return self.copy()

    def __deepcopy__(self, memo) -> ColumnMeta:
        """
        We override deepcopy, as it's too slow if it has to recreate
        everything.
        """
        return self.copy()
class Selectable(metaclass=ABCMeta):
    """Base class for anything that can appear in a SELECT clause."""

    @abstractmethod
    def get_select_string(self, engine_type: str, just_alias=False) -> str:
        """
        Return the SQL fragment placed after ``SELECT`` for this object.
        For a plain column this is the column name; subqueries and
        functions return their own expressions.
        """
        pass
class Column(Selectable):
"""
All other columns inherit from ``Column``. Don't use it directly.
The following arguments apply to all column types:
:param null:
Whether the column is nullable.
:param primary:
If set, the column is used as a primary key.
:param key:
If set, the column is treated as a key.
:param default:
The column value to use if not specified by the user.
:param unique:
If set, a unique contraint will be added to the column.
:param index:
Whether an index is created for the column, which can improve
the speed of selects, but can slow down inserts.
:param index_method:
If index is set to True, this specifies what type of index is created.
:param required:
This isn't used by the database - it's to indicate to other tools that
the user must provide this value. Example uses are in serialisers for
API endpoints, and form fields.
:param help_text:
This provides some context about what the column is being used for. For
example, for a `Decimal` column called `value`, it could say
'The units are millions of dollars'. The database doesn't use this
value, but tools such as Piccolo Admin use it to show a tooltip in the
GUI.
"""
value_type: t.Type = int
    def __init__(
        self,
        null: bool = False,
        primary: bool = False,
        key: bool = False,
        unique: bool = False,
        index: bool = False,
        index_method: IndexMethod = IndexMethod.btree,
        required: bool = False,
        help_text: t.Optional[str] = None,
        **kwargs,
    ) -> None:
        """Store the column configuration in ``self._meta``.

        Subclasses pass their type-specific options through ``**kwargs``,
        which are recorded verbatim in ``_meta.params`` for migrations.
        """
        # Used for migrations.
        # We deliberately omit 'required', and 'help_text' as they don't effect
        # the actual schema.
        kwargs.update(
            {
                "null": null,
                "primary": primary,
                "key": key,
                "unique": unique,
                "index": index,
                "index_method": index_method,
            }
        )
        # ``...`` (Ellipsis) is the sentinel for "no default supplied", so an
        # explicit ``default=None`` can be distinguished from no default.
        if kwargs.get("default", ...) is None and not null:
            raise ValueError(
                "A default value of None isn't allowed if the column is "
                "not nullable."
            )
        self._meta = ColumnMeta(
            null=null,
            primary=primary,
            key=key,
            unique=unique,
            index=index,
            index_method=index_method,
            params=kwargs,
            required=required,
            help_text=help_text,
        )
        # Output name override, set via ``as_alias``.
        self.alias: t.Optional[str] = None
    def _validate_default(
        self,
        default: t.Any,
        allowed_types: t.Iterable[t.Union[None, t.Type[t.Any]]],
    ) -> bool:
        """
        Make sure that the default value is of the allowed types.

        Returns True when the default is acceptable (an allowed type or a
        callable producing the value), otherwise raises ValueError.
        """
        if getattr(self, "_validated", None):
            # If it has previously been validated by a subclass, don't
            # validate again.
            return True
        elif (
            # Precedence note: this reads as
            # (default is None and None in allowed_types)
            # or (type(default) in allowed_types).
            default is None
            and None in allowed_types
            or type(default) in allowed_types
        ):
            self._validated = True
            return True
        elif callable(default):
            # Callables are accepted as-is; they're invoked lazily when the
            # default value is actually needed.
            self._validated = True
            return True
        else:
            raise ValueError(
                f"The default {default} isn't one of the permitted types - "
                f"{allowed_types}"
            )
    def is_in(self, values: t.List[t.Any]) -> Where:
        """Build a SQL ``IN`` clause; *values* must be non-empty."""
        if len(values) == 0:
            raise ValueError(
                "The `values` list argument must contain at least one value."
            )
        return Where(column=self, values=values, operator=In)
    def not_in(self, values: t.List[t.Any]) -> Where:
        """Build a SQL ``NOT IN`` clause; *values* must be non-empty."""
        if len(values) == 0:
            raise ValueError(
                "The `values` list argument must contain at least one value."
            )
        return Where(column=self, values=values, operator=NotIn)
    def like(self, value: str) -> Where:
        """Build a SQL ``LIKE`` clause; *value* must contain a ``%`` wildcard."""
        if "%" not in value:
            raise ValueError("% is required for like operators")
        return Where(column=self, value=value, operator=Like)
    def ilike(self, value: str) -> Where:
        """Build a case-insensitive ``ILIKE`` clause (Postgres only).

        On non-Postgres engines this falls back to plain ``LIKE`` with a
        warning, since SQLite has no ILIKE operator.
        """
        if "%" not in value:
            raise ValueError("% is required for ilike operators")
        if self._meta.engine_type == "postgres":
            operator: t.Type[ComparisonOperator] = ILike
        else:
            colored_warning(
                "SQLite doesn't support ILIKE currently, falling back to LIKE."
            )
            operator = Like
        return Where(column=self, value=value, operator=operator)
    def not_like(self, value: str) -> Where:
        """Build a SQL ``NOT LIKE`` clause; *value* must contain ``%``."""
        if "%" not in value:
            raise ValueError("% is required for like operators")
        return Where(column=self, value=value, operator=NotLike)
    def __lt__(self, value) -> Where:
        # column < value
        return Where(column=self, value=value, operator=LessThan)
    def __le__(self, value) -> Where:
        # column <= value
        return Where(column=self, value=value, operator=LessEqualThan)
    def __gt__(self, value) -> Where:
        # column > value
        return Where(column=self, value=value, operator=GreaterThan)
    def __ge__(self, value) -> Where:
        # column >= value
        return Where(column=self, value=value, operator=GreaterEqualThan)
    def __eq__(self, value) -> Where:  # type: ignore
        # `column == None` maps to `IS NULL`, because SQL equality against
        # NULL never matches.
        if value is None:
            return Where(column=self, operator=IsNull)
        else:
            return Where(column=self, value=value, operator=Equal)
    def __ne__(self, value) -> Where:  # type: ignore
        # `column != None` maps to `IS NOT NULL` for the same reason.
        if value is None:
            return Where(column=self, operator=IsNotNull)
        else:
            return Where(column=self, value=value, operator=NotEqual)
    def __hash__(self):
        # Defining __eq__ suppresses the inherited __hash__, so restore
        # hashability based on the column name.
        return hash(self._meta.name)
def is_null(self) -> Where:
"""
Can be used instead of `MyTable.column != None`, because some linters
don't like a comparison to None.
"""
return Where(column=self, operator=IsNull)
def is_not_null(self) -> Where:
"""
Can be used instead of `MyTable.column == None`, because some linters
don't like a comparison to None.
"""
return Where(column=self, operator=IsNotNull)
def as_alias(self, name: str) -> Column:
"""
Allows column names to be changed in the result of a select.
For example:
>>> await Band.select(Band.name.as_alias('title')).run()
{'title': 'Pythonistas'}
"""
column = copy.deepcopy(self)
column.alias = name
return column
def get_default_value(self) -> t.Any:
"""
If the column has a default attribute, return it. If it's callable,
return the response instead.
"""
default = getattr(self, "default", ...)
if default is not ...:
default = default.value if isinstance(default, Enum) else default
is_callable = hasattr(default, "__call__")
value = default() if is_callable else default
return value
return None
def get_select_string(self, engine_type: str, just_alias=False) -> str:
"""
How to refer to this column in a SQL query.
"""
if self.alias is None:
return self._meta.get_full_name(just_alias=just_alias)
else:
original_name = self._meta.get_full_name(just_alias=True)
return f"{original_name} AS {self.alias}"
def get_where_string(self, engine_type: str) -> str:
return self.get_select_string(engine_type=engine_type, just_alias=True)
def get_sql_value(self, value: t.Any) -> t.Any:
"""
When using DDL statements, we can't parameterise the values. An example
is when setting the default for a column. So we have to convert from
the Python type to a string representation which we can include in our
DDL statements.
:param value:
The Python value to convert to a string usable in a DDL statement
e.g. 1.
:returns:
The string usable in the DDL statement e.g. '1'.
"""
if isinstance(value, Default):
output = getattr(value, self._meta.engine_type)
elif value is None:
output = "null"
elif isinstance(value, (float, decimal.Decimal)):
output = str(value)
elif isinstance(value, str):
output = f"'{value}'"
elif isinstance(value, bool):
output = str(value).lower()
elif isinstance(value, datetime.datetime):
output = f"'{value.isoformat().replace('T', '')}'"
elif isinstance(value, bytes):
output = f"'{value.hex()}'"
elif isinstance(value, list):
# Convert to the array syntax.
output = (
"'{" + ", ".join([self.get_sql_value(i) for i in value]) + "}'"
)
else:
output = value
return output
@property
def column_type(self):
return self.__class__.__name__.upper()
    @property
    def querystring(self) -> QueryString:
        """
        Used when creating tables - renders the column's DDL fragment,
        e.g. ``"name" VARCHAR NOT NULL DEFAULT ''``.
        """
        # Base: quoted column name followed by its SQL type.
        query = f'"{self._meta.name}" {self.column_type}'
        # Constraint keywords, in the order SQL expects them.
        if self._meta.primary:
            query += " PRIMARY"
        if self._meta.key:
            query += " KEY"
        if self._meta.unique:
            query += " UNIQUE"
        if not self._meta.null:
            query += " NOT NULL"
        # Foreign key columns carry extra metadata describing the target
        # table and the ON DELETE / ON UPDATE actions.
        foreign_key_meta: t.Optional[ForeignKeyMeta] = getattr(
            self, "_foreign_key_meta", None
        )
        if foreign_key_meta:
            tablename = foreign_key_meta.resolved_references._meta.tablename
            on_delete = foreign_key_meta.on_delete.value
            on_update = foreign_key_meta.on_update.value
            query += (
                f" REFERENCES {tablename} (id)"
                f" ON DELETE {on_delete}"
                f" ON UPDATE {on_update}"
            )
        # Primary key columns don't get a DEFAULT clause.
        if not self._meta.primary:
            default = self.get_default_value()
            sql_value = self.get_sql_value(value=default)
            # Escape the value if it contains a pair of curly braces, otherwise
            # an empty value will appear in the compiled querystring.
            sql_value = (
                sql_value.replace("{}", "{{}}")
                if isinstance(sql_value, str)
                else sql_value
            )
            query += f" DEFAULT {sql_value}"
        return QueryString(query)
def copy(self) -> Column:
column: Column = copy.copy(self)
column._meta = self._meta.copy()
return column
    def __deepcopy__(self, memo) -> Column:
        """
        We override deepcopy, as it's too slow if it has to recreate
        everything. A shallow copy with a fresh ``_meta`` (via ``copy``)
        is sufficient.
        """
        return self.copy()
    def __str__(self):
        # Render the column as its DDL querystring.
        return self.querystring.__str__()
    def __repr__(self):
        # The owning table may not be resolved yet, in which case `_meta.table`
        # raises ValueError - fall back to a placeholder instead of crashing.
        try:
            table = self._meta.table
        except ValueError:
            table_class_name = "Unknown"
        else:
            table_class_name = table.__name__
        return (
            f"{table_class_name}.{self._meta.name} - "
            f"{self.__class__.__name__}"
        )
| 31.027826 | 126 | 0.588644 |
0f14aef0a25b4e124c35a7cf6ad42b11e06be9f0 | 996 | py | Python | python/hk/util.py | tskisner/so3g | 75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec | [
"MIT"
] | 5 | 2019-09-02T14:17:31.000Z | 2022-01-21T16:43:14.000Z | python/hk/util.py | tskisner/so3g | 75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec | [
"MIT"
] | 70 | 2019-05-16T23:42:40.000Z | 2022-03-23T14:35:35.000Z | python/hk/util.py | tskisner/so3g | 75c1d8dea84f862bdd2c9fa2c2f9d1c5b8da5eec | [
"MIT"
] | 2 | 2020-05-17T18:20:33.000Z | 2020-10-22T20:35:44.000Z | import numpy as np
from spt3g import core
def get_unix_time(g3_time):
    """Convert a G3Time or G3VectorTime time object to a unix timestamp or
    numpy vector (double) of unix timestamps."""
    if isinstance(g3_time, core.G3Time):
        # Scalar case: a single float timestamp.
        return g3_time.time / core.G3Units.seconds
    if isinstance(g3_time, core.G3VectorTime):
        # Vector case: one float per element.
        times = np.array([t.time for t in g3_time], dtype=float)
        times /= core.G3Units.seconds
        return times
def get_g3_time(unix_time):
    """Convert a double or numpy array of floats to G3Time or
    G3VectorTime."""
    ticks = None
    if isinstance(unix_time, core.G3VectorDouble):
        ticks = (np.array(unix_time) * core.G3Units.seconds).astype('int')
    elif isinstance(unix_time, np.ndarray) and unix_time.ndim == 1:
        ticks = (unix_time * core.G3Units.seconds).astype('int')
    if ticks is not None:
        # Vector input: wrap each integer tick count in a G3Time.
        return core.G3VectorTime([core.G3Time(t) for t in ticks])
    # Scalar input.
    return core.G3Time(int(unix_time * core.G3Units.seconds))
| 35.571429 | 74 | 0.685743 |
80fec4fd4580e7f1c2d0be61b3f54a29791a724e | 9,111 | py | Python | cli/tests/test_polypod/test_resolvers/test_core_resolver.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | null | null | null | cli/tests/test_polypod/test_resolvers/test_core_resolver.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | 1 | 2022-01-24T11:26:47.000Z | 2022-03-18T23:17:58.000Z | cli/tests/test_polypod/test_resolvers/test_core_resolver.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tempfile
from polyaxon import settings
from polyaxon.auxiliaries import (
get_default_init_container,
get_default_sidecar_container,
)
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.connections.schemas import V1BucketConnection, V1K8sResourceSchema
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.managers.agent import AgentConfigManager
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyflow import V1CompiledOperation, V1RunKind
from polyaxon.polypod.compiler.resolver import BaseResolver
from polyaxon.schemas.cli.agent_config import AgentConfig
from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.polypod_mark
class TestResolver(BaseTestCase):
    # Exercises BaseResolver construction and connection resolution against
    # an in-memory AgentConfig - no cluster access is needed.
    def setUp(self):
        super().setUp()
        # Minimal compiled operation: a single job with all plugins disabled,
        # reused by most tests below.
        self.compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.1,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}},
            }
        )

    def test_core_resolver_instance(self):
        # When no uuids are given, the resolver falls back to using the
        # names as uuids.
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        assert resolver.project_uuid == resolver.project_name
        assert resolver.run_uuid == resolver.run_name
        # Explicit uuids must be kept distinct from the names.
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            project_uuid="some_uuid",
            run_uuid="some_uuid",
            params=None,
        )
        assert resolver.project_uuid != resolver.project_name
        assert resolver.run_uuid != resolver.run_name

    def test_resolve_connections_with_no_config(self):
        # Without an agent config there is nothing to resolve against.
        settings.AGENT_CONFIG = None
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        with self.assertRaises(PolyaxonCompilerError):
            resolver.resolve_connections()

    def test_resolve_without_compiled_operation(self):
        # A compiled operation is mandatory for resolver construction.
        with self.assertRaises(PolyaxonCompilerError):
            BaseResolver(
                run=None,
                compiled_operation=None,
                owner_name="user",
                project_name="p1",
                project_uuid=None,
                run_name="j1",
                run_uuid=None,
                run_path="test",
                params=None,
            )

    def test_resolve_connections_with_invalid_config(self):
        fpath = tempfile.mkdtemp()
        AgentConfigManager.CONFIG_PATH = fpath
        # Two secrets shared across three connections, so we can verify
        # secret de-duplication and requested-connection filtering below.
        secret1 = V1K8sResourceType(
            name="secret1",
            schema=V1K8sResourceSchema(name="secret1"),
            is_requested=True,
        )
        secret2 = V1K8sResourceType(
            name="secret2",
            schema=V1K8sResourceSchema(name="secret2"),
            is_requested=True,
        )
        connection1 = V1ConnectionType(
            name="test_s3",
            kind=V1ConnectionKind.S3,
            schema=V1BucketConnection(bucket="s3//:foo"),
            secret=secret1.schema,
        )
        connection2 = V1ConnectionType(
            name="test_gcs",
            kind=V1ConnectionKind.GCS,
            schema=V1BucketConnection(bucket="gcs//:foo"),
            secret=secret1.schema,
        )
        connection3 = V1ConnectionType(
            name="test_wasb",
            kind=V1ConnectionKind.WASB,
            schema=V1BucketConnection(bucket="wasbs//:foo"),
            secret=secret2.schema,
        )
        settings.AGENT_CONFIG = AgentConfig(
            namespace="foo",
            artifacts_store=connection1,
            connections=[connection2, connection3],
        )
        # Case 1: the operation requests no connections - only the
        # artifacts store should be resolved.
        resolver = BaseResolver(
            run=None,
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {connection1.name: connection1}
        assert resolver.artifacts_store == connection1
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
        # Case 2: the run requests one extra connection - it should be
        # resolved alongside the artifacts store.
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.1,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": {connection3.name},
                },
            }
        )
        resolver = BaseResolver(
            run=None,
            compiled_operation=compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {
            connection1.name: connection1,
            connection3.name: connection3,
        }
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.artifacts_store == connection1
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
        # Case 3: the run requests all connections - every configured
        # connection should be resolved.
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.1,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": {
                        connection1.name,
                        connection2.name,
                        connection3.name,
                    },
                },
            }
        )
        resolver = BaseResolver(
            run=None,
            compiled_operation=compiled_operation,
            owner_name="user",
            project_name="p1",
            project_uuid=None,
            run_name="j1",
            run_uuid=None,
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {
            connection3.name: connection3,
            connection2.name: connection2,
            connection1.name: connection1,
        }
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.artifacts_store == connection1
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
| 35.313953 | 87 | 0.570409 |
1ba7cc0ce3dd09943696f8b0f4d71c9bbcbfd049 | 23,954 | py | Python | stable_baselines3/common/base_class.py | SonsOfRL/stable-baselines3 | 3253ee11e7a941946c3a252e63d981d6735df401 | [
"MIT"
] | null | null | null | stable_baselines3/common/base_class.py | SonsOfRL/stable-baselines3 | 3253ee11e7a941946c3a252e63d981d6735df401 | [
"MIT"
] | null | null | null | stable_baselines3/common/base_class.py | SonsOfRL/stable-baselines3 | 3253ee11e7a941946c3a252e63d981d6735df401 | [
"MIT"
] | 1 | 2022-03-15T03:07:41.000Z | 2022-03-15T03:07:41.000Z | """Abstract base classes for RL algorithms."""
import io
import pathlib
import time
from abc import ABC, abstractmethod
from collections import deque
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import logger, utils
from stable_baselines3.common.callbacks import BaseCallback, CallbackList, ConvertCallback, EvalCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.policies import BasePolicy, get_policy_from_name
from stable_baselines3.common.preprocessing import is_image_space
from stable_baselines3.common.save_util import load_from_zip_file, recursive_getattr, recursive_setattr, save_to_zip_file
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback
from stable_baselines3.common.utils import (
check_for_correct_spaces,
get_device,
get_schedule_fn,
set_random_seed,
update_learning_rate,
)
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, VecNormalize, VecTransposeImage, unwrap_vec_normalize
def maybe_make_env(env: Union[GymEnv, str, None], monitor_wrapper: bool, verbose: int) -> Optional[GymEnv]:
    """If env is a string, make the environment; otherwise, return env.

    :param env: (Union[GymEnv, str, None]) The environment to learn from.
    :param monitor_wrapper: (bool) Whether to wrap env in a Monitor when creating env.
    :param verbose: (int) logging verbosity
    :return A Gym (vector) environment.
    """
    # Non-string inputs (including None) are passed through untouched.
    if not isinstance(env, str):
        return env
    if verbose >= 1:
        print(f"Creating environment from the given name '{env}'")
    made_env = gym.make(env)
    if monitor_wrapper:
        made_env = Monitor(made_env, filename=None)
    return made_env
class BaseAlgorithm(ABC):
"""
The base of RL algorithms
:param policy: (Type[BasePolicy]) Policy object
:param env: (Union[GymEnv, str, None]) The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param policy_base: (Type[BasePolicy]) The base policy used by this method
:param learning_rate: (float or callable) learning rate for the optimizer,
it can be a function of the current progress remaining (from 1 to 0)
:param policy_kwargs: (Dict[str, Any]) Additional arguments to be passed to the policy on creation
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param verbose: (int) The verbosity level: 0 none, 1 training information, 2 debug
:param device: (Union[th.device, str]) Device on which the code should run.
By default, it will try to use a Cuda compatible device and fallback to cpu
if it is not possible.
:param support_multi_env: (bool) Whether the algorithm supports training
with multiple environments (as in A2C)
:param create_eval_env: (bool) Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: (bool) When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param seed: (Optional[int]) Seed for the pseudo random generators
:param use_sde: (bool) Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: (int) Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
"""
    def __init__(
        self,
        policy: Type[BasePolicy],
        env: Union[GymEnv, str, None],
        policy_base: Type[BasePolicy],
        learning_rate: Union[float, Callable],
        policy_kwargs: Dict[str, Any] = None,
        tensorboard_log: Optional[str] = None,
        verbose: int = 0,
        device: Union[th.device, str] = "auto",
        support_multi_env: bool = False,
        create_eval_env: bool = False,
        monitor_wrapper: bool = True,
        seed: Optional[int] = None,
        use_sde: bool = False,
        sde_sample_freq: int = -1,
    ):
        # A policy given as a string is looked up in the registry for the
        # algorithm's base policy class.
        if isinstance(policy, str) and policy_base is not None:
            self.policy_class = get_policy_from_name(policy_base, policy)
        else:
            self.policy_class = policy
        self.device = get_device(device)
        if verbose > 0:
            print(f"Using {self.device} device")
        self.env = None  # type: Optional[GymEnv]
        # get VecNormalize object if needed
        self._vec_normalize_env = unwrap_vec_normalize(env)
        self.verbose = verbose
        self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs
        self.observation_space = None  # type: Optional[gym.spaces.Space]
        self.action_space = None  # type: Optional[gym.spaces.Space]
        self.n_envs = None
        self.num_timesteps = 0
        # Used for updating schedules
        self._total_timesteps = 0
        self.eval_env = None
        self.seed = seed
        self.action_noise = None  # type: Optional[ActionNoise]
        self.start_time = None
        self.policy = None
        self.learning_rate = learning_rate
        self.tensorboard_log = tensorboard_log
        self.lr_schedule = None  # type: Optional[Callable]
        self._last_obs = None  # type: Optional[np.ndarray]
        # When using VecNormalize:
        self._last_original_obs = None  # type: Optional[np.ndarray]
        self._episode_num = 0
        # Used for gSDE only
        self.use_sde = use_sde
        self.sde_sample_freq = sde_sample_freq
        # Track the training progress remaining (from 1 to 0)
        # this is used to update the learning rate
        self._current_progress_remaining = 1
        # Buffers for logging
        self.ep_info_buffer = None  # type: Optional[deque]
        self.ep_success_buffer = None  # type: Optional[deque]
        # For logging
        self._n_updates = 0  # type: int
        # Create and wrap the env if needed
        if env is not None:
            if isinstance(env, str):
                # Also build the evaluation env from the same id when requested.
                if create_eval_env:
                    self.eval_env = maybe_make_env(env, monitor_wrapper, self.verbose)
                env = maybe_make_env(env, monitor_wrapper, self.verbose)
            env = self._wrap_env(env)
            self.observation_space = env.observation_space
            self.action_space = env.action_space
            self.n_envs = env.num_envs
            self.env = env
            # Algorithms such as off-policy ones only support a single env.
            if not support_multi_env and self.n_envs > 1:
                raise ValueError(
                    "Error: the model does not support multiple envs; it requires " "a single vectorized environment."
                )
            # gSDE samples exploration noise in a continuous space only.
            if self.use_sde and not isinstance(self.observation_space, gym.spaces.Box):
                raise ValueError("generalized State-Dependent Exploration (gSDE) can only be used with continuous actions.")
    def _wrap_env(self, env: GymEnv) -> VecEnv:
        # Ensure the env is vectorized, and transpose image observations to
        # channel-first (PyTorch convention) when needed.
        if not isinstance(env, VecEnv):
            if self.verbose >= 1:
                print("Wrapping the env in a DummyVecEnv.")
            env = DummyVecEnv([lambda: env])
        if is_image_space(env.observation_space) and not isinstance(env, VecTransposeImage):
            if self.verbose >= 1:
                print("Wrapping the env in a VecTransposeImage.")
            env = VecTransposeImage(env)
        return env
    @abstractmethod
    def _setup_model(self) -> None:
        """Create networks, buffer and optimizers."""
    def _get_eval_env(self, eval_env: Optional[GymEnv]) -> Optional[GymEnv]:
        """
        Return the environment that will be used for evaluation.

        Falls back to ``self.eval_env`` when no env is passed.

        :param eval_env: (Optional[GymEnv]))
        :return: (Optional[GymEnv])
        """
        if eval_env is None:
            eval_env = self.eval_env
        if eval_env is not None:
            eval_env = self._wrap_env(eval_env)
            # Evaluation assumes a single (vectorized) environment.
            assert eval_env.num_envs == 1
        return eval_env
    def _setup_lr_schedule(self) -> None:
        """Transform to callable if needed."""
        self.lr_schedule = get_schedule_fn(self.learning_rate)
    def _update_current_progress_remaining(self, num_timesteps: int, total_timesteps: int) -> None:
        """
        Compute current progress remaining (starts from 1 and ends to 0)

        :param num_timesteps: current number of timesteps
        :param total_timesteps:
        """
        self._current_progress_remaining = 1.0 - float(num_timesteps) / float(total_timesteps)
    def _update_learning_rate(self, optimizers: Union[List[th.optim.Optimizer], th.optim.Optimizer]) -> None:
        """
        Update the optimizers learning rate using the current learning rate schedule
        and the current progress remaining (from 1 to 0).

        :param optimizers: (Union[List[th.optim.Optimizer], th.optim.Optimizer])
            An optimizer or a list of optimizers.
        """
        # Log the current learning rate
        logger.record("train/learning_rate", self.lr_schedule(self._current_progress_remaining))
        if not isinstance(optimizers, list):
            optimizers = [optimizers]
        for optimizer in optimizers:
            update_learning_rate(optimizer, self.lr_schedule(self._current_progress_remaining))
    def get_env(self) -> Optional[VecEnv]:
        """
        Returns the current environment (can be None if not defined).

        :return: (Optional[VecEnv]) The current environment
        """
        return self.env
    def get_vec_normalize_env(self) -> Optional[VecNormalize]:
        """
        Return the ``VecNormalize`` wrapper of the training env
        if it exists.

        :return: Optional[VecNormalize] The ``VecNormalize`` env.
        """
        return self._vec_normalize_env
    def set_env(self, env: GymEnv) -> None:
        """
        Checks the validity of the environment, and if it is coherent, set it as the current environment.
        Furthermore wrap any non vectorized env into a vectorized
        checked parameters:
        - observation_space
        - action_space

        :param env: The environment for learning a policy
        """
        # Raises if the spaces don't match the ones the model was built with.
        check_for_correct_spaces(env, self.observation_space, self.action_space)
        # it must be coherent now
        # if it is not a VecEnv, make it a VecEnv
        env = self._wrap_env(env)
        self.n_envs = env.num_envs
        self.env = env
    def get_torch_variables(self) -> Tuple[List[str], List[str]]:
        """
        Get the name of the torch variables that will be saved.
        ``th.save`` and ``th.load`` will be used with the right device
        instead of the default pickling strategy.

        :return: (Tuple[List[str], List[str]])
            name of the variables with state dicts to save, name of additional torch tensors,
        """
        state_dicts = ["policy"]
        return state_dicts, []
    @abstractmethod
    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 100,
        tb_log_name: str = "run",
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "BaseAlgorithm":
        """
        Return a trained model. Implemented by each algorithm.

        :param total_timesteps: (int) The total number of samples (env steps) to train on
        :param callback: (MaybeCallback) callback(s) called at every step with state of the algorithm.
        :param log_interval: (int) The number of timesteps before logging.
        :param tb_log_name: (str) the name of the run for TensorBoard logging
        :param eval_env: (gym.Env) Environment that will be used to evaluate the agent
        :param eval_freq: (int) Evaluate the agent every ``eval_freq`` timesteps (this may vary a little)
        :param n_eval_episodes: (int) Number of episode to evaluate the agent
        :param eval_log_path: (Optional[str]) Path to a folder where the evaluations will be saved
        :param reset_num_timesteps: (bool) whether or not to reset the current timestep number (used in logging)
        :return: (BaseAlgorithm) the trained model
        """
    def predict(
        self,
        observation: np.ndarray,
        state: Optional[np.ndarray] = None,
        mask: Optional[np.ndarray] = None,
        deterministic: bool = False,
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Get the model's action(s) from an observation. Delegates to the policy.

        :param observation: (np.ndarray) the input observation
        :param state: (Optional[np.ndarray]) The last states (can be None, used in recurrent policies)
        :param mask: (Optional[np.ndarray]) The last masks (can be None, used in recurrent policies)
        :param deterministic: (bool) Whether or not to return deterministic actions.
        :return: (Tuple[np.ndarray, Optional[np.ndarray]]) the model's action and the next state
            (used in recurrent policies)
        """
        return self.policy.predict(observation, state, mask, deterministic)
    @classmethod
    def load(cls, load_path: str, env: Optional[GymEnv] = None, **kwargs) -> "BaseAlgorithm":
        """
        Load the model from a zip-file

        :param load_path: the location of the saved data
        :param env: the new environment to run the loaded model on
            (can be None if you only need prediction from a trained model) has priority over any saved environment
        :param kwargs: extra arguments to change the model when loading
        """
        data, params, tensors = load_from_zip_file(load_path)
        if "policy_kwargs" in data:
            # The device is re-resolved on load, so a stored device kwarg
            # must not override it.
            for arg_to_remove in ["device"]:
                if arg_to_remove in data["policy_kwargs"]:
                    del data["policy_kwargs"][arg_to_remove]
        if "policy_kwargs" in kwargs and kwargs["policy_kwargs"] != data["policy_kwargs"]:
            raise ValueError(
                f"The specified policy kwargs do not equal the stored policy kwargs."
                f"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}"
            )
        # check if observation space and action space are part of the saved parameters
        if "observation_space" not in data or "action_space" not in data:
            raise KeyError("The observation_space and action_space were not given, can't verify new environments")
        # check if given env is valid
        if env is not None:
            check_for_correct_spaces(env, data["observation_space"], data["action_space"])
        # if no new env was given use stored env if possible
        if env is None and "env" in data:
            env = data["env"]
        # noinspection PyArgumentList
        model = cls(
            policy=data["policy_class"],
            env=env,
            device="auto",
            _init_setup_model=False,  # pytype: disable=not-instantiable,wrong-keyword-args
        )
        # load parameters
        model.__dict__.update(data)
        model.__dict__.update(kwargs)
        model._setup_model()
        # put state_dicts back in place
        for name in params:
            attr = recursive_getattr(model, name)
            attr.load_state_dict(params[name])
        # put tensors back in place
        if tensors is not None:
            for name in tensors:
                recursive_setattr(model, name, tensors[name])
        # Sample gSDE exploration matrix, so it uses the right device
        # see issue #44
        if model.use_sde:
            model.policy.reset_noise()  # pytype: disable=attribute-error
        return model
def set_random_seed(self, seed: Optional[int] = None) -> None:
"""
Set the seed of the pseudo-random generators
(python, numpy, pytorch, gym, action_space)
:param seed: (int)
"""
if seed is None:
return
set_random_seed(seed, using_cuda=self.device == th.device("cuda"))
self.action_space.seed(seed)
if self.env is not None:
self.env.seed(seed)
if self.eval_env is not None:
self.eval_env.seed(seed)
    def _init_callback(
        self,
        callback: MaybeCallback,
        eval_env: Optional[VecEnv] = None,
        eval_freq: int = 10000,
        n_eval_episodes: int = 5,
        log_path: Optional[str] = None,
    ) -> BaseCallback:
        """
        Normalize the user-supplied callback(s) into a single ``BaseCallback``,
        optionally combined with an evaluation callback.

        :param callback: (MaybeCallback) Callback(s) called at every step with state of the algorithm.
        :param eval_env: (Optional[VecEnv]) Environment to use for evaluation.
        :param eval_freq: (Optional[int]) How many steps between evaluations; if None, do not evaluate.
        :param n_eval_episodes: (int) How many episodes to play per evaluation
        :param log_path: (Optional[str]) Path to a folder where the evaluations will be saved
        :return: (BaseCallback) A hybrid callback calling `callback` and performing evaluation.
        """
        # Convert a list of callbacks into a callback
        if isinstance(callback, list):
            callback = CallbackList(callback)
        # Convert functional callback to object
        if not isinstance(callback, BaseCallback):
            callback = ConvertCallback(callback)
        # Create eval callback in charge of the evaluation
        if eval_env is not None:
            eval_callback = EvalCallback(
                eval_env,
                best_model_save_path=log_path,
                log_path=log_path,
                eval_freq=eval_freq,
                n_eval_episodes=n_eval_episodes,
            )
            callback = CallbackList([callback, eval_callback])
        callback.init_callback(self)
        return callback
    def _setup_learn(
        self,
        total_timesteps: int,
        eval_env: Optional[GymEnv],
        callback: MaybeCallback = None,
        eval_freq: int = 10000,
        n_eval_episodes: int = 5,
        log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
        tb_log_name: str = "run",
    ) -> Tuple[int, BaseCallback]:
        """
        Initialize different variables needed for training.

        :param total_timesteps: (int) The total number of samples (env steps) to train on
        :param eval_env: (Optional[VecEnv]) Environment to use for evaluation.
        :param callback: (MaybeCallback) Callback(s) called at every step with state of the algorithm.
        :param eval_freq: (int) How many steps between evaluations
        :param n_eval_episodes: (int) How many episodes to play per evaluation
        :param log_path: (Optional[str]) Path to a folder where the evaluations will be saved
        :param reset_num_timesteps: (bool) Whether to reset or not the ``num_timesteps`` attribute
        :param tb_log_name: (str) the name of the run for tensorboard log
        :return: (Tuple[int, BaseCallback]) the (possibly adjusted) total timesteps and the callback
        """
        self.start_time = time.time()
        if self.ep_info_buffer is None or reset_num_timesteps:
            # Initialize buffers if they don't exist, or reinitialize if resetting counters
            self.ep_info_buffer = deque(maxlen=100)
            self.ep_success_buffer = deque(maxlen=100)
        if self.action_noise is not None:
            self.action_noise.reset()
        if reset_num_timesteps:
            self.num_timesteps = 0
            self._episode_num = 0
        else:
            # Make sure training timesteps are ahead of the internal counter
            total_timesteps += self.num_timesteps
        self._total_timesteps = total_timesteps
        # Avoid resetting the environment when calling ``.learn()`` consecutive times
        if reset_num_timesteps or self._last_obs is None:
            self._last_obs = self.env.reset()
            # Retrieve unnormalized observation for saving into the buffer
            if self._vec_normalize_env is not None:
                self._last_original_obs = self._vec_normalize_env.get_original_obs()
        if eval_env is not None and self.seed is not None:
            eval_env.seed(self.seed)
        eval_env = self._get_eval_env(eval_env)
        # Configure logger's outputs
        utils.configure_logger(self.verbose, self.tensorboard_log, tb_log_name, reset_num_timesteps)
        # Create eval callback if needed
        callback = self._init_callback(callback, eval_env, eval_freq, n_eval_episodes, log_path)
        return total_timesteps, callback
def _update_info_buffer(self, infos: List[Dict[str, Any]], dones: Optional[np.ndarray] = None) -> None:
"""
Retrieve reward and episode length and update the buffer
if using Monitor wrapper.
:param infos: ([dict])
"""
if dones is None:
dones = np.array([False] * len(infos))
for idx, info in enumerate(infos):
maybe_ep_info = info.get("episode")
maybe_is_success = info.get("is_success")
if maybe_ep_info is not None:
self.ep_info_buffer.extend([maybe_ep_info])
if maybe_is_success is not None and dones[idx]:
self.ep_success_buffer.append(maybe_is_success)
    def excluded_save_params(self) -> List[str]:
        """
        Returns the names of the parameters that should be excluded by default
        when saving the model (environments and buffers are not picklable /
        not meaningful to persist).

        :return: ([str]) List of parameters that should be excluded from save
        """
        return ["policy", "device", "env", "eval_env", "replay_buffer", "rollout_buffer", "_vec_normalize_env"]
    def save(
        self,
        path: Union[str, pathlib.Path, io.BufferedIOBase],
        exclude: Optional[Iterable[str]] = None,
        include: Optional[Iterable[str]] = None,
    ) -> None:
        """
        Save all the attributes of the object and the model parameters in a zip-file.

        :param (Union[str, pathlib.Path, io.BufferedIOBase]): path to the file where the rl agent should be saved
        :param exclude: name of parameters that should be excluded in addition to the default one
        :param include: name of parameters that might be excluded but should be included anyway
        """
        # copy parameter list so we don't mutate the original dict
        data = self.__dict__.copy()
        # Exclude is union of specified parameters (if any) and standard exclusions
        if exclude is None:
            exclude = []
        exclude = set(exclude).union(self.excluded_save_params())
        # Do not exclude params if they are specifically included
        if include is not None:
            exclude = exclude.difference(include)
        state_dicts_names, tensors_names = self.get_torch_variables()
        # any params that are in the save vars must not be saved by data
        torch_variables = state_dicts_names + tensors_names
        for torch_var in torch_variables:
            # we need to get only the name of the top most module as we'll remove that
            var_name = torch_var.split(".")[0]
            exclude.add(var_name)
        # Remove parameter entries of parameters which are to be excluded
        for param_name in exclude:
            data.pop(param_name, None)
        # Build dict of tensor variables
        tensors = None
        if tensors_names is not None:
            tensors = {}
            for name in tensors_names:
                attr = recursive_getattr(self, name)
                tensors[name] = attr
        # Build dict of state_dicts
        params_to_save = {}
        for name in state_dicts_names:
            attr = recursive_getattr(self, name)
            # Retrieve state dict
            params_to_save[name] = attr.state_dict()
        save_to_zip_file(path, data=data, params=params_to_save, tensors=tensors)
| 41.804538 | 121 | 0.647199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.