hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72d97b65416bedcb025aa3932d05d4b6dca345b | 8,906 | py | Python | tensorflow/contrib/distributions/python/ops/inverse_gamma.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 65 | 2016-09-26T01:30:40.000Z | 2021-08-11T17:00:41.000Z | tensorflow/contrib/distributions/python/ops/inverse_gamma.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 5 | 2017-02-21T08:37:52.000Z | 2017-03-29T05:46:05.000Z | tensorflow/contrib/distributions/python/ops/inverse_gamma.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 11 | 2017-09-10T16:22:21.000Z | 2021-08-09T09:24:50.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class InverseGamma(distribution.Distribution):
  """The `InverseGamma` distribution with parameter alpha and beta.

  The parameters are the shape and inverse scale parameters alpha, beta.

  The PDF of this distribution is:

  ```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```

  and the CDF of this distribution is:

  ```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```

  where GammaInc is the upper incomplete Gamma function.

  Examples:

  ```python
  dist = InverseGamma(alpha=3.0, beta=2.0)
  dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
  ```

  """

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: Floating point tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: Floating point tensor, the scale params of the distribution(s).
        beta must contain only positive values.
      validate_args: `Boolean`, default `False`. Whether to assert that
        `a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and
        `log_prob(x)`. If `validate_args` is `False` and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      # Positivity assertions are attached to the graph only when
      # `validate_args` is set; the identity ops give the parameters stable
      # names and tie them to the assertions via control dependencies.
      with ops.control_dependencies([
          check_ops.assert_positive(alpha),
          check_ops.assert_positive(beta),
      ] if validate_args else []):
        self._alpha = array_ops.identity(alpha, name="alpha")
        self._beta = array_ops.identity(beta, name="beta")
    super(InverseGamma, self).__init__(
        dtype=self._alpha.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        is_reparameterized=False,
        parameters=parameters,
        graph_parents=[self._alpha, self._beta],
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters broadcast to the same batch shape.
    return dict(
        zip(("alpha", "beta"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def alpha(self):
    """Shape parameter."""
    return self._alpha

  @property
  def beta(self):
    """Scale parameter."""
    return self._beta

  def _batch_shape(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.alpha), array_ops.shape(self.beta))

  def _get_batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.alpha.get_shape(), self.beta.get_shape())

  def _event_shape(self):
    # Scalar event: each sample is a single value per batch member.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    """See the documentation for tf.random_gamma for more details."""
    # If X ~ Gamma(alpha, beta), then 1/X ~ InverseGamma(alpha, beta).
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # log pdf = alpha*log(beta) - lgamma(alpha) - (alpha+1)*log(x) - beta/x
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) - self.beta / x)

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.alpha, self.beta / x)

  @distribution_util.AppendDocstring(
      """This is defined to be

      ```
      entropy = alpha - log(beta) + log(Gamma(alpha))
      + (1-alpha)digamma(alpha)
      ```

      where digamma(alpha) is the digamma function.""")
  def _entropy(self):
    # NOTE(review): the appended docstring above disagrees with the code:
    # the implementation computes
    #   alpha + log(beta) + lgamma(alpha) - (1 + alpha) * digamma(alpha),
    # which is the standard inverse-gamma entropy; the text's "- log(beta)"
    # and "(1-alpha)" appear to be typos and should be corrected upstream.
    return (self.alpha +
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) -
            (1. + self.alpha) * math_ops.digamma(self.alpha))

  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
      when `alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is
      `False`, an exception will be raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.beta / (self.alpha - 1.)
    if self.allow_nan_stats:
      # Fill batch members with alpha <= 1 (undefined mean) with NaN.
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 1., mean,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype), self.alpha,
              message="mean not defined for components of self.alpha <= 1"),
      ], mean)

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `alpha > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    # var = beta^2 / ((alpha - 1)^2 * (alpha - 2)), defined for alpha > 2.
    var = (math_ops.square(self.beta) /
           (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 2., var,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype), self.alpha,
              message="variance not defined for components of alpha <= 2"),
      ], var)

  def _mode(self):
    """The mode of an inverse gamma distribution is `beta / (alpha + 1)`."""
    return self.beta / (self.alpha + 1.)
class InverseGammaWithSoftplusAlphaBeta(InverseGamma):
  """Inverse Gamma with softplus applied to `alpha` and `beta`."""

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusAlphaBeta"):
    """Construct an `InverseGamma`, mapping unconstrained params via softplus.

    Args:
      alpha: Unconstrained floating point tensor; `softplus(alpha)` becomes
        the (positive) shape parameter.
      beta: Unconstrained floating point tensor; `softplus(beta)` becomes
        the (positive) scale parameter.
      validate_args: `Boolean`, default `False`. See `InverseGamma`.
      allow_nan_stats: `Boolean`, default `True`. See `InverseGamma`.
      name: The name to prepend to all ops created by this distribution.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
          alpha=nn.softplus(alpha, name="softplus_alpha"),
          # NOTE(review): the op name says "gamma" but transforms `beta`;
          # looks like a copy-paste leftover. Renaming would change graph
          # node names, so it is only flagged here.
          beta=nn.softplus(beta, name="softplus_gamma"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
| 36.954357 | 80 | 0.656861 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class InverseGamma(distribution.Distribution):
  """The `InverseGamma` distribution with shape `alpha` and scale `beta`.

  pdf(x) = (beta^alpha)/Gamma(alpha) * x^(-alpha-1) * e^(-beta/x), x > 0
  cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0
  where GammaInc is the upper incomplete Gamma function.
  """

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma distributions with parameters `alpha`, `beta`.

    `alpha` and `beta` must broadcast together and contain only positive
    values; positivity is asserted in-graph only when `validate_args` is
    `True`.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      # Identity ops name the parameters and attach the (optional)
      # positivity assertions via control dependencies.
      with ops.control_dependencies([
          check_ops.assert_positive(alpha),
          check_ops.assert_positive(beta),
      ] if validate_args else []):
        self._alpha = array_ops.identity(alpha, name="alpha")
        self._beta = array_ops.identity(beta, name="beta")
    super(InverseGamma, self).__init__(
        dtype=self._alpha.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        is_reparameterized=False,
        parameters=parameters,
        graph_parents=[self._alpha, self._beta],
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters share the sample/batch shape.
    return dict(
        zip(("alpha", "beta"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def alpha(self):
    """Shape parameter."""
    return self._alpha

  @property
  def beta(self):
    """Scale parameter."""
    return self._beta

  def _batch_shape(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.alpha), array_ops.shape(self.beta))

  def _get_batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.alpha.get_shape(), self.beta.get_shape())

  def _event_shape(self):
    # Scalar event shape.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    """Draw `n` samples; see tf.random_gamma for details."""
    # If X ~ Gamma(alpha, beta), then 1/X ~ InverseGamma(alpha, beta).
    return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                        dtype=self.dtype, seed=seed)

  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # log pdf = alpha*log(beta) - lgamma(alpha) - (alpha+1)*log(x) - beta/x
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) - self.beta / x)

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # igammac is the upper regularized incomplete gamma function Q(a, x),
    # which is exactly the inverse-gamma CDF evaluated at beta / x.
    return math_ops.igammac(self.alpha, self.beta / x)

  @distribution_util.AppendDocstring(
      """This is defined to be

      ```
      entropy = alpha - log(beta) + log(Gamma(alpha))
      + (1-alpha)digamma(alpha)
      ```

      where digamma(alpha) is the digamma function.""")
  def _entropy(self):
    # NOTE(review): the appended docstring's "- log(beta)" and "(1-alpha)"
    # disagree with the code, which computes the standard entropy
    # alpha + log(beta) + lgamma(alpha) - (1 + alpha) * digamma(alpha).
    return (self.alpha +
            math_ops.log(self.beta) +
            math_ops.lgamma(self.alpha) -
            (1. + self.alpha) * math_ops.digamma(self.alpha))

  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
      when `alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is
      `False`, an exception will be raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.beta / (self.alpha - 1.)
    if self.allow_nan_stats:
      # Mean is undefined for alpha <= 1; fill those batch members with NaN.
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 1., mean,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), self.dtype), self.alpha,
              message="mean not defined for components of self.alpha <= 1"),
      ], mean)

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `alpha > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    # var = beta^2 / ((alpha - 1)^2 * (alpha - 2)), defined for alpha > 2.
    var = (math_ops.square(self.beta) /
           (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 2., var,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype), self.alpha,
              message="variance not defined for components of alpha <= 2"),
      ], var)

  def _mode(self):
    """The mode of an inverse gamma distribution is `beta / (alpha + 1)`."""
    return self.beta / (self.alpha + 1.)
class InverseGammaWithSoftplusAlphaBeta(InverseGamma):
  """Inverse Gamma with softplus applied to `alpha` and `beta`."""

  def __init__(self,
               alpha,
               beta,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusAlphaBeta"):
    """Construct an `InverseGamma` from unconstrained params via softplus.

    `softplus(alpha)` and `softplus(beta)` supply the positive shape and
    scale parameters; remaining arguments are forwarded to `InverseGamma`.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[alpha, beta]) as ns:
      super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
          alpha=nn.softplus(alpha, name="softplus_alpha"),
          # NOTE(review): op name says "gamma" but transforms `beta` —
          # likely a copy-paste leftover; renaming would alter graph names.
          beta=nn.softplus(beta, name="softplus_gamma"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
| true | true |
f72d97d1ff332aa397f3106364df1be3656a74db | 7,814 | py | Python | make_prg/subcommands/update.py | leoisl/make_prg | 9204cb8a60d8fae0985b4eb464c5dd99c1338d45 | [
"MIT"
] | 1 | 2021-05-07T02:04:07.000Z | 2021-05-07T02:04:07.000Z | make_prg/subcommands/update.py | leoisl/make_prg | 9204cb8a60d8fae0985b4eb464c5dd99c1338d45 | [
"MIT"
] | 9 | 2021-03-22T12:28:06.000Z | 2021-12-17T06:46:51.000Z | make_prg/subcommands/update.py | leoisl/make_prg | 9204cb8a60d8fae0985b4eb464c5dd99c1338d45 | [
"MIT"
] | 2 | 2021-06-29T04:54:22.000Z | 2022-01-03T12:19:59.000Z | import multiprocessing
import os
import shutil
from pathlib import Path
from loguru import logger
from make_prg import io_utils
from make_prg.denovo_paths_reader import DenovoPathsDB
from make_prg.prg_builder import PrgBuilderCollection, PrgBuilder, LeafNotFoundException
from make_prg.utils import output_files_already_exist
def register_parser(subparsers):
    """Attach the ``update`` subcommand to *subparsers* and return it.

    Declares every command-line option needed to update PRGs with denovo
    sequences and registers :func:`run` as the handler invoked by the
    top-level dispatcher.
    """
    parser = subparsers.add_parser(
        "update",
        usage="make_prg update",
        help="Update PRGs given new sequences output by pandora.",
    )
    # Required inputs: update data structures and pandora's denovo paths.
    parser.add_argument(
        "-u", "--update_DS", action="store", type=str, required=True,
        help=(
            "Filepath to the update data structures. Should point to a file *.update_DS."
        ),
    )
    parser.add_argument(
        "-d", "--denovo_paths", action="store", type=str, required=True,
        help=(
            "Filepath containing denovo sequences output by pandora. Should point to a denovo_paths.txt file."
        ),
    )
    parser.add_argument(
        "-o", "--output_prefix", action="store", type=str, required=True,
        help="Output prefix: prefix for the output files",
    )
    # Tuning / environment options.
    parser.add_argument(
        "-t", "--threads", action="store", type=int, default=1,
        help="Number of threads",
    )
    parser.add_argument(
        "--mafft",
        help="Path to MAFFT executable. By default, it is assumed to be on PATH",
        default="mafft",
    )
    parser.add_argument(
        "--keep_temp", action="store_true", default=False, help="Keep temp files."
    )
    parser.set_defaults(func=run)
    return parser
def get_stats_on_variants(stats_files):
    """Aggregate per-locus variant-update statistics.

    Each file in *stats_files* contains one line of the form
    ``<locus> <nb_applied> <nb_failed>`` (as written by ``update``); the two
    counts are summed over all files.

    Returns:
        Tuple ``(total_applied, total_failed)``.
    """
    total_applied = 0
    total_failed = 0
    for stats_path in stats_files:
        with open(stats_path) as stats_fh:
            fields = stats_fh.readline().strip().split()
            total_applied += int(fields[1])
            total_failed += int(fields[2])
    return total_applied, total_failed
def update(
    locus_name,
    prg_builder_pickle_filepath,
    variant_nodes_with_mutation,
    temp_dir,
    mafft: str,
):
    """Apply denovo variants to one locus and regenerate its PRG.

    Deserializes the pickled ``PrgBuilder`` for *locus_name*, batches each
    mutated sequence onto the tree leaf covering its interval, realigns the
    touched leaves with MAFFT, rebuilds the PRG, and writes
    ``<temp_dir>/<locus>/<locus>.prg.fa`` plus a one-line ``.stats`` file
    recording applied/failed variant counts (consumed by
    ``get_stats_on_variants``).

    Args:
        locus_name: Name of the locus being updated.
        prg_builder_pickle_filepath: Path to the serialized PrgBuilder.
        variant_nodes_with_mutation: Denovo variant records for this locus;
            may be empty, in which case the PRG is simply regenerated.
        temp_dir: Directory (a Path) receiving the per-locus output files.
        mafft: Path to the MAFFT executable used to realign updated leaves.
    """
    prg_builder_for_locus = PrgBuilder.deserialize(prg_builder_pickle_filepath)
    nb_of_variants_sucessfully_updated = 0
    nb_of_variants_with_failed_update = 0
    we_have_variants = len(variant_nodes_with_mutation) > 0
    if we_have_variants:
        logger.debug(f"Updating {locus_name} ...")
        leaves_to_update = set()
        for variant_node_with_mutation in variant_nodes_with_mutation:
            try:
                prg_builder_tree_node = prg_builder_for_locus.get_node_given_interval(
                    variant_node_with_mutation.key
                )
                prg_builder_tree_node.add_seq_to_batch_update(
                    variant_node_with_mutation.mutated_node_sequence
                )
                leaves_to_update.add(prg_builder_tree_node)
                nb_of_variants_sucessfully_updated += 1
            except LeafNotFoundException as exc:
                # A variant whose interval no longer maps onto a leaf is
                # counted as failed instead of aborting the whole locus.
                logger.debug(f"Failed finding leaf: {exc}")
                nb_of_variants_with_failed_update += 1
        # update the changed leaves
        for leaf in leaves_to_update:
            leaf.batch_update(temp_dir, mafft=mafft)
        logger.debug(
            f"Updated {locus_name}: {len(variant_nodes_with_mutation)} denovo sequences added!"
        )
    else:
        logger.debug(f"{locus_name} has no new variants, no update needed")
    # regenerate PRG
    locus_prefix = temp_dir / locus_name / locus_name
    locus_prefix_parent = locus_prefix.parent
    os.makedirs(locus_prefix_parent, exist_ok=True)
    prg = prg_builder_for_locus.build_prg()
    logger.info(f"Write PRG file to {locus_prefix}.prg.fa")
    io_utils.write_prg(str(locus_prefix), prg)
    with open(f"{locus_prefix}.stats", "w") as stats_filehandler:
        print(
            f"{locus_name} {nb_of_variants_sucessfully_updated} {nb_of_variants_with_failed_update}",
            file=stats_filehandler,
        )
    # Note: we intentionally do not regenerate updateable data structure here because we don't want to update
    # PRGs on top of already updated PRGs
    # TODO: change this?
def run(options):
    """Entry point for ``make_prg update``.

    Loads the update data structures and pandora's denovo paths, updates
    every PRG in parallel (loci without variants are simply regenerated),
    concatenates the per-locus PRGs into ``<output_prefix>.prg.fa``, reports
    aggregate variant statistics, and cleans up the temp directory unless
    ``--keep_temp`` was given.

    Raises:
        RuntimeError: if any expected output file already exists.
    """
    if output_files_already_exist(options.output_prefix):
        raise RuntimeError("One or more output files already exists, aborting run...")
    # NB: don't use logging, it causes deadlocks: https://pythonspeed.com/articles/python-multiprocessing/
    logger.info("Reading update data structures...")
    prg_builder_collection = PrgBuilderCollection.deserialize(options.update_DS)
    prg_builder_collection.to_absolute_paths(Path(options.update_DS).parent)
    logger.info(f"Reading {options.denovo_paths}...")
    denovo_paths_db = DenovoPathsDB(options.denovo_paths)
    output_dir = Path(options.output_prefix).parent
    os.makedirs(output_dir, exist_ok=True)
    temp_path = Path(options.output_prefix + "_tmp")
    os.makedirs(temp_path, exist_ok=True)
    # update all PRGs with denovo sequences
    logger.debug(f"Using {options.threads} threads to update PRGs...")
    multithreaded_input = []
    for (
        locus_name,
        prg_builder_pickle_filepath,
    ) in (
        prg_builder_collection.locus_name_to_pickle_files.items()
    ):  # we do for all PRGs as those that don't have denovo variants will be generated also
        variant_nodes_with_mutation = (
            denovo_paths_db.locus_name_to_variant_nodes_with_mutation.get(
                locus_name, []
            )
        )
        multithreaded_input.append(
            (
                locus_name,
                prg_builder_pickle_filepath,
                variant_nodes_with_mutation,
                temp_path,
                options.mafft,
            )
        )
    # maxtasksperchild=1 gives each locus a fresh worker process.
    with multiprocessing.Pool(options.threads, maxtasksperchild=1) as pool:
        pool.starmap(update, multithreaded_input, chunksize=1)
    logger.success(f"All PRGs updated!")
    # concatenate output PRGs
    logger.info("Concatenating files from several threads into single, final file...")
    prg_files = [
        f"{temp_path}/{locus_name}/{locus_name}.prg.fa"
        for locus_name in prg_builder_collection.locus_name_to_pickle_files.keys()
    ]
    io_utils.concatenate_text_files(prg_files, options.output_prefix + ".prg.fa")
    # sum up stats files and output stats
    stats_files = [
        f"{temp_path}/{locus_name}/{locus_name}.stats"
        for locus_name in prg_builder_collection.locus_name_to_pickle_files.keys()
    ]
    (
        nb_of_variants_successfully_applied,
        nb_of_variants_that_failed_to_be_applied,
    ) = get_stats_on_variants(stats_files)
    logger.success(
        f"Number of variants successfully applied: {nb_of_variants_successfully_applied}"
    )
    logger.warning(
        f"Number of variants that failed to be applied: {nb_of_variants_that_failed_to_be_applied}"
    )
    # remove temp files if needed
    if not options.keep_temp and temp_path.exists():
        logger.debug("Removing temp files...")
        shutil.rmtree(temp_path)
    logger.success("All done!")
| 35.680365 | 110 | 0.677374 | import multiprocessing
import os
import shutil
from pathlib import Path
from loguru import logger
from make_prg import io_utils
from make_prg.denovo_paths_reader import DenovoPathsDB
from make_prg.prg_builder import PrgBuilderCollection, PrgBuilder, LeafNotFoundException
from make_prg.utils import output_files_already_exist
def register_parser(subparsers):
    """Attach the ``update`` subcommand to *subparsers* and return it.

    Declares every command-line option needed to update PRGs with denovo
    sequences and registers :func:`run` as the handler invoked by the
    top-level dispatcher.
    """
    parser = subparsers.add_parser(
        "update",
        usage="make_prg update",
        help="Update PRGs given new sequences output by pandora.",
    )
    # Required inputs: update data structures and pandora's denovo paths.
    parser.add_argument(
        "-u", "--update_DS", action="store", type=str, required=True,
        help=(
            "Filepath to the update data structures. Should point to a file *.update_DS."
        ),
    )
    parser.add_argument(
        "-d", "--denovo_paths", action="store", type=str, required=True,
        help=(
            "Filepath containing denovo sequences output by pandora. Should point to a denovo_paths.txt file."
        ),
    )
    parser.add_argument(
        "-o", "--output_prefix", action="store", type=str, required=True,
        help="Output prefix: prefix for the output files",
    )
    # Tuning / environment options.
    parser.add_argument(
        "-t", "--threads", action="store", type=int, default=1,
        help="Number of threads",
    )
    parser.add_argument(
        "--mafft",
        help="Path to MAFFT executable. By default, it is assumed to be on PATH",
        default="mafft",
    )
    parser.add_argument(
        "--keep_temp", action="store_true", default=False, help="Keep temp files."
    )
    parser.set_defaults(func=run)
    return parser
def get_stats_on_variants(stats_files):
    """Aggregate per-locus variant-update statistics.

    Each file in *stats_files* contains one line of the form
    ``<locus> <nb_applied> <nb_failed>`` (as written by ``update``); the two
    counts are summed over all files.

    Returns:
        Tuple ``(total_applied, total_failed)``.
    """
    total_applied = 0
    total_failed = 0
    for stats_path in stats_files:
        with open(stats_path) as stats_fh:
            fields = stats_fh.readline().strip().split()
            total_applied += int(fields[1])
            total_failed += int(fields[2])
    return total_applied, total_failed
def update(
    locus_name,
    prg_builder_pickle_filepath,
    variant_nodes_with_mutation,
    temp_dir,
    mafft: str,
):
    """Apply denovo variants to one locus and regenerate its PRG.

    Deserializes the pickled ``PrgBuilder`` for *locus_name*, batches each
    mutated sequence onto the tree leaf covering its interval, realigns the
    touched leaves with MAFFT, rebuilds the PRG, and writes
    ``<temp_dir>/<locus>/<locus>.prg.fa`` plus a one-line ``.stats`` file
    with applied/failed variant counts.

    Args:
        locus_name: Name of the locus being updated.
        prg_builder_pickle_filepath: Path to the serialized PrgBuilder.
        variant_nodes_with_mutation: Denovo variant records for this locus;
            may be empty, in which case the PRG is simply regenerated.
        temp_dir: Directory (a Path) receiving the per-locus output files.
        mafft: Path to the MAFFT executable used to realign updated leaves.
    """
    prg_builder_for_locus = PrgBuilder.deserialize(prg_builder_pickle_filepath)
    nb_of_variants_sucessfully_updated = 0
    nb_of_variants_with_failed_update = 0
    we_have_variants = len(variant_nodes_with_mutation) > 0
    if we_have_variants:
        logger.debug(f"Updating {locus_name} ...")
        leaves_to_update = set()
        for variant_node_with_mutation in variant_nodes_with_mutation:
            try:
                prg_builder_tree_node = prg_builder_for_locus.get_node_given_interval(
                    variant_node_with_mutation.key
                )
                prg_builder_tree_node.add_seq_to_batch_update(
                    variant_node_with_mutation.mutated_node_sequence
                )
                leaves_to_update.add(prg_builder_tree_node)
                nb_of_variants_sucessfully_updated += 1
            except LeafNotFoundException as exc:
                # A variant whose interval no longer maps onto a leaf is
                # counted as failed instead of aborting the whole locus.
                logger.debug(f"Failed finding leaf: {exc}")
                nb_of_variants_with_failed_update += 1
        # Realign every leaf that received new sequences.
        for leaf in leaves_to_update:
            leaf.batch_update(temp_dir, mafft=mafft)
        logger.debug(
            f"Updated {locus_name}: {len(variant_nodes_with_mutation)} denovo sequences added!"
        )
    else:
        logger.debug(f"{locus_name} has no new variants, no update needed")
    # Regenerate the PRG (even when there were no variants).
    locus_prefix = temp_dir / locus_name / locus_name
    locus_prefix_parent = locus_prefix.parent
    os.makedirs(locus_prefix_parent, exist_ok=True)
    prg = prg_builder_for_locus.build_prg()
    logger.info(f"Write PRG file to {locus_prefix}.prg.fa")
    io_utils.write_prg(str(locus_prefix), prg)
    with open(f"{locus_prefix}.stats", "w") as stats_filehandler:
        print(
            f"{locus_name} {nb_of_variants_sucessfully_updated} {nb_of_variants_with_failed_update}",
            file=stats_filehandler,
        )
    # The updateable data structure is intentionally NOT regenerated, so
    # PRGs are never updated on top of already-updated PRGs.
    # TODO: change this?
def run(options):
    """Entry point for ``make_prg update``.

    Loads the update data structures and pandora's denovo paths, updates
    every PRG in parallel (loci without variants are simply regenerated),
    concatenates the per-locus PRGs into ``<output_prefix>.prg.fa``, reports
    aggregate variant statistics, and cleans up the temp directory unless
    ``--keep_temp`` was given.

    Raises:
        RuntimeError: if any expected output file already exists.
    """
    if output_files_already_exist(options.output_prefix):
        raise RuntimeError("One or more output files already exists, aborting run...")
    # NB: don't use logging, it causes deadlocks: https://pythonspeed.com/articles/python-multiprocessing/
    logger.info("Reading update data structures...")
    prg_builder_collection = PrgBuilderCollection.deserialize(options.update_DS)
    prg_builder_collection.to_absolute_paths(Path(options.update_DS).parent)
    logger.info(f"Reading {options.denovo_paths}...")
    denovo_paths_db = DenovoPathsDB(options.denovo_paths)
    output_dir = Path(options.output_prefix).parent
    os.makedirs(output_dir, exist_ok=True)
    temp_path = Path(options.output_prefix + "_tmp")
    os.makedirs(temp_path, exist_ok=True)
    # Build one work item per locus; loci without denovo variants are still
    # processed so that their PRGs get (re)generated.
    logger.debug(f"Using {options.threads} threads to update PRGs...")
    multithreaded_input = []
    for (
        locus_name,
        prg_builder_pickle_filepath,
    ) in (
        prg_builder_collection.locus_name_to_pickle_files.items()
    ):
        variant_nodes_with_mutation = (
            denovo_paths_db.locus_name_to_variant_nodes_with_mutation.get(
                locus_name, []
            )
        )
        multithreaded_input.append(
            (
                locus_name,
                prg_builder_pickle_filepath,
                variant_nodes_with_mutation,
                temp_path,
                options.mafft,
            )
        )
    # maxtasksperchild=1 gives each locus a fresh worker process.
    with multiprocessing.Pool(options.threads, maxtasksperchild=1) as pool:
        pool.starmap(update, multithreaded_input, chunksize=1)
    logger.success(f"All PRGs updated!")
    # concatenate output PRGs
    logger.info("Concatenating files from several threads into single, final file...")
    prg_files = [
        f"{temp_path}/{locus_name}/{locus_name}.prg.fa"
        for locus_name in prg_builder_collection.locus_name_to_pickle_files.keys()
    ]
    io_utils.concatenate_text_files(prg_files, options.output_prefix + ".prg.fa")
    # sum up stats files and output stats
    stats_files = [
        f"{temp_path}/{locus_name}/{locus_name}.stats"
        for locus_name in prg_builder_collection.locus_name_to_pickle_files.keys()
    ]
    (
        nb_of_variants_successfully_applied,
        nb_of_variants_that_failed_to_be_applied,
    ) = get_stats_on_variants(stats_files)
    logger.success(
        f"Number of variants successfully applied: {nb_of_variants_successfully_applied}"
    )
    logger.warning(
        f"Number of variants that failed to be applied: {nb_of_variants_that_failed_to_be_applied}"
    )
    # remove temp files if needed
    if not options.keep_temp and temp_path.exists():
        logger.debug("Removing temp files...")
        shutil.rmtree(temp_path)
    logger.success("All done!")
| true | true |
f72d9834477d0704ff5a7f19701aa575f0729d88 | 9,096 | py | Python | tests/contrib/sensors/test_hdfs_sensor.py | tekn0ir/incubator-airflow | 7df4405aa5a0c99e51722321caa7af660d35794b | [
"Apache-2.0"
] | 4 | 2019-01-17T06:21:45.000Z | 2020-06-20T01:59:57.000Z | tests/contrib/sensors/test_hdfs_sensor.py | tekn0ir/incubator-airflow | 7df4405aa5a0c99e51722321caa7af660d35794b | [
"Apache-2.0"
] | 14 | 2018-10-24T03:15:11.000Z | 2019-01-02T19:02:58.000Z | tests/contrib/sensors/test_hdfs_sensor.py | tekn0ir/incubator-airflow | 7df4405aa5a0c99e51722321caa7af660d35794b | [
"Apache-2.0"
] | 6 | 2020-06-09T02:16:58.000Z | 2021-12-27T15:46:32.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
import re
from datetime import timedelta
from airflow.contrib.sensors.hdfs_sensor import HdfsSensorFolder, HdfsSensorRegex
from airflow.exceptions import AirflowSensorTimeout
class HdfsSensorFolderTests(unittest.TestCase):
    """Behaviour of HdfsSensorFolder against a fake, in-memory HDFS hook."""

    def setUp(self):
        # FakeHDFSHook (from the test suite) simulates HDFS contents so the
        # sensor can be poked without a real cluster.
        from tests.core import FakeHDFSHook
        self.hook = FakeHDFSHook
        self.log = logging.getLogger()
        self.log.setLevel(logging.DEBUG)

    def test_should_be_empty_directory(self):
        """
        test the empty directory behaviour: poking an empty directory with
        `be_empty=True` succeeds without raising.
        :return:
        """
        # Given
        self.log.debug('#' * 10)
        self.log.debug('Running %s', self._testMethodName)
        self.log.debug('#' * 10)
        task = HdfsSensorFolder(task_id='Should_be_empty_directory',
                                filepath='/datadirectory/empty_directory',
                                be_empty=True,
                                timeout=1,
                                retry_delay=timedelta(seconds=1),
                                poke_interval=1,
                                hook=self.hook)

        # When
        task.execute(None)

        # Then
        # Nothing happens, nothing is raised exec is ok

    def test_should_be_empty_directory_fail(self):
        """
        test the empty directory behaviour: poking a non-empty directory
        with `be_empty=True` must time out.
        :return:
        """
        # Given
        self.log.debug('#' * 10)
        self.log.debug('Running %s', self._testMethodName)
        self.log.debug('#' * 10)
        task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
                                filepath='/datadirectory/not_empty_directory',
                                be_empty=True,
                                timeout=1,
                                retry_delay=timedelta(seconds=1),
                                poke_interval=1,
                                hook=self.hook)

        # When
        # Then
        with self.assertRaises(AirflowSensorTimeout):
            task.execute(None)

    def test_should_be_a_non_empty_directory(self):
        """
        test the non-empty directory behaviour: the default sensor succeeds
        when the directory contains files.
        :return:
        """
        # Given
        self.log.debug('#' * 10)
        self.log.debug('Running %s', self._testMethodName)
        self.log.debug('#' * 10)
        task = HdfsSensorFolder(task_id='Should_be_non_empty_directory',
                                filepath='/datadirectory/not_empty_directory',
                                timeout=1,
                                retry_delay=timedelta(seconds=1),
                                poke_interval=1,
                                hook=self.hook)

        # When
        task.execute(None)

        # Then
        # Nothing happens, nothing is raised exec is ok

    def test_should_be_non_empty_directory_fail(self):
        """
        test the non-empty directory behaviour: the default sensor times out
        on an empty directory.
        :return:
        """
        # Given
        self.log.debug('#' * 10)
        self.log.debug('Running %s', self._testMethodName)
        self.log.debug('#' * 10)
        # NOTE(review): task_id duplicates the one used in
        # test_should_be_empty_directory_fail (copy-paste); harmless here
        # since no DAG is involved, but worth renaming upstream.
        task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
                                filepath='/datadirectory/empty_directory',
                                timeout=1,
                                retry_delay=timedelta(seconds=1),
                                poke_interval=1,
                                hook=self.hook)

        # When
        # Then
        with self.assertRaises(AirflowSensorTimeout):
            task.execute(None)
class HdfsSensorRegexTests(unittest.TestCase):
def setUp(self):
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.log = logging.getLogger()
self.log.setLevel(logging.DEBUG)
def test_should_match_regex(self):
"""
test the empty directory behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_not_match_regex(self):
"""
test the empty directory behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("^IDoNotExist")
task = HdfsSensorRegex(task_id='Should_not_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_and_filesize(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_and_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignore_copying=True,
ignored_ext=['_COPYING_', 'sftp'],
file_size=10,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_match_regex_but_filesize(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_but_copyingext(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("copying_file_\d+.txt")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignored_ext=['_COPYING_', 'sftp'],
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
| 35.811024 | 81 | 0.523637 |
import logging
import unittest
import re
from datetime import timedelta
from airflow.contrib.sensors.hdfs_sensor import HdfsSensorFolder, HdfsSensorRegex
from airflow.exceptions import AirflowSensorTimeout
class HdfsSensorFolderTests(unittest.TestCase):
def setUp(self):
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.log = logging.getLogger()
self.log.setLevel(logging.DEBUG)
def test_should_be_empty_directory(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory',
filepath='/datadirectory/empty_directory',
be_empty=True,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
task.execute(None)
def test_should_be_empty_directory_fail(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
filepath='/datadirectory/not_empty_directory',
be_empty=True,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_be_a_non_empty_directory(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_non_empty_directory',
filepath='/datadirectory/not_empty_directory',
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
task.execute(None)
def test_should_be_non_empty_directory_fail(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
filepath='/datadirectory/empty_directory',
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
class HdfsSensorRegexTests(unittest.TestCase):
def setUp(self):
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.log = logging.getLogger()
self.log.setLevel(logging.DEBUG)
def test_should_match_regex(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
task.execute(None)
def test_should_not_match_regex(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("^IDoNotExist")
task = HdfsSensorRegex(task_id='Should_not_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_and_filesize(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_and_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignore_copying=True,
ignored_ext=['_COPYING_', 'sftp'],
file_size=10,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
task.execute(None)
def test_should_match_regex_but_filesize(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_but_copyingext(self):
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("copying_file_\d+.txt")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignored_ext=['_COPYING_', 'sftp'],
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
| true | true |
f72d98fe41914415c21b32188f41c6849b2a500f | 6,359 | py | Python | Game_2048.py | Sandro-Tan/Game-2048 | dce87c1791f4fb1cd993089bd042f803c98e0d65 | [
"MIT"
] | null | null | null | Game_2048.py | Sandro-Tan/Game-2048 | dce87c1791f4fb1cd993089bd042f803c98e0d65 | [
"MIT"
] | null | null | null | Game_2048.py | Sandro-Tan/Game-2048 | dce87c1791f4fb1cd993089bd042f803c98e0d65 | [
"MIT"
] | null | null | null | """
2048 game
Move and merge squares using arrow keys
Get a 2048-value tile to win
Author: Sandro Tan
Date: Aug 2019
Version: 1.0
"""
import GUI_2048
import random
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
# Directions, DO NOT MODIFY
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
# Offsets for computing tile indices in each direction.
# DO NOT MODIFY this dictionary.
OFFSETS = {UP: (1, 0),
DOWN: (-1, 0),
LEFT: (0, 1),
RIGHT: (0, -1)}
def merge(line):
"""
Helper function that merges a single row or column in 2048
"""
# remove all zeros in original line and output into a new list
newlist = []
output = []
for item in line:
if item != 0:
newlist.append(item)
# merge the numbers
for index in range(len(newlist) - 1):
if newlist[index] == newlist[index + 1]:
newlist[index] *= 2
newlist[index + 1] = 0
for item in newlist:
if item != 0:
output.append(item)
while len(output) < len(line):
output.append(0)
return output
# helper function to return number 2 (90%) or 4 (10%)
def random_number(nums, probs):
seed = random.random()
if seed > probs[0]:
return nums[1]
else:
return nums[0]
class TwentyFortyEight:
"""
Class to run the game logic.
"""
def __init__(self, grid_height, grid_width):
self.grid_height = grid_height
self.grid_width = grid_width
# initial tiles indices
self.indices_up = [[0, col] for col in range(self.get_grid_width())]
self.indices_down = [[self.get_grid_height() - 1, col] for col in range(self.get_grid_width())]
self.indices_left = [[row, 0] for row in range(self.get_grid_height())]
self.indices_right = [[row, self.get_grid_width() - 1] for row in range(self.get_grid_height())]
self.indices_dict = {UP: self.indices_up,
DOWN: self.indices_down,
LEFT: self.indices_left,
RIGHT: self.indices_right}
self.reset()
def reset(self):
"""
Reset the game so the grid is empty except for two
initial tiles.
"""
# stores intitial values
self.cells_value = [[0 for row in range(self.grid_height)] for col in range(self.grid_width)]
for dummy_idx in range(2):
self.new_tile()
def __str__(self):
"""
Return a string representation of the grid for debugging.
"""
output = 'Height:' + str(self.get_grid_height())
output += ' Width:' + str(self.get_grid_width())
return output
def get_grid_height(self):
"""
Get the height of the board.
"""
return self.grid_height
def get_grid_width(self):
"""
Get the width of the board.
"""
return self.grid_width
def move(self, direction):
"""
Move all tiles in the given direction and add
a new tile if any tiles moved.
"""
'''
indices dictionary stores the indices of edge cells
For example, after pressing up arrow key,
edge tiles variable will store the indices of the top row
'''
edge_tiles = self.indices_dict[direction]
# Get the lines that hold values
line = []
for item in edge_tiles:
temp = []
row_index = item[0]
col_index = item[1]
temp.append(self.get_tile(row_index, col_index))
for dummy_idx in range(len(edge_tiles) - 1):
row_index += OFFSETS[direction][0]
col_index += OFFSETS[direction][1]
temp.append(self.get_tile(row_index, col_index))
line.append(temp)
# Merge the lines and put them in a new list
merged = []
for item in line:
merged.append(merge(item))
# Convert row and col in merged list to those in a grid to be painted
# Still thinking about some way to simplify these codes
if direction == UP:
for row in range(len(merged[0])):
for col in range(len(merged)):
self.set_tile(col, row, merged[row][col])
if direction == DOWN:
for row in range(len(merged[0])):
for col in range(len(merged)):
self.set_tile(self.get_grid_height() - col - 1, row, merged[row][col])
if direction == LEFT:
for row in range(len(merged)):
for col in range(len(merged[0])):
self.set_tile(row, col, merged[row][col])
if direction == RIGHT:
for row in range(len(merged)):
for col in range(len(merged[0])):
self.set_tile(row, self.get_grid_width() - col - 1, merged[row][col])
self.new_tile()
def new_tile(self):
"""
Create a new tile in a randomly selected empty
square. The tile should be 2 90% of the time and
4 10% of the time.
"""
random_row = random.randint(0, self.get_grid_height() - 1)
random_col = random.randint(0, self.get_grid_width() - 1)
value = random_number((2, 4), (0.9, 0.1))
if self.get_tile(random_row, random_col) == 0:
self.set_tile(random_row, random_col, value)
# no two tiles at the same location
else:
self.new_tile()
def set_tile(self, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
self.cells_value[row][col] = value
def get_tile(self, row, col):
"""
Return the value of the tile at position row, col.
"""
return self.cells_value[row][col]
def game_win(self):
for row in range(self.get_grid_height()):
for col in range(self.get_grid_width()):
if self.get_tile(row, col) == 2048:
print("You win!")
self.reset()
game = TwentyFortyEight(4,4)
GUI_2048.run_gui(game)
| 30.425837 | 105 | 0.545841 |
import GUI_2048
import random
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
OFFSETS = {UP: (1, 0),
DOWN: (-1, 0),
LEFT: (0, 1),
RIGHT: (0, -1)}
def merge(line):
newlist = []
output = []
for item in line:
if item != 0:
newlist.append(item)
for index in range(len(newlist) - 1):
if newlist[index] == newlist[index + 1]:
newlist[index] *= 2
newlist[index + 1] = 0
for item in newlist:
if item != 0:
output.append(item)
while len(output) < len(line):
output.append(0)
return output
def random_number(nums, probs):
seed = random.random()
if seed > probs[0]:
return nums[1]
else:
return nums[0]
class TwentyFortyEight:
def __init__(self, grid_height, grid_width):
self.grid_height = grid_height
self.grid_width = grid_width
self.indices_up = [[0, col] for col in range(self.get_grid_width())]
self.indices_down = [[self.get_grid_height() - 1, col] for col in range(self.get_grid_width())]
self.indices_left = [[row, 0] for row in range(self.get_grid_height())]
self.indices_right = [[row, self.get_grid_width() - 1] for row in range(self.get_grid_height())]
self.indices_dict = {UP: self.indices_up,
DOWN: self.indices_down,
LEFT: self.indices_left,
RIGHT: self.indices_right}
self.reset()
def reset(self):
self.cells_value = [[0 for row in range(self.grid_height)] for col in range(self.grid_width)]
for dummy_idx in range(2):
self.new_tile()
def __str__(self):
output = 'Height:' + str(self.get_grid_height())
output += ' Width:' + str(self.get_grid_width())
return output
def get_grid_height(self):
return self.grid_height
def get_grid_width(self):
return self.grid_width
def move(self, direction):
edge_tiles = self.indices_dict[direction]
line = []
for item in edge_tiles:
temp = []
row_index = item[0]
col_index = item[1]
temp.append(self.get_tile(row_index, col_index))
for dummy_idx in range(len(edge_tiles) - 1):
row_index += OFFSETS[direction][0]
col_index += OFFSETS[direction][1]
temp.append(self.get_tile(row_index, col_index))
line.append(temp)
merged = []
for item in line:
merged.append(merge(item))
if direction == UP:
for row in range(len(merged[0])):
for col in range(len(merged)):
self.set_tile(col, row, merged[row][col])
if direction == DOWN:
for row in range(len(merged[0])):
for col in range(len(merged)):
self.set_tile(self.get_grid_height() - col - 1, row, merged[row][col])
if direction == LEFT:
for row in range(len(merged)):
for col in range(len(merged[0])):
self.set_tile(row, col, merged[row][col])
if direction == RIGHT:
for row in range(len(merged)):
for col in range(len(merged[0])):
self.set_tile(row, self.get_grid_width() - col - 1, merged[row][col])
self.new_tile()
def new_tile(self):
random_row = random.randint(0, self.get_grid_height() - 1)
random_col = random.randint(0, self.get_grid_width() - 1)
value = random_number((2, 4), (0.9, 0.1))
if self.get_tile(random_row, random_col) == 0:
self.set_tile(random_row, random_col, value)
else:
self.new_tile()
def set_tile(self, row, col, value):
self.cells_value[row][col] = value
def get_tile(self, row, col):
return self.cells_value[row][col]
def game_win(self):
for row in range(self.get_grid_height()):
for col in range(self.get_grid_width()):
if self.get_tile(row, col) == 2048:
print("You win!")
self.reset()
game = TwentyFortyEight(4,4)
GUI_2048.run_gui(game)
| true | true |
f72d99157240f89ff49e8a9ea4de3274257776d1 | 4,869 | py | Python | evaluation/plot_WDS_topo_with_sensitivity.py | BME-SmartLab/GraphConvWat | 6cdcb3cb1bd22eb274c19ad4a45a78e334462e44 | [
"MIT"
] | null | null | null | evaluation/plot_WDS_topo_with_sensitivity.py | BME-SmartLab/GraphConvWat | 6cdcb3cb1bd22eb274c19ad4a45a78e334462e44 | [
"MIT"
] | null | null | null | evaluation/plot_WDS_topo_with_sensitivity.py | BME-SmartLab/GraphConvWat | 6cdcb3cb1bd22eb274c19ad4a45a78e334462e44 | [
"MIT"
] | 5 | 2021-05-05T12:35:14.000Z | 2022-03-23T14:20:38.000Z | # -*- coding: utf-8 -*-
import argparse
import os
import sys
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import collections as mc
import matplotlib.pyplot as plt
from epynet import Network
sys.path.insert(0, os.path.join('..'))
from utils.graph_utils import get_nx_graph, get_sensitivity_matrix
from utils.SensorInstaller import SensorInstaller
# ----- ----- ----- ----- ----- -----
# Command line arguments
# ----- ----- ----- ----- ----- -----
parser = argparse.ArgumentParser()
parser.add_argument(
'--wds',
default = 'anytown',
type = str
)
parser.add_argument(
'--nodesize',
default = 7,
type = int,
help = "Size of nodes on the plot."
)
parser.add_argument(
'--perturb',
action = "store_true",
)
args = parser.parse_args()
pathToRoot = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
pathToModels = os.path.join(pathToRoot, 'experiments', 'models')
wds = Network(os.path.join('..', 'water_networks', args.wds+'.inp'))
wds.solve()
print('Calculating nodal sensitivity to demand change...\n')
ptb = np.max(wds.junctions.basedemand) / 100
if args.perturb:
for pump in wds.pumps:
pump.speed *= 1.1
for junc in wds.junctions:
tempo = np.random.rand()
if tempo < .3:
junc.basedemand *= 1.1
elif tempo > .6:
junc.basedemand *= .9
S = get_sensitivity_matrix(wds, ptb)
def get_node_df(elements, get_head=False):
data = []
for junc in elements:
ser = pd.Series({
'uid': junc.uid,
'x': junc.coordinates[0],
'y': junc.coordinates[1],
})
if get_head:
ser['head'] = junc.head
data.append(ser)
data = pd.DataFrame(data)
if get_head:
data['head'] = (data['head'] - data['head'].min()) / (data['head'].max()-data['head'].min())
return data
def get_elem_df(elements, nodes):
data= []
df = pd.DataFrame(data)
if elements:
for elem in elements:
ser = pd.Series({
'uid': elem.uid,
'x1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'x'].values,
'y1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'y'].values,
'x2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'x'].values,
'y2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'y'].values,
})
data.append(ser)
df = pd.DataFrame(data)
df['x1'] = df['x1'].str[0]
df['y1'] = df['y1'].str[0]
df['x2'] = df['x2'].str[0]
df['y2'] = df['y2'].str[0]
df['center_x'] = (df['x1']+df['x2']) / 2
df['center_y'] = (df['y1']+df['y2']) / 2
df['orient'] = np.degrees(np.arctan((df['y2']-df['y1'])/(df['x2']-df['x1']))) + 90
return df
def build_lc_from(df):
line_collection = []
for elem_id in df['uid']:
line_collection.append([
(df.loc[df['uid'] == elem_id, 'x1'].values[0],
df.loc[df['uid'] == elem_id, 'y1'].values[0]),
(df.loc[df['uid'] == elem_id, 'x2'].values[0],
df.loc[df['uid'] == elem_id, 'y2'].values[0])
])
return line_collection
nodes = get_node_df(wds.nodes, get_head=True)
juncs = get_node_df(wds.junctions, get_head=True)
tanks = get_node_df(wds.tanks)
reservoirs = get_node_df(wds.reservoirs)
pipes = get_elem_df(wds.pipes, nodes)
pumps = get_elem_df(wds.pumps, nodes)
valves= get_elem_df(wds.valves, nodes)
pipe_collection = build_lc_from(pipes)
pump_collection = build_lc_from(pumps)
if not valves.empty:
valve_collection = build_lc_from(valves)
mew = .5
fig, ax = plt.subplots()
lc = mc.LineCollection(pipe_collection, linewidths=mew, color='k')
ax.add_collection(lc)
lc = mc.LineCollection(pump_collection, linewidths=mew, color='k')
ax.add_collection(lc)
if not valves.empty:
lc = mc.LineCollection(valve_collection, linewidths=mew, color='k')
ax.add_collection(lc)
nodal_s = np.sum(np.abs(S), axis=0)
nodal_s = (nodal_s-nodal_s.min()) / nodal_s.max()
colors = []
cmap = plt.get_cmap('plasma')
for idx, junc in juncs.iterrows():
color = cmap(nodal_s[idx])
colors.append(color)
ax.plot(junc['x'], junc['y'], 'ko', mfc=color, mec='k', ms=args.nodesize, mew=mew)
for _, tank in tanks.iterrows():
ax.plot(tank['x'], tank['y'], marker=7, mfc='k', mec='k', ms=7, mew=mew)
for _, reservoir in reservoirs.iterrows():
ax.plot(reservoir['x'], reservoir['y'], marker='o', mfc='k', mec='k', ms=3, mew=mew)
ax.plot(pumps['center_x'], pumps['center_y'], 'ko', ms=7, mfc='white', mew=mew)
for _, pump in pumps.iterrows():
ax.plot(pump['center_x'], pump['center_y'],
marker=(3, 0, pump['orient']),
color='k',
ms=5
)
ax.autoscale()
ax.axis('off')
plt.tight_layout()
plt.show()
| 31.616883 | 100 | 0.589443 |
import argparse
import os
import sys
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import collections as mc
import matplotlib.pyplot as plt
from epynet import Network
sys.path.insert(0, os.path.join('..'))
from utils.graph_utils import get_nx_graph, get_sensitivity_matrix
from utils.SensorInstaller import SensorInstaller
parser = argparse.ArgumentParser()
parser.add_argument(
'--wds',
default = 'anytown',
type = str
)
parser.add_argument(
'--nodesize',
default = 7,
type = int,
help = "Size of nodes on the plot."
)
parser.add_argument(
'--perturb',
action = "store_true",
)
args = parser.parse_args()
pathToRoot = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
pathToModels = os.path.join(pathToRoot, 'experiments', 'models')
wds = Network(os.path.join('..', 'water_networks', args.wds+'.inp'))
wds.solve()
print('Calculating nodal sensitivity to demand change...\n')
ptb = np.max(wds.junctions.basedemand) / 100
if args.perturb:
for pump in wds.pumps:
pump.speed *= 1.1
for junc in wds.junctions:
tempo = np.random.rand()
if tempo < .3:
junc.basedemand *= 1.1
elif tempo > .6:
junc.basedemand *= .9
S = get_sensitivity_matrix(wds, ptb)
def get_node_df(elements, get_head=False):
data = []
for junc in elements:
ser = pd.Series({
'uid': junc.uid,
'x': junc.coordinates[0],
'y': junc.coordinates[1],
})
if get_head:
ser['head'] = junc.head
data.append(ser)
data = pd.DataFrame(data)
if get_head:
data['head'] = (data['head'] - data['head'].min()) / (data['head'].max()-data['head'].min())
return data
def get_elem_df(elements, nodes):
data= []
df = pd.DataFrame(data)
if elements:
for elem in elements:
ser = pd.Series({
'uid': elem.uid,
'x1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'x'].values,
'y1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'y'].values,
'x2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'x'].values,
'y2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'y'].values,
})
data.append(ser)
df = pd.DataFrame(data)
df['x1'] = df['x1'].str[0]
df['y1'] = df['y1'].str[0]
df['x2'] = df['x2'].str[0]
df['y2'] = df['y2'].str[0]
df['center_x'] = (df['x1']+df['x2']) / 2
df['center_y'] = (df['y1']+df['y2']) / 2
df['orient'] = np.degrees(np.arctan((df['y2']-df['y1'])/(df['x2']-df['x1']))) + 90
return df
def build_lc_from(df):
line_collection = []
for elem_id in df['uid']:
line_collection.append([
(df.loc[df['uid'] == elem_id, 'x1'].values[0],
df.loc[df['uid'] == elem_id, 'y1'].values[0]),
(df.loc[df['uid'] == elem_id, 'x2'].values[0],
df.loc[df['uid'] == elem_id, 'y2'].values[0])
])
return line_collection
nodes = get_node_df(wds.nodes, get_head=True)
juncs = get_node_df(wds.junctions, get_head=True)
tanks = get_node_df(wds.tanks)
reservoirs = get_node_df(wds.reservoirs)
pipes = get_elem_df(wds.pipes, nodes)
pumps = get_elem_df(wds.pumps, nodes)
valves= get_elem_df(wds.valves, nodes)
pipe_collection = build_lc_from(pipes)
pump_collection = build_lc_from(pumps)
if not valves.empty:
valve_collection = build_lc_from(valves)
mew = .5
fig, ax = plt.subplots()
lc = mc.LineCollection(pipe_collection, linewidths=mew, color='k')
ax.add_collection(lc)
lc = mc.LineCollection(pump_collection, linewidths=mew, color='k')
ax.add_collection(lc)
if not valves.empty:
lc = mc.LineCollection(valve_collection, linewidths=mew, color='k')
ax.add_collection(lc)
nodal_s = np.sum(np.abs(S), axis=0)
nodal_s = (nodal_s-nodal_s.min()) / nodal_s.max()
colors = []
cmap = plt.get_cmap('plasma')
for idx, junc in juncs.iterrows():
color = cmap(nodal_s[idx])
colors.append(color)
ax.plot(junc['x'], junc['y'], 'ko', mfc=color, mec='k', ms=args.nodesize, mew=mew)
for _, tank in tanks.iterrows():
ax.plot(tank['x'], tank['y'], marker=7, mfc='k', mec='k', ms=7, mew=mew)
for _, reservoir in reservoirs.iterrows():
ax.plot(reservoir['x'], reservoir['y'], marker='o', mfc='k', mec='k', ms=3, mew=mew)
ax.plot(pumps['center_x'], pumps['center_y'], 'ko', ms=7, mfc='white', mew=mew)
for _, pump in pumps.iterrows():
ax.plot(pump['center_x'], pump['center_y'],
marker=(3, 0, pump['orient']),
color='k',
ms=5
)
ax.autoscale()
ax.axis('off')
plt.tight_layout()
plt.show()
| true | true |
f72d997030ed151a6efe6095e41b32f999a086f1 | 1,132 | py | Python | src/homework/tests/cross_check/tests_question_crosscheck_dispatcher.py | denkasyanov/education-backend | c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5 | [
"MIT"
] | 151 | 2020-04-21T09:58:57.000Z | 2021-09-12T09:01:21.000Z | src/homework/tests/cross_check/tests_question_crosscheck_dispatcher.py | denkasyanov/education-backend | c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5 | [
"MIT"
] | 163 | 2020-05-29T20:52:00.000Z | 2021-09-11T12:44:56.000Z | src/homework/tests/cross_check/tests_question_crosscheck_dispatcher.py | boochamoocha/education-backend | c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a | [
"MIT"
] | 39 | 2020-04-21T12:28:16.000Z | 2021-09-12T15:33:47.000Z | import pytest
from homework import tasks
from homework.models import AnswerCrossCheck
pytestmark = [pytest.mark.django_db]
def test_crosschecks_are_created(question_dispatcher):
question_dispatcher()
assert AnswerCrossCheck.objects.count() == 2
def test_question_method_does_the_same(question):
question.dispatch_crosscheck(answers_per_user=1)
assert AnswerCrossCheck.objects.count() == 2
def test_task_does_the_same(question):
tasks.disptach_crosscheck.delay(question_id=question.pk, answers_per_user=1)
assert AnswerCrossCheck.objects.count() == 2
def test_email_is_sent(question_dispatcher, send_mail, mocker, answers):
question_dispatcher()
assert send_mail.call_count == 2
send_mail.assert_has_calls([
mocker.call(
to=answers[0].author.email,
template_id='new-answers-to-check',
disable_antispam=True,
ctx={
'answers': [
{
'url': mocker.ANY,
'text': mocker.ANY,
},
],
},
),
])
| 24.608696 | 80 | 0.630742 | import pytest
from homework import tasks
from homework.models import AnswerCrossCheck
pytestmark = [pytest.mark.django_db]
def test_crosschecks_are_created(question_dispatcher):
question_dispatcher()
assert AnswerCrossCheck.objects.count() == 2
def test_question_method_does_the_same(question):
question.dispatch_crosscheck(answers_per_user=1)
assert AnswerCrossCheck.objects.count() == 2
def test_task_does_the_same(question):
tasks.disptach_crosscheck.delay(question_id=question.pk, answers_per_user=1)
assert AnswerCrossCheck.objects.count() == 2
def test_email_is_sent(question_dispatcher, send_mail, mocker, answers):
question_dispatcher()
assert send_mail.call_count == 2
send_mail.assert_has_calls([
mocker.call(
to=answers[0].author.email,
template_id='new-answers-to-check',
disable_antispam=True,
ctx={
'answers': [
{
'url': mocker.ANY,
'text': mocker.ANY,
},
],
},
),
])
| true | true |
f72d9972f9f6432ea86db5aa71f6ae923c068235 | 38,676 | py | Python | boilerpy3/filters.py | anwala/BoilerPy3 | f45cd9c2e846b4e5a804df5826e01cd2a5260a51 | [
"Apache-2.0"
] | 1 | 2021-02-25T10:21:58.000Z | 2021-02-25T10:21:58.000Z | boilerpy3/filters.py | gongmusian/BoilerPy3 | 7d4cad1eeae7bd9976c4bb68511b15fefc640a60 | [
"Apache-2.0"
] | null | null | null | boilerpy3/filters.py | gongmusian/BoilerPy3 | 7d4cad1eeae7bd9976c4bb68511b15fefc640a60 | [
"Apache-2.0"
] | null | null | null | """
This file is licensed under the terms of the Apache License, Version 2.0. See the LICENSE file in the root of this
repository for complete details.
"""
# -----------------------------------------------------------------------
# FILTER MANIFEST
# -----------------------------------------------------------------------
#
# --------------------- Simple Filters: -----------------------
# MarkEverythingContentFilter - Marks all blocks as content.
# InvertedFilter - Reverts the "is_content" flag for all TextBlocks
# BoilerplateBlockFilter - Removes TextBlocks which have explicitly been marked as "not content".
# MinWordsFilter - Keeps only those content blocks which contain at least k words.
# MinClauseWordsFilter - Keeps only blocks that have at least one segment fragment ("clause") with at least k words
# SplitParagraphBlocksFilter - Splits TextBlocks at paragraph boundaries
# SurroundingToContentFilter
# LabelToBoilerplateFilter - Marks all blocks that contain a given label as "boilerplate".
# LabelToContentFilter - Marks all blocks that contain a given label as "content".
#
# --------------------- Heuristic Filters: -----------------------
# SimpleBlockFusionProcessor - Merges two subsequent blocks if their text densities are equal.
# ContentFusion
# LabelFusion - Fuses adjacent blocks if their labels are equal.
# BlockProximityFusion - Fuses adjacent blocks if their distance (in blocks) does not exceed a certain limit.
# KeepLargestBlockFilter - Keeps the largest TextBlock only (by the number of words)
# ExpandTitleToContentFilter - Marks all TextBlocks "content" which are between the headline and the part that has
# already been marked content, if they are marked MIGHT_BE_CONTENT
# ArticleMetadataFilter
# AddPrecedingLabelsFilter - Adds the labels of the preceding block to the current block, optionally adding a prefix.
# DocumentTitleMatchClassifier - Marks TextBlocks which contain parts of the HTML TITLE tag
#
# --------------------- English-trained Heuristic Filters: -----------------------
# MinFulltextWordsFilter - Keeps only those content blocks which contain at least k full-text words
# KeepLargestFulltextBlockFilter - Keeps the largest TextBlock only (by the number of words)
# IgnoreBlocksAfterContentFilter - Marks all blocks as "non-content" that occur after blocks that have been marked
# INDICATES_END_OF_TEXT
# IgnoreBlocksAfterContentFromEndFilter - like above
# TerminatingBlocksFinder - Finds blocks which are potentially indicating the end of an article text and marks them with
# INDICATES_END_OF_TEXT
# NumWordsRulesClassifier - Classifies TextBlocks as content/not-content through rules that have been determined using
# the C4.8 machine learning algorithm
# DensityRulesClassifier - Classifies TextBlocks as content/not-content through rules that have been determined using
# the C4.8 machine learning algorithm
# CanolaFilter - A full-text extractor trained on krdwrd Canola
import re
from typing import List, Pattern, Union
from boilerpy3.document import DefaultLabels, TextBlock, TextDocument
class BoilerpipeFilter:
"""
Boilerpipe abstract interface
"""
def process(self, doc: TextDocument) -> bool:
pass
def subtract_blocks(self, block_arr: List[TextBlock], blocks_to_remove: List[TextBlock]) -> List[TextBlock]:
"""
inefficient but in place: for block in blocksToRemove: blockArr.remove(blocksToRemove) efficiently subtracts
second array from first assuming blocksToRemove shows up in the same order as blocArr
"""
if len(blocks_to_remove) == 0:
return block_arr
new_block_arr = []
remove_iter = iter(blocks_to_remove)
cur_block_to_remove = next(remove_iter)
for idx, block in enumerate(block_arr):
if block == cur_block_to_remove:
try:
cur_block_to_remove = next(remove_iter)
except StopIteration:
# add the rest
new_block_arr.extend(block_arr[idx + 1:])
break
else:
new_block_arr.append(block)
return new_block_arr
class FilterChain(BoilerpipeFilter):
    """
    Composite filter: runs a fixed sequence of filters in order.
    """

    def __init__(self, filter_arr: List[BoilerpipeFilter]) -> None:
        super(FilterChain, self).__init__()
        self.filter_arr = filter_arr

    def process(self, doc: TextDocument) -> bool:
        """Run every filter on *doc*; True if any filter reported a change."""
        changed = False
        for current_filter in self.filter_arr:
            # every filter must run (side effects), so no short-circuiting
            changed |= current_filter.process(doc)
        return changed
# -----------------------------------------------------------------------
# SIMPLE FILTERS
# -----------------------------------------------------------------------
class MarkEverythingContentFilter(BoilerpipeFilter):
    """Flags every text block in the document as content."""

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            if block.is_content:
                continue
            block.is_content = True
            changed = True
        return changed
class InvertedFilter(BoilerpipeFilter):
    """Flips the "is_content" flag on every text block."""

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if not blocks:
            return False
        for block in blocks:
            block.is_content = not block.is_content
        return True
class BoilerplateBlockFilter(BoilerpipeFilter):
    """Drops every text block that is not flagged as content."""

    def process(self, doc: TextDocument) -> bool:
        original = doc.text_blocks
        kept = [block for block in original if block.is_content]
        doc.text_blocks = kept
        # kept is a subset of original, so != is equivalent to <
        return len(kept) != len(original)
class MinWordsFilter(BoilerpipeFilter):
    """Demotes content blocks that contain fewer than *min_words* words."""

    def __init__(self, min_words: int) -> None:
        super(MinWordsFilter, self).__init__()
        self.min_words = min_words

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            if block.is_content and block.num_words < self.min_words:
                block.is_content = False
                changed = True
        return changed
class MinClauseWordsFilter(BoilerpipeFilter):
    """
    Keeps only blocks that have at least one clause ("segment fragment")
    with at least *min_words* words (default 5), where a clause ends in
    one of ,.:;!? followed by whitespace or end-of-text.

    NOTE: You might consider using the SplitParagraphBlocksFilter upstream.
    """

    PAT_CLAUSE_DELIMITER = re.compile(r"\b[,.:;!?]+(?:\s+|\Z)", re.UNICODE)
    PAT_WHITESPACE = re.compile(r"\s+")

    def __init__(self, min_words: int = 5, accept_clauses_without_delimiter: bool = False) -> None:
        super(MinClauseWordsFilter, self).__init__()
        self.min_words = min_words
        self.accept_clauses_without_delimiter = accept_clauses_without_delimiter

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            if not block.is_content:
                continue
            fragments = self.PAT_CLAUSE_DELIMITER.split(block.text)
            # every fragment except the last ended in an actual delimiter
            accepted = any(self.is_clause_accepted(f) for f in fragments[:-1])
            # clauses should always end with a delimiter, so trailing text
            # without one is only considered when explicitly enabled
            if self.accept_clauses_without_delimiter:
                accepted = accepted or self.is_clause_accepted(fragments[-1])
            if not accepted:
                block.is_content = False
                changed = True
        return changed

    def is_clause_accepted(self, text: str):
        """True when *text* holds at least min_words words (whitespace runs + 1)."""
        word_count = 1 + len(self.PAT_WHITESPACE.findall(text))
        return word_count >= self.min_words
class SplitParagraphBlocksFilter(BoilerpipeFilter):
    """
    Splits text blocks at paragraph (newline) boundaries.

    NOTE: This is not fully supported (it will break highlighting support via
    #getContainedTextElements()), but is needed by some downstream filters
    such as MinClauseWordsFilter.
    """

    NEWLINE_REGEX = re.compile(r"[\n\r]+")

    def process(self, doc: TextDocument) -> bool:
        changed = False
        rebuilt = []
        for block in doc.text_blocks:
            paragraphs = self.NEWLINE_REGEX.split(block.text)
            if len(paragraphs) < 2:
                # single paragraph: keep the original block untouched
                rebuilt.append(block)
                continue
            # one new block per paragraph, inheriting flag and labels
            for paragraph in paragraphs:
                piece = TextBlock(paragraph)
                piece.is_content = block.is_content
                piece.add_labels(block.labels)
                rebuilt.append(piece)
            changed = True
        if changed:
            doc.text_blocks = rebuilt
        return changed
class SurroundingToContentFilter(BoilerpipeFilter):
    """
    Promotes a non-content block to content when both its neighbours are
    content and it satisfies *condition*.

    Default condition (equivalent of the upstream TextBlockCondition):
    no links and more than 6 words.
    """

    def __init__(self, condition: callable = lambda tb: tb.link_density == 0 and tb.num_words > 6) -> None:
        """
        :param condition: predicate deciding whether an in-between block may
            be promoted to content.
        """
        # BUG FIX: the default predicate previously read tb.linkDensity, which
        # is not a TextBlock attribute (the rest of this module uses
        # link_density), so evaluating the default condition raised
        # AttributeError.
        super(SurroundingToContentFilter, self).__init__()
        self.cond = condition

    def process(self, doc: TextDocument) -> bool:
        tbs = doc.text_blocks
        n = len(tbs)
        has_changes = False
        i = 1
        while i < n - 1:
            prev_block = tbs[i - 1]
            cur_block = tbs[i]
            next_block = tbs[i + 1]
            if not cur_block.is_content and prev_block.is_content and next_block.is_content and self.cond(cur_block):
                cur_block.is_content = True
                has_changes = True
                # a promoted block cannot itself become the middle of the
                # next sandwich, so skip ahead
                i += 2
            else:
                i += 1
        return has_changes
class LabelToBoilerplateFilter(BoilerpipeFilter):
    """
    Demotes to boilerplate every content block that carries one of the given
    labels (e.g. DefaultLabels.STRICTLY_NOT_CONTENT).
    """

    def __init__(self, *labels: str) -> None:
        super(LabelToBoilerplateFilter, self).__init__()
        self.labels = labels

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            if not block.is_content:
                continue
            if any(block.has_label(candidate) for candidate in self.labels):
                block.is_content = False
                changed = True
        return changed
class LabelToContentFilter(BoilerpipeFilter):
    """
    Promotes to content every non-content block carrying one of the given
    labels.
    """

    def __init__(self, *labels: str) -> None:
        super(LabelToContentFilter, self).__init__()
        self.labels = labels

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            if block.is_content:
                continue
            if any(block.has_label(candidate) for candidate in self.labels):
                block.is_content = True
                changed = True
        return changed
# -----------------------------------------------------------------------
# GENERIC HEURISTIC FILTERS
# -----------------------------------------------------------------------
class SimpleBlockFusionProcessor(BoilerpipeFilter):
    """Merges consecutive blocks whose text densities are exactly equal."""

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        merged_away = []
        anchor = blocks[0]
        for candidate in blocks[1:]:
            if anchor.text_density == candidate.text_density:
                # same density: fold the candidate into the running anchor
                anchor.merge_next(candidate)
                merged_away.append(candidate)
            else:
                anchor = candidate
        if merged_away:
            doc.text_blocks = self.subtract_blocks(blocks, merged_away)
        return bool(merged_away)
class ContentFusion(BoilerpipeFilter):
    """
    Repeatedly merges each content block with its successor when the
    successor has low link density (< 0.56) and is not strictly-not-content,
    iterating until a full pass makes no further merges (fixed point).
    """
    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        if len(text_blocks) < 2:
            return False
        # WARNING: POSSIBLE BUG FOUND: shouldn't prev_block be reset every passthrough?
        changes = False
        # if it has been changed on the previous passthrough
        changed_on_pass = True
        while changed_on_pass:
            changed_on_pass = False
            prev_block = text_blocks[0]
            blocks_to_remove = []
            for block in text_blocks[1:]:
                # merge only into a content block, and only body-like text
                # (few links, not explicitly excluded)
                if prev_block.is_content and block.link_density < 0.56 \
                        and not block.has_label(DefaultLabels.STRICTLY_NOT_CONTENT):
                    prev_block.merge_next(block)
                    blocks_to_remove.append(block)
                    changed_on_pass = True
                    changes = True
                else:
                    prev_block = block
            # drop the merged-away blocks before the next pass
            text_blocks = self.subtract_blocks(text_blocks, blocks_to_remove)
        if changes:
            doc.text_blocks = text_blocks
        return changes
class LabelFusion(BoilerpipeFilter):
    """
    Merges consecutive blocks whose markup labels are identical.
    """

    def __init__(self, label_prefix: str = "") -> None:
        """
        :param label_prefix: retained for interface compatibility; not read
            by this implementation.
        """
        super(LabelFusion, self).__init__()
        self.label_prefix = label_prefix

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        merged_away = []
        anchor = blocks[0]
        for candidate in blocks[1:]:
            if self.equal_labels(anchor.labels, candidate.labels):
                anchor.merge_next(candidate)
                merged_away.append(candidate)
            else:
                anchor = candidate
        if merged_away:
            doc.text_blocks = self.subtract_blocks(blocks, merged_away)
        return bool(merged_away)

    def equal_labels(self, labels1: List[str], labels2: List[str]) -> bool:
        """True when both label lists exist and agree on their markup labels."""
        if labels1 is None or labels2 is None:
            return False
        # NOTE: two blocks with no markup labels at all also compare equal
        return self.markup_labels_only(labels1) == self.markup_labels_only(labels2)

    def markup_labels_only(self, labels: List[str]) -> set:
        """Project *labels* down to those carrying the markup prefix."""
        return {candidate for candidate in labels if candidate.startswith(DefaultLabels.MARKUP_PREFIX)}
class BlockProximityFusion(BoilerpipeFilter):
    """
    Fuses adjacent blocks if their distance (in blocks) does not exceed a certain limit. This probably makes sense only
    in cases where an upstream filter already has removed some blocks.
    MAX_DISTANCE_1 = BlockProximityFusion(1, False, False)
    MAX_DISTANCE_1_SAME_TAGLEVEL = BlockProximityFusion(1, False, True)
    MAX_DISTANCE_1_CONTENT_ONLY = BlockProximityFusion(1, True, False)
    MAX_DISTANCE_1_CONTENT_ONLY_SAME_TAGLEVEL = BlockProximityFusion(1, True, True)
    """
    def __init__(self, max_blocks_distance: int = 1, content_only: bool = False,
                 same_tag_level_only: bool = False) -> None:
        """
        Creates a new BlockProximityFusion instance.
        :param max_blocks_distance: The maximum distance in blocks.
        :param content_only: only start merging from (and into) content blocks
        :param same_tag_level_only: only merge blocks at the same tag level
        """
        super(BlockProximityFusion, self).__init__()
        self.max_blocks_distance = max_blocks_distance
        self.content_only = content_only
        self.same_tag_level_only = same_tag_level_only
    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        if len(text_blocks) < 2:
            return False
        changes = False
        if self.content_only:
            # in content-only mode, start scanning at the first content block
            start_idx = None
            for idx, block in enumerate(text_blocks):
                if block.is_content:
                    start_idx = idx
                    break
            if start_idx is None:
                # no content at all: nothing to fuse
                return False
        else:
            start_idx = 0
        prev_block = text_blocks[start_idx]
        blocks_to_remove = []
        for block in text_blocks[start_idx + 1:]:
            if not block.is_content:
                # non-content blocks are never merge targets
                prev_block = block
                continue
            # gap between the end of prev_block and the start of this block,
            # measured in block offsets
            diff_blocks = block.offset_blocks_start - prev_block.offset_blocks_end - 1
            if diff_blocks <= self.max_blocks_distance:
                ok = True
                if self.content_only:
                    if not prev_block.is_content or not block.is_content:
                        ok = False
                if self.same_tag_level_only and prev_block.tag_level != block.tag_level:
                    ok = False
                if ok:
                    prev_block.merge_next(block)
                    # remove current block
                    blocks_to_remove.append(block)
                    changes = True
                else:
                    prev_block = block
            else:
                prev_block = block
        if len(blocks_to_remove) > 0:
            doc.text_blocks = self.subtract_blocks(text_blocks, blocks_to_remove)
            changes = True
        return changes
class KeepLargestBlockFilter(BoilerpipeFilter):
    """
    Keeps the largest TextBlock only (by the number of words). In case of more than one block with the same
    number of words, the first block is chosen. All discarded blocks are marked "not content" and flagged as
    DefaultLabels.
    Note that, by default, only TextBlocks marked as "content" are taken into consideration.
    INSTANCE = KeepLargestBlockFilter(False)
    INSTANCE_EXPAND_TO_SAME_TAGLEVEL = KeepLargestBlockFilter(True)
    """
    def __init__(self, expand_to_same_level_text: bool = False) -> None:
        super(KeepLargestBlockFilter, self).__init__()
        self.expand_to_same_level_text = expand_to_same_level_text
    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        if len(text_blocks) < 2:
            return False
        try:
            # max() keeps the first of equally-sized blocks (tie -> first)
            largest_block = max((tb for tb in text_blocks if tb.is_content), key=lambda tb: tb.num_words)
        except ValueError:
            # no content blocks exist / largest block not found
            largest_block = None
        for tb in text_blocks:
            if tb == largest_block:
                tb.is_content = True
            else:
                tb.is_content = False
                tb.add_label(DefaultLabels.MIGHT_BE_CONTENT)
        if self.expand_to_same_level_text and largest_block is not None:
            # additionally re-promote blocks at the same tag level as the
            # winner, scanning outwards until a shallower level is reached
            level = largest_block.tag_level
            largest_block_idx = text_blocks.index(largest_block)
            # backwards from the winner (inclusive)
            for tb in text_blocks[largest_block_idx::-1]:
                tl = tb.tag_level
                if tl < level:
                    break
                elif tl == level:
                    tb.is_content = True
            # forwards from the winner (inclusive)
            for tb in text_blocks[largest_block_idx:]:
                tl = tb.tag_level
                if tl < level:
                    break
                elif tl == level:
                    tb.is_content = True
        return True
class ExpandTitleToContentFilter(BoilerpipeFilter):
    """
    Promotes MIGHT_BE_CONTENT blocks lying between the detected headline
    (DefaultLabels.TITLE) and the first content block. Quite specific to the
    news domain.
    """

    def process(self, doc: TextDocument) -> bool:
        title_idx = -1
        content_start = -1
        for idx, block in enumerate(doc.text_blocks):
            if content_start == -1:
                # keep updating the title index until content begins
                if block.has_label(DefaultLabels.TITLE):
                    title_idx = idx
                if block.is_content:
                    content_start = idx
        if title_idx == -1 or content_start <= title_idx:
            # no title, or no content after the title: nothing to expand
            return False
        changed = False
        for block in doc.text_blocks[title_idx:content_start]:
            if block.has_label(DefaultLabels.MIGHT_BE_CONTENT) and block.is_content is not True:
                block.is_content = True
                changed = True
        return changed
class ArticleMetadataFilter(BoilerpipeFilter):
    """Promotes short blocks that look like date/time stamps or bylines."""

    # date/time-ish line, or a "By ..." byline
    PATTERNS_SHORT = [
        re.compile(r"^[0-9 ,./]*\b(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|"
                   r"July|August|September|October|November|December)?\b[0-9 ,:apm./]*(?:[CPSDMGET]{2,3})?$"),
        re.compile("^[Bb]y ")
    ]

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            # metadata lines are short; skip anything longer
            if block.num_words > 10:
                continue
            if any(pattern.search(block.text) for pattern in self.PATTERNS_SHORT):
                changed = True
                block.is_content = True
                block.add_label(DefaultLabels.ARTICLE_METADATA)
        return changed
class AddPrecedingLabelsFilter(BoilerpipeFilter):
    """
    Copies each block's labels onto the block that follows it, optionally
    adding a prefix.
    INSTANCE = AddPrecedingLabelsFilter("")
    INSTANCE_PRE = AddPrecedingLabelsFilter("^")
    """

    def __init__(self, label_prefix: str = "") -> None:
        super(AddPrecedingLabelsFilter, self).__init__()
        self.label_prefix = label_prefix

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        changed = False
        # walk back-to-front so each block receives its predecessor's labels
        successor = None
        for block in reversed(blocks):
            if successor is not None:
                labels = block.labels
                if labels:
                    for label in labels:
                        successor.add_label(self.label_prefix + label)
                    changed = True
            successor = block
        return changed
class DocumentTitleMatchClassifier(BoilerpipeFilter):
    """
    Marks TextBlocks which contain parts of the HTML <code><TITLE></code> tag, using some heuristics which
    are quite specific to the news domain.
    """
    # separators commonly used between a headline and a site name in <title>
    TITLE_REGEXES = [
        re.compile(r"[ ]*[|:][ ]*"),
        re.compile(r"[ ]*[|:()][ ]*"),
        re.compile(r"[ ]*[|:()\-][ ]*"),
        re.compile(r"[ ]*[|,:()\-][ ]*")
    ]
    WORD_REGEX = re.compile(r"\w+", re.UNICODE)
    def __init__(self, title: Union[str, None], use_doc_title: bool = False) -> None:
        """
        :param title: the page title to match against; ignored when
            use_doc_title is True.
        :param use_doc_title: when True, read the title from the document
            at process() time instead.
        """
        super(DocumentTitleMatchClassifier, self).__init__()
        self.use_doc_title = use_doc_title
        if use_doc_title:
            self.potential_titles = None
        else:
            self.potential_titles = self.find_potential_titles(title)
    def find_potential_titles(self, title: str):
        """Return the set of candidate headline strings derived from *title*,
        or None when no usable title exists."""
        if title is None:
            return None
        title = title.strip()
        if len(title) == 0:
            return None
        else:
            potential_titles = set()
            potential_titles.add(title)
            # also add the "longest part" for each separator convention
            for regex in self.TITLE_REGEXES:
                p = self.get_longest_part(title, regex)
                if p is not None:
                    potential_titles.add(p)
            return potential_titles
    def get_longest_part(self, title: str, pattern: Pattern):
        """Split *title* by *pattern* and return the 'longest' fragment,
        or None when the pattern does not split the title at all."""
        parts = pattern.split(title)
        if len(parts) == 1:
            return None
        longest_num_words = 0
        longest_part = ""
        for p in parts:
            # fragments containing ".com" are assumed to be the site name
            if ".com" in p:
                continue
            num_words = self.get_num_words(p)
            # NOTE(review): the `or len(p) > len(longest_part)` arm lets a
            # fragment with fewer words but more characters win — confirm
            # this mixed criterion is intended
            if num_words > longest_num_words or len(p) > len(longest_part):
                longest_num_words = num_words
                longest_part = p
        if len(longest_part) == 0:
            return None
        else:
            return longest_part.strip()
    def get_num_words(self, text: str):
        """Count word tokens in *text*."""
        return len(self.WORD_REGEX.findall(text))
    def process(self, doc: TextDocument) -> bool:
        if self.use_doc_title:
            self.potential_titles = self.find_potential_titles(doc.title)
        if self.potential_titles is None:
            return False
        changes = False
        # label every block whose text equals a candidate title
        # (case-insensitive, whitespace-trimmed)
        for tb in doc.text_blocks:
            text = tb.text.strip().lower()
            if any(candidate.lower() == text for candidate in self.potential_titles):
                tb.add_label(DefaultLabels.TITLE)
                changes = True
        return changes
# -----------------------------------------------------------------------
# ENGLISH HEURISTIC FILTERS
# -----------------------------------------------------------------------
# --- Heuristic Filters that have been trained on English language text
class HeuristicFilterBase(BoilerpipeFilter):
    """Shared heuristics for the English-trained filters below."""

    def get_num_full_text_words(self, tb: TextBlock, min_text_density: int = 9):
        """Word count of *tb*, counted only if it is dense enough to be full text."""
        return tb.num_words if tb.text_density >= min_text_density else 0
class MinFulltextWordsFilter(HeuristicFilterBase):
    """
    Keeps only those content blocks which contain at least *min_words*
    full-text words (measured by HeuristicFilterBase.get_num_full_text_words;
    default 30).
    """

    def __init__(self, min_words: int = 30) -> None:
        # FIX: call the base initializer, as every other filter in this
        # module does (it was previously skipped).
        super(MinFulltextWordsFilter, self).__init__()
        self.min_words = min_words

    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if tb.is_content and self.get_num_full_text_words(tb) < self.min_words:
                tb.is_content = False
                changes = True
        return changes
class KeepLargestFulltextBlockFilter(HeuristicFilterBase):
    """
    Keeps only the content block with the most full-text words (first wins on
    ties); all other blocks are demoted and labelled MIGHT_BE_CONTENT. Unlike
    KeepLargestBlockFilter, word counts come from get_num_full_text_words,
    which ignores blocks below the full-text density threshold.

    NOTE: Without language-specific fine-tuning this may lead to suboptimal
    results; consider KeepLargestBlockFilter instead.
    """

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        candidates = [block for block in blocks if block.is_content]
        if not candidates:
            return False
        # max() returns the first of equally-sized candidates
        winner = max(candidates, key=self.get_num_full_text_words)
        for block in blocks:
            if block == winner:
                block.is_content = True
            else:
                block.is_content = False
                block.add_label(DefaultLabels.MIGHT_BE_CONTENT)
        return True
class IgnoreBlocksAfterContentFilter(HeuristicFilterBase):
    """
    Demotes every block after an INDICATES_END_OF_TEXT marker, but only once
    at least *min_num_words* full-text words of content have been seen.
    Use together with an upstream TerminatingBlocksFinder.
    """

    def __init__(self, min_num_words: int = 60) -> None:
        """
        DEFAULT_INSTANCE = IgnoreBlocksAfterContentFilter(60)
        INSTANCE_200 = IgnoreBlocksAfterContentFilter(200)
        """
        self.min_num_words = min_num_words

    def process(self, doc: TextDocument) -> bool:
        changed = False
        seen_words = 0
        past_end_marker = False
        for block in doc.text_blocks:
            if block.is_content:
                seen_words += self.get_num_full_text_words(block)
            # the marker only takes effect after enough real content
            if block.has_label(DefaultLabels.INDICATES_END_OF_TEXT) and seen_words >= self.min_num_words:
                past_end_marker = True
            if past_end_marker:
                changed = True
                block.is_content = False
        return changed
class IgnoreBlocksAfterContentFromEndFilter(HeuristicFilterBase):
    """
    Walking backwards from the document end, demotes INDICATES_END_OF_TEXT
    blocks until roughly 200 words of trailing content have been passed.
    Companion to TerminatingBlocksFinder.
    """

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if not blocks:
            return False
        changed = False
        trailing_words = 0
        for block in reversed(blocks):
            if block.has_label(DefaultLabels.INDICATES_END_OF_TEXT):
                block.add_label(DefaultLabels.STRICTLY_NOT_CONTENT)
                block.remove_label(DefaultLabels.MIGHT_BE_CONTENT)
                block.is_content = False
                changed = True
            elif block.is_content:
                trailing_words += block.num_words
                if trailing_words > 200:
                    break
        return changed
class TerminatingBlocksFinder(BoilerpipeFilter):
    """
    Labels blocks that look like the end of an article (comment prompts,
    feedback footers, ...) with INDICATES_END_OF_TEXT, for consumption by a
    downstream IgnoreBlocksAfterContentFilter.
    """

    DIGIT_REGEX = re.compile(r'\D')

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for block in doc.text_blocks:
            # terminators are short: skip long blocks and trivial fragments
            if block.num_words >= 15:
                continue
            stripped = block.text.strip()
            if len(stripped) < 8:
                continue
            if self._is_terminating(stripped.lower()):
                block.add_label(DefaultLabels.INDICATES_END_OF_TEXT)
                changed = True
        return changed

    def _is_terminating(self, text_lc: str) -> bool:
        """True when the lower-cased text matches a known end-of-article cue."""
        if text_lc.startswith("comments"):
            return True
        if self.starts_with_number(text_lc, " comments", " users responded in"):
            return True
        if text_lc.startswith((" reuters", "please rate this", "post a comment")):
            return True
        for needle in ("what you think...", "add your comment", "add comment", "reader views", "have your say",
                       "reader comments", "rtta artikeln"):
            if needle in text_lc:
                return True
        return text_lc == "thanks for your comments - this feedback is now closed"

    def starts_with_number(self, text: str, *match_str_arr: str):
        """
        Checks whether the given text t starts with a sequence of digits, followed by one of the given strings.
        :param text: The text to examine
        :param match_str_arr: Any strings that may follow the digits.
        :return: true if at least one combination matches
        """
        first_non_digit = self.DIGIT_REGEX.search(text)
        # pos = number of leading digit characters
        pos = len(text) if first_non_digit is None else first_non_digit.start()
        if pos == 0:
            # no leading digits at all
            return False
        return any(text.startswith(candidate, pos) for candidate in match_str_arr)
class NumWordsRulesClassifier(BoilerpipeFilter):
    """
    Classifies TextBlocks as content/not-content through rules that have been determined using the C4.8 machine
    learning algorithm, as described in the paper "Boilerplate Detection using Shallow Text Features" (WSDM 2010),
    particularly using number of words per block and link density per block.
    """

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        has_changes = False
        n = len(text_blocks)
        for i, current_block in enumerate(text_blocks):
            # neighbouring blocks, padded with the empty sentinel at the ends
            prev_block = text_blocks[i - 1] if i > 0 else TextBlock.EMPTY_START
            next_block = text_blocks[i + 1] if i + 1 < n else TextBlock.EMPTY_START
            has_changes |= self.classify(prev_block, current_block, next_block)
        return has_changes

    def classify(self, prev_block: TextBlock, curr_block: TextBlock, next_block: TextBlock):
        """Apply the learned decision tree to curr_block; return True when its
        is_content flag actually changed."""
        if curr_block.link_density <= 0.333333:
            if prev_block.link_density <= 0.555556:
                if curr_block.num_words <= 16:
                    if next_block.num_words <= 15:
                        is_content = prev_block.num_words > 4
                    else:
                        is_content = True
                else:
                    is_content = True
            else:
                if curr_block.num_words <= 40:
                    is_content = next_block.num_words > 17
                else:
                    is_content = True
        else:
            is_content = False
        # BUG FIX: previously `curr_block.is_content is is_content`, which
        # reported "changed" exactly when the flag did NOT change (inverted).
        changes = curr_block.is_content != is_content
        curr_block.is_content = is_content
        return changes
class DensityRulesClassifier(BoilerpipeFilter):
    """
    Classifies TextBlocks as content/not-content through rules that have been determined using the C4.8 machine learning
    algorithm, as described in the paper "Boilerplate Detection using Shallow Text Features", particularly using text
    densities and link densities.
    """

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        has_changes = False
        n = len(text_blocks)
        for i, current_block in enumerate(text_blocks):
            # neighbouring blocks, padded with the empty sentinel at the ends
            prev_block = text_blocks[i - 1] if i > 0 else TextBlock.EMPTY_START
            next_block = text_blocks[i + 1] if i + 1 < n else TextBlock.EMPTY_START
            has_changes |= self.classify(prev_block, current_block, next_block)
        return has_changes

    def classify(self, prev_block: TextBlock, curr_block: TextBlock, next_block: TextBlock):
        """Apply the learned decision tree to curr_block; return True when its
        is_content flag actually changed."""
        if curr_block.link_density <= 0.333333:
            if prev_block.link_density <= 0.555556:
                if curr_block.text_density <= 9:
                    if next_block.text_density <= 10:
                        is_content = prev_block.text_density > 4
                    else:
                        is_content = True
                else:
                    is_content = next_block.text_density != 0
            else:
                is_content = next_block.text_density > 11
        else:
            is_content = False
        # BUG FIX: previously `curr_block.is_content is is_content`, which
        # reported "changed" exactly when the flag did NOT change (inverted).
        changes = curr_block.is_content != is_content
        curr_block.is_content = is_content
        return changes
class CanolaFilter(BoilerpipeFilter):
    """
    A full-text extractor trained on http://krdwrd.org/,
    https://krdwrd.org/trac/attachment/wiki/Corpora/Canola/CANOLA.pdf. Works well with SimpleEstimator, too.
    """

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        has_changes = False
        n = len(text_blocks)
        for i, current_block in enumerate(text_blocks):
            # neighbouring blocks, padded with the empty sentinel at the ends
            prev_block = text_blocks[i - 1] if i > 0 else TextBlock.EMPTY_START
            next_block = text_blocks[i + 1] if i + 1 < n else TextBlock.EMPTY_START
            has_changes |= self.classify(prev_block, current_block, next_block)
        return has_changes

    def classify(self, prev_block: TextBlock, curr_block: TextBlock, next_block: TextBlock):
        """Apply the Canola-trained rule set; return True when curr_block's
        is_content flag actually changed."""
        # linked block immediately followed by substantial text
        linked_then_text = curr_block.link_density > 0 and next_block.num_words > 11
        # a long block on its own
        long_block = curr_block.num_words > 19
        # short block embedded in link-free surrounding text
        embedded_in_text = next_block.num_words > 6 and next_block.link_density == 0 and prev_block.link_density == 0 and \
            (curr_block.num_words > 6 or prev_block.num_words > 7 or next_block.num_words > 19)
        is_content = linked_then_text or long_block or embedded_in_text
        # BUG FIX: previously `curr_block.is_content is is_content`, which
        # reported "changed" exactly when the flag did NOT change (inverted).
        changes = curr_block.is_content != is_content
        curr_block.is_content = is_content
        return changes
| 37.117083 | 120 | 0.589797 |
import re
from typing import List, Pattern, Union
from boilerpy3.document import DefaultLabels, TextBlock, TextDocument
class BoilerpipeFilter:
    """Abstract base for all boilerpipe filters."""
    def process(self, doc: TextDocument) -> bool:
        """Apply the filter to *doc*; return True if the document changed."""
        pass
    def subtract_blocks(self, block_arr: List[TextBlock], blocks_to_remove: List[TextBlock]) -> List[TextBlock]:
        """Return block_arr minus blocks_to_remove in a single pass; assumes
        blocks_to_remove appears in the same relative order as block_arr."""
        if len(blocks_to_remove) == 0:
            return block_arr
        new_block_arr = []
        remove_iter = iter(blocks_to_remove)
        cur_block_to_remove = next(remove_iter)
        for idx, block in enumerate(block_arr):
            if block == cur_block_to_remove:
                try:
                    cur_block_to_remove = next(remove_iter)
                except StopIteration:
                    # nothing left to remove: keep the remainder wholesale
                    new_block_arr.extend(block_arr[idx + 1:])
                    break
            else:
                new_block_arr.append(block)
        return new_block_arr
class FilterChain(BoilerpipeFilter):
    """Composite filter: runs a fixed sequence of filters in order."""
    def __init__(self, filter_arr: List[BoilerpipeFilter]) -> None:
        super(FilterChain, self).__init__()
        self.filter_arr = filter_arr
    def process(self, doc: TextDocument) -> bool:
        """Run every filter on *doc*; True if any filter reported a change."""
        is_updated = False
        for filtr in self.filter_arr:
            is_updated |= filtr.process(doc)
        return is_updated
class MarkEverythingContentFilter(BoilerpipeFilter):
    """Flags every text block in the document as content."""
    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if not tb.is_content:
                tb.is_content = True
                changes = True
        return changes
class InvertedFilter(BoilerpipeFilter):
    """Flips the "is_content" flag on every text block."""
    def process(self, doc: TextDocument) -> bool:
        tbs = doc.text_blocks
        if len(tbs) == 0:
            return False
        for tb in tbs:
            tb.is_content = not tb.is_content
        return True
class BoilerplateBlockFilter(BoilerpipeFilter):
    """Drops every text block that is not flagged as content."""
    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        new_blocks = [tb for tb in text_blocks if tb.is_content]
        has_changes = len(new_blocks) < len(text_blocks)
        doc.text_blocks = new_blocks
        return has_changes
class MinWordsFilter(BoilerpipeFilter):
    """Demotes content blocks that contain fewer than *min_words* words."""
    def __init__(self, min_words: int) -> None:
        super(MinWordsFilter, self).__init__()
        self.min_words = min_words
    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if not tb.is_content:
                continue
            if tb.num_words < self.min_words:
                tb.is_content = False
                changes = True
        return changes
class MinClauseWordsFilter(BoilerpipeFilter):
    """Keeps only blocks with at least one clause of min_words words, where
    a clause ends in one of ,.:;!? followed by whitespace or end-of-text."""
    PAT_CLAUSE_DELIMITER = re.compile(r"\b[,.:;!?]+(?:\s+|\Z)", re.UNICODE)
    PAT_WHITESPACE = re.compile(r"\s+")
    def __init__(self, min_words: int = 5, accept_clauses_without_delimiter: bool = False) -> None:
        super(MinClauseWordsFilter, self).__init__()
        self.min_words = min_words
        self.accept_clauses_without_delimiter = accept_clauses_without_delimiter
    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if not tb.is_content:
                continue
            has_clause = False
            possible_clause_arr = self.PAT_CLAUSE_DELIMITER.split(tb.text)
            # every fragment but the last ended in an actual delimiter
            for possible_clause in possible_clause_arr[:-1]:
                has_clause = self.is_clause_accepted(possible_clause)
                if has_clause:
                    break
            # trailing text without a delimiter only counts when enabled
            if self.accept_clauses_without_delimiter:
                has_clause |= self.is_clause_accepted(possible_clause_arr[-1])
            if not has_clause:
                tb.is_content = False
                changes = True
        return changes
    def is_clause_accepted(self, text: str):
        """True when *text* holds at least min_words words (whitespace runs + 1)."""
        n = 1
        for _ in self.PAT_WHITESPACE.finditer(text):
            n += 1
            if n >= self.min_words:
                return True
        return n >= self.min_words
class SplitParagraphBlocksFilter(BoilerpipeFilter):
    """Splits text blocks at paragraph (newline) boundaries; each paragraph
    inherits the source block's content flag and labels."""
    NEWLINE_REGEX = re.compile(r"[\n\r]+")
    def process(self, doc: TextDocument) -> bool:
        changes = False
        blocks = doc.text_blocks
        blocks_new = []
        for tb in blocks:
            text = tb.text
            paragraphs = self.NEWLINE_REGEX.split(text)
            if len(paragraphs) < 2:
                # single paragraph: keep the block untouched
                blocks_new.append(tb)
                continue
            is_content = tb.is_content
            labels = tb.labels
            for p in paragraphs:
                tb_p = TextBlock(p)
                tb_p.is_content = is_content
                tb_p.add_labels(labels)
                blocks_new.append(tb_p)
            changes = True
        if changes:
            doc.text_blocks = blocks_new
        return changes
class SurroundingToContentFilter(BoilerpipeFilter):
    """Promotes a non-content block to content when both its neighbours are
    content and it satisfies *condition* (default: no links, > 6 words)."""
    def __init__(self, condition: callable = lambda tb: tb.link_density == 0 and tb.num_words > 6) -> None:
        # BUG FIX: the default predicate previously read tb.linkDensity, which
        # is not a TextBlock attribute (the rest of this module uses
        # link_density), so evaluating the default condition raised
        # AttributeError.
        super(SurroundingToContentFilter, self).__init__()
        self.cond = condition
    def process(self, doc: TextDocument) -> bool:
        tbs = doc.text_blocks
        n = len(tbs)
        has_changes = False
        i = 1
        while i < n - 1:
            prev_block = tbs[i - 1]
            cur_block = tbs[i]
            next_block = tbs[i + 1]
            if not cur_block.is_content and prev_block.is_content and next_block.is_content and self.cond(cur_block):
                cur_block.is_content = True
                has_changes = True
                # a promoted block cannot itself be the middle of the next
                # sandwich, so skip ahead
                i += 2
            else:
                i += 1
        return has_changes
class LabelToBoilerplateFilter(BoilerpipeFilter):
    """Demotes to boilerplate every content block carrying one of the labels."""
    def __init__(self, *labels: str) -> None:
        super(LabelToBoilerplateFilter, self).__init__()
        self.labels = labels
    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if tb.is_content and any(tb.has_label(label) for label in self.labels):
                tb.is_content = False
                changes = True
        return changes
class LabelToContentFilter(BoilerpipeFilter):
    """Promotes to content every non-content block carrying one of the labels."""
    def __init__(self, *labels: str) -> None:
        super(LabelToContentFilter, self).__init__()
        self.labels = labels
    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if not tb.is_content and any(tb.has_label(label) for label in self.labels):
                tb.is_content = True
                changes = True
        return changes
# -----------------------------------------------------------------------
# GENERIC HEURISTIC FILTERS
# -----------------------------------------------------------------------
class SimpleBlockFusionProcessor(BoilerpipeFilter):
    """Merges consecutive blocks whose text densities are exactly equal."""
    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        changes = False
        if len(text_blocks) < 2:
            return False
        prev_block = text_blocks[0]
        blocks_to_remove = []
        for block in text_blocks[1:]:
            if prev_block.text_density == block.text_density:
                # same density: fold this block into the running previous one
                prev_block.merge_next(block)
                blocks_to_remove.append(block)
                changes = True
            else:
                prev_block = block
        if changes:
            doc.text_blocks = self.subtract_blocks(text_blocks, blocks_to_remove)
        return changes
class ContentFusion(BoilerpipeFilter):
    """Repeatedly merges each content block with its low-link-density
    successor until a full pass makes no further merges (fixed point)."""
    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        if len(text_blocks) < 2:
            return False
        # WARNING: POSSIBLE BUG FOUND: shouldn't prev_block be reset every passthrough?
        changes = False
        # flag: a merge happened on the current passthrough
        changed_on_pass = True
        while changed_on_pass:
            changed_on_pass = False
            prev_block = text_blocks[0]
            blocks_to_remove = []
            for block in text_blocks[1:]:
                # merge only into a content block, only body-like text
                if prev_block.is_content and block.link_density < 0.56 \
                        and not block.has_label(DefaultLabels.STRICTLY_NOT_CONTENT):
                    prev_block.merge_next(block)
                    blocks_to_remove.append(block)
                    changed_on_pass = True
                    changes = True
                else:
                    prev_block = block
            # drop the merged-away blocks before the next pass
            text_blocks = self.subtract_blocks(text_blocks, blocks_to_remove)
        if changes:
            doc.text_blocks = text_blocks
        return changes
class LabelFusion(BoilerpipeFilter):
    """Fuses adjacent blocks whose markup-derived label sets are identical."""

    def __init__(self, label_prefix: str = "") -> None:
        # label_prefix is kept for API compatibility; it is not used here
        super(LabelFusion, self).__init__()
        self.label_prefix = label_prefix

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        merged_any = False
        absorbed = []
        anchor = blocks[0]
        for candidate in blocks[1:]:
            if self.equal_labels(anchor.labels, candidate.labels):
                anchor.merge_next(candidate)
                absorbed.append(candidate)
                merged_any = True
            else:
                anchor = candidate
        if merged_any:
            doc.text_blocks = self.subtract_blocks(blocks, absorbed)
        return merged_any

    def equal_labels(self, labels1: List[str], labels2: List[str]) -> bool:
        """Compare only the markup-derived labels of two blocks."""
        if labels1 is None or labels2 is None:
            return False
        return self.markup_labels_only(labels1) == self.markup_labels_only(labels2)

    def markup_labels_only(self, labels: List[str]) -> set:
        """Keep just the labels carrying the markup prefix."""
        return {lbl for lbl in labels if lbl.startswith(DefaultLabels.MARKUP_PREFIX)}
class BlockProximityFusion(BoilerpipeFilter):
    """Fuses blocks that are close in terms of block offsets, i.e. whose gap of
    previously filtered-out blocks is at most ``max_blocks_distance``."""

    def __init__(self, max_blocks_distance: int = 1, content_only: bool = False,
                 same_tag_level_only: bool = False) -> None:
        """
        :param max_blocks_distance: largest offset-block gap that may be bridged
        :param content_only: start at the first content block and fuse only content pairs
        :param same_tag_level_only: additionally require equal tag levels to fuse
        """
        super(BlockProximityFusion, self).__init__()
        self.max_blocks_distance = max_blocks_distance
        self.content_only = content_only
        self.same_tag_level_only = same_tag_level_only

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        if len(text_blocks) < 2:
            return False
        changes = False
        if self.content_only:
            # skip ahead to the first content block
            start_idx = None
            for idx, block in enumerate(text_blocks):
                if block.is_content:
                    start_idx = idx
                    break
            if start_idx is None:
                # no content at all -> nothing to fuse
                return False
        else:
            start_idx = 0
        prev_block = text_blocks[start_idx]
        blocks_to_remove = []
        for block in text_blocks[start_idx + 1:]:
            if not block.is_content:
                prev_block = block
                continue
            # number of dropped blocks between prev_block and block
            diff_blocks = block.offset_blocks_start - prev_block.offset_blocks_end - 1
            if diff_blocks <= self.max_blocks_distance:
                ok = True
                if self.content_only:
                    if not prev_block.is_content or not block.is_content:
                        ok = False
                if self.same_tag_level_only and prev_block.tag_level != block.tag_level:
                    ok = False
                if ok:
                    prev_block.merge_next(block)
                    blocks_to_remove.append(block)
                    changes = True
                else:
                    prev_block = block
            else:
                prev_block = block
        if len(blocks_to_remove) > 0:
            doc.text_blocks = self.subtract_blocks(text_blocks, blocks_to_remove)
            changes = True
        return changes
class KeepLargestBlockFilter(BoilerpipeFilter):
    """Keeps only the largest content block (by word count); every other block
    is demoted to boilerplate and labelled MIGHT_BE_CONTENT. Optionally
    re-promotes blocks on the same tag level around the winner."""

    def __init__(self, expand_to_same_level_text: bool = False) -> None:
        super(KeepLargestBlockFilter, self).__init__()
        self.expand_to_same_level_text = expand_to_same_level_text

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        if len(text_blocks) < 2:
            return False
        try:
            largest_block = max((tb for tb in text_blocks if tb.is_content), key=lambda tb: tb.num_words)
        except ValueError:
            # max() on an empty iterable: no content blocks at all
            largest_block = None
        for tb in text_blocks:
            if tb == largest_block:
                tb.is_content = True
            else:
                tb.is_content = False
                tb.add_label(DefaultLabels.MIGHT_BE_CONTENT)
        if self.expand_to_same_level_text and largest_block is not None:
            level = largest_block.tag_level
            largest_block_idx = text_blocks.index(largest_block)
            # walk outwards from the winner in both directions, re-promoting
            # same-level blocks until a shallower tag level is reached
            for tb in text_blocks[largest_block_idx::-1]:
                tl = tb.tag_level
                if tl < level:
                    break
                elif tl == level:
                    tb.is_content = True
            for tb in text_blocks[largest_block_idx:]:
                tl = tb.tag_level
                if tl < level:
                    break
                elif tl == level:
                    tb.is_content = True
        return True
class ExpandTitleToContentFilter(BoilerpipeFilter):
    """Promotes MIGHT_BE_CONTENT blocks lying between the title block and the
    first content block."""

    def process(self, doc: TextDocument) -> bool:
        title_idx = -1
        content_start = -1
        for idx, block in enumerate(doc.text_blocks):
            # keep updating the title position until the first content block is seen
            if content_start == -1 and block.has_label(DefaultLabels.TITLE):
                title_idx = idx
            if content_start == -1 and block.is_content:
                content_start = idx
        if content_start <= title_idx or title_idx == -1:
            return False
        changed = False
        for block in doc.text_blocks[title_idx:content_start]:
            if block.has_label(DefaultLabels.MIGHT_BE_CONTENT) and block.is_content is not True:
                block.is_content = True
                changed = True
        return changed
class ArticleMetadataFilter(BoilerpipeFilter):
    """Marks very short blocks that look like article metadata — date/time
    lines and "By <author>" bylines — as content with the ARTICLE_METADATA label."""

    # 1) date/time-ish lines ("Jan 5, 2010 3:15 pm EST", "12/05/2010", ...)
    # 2) byline lines starting with "By " / "by "
    PATTERNS_SHORT = [
        re.compile(r"^[0-9 ,./]*\b(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|"
                   r"July|August|September|October|November|December)?\b[0-9 ,:apm./]*(?:[CPSDMGET]{2,3})?$"),
        re.compile("^[Bb]y ")
    ]

    def process(self, doc: TextDocument) -> bool:
        changed = False
        for tb in doc.text_blocks:
            if tb.num_words > 10:
                # metadata lines are short; skip longer blocks outright
                continue
            for p in self.PATTERNS_SHORT:
                text = tb.text
                if p.search(text):
                    changed = True
                    tb.is_content = True
                    tb.add_label(DefaultLabels.ARTICLE_METADATA)
                    break
        return changed
class AddPrecedingLabelsFilter(BoilerpipeFilter):
    """Copies every block's labels (with a prefix) onto the block that follows it."""

    def __init__(self, label_prefix: str = "") -> None:
        super(AddPrecedingLabelsFilter, self).__init__()
        self.label_prefix = label_prefix

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        changed = False
        successor = None
        # walk backwards so each block sees its document-order successor
        for block in reversed(blocks):
            if successor is not None and block.labels:
                for label in block.labels:
                    successor.add_label(self.label_prefix + label)
                changed = True
            successor = block
        return changed
class DocumentTitleMatchClassifier(BoilerpipeFilter):
    """Labels blocks whose text equals the document title (or a plausible
    fragment of it, split at common separators like "|", ":", "-") with TITLE."""

    # progressively more aggressive separator sets used to split a <title>
    TITLE_REGEXES = [
        re.compile(r"[ ]*[|:][ ]*"),
        re.compile(r"[ ]*[|:()][ ]*"),
        re.compile(r"[ ]*[|:()\-][ ]*"),
        re.compile(r"[ ]*[|,:()\-][ ]*")
    ]
    WORD_REGEX = re.compile(r"\w+", re.UNICODE)

    def __init__(self, title: Union[str, None], use_doc_title: bool = False) -> None:
        """
        :param title: title to match against; ignored when use_doc_title is set
        :param use_doc_title: derive candidates from ``doc.title`` inside process()
        """
        super(DocumentTitleMatchClassifier, self).__init__()
        self.use_doc_title = use_doc_title
        if use_doc_title:
            # resolved lazily per document in process()
            self.potential_titles = None
        else:
            self.potential_titles = self.find_potential_titles(title)

    def find_potential_titles(self, title: str):
        """Return the set of candidate titles derived from `title`, or None."""
        if title is None:
            return None
        title = title.strip()
        if len(title) == 0:
            return None
        else:
            potential_titles = set()
            potential_titles.add(title)
            for regex in self.TITLE_REGEXES:
                p = self.get_longest_part(title, regex)
                if p is not None:
                    potential_titles.add(p)
            return potential_titles

    def get_longest_part(self, title: str, pattern: Pattern):
        """Split `title` at `pattern` and return its "longest" fragment.

        NOTE(review): the ``or len(p) > len(longest_part)`` clause lets a
        character-wise longer fragment win even with fewer words — confirm this
        tie-break against the reference implementation, which maximizes word
        count only.
        """
        parts = pattern.split(title)
        if len(parts) == 1:
            return None
        longest_num_words = 0
        longest_part = ""
        for p in parts:
            if ".com" in p:
                # skip site-name fragments
                continue
            num_words = self.get_num_words(p)
            if num_words > longest_num_words or len(p) > len(longest_part):
                longest_num_words = num_words
                longest_part = p
        if len(longest_part) == 0:
            return None
        else:
            return longest_part.strip()

    def get_num_words(self, text: str):
        """Count word tokens in `text`."""
        return len(self.WORD_REGEX.findall(text))

    def process(self, doc: TextDocument) -> bool:
        if self.use_doc_title:
            self.potential_titles = self.find_potential_titles(doc.title)
        if self.potential_titles is None:
            return False
        changes = False
        for tb in doc.text_blocks:
            # case-insensitive, whitespace-trimmed exact match
            text = tb.text.strip().lower()
            if any(candidate.lower() == text for candidate in self.potential_titles):
                tb.add_label(DefaultLabels.TITLE)
                changes = True
        return changes
class HeuristicFilterBase(BoilerpipeFilter):
    """Shared helper base for the heuristic filters below."""

    def get_num_full_text_words(self, tb: TextBlock, min_text_density: int = 9):
        """Word count of `tb`, or 0 when its text density is below the threshold."""
        return tb.num_words if tb.text_density >= min_text_density else 0
class MinFulltextWordsFilter(HeuristicFilterBase):
    """Demotes content blocks with fewer than `min_words` full-text words.

    Words only count as "full text" when the block's text density reaches the
    threshold used by :meth:`get_num_full_text_words`.
    """

    def __init__(self, min_words: int = 30) -> None:
        # consistency fix: call the base initializer like every other filter here
        super(MinFulltextWordsFilter, self).__init__()
        self.min_words = min_words

    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if tb.is_content and self.get_num_full_text_words(tb) < self.min_words:
                tb.is_content = False
                changes = True
        return changes
class KeepLargestFulltextBlockFilter(HeuristicFilterBase):
    """Keeps only the content block with the most full-text words; all other
    blocks are demoted and labelled MIGHT_BE_CONTENT."""

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if len(blocks) < 2:
            return False
        candidates = [b for b in blocks if b.is_content]
        if not candidates:
            return False
        winner = max(candidates, key=self.get_num_full_text_words)
        for block in blocks:
            if block == winner:
                block.is_content = True
            else:
                block.is_content = False
                block.add_label(DefaultLabels.MIGHT_BE_CONTENT)
        return True
class IgnoreBlocksAfterContentFilter(HeuristicFilterBase):
    """Demotes everything after the detected end of the main text.

    Once at least `min_num_words` full-text words have been accumulated and a
    block labelled INDICATES_END_OF_TEXT is reached, this and all subsequent
    blocks are marked as boilerplate.
    """

    def __init__(self, min_num_words: int = 60) -> None:
        # consistency fix: call the base initializer like every other filter here
        super(IgnoreBlocksAfterContentFilter, self).__init__()
        self.min_num_words = min_num_words

    def process(self, doc: TextDocument) -> bool:
        changes = False
        num_words = 0
        found_end_of_text = False
        for block in doc.text_blocks:
            if block.is_content:
                num_words += self.get_num_full_text_words(block)
            if block.has_label(DefaultLabels.INDICATES_END_OF_TEXT) and num_words >= self.min_num_words:
                found_end_of_text = True
            if found_end_of_text:
                changes = True
                block.is_content = False
        return changes
class IgnoreBlocksAfterContentFromEndFilter(HeuristicFilterBase):
    """Walking backwards from the end, strips END_OF_TEXT blocks until roughly
    200 content words have been passed."""

    def process(self, doc: TextDocument) -> bool:
        blocks = doc.text_blocks
        if not blocks:
            return False
        changed = False
        seen_words = 0
        for block in reversed(blocks):
            if block.has_label(DefaultLabels.INDICATES_END_OF_TEXT):
                block.add_label(DefaultLabels.STRICTLY_NOT_CONTENT)
                block.remove_label(DefaultLabels.MIGHT_BE_CONTENT)
                block.is_content = False
                changed = True
            elif block.is_content:
                seen_words += block.num_words
                if seen_words > 200:
                    # enough real content passed; stop stripping
                    break
        return changed
class TerminatingBlocksFinder(BoilerpipeFilter):
    """Labels short blocks that typically terminate the main text — comment
    prompts, "n comments" counters, agency bylines — with INDICATES_END_OF_TEXT."""

    # matches the first non-digit character, i.e. where a leading number ends
    DIGIT_REGEX = re.compile(r'\D')

    def process(self, doc: TextDocument) -> bool:
        changes = False
        for tb in doc.text_blocks:
            if tb.num_words >= 15:
                # terminators are short; long blocks are never candidates
                continue
            text = tb.text.strip()
            if len(text) < 8:
                continue
            text_lc = text.lower()
            startmatches = (" reuters", "please rate this", "post a comment")
            inmatches = ("what you think...", "add your comment", "add comment", "reader views", "have your say",
                         "reader comments", "rtta artikeln")
            eqmatch = "thanks for your comments - this feedback is now closed"
            if text_lc.startswith("comments") or self.starts_with_number(text_lc, " comments", " users responded in") \
                    or any(text_lc.startswith(match_str) for match_str in startmatches) \
                    or any(match_str in text_lc for match_str in inmatches) or text_lc == eqmatch:
                tb.add_label(DefaultLabels.INDICATES_END_OF_TEXT)
                changes = True
        return changes

    def starts_with_number(self, text: str, *match_str_arr: str):
        """Return True if `text` begins with a number immediately followed by
        one of the given suffixes (e.g. "41 comments")."""
        number_match = self.DIGIT_REGEX.search(text)
        if number_match is None:
            # the whole string is digits
            pos = len(text)
        else:
            pos = number_match.start()
        if pos == 0:
            # no leading number at all
            return False
        else:
            return any(text.startswith(match_str, pos) for match_str in match_str_arr)
class NumWordsRulesClassifier(BoilerpipeFilter):
    """Content/boilerplate classifier using a fixed decision tree over the word
    counts and link densities of the previous, current and next block (the
    trained "NumWordsRules" tree from the original boilerpipe)."""

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        has_changes = False
        n = len(text_blocks)
        for i, current_block in enumerate(text_blocks):
            # use the empty sentinel block beyond either end of the document
            prev_block = text_blocks[i - 1] if i > 0 else TextBlock.EMPTY_START
            next_block = text_blocks[i + 1] if i + 1 < n else TextBlock.EMPTY_START
            has_changes |= self.classify(prev_block, current_block, next_block)
        return has_changes

    def classify(self, prev_block: TextBlock, curr_block: TextBlock, next_block: TextBlock):
        """Apply the decision tree to one (prev, curr, next) triple.

        :return: True iff ``curr_block.is_content`` was actually changed.
        """
        if curr_block.link_density <= 0.333333:
            if prev_block.link_density <= 0.555556:
                if curr_block.num_words <= 16:
                    if next_block.num_words <= 15:
                        is_content = prev_block.num_words > 4
                    else:
                        is_content = True
                else:
                    is_content = True
            else:
                if curr_block.num_words <= 40:
                    is_content = next_block.num_words > 17
                else:
                    is_content = True
        else:
            is_content = False
        # BUG FIX: this used to be `is is_content`, reporting True when the flag
        # did NOT change — filters must return True only on actual modification.
        changes = curr_block.is_content is not is_content
        curr_block.is_content = is_content
        return changes
class DensityRulesClassifier(BoilerpipeFilter):
    """Content/boilerplate classifier using a fixed decision tree over the text
    densities and link densities of the previous, current and next block."""

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        has_changes = False
        n = len(text_blocks)
        for i, current_block in enumerate(text_blocks):
            # use the empty sentinel block beyond either end of the document
            prev_block = text_blocks[i - 1] if i > 0 else TextBlock.EMPTY_START
            next_block = text_blocks[i + 1] if i + 1 < n else TextBlock.EMPTY_START
            has_changes |= self.classify(prev_block, current_block, next_block)
        return has_changes

    def classify(self, prev_block: TextBlock, curr_block: TextBlock, next_block: TextBlock):
        """Apply the decision tree to one (prev, curr, next) triple.

        :return: True iff ``curr_block.is_content`` was actually changed.
        """
        if curr_block.link_density <= 0.333333:
            if prev_block.link_density <= 0.555556:
                if curr_block.text_density <= 9:
                    if next_block.text_density <= 10:
                        is_content = prev_block.text_density > 4
                    else:
                        is_content = True
                else:
                    is_content = next_block.text_density != 0
            else:
                is_content = next_block.text_density > 11
        else:
            is_content = False
        # BUG FIX: this used to be `is is_content`, reporting True when the flag
        # did NOT change — filters must return True only on actual modification.
        changes = curr_block.is_content is not is_content
        curr_block.is_content = is_content
        return changes
class CanolaFilter(BoilerpipeFilter):
    """Content classifier with rules trained on the CANOLA corpus."""

    def process(self, doc: TextDocument) -> bool:
        text_blocks = doc.text_blocks
        has_changes = False
        n = len(text_blocks)
        for i, current_block in enumerate(text_blocks):
            # use the empty sentinel block beyond either end of the document
            prev_block = text_blocks[i - 1] if i > 0 else TextBlock.EMPTY_START
            next_block = text_blocks[i + 1] if i + 1 < n else TextBlock.EMPTY_START
            has_changes |= self.classify(prev_block, current_block, next_block)
        return has_changes

    def classify(self, prev_block: TextBlock, curr_block: TextBlock, next_block: TextBlock):
        """Apply the CANOLA rule set to one (prev, curr, next) triple.

        :return: True iff ``curr_block.is_content`` was actually changed.
        """
        # linked block followed by a reasonably long block
        cond1 = curr_block.link_density > 0 and next_block.num_words > 11
        # long enough on its own
        cond2 = curr_block.num_words > 19
        # link-free neighbourhood with sufficient text around
        cond3 = next_block.num_words > 6 and next_block.link_density == 0 and prev_block.link_density == 0 and \
            (curr_block.num_words > 6 or prev_block.num_words > 7 or next_block.num_words > 19)
        is_content = cond1 or cond2 or cond3
        # BUG FIX: this used to be `is is_content`, reporting True when the flag
        # did NOT change — filters must return True only on actual modification.
        changes = curr_block.is_content is not is_content
        curr_block.is_content = is_content
        return changes
| true | true |
f72d9a61e10ccaed81de8d86da78582e8ecd06cd | 272 | py | Python | mindhome_alpha/erpnext/accounts/doctype/pos_closing_entry_taxes/pos_closing_entry_taxes.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/accounts/doctype/pos_closing_entry_taxes/pos_closing_entry_taxes.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/accounts/doctype/pos_closing_entry_taxes/pos_closing_entry_taxes.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class POSClosingEntryTaxes(Document):
    """Frappe child-table DocType for tax rows of a POS Closing Entry.

    Fields and behavior come from the framework and the DocType JSON schema;
    no Python-side logic is required.
    """

    pass
| 27.2 | 68 | 0.783088 |
from __future__ import unicode_literals
from frappe.model.document import Document
class POSClosingEntryTaxes(Document):
    """Frappe child-table DocType for tax rows of a POS Closing Entry.

    Fields and behavior come from the framework and the DocType JSON schema;
    no Python-side logic is required.
    """

    pass
| true | true |
f72d9a83a7754fc68372926e669955113ee0529a | 498 | py | Python | env/Lib/site-packages/plotly/validators/histogram2d/colorbar/title/_side.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/histogram2d/colorbar/title/_side.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/histogram2d/colorbar/title/_side.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``histogram2d.colorbar.title.side`` attribute.

    Restricts the colorbar title position to ``right``/``top``/``bottom``.
    """

    def __init__(
        self, plotly_name="side", parent_name="histogram2d.colorbar.title", **kwargs
    ):
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # changing this attribute only requires re-rendering the colorbars
            edit_type=kwargs.pop("edit_type", "colorbars"),
            values=kwargs.pop("values", ["right", "top", "bottom"]),
            **kwargs
        )
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="histogram2d.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
| true | true |
f72d9a8c9454e3853ac6f075276e317d79d1bf42 | 275 | py | Python | gunicorn.py | Frederick-S/one-mark | b2a85dce82f56fc0d08df7519178b4a7e4f89b70 | [
"MIT"
] | 8 | 2018-07-12T02:33:04.000Z | 2020-11-12T20:28:49.000Z | gunicorn.py | Frederick-S/one-mark | b2a85dce82f56fc0d08df7519178b4a7e4f89b70 | [
"MIT"
] | 353 | 2018-05-21T03:34:36.000Z | 2022-03-28T21:06:43.000Z | gunicorn.py | Frederick-S/one-mark | b2a85dce82f56fc0d08df7519178b4a7e4f89b70 | [
"MIT"
] | 2 | 2019-08-04T23:39:09.000Z | 2020-04-14T12:22:20.000Z | import os
# Gunicorn settings: listen on all interfaces, port 5000.
bind = '0.0.0.0:5000'
# Write access logs to a file in the working directory.
accesslog = 'app.log'
# Combined-style access log: remote host, request line, status, size, referer, agent.
access_log_format = \
    '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
# Forward Microsoft Graph credentials from the parent environment into workers.
# NOTE(review): os.getenv returns None when unset, so the concatenation raises
# TypeError at config load — presumably intentional fail-fast; confirm.
raw_env = [
    'GRAPH_CLIENT_ID=' + os.getenv('GRAPH_CLIENT_ID'),
    'GRAPH_CLIENT_SECRET=' + os.getenv('GRAPH_CLIENT_SECRET')
]
| 22.916667 | 65 | 0.585455 | import os
# Gunicorn settings: listen on all interfaces, port 5000.
bind = '0.0.0.0:5000'
# Write access logs to a file in the working directory.
accesslog = 'app.log'
# Combined-style access log: remote host, request line, status, size, referer, agent.
access_log_format = \
    '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
# Forward Microsoft Graph credentials from the parent environment into workers.
# NOTE(review): os.getenv returns None when unset, so the concatenation raises
# TypeError at config load — presumably intentional fail-fast; confirm.
raw_env = [
    'GRAPH_CLIENT_ID=' + os.getenv('GRAPH_CLIENT_ID'),
    'GRAPH_CLIENT_SECRET=' + os.getenv('GRAPH_CLIENT_SECRET')
]
| true | true |
f72d9bfbd3fe8904996d342401e1c556009901cd | 12,498 | py | Python | jina/logging/profile.py | arijitdas123student/jina | 54d916e362bde0391b0af0f12241d531b8453247 | [
"Apache-2.0"
] | 2 | 2021-10-18T04:43:27.000Z | 2021-10-18T04:43:47.000Z | jina/logging/profile.py | arijitdas123student/jina | 54d916e362bde0391b0af0f12241d531b8453247 | [
"Apache-2.0"
] | null | null | null | jina/logging/profile.py | arijitdas123student/jina | 54d916e362bde0391b0af0f12241d531b8453247 | [
"Apache-2.0"
] | null | null | null | import datetime
import itertools
import math
import sys
import threading
import time
from collections import defaultdict
from functools import wraps
from typing import Optional, Union, Callable
from jina.enums import ProgressBarStatus
from .logger import JinaLogger
from .. import __windows__
from ..helper import colored, get_readable_size, get_readable_time
def used_memory(unit: int = 1024 * 1024 * 1024) -> float:
    """
    Get the memory usage of the current process.

    :param unit: Unit of the memory, default in Gigabytes.
    :return: Memory usage of the current process.
    """
    if __windows__:
        # `resource` is POSIX-only; memory accounting is unavailable on Windows
        return 0
    import resource

    peak_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return peak_rss / unit
def used_memory_readable() -> str:
    """
    Get the memory usage of the current process in a human-readable format.

    :return: Memory usage of the current process.
    """
    raw_bytes = used_memory(1)
    return get_readable_size(raw_bytes)
def profiling(func):
    """
    Create the Decorator to mark a function for profiling. The time and memory usage will be recorded and printed.

    Example:
        .. highlight:: python
        .. code-block:: python

            @profiling
            def foo():
                print(1)

    :param func: function to be profiled
    :return: arguments wrapper
    """
    from .predefined import default_logger

    @wraps(func)
    def arg_wrapper(*args, **kwargs):
        # sample time and memory before/after so the deltas cover `func` only
        start_t = time.perf_counter()
        start_mem = used_memory(unit=1)
        r = func(*args, **kwargs)
        elapsed = time.perf_counter() - start_t
        end_mem = used_memory(unit=1)
        # level_prefix = ''.join('-' for v in inspect.stack() if v and v.index is not None and v.index >= 0)
        level_prefix = ''
        mem_status = f'memory Δ {get_readable_size(end_mem - start_mem)} {get_readable_size(start_mem)} -> {get_readable_size(end_mem)}'
        default_logger.info(
            f'{level_prefix} {func.__qualname__} time: {elapsed}s {mem_status}'
        )
        return r

    return arg_wrapper
class TimeDict:
    """Accumulates wall-clock timings per named key.

    Usable as a re-entrant context manager: ``with td('stage'): ...`` adds the
    elapsed time of the block to ``accum_time['stage']``.
    """

    def __init__(self):
        self.accum_time = defaultdict(float)
        self.first_start_time = defaultdict(float)
        self.start_time = defaultdict(float)
        self.end_time = defaultdict(float)
        self._key_stack = []
        self._pending_reset = False

    def __call__(self, key: str, *args, **kwargs):
        """Select the key to time next and return self for use in ``with``.

        :param key: key name of the counter
        :param args: extra arguments
        :param kwargs: keyword arguments
        :return: self object
        """
        self._key_stack.append(key)
        return self

    def __enter__(self):
        active = self._key_stack[-1]
        if active not in self.first_start_time:
            # remember only the very first time this key was entered
            self.first_start_time[active] = time.perf_counter()
        self.start_time[active] = time.perf_counter()
        return self

    def __exit__(self, typ, value, traceback):
        active = self._key_stack.pop()
        self.end_time[active] = time.perf_counter()
        self.accum_time[active] += self.end_time[active] - self.start_time[active]
        if self._pending_reset:
            self.reset()

    def reset(self):
        """Drop all recorded timings (deferred while a timing is still active)."""
        if self._key_stack:
            self._pending_reset = True
            return
        for record in (
            self.accum_time,
            self.start_time,
            self.first_start_time,
            self.end_time,
        ):
            record.clear()
        self._key_stack.clear()
        self._pending_reset = False

    def __str__(self):
        return ' '.join(f'{k}: {v:3.1f}s' for k, v in self.accum_time.items())
class TimeContext:
    """Context manager that measures the wall-clock time of a code block and
    reports it via a logger or plain :func:`print`."""

    time_attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']

    def __init__(self, task_name: str, logger: 'JinaLogger' = None):
        """
        Create the context manager to timing a code snippet.

        :param task_name: The context/message.
        :param logger: Use existing logger or use naive :func:`print`.

        Example:
        .. highlight:: python
        .. code-block:: python

            with TimeContext('loop'):
                do_busy()
        """
        self.task_name = task_name
        self._logger = logger
        self.duration = 0

    def now(self) -> float:
        """
        Get the passed time from start to now.

        :return: passed time
        """
        return time.perf_counter() - self.start

    def __enter__(self):
        self.start = time.perf_counter()
        self._enter_msg()
        return self

    def _enter_msg(self):
        if not self._logger:
            print(self.task_name, end=' ...\t', flush=True)
        else:
            self._logger.info(self.task_name + '...')

    def __exit__(self, typ, value, traceback):
        self.duration = self.now()
        self.readable_duration = get_readable_time(seconds=self.duration)
        self._exit_msg()

    def _exit_msg(self):
        done_msg = f'{self.task_name} takes {self.readable_duration} ({self.duration:.2f}s)'
        if not self._logger:
            print(colored(done_msg), flush=True)
        else:
            self._logger.info(done_msg)
class ProgressBar(TimeContext):
    """
    A simple progress bar rendered by a background thread.

    Example:
        .. highlight:: python
        .. code-block:: python

            with ProgressBar('loop'):
                do_busy()
    """

    col_width = 100
    # carriage-return + spaces + carriage-return: blanks the current terminal line
    clear_line = '\r{}\r'.format(' ' * col_width)
    # braille spinner frames, advanced on every render tick
    spinner = itertools.cycle(['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'])

    def __init__(
        self,
        description: str = 'Working...',
        message_on_done: Union[str, Callable[..., str], None] = None,
        final_line_feed: bool = True,
        total_length: Optional[int] = None,
    ):
        """
        Create the ProgressBar.

        :param description: The name of the task, will be displayed in front of the bar.
        :param message_on_done: The final message to print when the progress is complete
        :param final_line_feed: if False, the line will not get a Line Feed and thus is easily overwritable.
        :param total_length: if set, then every :py:meth:`.update` increases the bar by `1/total_length * _bars_on_row`
        """
        super().__init__(description, None)
        self._bars_on_row = 40
        self._completed_progress = 0
        self._last_rendered_progress = 0
        self._num_update_called = 0
        self._on_done = message_on_done
        self._final_line_feed = final_line_feed
        self._total_length = total_length
        # set to stop the background render thread
        self._stop_event = threading.Event()

    def update(
        self,
        progress: float = 1.0,
        description: Optional[str] = None,
        message: Optional[str] = None,
        status: ProgressBarStatus = ProgressBarStatus.WORKING,
        first_enter: bool = False,
    ) -> None:
        """
        Increment the progress bar by one unit.

        :param progress: The number of unit to increment.
        :param description: Change the description text before the progress bar on update.
        :param message: Change the message text followed after the progress bar on update.
        :param status: If set to a value, it will mark the task as complete, can be either "Done" or "Canceled"
        :param first_enter: if this method is called by `__enter__`
        """
        if self._total_length:
            # scale one logical step to its share of the bar width
            progress = progress / self._total_length * self._bars_on_row
        self._completed_progress += progress
        self._last_rendered_progress = self._completed_progress
        elapsed = time.perf_counter() - self.start
        num_bars = self._completed_progress % self._bars_on_row
        # wrap-around: show a full row exactly at multiples, at least one bar otherwise
        num_bars = (
            self._bars_on_row
            if not num_bars and self._completed_progress
            else max(num_bars, 1)
        )
        num_fullbars = math.floor(num_bars)
        num_halfbars = 1 if (num_bars - num_fullbars <= 0.5) else 0
        # colour scheme depends on the terminal status of the task
        if status in {ProgressBarStatus.DONE, ProgressBarStatus.CANCELED}:
            bar_color, unfinished_bar_color = 'yellow', 'yellow'
        elif status == ProgressBarStatus.ERROR:
            bar_color, unfinished_bar_color = 'red', 'red'
        else:
            bar_color, unfinished_bar_color = 'green', 'green'
        if first_enter:
            speed_str = 'estimating...'
        elif self._total_length:
            # percentage + ETA extrapolated from the elapsed time
            _prog = self._num_update_called / self._total_length
            speed_str = f'{(_prog * 100):.0f}% ETA: {get_readable_time(seconds=self.now() / (_prog + 1e-6) * (1 - _prog + 1e-6))}'
        else:
            speed_str = f'{self._num_update_called / elapsed:4.1f} step/s'
        self._num_update_called += 0 if first_enter else 1
        description_str = description or self.task_name or ''
        if status != ProgressBarStatus.WORKING:
            description_str = str(status)
        msg_str = message or ''
        # snapshot for the render thread; it reads this dict on every tick
        self._bar_info = dict(
            bar_color=bar_color,
            description_str=description_str,
            msg_str=msg_str,
            num_fullbars=num_fullbars,
            num_halfbars=num_halfbars,
            speed_str=speed_str,
            unfinished_bar_color=unfinished_bar_color,
        )

    def _print_bar(self, bar_info):
        # render one frame: spinner, description, bar, elapsed time, speed, message
        time_str = str(
            datetime.timedelta(seconds=time.perf_counter() - self.start)
        ).split('.')[0]
        sys.stdout.write(self.clear_line)
        sys.stdout.write(
            '{} {:>10} {:<}{:<} {} {} {}'.format(
                colored(next(self.spinner), 'green'),
                bar_info['description_str'],
                colored('━' * bar_info['num_fullbars'], bar_info['bar_color'])
                + (
                    colored(
                        '╸',
                        bar_info['bar_color']
                        if bar_info['num_halfbars']
                        else bar_info['unfinished_bar_color'],
                    )
                ),
                colored(
                    '━' * (self._bars_on_row - bar_info['num_fullbars']),
                    bar_info['unfinished_bar_color'],
                    attrs=['dark'],
                ),
                colored(time_str, 'cyan'),
                bar_info['speed_str'],
                bar_info['msg_str'],
            )
        )
        sys.stdout.flush()

    def _update_thread(self):
        # background renderer: redraw ~10x/s until the stop event is set
        sys.stdout.flush()
        while not self._stop_event.is_set():
            self._print_bar(self._bar_info)
            time.sleep(0.1)

    def _enter_msg(self):
        self.update(first_enter=True)
        self._progress_thread = threading.Thread(
            target=self._update_thread, daemon=True
        )
        self._progress_thread.start()

    def __exit__(self, exc_type, value, traceback):
        self.duration = self.now()
        self.readable_duration = get_readable_time(seconds=self.duration)
        if exc_type in {KeyboardInterrupt, SystemExit}:
            self._stop_event.set()
            self.update(0, status=ProgressBarStatus.CANCELED)
            self._print_bar(self._bar_info)
        elif exc_type and issubclass(exc_type, Exception):
            self._stop_event.set()
            self.update(0, status=ProgressBarStatus.ERROR)
            self._print_bar(self._bar_info)
        else:
            # normal ending, i.e. task is complete
            self._stop_event.set()
            self._progress_thread.join()
            self.update(0, status=ProgressBarStatus.DONE)
            self._print_bar(self._bar_info)
            self._print_final_msg()

    def _print_final_msg(self):
        if self._last_rendered_progress > 1:
            final_msg = f'\033[K{self._completed_progress:.0f} steps done in {self.readable_duration}'
            if self._on_done:
                if isinstance(self._on_done, str):
                    final_msg = self._on_done
                elif callable(self._on_done):
                    final_msg = self._on_done()
            sys.stdout.write(final_msg)
            if self._final_line_feed:
                sys.stdout.write('\n')
        else:
            # no actual render happens
            sys.stdout.write(self.clear_line)
        sys.stdout.flush()
| 33.063492 | 136 | 0.589774 | import datetime
import itertools
import math
import sys
import threading
import time
from collections import defaultdict
from functools import wraps
from typing import Optional, Union, Callable
from jina.enums import ProgressBarStatus
from .logger import JinaLogger
from .. import __windows__
from ..helper import colored, get_readable_size, get_readable_time
def used_memory(unit: int = 1024 * 1024 * 1024) -> float:
if __windows__:
return 0
import resource
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / unit
def used_memory_readable() -> str:
return get_readable_size(used_memory(1))
def profiling(func):
from .predefined import default_logger
@wraps(func)
def arg_wrapper(*args, **kwargs):
start_t = time.perf_counter()
start_mem = used_memory(unit=1)
r = func(*args, **kwargs)
elapsed = time.perf_counter() - start_t
end_mem = used_memory(unit=1)
# level_prefix = ''.join('-' for v in inspect.stack() if v and v.index is not None and v.index >= 0)
level_prefix = ''
mem_status = f'memory Δ {get_readable_size(end_mem - start_mem)} {get_readable_size(start_mem)} -> {get_readable_size(end_mem)}'
default_logger.info(
f'{level_prefix} {func.__qualname__} time: {elapsed}s {mem_status}'
)
return r
return arg_wrapper
class TimeDict:
def __init__(self):
self.accum_time = defaultdict(float)
self.first_start_time = defaultdict(float)
self.start_time = defaultdict(float)
self.end_time = defaultdict(float)
self._key_stack = []
self._pending_reset = False
def __enter__(self):
_key = self._key_stack[-1]
# store only the first enter time
if _key not in self.first_start_time:
self.first_start_time[_key] = time.perf_counter()
self.start_time[_key] = time.perf_counter()
return self
def __exit__(self, typ, value, traceback):
_key = self._key_stack.pop()
self.end_time[_key] = time.perf_counter()
self.accum_time[_key] += self.end_time[_key] - self.start_time[_key]
if self._pending_reset:
self.reset()
def __call__(self, key: str, *args, **kwargs):
self._key_stack.append(key)
return self
def reset(self):
if self._key_stack:
self._pending_reset = True
else:
self.accum_time.clear()
self.start_time.clear()
self.first_start_time.clear()
self.end_time.clear()
self._key_stack.clear()
self._pending_reset = False
def __str__(self):
return ' '.join(f'{k}: {v:3.1f}s' for k, v in self.accum_time.items())
class TimeContext:
time_attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']
def __init__(self, task_name: str, logger: 'JinaLogger' = None):
self.task_name = task_name
self._logger = logger
self.duration = 0
def __enter__(self):
self.start = time.perf_counter()
self._enter_msg()
return self
def _enter_msg(self):
if self._logger:
self._logger.info(self.task_name + '...')
else:
print(self.task_name, end=' ...\t', flush=True)
def __exit__(self, typ, value, traceback):
self.duration = self.now()
self.readable_duration = get_readable_time(seconds=self.duration)
self._exit_msg()
def now(self) -> float:
return time.perf_counter() - self.start
def _exit_msg(self):
if self._logger:
self._logger.info(
f'{self.task_name} takes {self.readable_duration} ({self.duration:.2f}s)'
)
else:
print(
colored(
f'{self.task_name} takes {self.readable_duration} ({self.duration:.2f}s)'
),
flush=True,
)
class ProgressBar(TimeContext):
    """Console progress bar repainted by a background daemon thread.

    ``update()`` only records the latest state into ``self._bar_info``; a
    separate thread, started on context entry, repaints the line every 0.1 s
    from that snapshot, so rendering never blocks the caller's work loop.
    NOTE(review): ``_bar_info`` is written and read without a lock —
    presumably relying on atomic attribute assignment; confirm.
    """
    # Number of terminal columns blanked when clearing the line.
    col_width = 100
    # CR + spaces + CR: wipes whatever is currently on the line.
    clear_line = '\r{}\r'.format(' ' * col_width)
    # Braille spinner frames; one frame is consumed per repaint.
    spinner = itertools.cycle(['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'])
    def __init__(
        self,
        description: str = 'Working...',
        message_on_done: Union[str, Callable[..., str], None] = None,
        final_line_feed: bool = True,
        total_length: Optional[int] = None,
    ):
        """
        :param description: text shown next to the bar while it is active
        :param message_on_done: string, or zero-argument callable returning a
            string, printed once when the bar finishes
        :param final_line_feed: emit a trailing newline after the final message
        :param total_length: expected total number of update steps; when set,
            progress is rendered as a percentage with an ETA estimate
        """
        super().__init__(description, None)
        self._bars_on_row = 40
        self._completed_progress = 0
        self._last_rendered_progress = 0
        self._num_update_called = 0
        self._on_done = message_on_done
        self._final_line_feed = final_line_feed
        self._total_length = total_length
        self._stop_event = threading.Event()
    def update(
        self,
        progress: float = 1.0,
        description: Optional[str] = None,
        message: Optional[str] = None,
        status: ProgressBarStatus = ProgressBarStatus.WORKING,
        first_enter: bool = False,
    ) -> None:
        """Record one unit of progress and refresh the render snapshot.

        :param progress: amount of work completed since the last call
        :param description: replacement for the bar's description text
        :param message: free-form text appended after the bar
        :param status: current state; non-WORKING states recolor the bar
        :param first_enter: True only for the initial call from ``_enter_msg``
        """
        if self._total_length:
            # Scale an absolute step count into bar-column units.
            progress = progress / self._total_length * self._bars_on_row
        self._completed_progress += progress
        self._last_rendered_progress = self._completed_progress
        elapsed = time.perf_counter() - self.start
        # The bar wraps around once a full row of columns has been filled.
        num_bars = self._completed_progress % self._bars_on_row
        num_bars = (
            self._bars_on_row
            if not num_bars and self._completed_progress
            else max(num_bars, 1)
        )
        num_fullbars = math.floor(num_bars)
        # NOTE(review): draws the half-width cap when the fractional part is
        # <= 0.5 — verify the intended threshold direction.
        num_halfbars = 1 if (num_bars - num_fullbars <= 0.5) else 0
        # Color encodes the status: yellow for done/canceled, red for error.
        if status in {ProgressBarStatus.DONE, ProgressBarStatus.CANCELED}:
            bar_color, unfinished_bar_color = 'yellow', 'yellow'
        elif status == ProgressBarStatus.ERROR:
            bar_color, unfinished_bar_color = 'red', 'red'
        else:
            bar_color, unfinished_bar_color = 'green', 'green'
        # Speed/ETA text: percentage+ETA when the total is known, steps/s otherwise.
        if first_enter:
            speed_str = 'estimating...'
        elif self._total_length:
            _prog = self._num_update_called / self._total_length
            speed_str = f'{(_prog * 100):.0f}% ETA: {get_readable_time(seconds=self.now() / (_prog + 1e-6) * (1 - _prog + 1e-6))}'
        else:
            speed_str = f'{self._num_update_called / elapsed:4.1f} step/s'
        self._num_update_called += 0 if first_enter else 1
        description_str = description or self.task_name or ''
        if status != ProgressBarStatus.WORKING:
            description_str = str(status)
        msg_str = message or ''
        # Snapshot consumed asynchronously by the render thread.
        self._bar_info = dict(
            bar_color=bar_color,
            description_str=description_str,
            msg_str=msg_str,
            num_fullbars=num_fullbars,
            num_halfbars=num_halfbars,
            speed_str=speed_str,
            unfinished_bar_color=unfinished_bar_color,
        )
    def _print_bar(self, bar_info):
        """Erase the current line and draw one frame of the bar from *bar_info*."""
        # Elapsed time without the sub-second fraction.
        time_str = str(
            datetime.timedelta(seconds=time.perf_counter() - self.start)
        ).split('.')[0]
        sys.stdout.write(self.clear_line)
        sys.stdout.write(
            '{} {:>10} {:<}{:<} {} {} {}'.format(
                colored(next(self.spinner), 'green'),
                bar_info['description_str'],
                colored('━' * bar_info['num_fullbars'], bar_info['bar_color'])
                + (
                    colored(
                        '╸',
                        bar_info['bar_color']
                        if bar_info['num_halfbars']
                        else bar_info['unfinished_bar_color'],
                    )
                ),
                colored(
                    '━' * (self._bars_on_row - bar_info['num_fullbars']),
                    bar_info['unfinished_bar_color'],
                    attrs=['dark'],
                ),
                colored(time_str, 'cyan'),
                bar_info['speed_str'],
                bar_info['msg_str'],
            )
        )
        sys.stdout.flush()
    def _update_thread(self):
        """Render loop: repaint every 0.1 s until the stop event is set."""
        sys.stdout.flush()
        while not self._stop_event.is_set():
            self._print_bar(self._bar_info)
            time.sleep(0.1)
    def _enter_msg(self):
        # Seed the first snapshot, then start the daemon render thread.
        self.update(first_enter=True)
        self._progress_thread = threading.Thread(
            target=self._update_thread, daemon=True
        )
        self._progress_thread.start()
    def __exit__(self, exc_type, value, traceback):
        """Stop rendering and paint a final frame colored by how we exited."""
        self.duration = self.now()
        self.readable_duration = get_readable_time(seconds=self.duration)
        # NOTE(review): in the cancel/error paths the render thread is
        # signalled but not joined before the final repaint — confirm the
        # possible racing repaint is acceptable.
        if exc_type in {KeyboardInterrupt, SystemExit}:
            self._stop_event.set()
            self.update(0, status=ProgressBarStatus.CANCELED)
            self._print_bar(self._bar_info)
        elif exc_type and issubclass(exc_type, Exception):
            self._stop_event.set()
            self.update(0, status=ProgressBarStatus.ERROR)
            self._print_bar(self._bar_info)
        else:
            # normal ending, i.e. task is complete
            self._stop_event.set()
            self._progress_thread.join()
            self.update(0, status=ProgressBarStatus.DONE)
            self._print_bar(self._bar_info)
        self._print_final_msg()
    def _print_final_msg(self):
        """Print the closing summary (or just clear the line if nothing ran)."""
        if self._last_rendered_progress > 1:
            final_msg = f'\033[K{self._completed_progress:.0f} steps done in {self.readable_duration}'
            if self._on_done:
                if isinstance(self._on_done, str):
                    final_msg = self._on_done
                elif callable(self._on_done):
                    final_msg = self._on_done()
            sys.stdout.write(final_msg)
            if self._final_line_feed:
                sys.stdout.write('\n')
        else:
            # no actual render happens
            sys.stdout.write(self.clear_line)
        sys.stdout.flush()
| true | true |
f72d9c8b3378c16b14842c4efffd09d4801f1f6c | 3,611 | py | Python | bin/EnhancedScrapy/spider/middlewares.py | LABZHG/IHPACrawler | b68101364b0e3fff69283d23b2be46f61e8dc853 | [
"MIT"
] | null | null | null | bin/EnhancedScrapy/spider/middlewares.py | LABZHG/IHPACrawler | b68101364b0e3fff69283d23b2be46f61e8dc853 | [
"MIT"
] | null | null | null | bin/EnhancedScrapy/spider/middlewares.py | LABZHG/IHPACrawler | b68101364b0e3fff69283d23b2be46f61e8dc853 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class EnhancedspiderSpiderMiddleware(object):
    """Pass-through spider middleware (Scrapy template with inert hooks).

    Every hook keeps Scrapy's default behaviour; the class only logs when a
    spider is opened.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response unchanged.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # Defer to Scrapy's default exception handling.
        pass

    def process_start_requests(self, start_requests, spider):
        # Hand the start requests through untouched.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class EnhancedspiderDownloaderMiddleware(object):
    """Pass-through downloader middleware (Scrapy template with inert hooks)."""

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None lets Scrapy continue processing this request.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Defer to Scrapy's default download error handling.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| 34.721154 | 78 | 0.667405 |
from scrapy import signals
class EnhancedspiderSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class EnhancedspiderDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| true | true |
f72d9e5657caeeace2c74e9759c3b45caf7c913b | 283 | py | Python | src/052.py | mackorone/euler | 1b0c2271690d1598d2643e75b7e1f693b4155f49 | [
"MIT"
] | null | null | null | src/052.py | mackorone/euler | 1b0c2271690d1598d2643e75b7e1f693b4155f49 | [
"MIT"
] | null | null | null | src/052.py | mackorone/euler | 1b0c2271690d1598d2643e75b7e1f693b4155f49 | [
"MIT"
def helper(num):
    """Return the canonical digit signature of *num*: its digits, sorted."""
    return ''.join(sorted(str(num)))
def ans():
    """Return the smallest n >= 100000 whose multiples 2n..6n permute n's digits.

    Project Euler problem 52; the search starts at the first 6-digit number
    because n and 6n must have the same digit count. The result is 142857.
    """
    num = 100000
    while True:
        string = helper(num)
        if all(helper(num * i) == string for i in range(2, 7)):
            return num
        num += 1
if __name__ == '__main__':
print(ans())
| 17.6875 | 63 | 0.515901 | def helper(num):
return ''.join(sorted(str(num)))
def ans():
num = 100000
while True:
string = helper(num)
if all(helper(num * i) == string for i in range(2, 7)):
return num
num += 1
if __name__ == '__main__':
print(ans())
| true | true |
f72da0db59ad5454e517786cd1bc98eb3f7fd29f | 5,772 | py | Python | srcOld/loss.py | OphirGenomica/proteinFolding | b4b6ea19307e176e58aa9d39ae161003c340416d | [
"MIT"
] | null | null | null | srcOld/loss.py | OphirGenomica/proteinFolding | b4b6ea19307e176e58aa9d39ae161003c340416d | [
"MIT"
] | null | null | null | srcOld/loss.py | OphirGenomica/proteinFolding | b4b6ea19307e176e58aa9d39ae161003c340416d | [
"MIT"
] | null | null | null | import time
import matplotlib
import numpy as np
matplotlib.use('Agg')
import torch
import torch.nn as nn
class LossMultiTargets(nn.Module):
    """Average a base criterion over paired sequences of inputs and targets."""

    def __init__(self, loss_fnc=None):
        """
        :param loss_fnc: criterion applied to each (input, target) pair;
            defaults to a fresh ``torch.nn.CrossEntropyLoss``.
        """
        super(LossMultiTargets, self).__init__()
        # Build the default criterion per instance: a module instance used as
        # a default argument is evaluated once at def time and would be shared
        # by every LossMultiTargets object (mutable-default anti-pattern).
        self.loss = torch.nn.CrossEntropyLoss() if loss_fnc is None else loss_fnc

    def forward(self, inputs, targets):
        """Return the mean of ``self.loss(input, target)`` over the pairs.

        :param inputs: sequence of prediction tensors
        :param targets: sequence of target tensors (same length as inputs)
        """
        total = 0
        nb = len(targets)
        for inp, tgt in zip(inputs, targets):
            total += self.loss(inp, tgt)
        return total / nb
class MSELoss(torch.nn.Module):
    """Masked relative MSE over a batch of distance-like tensors.

    Only entries where the target is strictly positive contribute; batch
    items whose target is entirely non-positive are skipped (but the sum is
    still divided by the full batch size).
    """

    def __init__(self):
        super(MSELoss, self).__init__()

    def forward(self, input, target):
        """Return mean over the batch of ||pred-ref||^2 / ||ref||^2 on masked entries."""
        batch = target.shape[0]
        total = 0
        for pred, ref in zip(input, target):
            mask = ref > 0
            if torch.sum(mask) == 0:
                # Nothing to learn from this item.
                continue
            ref_sel = ref[mask]
            assert torch.norm(ref_sel) > 0
            diff = pred[mask] - ref_sel
            total += torch.norm(diff) ** 2 / torch.norm(ref_sel) ** 2
        return total / batch
def pc_translation_rotation_matching(r1, r2):
    """Relative alignment error between two equal-size 3-D point clouds.

    Both clouds are centered on their centroids (removing translation), then
    the optimal rotation of r1 onto r2 is found from the SVD of the
    cross-covariance (Kabsch algorithm, with a determinant-sign fix against
    reflections) and the relative squared residual is returned.  The
    wall-clock time of each SVD-related step is printed.

    :param r1: tensor of shape (3, n), predicted points
    :param r2: tensor of shape (3, n), target points
    :return: scalar tensor with the relative squared alignment error
    """
    centered_pred = r1 - torch.mean(r1, dim=1, keepdim=True)
    centered_target = r2 - torch.mean(r2, dim=1, keepdim=True)
    cov = centered_pred @ centered_target.transpose(0, 1)
    t_svd0 = time.time()
    U, S, V = torch.svd(cov)
    t_svd1 = time.time()
    det_sign = torch.sign(torch.det(V @ U.transpose(0, 1)))
    t_sign = time.time()
    # Flip the last axis when the raw solution would be a reflection.
    reflect_fix = torch.diag_embed(torch.tensor([1, 1, det_sign])).to(device=V.device)
    t_diag = time.time()
    rot = V @ reflect_fix @ U.transpose(0, 1)
    t_rot = time.time()
    aligned_pred = rot @ centered_pred
    assert torch.norm(centered_target) > 0
    rel_err = torch.norm(aligned_pred - centered_target) ** 2 / torch.norm(centered_target) ** 2
    print("{:2.4f},{:2.4f},{:2.4f},{:2.4f}".format(t_svd1 - t_svd0, t_sign - t_svd1, t_diag - t_sign, t_rot - t_diag))
    return rel_err
def loss_tr_wrapper(r1, r2):
    """Batch-average alignment loss between predicted (r1) and target (r2) clouds.

    Points whose r2 coordinates are all zero are treated as masked and are
    excluded before each per-item call to ``pc_translation_rotation_matching``
    (the point clouds are translation invariant, so targets can be shifted to
    avoid accidental zeros).

    :param r1: tensor of shape (batch, 3, n), predictions
    :param r2: tensor of shape (batch, 3, n), targets
    :return: scalar tensor, the mean per-item alignment loss
    """
    nb = r1.shape[0]
    total = 0
    for i in range(nb):
        pred = r1[i]
        target = r2[i]
        # Keep a point when any of its 3 target coordinates is non-zero.
        keep = torch.sum((target != 0).reshape(3, -1), dim=0) > 0
        total += pc_translation_rotation_matching(pred[:, keep], target[:, keep])
    return total / nb
def loss_tr(r1, r2, return_coords=False):
    """Batched, mask-aware relative alignment loss between point clouds.

    Points whose r2 coordinates are all zero count as masked; batch items
    with no unmasked points are dropped entirely.  Each kept item is centered
    on the centroid of its unmasked points, the optimal rotation is found via
    batched SVD (with a reflection fix), and the mean relative squared
    residual is returned.

    :param r1: tensor of shape (batch, 3, n), predictions
    :param r2: tensor of shape (batch, 3, n), targets
    :param return_coords: additionally return the aligned prediction and
        target of the last batch item as numpy arrays
    """
    # A point is valid when any of its 3 target coordinates is non-zero.
    valid = (torch.sum(r2 != 0, dim=1) > 0).unsqueeze(1).repeat(1, 3, 1)
    keep_batch = torch.sum(valid, dim=(1, 2)) > 0
    r1, r2, valid = r1[keep_batch], r2[keep_batch], valid[keep_batch]
    nb = r1.shape[0]
    # Center each cloud on the centroid of its unmasked points.
    r1c = torch.empty_like(r1)
    r2c = torch.empty_like(r2)
    for i in range(nb):
        r1c[i] = r1[i] - torch.mean(r1[i, valid[i]].reshape(3, -1), dim=1, keepdim=True)
        r2c[i] = r2[i] - torch.mean(r2[i, valid[i]].reshape(3, -1), dim=1, keepdim=True)
    r1c = r1c * valid
    r2c = r2c * valid
    # Optimal rotation from the SVD of the batched cross-covariance.
    H = torch.bmm(r1c, r2c.transpose(1, 2))
    U, S, V = torch.svd(H)
    d = torch.sign(torch.det(torch.bmm(V, U.transpose(1, 2))))
    # Flip the last axis wherever the raw solution would be a reflection.
    signs = torch.stack([torch.ones_like(d), torch.ones_like(d), d], dim=1)
    R = torch.bmm(V, torch.bmm(torch.diag_embed(signs).to(device=V.device), U.transpose(1, 2)))
    r1cr = torch.bmm(R, r1c)
    result = torch.mean(torch.norm(r1cr - r2c, dim=(1, 2)) ** 2 / torch.norm(r2c, dim=(1, 2)) ** 2)
    if return_coords:
        pred = r1cr[-1].squeeze().cpu().detach().numpy()
        target = r2c[-1].squeeze().cpu().detach().numpy()
        return result, pred, target
    return result
import matplotlib
import numpy as np
matplotlib.use('Agg')
import torch
import torch.nn as nn
class LossMultiTargets(nn.Module):
def __init__(self,loss_fnc=torch.nn.CrossEntropyLoss()):
super(LossMultiTargets, self).__init__()
self.loss = loss_fnc
def forward(self, inputs,targets):
loss = 0
nb = len(targets)
for (input,target) in zip(inputs,targets):
loss += self.loss(input,target)
loss /= nb
return loss
class MSELoss(torch.nn.Module):
def __init__(self):
super(MSELoss,self).__init__()
def forward(self, input, target):
nb = target.shape[0]
result = 0
for i in range(nb):
inputi = input[i,:,:]
targeti = target[i,:,:]
maski = targeti > 0
if torch.sum(maski) == 0:
continue
assert torch.norm(targeti[maski]) > 0
result += torch.norm((inputi[maski] - targeti[maski])) ** 2 / torch.norm(targeti[maski]) ** 2
return result/nb
def pc_translation_rotation_matching(r1,r2):
r1c = r1 - torch.mean(r1, dim=1, keepdim=True)
r2c = r2 - torch.mean(r2, dim=1, keepdim=True)
H = r1c @ r2c.transpose(0,1)
t1 = time.time()
U, S, V = torch.svd(H)
t2 = time.time()
d = torch.sign(torch.det(V @ U.transpose(0,1)))
t3 = time.time()
tmp = torch.diag_embed(torch.tensor([1, 1, d])).to(device=V.device)
t4 = time.time()
R = V @ tmp @ U.transpose(0,1)
t5 = time.time()
r1cr = R @ r1c
assert torch.norm(r2c) > 0
loss_tr1 = torch.norm(r1cr - r2c) ** 2 / torch.norm(r2c) ** 2
loss_tr = loss_tr1
print("{:2.4f},{:2.4f},{:2.4f},{:2.4f}".format(t2-t1,t3-t2,t4-t3,t5-t4))
return loss_tr
def loss_tr_wrapper(r1,r2):
nb = r1.shape[0]
loss_tr = 0
for i in range(nb):
r1i = r1[i, :, :]
r2i = r2[i,:,:]
mask = (r2i != 0).reshape(3, -1)
mask = torch.sum(mask,dim=0) > 0
r1i = r1i[:,mask]
r2i = r2i[:,mask]
loss_tri = pc_translation_rotation_matching(r1i, r2i)
loss_tr += loss_tri
loss_tr /= nb
return loss_tr
def loss_tr(r1, r2, return_coords=False):
    """Batched, mask-aware relative alignment loss between point clouds.

    Points whose r2 coordinates are all zero count as masked; batch items
    with no unmasked points are dropped.  Each kept item is centered on the
    centroid of its unmasked points, the optimal rotation is computed via
    batched SVD (Kabsch with a reflection fix), and the mean relative squared
    residual is returned.

    Bug fix: the factorization line ``U, S, V = torch.svd(H)`` was missing,
    so ``U`` and ``V`` were referenced before assignment (guaranteed
    NameError); it has been restored.

    :param r1: tensor of shape (batch, 3, n), predictions
    :param r2: tensor of shape (batch, 3, n), targets
    :param return_coords: additionally return the aligned prediction and
        target of the last batch item as numpy arrays
    """
    # A point is valid when any of its 3 target coordinates is non-zero.
    mask = (r2 != 0).reshape(r2.shape)
    mask = (torch.sum(mask, dim=1) > 0).unsqueeze(1)
    mask = mask.repeat(1, 3, 1)
    batch_mask = torch.sum(mask, dim=(1, 2)) > 0
    r1 = r1[batch_mask, :, :]
    r2 = r2[batch_mask, :, :]
    mask = mask[batch_mask, :, :]
    nb = r1.shape[0]
    # Center each cloud on the centroid of its unmasked points.
    r1c = torch.empty_like(r1)
    r2c = torch.empty_like(r2)
    for i in range(nb):
        r1c[i, :, :] = r1[i, :, :] - torch.mean(r1[i, mask[i, :, :]].reshape(3, -1), dim=1, keepdim=True)
        r2c[i, :, :] = r2[i, :, :] - torch.mean(r2[i, mask[i, :, :]].reshape(3, -1), dim=1, keepdim=True)
    r1c = r1c * mask
    r2c = r2c * mask
    # Optimal rotation from the SVD of the batched cross-covariance.
    H = torch.bmm(r1c, r2c.transpose(1, 2))
    U, S, V = torch.svd(H)  # restored: was missing, leaving U/V undefined
    d = torch.sign(torch.det(torch.bmm(V, U.transpose(1, 2))))
    # Flip the last axis wherever the raw solution would be a reflection.
    signs = torch.stack([torch.ones_like(d), torch.ones_like(d), d], dim=1)
    tmp = torch.diag_embed(signs).to(device=V.device)
    R = torch.bmm(V, torch.bmm(tmp, U.transpose(1, 2)))
    r1cr = torch.bmm(R, r1c)
    loss = torch.mean(torch.norm(r1cr - r2c, dim=(1, 2)) ** 2 / torch.norm(r2c, dim=(1, 2)) ** 2)
    if return_coords:
        pred = r1cr[-1, :, :].squeeze().cpu().detach().numpy()
        target = r2c[-1, :, :].squeeze().cpu().detach().numpy()
        return loss, pred, target
    return loss
f72da1a3a3f39b2fcdbc9ceed248e3c2cc93b562 | 3,547 | py | Python | tutorials/02-intermediate/recurrent_neural_network/main.py | johnwu0604/pytorch-tutorial | bdbc283a0b79620d9b582f1c4d2c2220a853b856 | [
"MIT"
] | null | null | null | tutorials/02-intermediate/recurrent_neural_network/main.py | johnwu0604/pytorch-tutorial | bdbc283a0b79620d9b582f1c4d2c2220a853b856 | [
"MIT"
] | null | null | null | tutorials/02-intermediate/recurrent_neural_network/main.py | johnwu0604/pytorch-tutorial | bdbc283a0b79620d9b582f1c4d2c2220a853b856 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
    """Many-to-one LSTM classifier: only the final time step feeds the head."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        """
        :param input_size: features per time step
        :param hidden_size: LSTM hidden width
        :param num_layers: number of stacked LSTM layers
        :param num_classes: output logits
        """
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Map a (batch, seq, input_size) tensor to (batch, num_classes) logits."""
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        # Zero initial hidden/cell states; relies on the module-global `device`.
        h0 = torch.zeros(*state_shape).to(device)
        c0 = torch.zeros(*state_shape).to(device)
        seq_out, _ = self.lstm(x, (h0, c0))  # (batch, seq, hidden_size)
        # Decode only the hidden state of the last time step.
        return self.fc(seq_out[:, -1, :])
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Treat each 28x28 image as a length-28 sequence of 28-feature rows.
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Progress report every 100 mini-batches.
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# eval() + no_grad(): disable training-only behavior and gradient tracking.
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class is the index of the max logit.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
# NOTE(review): assumes the ./outputs/ directory already exists — confirm.
torch.save(model.state_dict(), './outputs/model.ckpt')
| 34.105769 | 101 | 0.592895 | import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
train_dataset = torchvision.datasets.MNIST(root='../../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
train=False,
transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
out, _ = self.lstm(x, (h0, c0))
out = self.fc(out[:, -1, :])
return out
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.reshape(-1, sequence_length, input_size).to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
torch.save(model.state_dict(), './outputs/model.ckpt')
| true | true |
f72da28d39719cdb80464e715e1aa7f0c2b6a6be | 3,760 | py | Python | qa/rpc-tests/maxblocksinflight.py | OwenRay/zeroonecoin | c1cda5840196746a508de702e4c28fe8f017453b | [
"MIT"
] | 13 | 2018-03-25T20:35:46.000Z | 2021-02-15T14:33:40.000Z | qa/rpc-tests/maxblocksinflight.py | OwenRay/zeroonecoin | c1cda5840196746a508de702e4c28fe8f017453b | [
"MIT"
] | 21 | 2018-03-30T21:57:01.000Z | 2020-08-05T23:06:41.000Z | qa/rpc-tests/maxblocksinflight.py | OwenRay/zeroonecoin | c1cda5840196746a508de702e4c28fe8f017453b | [
"MIT"
] | 15 | 2018-03-29T13:40:27.000Z | 2021-12-25T18:08:54.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
    """P2P test peer that floods a node with block invs and counts the
    getdata replies.

    Per-block request counts are recorded in ``blockReqCounts``; ``run()``
    fails if any block is requested more than once or if the total exceeds
    the module-level ``MAX_REQUESTS``.
    """
    # set up NodeConnCB callbacks, overriding base class
    def on_getdata(self, conn, message):
        # Record every block hash the node asks us for.
        self.log.debug("got getdata %s" % repr(message))
        # Log the requests
        for inv in message.inv:
            if inv.hash not in self.blockReqCounts:
                self.blockReqCounts[inv.hash] = 0
            self.blockReqCounts[inv.hash] += 1
    def on_close(self, conn):
        # An unexpected disconnect is a test failure.
        if not self.disconnectOkay:
            raise EarlyDisconnectError(0)
    def __init__(self):
        NodeConnCB.__init__(self)
        self.log = logging.getLogger("BlockRelayTest")
    def add_new_connection(self, connection):
        # Attach the connection to drive and reset the request counters.
        self.connection = connection
        self.blockReqCounts = {}
        self.disconnectOkay = False
    def run(self):
        self.connection.rpc.generate(1) # Leave IBD
        # Rounds of increasing inv counts; the last exceeds MAX_REQUESTS.
        numBlocksToGenerate = [8, 16, 128, 1024]
        for count in range(len(numBlocksToGenerate)):
            current_invs = []
            for i in range(numBlocksToGenerate[count]):
                # Inv of type 2 (block) with a random 256-bit hash.
                current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
                # Flush in chunks to stay under the per-message inv limit.
                if len(current_invs) >= 50000:
                    self.connection.send_message(msg_inv(current_invs))
                    current_invs = []
            if len(current_invs) > 0:
                self.connection.send_message(msg_inv(current_invs))
            # Wait and see how many blocks were requested
            time.sleep(2)
            total_requests = 0
            with mininode_lock:
                for key in self.blockReqCounts:
                    total_requests += self.blockReqCounts[key]
                    if self.blockReqCounts[key] > 1:
                        raise AssertionError("Error, test failed: block %064x requested more than once" % key)
            if total_requests > MAX_REQUESTS:
                raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
            print("Round %d: success (total requests: %d)" % (count, total_requests))
        self.disconnectOkay = True
        self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
    """Single-node functional test: advertise many blocks and check the node
    never requests more than MAX_REQUESTS of them from one peer."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1

    def add_options(self, parser):
        # The daemon binary can be overridden via --testbinary or $ZOCD.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("ZOCD", "zerooned"),
                          help="Binary to test max block requests behavior")

    def setup_network(self):
        node_args = [['-debug', '-whitelist=127.0.0.1']]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 extra_args=node_args,
                                 binary=[self.options.testbinary])

    def run_test(self):
        manager = TestManager()
        manager.add_new_connection(
            NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], manager))
        NetworkThread().start()  # network handling runs in its own thread
        manager.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| 38.762887 | 110 | 0.629255 |
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
self.connection.rpc.generate(1)
numBlocksToGenerate = [8, 16, 128, 1024]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print("Round %d: success (total requests: %d)" % (count, total_requests))
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ZOCD", "zerooned"),
help="Binary to test max block requests behavior")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start()
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| true | true |
f72da2d3b8872a2e49703402ba8c673ea736d87a | 782 | py | Python | pilot/perception/object_detection/load_label_map.py | yanbarurobotics/PythonPilot | eb07c620320c90ba85941041d08678a6142f988f | [
"Apache-2.0"
] | 129 | 2019-05-21T09:15:12.000Z | 2022-03-03T15:14:32.000Z | pilot/perception/object_detection/load_label_map.py | yanbarurobotics/PythonPilot | eb07c620320c90ba85941041d08678a6142f988f | [
"Apache-2.0"
] | 1 | 2022-03-28T04:21:54.000Z | 2022-03-28T04:21:54.000Z | pilot/perception/object_detection/load_label_map.py | yanbarurobotics/PythonPilot | eb07c620320c90ba85941041d08678a6142f988f | [
"Apache-2.0"
] | 31 | 2019-05-21T23:31:24.000Z | 2022-03-21T19:22:40.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from object_detection.tf_utils import label_map_util
class LoadLabelMap():
    """Loads a TF object-detection label map file into a category index."""

    def __init__(self):
        return

    def load_label_map(self, cfg):
        """
        LOAD LABEL MAP

        :param cfg: dict with 'label_path' (path to the label map file) and
            'num_classes' (maximum number of classes to convert).
        :return: category index dict keyed by class id.
        :raises Exception: re-raised after printing the traceback when the
            label map cannot be loaded. (Previously a bare ``except:``
            swallowed the error and the function then crashed with a
            NameError on the undefined ``category_index``.)
        """
        print('Loading label map')
        LABEL_PATH = cfg['label_path']
        NUM_CLASSES = cfg['num_classes']
        try:
            label_map = label_map_util.load_labelmap(LABEL_PATH)
            categories = label_map_util.convert_label_map_to_categories(
                label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
            category_index = label_map_util.create_category_index(categories)
        except Exception:
            # Narrowed from a bare `except:` (which also caught
            # KeyboardInterrupt); log the failure and propagate it instead of
            # falling through to an undefined-variable NameError.
            import traceback
            traceback.print_exc()
            raise
        return category_index
| 28.962963 | 134 | 0.626598 |
from object_detection.tf_utils import label_map_util
class LoadLabelMap():
def __init__(self):
return
def load_label_map(self, cfg):
print('Loading label map')
LABEL_PATH = cfg['label_path']
NUM_CLASSES = cfg['num_classes']
try:
label_map = label_map_util.load_labelmap(LABEL_PATH)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
except:
import traceback
traceback.print_exc()
return category_index
| true | true |
f72da46334b99da9f99dcbbd2bb1d806a0260194 | 152 | py | Python | piu/__init__.py | Krazylee/piu | d1ac6c90d45af0a87b162b9b4bc882536dab04aa | [
"BSD-3-Clause"
] | 1 | 2015-06-15T15:26:12.000Z | 2015-06-15T15:26:12.000Z | piu/__init__.py | Krazylee/piu | d1ac6c90d45af0a87b162b9b4bc882536dab04aa | [
"BSD-3-Clause"
] | null | null | null | piu/__init__.py | Krazylee/piu | d1ac6c90d45af0a87b162b9b4bc882536dab04aa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Krazy Lee'
__email__ = 'lixiangstar@gmail.com'
__version__ = '0.1.0'
from .piu import Piu
| 16.888889 | 35 | 0.664474 |
# Package metadata for piu.
__author__ = 'Krazy Lee'
__email__ = 'lixiangstar@gmail.com'
__version__ = '0.1.0'
# Re-export the main entry point so callers can do `from piu import Piu`.
from .piu import Piu
| true | true |
f72da61a7319bfedd1000e77309afca39803be65 | 37,507 | py | Python | src/sixtypical/compiler.py | catseye/SixtyPical | 075ba9ff2a9a171e8fcc6682d5374fc61f20e6c6 | [
"Unlicense"
] | 28 | 2015-06-13T16:38:52.000Z | 2021-09-06T16:47:09.000Z | src/sixtypical/compiler.py | catseye/SixtyPical | 075ba9ff2a9a171e8fcc6682d5374fc61f20e6c6 | [
"Unlicense"
] | null | null | null | src/sixtypical/compiler.py | catseye/SixtyPical | 075ba9ff2a9a171e8fcc6682d5374fc61f20e6c6 | [
"Unlicense"
] | 3 | 2019-10-11T07:22:02.000Z | 2020-10-19T15:23:05.000Z | # encoding: UTF-8
from sixtypical.ast import (
Program, Routine, Block, SingleOp, Reset, Call, GoTo, If, Repeat, For, WithInterruptsOff, Save, PointInto
)
from sixtypical.model import (
ConstantRef, LocationRef, IndexedRef, IndirectRef,
TYPE_BIT, TYPE_BYTE, TYPE_WORD,
TableType, PointerType, RoutineType, VectorType,
REG_A, REG_X, REG_Y, FLAG_C
)
from sixtypical.emitter import Byte, Word, Table, Label, Offset, LowAddressByte, HighAddressByte
from sixtypical.gen6502 import (
Immediate, Absolute, AbsoluteX, AbsoluteY, ZeroPage, Indirect, IndirectY, Relative,
LDA, LDX, LDY, STA, STX, STY,
TAX, TAY, TXA, TYA,
PHA, PLA,
CLC, SEC, ADC, SBC, ROL, ROR,
INC, INX, INY, DEC, DEX, DEY,
CMP, CPX, CPY, AND, ORA, EOR,
BCC, BCS, BNE, BEQ, BPL, BMI,
JMP, JSR, RTS,
SEI, CLI,
NOP,
)
class UnsupportedOpcodeError(KeyError):
    """Raised when an instruction's opcode/operand combination has no 6502 encoding."""
    pass
class Compiler(object):
    def __init__(self, symtab, emitter):
        """Create a compiler that reads types from `symtab` and emits code via `emitter`."""
        self.symtab = symtab
        self.emitter = emitter
        self.routines = {}           # routine.name -> Routine
        self.routine_locals = {}     # routine.name -> { local.name -> Label }
        self.labels = {}             # global.name -> Label ("global" includes routines)
        self.trampolines = {}        # Location -> Label (generated for vector calls)
        self.pointer_assoc = {}      # pointer name -> table name (I'm not entirely happy about this)
        self.current_routine = None  # Routine currently being compiled, if any
# - - - - helper methods - - - -
def get_type_for_name(self, name):
if self.current_routine and self.symtab.has_local(self.current_routine.name, name):
return self.symtab.fetch_local_type(self.current_routine.name, name)
return self.symtab.fetch_global_type(name)
def get_type(self, ref):
if isinstance(ref, ConstantRef):
return ref.type
if not isinstance(ref, LocationRef):
raise NotImplementedError
return self.get_type_for_name(ref.name)
def addressing_mode_for_index(self, index):
if index == REG_X:
return AbsoluteX
elif index == REG_Y:
return AbsoluteY
else:
raise NotImplementedError(index)
def compute_length_of_defn(self, defn):
length = None
type_ = self.get_type_for_name(defn.name)
if type_ == TYPE_BYTE:
length = 1
elif type_ == TYPE_WORD or isinstance(type_, (PointerType, VectorType)):
length = 2
elif isinstance(type_, TableType):
length = type_.size * (1 if type_.of_type == TYPE_BYTE else 2)
if length is None:
raise NotImplementedError("Need size for type {}".format(type_))
return length
def get_label(self, name):
if self.current_routine:
local_label = self.routine_locals.get(self.current_routine.name, {}).get(name)
if local_label:
return local_label
return self.labels[name]
def absolute_or_zero_page(self, label):
if label.addr is not None and label.addr < 256:
return ZeroPage(label)
else:
return Absolute(label)
# - - - - visitor methods - - - -
    def compile_program(self, program, compilation_roster=None):
        """Compile a whole Program: labels, routines, trampolines, then data.

        `compilation_roster`, when given, is a list of lists of routine names;
        routines in the same row are emitted consecutively so that a tail call
        from one to the next can be elided (see compile_routine).  Defaults to
        `main` first, then every other routine in declaration order.
        """
        assert isinstance(program, Program)
        declarations = []
        # Assign a label to every global before compiling any code.
        for defn in program.defns:
            length = self.compute_length_of_defn(defn)
            label = Label(defn.name, addr=defn.addr, length=length)
            self.labels[defn.name] = label
            declarations.append((defn, self.symtab.fetch_global_type(defn.name), label))
        for routine in program.routines:
            self.routines[routine.name] = routine
            label = Label(routine.name)
            if routine.addr is not None:
                label.set_addr(routine.addr)
            self.labels[routine.name] = label
            # Locals get labels that are visible only while compiling this routine.
            self.current_routine = routine
            local_labels = {}
            for defn in routine.locals:
                length = self.compute_length_of_defn(defn)
                label = Label(defn.name, addr=defn.addr, length=length)
                local_labels[defn.name] = label
                declarations.append((defn, self.symtab.fetch_local_type(routine.name, defn.name), label))
            self.routine_locals[routine.name] = local_labels
            self.current_routine = None
        if compilation_roster is None:
            compilation_roster = [['main']] + [[routine.name] for routine in program.routines if routine.name != 'main']
        for roster_row in compilation_roster:
            for i, routine_name in enumerate(roster_row):
                if i < len(roster_row) - 1:
                    self.compile_routine(self.routines[routine_name], next_routine=self.routines[roster_row[i + 1]])
                else:
                    self.compile_routine(self.routines[routine_name])
        # One JMP-indirect trampoline per vector that was `call`ed.
        for location, label in self.trampolines.items():
            self.emitter.resolve_label(label)
            self.emitter.emit(JMP(Indirect(self.get_label(location.name))))
            self.emitter.emit(RTS())
        # initialized data
        for defn, type_, label in declarations:
            if defn.initial is not None:
                initial_data = None
                if type_ == TYPE_BYTE:
                    initial_data = Byte(defn.initial)
                elif type_ == TYPE_WORD:
                    initial_data = Word(defn.initial)
                elif TableType.is_a_table_type(type_, TYPE_BYTE):
                    initial_data = Table([Byte(i) for i in defn.initial], type_.size)
                elif TableType.is_a_table_type(type_, TYPE_WORD):
                    initial_data = Table([Word(i) for i in defn.initial], type_.size)
                else:
                    raise NotImplementedError(type_)
                label.set_length(initial_data.size())
                self.emitter.resolve_label(label)
                self.emitter.emit(initial_data)
        # uninitialized, "BSS" data
        for defn, type_, label in declarations:
            if defn.initial is None and defn.addr is None:
                self.emitter.resolve_bss_label(label)
    def compile_routine(self, routine, next_routine=None):
        """Compile one routine's code, applying simple tail-call peepholes.

        A trailing JSR to a known label is rewritten to JMP (tail call);
        a trailing JMP to `next_routine` (the routine emitted immediately
        after this one) is dropped entirely so execution falls through.
        """
        assert isinstance(routine, Routine)
        self.current_routine = routine
        if routine.block:
            self.emitter.resolve_label(self.get_label(routine.name))
            self.compile_block(routine.block)
            needs_rts = True
            last_op = self.emitter.get_tail()
            # Peephole 1: JSR as the final op is a tail call - replace with JMP.
            if isinstance(last_op, JSR):
                if isinstance(last_op.operand, Absolute):
                    if isinstance(last_op.operand.value, Label):
                        label = last_op.operand.value
                        self.emitter.retract()
                        self.emitter.emit(JMP(Absolute(label)))
            last_op = self.emitter.get_tail()
            if isinstance(last_op, JMP):
                needs_rts = False
                # Peephole 2: a JMP straight to the routine emitted next is elided.
                if isinstance(last_op.operand, Absolute):
                    if isinstance(last_op.operand.value, Label):
                        if next_routine and last_op.operand.value.name == next_routine.name:
                            self.emitter.retract()
            if needs_rts:
                self.emitter.emit(RTS())
        self.current_routine = None
    def compile_block(self, block):
        """Compile each instruction in `block`.

        Also records whether the block directly (not nested) contains a
        `goto`; compile_if uses that flag to skip the jump-over-else.
        """
        assert isinstance(block, Block)
        block.shallow_contains_goto = False
        for instr in block.instrs:
            self.compile_instr(instr)
            if isinstance(instr, GoTo):
                block.shallow_contains_goto = True
def compile_instr(self, instr):
if isinstance(instr, SingleOp):
return self.compile_single_op(instr)
elif isinstance(instr, Call):
return self.compile_call(instr)
elif isinstance(instr, GoTo):
return self.compile_goto(instr)
elif isinstance(instr, If):
return self.compile_if(instr)
elif isinstance(instr, Repeat):
return self.compile_repeat(instr)
elif isinstance(instr, For):
return self.compile_for(instr)
elif isinstance(instr, WithInterruptsOff):
return self.compile_with_interrupts_off(instr)
elif isinstance(instr, Save):
return self.compile_save(instr)
elif isinstance(instr, PointInto):
return self.compile_point_into(instr)
elif isinstance(instr, Reset):
return self.compile_reset(instr)
else:
raise NotImplementedError
    def compile_single_op(self, instr):
        """Emit 6502 code for one SingleOp instruction.

        Handles the opcodes: ld, st, add, sub, cmp, and/or/xor, inc, dec,
        shl/shr, copy, trash (no code), and nop.  Raises
        UnsupportedOpcodeError for operand combinations with no encoding.
        """
        opcode = instr.opcode
        dest = instr.dest
        src = instr.src
        # ld: load a register from a register, constant, memory, table, or [ptr]+y.
        if opcode == 'ld':
            if dest == REG_A:
                if src == REG_X:
                    self.emitter.emit(TXA())
                elif src == REG_Y:
                    self.emitter.emit(TYA())
                elif isinstance(src, ConstantRef):
                    self.emitter.emit(LDA(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_X:
                    self.emitter.emit(LDA(AbsoluteX(Offset(self.get_label(src.ref.name), src.offset.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_Y:
                    self.emitter.emit(LDA(AbsoluteY(Offset(self.get_label(src.ref.name), src.offset.value))))
                elif isinstance(src, IndirectRef) and isinstance(self.get_type(src.ref), PointerType):
                    self.emitter.emit(LDA(IndirectY(self.get_label(src.ref.name))))
                else:
                    self.emitter.emit(LDA(self.absolute_or_zero_page(self.get_label(src.name))))
            elif dest == REG_X:
                if src == REG_A:
                    self.emitter.emit(TAX())
                elif isinstance(src, ConstantRef):
                    self.emitter.emit(LDX(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_Y:
                    self.emitter.emit(LDX(AbsoluteY(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(LDX(self.absolute_or_zero_page(self.get_label(src.name))))
            elif dest == REG_Y:
                if src == REG_A:
                    self.emitter.emit(TAY())
                elif isinstance(src, ConstantRef):
                    self.emitter.emit(LDY(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_X:
                    self.emitter.emit(LDY(AbsoluteX(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(LDY(self.absolute_or_zero_page(self.get_label(src.name))))
            else:
                raise UnsupportedOpcodeError(instr)
        # st: store a register (or a bit constant into the carry flag).
        elif opcode == 'st':
            if dest == FLAG_C and src == ConstantRef(TYPE_BIT, 0):
                self.emitter.emit(CLC())
            elif dest == FLAG_C and src == ConstantRef(TYPE_BIT, 1):
                self.emitter.emit(SEC())
            else:
                op_cls = {
                    REG_A: STA,
                    REG_X: STX,
                    REG_Y: STY
                }.get(src, None)
                if isinstance(dest, IndexedRef):
                    mode_cls = {
                        REG_X: AbsoluteX,
                        REG_Y: AbsoluteY,
                    }[dest.index]
                    operand = mode_cls(Offset(self.get_label(dest.ref.name), dest.offset.value))
                elif isinstance(dest, IndirectRef) and isinstance(self.get_type(dest.ref), PointerType):
                    operand = IndirectY(self.get_label(dest.ref.name))
                else:
                    operand = self.absolute_or_zero_page(self.get_label(dest.name))
                if op_cls is None:
                    raise UnsupportedOpcodeError(instr)
                self.emitter.emit(op_cls(operand))
        # add: byte add into A, or multi-byte add into a byte/word/pointer location.
        # NOTE: relies on the carry flag being set up by the program beforehand.
        elif opcode == 'add':
            if dest == REG_X or dest == REG_Y:
                raise UnsupportedOpcodeError(instr)
            if dest == REG_A:
                if isinstance(src, ConstantRef):
                    self.emitter.emit(ADC(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef):
                    mode = self.addressing_mode_for_index(src.index)
                    self.emitter.emit(ADC(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(ADC(Absolute(self.get_label(src.name))))
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_BYTE and self.get_type(dest) == TYPE_BYTE:
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                else:
                    raise UnsupportedOpcodeError(instr)
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_WORD and self.get_type(dest) == TYPE_WORD:
                # 16-bit add: low byte first, carry propagates into the high byte.
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Immediate(Byte(src.high_byte()))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Absolute(Offset(src_label, 1))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                else:
                    raise UnsupportedOpcodeError(instr)
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_WORD and isinstance(self.get_type(dest), PointerType):
                # Pointers live in zero page; same 16-bit add with zero-page addressing.
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(ZeroPage(dest_label)))
                    self.emitter.emit(ADC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(ZeroPage(dest_label)))
                    self.emitter.emit(LDA(ZeroPage(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Immediate(Byte(src.high_byte()))))
                    self.emitter.emit(STA(ZeroPage(Offset(dest_label, 1))))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(ZeroPage(dest_label)))
                    self.emitter.emit(ADC(Absolute(src_label)))
                    self.emitter.emit(STA(ZeroPage(dest_label)))
                    self.emitter.emit(LDA(ZeroPage(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Absolute(Offset(src_label, 1))))
                    self.emitter.emit(STA(ZeroPage(Offset(dest_label, 1))))
                else:
                    raise UnsupportedOpcodeError(instr)
            else:
                raise UnsupportedOpcodeError(instr)
        # sub: mirror of add, using SBC (borrow = inverted carry).
        elif opcode == 'sub':
            if dest == REG_X or dest == REG_Y:
                raise UnsupportedOpcodeError(instr)
            if dest == REG_A:
                if isinstance(src, ConstantRef):
                    self.emitter.emit(SBC(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef):
                    mode = self.addressing_mode_for_index(src.index)
                    self.emitter.emit(SBC(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(SBC(Absolute(self.get_label(src.name))))
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_BYTE and self.get_type(dest) == TYPE_BYTE:
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                else:
                    raise UnsupportedOpcodeError(instr)
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_WORD and self.get_type(dest) == TYPE_WORD:
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(SBC(Immediate(Byte(src.high_byte()))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(SBC(Absolute(Offset(src_label, 1))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                else:
                    raise UnsupportedOpcodeError(instr)
            else:
                raise UnsupportedOpcodeError(instr)
        elif opcode == 'cmp':
            self.compile_cmp(instr, instr.src, instr.dest)
        # Bitwise logic always targets the accumulator.
        elif opcode in ('and', 'or', 'xor',):
            cls = {
                'and': AND,
                'or':  ORA,
                'xor': EOR,
            }[opcode]
            if dest == REG_A:
                if isinstance(src, ConstantRef):
                    self.emitter.emit(cls(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef):
                    mode = self.addressing_mode_for_index(src.index)
                    self.emitter.emit(cls(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(cls(self.absolute_or_zero_page(self.get_label(src.name))))
            else:
                raise UnsupportedOpcodeError(instr)
        elif opcode == 'inc':
            self.compile_inc(instr, instr.dest)
        elif opcode == 'dec':
            self.compile_dec(instr, instr.dest)
        # shl/shr map to rotate-through-carry, not plain ASL/LSR.
        elif opcode in ('shl', 'shr'):
            cls = {
                'shl': ROL,
                'shr': ROR,
            }[opcode]
            if dest == REG_A:
                self.emitter.emit(cls())
            elif isinstance(dest, IndexedRef):
                mode = self.addressing_mode_for_index(dest.index)
                self.emitter.emit(cls(mode(Offset(self.get_label(dest.ref.name), dest.offset.value))))
            else:
                self.emitter.emit(cls(self.absolute_or_zero_page(self.get_label(dest.name))))
        elif opcode == 'copy':
            self.compile_copy(instr, instr.src, instr.dest)
        elif opcode == 'trash':
            # trash is purely an analysis-level annotation; no code is emitted.
            pass
        elif opcode == 'nop':
            self.emitter.emit(NOP())
        else:
            raise NotImplementedError(opcode)
def compile_call(self, instr):
location = instr.location
label = self.get_label(instr.location.name)
location_type = self.get_type(location)
if isinstance(location_type, RoutineType):
self.emitter.emit(JSR(Absolute(label)))
elif isinstance(location_type, VectorType):
trampoline = self.trampolines.setdefault(
location, Label(location.name + '_trampoline')
)
self.emitter.emit(JSR(Absolute(trampoline)))
else:
raise NotImplementedError(location_type)
def compile_goto(self, instr):
location = instr.location
label = self.get_label(instr.location.name)
location_type = self.get_type(location)
if isinstance(location_type, RoutineType):
self.emitter.emit(JMP(Absolute(label)))
elif isinstance(location_type, VectorType):
self.emitter.emit(JMP(Indirect(label)))
else:
raise NotImplementedError(location_type)
    def compile_cmp(self, instr, src, dest):
        """Emit a comparison of `dest` against `src`.

        Words are compared bytewise: low bytes first, branching past the
        high-byte compare when they already differ.  Bytes use CMP/CPX/CPY
        against the register named by `dest`.  `instr` is only for
        reporting purposes.
        """
        if isinstance(src, LocationRef) and self.get_type(src) == TYPE_WORD:
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(dest_label)))
            self.emitter.emit(CMP(Absolute(src_label)))
            end_label = Label('end_label')
            self.emitter.emit(BNE(Relative(end_label)))
            self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
            self.emitter.emit(CMP(Absolute(Offset(src_label, 1))))
            self.emitter.resolve_label(end_label)
            return
        if isinstance(src, ConstantRef) and self.get_type(src) == TYPE_WORD:
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(dest_label)))
            self.emitter.emit(CMP(Immediate(Byte(src.low_byte()))))
            end_label = Label('end_label')
            self.emitter.emit(BNE(Relative(end_label)))
            self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
            self.emitter.emit(CMP(Immediate(Byte(src.high_byte()))))
            self.emitter.resolve_label(end_label)
            return
        # NOTE(review): this assumes `dest` is a plain register ref with a
        # .name attribute -- confirm callers never pass an IndexedRef here.
        cls = {
            'a': CMP,
            'x': CPX,
            'y': CPY,
        }.get(dest.name)
        if cls is None:
            raise UnsupportedOpcodeError(instr)
        if isinstance(src, ConstantRef):
            self.emitter.emit(cls(Immediate(Byte(src.value))))
        elif isinstance(src, IndexedRef):
            # FIXME might not work for some dest's (that is, cls's)
            mode = self.addressing_mode_for_index(src.index)
            self.emitter.emit(cls(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
        else:
            self.emitter.emit(cls(Absolute(self.get_label(src.name))))
def compile_inc(self, instr, dest):
"""`instr` is only for reporting purposes"""
if dest == REG_X:
self.emitter.emit(INX())
elif dest == REG_Y:
self.emitter.emit(INY())
elif isinstance(dest, IndexedRef):
mode = self.addressing_mode_for_index(dest.index)
self.emitter.emit(INC(mode(Offset(self.get_label(dest.ref.name), dest.offset.value))))
else:
self.emitter.emit(INC(Absolute(self.get_label(dest.name))))
def compile_dec(self, instr, dest):
"""`instr` is only for reporting purposes"""
if dest == REG_X:
self.emitter.emit(DEX())
elif dest == REG_Y:
self.emitter.emit(DEY())
elif isinstance(dest, IndexedRef):
mode = self.addressing_mode_for_index(dest.index)
self.emitter.emit(DEC(mode(Offset(self.get_label(dest.ref.name), dest.offset.value))))
else:
self.emitter.emit(DEC(Absolute(self.get_label(dest.name))))
    def compile_copy(self, instr, src, dest):
        """Emit code for a `copy` between the many supported operand shapes.

        Covers byte/word/vector/routine sources and byte/word/table/
        pointer-indirect destinations; each `###` comment names the case
        in SixtyPical surface syntax.  Word-sized copies move the low byte
        first, then the high byte (tables store high bytes 256 bytes past
        the low bytes).
        """
        if isinstance(src, (IndirectRef, IndexedRef)):
            src_ref_type = self.get_type(src.ref)
        else:
            src_type = self.get_type(src)
        if isinstance(dest, (IndirectRef, IndexedRef)):
            dest_ref_type = self.get_type(dest.ref)
        else:
            dest_type = self.get_type(dest)
        if isinstance(src, ConstantRef) and isinstance(dest, IndirectRef) and src_type == TYPE_BYTE and isinstance(dest_ref_type, PointerType):
            ### copy 123, [ptr] + y
            dest_label = self.get_label(dest.ref.name)
            self.emitter.emit(LDA(Immediate(Byte(src.value))))
            self.emitter.emit(STA(IndirectY(dest_label)))
        elif isinstance(src, LocationRef) and isinstance(dest, IndirectRef) and src_type == TYPE_BYTE and isinstance(dest_ref_type, PointerType):
            ### copy b, [ptr] + y
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(IndirectY(dest_label)))
        elif isinstance(src, IndirectRef) and isinstance(dest, LocationRef) and dest_type == TYPE_BYTE and isinstance(src_ref_type, PointerType):
            ### copy [ptr] + y, b
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(IndirectY(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
        elif isinstance(src, IndirectRef) and isinstance(dest, IndirectRef) and isinstance(src_ref_type, PointerType) and isinstance(dest_ref_type, PointerType):
            ### copy [ptra] + y, [ptrb] + y
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.ref.name)
            self.emitter.emit(LDA(IndirectY(src_label)))
            self.emitter.emit(STA(IndirectY(dest_label)))
        elif isinstance(src, LocationRef) and isinstance(dest, IndexedRef) and src_type == TYPE_WORD and TableType.is_a_table_type(dest_ref_type, TYPE_WORD):
            ### copy w, wtab + y
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, LocationRef) and isinstance(dest, IndexedRef) and isinstance(src_type, VectorType) and isinstance(dest_ref_type, TableType) and isinstance(dest_ref_type.of_type, VectorType):
            ### copy vec, vtab + y
            # FIXME this is the exact same as above - can this be simplified?
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, LocationRef) and isinstance(dest, IndexedRef) and isinstance(src_type, RoutineType) and isinstance(dest_ref_type, TableType) and isinstance(dest_ref_type.of_type, VectorType):
            ### copy routine, vtab + y
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Immediate(HighAddressByte(src_label))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Immediate(LowAddressByte(src_label))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, ConstantRef) and isinstance(dest, IndexedRef) and src_type == TYPE_WORD and TableType.is_a_table_type(dest_ref_type, TYPE_WORD):
            ### copy 9999, wtab + y
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Immediate(Byte(src.low_byte()))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Immediate(Byte(src.high_byte()))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, IndexedRef) and isinstance(dest, LocationRef) and TableType.is_a_table_type(src_ref_type, TYPE_WORD) and dest_type == TYPE_WORD:
            ### copy wtab + y, w
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.name)
            mode = self.addressing_mode_for_index(src.index)
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value + 256))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif isinstance(src, IndexedRef) and isinstance(dest, LocationRef) and isinstance(dest_type, VectorType) and isinstance(src_ref_type, TableType) and isinstance(src_ref_type.of_type, VectorType):
            ### copy vtab + y, vec
            # FIXME this is the exact same as above - can this be simplified?
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.name)
            mode = self.addressing_mode_for_index(src.index)
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value + 256))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif src_type == TYPE_BYTE and dest_type == TYPE_BYTE and not isinstance(src, ConstantRef):
            ### copy b1, b2
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
        elif src_type == TYPE_WORD and dest_type == TYPE_WORD and isinstance(src, ConstantRef):
            ### copy 9999, w
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Immediate(Byte(src.low_byte()))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Immediate(Byte(src.high_byte()))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif src_type == TYPE_WORD and dest_type == TYPE_WORD and not isinstance(src, ConstantRef):
            ### copy w1, w2
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif isinstance(src_type, VectorType) and isinstance(dest_type, VectorType):
            ### copy v1, v2
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif isinstance(src_type, RoutineType) and isinstance(dest_type, VectorType):
            ### copy routine, vec
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Immediate(HighAddressByte(src_label))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Immediate(LowAddressByte(src_label))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        else:
            raise NotImplementedError(src_type)
    def compile_if(self, instr):
        """Compile an if/else conditioned on a flag bit (c, z, or n).

        Branches around block1 when the condition fails.  When block1 ends
        with a top-level goto, control never reaches the end of block1, so
        the usual JMP over block2 is omitted.
        """
        cls = {
            False: {
                'c': BCC,
                'z': BNE,
                'n': BPL,
            },
            True: {
                'c': BCS,
                'z': BEQ,
                'n': BMI,
            },
        }[instr.inverted].get(instr.src.name)
        if cls is None:
            raise UnsupportedOpcodeError(instr)
        else_label = Label('else_label')
        self.emitter.emit(cls(Relative(else_label)))
        self.compile_block(instr.block1)
        if instr.block2 is not None:
            if instr.block1.shallow_contains_goto:
                self.emitter.resolve_label(else_label)
                self.compile_block(instr.block2)
            else:
                end_label = Label('end_label')
                self.emitter.emit(JMP(Absolute(end_label)))
                self.emitter.resolve_label(else_label)
                self.compile_block(instr.block2)
                self.emitter.resolve_label(end_label)
        else:
            self.emitter.resolve_label(else_label)
    def compile_repeat(self, instr):
        """Compile a repeat loop: body first, then a branch back on the flag.

        With no condition (`instr.src is None`) the loop repeats forever
        via an unconditional JMP.
        """
        top_label = self.emitter.make_label()
        self.compile_block(instr.block)
        if instr.src is None:  # indicates 'repeat forever'
            self.emitter.emit(JMP(Absolute(top_label)))
        else:
            cls = {
                False: {
                    'c': BCC,
                    'z': BNE,
                    'n': BPL,
                },
                True: {
                    'c': BCS,
                    'z': BEQ,
                    'n': BMI,
                },
            }[instr.inverted].get(instr.src.name)
            if cls is None:
                raise UnsupportedOpcodeError(instr)
            self.emitter.emit(cls(Relative(top_label)))
def compile_for(self, instr):
top_label = self.emitter.make_label()
self.compile_block(instr.block)
if instr.direction > 0:
self.compile_inc(instr, instr.dest)
final = instr.final.succ()
elif instr.direction < 0:
self.compile_dec(instr, instr.dest)
final = instr.final.pred()
self.compile_cmp(instr, final, instr.dest)
self.emitter.emit(BNE(Relative(top_label)))
    def compile_with_interrupts_off(self, instr):
        """Wrap the block's code in SEI/CLI so it runs with interrupts disabled."""
        self.emitter.emit(SEI())
        self.compile_block(instr.block)
        self.emitter.emit(CLI())
    def compile_save(self, instr):
        """Push the named locations, compile the block, then restore them.

        Everything travels through A on the hardware stack; restores happen
        in reverse order of the saves so the pushes and pulls pair up.
        """
        for location in instr.locations:
            if location == REG_A:
                self.emitter.emit(PHA())
            elif location == REG_X:
                self.emitter.emit(TXA())
                self.emitter.emit(PHA())
            elif location == REG_Y:
                self.emitter.emit(TYA())
                self.emitter.emit(PHA())
            else:
                src_label = self.get_label(location.name)
                self.emitter.emit(LDA(Absolute(src_label)))
                self.emitter.emit(PHA())
        self.compile_block(instr.block)
        for location in reversed(instr.locations):
            if location == REG_A:
                self.emitter.emit(PLA())
            elif location == REG_X:
                self.emitter.emit(PLA())
                self.emitter.emit(TAX())
            elif location == REG_Y:
                self.emitter.emit(PLA())
                self.emitter.emit(TAY())
            else:
                src_label = self.get_label(location.name)
                self.emitter.emit(PLA())
                self.emitter.emit(STA(Absolute(src_label)))
def compile_point_into(self, instr):
self.pointer_assoc[instr.pointer.name] = instr.table.name
self.compile_block(instr.block)
del self.pointer_assoc[instr.pointer.name]
def compile_reset(self, instr):
table_name = self.pointer_assoc[instr.pointer.name]
src_label = Offset(self.get_label(table_name), instr.offset.value)
dest_label = self.get_label(instr.pointer.name)
self.emitter.emit(LDA(Immediate(HighAddressByte(src_label))))
self.emitter.emit(STA(ZeroPage(dest_label)))
self.emitter.emit(LDA(Immediate(LowAddressByte(src_label))))
self.emitter.emit(STA(ZeroPage(Offset(dest_label, 1))))
| 47.840561 | 204 | 0.584691 |
from sixtypical.ast import (
Program, Routine, Block, SingleOp, Reset, Call, GoTo, If, Repeat, For, WithInterruptsOff, Save, PointInto
)
from sixtypical.model import (
ConstantRef, LocationRef, IndexedRef, IndirectRef,
TYPE_BIT, TYPE_BYTE, TYPE_WORD,
TableType, PointerType, RoutineType, VectorType,
REG_A, REG_X, REG_Y, FLAG_C
)
from sixtypical.emitter import Byte, Word, Table, Label, Offset, LowAddressByte, HighAddressByte
from sixtypical.gen6502 import (
Immediate, Absolute, AbsoluteX, AbsoluteY, ZeroPage, Indirect, IndirectY, Relative,
LDA, LDX, LDY, STA, STX, STY,
TAX, TAY, TXA, TYA,
PHA, PLA,
CLC, SEC, ADC, SBC, ROL, ROR,
INC, INX, INY, DEC, DEX, DEY,
CMP, CPX, CPY, AND, ORA, EOR,
BCC, BCS, BNE, BEQ, BPL, BMI,
JMP, JSR, RTS,
SEI, CLI,
NOP,
)
class UnsupportedOpcodeError(KeyError):
    """Raised when an instruction's opcode/operand combination has no 6502 encoding."""
    pass
class Compiler(object):
    def __init__(self, symtab, emitter):
        """Create a compiler that reads types from `symtab` and emits code via `emitter`."""
        self.symtab = symtab
        self.emitter = emitter
        self.routines = {}           # routine.name -> Routine
        self.routine_locals = {}     # routine.name -> { local.name -> Label }
        self.labels = {}             # global.name -> Label (includes routines)
        self.trampolines = {}        # Location -> Label (generated for vector calls)
        self.pointer_assoc = {}      # pointer name -> table name
        self.current_routine = None  # Routine currently being compiled, if any
# - - - - helper methods - - - -
def get_type_for_name(self, name):
if self.current_routine and self.symtab.has_local(self.current_routine.name, name):
return self.symtab.fetch_local_type(self.current_routine.name, name)
return self.symtab.fetch_global_type(name)
def get_type(self, ref):
if isinstance(ref, ConstantRef):
return ref.type
if not isinstance(ref, LocationRef):
raise NotImplementedError
return self.get_type_for_name(ref.name)
def addressing_mode_for_index(self, index):
if index == REG_X:
return AbsoluteX
elif index == REG_Y:
return AbsoluteY
else:
raise NotImplementedError(index)
def compute_length_of_defn(self, defn):
length = None
type_ = self.get_type_for_name(defn.name)
if type_ == TYPE_BYTE:
length = 1
elif type_ == TYPE_WORD or isinstance(type_, (PointerType, VectorType)):
length = 2
elif isinstance(type_, TableType):
length = type_.size * (1 if type_.of_type == TYPE_BYTE else 2)
if length is None:
raise NotImplementedError("Need size for type {}".format(type_))
return length
def get_label(self, name):
if self.current_routine:
local_label = self.routine_locals.get(self.current_routine.name, {}).get(name)
if local_label:
return local_label
return self.labels[name]
def absolute_or_zero_page(self, label):
if label.addr is not None and label.addr < 256:
return ZeroPage(label)
else:
return Absolute(label)
# - - - - visitor methods - - - -
    def compile_program(self, program, compilation_roster=None):
        """Compile a whole Program.

        Passes: (1) create Labels for every global, routine, and
        routine-local and collect (defn, type, label) declarations;
        (2) compile the routines, honouring *compilation_roster* (a list
        of rows; routines within one row are emitted contiguously so a
        routine may fall through to the next one in the row — default is
        'main' first, then each other routine in its own row); then emit
        vector-call trampolines, initialized data, and BSS.
        """
        assert isinstance(program, Program)
        declarations = []
        # Pass 1a: a Label for every global, with its computed length.
        for defn in program.defns:
            length = self.compute_length_of_defn(defn)
            label = Label(defn.name, addr=defn.addr, length=length)
            self.labels[defn.name] = label
            declarations.append((defn, self.symtab.fetch_global_type(defn.name), label))
        # Pass 1b: labels for routines and for each routine's statics.
        # current_routine is set temporarily so compute_length_of_defn
        # resolves local names against the right routine.
        for routine in program.routines:
            self.routines[routine.name] = routine
            label = Label(routine.name)
            if routine.addr is not None:
                label.set_addr(routine.addr)
            self.labels[routine.name] = label
            self.current_routine = routine
            local_labels = {}
            for defn in routine.locals:
                length = self.compute_length_of_defn(defn)
                label = Label(defn.name, addr=defn.addr, length=length)
                local_labels[defn.name] = label
                declarations.append((defn, self.symtab.fetch_local_type(routine.name, defn.name), label))
            self.routine_locals[routine.name] = local_labels
            self.current_routine = None
        if compilation_roster is None:
            compilation_roster = [['main']] + [[routine.name] for routine in program.routines if routine.name != 'main']
        # Pass 2: emit code.  Within a roster row the next routine is
        # passed along so compile_routine can elide a trailing JMP to it.
        for roster_row in compilation_roster:
            for i, routine_name in enumerate(roster_row):
                if i < len(roster_row) - 1:
                    self.compile_routine(self.routines[routine_name], next_routine=self.routines[roster_row[i + 1]])
                else:
                    self.compile_routine(self.routines[routine_name])
        # Trampolines give vector calls a JSR-able target: callers JSR
        # here and the shim JMPs through the vector.  NOTE(review): the
        # RTS after an indirect JMP looks unreachable — confirm intent.
        for location, label in self.trampolines.items():
            self.emitter.resolve_label(label)
            self.emitter.emit(JMP(Indirect(self.get_label(location.name))))
            self.emitter.emit(RTS())
        # initialized data
        for defn, type_, label in declarations:
            if defn.initial is not None:
                initial_data = None
                if type_ == TYPE_BYTE:
                    initial_data = Byte(defn.initial)
                elif type_ == TYPE_WORD:
                    initial_data = Word(defn.initial)
                elif TableType.is_a_table_type(type_, TYPE_BYTE):
                    initial_data = Table([Byte(i) for i in defn.initial], type_.size)
                elif TableType.is_a_table_type(type_, TYPE_WORD):
                    initial_data = Table([Word(i) for i in defn.initial], type_.size)
                else:
                    raise NotImplementedError(type_)
                label.set_length(initial_data.size())
                self.emitter.resolve_label(label)
                self.emitter.emit(initial_data)
        # uninitialized, "BSS" data: only declarations with neither an
        # initial value nor a fixed address go in the BSS region.
        for defn, type_, label in declarations:
            if defn.initial is None and defn.addr is None:
                self.emitter.resolve_bss_label(label)
    def compile_routine(self, routine, next_routine=None):
        """Emit the body of *routine*, then apply two peephole
        optimizations on the final instruction:

        - tail call: a trailing JSR (to an absolute Label) is retracted
          and re-emitted as a JMP, saving the stack push and final RTS;
        - fall-through: a trailing JMP to *next_routine* (the routine
          emitted immediately after this one) is removed entirely.

        An RTS is appended only when the routine can actually fall off
        its end (i.e. it does not end in a JMP).
        """
        assert isinstance(routine, Routine)
        self.current_routine = routine
        if routine.block:
            self.emitter.resolve_label(self.get_label(routine.name))
            self.compile_block(routine.block)
            needs_rts = True
            last_op = self.emitter.get_tail()
            # Tail-call optimization: JSR as the final op becomes JMP.
            if isinstance(last_op, JSR):
                if isinstance(last_op.operand, Absolute):
                    if isinstance(last_op.operand.value, Label):
                        label = last_op.operand.value
                        self.emitter.retract()
                        self.emitter.emit(JMP(Absolute(label)))
                        last_op = self.emitter.get_tail()
            # A final JMP means control never falls through, so no RTS;
            # and if it targets the routine emitted right after us, the
            # JMP itself is redundant and is retracted.
            if isinstance(last_op, JMP):
                needs_rts = False
                if isinstance(last_op.operand, Absolute):
                    if isinstance(last_op.operand.value, Label):
                        if next_routine and last_op.operand.value.name == next_routine.name:
                            self.emitter.retract()
            if needs_rts:
                self.emitter.emit(RTS())
        self.current_routine = None
def compile_block(self, block):
assert isinstance(block, Block)
block.shallow_contains_goto = False
for instr in block.instrs:
self.compile_instr(instr)
if isinstance(instr, GoTo):
block.shallow_contains_goto = True
def compile_instr(self, instr):
if isinstance(instr, SingleOp):
return self.compile_single_op(instr)
elif isinstance(instr, Call):
return self.compile_call(instr)
elif isinstance(instr, GoTo):
return self.compile_goto(instr)
elif isinstance(instr, If):
return self.compile_if(instr)
elif isinstance(instr, Repeat):
return self.compile_repeat(instr)
elif isinstance(instr, For):
return self.compile_for(instr)
elif isinstance(instr, WithInterruptsOff):
return self.compile_with_interrupts_off(instr)
elif isinstance(instr, Save):
return self.compile_save(instr)
elif isinstance(instr, PointInto):
return self.compile_point_into(instr)
elif isinstance(instr, Reset):
return self.compile_reset(instr)
else:
raise NotImplementedError
    def compile_single_op(self, instr):
        """Emit 6502 code for one primitive SixtyPical op.

        Handles ld / st / add / sub / cmp / and / or / xor / inc / dec /
        shl / shr / copy / trash / nop.  Raises UnsupportedOpcodeError
        for operand combinations the 6502 cannot encode directly, and
        NotImplementedError for an unknown opcode.
        """
        opcode = instr.opcode
        dest = instr.dest
        src = instr.src
        if opcode == 'ld':
            # Loads into a/x/y.  Register-to-register moves use the
            # transfer ops (TXA/TYA/TAX/TAY); constants use immediate
            # mode; table entries use indexed modes; pointer reads use
            # (indirect),y; plain locations use absolute or zero-page.
            if dest == REG_A:
                if src == REG_X:
                    self.emitter.emit(TXA())
                elif src == REG_Y:
                    self.emitter.emit(TYA())
                elif isinstance(src, ConstantRef):
                    self.emitter.emit(LDA(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_X:
                    self.emitter.emit(LDA(AbsoluteX(Offset(self.get_label(src.ref.name), src.offset.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_Y:
                    self.emitter.emit(LDA(AbsoluteY(Offset(self.get_label(src.ref.name), src.offset.value))))
                elif isinstance(src, IndirectRef) and isinstance(self.get_type(src.ref), PointerType):
                    self.emitter.emit(LDA(IndirectY(self.get_label(src.ref.name))))
                else:
                    self.emitter.emit(LDA(self.absolute_or_zero_page(self.get_label(src.name))))
            elif dest == REG_X:
                # x can only be indexed by y (the 6502 has no abs,x for LDX).
                if src == REG_A:
                    self.emitter.emit(TAX())
                elif isinstance(src, ConstantRef):
                    self.emitter.emit(LDX(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_Y:
                    self.emitter.emit(LDX(AbsoluteY(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(LDX(self.absolute_or_zero_page(self.get_label(src.name))))
            elif dest == REG_Y:
                # y, symmetrically, can only be indexed by x.
                if src == REG_A:
                    self.emitter.emit(TAY())
                elif isinstance(src, ConstantRef):
                    self.emitter.emit(LDY(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef) and src.index == REG_X:
                    self.emitter.emit(LDY(AbsoluteX(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(LDY(self.absolute_or_zero_page(self.get_label(src.name))))
            else:
                raise UnsupportedOpcodeError(instr)
        elif opcode == 'st':
            # Storing a bit constant into the carry flag maps to CLC/SEC;
            # everything else stores a register with STA/STX/STY.
            if dest == FLAG_C and src == ConstantRef(TYPE_BIT, 0):
                self.emitter.emit(CLC())
            elif dest == FLAG_C and src == ConstantRef(TYPE_BIT, 1):
                self.emitter.emit(SEC())
            else:
                op_cls = {
                    REG_A: STA,
                    REG_X: STX,
                    REG_Y: STY
                }.get(src, None)
                if isinstance(dest, IndexedRef):
                    mode_cls = {
                        REG_X: AbsoluteX,
                        REG_Y: AbsoluteY,
                    }[dest.index]
                    operand = mode_cls(Offset(self.get_label(dest.ref.name), dest.offset.value))
                elif isinstance(dest, IndirectRef) and isinstance(self.get_type(dest.ref), PointerType):
                    operand = IndirectY(self.get_label(dest.ref.name))
                else:
                    operand = self.absolute_or_zero_page(self.get_label(dest.name))
                if op_cls is None:
                    raise UnsupportedOpcodeError(instr)
                self.emitter.emit(op_cls(operand))
        elif opcode == 'add':
            # add always goes through the accumulator; adding into x or y
            # directly is not encodable.  Multi-byte adds chain the carry
            # from the low byte into the high byte (no interleaving CLC).
            if dest == REG_X or dest == REG_Y:
                raise UnsupportedOpcodeError(instr)
            if dest == REG_A:
                if isinstance(src, ConstantRef):
                    self.emitter.emit(ADC(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef):
                    mode = self.addressing_mode_for_index(src.index)
                    self.emitter.emit(ADC(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(ADC(Absolute(self.get_label(src.name))))
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_BYTE and self.get_type(dest) == TYPE_BYTE:
                # byte += byte: load, add, store back (clobbers a).
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                else:
                    raise UnsupportedOpcodeError(instr)
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_WORD and self.get_type(dest) == TYPE_WORD:
                # word += word: low byte then high byte, carry chained.
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Immediate(Byte(src.high_byte()))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(ADC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Absolute(Offset(src_label, 1))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                else:
                    raise UnsupportedOpcodeError(instr)
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_WORD and isinstance(self.get_type(dest), PointerType):
                # pointer += word: same as above but the pointer lives in
                # zero page, so zero-page addressing is used for dest.
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(ZeroPage(dest_label)))
                    self.emitter.emit(ADC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(ZeroPage(dest_label)))
                    self.emitter.emit(LDA(ZeroPage(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Immediate(Byte(src.high_byte()))))
                    self.emitter.emit(STA(ZeroPage(Offset(dest_label, 1))))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(ZeroPage(dest_label)))
                    self.emitter.emit(ADC(Absolute(src_label)))
                    self.emitter.emit(STA(ZeroPage(dest_label)))
                    self.emitter.emit(LDA(ZeroPage(Offset(dest_label, 1))))
                    self.emitter.emit(ADC(Absolute(Offset(src_label, 1))))
                    self.emitter.emit(STA(ZeroPage(Offset(dest_label, 1))))
                else:
                    raise UnsupportedOpcodeError(instr)
            else:
                raise UnsupportedOpcodeError(instr)
        elif opcode == 'sub':
            # sub mirrors add (SBC uses the carry as an inverted borrow);
            # pointer arithmetic is not supported here, unlike for add.
            if dest == REG_X or dest == REG_Y:
                raise UnsupportedOpcodeError(instr)
            if dest == REG_A:
                if isinstance(src, ConstantRef):
                    self.emitter.emit(SBC(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef):
                    mode = self.addressing_mode_for_index(src.index)
                    self.emitter.emit(SBC(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(SBC(Absolute(self.get_label(src.name))))
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_BYTE and self.get_type(dest) == TYPE_BYTE:
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                else:
                    raise UnsupportedOpcodeError(instr)
            elif isinstance(dest, LocationRef) and self.get_type(src) == TYPE_WORD and self.get_type(dest) == TYPE_WORD:
                if isinstance(src, ConstantRef):
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Immediate(Byte(src.low_byte()))))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(SBC(Immediate(Byte(src.high_byte()))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                elif isinstance(src, LocationRef):
                    src_label = self.get_label(src.name)
                    dest_label = self.get_label(dest.name)
                    self.emitter.emit(LDA(Absolute(dest_label)))
                    self.emitter.emit(SBC(Absolute(src_label)))
                    self.emitter.emit(STA(Absolute(dest_label)))
                    self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
                    self.emitter.emit(SBC(Absolute(Offset(src_label, 1))))
                    self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
                else:
                    raise UnsupportedOpcodeError(instr)
            else:
                raise UnsupportedOpcodeError(instr)
        elif opcode == 'cmp':
            self.compile_cmp(instr, instr.src, instr.dest)
        elif opcode in ('and', 'or', 'xor',):
            # Bitwise ops only operate on the accumulator.
            cls = {
                'and': AND,
                'or': ORA,
                'xor': EOR,
            }[opcode]
            if dest == REG_A:
                if isinstance(src, ConstantRef):
                    self.emitter.emit(cls(Immediate(Byte(src.value))))
                elif isinstance(src, IndexedRef):
                    mode = self.addressing_mode_for_index(src.index)
                    self.emitter.emit(cls(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
                else:
                    self.emitter.emit(cls(self.absolute_or_zero_page(self.get_label(src.name))))
            else:
                raise UnsupportedOpcodeError(instr)
        elif opcode == 'inc':
            self.compile_inc(instr, instr.dest)
        elif opcode == 'dec':
            self.compile_dec(instr, instr.dest)
        elif opcode in ('shl', 'shr'):
            # shl/shr emit ROL/ROR — rotations through the carry flag,
            # not the plain ASL/LSR shifts.
            cls = {
                'shl': ROL,
                'shr': ROR,
            }[opcode]
            if dest == REG_A:
                self.emitter.emit(cls())
            elif isinstance(dest, IndexedRef):
                mode = self.addressing_mode_for_index(dest.index)
                self.emitter.emit(cls(mode(Offset(self.get_label(dest.ref.name), dest.offset.value))))
            else:
                self.emitter.emit(cls(self.absolute_or_zero_page(self.get_label(dest.name))))
        elif opcode == 'copy':
            self.compile_copy(instr, instr.src, instr.dest)
        elif opcode == 'trash':
            # trash is a static-analysis annotation; no code is emitted.
            pass
        elif opcode == 'nop':
            self.emitter.emit(NOP())
        else:
            raise NotImplementedError(opcode)
def compile_call(self, instr):
location = instr.location
label = self.get_label(instr.location.name)
location_type = self.get_type(location)
if isinstance(location_type, RoutineType):
self.emitter.emit(JSR(Absolute(label)))
elif isinstance(location_type, VectorType):
trampoline = self.trampolines.setdefault(
location, Label(location.name + '_trampoline')
)
self.emitter.emit(JSR(Absolute(trampoline)))
else:
raise NotImplementedError(location_type)
def compile_goto(self, instr):
location = instr.location
label = self.get_label(instr.location.name)
location_type = self.get_type(location)
if isinstance(location_type, RoutineType):
self.emitter.emit(JMP(Absolute(label)))
elif isinstance(location_type, VectorType):
self.emitter.emit(JMP(Indirect(label)))
else:
raise NotImplementedError(location_type)
    def compile_cmp(self, instr, src, dest):
        """Emit a comparison of *dest* against *src*, leaving the result
        in the processor flags.

        Word comparisons clobber `a` and compare the low byte first,
        skipping the high-byte compare (BNE to end_label) when the low
        bytes already differ.  NOTE(review): with low-byte-first order,
        z is correct for equality, but after an early-out the carry
        reflects only the low-byte compare — confirm the intended
        semantics of word `<`/`>=` before relying on c here.
        Byte comparisons map directly to CMP/CPX/CPY on a/x/y.
        """
        if isinstance(src, LocationRef) and self.get_type(src) == TYPE_WORD:
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(dest_label)))
            self.emitter.emit(CMP(Absolute(src_label)))
            end_label = Label('end_label')
            # Low bytes differ: the flags already hold the answer.
            self.emitter.emit(BNE(Relative(end_label)))
            self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
            self.emitter.emit(CMP(Absolute(Offset(src_label, 1))))
            self.emitter.resolve_label(end_label)
            return
        if isinstance(src, ConstantRef) and self.get_type(src) == TYPE_WORD:
            # Same scheme with immediate operands for a word constant.
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(dest_label)))
            self.emitter.emit(CMP(Immediate(Byte(src.low_byte()))))
            end_label = Label('end_label')
            self.emitter.emit(BNE(Relative(end_label)))
            self.emitter.emit(LDA(Absolute(Offset(dest_label, 1))))
            self.emitter.emit(CMP(Immediate(Byte(src.high_byte()))))
            self.emitter.resolve_label(end_label)
            return
        cls = {
            'a': CMP,
            'x': CPX,
            'y': CPY,
        }.get(dest.name)
        if cls is None:
            raise UnsupportedOpcodeError(instr)
        if isinstance(src, ConstantRef):
            self.emitter.emit(cls(Immediate(Byte(src.value))))
        elif isinstance(src, IndexedRef):
            # FIXME might not work for some dest's (that is, cls's)
            mode = self.addressing_mode_for_index(src.index)
            self.emitter.emit(cls(mode(Offset(self.get_label(src.ref.name), src.offset.value))))
        else:
            self.emitter.emit(cls(Absolute(self.get_label(src.name))))
def compile_inc(self, instr, dest):
if dest == REG_X:
self.emitter.emit(INX())
elif dest == REG_Y:
self.emitter.emit(INY())
elif isinstance(dest, IndexedRef):
mode = self.addressing_mode_for_index(dest.index)
self.emitter.emit(INC(mode(Offset(self.get_label(dest.ref.name), dest.offset.value))))
else:
self.emitter.emit(INC(Absolute(self.get_label(dest.name))))
def compile_dec(self, instr, dest):
if dest == REG_X:
self.emitter.emit(DEX())
elif dest == REG_Y:
self.emitter.emit(DEY())
elif isinstance(dest, IndexedRef):
mode = self.addressing_mode_for_index(dest.index)
self.emitter.emit(DEC(mode(Offset(self.get_label(dest.ref.name), dest.offset.value))))
else:
self.emitter.emit(DEC(Absolute(self.get_label(dest.name))))
    def compile_copy(self, instr, src, dest):
        """Emit a `copy` between locations, dispatching on the kinds and
        types of source and destination.

        Word/vector tables are stored split: low bytes at offset +0 and
        high bytes at offset +256 (NOTE(review): this assumes 256-entry
        tables — confirm against the emitter/analyzer).  `a` is used as
        the staging register throughout, so every case clobbers it.
        """
        # Resolve the relevant type once: for indirect/indexed operands
        # the *referenced* location's type is what matters.
        if isinstance(src, (IndirectRef, IndexedRef)):
            src_ref_type = self.get_type(src.ref)
        else:
            src_type = self.get_type(src)
        if isinstance(dest, (IndirectRef, IndexedRef)):
            dest_ref_type = self.get_type(dest.ref)
        else:
            dest_type = self.get_type(dest)
        if isinstance(src, ConstantRef) and isinstance(dest, IndirectRef) and src_type == TYPE_BYTE and isinstance(dest_ref_type, PointerType):
            ### copy 123, [ptr] + y
            dest_label = self.get_label(dest.ref.name)
            self.emitter.emit(LDA(Immediate(Byte(src.value))))
            self.emitter.emit(STA(IndirectY(dest_label)))
        elif isinstance(src, LocationRef) and isinstance(dest, IndirectRef) and src_type == TYPE_BYTE and isinstance(dest_ref_type, PointerType):
            ### copy b, [ptr] + y
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(IndirectY(dest_label)))
        elif isinstance(src, IndirectRef) and isinstance(dest, LocationRef) and dest_type == TYPE_BYTE and isinstance(src_ref_type, PointerType):
            ### copy [ptr] + y, b
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(IndirectY(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
        elif isinstance(src, IndirectRef) and isinstance(dest, IndirectRef) and isinstance(src_ref_type, PointerType) and isinstance(dest_ref_type, PointerType):
            ### copy [ptra] + y, [ptrb] + y
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.ref.name)
            self.emitter.emit(LDA(IndirectY(src_label)))
            self.emitter.emit(STA(IndirectY(dest_label)))
        elif isinstance(src, LocationRef) and isinstance(dest, IndexedRef) and src_type == TYPE_WORD and TableType.is_a_table_type(dest_ref_type, TYPE_WORD):
            ### copy w, wtab + y
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, LocationRef) and isinstance(dest, IndexedRef) and isinstance(src_type, VectorType) and isinstance(dest_ref_type, TableType) and isinstance(dest_ref_type.of_type, VectorType):
            ### copy vec, vtab + y
            # FIXME this is the exact same as above - can this be simplified?
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, LocationRef) and isinstance(dest, IndexedRef) and isinstance(src_type, RoutineType) and isinstance(dest_ref_type, TableType) and isinstance(dest_ref_type.of_type, VectorType):
            ### copy routine, vtab + y
            # NOTE(review): HighAddressByte is written to the lower slot
            # and LowAddressByte to the higher one, consistently with the
            # other vector-copy sites and compile_reset in this file —
            # confirm the emitter's naming relative to 6502 endianness.
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Immediate(HighAddressByte(src_label))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Immediate(LowAddressByte(src_label))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, ConstantRef) and isinstance(dest, IndexedRef) and src_type == TYPE_WORD and TableType.is_a_table_type(dest_ref_type, TYPE_WORD):
            ### copy 9999, wtab + y
            dest_label = self.get_label(dest.ref.name)
            mode = self.addressing_mode_for_index(dest.index)
            self.emitter.emit(LDA(Immediate(Byte(src.low_byte()))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value))))
            self.emitter.emit(LDA(Immediate(Byte(src.high_byte()))))
            self.emitter.emit(STA(mode(Offset(dest_label, dest.offset.value + 256))))
        elif isinstance(src, IndexedRef) and isinstance(dest, LocationRef) and TableType.is_a_table_type(src_ref_type, TYPE_WORD) and dest_type == TYPE_WORD:
            ### copy wtab + y, w
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.name)
            mode = self.addressing_mode_for_index(src.index)
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value + 256))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif isinstance(src, IndexedRef) and isinstance(dest, LocationRef) and isinstance(dest_type, VectorType) and isinstance(src_ref_type, TableType) and isinstance(src_ref_type.of_type, VectorType):
            ### copy vtab + y, vec
            # FIXME this is the exact same as above - can this be simplified?
            src_label = self.get_label(src.ref.name)
            dest_label = self.get_label(dest.name)
            mode = self.addressing_mode_for_index(src.index)
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(mode(Offset(src_label, src.offset.value + 256))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif src_type == TYPE_BYTE and dest_type == TYPE_BYTE and not isinstance(src, ConstantRef):
            ### copy b1, b2
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
        elif src_type == TYPE_WORD and dest_type == TYPE_WORD and isinstance(src, ConstantRef):
            ### copy 9999, w
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Immediate(Byte(src.low_byte()))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Immediate(Byte(src.high_byte()))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif src_type == TYPE_WORD and dest_type == TYPE_WORD and not isinstance(src, ConstantRef):
            ### copy w1, w2
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif isinstance(src_type, VectorType) and isinstance(dest_type, VectorType):
            ### copy v1, v2
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Absolute(src_label)))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Absolute(Offset(src_label, 1))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        elif isinstance(src_type, RoutineType) and isinstance(dest_type, VectorType):
            ### copy routine, vec
            # NOTE(review): same high-then-low storage order as the
            # table case above — see the note there.
            src_label = self.get_label(src.name)
            dest_label = self.get_label(dest.name)
            self.emitter.emit(LDA(Immediate(HighAddressByte(src_label))))
            self.emitter.emit(STA(Absolute(dest_label)))
            self.emitter.emit(LDA(Immediate(LowAddressByte(src_label))))
            self.emitter.emit(STA(Absolute(Offset(dest_label, 1))))
        else:
            raise NotImplementedError(src_type)
    def compile_if(self, instr):
        """Emit a conditional on a processor flag (c/z/n).

        The chosen branch instruction jumps to the else-part (or the
        end) when the tested condition does NOT hold, so each flag maps
        to the branch of the *opposite* sense: testing c uses BCC,
        testing `not c` uses BCS, and so on.
        """
        cls = {
            False: {
                'c': BCC,
                'z': BNE,
                'n': BPL,
            },
            True: {
                'c': BCS,
                'z': BEQ,
                'n': BMI,
            },
        }[instr.inverted].get(instr.src.name)
        if cls is None:
            raise UnsupportedOpcodeError(instr)
        else_label = Label('else_label')
        self.emitter.emit(cls(Relative(else_label)))
        self.compile_block(instr.block1)
        if instr.block2 is not None:
            if instr.block1.shallow_contains_goto:
                # The true-branch directly contains a goto, so control
                # cannot fall through it; no jump over the else-part.
                self.emitter.resolve_label(else_label)
                self.compile_block(instr.block2)
            else:
                end_label = Label('end_label')
                self.emitter.emit(JMP(Absolute(end_label)))
                self.emitter.resolve_label(else_label)
                self.compile_block(instr.block2)
                self.emitter.resolve_label(end_label)
        else:
            self.emitter.resolve_label(else_label)
def compile_repeat(self, instr):
top_label = self.emitter.make_label()
self.compile_block(instr.block)
if instr.src is None: # indicates 'repeat forever'
self.emitter.emit(JMP(Absolute(top_label)))
else:
cls = {
False: {
'c': BCC,
'z': BNE,
'n': BPL,
},
True: {
'c': BCS,
'z': BEQ,
'n': BMI,
},
}[instr.inverted].get(instr.src.name)
if cls is None:
raise UnsupportedOpcodeError(instr)
self.emitter.emit(cls(Relative(top_label)))
def compile_for(self, instr):
top_label = self.emitter.make_label()
self.compile_block(instr.block)
if instr.direction > 0:
self.compile_inc(instr, instr.dest)
final = instr.final.succ()
elif instr.direction < 0:
self.compile_dec(instr, instr.dest)
final = instr.final.pred()
self.compile_cmp(instr, final, instr.dest)
self.emitter.emit(BNE(Relative(top_label)))
    def compile_with_interrupts_off(self, instr):
        """Compile *instr*'s block bracketed by SEI/CLI so it runs with
        IRQs masked.  NOTE(review): CLI is emitted unconditionally at
        the end, so a nested block would re-enable interrupts early —
        confirm nesting is disallowed upstream.
        """
        self.emitter.emit(SEI())
        self.compile_block(instr.block)
        self.emitter.emit(CLI())
def compile_save(self, instr):
for location in instr.locations:
if location == REG_A:
self.emitter.emit(PHA())
elif location == REG_X:
self.emitter.emit(TXA())
self.emitter.emit(PHA())
elif location == REG_Y:
self.emitter.emit(TYA())
self.emitter.emit(PHA())
else:
src_label = self.get_label(location.name)
self.emitter.emit(LDA(Absolute(src_label)))
self.emitter.emit(PHA())
self.compile_block(instr.block)
for location in reversed(instr.locations):
if location == REG_A:
self.emitter.emit(PLA())
elif location == REG_X:
self.emitter.emit(PLA())
self.emitter.emit(TAX())
elif location == REG_Y:
self.emitter.emit(PLA())
self.emitter.emit(TAY())
else:
src_label = self.get_label(location.name)
self.emitter.emit(PLA())
self.emitter.emit(STA(Absolute(src_label)))
    def compile_point_into(self, instr):
        """Compile a `point ... into` block: while the block is being
        compiled, the pointer is associated with its table so that
        `reset` statements inside can resolve the table's label.  The
        association is removed again afterwards.
        """
        self.pointer_assoc[instr.pointer.name] = instr.table.name
        self.compile_block(instr.block)
        del self.pointer_assoc[instr.pointer.name]
    def compile_reset(self, instr):
        """Load a zero-page pointer with the address of its associated
        table (established by the enclosing `point ... into`) plus a
        constant offset."""
        table_name = self.pointer_assoc[instr.pointer.name]
        src_label = Offset(self.get_label(table_name), instr.offset.value)
        dest_label = self.get_label(instr.pointer.name)
        # NOTE(review): HighAddressByte goes to the pointer's first byte
        # and LowAddressByte to the second — consistent with the
        # vector-copy sites in compile_copy, but confirm the emitter's
        # naming against 6502 little-endian pointer layout.
        self.emitter.emit(LDA(Immediate(HighAddressByte(src_label))))
        self.emitter.emit(STA(ZeroPage(dest_label)))
        self.emitter.emit(LDA(Immediate(LowAddressByte(src_label))))
        self.emitter.emit(STA(ZeroPage(Offset(dest_label, 1))))
| true | true |
f72da63ad73723e98b0413706ba25b4c48abe99e | 4,370 | py | Python | gestaoemp/lib/python3.6/site-packages/pysignfe/nfe/manual_300/soap_100.py | jasonrosa92/gestaoempresa | c942c7cb5ff3bd299da94df11eab49b7dd0c7d71 | [
"MIT"
] | null | null | null | gestaoemp/lib/python3.6/site-packages/pysignfe/nfe/manual_300/soap_100.py | jasonrosa92/gestaoempresa | c942c7cb5ff3bd299da94df11eab49b7dd0c7d71 | [
"MIT"
] | null | null | null | gestaoemp/lib/python3.6/site-packages/pysignfe/nfe/manual_300/soap_100.py | jasonrosa92/gestaoempresa | c942c7cb5ff3bd299da94df11eab49b7dd0c7d71 | [
"MIT"
] | 1 | 2020-11-04T08:45:57.000Z | 2020-11-04T08:45:57.000Z | # -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
from pysignfe.nfe.manual_300 import ESQUEMA_ATUAL
import os
DIRNAME = os.path.dirname(__file__)
class CabecMsg(XMLNFe):
    """<cabecMsg> SOAP header element carrying the NF-e data-layout
    version (fixed at 1.02 for this manual revision)."""

    def __init__(self):
        super(CabecMsg, self).__init__()
        self.versao = TagDecimal(nome=u'cabecMsg', codigo=u'', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'1.02', raiz=u'//cabecMsg')
        self.versaoDados = TagDecimal(nome=u'versaoDados', codigo=u'A01', raiz=u'//cabecMsg', tamanho=[1, 4])
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'cabecMsg_v1.02.xsd'

    def get_xml(self):
        pieces = [
            XMLNFe.get_xml(self),
            ABERTURA,
            self.versao.xml,
            self.versaoDados.xml,
            u'</cabecMsg>',
        ]
        return u''.join(pieces)

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versaoDados.xml = arquivo

    xml = property(get_xml, set_xml)
class NFeCabecMsg(XMLNFe):
    """<nfeCabecMsg> wrapper around the CabecMsg header, with accents
    stripped from the serialized content."""

    def __init__(self):
        super(NFeCabecMsg, self).__init__()
        self.cabec = CabecMsg()

    def get_xml(self):
        return u''.join([
            XMLNFe.get_xml(self),
            u'<nfeCabecMsg>',
            tirar_acentos(self.cabec.xml),
            u'</nfeCabecMsg>',
        ])

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.cabec.xml = arquivo

    xml = property(get_xml, set_xml)
class NFeDadosMsg(XMLNFe):
    """<nfeDadosMsg> wrapper around the request payload (`dados` must
    be assigned an object exposing a serialized `.xml` before use)."""

    def __init__(self):
        super(NFeDadosMsg, self).__init__()
        self.dados = None

    def get_xml(self):
        return u''.join([
            XMLNFe.get_xml(self),
            u'<nfeDadosMsg>',
            tirar_acentos(self.dados.xml),
            u'</nfeDadosMsg>',
        ])

    def set_xml(self, arquivo):
        # Write-only wrapper: parsing back is intentionally unsupported.
        pass

    xml = property(get_xml, set_xml)
class SOAPEnvio(XMLNFe):
    """SOAP 1.2 envelope for sending a request to an NF-e webservice.

    Configure `webservice`, `metodo`, and `envio` (the payload object)
    before reading `xml`/`header`.  Serializing has side effects: it
    wires the payload into nfeDadosMsg, propagates the payload's layout
    version into the header, and sets the SOAPAction HTTP header.
    """

    def __init__(self):
        super(SOAPEnvio, self).__init__()
        self.webservice = u''
        self.metodo = u''
        self.envio = None
        self.nfeCabecMsg = NFeCabecMsg()
        self.nfeDadosMsg = NFeDadosMsg()
        self._header = {u'content-type': u'application/soap+xml; charset=utf-8',
                        u'Accept': u'application/soap+xml; charset=utf-8'}

    def get_xml(self):
        self.nfeDadosMsg.dados = self.envio
        self.nfeCabecMsg.cabec.versaoDados.valor = self.envio.versao.valor
        self._header['SOAPAction'] = self.metodo
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += u'<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">'
        xml += u'<soap:Body>'
        xml += u'<' + self.metodo + u' xmlns="http://www.portalfiscal.inf.br/nfe/wsdl/' + self.webservice + u'">'
        xml += self.nfeCabecMsg.xml
        xml += self.nfeDadosMsg.xml
        xml += u'</' + self.metodo + u'>'
        xml += u'</soap:Body>'
        xml += u'</soap:Envelope>'
        return xml

    def set_xml(self, arquivo):
        # BUG FIX: this setter previously took no value argument, so any
        # assignment to the `xml` property raised TypeError (property
        # setters are called with the assigned value).  Accept and
        # ignore the value, like the other write-only wrappers here.
        pass

    xml = property(get_xml, set_xml)

    def get_header(self):
        # SOAPAction is only present after get_xml() has run once.
        header = self._header
        return header

    header = property(get_header)
class SOAPRetorno(XMLNFe):
    """SOAP 1.2 envelope wrapping an NF-e webservice *response*.

    get_xml rebuilds the envelope around self.resposta; set_xml parses
    a received envelope, extracts the <metodo>Result element, and feeds
    its (re-accented, opening-stripped) content to self.resposta.
    """
    def __init__(self):
        super(SOAPRetorno, self).__init__()
        self.webservice = u''
        self.metodo = u''
        self.resposta = None
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += u'<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">'
        xml += u'<soap:Body>'
        xml += u'<' + self.metodo + u'Response xmlns="http://www.portalfiscal.inf.br/nfe/wsdl/' + self.webservice + u'">'
        xml += u'<' + self.metodo + u'Result>'
        xml += self.resposta.xml
        xml += u'</' + self.metodo + u'Result>'
        xml += u'</' + self.metodo + u'Response>'
        xml += u'</soap:Body>'
        xml += u'</soap:Envelope>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            # Locate the <metodo>Result element in the service's WSDL
            # namespace and restore accented characters stripped for
            # transmission.
            resposta = por_acentos(self._le_tag(u'//*/res:' + self.metodo + u'Result', ns=(u'http://www.portalfiscal.inf.br/nfe/wsdl/' + self.webservice)))
            resposta = tira_abertura(resposta)
            self.resposta.xml = resposta
            # NOTE(review): the return value is discarded when this runs
            # as the property setter — presumably set_xml is also called
            # directly somewhere; confirm before removing the return.
            return self.xml
    xml = property(get_xml, set_xml)
| 30.347222 | 156 | 0.579634 |
from pysignfe.xml_sped import *
from pysignfe.nfe.manual_300 import ESQUEMA_ATUAL
import os
DIRNAME = os.path.dirname(__file__)
class CabecMsg(XMLNFe):
def __init__(self):
super(CabecMsg, self).__init__()
self.versao = TagDecimal(nome=u'cabecMsg' , codigo=u'' , propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'1.02', raiz=u'//cabecMsg')
self.versaoDados = TagDecimal(nome=u'versaoDados', codigo=u'A01', raiz=u'//cabecMsg', tamanho=[1, 4])
self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
self.arquivo_esquema = u'cabecMsg_v1.02.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.versaoDados.xml
xml += u'</cabecMsg>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versaoDados.xml = arquivo
xml = property(get_xml, set_xml)
class NFeCabecMsg(XMLNFe):
def __init__(self):
super(NFeCabecMsg, self).__init__()
self.cabec = CabecMsg()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<nfeCabecMsg>'
xml += tirar_acentos(self.cabec.xml)
xml += u'</nfeCabecMsg>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.cabec.xml = arquivo
xml = property(get_xml, set_xml)
class NFeDadosMsg(XMLNFe):
def __init__(self):
super(NFeDadosMsg, self).__init__()
self.dados = None
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += u'<nfeDadosMsg>'
xml += tirar_acentos(self.dados.xml)
xml += u'</nfeDadosMsg>'
return xml
def set_xml(self, arquivo):
pass
xml = property(get_xml, set_xml)
class SOAPEnvio(XMLNFe):
    """SOAP 1.2 envelope that wraps a NF-e request for a webservice/method pair."""

    def __init__(self):
        super(SOAPEnvio, self).__init__()
        self.webservice = u''
        self.metodo = u''
        # Request payload object; must expose .xml and .versao (set by caller).
        self.envio = None
        self.nfeCabecMsg = NFeCabecMsg()
        self.nfeDadosMsg = NFeDadosMsg()
        # Default HTTP headers for the SOAP POST; SOAPAction is filled in get_xml().
        self._header = {u'content-type': u'application/soap+xml; charset=utf-8',
            u'Accept': u'application/soap+xml; charset=utf-8'}

    def get_xml(self):
        """Serialize the complete SOAP envelope.

        Side effects: propagates the payload and its schema version into the
        wrapper elements and sets the SOAPAction HTTP header to the method name.
        """
        self.nfeDadosMsg.dados = self.envio
        self.nfeCabecMsg.cabec.versaoDados.valor = self.envio.versao.valor
        self._header['SOAPAction'] = self.metodo
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += u'<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">'
        xml += u'<soap:Body>'
        xml += u'<' + self.metodo + u' xmlns="http://www.portalfiscal.inf.br/nfe/wsdl/' + self.webservice + u'">'
        xml += self.nfeCabecMsg.xml
        xml += self.nfeDadosMsg.xml
        xml += u'</' + self.metodo + u'>'
        xml += u'</soap:Body>'
        xml += u'</soap:Envelope>'
        return xml

    def set_xml(self, arquivo):
        """Intentional no-op: the request envelope is never parsed back.

        Bug fix: the original signature was ``set_xml(self)`` with no value
        parameter, so any assignment to the ``xml`` property raised TypeError
        instead of being a silent no-op like NFeDadosMsg.set_xml.
        """
        pass

    xml = property(get_xml, set_xml)

    def get_header(self):
        """Return the HTTP header dict to send together with the SOAP body."""
        header = self._header
        return header

    header = property(get_header)
class SOAPRetorno(XMLNFe):
    """SOAP 1.2 envelope for a response received from a NF-e webservice."""

    def __init__(self):
        super(SOAPRetorno, self).__init__()
        self.webservice = u''
        self.metodo = u''
        # Response payload object; its concrete class is chosen by the caller.
        self.resposta = None

    def get_xml(self):
        """Re-serialize the response envelope (<metodo>Response/<metodo>Result)."""
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += u'<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope">'
        xml += u'<soap:Body>'
        xml += u'<' + self.metodo + u'Response xmlns="http://www.portalfiscal.inf.br/nfe/wsdl/' + self.webservice + u'">'
        xml += u'<' + self.metodo + u'Result>'
        xml += self.resposta.xml
        xml += u'</' + self.metodo + u'Result>'
        xml += u'</' + self.metodo + u'Response>'
        xml += u'</soap:Body>'
        xml += u'</soap:Envelope>'
        return xml

    def set_xml(self, arquivo):
        """Extract the <metodo>Result element from *arquivo* into self.resposta.

        Restores accents, strips the XML declaration, then loads the inner
        response object.  Note the unusual `return self.xml` for a setter —
        kept as-is since the property machinery ignores the return value.
        """
        if self._le_xml(arquivo):
            resposta = por_acentos(self._le_tag(u'//*/res:' + self.metodo + u'Result', ns=(u'http://www.portalfiscal.inf.br/nfe/wsdl/' + self.webservice)))
            resposta = tira_abertura(resposta)
            self.resposta.xml = resposta
        return self.xml

    xml = property(get_xml, set_xml)
| true | true |
f72da63b1dcdd6bb6f1aa453dd36c56867d4b5bb | 1,786 | py | Python | Conteudo das Aulas/048/Cifra de Cesar - Gabarito.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/048/Cifra de Cesar - Gabarito.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/048/Cifra de Cesar - Gabarito.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | TAM_MAX_CH = 26
def recebeModo():
"""
Função que pergunta se o usuário quer criptografar ou
decriptografar e garante que uma entrada válida foi recebida
"""
while True:
modo = input("Você deseja criptografar ou decriptografar?\n").lower()
if modo in 'criptografar c decriptografar d'.split():
return modo
else:
print("Entre 'criptografar' ou 'c' ou 'decriptografar' ou 'd'.")
def recebeChave():
    """Prompt until the user enters a valid Caesar key in [1, TAM_MAX_CH].

    Fixes: removed the needless ``global`` declaration (read-only access never
    requires it) and the dead ``chave = 0`` initializer; non-numeric input now
    re-prompts instead of crashing with ValueError.
    """
    while True:
        entrada = input('Entre o número da chave (1-%s)\n' % (TAM_MAX_CH))
        try:
            chave = int(entrada)
        except ValueError:
            continue  # not a number: ask again, matching the silent re-prompt style
        if 1 <= chave <= TAM_MAX_CH:
            return chave
def geraMsgTraduzida(modo, mensagem, chave):
    """Apply the Caesar shift to *mensagem* and return the translated text.

    A *modo* starting with 'd' decrypts (negative shift); anything else
    encrypts.  ASCII letters wrap within A-Z / a-z; every non-alphabetic
    character passes through unchanged.
    """
    deslocamento = -chave if modo[0] == 'd' else chave
    saida = []
    for letra in mensagem:
        if not letra.isalpha():
            saida.append(letra)
            continue
        codigo = ord(letra) + deslocamento
        if letra.isupper():
            if codigo > ord('Z'):
                codigo -= 26
            elif codigo < ord('A'):
                codigo += 26
        elif letra.islower():
            if codigo > ord('z'):
                codigo -= 26
            elif codigo < ord('a'):
                codigo += 26
        saida.append(chr(codigo))
    return ''.join(saida)
def main():
    """Program entry point: read mode, message and key, then print the result."""
    modo = recebeModo()
    mensagem = input("Entre sua mensagem\n")
    chave = recebeChave()
    print("Seu texto traduzido é:")
    print(geraMsgTraduzida(modo, mensagem, chave))
main()
| 24.135135 | 77 | 0.526876 | TAM_MAX_CH = 26
def recebeModo():
while True:
modo = input("Você deseja criptografar ou decriptografar?\n").lower()
if modo in 'criptografar c decriptografar d'.split():
return modo
else:
print("Entre 'criptografar' ou 'c' ou 'decriptografar' ou 'd'.")
def recebeChave():
global TAM_MAX_CH
chave = 0
while True:
chave = int(input('Entre o número da chave (1-%s)\n'%(TAM_MAX_CH)))
if 1 <= chave <= TAM_MAX_CH:
return chave
def geraMsgTraduzida(modo, mensagem, chave):
if modo[0] == 'd':
chave *= -1
traduzido = ''
for simbolo in mensagem:
if simbolo.isalpha():
num = ord(simbolo)
num += chave
if simbolo.isupper():
if num > ord('Z'):
num -= 26
elif num < ord('A'):
num += 26
elif simbolo.islower():
if num > ord('z'):
num -= 26
elif num < ord('a'):
num += 26
traduzido += chr(num)
else:
traduzido += simbolo
return traduzido
def main():
modo = recebeModo()
mensagem = input("Entre sua mensagem\n")
chave = recebeChave()
print("Seu texto traduzido é:")
print(geraMsgTraduzida(modo, mensagem, chave))
main()
| true | true |
f72da7ef4747c1b5ac0fd719ff237ee623ce1a9b | 260 | py | Python | dj/blog/polls/views.py | jaysys/pystep | 7c58598b19474597de70963801bf5179afd04ce2 | [
"MIT"
] | null | null | null | dj/blog/polls/views.py | jaysys/pystep | 7c58598b19474597de70963801bf5179afd04ce2 | [
"MIT"
] | null | null | null | dj/blog/polls/views.py | jaysys/pystep | 7c58598b19474597de70963801bf5179afd04ce2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello, JooWorld. You're at the j-polls index.")
| 21.666667 | 72 | 0.75 |
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello, JooWorld. You're at the j-polls index.")
| true | true |
f72daaceac15280ca2f5821e19aa6c09c0d6f4d5 | 2,035 | py | Python | sdks/python/apache_beam/examples/complete/game/user_score_test.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | sdks/python/apache_beam/examples/complete/game/user_score_test.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 80 | 2020-01-16T09:55:09.000Z | 2020-10-03T13:43:07.000Z | sdks/python/apache_beam/examples/complete/game/user_score_test.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the user_score example."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.complete.game import user_score
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class UserScoreTest(unittest.TestCase):
  """Unit test for the batch UserScore pipeline transform."""

  # One event per line: user,team,score,epoch_millis,readable_timestamp.
  SAMPLE_DATA = [
      'user1_team1,team1,18,1447686663000,2015-11-16 15:11:03.921',
      'user1_team1,team1,18,1447690263000,2015-11-16 16:11:03.921',
      'user2_team2,team2,2,1447690263000,2015-11-16 16:11:03.955',
      'user3_team3,team3,8,1447690263000,2015-11-16 16:11:03.955',
      'user4_team3,team3,5,1447690263000,2015-11-16 16:11:03.959',
      'user1_team1,team1,14,1447697463000,2015-11-16 18:11:03.955',
  ]

  def test_user_score(self):
    """UserScore should emit one (user, summed score) pair per user."""
    with TestPipeline() as p:
      result = (
          p | beam.Create(UserScoreTest.SAMPLE_DATA) | user_score.UserScore())
      # user1_team1 appears three times: 18 + 18 + 14 = 50.
      assert_that(
          result,
          equal_to([('user1_team1', 50), ('user2_team2', 2), ('user3_team3', 8),
                    ('user4_team3', 5)]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 35.086207 | 80 | 0.732187 |
from __future__ import absolute_import
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.complete.game import user_score
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class UserScoreTest(unittest.TestCase):
SAMPLE_DATA = [
'user1_team1,team1,18,1447686663000,2015-11-16 15:11:03.921',
'user1_team1,team1,18,1447690263000,2015-11-16 16:11:03.921',
'user2_team2,team2,2,1447690263000,2015-11-16 16:11:03.955',
'user3_team3,team3,8,1447690263000,2015-11-16 16:11:03.955',
'user4_team3,team3,5,1447690263000,2015-11-16 16:11:03.959',
'user1_team1,team1,14,1447697463000,2015-11-16 18:11:03.955',
]
def test_user_score(self):
with TestPipeline() as p:
result = (
p | beam.Create(UserScoreTest.SAMPLE_DATA) | user_score.UserScore())
assert_that(
result,
equal_to([('user1_team1', 50), ('user2_team2', 2), ('user3_team3', 8),
('user4_team3', 5)]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| true | true |
f72daaf11e6fe72370b0058b84751cc12565a7e9 | 445 | py | Python | data/scripts/templates/object/tangible/loot/misc/shared_mystical_orb.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/loot/misc/shared_mystical_orb.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/loot/misc/shared_mystical_orb.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the shared mystical orb loot Tangible template."""
    orb = Tangible()
    orb.template = "object/tangible/loot/misc/shared_mystical_orb.iff"
    orb.attribute_template_id = -1
    orb.stfName("item_n","mystical_orb")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return orb
f72dab1cc5ec45ec2f055c9305443861b75f89b5 | 3,181 | py | Python | Summarize/solution_timeseries_EVAL.py | JoseHernandez9094/CohortLexicase | 5179a3c0db6dcf0c2cae79fcfd08b4b919c9269d | [
"MIT"
] | null | null | null | Summarize/solution_timeseries_EVAL.py | JoseHernandez9094/CohortLexicase | 5179a3c0db6dcf0c2cae79fcfd08b4b919c9269d | [
"MIT"
] | null | null | null | Summarize/solution_timeseries_EVAL.py | JoseHernandez9094/CohortLexicase | 5179a3c0db6dcf0c2cae79fcfd08b4b919c9269d | [
"MIT"
] | null | null | null | #python3
#This script will make csv so that graph_timeseries.py can create plots with them!
import pandas as p
MAX_EVAL = 512*512*1000
df = p.read_csv('../Data/Raw/min_programs__eval_262144000.csv')
treat = {}
TREATMENT = 'treatment'
FOUND = 'solution_found'
UPDATE = 'update_found'
EVAL = 'evaluation_found'
POS_UPDATE = 0
POS_EVAL = 1
# Group (update, evaluation) pairs under their treatment name, keeping only
# runs that actually found a solution.
for i, row in df.iterrows():
    if row[FOUND] == True:  # explicit comparison kept: pandas cell, not a plain bool
        ponto = (float(row[UPDATE]), float(row[EVAL]))
        treat.setdefault(row[TREATMENT], []).append(ponto)
#Will gather data by problem into CN,CS lists for generations.
#TODO
# Field indices after splitting a treatment name on '__'.
K_PROB = 0
K_SEL = 1
K_CN = 2
K_CS = 3

# data_gen[problem][selection]['<CN>-<CS>'] -> sorted solution evaluations.
data_gen = {}
for k, val in treat.items():
    # Skip the first 8 characters of the treatment name, then split into fields.
    k = k[8:].split('__')
    gens = sorted(x[POS_EVAL] for x in val)
    dimen = k[K_CN] + '-' + k[K_CS]
    prob = k[K_PROB]
    sele = k[K_SEL]
    # setdefault collapses the original's four duplicated membership branches
    # (prob in/not-in, sele in/not-in) into a single insertion path with
    # identical results.
    data_gen.setdefault(prob, {}).setdefault(sele, {})[dimen] = gens
# Write one csv of cumulative solution counts per (problem, selection, dimension).
for prob in data_gen:
    for sele in data_gen[prob]:
        for dimen in data_gen[prob][sele]:
            evals = data_gen[prob][sele][dimen]
            # Step coordinates: start at (0, 0), bump the count at every
            # solution evaluation, then extend flat out to MAX_EVAL.
            coords = [(0, 0)]
            for ev in evals:
                coords.append((ev, coords[-1][1] + 1))
            coords.append((MAX_EVAL, coords[-1][1]))
            raw_data = {'Evaluation': [c[0] for c in coords],
                        'Solution_Count': [c[1] for c in coords]}
            df = p.DataFrame(raw_data, columns=['Evaluation', 'Solution_Count'])
            fname = prob + '__' + sele[4:] + '__' + dimen + '.csv'
            df.to_csv('../Data/Polished/Evaluations/' + fname)
df.to_csv('../Data/Polished/Evaluations/'+fname) | 34.204301 | 83 | 0.565545 |
import pandas as p
MAX_EVAL = 512*512*1000
df = p.read_csv('../Data/Raw/min_programs__eval_262144000.csv')
treat = {}
TREATMENT = 'treatment'
FOUND = 'solution_found'
UPDATE = 'update_found'
EVAL = 'evaluation_found'
POS_UPDATE = 0
POS_EVAL = 1
for i,row in df.iterrows():
if row[TREATMENT] in treat:
if row[FOUND] == True:
tup = tuple([float(row[UPDATE]), float(row[EVAL])])
treat[row[TREATMENT]].append(tup)
else:
if row[FOUND] == True:
temp = [tuple([float(row[UPDATE]), float(row[EVAL])])]
treat[row[TREATMENT]] = temp
K_PROB = 0
K_SEL = 1
K_CN = 2
K_CS = 3
data_gen = {}
for k,val in treat.items():
k = k[8:].split('__')
gens = [x[POS_EVAL] for x in val]
gens.sort()
dimen = k[K_CN] + '-' + k[K_CS]
prob = k[K_PROB]
sele = k[K_SEL]
if prob not in data_gen:
data_gen[prob] = {}
if sele not in data_gen[prob]:
data_gen[prob][sele] = {}
data_gen[prob][sele][dimen] = gens
else:
data_gen[prob][sele][dimen] = gens
else:
if sele not in data_gen[prob]:
data_gen[prob][sele] = {}
data_gen[prob][sele][dimen] = gens
else:
data_gen[prob][sele][dimen] = gens
for prob in data_gen:
for sele in data_gen[prob]:
for dimen in data_gen[prob][sele]:
raw = []
raw.append(tuple([0,0]))
d = data_gen[prob][sele][dimen]
for i in range(0, len(d)):
raw.append(tuple([d[i], raw[len(raw)-1][1]+1]))
raw.append([MAX_EVAL, raw[len(raw)-1][1]])
gen = [x[0] for x in raw]
cnt = [x[1] for x in raw]
raw_data = {'Evaluation': gen, 'Solution_Count': cnt}
df = p.DataFrame(raw_data, columns = ['Evaluation', 'Solution_Count'])
fname = prob + '__' + sele[4:] + '__' + dimen + '.csv'
df.to_csv('../Data/Polished/Evaluations/'+fname) | true | true |
f72dad363ea7d388f7f367a338f861969e70adc2 | 4,856 | py | Python | onlinecourse/models.py | rabih96/final_template | 692c1b967bd089c6f265f9f22db2f8f2ed72f12b | [
"Apache-2.0"
] | null | null | null | onlinecourse/models.py | rabih96/final_template | 692c1b967bd089c6f265f9f22db2f8f2ed72f12b | [
"Apache-2.0"
] | null | null | null | onlinecourse/models.py | rabih96/final_template | 692c1b967bd089c6f265f9f22db2f8f2ed72f12b | [
"Apache-2.0"
] | null | null | null | import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
# Instructor model
class Instructor(models.Model):
    """Instructor profile linked to a Django auth user."""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    # Whether the instructor teaches full time.
    full_time = models.BooleanField(default=True)
    # Total learner count; maintained by application code — not visible here.
    total_learners = models.IntegerField()

    def __str__(self):
        """Display the instructor by username."""
        return self.user.username
# Learner model
class Learner(models.Model):
    """Learner profile with an occupation and a social link."""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    # Occupation choice keys (stored value) and display labels below.
    STUDENT = 'student'
    DEVELOPER = 'developer'
    DATA_SCIENTIST = 'data_scientist'
    DATABASE_ADMIN = 'dba'
    OCCUPATION_CHOICES = [
        (STUDENT, 'Student'),
        (DEVELOPER, 'Developer'),
        (DATA_SCIENTIST, 'Data Scientist'),
        (DATABASE_ADMIN, 'Database Admin')
    ]
    occupation = models.CharField(
        null=False,
        max_length=20,
        choices=OCCUPATION_CHOICES,
        default=STUDENT
    )
    social_link = models.URLField(max_length=200)

    def __str__(self):
        """Display as '<username>,<occupation>'."""
        return self.user.username + "," + \
               self.occupation
# Course model
class Course(models.Model):
    """An online course with instructors and enrolled users."""
    name = models.CharField(null=False, max_length=30, default='online course')
    image = models.ImageField(upload_to='course_images/')
    description = models.CharField(max_length=1000)
    pub_date = models.DateField(null=True)
    instructors = models.ManyToManyField(Instructor)
    # Enrollment is the explicit through-model of the user<->course relation.
    users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
    total_enrollment = models.IntegerField(default=0)
    # Class-level flag (not a db field); presumably toggled per request to mark
    # whether the current user is enrolled — confirm against the views.
    is_enrolled = False

    def __str__(self):
        """Display name and description."""
        return "Name: " + self.name + "," + \
               "Description: " + self.description
# Lesson model
class Lesson(models.Model):
    """A single lesson inside a course, ordered by the `order` field."""
    title = models.CharField(max_length=200, default="title")
    # Position of the lesson within its course.
    order = models.IntegerField(default=0)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    content = models.TextField()
# Enrollment model
# <HINT> Once a user enrolled a class, an enrollment entry should be created between the user and course
# And we could use the enrollment to track information such as exam submissions
class Enrollment(models.Model):
    """Through-model linking a user to a course; tracks mode, date and rating."""
    # Enrollment mode keys (stored value) and display labels.
    AUDIT = 'audit'
    HONOR = 'honor'
    BETA = 'BETA'
    COURSE_MODES = [
        (AUDIT, 'Audit'),
        (HONOR, 'Honor'),
        (BETA, 'BETA')
    ]
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    date_enrolled = models.DateField(default=now)
    mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
    rating = models.FloatField(default=5.0)
# <HINT> Create a Question Model with:
# Used to persist question content for a course
# Has a One-To-Many (or Many-To-Many if you want to reuse questions) relationship with course
# Has a grade point for each question
# Has question content
# Other fields and methods you would like to design
class Question(models.Model):
    """Exam question belonging to a lesson, worth `grade` points."""
    # Foreign key to the owning lesson.
    lesson = models.ForeignKey(Lesson , on_delete=models.CASCADE)
    # Question text shown to the learner.
    question_text = models.TextField()
    # Points this question is worth.
    grade = models.FloatField()

    def is_get_score(self, selected_ids):
        """Return True iff *selected_ids* contains every correct choice and
        no incorrect choice for this question."""
        all_answers = self.choice_set.filter(is_correct=True).count()
        selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
        selected_incorrect = self.choice_set.filter(is_correct=False, id__in=selected_ids).count()
        # Since selected_correct <= all_answers and selected_incorrect >= 0,
        # equality holds only when every correct choice was picked AND no
        # incorrect one was.
        if all_answers == (selected_correct - selected_incorrect):
            return True
        else:
            return False
# <HINT> Create a Choice Model with:
# Used to persist choice content for a question
# One-To-Many (or Many-To-Many if you want to reuse choices) relationship with Question
# Choice content
# Indicate if this choice of the question is a correct one or not
# Other fields and methods you would like to design
class Choice(models.Model):
    """One selectable answer for a question; flags whether it is correct."""
    question = models.ForeignKey(Question , on_delete=models.CASCADE)
    choice_text = models.TextField()
    is_correct = models.BooleanField(default=False)
# <HINT> The submission model
# One enrollment could have multiple submission
# One submission could have multiple choices
# One choice could belong to multiple submissions
class Submission(models.Model):
    """An exam submission: the set of choices picked under one enrollment.

    One enrollment may have many submissions; a choice may appear in many
    submissions (many-to-many).
    """
    enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
    choices = models.ManyToManyField(Choice)
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
class Question(models.Model):
lesson = models.ForeignKey(Lesson , on_delete=models.CASCADE)
question_text = models.TextField()
grade = models.FloatField()
def is_get_score(self, selected_ids):
all_answers = self.choice_set.filter(is_correct=True).count()
selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
selected_incorrect = self.choice_set.filter(is_correct=False, id__in=selected_ids).count()
if all_answers == (selected_correct - selected_incorrect):
return True
else:
return False
class Choice(models.Model):
question = models.ForeignKey(Question , on_delete=models.CASCADE)
choice_text = models.TextField()
is_correct = models.BooleanField(default=False)
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
| true | true |
f72dad48dd10fce6afcd1cb15ac7ff2dc551c9de | 11,159 | py | Python | catalog/models.py | gengzhao2020/django_local_library | 2854531940cf813f669198f5a49d7b4946fa0f49 | [
"MIT"
] | null | null | null | catalog/models.py | gengzhao2020/django_local_library | 2854531940cf813f669198f5a49d7b4946fa0f49 | [
"MIT"
] | 16 | 2021-06-30T14:57:44.000Z | 2021-07-05T23:40:45.000Z | catalog/models.py | gengzhao2020/django_local_library | 2854531940cf813f669198f5a49d7b4946fa0f49 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
"""
Genre model
This model is used to store information about the book category — for example whether it is fiction or non-fiction,
romance or military history, etc.
The model has a single CharField field (name), which is used to describe the genre (this is limited to 200 characters
and has some help_text. At the end of the model we declare a __str__() method, which simply returns the name of the
genre defined by a particular record. No verbose name has been defined, so the field will be called Name in forms.
"""
class Genre(models.Model):
    """Model representing a book genre (e.g. Science Fiction, Non Fiction)."""
    name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")

    class Meta:
        ordering = ['name']
        # Custom CRUD permissions; checked by views outside this file.
        permissions = (
            ('can_create_genre', 'Create genre'),
            ('can_update_genre', 'Update genre'),
            ('can_delete_genre', 'Delete genre'),
        )

    def get_absolute_url(self):
        """Returns the url to access a particular genre instance."""
        # `reverse` is imported at module level later in this file; it is bound
        # before any runtime call.
        return reverse('genre-detail', args=[str(self.id)])

    def __str__(self):
        """String for representing the Model object (in Admin site etc.)"""
        return self.name
"""
Language model
Imagine a local benefactor donates a number of new books written in another language (say, Farsi). The challenge is to
work out how these would be best represented in our library website, and then to add them to the models.
Some things to consider:
1. Should "language" be associated with a Book, BookInstance, or some other object?
2. Should the different languages be represented using model, a free text field, or a hard-coded selection list?
"""
class Language(models.Model):
    """Model representing a Language (e.g. English, French, Japanese, etc.)"""
    name = models.CharField(max_length=200,
                            help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")

    class Meta:
        ordering = ['name']
        # Custom CRUD permissions; checked by views outside this file.
        permissions = (
            ('can_create_language', 'Create language'),
            ('can_update_language', 'Update language'),
            ('can_delete_language', 'Delete language'),
        )

    def get_absolute_url(self):
        """Returns the url to access a particular language instance."""
        return reverse('language-detail', args=[str(self.id)])

    def __str__(self):
        """String for representing the Model object (in Admin site etc.)"""
        return self.name
"""
Book model
The book model represents all information about an available book in a general sense, but not a particular physical
"instance" or "copy" available for loan. The model uses a CharField to represent the book's title and isbn (note how
the isbn specifies its label as "ISBN" using the first unnamed parameter because the default label would otherwise be
"Isbn"). The model uses TextField for the summary, because this text may need to be quite long.
The genre is a ManyToManyField, so that a book can have multiple genres and a genre can have many books. The author is
declared as ForeignKey, so each book will only have one author, but an author may have many books (in practice a book
might have multiple authors, but not in this implementation!)
In both field types the related model class is declared as the first unnamed parameter using either the model class or
a string containing the name of the related model. You must use the name of the model as a string if the associated
class has not yet been defined in this file before it is referenced! The other parameters of interest in the author
field are null=True, which allows the database to store a Null value if no author is selected, and
on_delete=models.SET_NULL, which will set the value of the author to Null if the associated author record is deleted.
The model also defines __str__() , using the book's title field to represent a Book record. The final method,
get_absolute_url() returns a URL that can be used to access a detail record for this model (for this to work we will
have to define a URL mapping that has the name book-detail, and define an associated view and template).
"""
from django.urls import reverse # Used to generate URLs by reversing the URL patterns
class Book(models.Model):
    """Model representing a book (but not a specific copy of a book)."""
    title = models.CharField(max_length=200)
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
    # Foreign Key used because book can only have one author, but authors can have multiple books
    # Author as a string rather than object because it hasn't been declared yet in the file.
    summary = models.TextField(max_length=3000, blank=True, help_text="Enter a brief description of the book")
    isbn = models.CharField('ISBN', max_length=13, blank=True,
                            help_text='10 or 13 Characters <a href="https://www.isbn-international.org/content/what'
                                      '-isbn" target="_blank">ISBN number</a>')
    genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
    # ManyToManyField used because genre can contain many books. Books can cover many genres.
    # Genre class has already been defined so we can specify the object above.
    language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)

    class Meta:
        ordering = ['title', 'author']
        # Custom CRUD permissions; checked by views outside this file.
        permissions = (
            ("can_create_book", "Create book"),
            ("can_update_book", "Update book"),
            ("can_delete_book", "Delete book"),
        )

    def display_genre(self):
        """Creates a string for the Genre (first three, comma separated).
        This is required to display genre in Admin."""
        return ', '.join([genre.name for genre in self.genre.all()[:3]])
    # Column header used by the Admin list display.
    display_genre.short_description = 'Genre'

    def get_absolute_url(self):
        """Returns the url to access a particular book instance."""
        return reverse('book-detail', args=[str(self.id)])

    def __str__(self):
        """String for representing the Model object."""
        return self.title
"""
BookInstance model
The BookInstance represents a specific copy of a book that someone might borrow, and includes information about
whether the copy is available or on what date it is expected back, "imprint" or version details, and a unique id for
the book in the library.
The model uses
1. ForeignKey to identify the associated Book (each book can have many copies, but a copy can only have one Book).
2. CharField to represent the imprint (specific release) of the book.
We additionally declare a few new types of field:
1. UUIDField is used for the id field to set it as the primary_key for this model. This type of field allocates a
globally unique value for each instance (one for every book you can find in the library).
2. DateField is used for the due_back date (at which the book is expected to come available after being borrowed or in
maintenance). This value can be blank or null (needed for when the book is available). The model metadata (Class Meta)
uses this field to order records when they are returned in a query.
3. status is a CharField that defines a choice/selection list. As you can see, we define a tuple containing tuples of
key-value pairs and pass it to the choices argument. The value in a key/value pair is a display value that a user can
select, while the keys are the values that are actually saved if the option is selected. We've also set a default
value of 'm' (maintenance) as books will initially be created unavailable before they are stocked on the shelves.
The model __str__() represents the BookInstance object using a combination of its unique id and the associated
Book's title.
"""
import uuid # Required for unique book instances
from datetime import date
from django.contrib.auth.models import User # Required to assign User as a borrower
from django.db.models import F # Required to use query expressions
class BookInstance(models.Model):
    """Model representing a specific copy of a book (i.e. that can be borrowed from the library)."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4,
                          help_text="Unique ID for this particular book across whole library")
    book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
    imprint = models.CharField(max_length=200)
    # Null/blank while the copy is available (not out on loan).
    due_back = models.DateField(null=True, blank=True, help_text='Enter the date in the form of yyyy-mm-dd')
    borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)

    @property
    def is_overdue(self):
        """True when the copy has a due date that is already past."""
        if self.due_back and date.today() > self.due_back:
            return True
        return False

    # Loan status keys (stored value) and display labels.
    LOAN_STATUS = (
        ('m', 'Maintenance'),
        ('o', 'On loan'),
        ('a', 'Available'),
        ('r', 'Reserved'),
    )
    status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book availability')

    class Meta:
        # Soonest due date first; copies without a due date sort last.
        ordering = [F('due_back').asc(nulls_last=True)]
        permissions = (
            ("can_mark_returned", "Set book as returned"),
            ("can_create_bookinstance", "Create bookinstance"),
            ("can_update_bookinstance", "Update bookinstance"),
            ("can_delete_bookinstance", "Delete bookinstance"),
        )

    def __str__(self):
        """String for representing the Model object"""
        return '{0} ({1})'.format(self.id, self.book.title)
"""
Author model
The model defines an author as having a first name, last name, date of birth, and (optional) date of death. It
specifies that by default the __str__() returns the name in last name, firstname order. The get_absolute_url() method
reverses the author-detail URL mapping to get the URL for displaying an individual author.
"""
class Author(models.Model):
"""Model representing an author."""
first_name = models.CharField(max_length=100) # first name = given name = 名
last_name = models.CharField(max_length=100) # last name = family name = surname = 姓
date_of_birth = models.DateField(null=True, blank=True, help_text='Enter the date in the form of yyyy-mm-dd')
date_of_death = models.DateField('Died', null=True, blank=True,
help_text='Enter the date in the form of yyyy-mm-dd')
class Meta:
ordering = ['last_name', 'first_name']
permissions = (
("can_create_author", "Create author"),
("can_update_author", "Update author"),
("can_delete_author", "Delete author"),
)
def get_absolute_url(self):
"""Returns the url to access a particular author instance."""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""String for representing the Model object."""
return '{0} {1}'.format(self.first_name, self.last_name)
"""
Caution!
You should re-run the database migrations everytime you make changes in this file.
"""
| 46.302905 | 120 | 0.700332 | from django.db import models
class Genre(models.Model):
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
class Meta:
ordering = ['name']
permissions = (
('can_create_genre', 'Create genre'),
('can_update_genre', 'Update genre'),
('can_delete_genre', 'Delete genre'),
)
def get_absolute_url(self):
return reverse('genre-detail', args=[str(self.id)])
def __str__(self):
return self.name
class Language(models.Model):
name = models.CharField(max_length=200,
help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")
class Meta:
ordering = ['name']
permissions = (
('can_create_language', 'Create language'),
('can_update_language', 'Update language'),
('can_delete_language', 'Delete language'),
)
def get_absolute_url(self):
return reverse('language-detail', args=[str(self.id)])
def __str__(self):
return self.name
from django.urls import reverse # Used to generate URLs by reversing the URL patterns
class Book(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in the file.
summary = models.TextField(max_length=3000, blank=True, help_text="Enter a brief description of the book")
isbn = models.CharField('ISBN', max_length=13, blank=True,
help_text='10 or 13 Characters <a href="https://www.isbn-international.org/content/what'
'-isbn" target="_blank">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
class Meta:
ordering = ['title', 'author']
permissions = (
("can_create_book", "Create book"),
("can_update_book", "Update book"),
("can_delete_book", "Delete book"),
)
def display_genre(self):
return ', '.join([genre.name for genre in self.genre.all()[:3]])
display_genre.short_description = 'Genre'
def get_absolute_url(self):
return reverse('book-detail', args=[str(self.id)])
def __str__(self):
return self.title
import uuid
from datetime import date
from django.contrib.auth.models import User
from django.db.models import F
class BookInstance(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4,
help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True, help_text='Enter the date in the form of yyyy-mm-dd')
borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
@property
def is_overdue(self):
if self.due_back and date.today() > self.due_back:
return True
return False
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book availability')
class Meta:
ordering = [F('due_back').asc(nulls_last=True)]
permissions = (
("can_mark_returned", "Set book as returned"),
("can_create_bookinstance", "Create bookinstance"),
("can_update_bookinstance", "Update bookinstance"),
("can_delete_bookinstance", "Delete bookinstance"),
)
def __str__(self):
return '{0} ({1})'.format(self.id, self.book.title)
class Author(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True, help_text='Enter the date in the form of yyyy-mm-dd')
date_of_death = models.DateField('Died', null=True, blank=True,
help_text='Enter the date in the form of yyyy-mm-dd')
class Meta:
ordering = ['last_name', 'first_name']
permissions = (
("can_create_author", "Create author"),
("can_update_author", "Update author"),
("can_delete_author", "Delete author"),
)
def get_absolute_url(self):
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
return '{0} {1}'.format(self.first_name, self.last_name)
| true | true |
f72dae3fa0d68f4a0c07bb213ee6a4a1705d1c25 | 5,575 | py | Python | deeplookup/env.py | ybubnov/dnsthreat | 75a3298379c8b48aeea6bae6c5c31a7d5e9fe315 | [
"Apache-2.0"
] | 10 | 2021-07-29T10:02:09.000Z | 2022-01-18T10:03:10.000Z | deeplookup/env.py | ybubnov/dnsthreat | 75a3298379c8b48aeea6bae6c5c31a7d5e9fe315 | [
"Apache-2.0"
] | null | null | null | deeplookup/env.py | ybubnov/dnsthreat | 75a3298379c8b48aeea6bae6c5c31a7d5e9fe315 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class Action(Enum):
decrease_attention = 0
increase_attention = 1
access_detector = 2
isolate_node = 3
forget_node = 4
class State(Enum):
healthy = 0
infected = 1
class MalwareEnv(gym.Env):
"""
Observations:
Type: Box(2)
Num Observation Min Max
0 Attention Level 0.05 1.0
1 Malware Rate 0.0 1.0
Actions:
Type: Discrete(5)
Num Action
0 Decrease attention
1 Increase attention
2 Access detector
3 Isolate node
4 Forget node
Reward:
Reward of -0.1 is awarded for accessing detector.
Reward of -0.2 is awarded for decreasing attention.
Reward of -0.8 is awarded for increasing attention.
Reward of 1 is awarded for isolation of infected node.
Reward of 1 is awarded for forgeting healthy node.
Reward of -1 is awarded for isolation of healthy node.
Reward of -1 if awarded for forgetting infected node.
Starting State:
Attention level is set between [0.1, 0.2]
Actual state is set either to 'healthy' or 'infected'.
Episode Termination:
Node is either isolated of forgotten.
Episode length is greater than 100.
"""
def __init__(self, malware_prob: float = 0.9, seed: int = 100, log: bool = False):
self.min_attention = 0.05
self.max_attention = 1.0
self.min_rate = 0.0
self.max_rate = 1.0
self.attention_inc = 0.05
self.low = np.array([self.min_attention, self.min_rate], dtype=np.float32)
self.high = np.array([self.max_attention, self.max_rate], dtype=np.float32)
self.action_space = spaces.Discrete(5)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
self.malware_prob = malware_prob
self.log = log
# (attention, health)
self.state = (None, None, None)
self.latest_action = None
self.actions = []
self.seed(seed)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def start_step_policy(self, observation):
attention, malware_rate = observation
if attention > self.min_attention:
return Action.access_detector.value
return Action.increase_attention.value
def step(self, action):
if isinstance(action, np.ndarray):
action = np.argmax(action)
assert self.action_space.contains(action), f"{action} ({type(action)}) invalid"
action = Action(action)
if self.log:
self.actions.append(action)
attention, malware_rate, health = self.state
st = State(health)
if action == Action.decrease_attention:
attention = max(self.min_attention, attention - self.attention_inc)
if action == Action.increase_attention:
attention = min(self.max_attention, attention + self.attention_inc)
if action == Action.access_detector:
# Accessing a detector changes malware rate.
#
# When the node is healthy, there is a `1 - malware_prob` probability
# to observe malware. And malware rate depends on the attention level.
#
# Throw a "dice" in order to calculate the malware rate.
prob = self.np_random.uniform()
T = (1 - self.malware_prob) if st == State.healthy else self.malware_prob
mu = np.average([0, attention])
# sigma = 0.2
malware_rate = 0 if prob > T else self.np_random.normal(mu, 0.01)
malware_rate = max(self.min_rate, malware_rate)
malware_rate = min(self.max_rate, malware_rate)
# Agent does not observe the node health directly, only through
# malware rate.
self.state = np.array([attention, malware_rate, health])
self.latest_action = action
observation = np.array([attention, malware_rate])
reward = self.compute_reward(health, action)
done = action in {Action.isolate_node, Action.forget_node}
return observation, reward, done, {} # {"state": self.state}
def compute_reward(self, health, action):
if action == Action.decrease_attention:
return -0.2
if action == Action.increase_attention:
return -0.8
if action == Action.access_detector:
return -0.1
if action == Action.isolate_node:
return 1 * (health * 2 - 1)
if action == Action.forget_node:
return -1 * (health * 2 - 1)
return 0
def reset(self):
# Node if either healthy (0), or infected (1), when node is infected,
# agent observes malware requests depending on the attention level.
health = self.np_random.choice([0, 1])
attention = self.min_attention
malware_rate = 0
self.state = np.array([attention, malware_rate, health])
return np.array([attention, malware_rate])
def render(self, mode="human"):
attention, malware_rate, infected = self.state
print(f"\tattention: {attention} - malware rate: {malware_rate}", end=" - ")
print(f"health: {'infected' if infected else 'healthy'}", end=" - ")
print(f"action: {self.latest_action}")
def close(self):
pass
| 33.584337 | 87 | 0.609507 | from enum import Enum
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class Action(Enum):
decrease_attention = 0
increase_attention = 1
access_detector = 2
isolate_node = 3
forget_node = 4
class State(Enum):
healthy = 0
infected = 1
class MalwareEnv(gym.Env):
def __init__(self, malware_prob: float = 0.9, seed: int = 100, log: bool = False):
self.min_attention = 0.05
self.max_attention = 1.0
self.min_rate = 0.0
self.max_rate = 1.0
self.attention_inc = 0.05
self.low = np.array([self.min_attention, self.min_rate], dtype=np.float32)
self.high = np.array([self.max_attention, self.max_rate], dtype=np.float32)
self.action_space = spaces.Discrete(5)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
self.malware_prob = malware_prob
self.log = log
self.state = (None, None, None)
self.latest_action = None
self.actions = []
self.seed(seed)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def start_step_policy(self, observation):
attention, malware_rate = observation
if attention > self.min_attention:
return Action.access_detector.value
return Action.increase_attention.value
def step(self, action):
if isinstance(action, np.ndarray):
action = np.argmax(action)
assert self.action_space.contains(action), f"{action} ({type(action)}) invalid"
action = Action(action)
if self.log:
self.actions.append(action)
attention, malware_rate, health = self.state
st = State(health)
if action == Action.decrease_attention:
attention = max(self.min_attention, attention - self.attention_inc)
if action == Action.increase_attention:
attention = min(self.max_attention, attention + self.attention_inc)
if action == Action.access_detector:
prob = self.np_random.uniform()
T = (1 - self.malware_prob) if st == State.healthy else self.malware_prob
mu = np.average([0, attention])
malware_rate = 0 if prob > T else self.np_random.normal(mu, 0.01)
malware_rate = max(self.min_rate, malware_rate)
malware_rate = min(self.max_rate, malware_rate)
self.state = np.array([attention, malware_rate, health])
self.latest_action = action
observation = np.array([attention, malware_rate])
reward = self.compute_reward(health, action)
done = action in {Action.isolate_node, Action.forget_node}
return observation, reward, done, {}
def compute_reward(self, health, action):
if action == Action.decrease_attention:
return -0.2
if action == Action.increase_attention:
return -0.8
if action == Action.access_detector:
return -0.1
if action == Action.isolate_node:
return 1 * (health * 2 - 1)
if action == Action.forget_node:
return -1 * (health * 2 - 1)
return 0
def reset(self):
health = self.np_random.choice([0, 1])
attention = self.min_attention
malware_rate = 0
self.state = np.array([attention, malware_rate, health])
return np.array([attention, malware_rate])
def render(self, mode="human"):
attention, malware_rate, infected = self.state
print(f"\tattention: {attention} - malware rate: {malware_rate}", end=" - ")
print(f"health: {'infected' if infected else 'healthy'}", end=" - ")
print(f"action: {self.latest_action}")
def close(self):
pass
| true | true |
f72daf46254df0d6c88e364ddd25c09ced7b0cae | 1,415 | py | Python | src/migrations/versions/7b0843b4944f_.py | colinnewell/Adventure-Insecure | 46717dd14d88887559bb3a392c67b534c294edaa | [
"MIT"
] | 4 | 2016-09-24T19:46:12.000Z | 2017-07-08T02:17:06.000Z | src/migrations/versions/7b0843b4944f_.py | colinnewell/Adventure-Insecure | 46717dd14d88887559bb3a392c67b534c294edaa | [
"MIT"
] | null | null | null | src/migrations/versions/7b0843b4944f_.py | colinnewell/Adventure-Insecure | 46717dd14d88887559bb3a392c67b534c294edaa | [
"MIT"
] | null | null | null | """empty message
Revision ID: 7b0843b4944f
Revises: a83fe752a741
Create Date: 2016-08-08 23:12:27.138166
"""
# revision identifiers, used by Alembic.
revision = '7b0843b4944f'
down_revision = 'a83fe752a741'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('order_header',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('title', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('order_line',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('request', sa.Text(), nullable=False),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.Column('order_for', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['order_for'], ['users.id'], ),
sa.ForeignKeyConstraint(['order_id'], ['order_header.id'], name='order_id_fkey'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('order_line')
op.drop_table('order_header')
### end Alembic commands ###
| 31.444444 | 85 | 0.677739 |
revision = '7b0843b4944f'
down_revision = 'a83fe752a741'
from alembic import op
import sqlalchemy as sa
def upgrade():
DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('title', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('order_line',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('request', sa.Text(), nullable=False),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.Column('order_for', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['order_for'], ['users.id'], ),
sa.ForeignKeyConstraint(['order_id'], ['order_header.id'], name='order_id_fkey'),
sa.PrimaryKeyConstraint('id')
)
| true | true |
f72db0772a914856e4cccbef369f54251e89d2f9 | 1,043 | py | Python | app/core/migrations/0004_recipe.py | kyla-m/recipe-app-api | 8fcd7bcee0075c2399204af98afff8c4ddc2cde1 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | kyla-m/recipe-app-api | 8fcd7bcee0075c2399204af98afff8c4ddc2cde1 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | kyla-m/recipe-app-api | 8fcd7bcee0075c2399204af98afff8c4ddc2cde1 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2021-06-30 17:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.965517 | 118 | 0.604027 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f72db08facbd9830410347ef99c14ccf4da7bba4 | 3,878 | py | Python | catalyst/contrib/dl/runner/alchemy.py | asmekal/catalyst | e11365c0a9812649ceaef14e53061cd5117d8684 | [
"MIT"
] | null | null | null | catalyst/contrib/dl/runner/alchemy.py | asmekal/catalyst | e11365c0a9812649ceaef14e53061cd5117d8684 | [
"MIT"
] | null | null | null | catalyst/contrib/dl/runner/alchemy.py | asmekal/catalyst | e11365c0a9812649ceaef14e53061cd5117d8684 | [
"MIT"
] | null | null | null | from typing import Dict # isort:skip
from alchemy import Logger
from catalyst.dl import utils
from catalyst.dl.core import Experiment, Runner
from catalyst.dl.runner import SupervisedRunner
class AlchemyRunner(Runner):
"""
Runner wrapper with Alchemy integration hooks.
Read about Alchemy here https://alchemy.host
Powered by Catalyst.Ecosystem
Example:
.. code-block:: python
from catalyst.dl import SupervisedAlchemyRunner
runner = SupervisedAlchemyRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
logdir=logdir,
num_epochs=num_epochs,
verbose=True,
monitoring_params={
"token": "...", # your Alchemy token
"project": "your_project_name",
"experiment": "your_experiment_name",
"group": "your_experiment_group_name"
}
)
"""
def _init(
self,
log_on_batch_end: bool = False,
log_on_epoch_end: bool = True,
):
super()._init()
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
if (self.log_on_batch_end and not self.log_on_epoch_end) \
or (not self.log_on_batch_end and self.log_on_epoch_end):
self.batch_log_suffix = ""
self.epoch_log_suffix = ""
else:
self.batch_log_suffix = "_batch"
self.epoch_log_suffix = "_epoch"
def _log_metrics(self, metrics: Dict, mode: str, suffix: str = ""):
for key, value in metrics.items():
metric_name = f"{key}/{mode}{suffix}"
self.logger.log_scalar(metric_name, value)
def _pre_experiment_hook(self, experiment: Experiment):
monitoring_params = experiment.monitoring_params
log_on_batch_end: bool = \
monitoring_params.pop("log_on_batch_end", False)
log_on_epoch_end: bool = \
monitoring_params.pop("log_on_epoch_end", True)
self._init(
log_on_batch_end=log_on_batch_end,
log_on_epoch_end=log_on_epoch_end,
)
self.logger = Logger(**monitoring_params)
def _post_experiment_hook(self, experiment: Experiment):
self.logger.close()
def _run_batch(self, batch):
super()._run_batch(batch=batch)
if self.log_on_batch_end and not self.state.is_distributed_worker:
mode = self.state.loader_name
metrics = self.state.batch_metrics
self._log_metrics(
metrics=metrics, mode=mode, suffix=self.batch_log_suffix
)
def _run_epoch(self, stage: str, epoch: int):
super()._run_epoch(stage=stage, epoch=epoch)
if self.log_on_epoch_end and not self.state.is_distributed_worker:
mode_metrics = utils.split_dict_to_subdicts(
dct=self.state.epoch_metrics,
prefixes=list(self.state.loaders.keys()),
extra_key="_base",
)
for mode, metrics in mode_metrics.items():
self._log_metrics(
metrics=metrics, mode=mode, suffix=self.epoch_log_suffix
)
def run_experiment(self, experiment: Experiment):
"""Starts experiment
Args:
experiment (Experiment): experiment class
"""
self._pre_experiment_hook(experiment=experiment)
super().run_experiment(experiment=experiment)
self._post_experiment_hook(experiment=experiment)
class SupervisedAlchemyRunner(AlchemyRunner, SupervisedRunner):
"""SupervisedRunner with Alchemy"""
pass
__all__ = ["AlchemyRunner", "SupervisedAlchemyRunner"]
| 32.864407 | 76 | 0.612171 | from typing import Dict
from alchemy import Logger
from catalyst.dl import utils
from catalyst.dl.core import Experiment, Runner
from catalyst.dl.runner import SupervisedRunner
class AlchemyRunner(Runner):
def _init(
self,
log_on_batch_end: bool = False,
log_on_epoch_end: bool = True,
):
super()._init()
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
if (self.log_on_batch_end and not self.log_on_epoch_end) \
or (not self.log_on_batch_end and self.log_on_epoch_end):
self.batch_log_suffix = ""
self.epoch_log_suffix = ""
else:
self.batch_log_suffix = "_batch"
self.epoch_log_suffix = "_epoch"
def _log_metrics(self, metrics: Dict, mode: str, suffix: str = ""):
for key, value in metrics.items():
metric_name = f"{key}/{mode}{suffix}"
self.logger.log_scalar(metric_name, value)
def _pre_experiment_hook(self, experiment: Experiment):
monitoring_params = experiment.monitoring_params
log_on_batch_end: bool = \
monitoring_params.pop("log_on_batch_end", False)
log_on_epoch_end: bool = \
monitoring_params.pop("log_on_epoch_end", True)
self._init(
log_on_batch_end=log_on_batch_end,
log_on_epoch_end=log_on_epoch_end,
)
self.logger = Logger(**monitoring_params)
def _post_experiment_hook(self, experiment: Experiment):
self.logger.close()
def _run_batch(self, batch):
super()._run_batch(batch=batch)
if self.log_on_batch_end and not self.state.is_distributed_worker:
mode = self.state.loader_name
metrics = self.state.batch_metrics
self._log_metrics(
metrics=metrics, mode=mode, suffix=self.batch_log_suffix
)
def _run_epoch(self, stage: str, epoch: int):
super()._run_epoch(stage=stage, epoch=epoch)
if self.log_on_epoch_end and not self.state.is_distributed_worker:
mode_metrics = utils.split_dict_to_subdicts(
dct=self.state.epoch_metrics,
prefixes=list(self.state.loaders.keys()),
extra_key="_base",
)
for mode, metrics in mode_metrics.items():
self._log_metrics(
metrics=metrics, mode=mode, suffix=self.epoch_log_suffix
)
def run_experiment(self, experiment: Experiment):
self._pre_experiment_hook(experiment=experiment)
super().run_experiment(experiment=experiment)
self._post_experiment_hook(experiment=experiment)
class SupervisedAlchemyRunner(AlchemyRunner, SupervisedRunner):
pass
__all__ = ["AlchemyRunner", "SupervisedAlchemyRunner"]
| true | true |
f72db0ea58b82317604e179be05a4bcd2493f975 | 39,757 | py | Python | plasmapy/formulary/tests/test_parameters.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 2 | 2020-09-16T08:53:45.000Z | 2022-01-29T18:00:10.000Z | plasmapy/formulary/tests/test_parameters.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/formulary/tests/test_parameters.py | RAJAGOPALAN-GANGADHARAN/PlasmaPy | 6df9583cc47375687a07300c0aa11ba31634d770 | [
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null | """Tests for functions that calculate plasma parameters."""
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import m_e, m_p
from astropy.tests.helper import assert_quantity_allclose
from plasmapy.formulary.parameters import (
Alfven_speed,
betaH_,
Bohm_diffusion,
cs_,
cwp_,
DB_,
Debye_length,
Debye_number,
gyrofrequency,
gyroradius,
Hall_parameter,
inertial_length,
ion_sound_speed,
lambdaD_,
lower_hybrid_frequency,
magnetic_energy_density,
magnetic_pressure,
mass_density,
nD_,
oc_,
plasma_frequency,
pmag_,
pth_,
rc_,
rho_,
rhoc_,
thermal_pressure,
thermal_speed,
ub_,
upper_hybrid_frequency,
va_,
wc_,
wlh_,
wp_,
wuh_,
)
from plasmapy.particles import Particle
from plasmapy.particles.exceptions import InvalidParticleError
from plasmapy.utils.exceptions import (
PhysicsError,
PhysicsWarning,
PlasmaPyFutureWarning,
RelativityError,
RelativityWarning,
)
from plasmapy.utils.pytest_helpers import assert_can_handle_nparray
B = 1.0 * u.T
Z = 1
ion = "p"
m_i = m_p
n_i = 5e19 * u.m ** -3
n_e = Z * 5e19 * u.m ** -3
rho = n_i * m_i + n_e * m_e
T_e = 1e6 * u.K
T_i = 1e6 * u.K
k_1 = 3e1 * u.m ** -1
k_2 = 3e7 * u.m ** -1
B_arr = np.array([0.001, 0.002]) * u.T
B_nanarr = np.array([0.001, np.nan]) * u.T
B_allnanarr = np.array([np.nan, np.nan]) * u.T
rho_arr = np.array([5e-10, 2e-10]) * u.kg / u.m ** 3
rho_infarr = np.array([np.inf, 5e19]) * u.m ** -3
rho_negarr = np.array([-5e19, 6e19]) * u.m ** -3
T_arr = np.array([1e6, 2e6]) * u.K
T_nanarr = np.array([1e6, np.nan]) * u.K
T_nanarr2 = np.array([np.nan, 2e6]) * u.K
T_allnanarr = np.array([np.nan, np.nan]) * u.K
T_negarr = np.array([1e6, -5151.0]) * u.K
V = 25.2 * u.m / u.s
V_arr = np.array([25, 50]) * u.m / u.s
V_nanarr = np.array([25, np.nan]) * u.m / u.s
V_allnanarr = np.array([np.nan, np.nan]) * u.m / u.s
mu = m_p.to(u.u).value
class Test_mass_density:
r"""Test the mass_density function in parameters.py."""
@pytest.mark.parametrize(
"args, kwargs, conditional",
[
((-1 * u.kg * u.m ** -3, "He"), {}, pytest.raises(ValueError)),
((-1 * u.m ** -3, "He"), {}, pytest.raises(ValueError)),
(("not a Quantity", "He"), {}, pytest.raises(TypeError)),
((1 * u.m ** -3,), {}, pytest.raises(TypeError)),
((1 * u.J, "He"), {}, pytest.raises(u.UnitTypeError)),
((1 * u.m ** -3, None), {}, pytest.raises(TypeError)),
(
(1 * u.m ** -3, "He"),
{"z_ratio": "not a ratio"},
pytest.raises(TypeError),
),
],
)
def test_raises(self, args, kwargs, conditional):
with conditional:
mass_density(*args, **kwargs)
@pytest.mark.parametrize(
"args, kwargs, expected",
[
((1.0 * u.g * u.m ** -3, ""), {}, 1.0e-3 * u.kg * u.m ** -3),
((5.0e12 * u.cm ** -3, "He"), {}, 3.32323849e-8 * u.kg * u.m ** -3),
(
(5.0e12 * u.cm ** -3, Particle("He")),
{},
3.32323849e-8 * u.kg * u.m ** -3,
),
(
(5.0e12 * u.cm ** -3, "He"),
{"z_ratio": 0.5},
1.66161925e-08 * u.kg * u.m ** -3,
),
(
(5.0e12 * u.cm ** -3, "He"),
{"z_ratio": -0.5},
1.66161925e-08 * u.kg * u.m ** -3,
),
],
)
def test_values(self, args, kwargs, expected):
assert np.isclose(mass_density(*args, **kwargs), expected)
def test_handle_nparrays(self):
"""Test for ability to handle numpy array quantities"""
assert_can_handle_nparray(mass_density)
# Assertions below that are in CGS units with 2-3 significant digits
# are generally from the NRL Plasma Formulary.
class TestAlfvenSpeed:
"""Test `~plasmapy.formulary.parameters.Alfven_speed`."""
@pytest.mark.parametrize("alias", [va_])
def test_aliases(self, alias):
assert alias is Alfven_speed
@pytest.mark.parametrize(
"args, kwargs, _error",
[
# scenarios that raise RelativityError
((10 * u.T, 1.0e-10 * u.kg * u.m ** -3), {}, RelativityError),
((np.inf * u.T, 1 * u.m ** -3), {"ion": "p"}, RelativityError),
((-np.inf * u.T, 1 * u.m ** -3), {"ion": "p"}, RelativityError),
#
# scenarios that raise InvalidParticleError
((1 * u.T, 5e19 * u.m ** -3), {"ion": "spacecats"}, InvalidParticleError),
#
# scenarios that raise TypeError
(("not a Bfield", 1.0e-10 * u.kg * u.m ** -3), {}, TypeError),
((10 * u.T, "not a density"), {}, TypeError),
((10 * u.T, 5), {"ion": "p"}, TypeError),
((1 * u.T, 1.0e18 * u.m ** -3), {"ion": ["He"]}, TypeError),
((1 * u.T, 1.0e18 * u.m ** -3), {"ion": "He", "z_mean": "nope"}, TypeError),
#
# scenarios that raise UnitTypeError
((1 * u.T, 1.0e18 * u.cm), {"ion": "He"}, u.UnitTypeError),
((1 * u.T, 5 * u.m ** -2), {"ion": "p"}, u.UnitTypeError),
((1 * u.cm, 1.0e18 * u.m ** -3), {"ion": "He"}, u.UnitTypeError),
((5 * u.A, 5e19 * u.m ** -3), {"ion": "p"}, u.UnitTypeError),
#
# scenarios that raise ValueError
((1 * u.T, -1.0e18 * u.m ** -3), {"ion": "He"}, ValueError),
(
(np.array([5, 6, 7]) * u.T, np.array([5, 6]) * u.m ** -3),
{"ion": "p"},
ValueError,
),
(
(np.array([0.001, 0.002]) * u.T, np.array([-5e19, 6e19]) * u.m ** -3),
{"ion": "p"},
ValueError,
),
],
)
def test_raises(self, args, kwargs, _error):
"""Test scenarios that raise exceptions or warnings."""
with pytest.raises(_error):
Alfven_speed(*args, **kwargs)
@pytest.mark.parametrize(
"args, kwargs, expected, isclose_kw, _warning",
[
# scenarios that issue RelativityWarning
(
(5 * u.T, 5e19 * u.m ** -3),
{"ion": "H"},
15413707.39,
{},
RelativityWarning,
),
(
(5 * u.T, 5e19 * u.m ** -3),
{"ion": "H+"},
15413707.39,
{"rtol": 3.0e-4},
RelativityWarning,
),
(
(5 * u.T, 5e19 * u.m ** -3),
{"ion": "p"},
15413707.39,
{"rtol": 4.0e-4},
RelativityWarning,
),
#
# scenarios that issue UnitsWarning
((0.5, 1.0e18 * u.m ** -3), {"ion": "He"}, 5470657.93, {}, u.UnitsWarning),
],
)
def test_warns(self, args, kwargs, expected, isclose_kw, _warning):
"""Test scenarios that issue warnings"""
with pytest.warns(_warning):
val = Alfven_speed(*args, **kwargs)
assert isinstance(val, u.Quantity)
assert val.unit == u.m / u.s
assert np.isclose(val.value, expected, **isclose_kw)
@pytest.mark.parametrize(
"args, kwargs, expected, isclose_kw",
[
(
(1 * u.T, 1e-8 * u.kg * u.m ** -3),
{"ion": "p"},
8920620.58 * u.m / u.s,
{"rtol": 1e-6},
),
(
(1 * u.T, 1e-8 * u.kg * u.m ** -3),
{},
8920620.58 * u.m / u.s,
{"rtol": 1e-6},
),
(
(0.05 * u.T, 1e18 * u.m ** -3),
{"ion": "He"},
Alfven_speed(0.05 * u.T, 6.64738793e-09 * u.kg * u.m ** -3),
{},
),
(
(0.05 * u.T, 1e18 * u.m ** -3),
{"ion": "He+"},
Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion="He"),
{"rtol": 7e-5},
),
(
(0.05 * u.T, 1e18 * u.m ** -3),
{"ion": "He", "z_mean": 2},
Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion="He +2"),
{"rtol": 1.4e-4},
),
(
(0.05 * u.T, 1e18 * u.m ** -3),
{"ion": Particle("He+")},
Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion="He+"),
{},
),
(
([0.001, 0.002] * u.T, 5e-10 * u.kg * u.m ** -3),
{},
[
va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,
va_(0.002 * u.T, 5e-10 * u.kg * u.m ** -3).value,
]
* (u.m / u.s),
{},
),
(
([0.001, 0.002] * u.T, [5e-10, 2e-10] * u.kg * u.m ** -3),
{},
[
va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,
va_(0.002 * u.T, 2e-10 * u.kg * u.m ** -3).value,
]
* (u.m / u.s),
{},
),
(
(0.001 * u.T, [1.0e18, 2e18] * u.m ** -3),
{"ion": "p"},
[
va_(0.001 * u.T, 1e18 * u.m ** -3, ion="p").value,
va_(0.001 * u.T, 2e18 * u.m ** -3, ion="p").value,
]
* (u.m / u.s),
{},
),
],
)
def test_values(self, args, kwargs, expected, isclose_kw):
"""Test expected values."""
assert np.allclose(Alfven_speed(*args, **kwargs), expected, **isclose_kw)
    @pytest.mark.parametrize(
        "args, kwargs, nan_mask",
        [
            # nan_mask marks which elements of an array result must be NaN;
            # an empty mask means the whole result is a NaN scalar.
            ((np.nan * u.T, 1 * u.kg * u.m ** -3), {}, []),
            ((0.001 * u.T, np.nan * u.kg * u.m ** -3), {}, []),
            (([np.nan, 0.001] * u.T, 1 * u.kg * u.m ** -3), {}, [True, False]),
            (
                (0.001 * u.T, [np.nan, 1.0, np.nan] * u.kg * u.m ** -3),
                {},
                [True, False, True],
            ),
            (([np.nan, 0.001] * u.T, [1, np.nan] * u.kg * u.m ** -3), {}, [True, True]),
            (
                (0.001 * u.T, [np.nan, 1e18, np.nan] * u.m ** -3),
                {"ion": "Ar+"},
                [True, False, True],
            ),
        ],
    )
    def test_nan_values(self, args, kwargs, nan_mask):
        """Input scenarios that lead to `numpy.nan` values being returned."""
        val = Alfven_speed(*args, **kwargs)
        if np.isscalar(val.value):
            assert np.isnan(val)
        else:
            nan_arr = np.isnan(val)
            # NaN exactly where the mask says, finite everywhere else
            assert np.all(nan_arr[nan_mask])
            assert np.all(np.logical_not(nan_arr[np.logical_not(nan_mask)]))
    def test_handle_nparrays(self):
        """Test that `Alfven_speed` can handle numpy array quantities."""
        assert_can_handle_nparray(Alfven_speed)
class Test_Ion_Sound_Speed:
    r"""Test the ion_sound_speed function in parameters.py."""

    @pytest.mark.parametrize(
        "args, kwargs, expected, isclose_kw",
        [
            (
                (),
                {
                    "T_i": 1.3232 * u.MK,
                    "T_e": 1.831 * u.MK,
                    "ion": "p",
                    "gamma_e": 1,
                    "gamma_i": 3,
                },
                218816.06086407552 * (u.m / u.s),
                {},
            ),
            (
                (1.831 * u.MK, 1.3232 * u.MK, "p"),
                {},
                218816.06086407552 * (u.m / u.s),
                {},
            ),  # Test that function call without keyword argument works correctly
            (
                (),
                {
                    "T_i": 1.3232 * u.MK,
                    "T_e": 1.831 * u.MK,
                    "n_e": n_e,
                    "k": k_1,
                    "ion": "p",
                    "gamma_e": 1,
                    "gamma_i": 3,
                },
                218816.06086407552 * (u.m / u.s),
                {},
            ),
            (
                (),
                {
                    "T_i": 1.3232 * u.MK,
                    "T_e": 1.831 * u.MK,
                    "n_e": n_e,
                    "k": k_2,
                    "ion": "p",
                    "gamma_e": 1,
                    "gamma_i": 3,
                },
                552.3212936293337 * (u.m / u.s),
                {},
            ),
            (
                (),
                {
                    "T_i": 0.88 * u.MK,
                    "T_e": 1.28 * u.MK,
                    "n_e": n_e,
                    "k": 0 * u.m ** -1,
                    "ion": "p",
                    "gamma_e": 1.2,
                    "gamma_i": 3.4,
                },
                193328.52857788358 * (u.m / u.s),
                {},
            ),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p+"},
                ion_sound_speed(T_i=T_i, T_e=0 * u.K, n_e=n_e, k=k_1, ion="p+").value
                * (u.m / u.s),
                {},
            ),
            (
                (),
                {
                    "T_e": 1.2e6 * u.K,
                    "T_i": 0 * u.K,
                    "n_e": n_e,
                    "k": 0 * u.m ** -1,
                    "z_mean": 0.8,
                    "ion": "p",
                },
                89018.09 * (u.m / u.s),
                {"atol": 0.0, "rtol": 1e-6},
            ),  # testing for user input z_mean
        ],
    )
    def test_values(self, args, kwargs, expected, isclose_kw):
        """Test scenarios with known expected values."""
        assert np.isclose(ion_sound_speed(*args, **kwargs), expected, **isclose_kw)

    # case when Z=1 is assumed
    # assert ion_sound_speed(T_i=T_i, T_e=T_e, ion='p+') == ion_sound_speed(T_i=T_i, T_e=T_e,
    # ion='H-1')

    @pytest.mark.parametrize(
        "kwargs1, kwargs2, _warning",
        [
            ({"T_i": T_i, "T_e": T_e, "n_e": n_e, "ion": "p"}, {}, PhysicsWarning),
            ({"T_i": T_i, "T_e": T_e, "k": k_1, "ion": "p"}, {}, PhysicsWarning),
            ({"T_i": 5e11 * u.K, "T_e": 0 * u.K, "ion": "p"}, {}, RelativityWarning),
            (
                {"T_e": 1.2e6, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                {"T_e": 1.2e6 * u.K, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                u.UnitsWarning,
            ),
            (
                {"T_i": 1.3e6, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                {"T_i": 1.3e6 * u.K, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                u.UnitsWarning,
            ),
        ],
    )
    def test_warns(self, kwargs1, kwargs2, _warning):
        """Test scenarios that issue warnings.

        When ``kwargs2`` is non-empty it holds the same inputs as ``kwargs1``
        but with explicit units, and both calls must give equal results.
        """
        with pytest.warns(_warning):
            val = ion_sound_speed(**kwargs1)
            if kwargs2 != {}:
                # BUG FIX: the comparison result used to be computed and
                # silently discarded; it must be asserted to have any effect.
                assert val == ion_sound_speed(**kwargs2)

    @pytest.mark.parametrize(
        "args, kwargs, _error",
        [
            (
                (),
                {
                    "T_i": T_i,
                    "T_e": T_e,
                    "n_e": n_e,
                    "k": k_1,
                    "ion": "p",
                    "gamma_i": np.inf,
                },
                RelativityError,
            ),
            (
                (),
                {
                    "T_i": np.array([5, 6, 5]) * u.K,
                    "T_e": np.array([3, 4]) * u.K,
                    "n_e": np.array([5, 6, 5]) * u.m ** -3,
                    "k": np.array([3, 4]) * u.m ** -3,
                    "ion": "p",
                },
                u.UnitTypeError,
            ),
            # NOTE(review): (5 * u.T) is not a tuple, so *args iterates a
            # scalar Quantity and TypeError is raised during unpacking rather
            # than by ion_sound_speed itself — confirm this is the intent.
            ((5 * u.T), {"ion": "p"}, TypeError),
            ((), {"ion": "p"}, TypeError),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "gamma_i": 0.9999, "ion": "p"},
                PhysicsError,
            ),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "gamma_e": 0.9999, "ion": "p"},
                PhysicsError,
            ),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "gamma_e": "sdjklsf", "ion": "p"},
                TypeError,
            ),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "gamma_i": "fsdfas", "ion": "p"},
                TypeError,
            ),
            ((), {"T_i": T_i, "T_e": 0 * u.K, "ion": "cupcakes"}, InvalidParticleError),
            ((), {"T_i": -np.abs(T_i), "T_e": 0 * u.K, "ion": "p"}, ValueError),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "n_e": -np.abs(n_e), "k": k_1, "ion": "p"},
                ValueError,
            ),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "n_e": n_e, "k": -np.abs(k_1), "ion": "p"},
                ValueError,
            ),
            ((), {"T_i": 5e19 * u.K, "T_e": 0 * u.K, "ion": "p"}, RelativityError),
            (
                (),
                {"T_i": 5 * u.A, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                u.UnitTypeError,
            ),
            (
                (),
                {"T_i": T_negarr, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                ValueError,
            ),
            (
                (),
                {"T_e": T_negarr, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
                ValueError,
            ),
        ],
    )
    def test_raises(self, args, kwargs, _error):
        """Test scenarios that raise exceptions."""
        with pytest.raises(_error):
            ion_sound_speed(*args, **kwargs)

    @pytest.mark.parametrize(
        "kwargs",
        [
            ({"T_i": T_nanarr, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"}),
            ({"T_e": T_nanarr, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"}),
        ],
    )
    def test_nan_values(self, kwargs):
        """A NaN temperature element yields a NaN speed element."""
        # BUG FIX: the np.isnan(...) result used to be discarded — assert it.
        assert np.isnan(ion_sound_speed(**kwargs)[1])

    def test_handle_nparrays(self):
        """Test that `ion_sound_speed` can handle numpy array quantities."""
        assert_can_handle_nparray(ion_sound_speed)
def test_thermal_pressure():
    """Test the thermal_pressure function in parameters.py."""
    pressure = thermal_pressure(T_e, n_i)
    assert pressure.unit.is_equivalent(u.Pa)
    # TODO: may be array issues with arg "mass"
    assert_can_handle_nparray(thermal_pressure)
def test_gyrofrequency():
    r"""Test the gyrofrequency function in parameters.py."""
    # units and reference values for electrons
    assert gyrofrequency(B, "e-").unit.is_equivalent(u.rad / u.s)
    assert gyrofrequency(B, "e-", to_hz=True).unit.is_equivalent(u.Hz)
    assert np.isclose(gyrofrequency(1 * u.T, "e-").value, 175882008784.72018)
    assert np.isclose(gyrofrequency(2.4 * u.T, "e-").value, 422116821083.3284)
    assert np.isclose(
        gyrofrequency(1 * u.T, "e-", to_hz=True).value, 27992490076.528206
    )
    # signed=True keeps the (negative) sign of the electron charge
    assert np.isclose(
        gyrofrequency(2.4 * u.T, "e-", signed=True).value, -422116821083.3284
    )
    assert np.isclose(gyrofrequency(1 * u.G, "e-").cgs.value, 1.76e7, rtol=1e-3)
    # invalid magnetic field inputs
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            gyrofrequency(u.m, "e-")
    with pytest.raises(u.UnitTypeError):
        gyrofrequency(u.m * 1, "e-")
    assert np.isnan(gyrofrequency(B_nanarr, "e-")[-1])
    # The following is a test to check that equivalencies from astropy
    # are working.
    omega_ce = gyrofrequency(2.2 * u.T, "e-")
    f_ce = (omega_ce / (2 * np.pi)) / u.rad
    f_ce_use_equiv = omega_ce.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])
    assert np.isclose(f_ce.value, f_ce_use_equiv.value)
    with pytest.warns(u.UnitsWarning):
        assert gyrofrequency(5.0, "e-") == gyrofrequency(5.0 * u.T, "e-")
    # units and reference values for ions
    assert gyrofrequency(B, particle=ion).unit.is_equivalent(u.rad / u.s)
    assert np.isclose(gyrofrequency(1 * u.T, particle="p").value, 95788335.834874)
    assert np.isclose(gyrofrequency(2.4 * u.T, particle="p").value, 229892006.00369796)
    assert np.isclose(gyrofrequency(1 * u.G, particle="p").cgs.value, 9.58e3, rtol=2e-3)
    assert gyrofrequency(-5 * u.T, "p") == gyrofrequency(5 * u.T, "p")
    # Case when Z=1 is assumed
    # assert gyrofrequency(B, particle='p+') == gyrofrequency(B, particle='H-1')
    assert gyrofrequency(B, particle="e+") == gyrofrequency(B, "e-")
    with pytest.warns(u.UnitsWarning):
        gyrofrequency(8, "p")
    with pytest.raises(u.UnitTypeError):
        gyrofrequency(5 * u.m, "p")
    with pytest.raises(InvalidParticleError):
        gyrofrequency(8 * u.T, particle="asdfasd")
    with pytest.warns(u.UnitsWarning):
        # NOTE(review): stale TODO removed — a bare number already only
        # *warns* (u.UnitsWarning) and is interpreted as tesla, as tested here.
        assert gyrofrequency(5.0, "p") == gyrofrequency(5.0 * u.T, "p")
    gyrofrequency(1 * u.T, particle="p")
    # testing for user input Z
    testMeth1 = gyrofrequency(1 * u.T, particle="p", Z=0.8).si.value
    testTrue1 = 76630665.79318453
    errStr = f"gyrofrequency() gave {testMeth1}, should be {testTrue1}."
    assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-5), errStr
    assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})
    assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})
def test_gyroradius():
    r"""Test the gyroradius function in parameters.py."""
    assert gyroradius(B, "e-", T=T_e).unit.is_equivalent(u.m)
    assert gyroradius(B, "e-", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)
    # test for possiblity to allow nan for input values
    assert np.isnan(gyroradius(np.nan * u.T, particle="e-", T=1 * u.K))
    assert np.isnan(gyroradius(1 * u.T, particle="e-", T=np.nan * u.K))
    assert np.isnan(gyroradius(1 * u.T, particle="e-", Vperp=np.nan * u.m / u.s))
    # r_L = Vperp / omega_c for an electron
    Vperp = 1e6 * u.m / u.s
    Bmag = 1 * u.T
    omega_ce = gyrofrequency(Bmag, "e-")
    analytical_result = (Vperp / omega_ce).to(
        u.m, equivalencies=u.dimensionless_angles()
    )
    assert gyroradius(Bmag, "e-", Vperp=Vperp) == analytical_result
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            gyroradius(u.T, "e-")
    with pytest.raises(u.UnitTypeError):
        gyroradius(5 * u.A, "e-", Vperp=8 * u.m / u.s)
    with pytest.raises(u.UnitTypeError):
        gyroradius(5 * u.T, "e-", Vperp=8 * u.m)
    with pytest.raises(ValueError):
        gyroradius(np.array([5, 6]) * u.T, "e-", Vperp=np.array([5, 6, 7]) * u.m / u.s)
    assert np.isnan(gyroradius(np.nan * u.T, "e-", Vperp=1 * u.m / u.s))
    with pytest.raises(ValueError):
        gyroradius(3.14159 * u.T, "e-", T=-1 * u.K)
    with pytest.warns(u.UnitsWarning):
        assert gyroradius(1.0, "e-", Vperp=1.0) == gyroradius(
            1.0 * u.T, "e-", Vperp=1.0 * u.m / u.s
        )
    with pytest.warns(u.UnitsWarning):
        assert gyroradius(1.1, "e-", T=1.2) == gyroradius(1.1 * u.T, "e-", T=1.2 * u.K)
    # Vperp and T are mutually exclusive inputs
    with pytest.raises(ValueError):
        gyroradius(1.1 * u.T, "e-", Vperp=1 * u.m / u.s, T=1.2 * u.K)
    with pytest.raises(u.UnitTypeError):
        gyroradius(1.1 * u.T, "e-", Vperp=1.1 * u.m, T=1.2 * u.K)
    # Check for Deprecation warning when using T_i instead of T
    with pytest.warns(PlasmaPyFutureWarning):
        gyroradius(1.1 * u.T, "e-", T_i=1.2 * u.K)
    # ion checks
    assert gyroradius(B, particle="p", T=T_i).unit.is_equivalent(u.m)
    assert gyroradius(B, particle="p", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)
    # Case when Z=1 is assumed
    assert np.isclose(
        gyroradius(B, particle="p", T=T_i),
        gyroradius(B, particle="H+", T=T_i),
        atol=1e-6 * u.m,
    )
    # the gyroradius is independent of the sign of Vperp
    gyroPos = gyroradius(B, particle="p", Vperp=V)
    gyroNeg = gyroradius(B, particle="p", Vperp=-V)
    assert gyroPos == gyroNeg
    Vperp = 1e6 * u.m / u.s
    Bmag = 1 * u.T
    omega_ci = gyrofrequency(Bmag, particle="p")
    analytical_result = (Vperp / omega_ci).to(
        u.m, equivalencies=u.dimensionless_angles()
    )
    assert gyroradius(Bmag, particle="p", Vperp=Vperp) == analytical_result
    # supplying Vperp as the thermal speed matches supplying T directly
    T2 = 1.2 * u.MK
    B2 = 123 * u.G
    particle2 = "alpha"
    Vperp2 = thermal_speed(T2, particle=particle2)
    gyro_by_vperp = gyroradius(B2, particle="alpha", Vperp=Vperp2)
    assert gyro_by_vperp == gyroradius(B2, particle="alpha", T=T2)
    explicit_positron_gyro = gyroradius(1 * u.T, particle="positron", T=1 * u.MK)
    assert explicit_positron_gyro == gyroradius(1 * u.T, "e-", T=1 * u.MK)
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            gyroradius(u.T, particle="p", Vperp=8 * u.m / u.s)
    with pytest.raises(ValueError):
        gyroradius(B, particle="p", T=-1 * u.K)
    with pytest.warns(u.UnitsWarning):
        gyro_without_units = gyroradius(1.0, particle="p", Vperp=1.0)
        gyro_with_units = gyroradius(1.0 * u.T, particle="p", Vperp=1.0 * u.m / u.s)
        assert gyro_without_units == gyro_with_units
    with pytest.warns(u.UnitsWarning):
        gyro_t_without_units = gyroradius(1.1, particle="p", T=1.2)
        gyro_t_with_units = gyroradius(1.1 * u.T, particle="p", T=1.2 * u.K)
        assert gyro_t_with_units == gyro_t_without_units
    with pytest.raises(ValueError):
        gyroradius(1.1 * u.T, particle="p", Vperp=1 * u.m / u.s, T=1.2 * u.K)
    with pytest.raises(u.UnitTypeError):
        gyroradius(1.1 * u.T, particle="p", Vperp=1.1 * u.m, T=1.2 * u.K)
    with pytest.raises(u.UnitTypeError):
        gyroradius(1.1 * u.T, particle="p", Vperp=1.2 * u.m, T=1.1 * u.K)
class Test_gyroradius:
    """Array-input tests for `gyroradius`."""

    # some custom numpy array tests here, because of the T / Vperp situation
    def test_handle_numpy_array(self):
        """Array-valued Quantities behave element-wise."""
        # Tests to verify that can handle Quantities with numpy array as the value:
        assert gyroradius(B_arr, "e-", Vperp=V_arr)[0] == gyroradius(
            B_arr[0], "e-", Vperp=V_arr[0]
        )
        assert gyroradius(B_arr, "e-", T=T_arr)[0] == gyroradius(
            B_arr[0], "e-", T=T_arr[0]
        )

    def test_handle_mixed_Qarrays(self):
        """Complementary NaN patterns in Vperp and T are accepted."""
        # If both Vperp or T are input as Qarrays, but only one of the two is valid
        # at each element, then that's fine, the function should work:
        assert gyroradius(B_arr, "e-", Vperp=V_nanarr, T=T_nanarr2)[0] == gyroradius(
            B_arr[0], "e-", Vperp=V_nanarr[0], T=T_nanarr2[0]
        )

    def test_raise_two_valid_inputs(self):
        """Simultaneously valid Vperp and T must raise ValueError."""
        # If both Vperp or T are nan-less, Qarrays or not, should raise ValueError:
        with pytest.raises(ValueError):
            gyroradius(B_arr, "e-", Vperp=V, T=T_arr)
        with pytest.raises(ValueError):
            gyroradius(B_arr, "e-", Vperp=V_arr, T=T_i)

    def test_all_valid_and_one_valid(self):
        """A fully valid input plus a partially valid one raises ValueError."""
        # If one of (Vperp, T) is a valid and one is Qarray with at least one valid, ValueError:
        with pytest.raises(ValueError):
            gyroradius(B_arr, "e-", Vperp=V, T=T_nanarr)
        with pytest.raises(ValueError):
            gyroradius(B_arr, "e-", Vperp=V_nanarr, T=T_i)

    def test_scalar_and_nan_qarray(self):
        """A valid scalar plus an all-NaN array must succeed."""
        # If either Vperp or T is a valid scalar and the other is a Qarray of all nans,
        # should do something valid and not raise a ValueError
        assert np.all(np.isfinite(gyroradius(B_arr, "e-", Vperp=V, T=T_allnanarr)))
        assert np.all(np.isfinite(gyroradius(B_arr, "e-", Vperp=V_allnanarr, T=T_i)))

    def test_keeps_arguments_unchanged(self):
        """`gyroradius` must not mutate its input arrays."""
        Vperp1 = u.Quantity([np.nan, 1], unit=u.m / u.s)
        Vperp2 = u.Quantity([np.nan, 1], unit=u.m / u.s)  # an exact copy
        T_i = u.Quantity([1, np.nan], unit=u.K)
        gyroradius(B_arr, "e-", Vperp=Vperp1, T=T_i)
        assert_quantity_allclose(Vperp1, Vperp2)
def test_plasma_frequency():
    r"""Test the plasma_frequency function in parameters.py."""
    # units and reference values for electrons
    assert plasma_frequency(n_e, "e-").unit.is_equivalent(u.rad / u.s)
    assert plasma_frequency(n_e, "e-", to_hz=True).unit.is_equivalent(u.Hz)
    assert np.isclose(plasma_frequency(1 * u.cm ** -3, "e-").value, 5.64e4, rtol=1e-2)
    assert np.isclose(
        plasma_frequency(1 * u.cm ** -3, particle="N").value, 3.53e2, rtol=1e-1
    )
    assert np.isclose(
        plasma_frequency(1 * u.cm ** -3, particle="N", to_hz=True).value,
        56.19000195094519,
    )
    # invalid density inputs
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            plasma_frequency(u.m ** -3, "e-")
    with pytest.raises(u.UnitTypeError):
        plasma_frequency(5 * u.m ** -2, "e-")
    assert np.isnan(plasma_frequency(np.nan * u.m ** -3, "e-"))
    with pytest.warns(u.UnitsWarning):
        assert plasma_frequency(1e19, "e-") == plasma_frequency(1e19 * u.m ** -3, "e-")
    assert plasma_frequency(n_i, particle="p").unit.is_equivalent(u.rad / u.s)
    # Case where Z=1 is assumed
    assert plasma_frequency(n_i, particle="H-1+") == plasma_frequency(n_i, particle="p")
    assert np.isclose(
        plasma_frequency(mu * u.cm ** -3, particle="p").value, 1.32e3, rtol=1e-2
    )
    with pytest.raises(ValueError):
        plasma_frequency(n=5 * u.m ** -3, particle="sdfas")
    with pytest.warns(u.UnitsWarning):
        plasma_freq_no_units = plasma_frequency(1e19, particle="p")
        assert plasma_freq_no_units == plasma_frequency(1e19 * u.m ** -3, particle="p")
    plasma_frequency(1e17 * u.cm ** -3, particle="p")
    # testing for user input z_mean
    testMeth1 = plasma_frequency(1e17 * u.cm ** -3, particle="p", z_mean=0.8).si.value
    testTrue1 = 333063562455.4028
    errStr = f"plasma_frequency() gave {testMeth1}, should be {testTrue1}."
    assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-6), errStr
    assert_can_handle_nparray(plasma_frequency)
def test_Debye_length():
    r"""Test the Debye_length function in parameters.py."""
    # has units of length, and matches the reference value for 1 eV / 1 cm^-3
    length = Debye_length(T_e, n_e)
    assert length.unit.is_equivalent(u.m)
    assert np.isclose(Debye_length(1 * u.eV, 1 * u.cm ** -3).value, 7.43, atol=0.005)
    # a bare number for T only warns; bad units / negative values raise
    with pytest.warns(u.UnitsWarning):
        Debye_length(5, 5 * u.m ** -3)
    with pytest.raises(u.UnitTypeError):
        Debye_length(56 * u.kg, 5 * u.m ** -3)
    with pytest.raises(ValueError):
        Debye_length(5 * u.eV, -5 * u.m ** -3)
    with pytest.raises(ValueError):
        Debye_length(-45 * u.K, 5 * u.m ** -3)
    # mismatched array lengths are rejected
    temperature_pair = np.array([1, 2]) * u.K
    density_triplet = np.array([1, 2, 3]) * u.m ** -3
    with pytest.raises(ValueError):
        Debye_length(temperature_pair, density_triplet)
    # unitless inputs are interpreted as K and m^-3 (with a warning)
    with pytest.warns(u.UnitsWarning):
        assert Debye_length(2.0, 2.0) == Debye_length(2.0 * u.K, 2.0 * u.m ** -3)
    with pytest.warns(u.UnitsWarning):
        assert Debye_length(2.0 * u.K, 2.0) == Debye_length(2.0, 2.0 * u.m ** -3)
    assert_can_handle_nparray(Debye_length)
def test_Debye_number():
    r"""Test the Debye_number function in parameters.py."""
    assert Debye_number(T_e, n_e).unit.is_equivalent(u.dimensionless_unscaled)
    # temperature in K and in eV must give the same result
    T_e_eV = T_e.to(u.eV, equivalencies=u.temperature_energy())
    assert np.isclose(Debye_number(T_e, n_e).value, Debye_number(T_e_eV, n_e).value)
    assert np.isclose(Debye_number(1 * u.eV, 1 * u.cm ** -3).value, 1720862385.43342)
    # invalid inputs
    with pytest.warns(u.UnitsWarning):
        Debye_number(T_e, 4)
    with pytest.raises(ValueError):
        Debye_number(None, n_e)
    with pytest.raises(u.UnitTypeError):
        Debye_number(5 * u.m, 5 * u.m ** -3)
    with pytest.raises(u.UnitTypeError):
        Debye_number(5 * u.K, 5 * u.m ** 3)
    with pytest.raises(ValueError):
        Debye_number(5j * u.K, 5 * u.cm ** -3)
    # mismatched array lengths are rejected
    Tarr2 = np.array([1, 2]) * u.K
    narr3 = np.array([1, 2, 3]) * u.m ** -3
    with pytest.raises(ValueError):
        Debye_number(Tarr2, narr3)
    with pytest.warns(u.UnitsWarning):
        assert Debye_number(1.1, 1.1) == Debye_number(1.1 * u.K, 1.1 * u.m ** -3)
    with pytest.warns(u.UnitsWarning):
        assert Debye_number(1.1 * u.K, 1.1) == Debye_number(1.1, 1.1 * u.m ** -3)
    assert_can_handle_nparray(Debye_number)
def test_inertial_length():
    r"""Test the inertial_length function in parameters.py."""
    # ion inertial length: units and reference value
    assert inertial_length(n_i, particle="p").unit.is_equivalent(u.m)
    assert np.isclose(
        inertial_length(mu * u.cm ** -3, particle="p").cgs.value, 2.28e7, rtol=0.01
    )
    # positrons and electrons have the same inertial length
    inertial_length_electron_plus = inertial_length(5.351 * u.m ** -3, particle="e+")
    assert inertial_length_electron_plus == inertial_length(
        5.351 * u.m ** -3, particle="e"
    )
    assert inertial_length(n_i, particle="p") == inertial_length(n_i, particle="p")
    with pytest.warns(u.UnitsWarning):
        inertial_length(4, particle="p")
    with pytest.raises(u.UnitTypeError):
        inertial_length(4 * u.m ** -2, particle="p")
    with pytest.raises(ValueError):
        inertial_length(-5 * u.m ** -3, particle="p")
    with pytest.raises(InvalidParticleError):
        inertial_length(n_i, particle=-135)
    with pytest.warns(u.UnitsWarning):
        inertial_length_no_units = inertial_length(1e19, particle="p")
        assert inertial_length_no_units == inertial_length(
            1e19 * u.m ** -3, particle="p"
        )
    # electron inertial length: units and reference value
    assert inertial_length(n_e, "e-").unit.is_equivalent(u.m)
    assert np.isclose(
        inertial_length(1 * u.cm ** -3, "e-").cgs.value, 5.31e5, rtol=1e-3
    )
    with pytest.warns(u.UnitsWarning):
        inertial_length(5, "e-")
    with pytest.raises(u.UnitTypeError):
        inertial_length(5 * u.m, "e-")
    with pytest.raises(ValueError):
        inertial_length(-5 * u.m ** -3, "e-")
    with pytest.warns(u.UnitsWarning):
        assert inertial_length(1e19, "e-") == inertial_length(1e19 * u.m ** -3, "e-")
    assert_can_handle_nparray(inertial_length)
def test_magnetic_pressure():
    r"""Test the magnetic_pressure function in parameters.py."""
    assert magnetic_pressure(B_arr).unit.is_equivalent(u.Pa)
    assert magnetic_pressure(B).unit.is_equivalent(u.Pa)
    assert magnetic_pressure(B).unit.name == "Pa"
    # magnetic pressure equals magnetic energy density, regardless of units
    assert magnetic_pressure(B).value == magnetic_energy_density(B).value
    assert magnetic_pressure(B) == magnetic_energy_density(B.to(u.G))
    assert np.isclose(magnetic_pressure(B).value, 397887.35772973835)
    # invalid inputs
    with pytest.warns(u.UnitsWarning):
        magnetic_pressure(5)
    with pytest.raises(u.UnitTypeError):
        magnetic_pressure(5 * u.m)
    assert np.isnan(magnetic_pressure(np.nan * u.T))
    with pytest.raises(ValueError):
        magnetic_pressure(5j * u.T)
    assert np.isnan(magnetic_pressure(B_nanarr)[-1])
    with pytest.warns(u.UnitsWarning):
        assert magnetic_pressure(22.2) == magnetic_pressure(22.2 * u.T)
    assert_can_handle_nparray(magnetic_pressure)
def test_magnetic_energy_density():
    r"""Test the magnetic_energy_density function in parameters.py."""
    assert magnetic_energy_density(B_arr).unit.is_equivalent(u.J / u.m ** 3)
    assert magnetic_energy_density(B).unit.is_equivalent("J / m3")
    assert magnetic_energy_density(B).value == magnetic_pressure(B).value
    # the energy density scales as B**2
    assert_quantity_allclose(
        magnetic_energy_density(2 * B), 4 * magnetic_energy_density(B)
    )
    assert_quantity_allclose(magnetic_energy_density(B).value, 397887.35772973835)
    assert_quantity_allclose(
        magnetic_energy_density(B), magnetic_energy_density(B.to(u.G))
    )
    assert isinstance(magnetic_energy_density(B_arr), u.Quantity)
    # invalid inputs
    with pytest.warns(u.UnitsWarning):
        magnetic_energy_density(5)
    with pytest.raises(u.UnitTypeError):
        magnetic_energy_density(5 * u.m)
    assert np.isnan(magnetic_energy_density(np.nan * u.T))
    with pytest.raises(ValueError):
        magnetic_energy_density(5j * u.T)
    assert np.isnan(magnetic_energy_density(B_nanarr)[-1])
    with pytest.warns(u.UnitsWarning):
        assert magnetic_energy_density(22.2) == magnetic_energy_density(22.2 * u.T)
    assert_can_handle_nparray(magnetic_energy_density)
def test_upper_hybrid_frequency():
    r"""Test the upper_hybrid_frequency function in parameters.py."""
    omega_uh = upper_hybrid_frequency(B, n_e=n_e)
    omega_uh_hz = upper_hybrid_frequency(B, n_e=n_e, to_hz=True)
    omega_ce = gyrofrequency(B, "e-")
    omega_pe = plasma_frequency(n=n_e, particle="e-")
    assert omega_ce.unit.is_equivalent(u.rad / u.s)
    assert omega_pe.unit.is_equivalent(u.rad / u.s)
    assert omega_uh.unit.is_equivalent(u.rad / u.s)
    assert omega_uh_hz.unit.is_equivalent(u.Hz)
    # omega_uh**2 == omega_ce**2 + omega_pe**2
    left_hand_side = omega_uh ** 2
    right_hand_side = omega_ce ** 2 + omega_pe ** 2
    assert np.isclose(left_hand_side.value, right_hand_side.value)
    assert np.isclose(omega_uh_hz.value, 69385868857.90918)
    with pytest.raises(ValueError):
        upper_hybrid_frequency(5 * u.T, n_e=-1 * u.m ** -3)
    with pytest.warns(u.UnitsWarning):
        assert upper_hybrid_frequency(1.2, 1.3) == upper_hybrid_frequency(
            1.2 * u.T, 1.3 * u.m ** -3
        )
    with pytest.warns(u.UnitsWarning):
        assert upper_hybrid_frequency(1.4 * u.T, 1.3) == upper_hybrid_frequency(
            1.4, 1.3 * u.m ** -3
        )
    assert_can_handle_nparray(upper_hybrid_frequency)
def test_lower_hybrid_frequency():
    r"""Test the lower_hybrid_frequency function in parameters.py."""
    ion = "He-4 1+"
    omega_ci = gyrofrequency(B, particle=ion)
    omega_pi = plasma_frequency(n=n_i, particle=ion)
    omega_ce = gyrofrequency(B, "e-")
    omega_lh = lower_hybrid_frequency(B, n_i=n_i, ion=ion)
    omega_lh_hz = lower_hybrid_frequency(B, n_i=n_i, ion=ion, to_hz=True)
    assert omega_ci.unit.is_equivalent(u.rad / u.s)
    assert omega_pi.unit.is_equivalent(u.rad / u.s)
    assert omega_ce.unit.is_equivalent(u.rad / u.s)
    assert omega_lh.unit.is_equivalent(u.rad / u.s)
    # 1/omega_lh**2 == 1/(omega_ci**2 + omega_pi**2) + 1/(omega_ci*omega_ce)
    left_hand_side = omega_lh ** -2
    right_hand_side = (
        1 / (omega_ci ** 2 + omega_pi ** 2) + omega_ci ** -1 * omega_ce ** -1
    )
    assert np.isclose(left_hand_side.value, right_hand_side.value)
    assert np.isclose(omega_lh_hz.value, 299878691.3223296)
    # invalid particle / density / field inputs
    with pytest.raises(ValueError):
        lower_hybrid_frequency(0.2 * u.T, n_i=5e19 * u.m ** -3, ion="asdfasd")
    with pytest.raises(ValueError):
        lower_hybrid_frequency(0.2 * u.T, n_i=-5e19 * u.m ** -3, ion="asdfasd")
    with pytest.raises(ValueError):
        lower_hybrid_frequency(np.nan * u.T, n_i=-5e19 * u.m ** -3, ion="asdfasd")
    with pytest.warns(u.UnitsWarning):
        assert lower_hybrid_frequency(1.3, 1e19, "p+") == lower_hybrid_frequency(
            1.3 * u.T, 1e19 * u.m ** -3, "p+"
        )
    assert_can_handle_nparray(lower_hybrid_frequency)
def test_Bohm_diffusion():
    r"""Test the Bohm_diffusion function in parameters.py."""
    # (docstring previously said "Test Mag_Reynolds in dimensionless.py" —
    # a copy/paste error; this function tests Bohm_diffusion)
    T_e = 5000 * u.K
    B = 10 * u.T
    assert (Bohm_diffusion(T_e, B)).unit == u.m ** 2 / u.s
    with pytest.warns(u.UnitsWarning):
        Bohm_diffusion(5000, B)
    with pytest.raises(u.UnitTypeError):
        Bohm_diffusion(2.2 * u.kg, B)
@pytest.mark.parametrize(
    "alias, parent",
    [
        # (alias, canonical function) pairs; each alias must be the very
        # same object as the function it abbreviates
        (rho_, mass_density),
        (va_, Alfven_speed),
        (cs_, ion_sound_speed),
        (pth_, thermal_pressure),
        (betaH_, Hall_parameter),
        (oc_, gyrofrequency),
        (wc_, gyrofrequency),
        (rc_, gyroradius),
        (rhoc_, gyroradius),
        (wp_, plasma_frequency),
        (lambdaD_, Debye_length),
        (nD_, Debye_number),
        (cwp_, inertial_length),
        (pmag_, magnetic_pressure),
        (ub_, magnetic_energy_density),
        (wuh_, upper_hybrid_frequency),
        (wlh_, lower_hybrid_frequency),
        (DB_, Bohm_diffusion),
    ],
)
def test_parameters_aliases(alias, parent):
    """Test all aliases defined in parameters.py"""
    assert alias is parent
# (extraction artifact: stray dataset metadata row "33.692373 | 96 | 0.525014" removed)
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import m_e, m_p
from astropy.tests.helper import assert_quantity_allclose
from plasmapy.formulary.parameters import (
Alfven_speed,
betaH_,
Bohm_diffusion,
cs_,
cwp_,
DB_,
Debye_length,
Debye_number,
gyrofrequency,
gyroradius,
Hall_parameter,
inertial_length,
ion_sound_speed,
lambdaD_,
lower_hybrid_frequency,
magnetic_energy_density,
magnetic_pressure,
mass_density,
nD_,
oc_,
plasma_frequency,
pmag_,
pth_,
rc_,
rho_,
rhoc_,
thermal_pressure,
thermal_speed,
ub_,
upper_hybrid_frequency,
va_,
wc_,
wlh_,
wp_,
wuh_,
)
from plasmapy.particles import Particle
from plasmapy.particles.exceptions import InvalidParticleError
from plasmapy.utils.exceptions import (
PhysicsError,
PhysicsWarning,
PlasmaPyFutureWarning,
RelativityError,
RelativityWarning,
)
from plasmapy.utils.pytest_helpers import assert_can_handle_nparray
# Baseline field, species, and derived plasma quantities shared by the tests.
B = 1.0 * u.T
Z = 1
ion = "p"
m_i = m_p
n_i = 5e19 * u.m ** -3
n_e = Z * 5e19 * u.m ** -3
rho = n_i * m_i + n_e * m_e  # mass density of a quasineutral p/e plasma
T_e = 1e6 * u.K
T_i = 1e6 * u.K
k_1 = 3e1 * u.m ** -1  # long-wavelength regime
k_2 = 3e7 * u.m ** -1  # short-wavelength regime
# Array-valued inputs — including NaN, inf, and negative entries — used to
# probe element-wise NaN propagation and error handling.
B_arr = np.array([0.001, 0.002]) * u.T
B_nanarr = np.array([0.001, np.nan]) * u.T
B_allnanarr = np.array([np.nan, np.nan]) * u.T
rho_arr = np.array([5e-10, 2e-10]) * u.kg / u.m ** 3
rho_infarr = np.array([np.inf, 5e19]) * u.m ** -3
rho_negarr = np.array([-5e19, 6e19]) * u.m ** -3
T_arr = np.array([1e6, 2e6]) * u.K
T_nanarr = np.array([1e6, np.nan]) * u.K
T_nanarr2 = np.array([np.nan, 2e6]) * u.K
T_allnanarr = np.array([np.nan, np.nan]) * u.K
T_negarr = np.array([1e6, -5151.0]) * u.K
V = 25.2 * u.m / u.s
V_arr = np.array([25, 50]) * u.m / u.s
V_nanarr = np.array([25, np.nan]) * u.m / u.s
V_allnanarr = np.array([np.nan, np.nan]) * u.m / u.s
mu = m_p.to(u.u).value  # proton mass in atomic mass units
class Test_mass_density:
    """Test `~plasmapy.formulary.parameters.mass_density`."""

    @pytest.mark.parametrize(
        "args, kwargs, conditional",
        [
            ((-1 * u.kg * u.m ** -3, "He"), {}, pytest.raises(ValueError)),
            ((-1 * u.m ** -3, "He"), {}, pytest.raises(ValueError)),
            (("not a Quantity", "He"), {}, pytest.raises(TypeError)),
            ((1 * u.m ** -3,), {}, pytest.raises(TypeError)),
            ((1 * u.J, "He"), {}, pytest.raises(u.UnitTypeError)),
            ((1 * u.m ** -3, None), {}, pytest.raises(TypeError)),
            (
                (1 * u.m ** -3, "He"),
                {"z_ratio": "not a ratio"},
                pytest.raises(TypeError),
            ),
        ],
    )
    def test_raises(self, args, kwargs, conditional):
        """Test scenarios that raise exceptions."""
        with conditional:
            mass_density(*args, **kwargs)

    @pytest.mark.parametrize(
        "args, kwargs, expected",
        [
            ((1.0 * u.g * u.m ** -3, ""), {}, 1.0e-3 * u.kg * u.m ** -3),
            ((5.0e12 * u.cm ** -3, "He"), {}, 3.32323849e-8 * u.kg * u.m ** -3),
            (
                (5.0e12 * u.cm ** -3, Particle("He")),
                {},
                3.32323849e-8 * u.kg * u.m ** -3,
            ),
            (
                (5.0e12 * u.cm ** -3, "He"),
                {"z_ratio": 0.5},
                1.66161925e-08 * u.kg * u.m ** -3,
            ),
            (
                (5.0e12 * u.cm ** -3, "He"),
                {"z_ratio": -0.5},
                1.66161925e-08 * u.kg * u.m ** -3,
            ),
        ],
    )
    def test_values(self, args, kwargs, expected):
        """Test expected values."""
        assert np.isclose(mass_density(*args, **kwargs), expected)

    def test_handle_nparrays(self):
        """Test that `mass_density` can handle numpy array quantities."""
        assert_can_handle_nparray(mass_density)
class TestAlfvenSpeed:
    """Test `~plasmapy.formulary.parameters.Alfven_speed`."""

    @pytest.mark.parametrize("alias", [va_])
    def test_aliases(self, alias):
        """Test that the alias is the very same object as the parent."""
        assert alias is Alfven_speed

    @pytest.mark.parametrize(
        "args, kwargs, _error",
        [
            ((10 * u.T, 1.0e-10 * u.kg * u.m ** -3), {}, RelativityError),
            ((np.inf * u.T, 1 * u.m ** -3), {"ion": "p"}, RelativityError),
            ((-np.inf * u.T, 1 * u.m ** -3), {"ion": "p"}, RelativityError),
            ((1 * u.T, 5e19 * u.m ** -3), {"ion": "spacecats"}, InvalidParticleError),
            (("not a Bfield", 1.0e-10 * u.kg * u.m ** -3), {}, TypeError),
            ((10 * u.T, "not a density"), {}, TypeError),
            ((10 * u.T, 5), {"ion": "p"}, TypeError),
            ((1 * u.T, 1.0e18 * u.m ** -3), {"ion": ["He"]}, TypeError),
            ((1 * u.T, 1.0e18 * u.m ** -3), {"ion": "He", "z_mean": "nope"}, TypeError),
            ((1 * u.T, 1.0e18 * u.cm), {"ion": "He"}, u.UnitTypeError),
            ((1 * u.T, 5 * u.m ** -2), {"ion": "p"}, u.UnitTypeError),
            ((1 * u.cm, 1.0e18 * u.m ** -3), {"ion": "He"}, u.UnitTypeError),
            ((5 * u.A, 5e19 * u.m ** -3), {"ion": "p"}, u.UnitTypeError),
            ((1 * u.T, -1.0e18 * u.m ** -3), {"ion": "He"}, ValueError),
            (
                (np.array([5, 6, 7]) * u.T, np.array([5, 6]) * u.m ** -3),
                {"ion": "p"},
                ValueError,
            ),
            (
                (np.array([0.001, 0.002]) * u.T, np.array([-5e19, 6e19]) * u.m ** -3),
                {"ion": "p"},
                ValueError,
            ),
        ],
    )
    def test_raises(self, args, kwargs, _error):
        """Test scenarios that raise exceptions."""
        with pytest.raises(_error):
            Alfven_speed(*args, **kwargs)

    @pytest.mark.parametrize(
        "args, kwargs, expected, isclose_kw, _warning",
        [
            (
                (5 * u.T, 5e19 * u.m ** -3),
                {"ion": "H"},
                15413707.39,
                {},
                RelativityWarning,
            ),
            (
                (5 * u.T, 5e19 * u.m ** -3),
                {"ion": "H+"},
                15413707.39,
                {"rtol": 3.0e-4},
                RelativityWarning,
            ),
            (
                (5 * u.T, 5e19 * u.m ** -3),
                {"ion": "p"},
                15413707.39,
                {"rtol": 4.0e-4},
                RelativityWarning,
            ),
            # a unitless magnetic field issues u.UnitsWarning
            ((0.5, 1.0e18 * u.m ** -3), {"ion": "He"}, 5470657.93, {}, u.UnitsWarning),
        ],
    )
    def test_warns(self, args, kwargs, expected, isclose_kw, _warning):
        """Test scenarios that issue warnings."""
        with pytest.warns(_warning):
            val = Alfven_speed(*args, **kwargs)
            assert isinstance(val, u.Quantity)
            assert val.unit == u.m / u.s
            assert np.isclose(val.value, expected, **isclose_kw)

    @pytest.mark.parametrize(
        "args, kwargs, expected, isclose_kw",
        [
            (
                (1 * u.T, 1e-8 * u.kg * u.m ** -3),
                {"ion": "p"},
                8920620.58 * u.m / u.s,
                {"rtol": 1e-6},
            ),
            (
                (1 * u.T, 1e-8 * u.kg * u.m ** -3),
                {},
                8920620.58 * u.m / u.s,
                {"rtol": 1e-6},
            ),
            (
                (0.05 * u.T, 1e18 * u.m ** -3),
                {"ion": "He"},
                Alfven_speed(0.05 * u.T, 6.64738793e-09 * u.kg * u.m ** -3),
                {},
            ),
            (
                (0.05 * u.T, 1e18 * u.m ** -3),
                {"ion": "He+"},
                Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion="He"),
                {"rtol": 7e-5},
            ),
            (
                (0.05 * u.T, 1e18 * u.m ** -3),
                {"ion": "He", "z_mean": 2},
                Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion="He +2"),
                {"rtol": 1.4e-4},
            ),
            (
                (0.05 * u.T, 1e18 * u.m ** -3),
                {"ion": Particle("He+")},
                Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion="He+"),
                {},
            ),
            (
                ([0.001, 0.002] * u.T, 5e-10 * u.kg * u.m ** -3),
                {},
                [
                    va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,
                    va_(0.002 * u.T, 5e-10 * u.kg * u.m ** -3).value,
                ]
                * (u.m / u.s),
                {},
            ),
            (
                ([0.001, 0.002] * u.T, [5e-10, 2e-10] * u.kg * u.m ** -3),
                {},
                [
                    va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,
                    va_(0.002 * u.T, 2e-10 * u.kg * u.m ** -3).value,
                ]
                * (u.m / u.s),
                {},
            ),
            (
                (0.001 * u.T, [1.0e18, 2e18] * u.m ** -3),
                {"ion": "p"},
                [
                    va_(0.001 * u.T, 1e18 * u.m ** -3, ion="p").value,
                    va_(0.001 * u.T, 2e18 * u.m ** -3, ion="p").value,
                ]
                * (u.m / u.s),
                {},
            ),
        ],
    )
    def test_values(self, args, kwargs, expected, isclose_kw):
        """Test expected values."""
        assert np.allclose(Alfven_speed(*args, **kwargs), expected, **isclose_kw)

    @pytest.mark.parametrize(
        "args, kwargs, nan_mask",
        [
            # empty mask => scalar NaN result; otherwise element-wise pattern
            ((np.nan * u.T, 1 * u.kg * u.m ** -3), {}, []),
            ((0.001 * u.T, np.nan * u.kg * u.m ** -3), {}, []),
            (([np.nan, 0.001] * u.T, 1 * u.kg * u.m ** -3), {}, [True, False]),
            (
                (0.001 * u.T, [np.nan, 1.0, np.nan] * u.kg * u.m ** -3),
                {},
                [True, False, True],
            ),
            (([np.nan, 0.001] * u.T, [1, np.nan] * u.kg * u.m ** -3), {}, [True, True]),
            (
                (0.001 * u.T, [np.nan, 1e18, np.nan] * u.m ** -3),
                {"ion": "Ar+"},
                [True, False, True],
            ),
        ],
    )
    def test_nan_values(self, args, kwargs, nan_mask):
        """Input scenarios that lead to `numpy.nan` values being returned."""
        val = Alfven_speed(*args, **kwargs)
        if np.isscalar(val.value):
            assert np.isnan(val)
        else:
            nan_arr = np.isnan(val)
            assert np.all(nan_arr[nan_mask])
            assert np.all(np.logical_not(nan_arr[np.logical_not(nan_mask)]))

    def test_handle_nparrays(self):
        """Test that `Alfven_speed` can handle numpy array quantities."""
        assert_can_handle_nparray(Alfven_speed)
class Test_Ion_Sound_Speed:
    @pytest.mark.parametrize(
        "args, kwargs, expected, isclose_kw",
        [
            (
                (),
                {
                    "T_i": 1.3232 * u.MK,
                    "T_e": 1.831 * u.MK,
                    "ion": "p",
                    "gamma_e": 1,
                    "gamma_i": 3,
                },
                218816.06086407552 * (u.m / u.s),
                {},
            ),
            (
                # positional call (T_e, T_i, ion) must match the keyword call
                (1.831 * u.MK, 1.3232 * u.MK, "p"),
                {},
                218816.06086407552 * (u.m / u.s),
                {},
            ),
            (
                (),
                {
                    "T_i": 1.3232 * u.MK,
                    "T_e": 1.831 * u.MK,
                    "n_e": n_e,
                    "k": k_1,
                    "ion": "p",
                    "gamma_e": 1,
                    "gamma_i": 3,
                },
                218816.06086407552 * (u.m / u.s),
                {},
            ),
            (
                (),
                {
                    "T_i": 1.3232 * u.MK,
                    "T_e": 1.831 * u.MK,
                    "n_e": n_e,
                    "k": k_2,
                    "ion": "p",
                    "gamma_e": 1,
                    "gamma_i": 3,
                },
                552.3212936293337 * (u.m / u.s),
                {},
            ),
            (
                (),
                {
                    "T_i": 0.88 * u.MK,
                    "T_e": 1.28 * u.MK,
                    "n_e": n_e,
                    "k": 0 * u.m ** -1,
                    "ion": "p",
                    "gamma_e": 1.2,
                    "gamma_i": 3.4,
                },
                193328.52857788358 * (u.m / u.s),
                {},
            ),
            (
                (),
                {"T_i": T_i, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p+"},
                ion_sound_speed(T_i=T_i, T_e=0 * u.K, n_e=n_e, k=k_1, ion="p+").value
                * (u.m / u.s),
                {},
            ),
            (
                # user-supplied z_mean
                (),
                {
                    "T_e": 1.2e6 * u.K,
                    "T_i": 0 * u.K,
                    "n_e": n_e,
                    "k": 0 * u.m ** -1,
                    "z_mean": 0.8,
                    "ion": "p",
                },
                89018.09 * (u.m / u.s),
                {"atol": 0.0, "rtol": 1e-6},
            ),
        ],
    )
    def test_values(self, args, kwargs, expected, isclose_kw):
        """Test scenarios with known expected values."""
        assert np.isclose(ion_sound_speed(*args, **kwargs), expected, **isclose_kw)
@pytest.mark.parametrize(
"kwargs1, kwargs2, _warning",
[
({"T_i": T_i, "T_e": T_e, "n_e": n_e, "ion": "p"}, {}, PhysicsWarning),
({"T_i": T_i, "T_e": T_e, "k": k_1, "ion": "p"}, {}, PhysicsWarning),
({"T_i": 5e11 * u.K, "T_e": 0 * u.K, "ion": "p"}, {}, RelativityWarning),
(
{"T_e": 1.2e6, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
{"T_e": 1.2e6 * u.K, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
u.UnitsWarning,
),
(
{"T_i": 1.3e6, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
{"T_i": 1.3e6 * u.K, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
u.UnitsWarning,
),
],
)
def test_warns(self, kwargs1, kwargs2, _warning):
with pytest.warns(_warning):
val = ion_sound_speed(**kwargs1)
if kwargs2 != {}:
val == ion_sound_speed(**kwargs2)
@pytest.mark.parametrize(
"args, kwargs, _error",
[
(
(),
{
"T_i": T_i,
"T_e": T_e,
"n_e": n_e,
"k": k_1,
"ion": "p",
"gamma_i": np.inf,
},
RelativityError,
),
(
(),
{
"T_i": np.array([5, 6, 5]) * u.K,
"T_e": np.array([3, 4]) * u.K,
"n_e": np.array([5, 6, 5]) * u.m ** -3,
"k": np.array([3, 4]) * u.m ** -3,
"ion": "p",
},
u.UnitTypeError,
),
((5 * u.T), {"ion": "p"}, TypeError),
((), {"ion": "p"}, TypeError),
(
(),
{"T_i": T_i, "T_e": 0 * u.K, "gamma_i": 0.9999, "ion": "p"},
PhysicsError,
),
(
(),
{"T_i": T_i, "T_e": 0 * u.K, "gamma_e": 0.9999, "ion": "p"},
PhysicsError,
),
(
(),
{"T_i": T_i, "T_e": 0 * u.K, "gamma_e": "sdjklsf", "ion": "p"},
TypeError,
),
(
(),
{"T_i": T_i, "T_e": 0 * u.K, "gamma_i": "fsdfas", "ion": "p"},
TypeError,
),
((), {"T_i": T_i, "T_e": 0 * u.K, "ion": "cupcakes"}, InvalidParticleError),
((), {"T_i": -np.abs(T_i), "T_e": 0 * u.K, "ion": "p"}, ValueError),
(
(),
{"T_i": T_i, "T_e": 0 * u.K, "n_e": -np.abs(n_e), "k": k_1, "ion": "p"},
ValueError,
),
(
(),
{"T_i": T_i, "T_e": 0 * u.K, "n_e": n_e, "k": -np.abs(k_1), "ion": "p"},
ValueError,
),
((), {"T_i": 5e19 * u.K, "T_e": 0 * u.K, "ion": "p"}, RelativityError),
(
(),
{"T_i": 5 * u.A, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
u.UnitTypeError,
),
(
(),
{"T_i": T_negarr, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
ValueError,
),
(
(),
{"T_e": T_negarr, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"},
ValueError,
),
],
)
def test_raises(self, args, kwargs, _error):
with pytest.raises(_error):
ion_sound_speed(*args, **kwargs)
@pytest.mark.parametrize(
"kwargs",
[
({"T_i": T_nanarr, "T_e": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"}),
({"T_e": T_nanarr, "T_i": 0 * u.K, "n_e": n_e, "k": k_1, "ion": "p"}),
],
)
def test_nan_values(self, kwargs):
np.isnan(ion_sound_speed(**kwargs)[1])
def test_handle_nparrays(self):
assert_can_handle_nparray(ion_sound_speed)
def test_thermal_pressure():
    """`thermal_pressure` yields a pressure and accepts numpy-array inputs."""
    pressure = thermal_pressure(T_e, n_i)
    assert pressure.unit.is_equivalent(u.Pa)
    assert_can_handle_nparray(thermal_pressure)
def test_gyrofrequency():
    """Test `gyrofrequency` for electrons and ions: units, known values,
    sign handling, unit validation, NaNs, and user-specified Z."""
    assert gyrofrequency(B, "e-").unit.is_equivalent(u.rad / u.s)
    assert gyrofrequency(B, "e-", to_hz=True).unit.is_equivalent(u.Hz)
    assert np.isclose(gyrofrequency(1 * u.T, "e-").value, 175882008784.72018)
    assert np.isclose(gyrofrequency(2.4 * u.T, "e-").value, 422116821083.3284)
    assert np.isclose(
        gyrofrequency(1 * u.T, "e-", to_hz=True).value, 27992490076.528206
    )
    # signed=True keeps the electron's negative-charge sign.
    assert np.isclose(
        gyrofrequency(2.4 * u.T, "e-", signed=True).value, -422116821083.3284
    )
    assert np.isclose(gyrofrequency(1 * u.G, "e-").cgs.value, 1.76e7, rtol=1e-3)
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            gyrofrequency(u.m, "e-")
    with pytest.raises(u.UnitTypeError):
        gyrofrequency(u.m * 1, "e-")
    assert np.isnan(gyrofrequency(B_nanarr, "e-")[-1])
    # rad/s -> Hz conversion agrees with an explicit division by 2*pi.
    omega_ce = gyrofrequency(2.2 * u.T, "e-")
    f_ce = (omega_ce / (2 * np.pi)) / u.rad
    f_ce_use_equiv = omega_ce.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])
    assert np.isclose(f_ce.value, f_ce_use_equiv.value)
    with pytest.warns(u.UnitsWarning):
        assert gyrofrequency(5.0, "e-") == gyrofrequency(5.0 * u.T, "e-")
    assert gyrofrequency(B, particle=ion).unit.is_equivalent(u.rad / u.s)
    assert np.isclose(gyrofrequency(1 * u.T, particle="p").value, 95788335.834874)
    assert np.isclose(gyrofrequency(2.4 * u.T, particle="p").value, 229892006.00369796)
    assert np.isclose(gyrofrequency(1 * u.G, particle="p").cgs.value, 9.58e3, rtol=2e-3)
    # Unsigned gyrofrequency is independent of the field's sign.
    assert gyrofrequency(-5 * u.T, "p") == gyrofrequency(5 * u.T, "p")
    assert gyrofrequency(B, particle="e+") == gyrofrequency(B, "e-")
    with pytest.warns(u.UnitsWarning):
        gyrofrequency(8, "p")
    with pytest.raises(u.UnitTypeError):
        gyrofrequency(5 * u.m, "p")
    with pytest.raises(InvalidParticleError):
        gyrofrequency(8 * u.T, particle="asdfasd")
    with pytest.warns(u.UnitsWarning):
        assert gyrofrequency(5.0, "p") == gyrofrequency(5.0 * u.T, "p")
    # Smoke call: must not raise.
    gyrofrequency(1 * u.T, particle="p")
    # testing for user input Z
    testMeth1 = gyrofrequency(1 * u.T, particle="p", Z=0.8).si.value
    testTrue1 = 76630665.79318453
    errStr = f"gyrofrequency() gave {testMeth1}, should be {testTrue1}."
    assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-5), errStr
    assert_can_handle_nparray(gyrofrequency, kwargs={"signed": True})
    assert_can_handle_nparray(gyrofrequency, kwargs={"signed": False})
def test_gyroradius():
    """Test `gyroradius` for electrons and ions, specified either via a
    perpendicular speed (Vperp) or a temperature (T) — never both."""
    assert gyroradius(B, "e-", T=T_e).unit.is_equivalent(u.m)
    assert gyroradius(B, "e-", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)
    # test for possibility to allow nan for input values
    assert np.isnan(gyroradius(np.nan * u.T, particle="e-", T=1 * u.K))
    assert np.isnan(gyroradius(1 * u.T, particle="e-", T=np.nan * u.K))
    assert np.isnan(gyroradius(1 * u.T, particle="e-", Vperp=np.nan * u.m / u.s))
    Vperp = 1e6 * u.m / u.s
    Bmag = 1 * u.T
    omega_ce = gyrofrequency(Bmag, "e-")
    # r = Vperp / omega_c; dimensionless_angles strips the radian.
    analytical_result = (Vperp / omega_ce).to(
        u.m, equivalencies=u.dimensionless_angles()
    )
    assert gyroradius(Bmag, "e-", Vperp=Vperp) == analytical_result
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            gyroradius(u.T, "e-")
    with pytest.raises(u.UnitTypeError):
        gyroradius(5 * u.A, "e-", Vperp=8 * u.m / u.s)
    with pytest.raises(u.UnitTypeError):
        gyroradius(5 * u.T, "e-", Vperp=8 * u.m)
    with pytest.raises(ValueError):
        gyroradius(np.array([5, 6]) * u.T, "e-", Vperp=np.array([5, 6, 7]) * u.m / u.s)
    assert np.isnan(gyroradius(np.nan * u.T, "e-", Vperp=1 * u.m / u.s))
    with pytest.raises(ValueError):
        gyroradius(3.14159 * u.T, "e-", T=-1 * u.K)
    with pytest.warns(u.UnitsWarning):
        assert gyroradius(1.0, "e-", Vperp=1.0) == gyroradius(
            1.0 * u.T, "e-", Vperp=1.0 * u.m / u.s
        )
    with pytest.warns(u.UnitsWarning):
        assert gyroradius(1.1, "e-", T=1.2) == gyroradius(1.1 * u.T, "e-", T=1.2 * u.K)
    # Supplying both Vperp and T is ambiguous and must raise.
    with pytest.raises(ValueError):
        gyroradius(1.1 * u.T, "e-", Vperp=1 * u.m / u.s, T=1.2 * u.K)
    with pytest.raises(u.UnitTypeError):
        gyroradius(1.1 * u.T, "e-", Vperp=1.1 * u.m, T=1.2 * u.K)
    # Check for Deprecation warning when using T_i instead of T
    with pytest.warns(PlasmaPyFutureWarning):
        gyroradius(1.1 * u.T, "e-", T_i=1.2 * u.K)
    assert gyroradius(B, particle="p", T=T_i).unit.is_equivalent(u.m)
    assert gyroradius(B, particle="p", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)
    # Case when Z=1 is assumed
    assert np.isclose(
        gyroradius(B, particle="p", T=T_i),
        gyroradius(B, particle="H+", T=T_i),
        atol=1e-6 * u.m,
    )
    # Gyroradius is independent of the sign of Vperp.
    gyroPos = gyroradius(B, particle="p", Vperp=V)
    gyroNeg = gyroradius(B, particle="p", Vperp=-V)
    assert gyroPos == gyroNeg
    Vperp = 1e6 * u.m / u.s
    Bmag = 1 * u.T
    omega_ci = gyrofrequency(Bmag, particle="p")
    analytical_result = (Vperp / omega_ci).to(
        u.m, equivalencies=u.dimensionless_angles()
    )
    assert gyroradius(Bmag, particle="p", Vperp=Vperp) == analytical_result
    # A thermal Vperp must reproduce the temperature-based result.
    T2 = 1.2 * u.MK
    B2 = 123 * u.G
    particle2 = "alpha"
    Vperp2 = thermal_speed(T2, particle=particle2)
    gyro_by_vperp = gyroradius(B2, particle="alpha", Vperp=Vperp2)
    assert gyro_by_vperp == gyroradius(B2, particle="alpha", T=T2)
    explicit_positron_gyro = gyroradius(1 * u.T, particle="positron", T=1 * u.MK)
    assert explicit_positron_gyro == gyroradius(1 * u.T, "e-", T=1 * u.MK)
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            gyroradius(u.T, particle="p", Vperp=8 * u.m / u.s)
    with pytest.raises(ValueError):
        gyroradius(B, particle="p", T=-1 * u.K)
    with pytest.warns(u.UnitsWarning):
        gyro_without_units = gyroradius(1.0, particle="p", Vperp=1.0)
        gyro_with_units = gyroradius(1.0 * u.T, particle="p", Vperp=1.0 * u.m / u.s)
        assert gyro_without_units == gyro_with_units
    with pytest.warns(u.UnitsWarning):
        gyro_t_without_units = gyroradius(1.1, particle="p", T=1.2)
        gyro_t_with_units = gyroradius(1.1 * u.T, particle="p", T=1.2 * u.K)
        assert gyro_t_with_units == gyro_t_without_units
    with pytest.raises(ValueError):
        gyroradius(1.1 * u.T, particle="p", Vperp=1 * u.m / u.s, T=1.2 * u.K)
    with pytest.raises(u.UnitTypeError):
        gyroradius(1.1 * u.T, particle="p", Vperp=1.1 * u.m, T=1.2 * u.K)
    # NOTE(review): near-duplicate of the previous check with swapped values;
    # possibly intended to cover a different argument — confirm.
    with pytest.raises(u.UnitTypeError):
        gyroradius(1.1 * u.T, particle="p", Vperp=1.2 * u.m, T=1.1 * u.K)
class Test_gyroradius:
# some custom numpy array tests here, because of the T / Vperp situation
def test_handle_numpy_array(self):
# Tests to verify that can handle Quantities with numpy array as the value:
assert gyroradius(B_arr, "e-", Vperp=V_arr)[0] == gyroradius(
B_arr[0], "e-", Vperp=V_arr[0]
)
assert gyroradius(B_arr, "e-", T=T_arr)[0] == gyroradius(
B_arr[0], "e-", T=T_arr[0]
)
def test_handle_mixed_Qarrays(self):
# If both Vperp or T are input as Qarrays, but only one of the two is valid
# at each element, then that's fine, the function should work:
assert gyroradius(B_arr, "e-", Vperp=V_nanarr, T=T_nanarr2)[0] == gyroradius(
B_arr[0], "e-", Vperp=V_nanarr[0], T=T_nanarr2[0]
)
def test_raise_two_valid_inputs(self):
with pytest.raises(ValueError):
gyroradius(B_arr, "e-", Vperp=V, T=T_arr)
with pytest.raises(ValueError):
gyroradius(B_arr, "e-", Vperp=V_arr, T=T_i)
def test_all_valid_and_one_valid(self):
with pytest.raises(ValueError):
gyroradius(B_arr, "e-", Vperp=V, T=T_nanarr)
with pytest.raises(ValueError):
gyroradius(B_arr, "e-", Vperp=V_nanarr, T=T_i)
def test_scalar_and_nan_qarray(self):
assert np.all(np.isfinite(gyroradius(B_arr, "e-", Vperp=V, T=T_allnanarr)))
assert np.all(np.isfinite(gyroradius(B_arr, "e-", Vperp=V_allnanarr, T=T_i)))
def test_keeps_arguments_unchanged(self):
Vperp1 = u.Quantity([np.nan, 1], unit=u.m / u.s)
Vperp2 = u.Quantity([np.nan, 1], unit=u.m / u.s)
T_i = u.Quantity([1, np.nan], unit=u.K)
gyroradius(B_arr, "e-", Vperp=Vperp1, T=T_i)
assert_quantity_allclose(Vperp1, Vperp2)
def test_plasma_frequency():
    """Test `plasma_frequency` for electrons and ions: units, known values,
    unit validation, NaNs, and user-specified z_mean."""
    assert plasma_frequency(n_e, "e-").unit.is_equivalent(u.rad / u.s)
    assert plasma_frequency(n_e, "e-", to_hz=True).unit.is_equivalent(u.Hz)
    assert np.isclose(plasma_frequency(1 * u.cm ** -3, "e-").value, 5.64e4, rtol=1e-2)
    assert np.isclose(
        plasma_frequency(1 * u.cm ** -3, particle="N").value, 3.53e2, rtol=1e-1
    )
    assert np.isclose(
        plasma_frequency(1 * u.cm ** -3, particle="N", to_hz=True).value,
        56.19000195094519,
    )
    with pytest.raises(TypeError):
        with pytest.warns(u.UnitsWarning):
            plasma_frequency(u.m ** -3, "e-")
    with pytest.raises(u.UnitTypeError):
        plasma_frequency(5 * u.m ** -2, "e-")
    assert np.isnan(plasma_frequency(np.nan * u.m ** -3, "e-"))
    with pytest.warns(u.UnitsWarning):
        assert plasma_frequency(1e19, "e-") == plasma_frequency(1e19 * u.m ** -3, "e-")
    assert plasma_frequency(n_i, particle="p").unit.is_equivalent(u.rad / u.s)
    # "H-1+" and "p" are the same particle.
    assert plasma_frequency(n_i, particle="H-1+") == plasma_frequency(n_i, particle="p")
    assert np.isclose(
        plasma_frequency(mu * u.cm ** -3, particle="p").value, 1.32e3, rtol=1e-2
    )
    with pytest.raises(ValueError):
        plasma_frequency(n=5 * u.m ** -3, particle="sdfas")
    with pytest.warns(u.UnitsWarning):
        plasma_freq_no_units = plasma_frequency(1e19, particle="p")
    assert plasma_freq_no_units == plasma_frequency(1e19 * u.m ** -3, particle="p")
    # Smoke call: must not raise.
    plasma_frequency(1e17 * u.cm ** -3, particle="p")
    # Testing for user input z_mean.
    testMeth1 = plasma_frequency(1e17 * u.cm ** -3, particle="p", z_mean=0.8).si.value
    testTrue1 = 333063562455.4028
    errStr = f"plasma_frequency() gave {testMeth1}, should be {testTrue1}."
    assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-6), errStr
    assert_can_handle_nparray(plasma_frequency)
def test_Debye_length():
    """Test `Debye_length`: units, a known value, and input validation."""
    assert Debye_length(T_e, n_e).unit.is_equivalent(u.m)
    assert np.isclose(Debye_length(1 * u.eV, 1 * u.cm ** -3).value, 7.43, atol=0.005)
    with pytest.warns(u.UnitsWarning):
        Debye_length(5, 5 * u.m ** -3)
    with pytest.raises(u.UnitTypeError):
        Debye_length(56 * u.kg, 5 * u.m ** -3)
    with pytest.raises(ValueError):
        Debye_length(5 * u.eV, -5 * u.m ** -3)
    with pytest.raises(ValueError):
        Debye_length(-45 * u.K, 5 * u.m ** -3)
    # Mismatched array shapes must raise.
    Tarr2 = np.array([1, 2]) * u.K
    narr3 = np.array([1, 2, 3]) * u.m ** -3
    with pytest.raises(ValueError):
        Debye_length(Tarr2, narr3)
    # Unitless inputs warn but assume the default units.
    with pytest.warns(u.UnitsWarning):
        assert Debye_length(2.0, 2.0) == Debye_length(2.0 * u.K, 2.0 * u.m ** -3)
    with pytest.warns(u.UnitsWarning):
        assert Debye_length(2.0 * u.K, 2.0) == Debye_length(2.0, 2.0 * u.m ** -3)
    assert_can_handle_nparray(Debye_length)
def test_Debye_number():
    """Test `Debye_number`: dimensionless result, known value, validation."""
    assert Debye_number(T_e, n_e).unit.is_equivalent(u.dimensionless_unscaled)
    # Temperature in K and in eV must give the same Debye number.
    T_e_eV = T_e.to(u.eV, equivalencies=u.temperature_energy())
    assert np.isclose(Debye_number(T_e, n_e).value, Debye_number(T_e_eV, n_e).value)
    assert np.isclose(Debye_number(1 * u.eV, 1 * u.cm ** -3).value, 1720862385.43342)
    with pytest.warns(u.UnitsWarning):
        Debye_number(T_e, 4)
    with pytest.raises(ValueError):
        Debye_number(None, n_e)
    with pytest.raises(u.UnitTypeError):
        Debye_number(5 * u.m, 5 * u.m ** -3)
    with pytest.raises(u.UnitTypeError):
        Debye_number(5 * u.K, 5 * u.m ** 3)
    with pytest.raises(ValueError):
        Debye_number(5j * u.K, 5 * u.cm ** -3)
    # Mismatched array shapes must raise.
    Tarr2 = np.array([1, 2]) * u.K
    narr3 = np.array([1, 2, 3]) * u.m ** -3
    with pytest.raises(ValueError):
        Debye_number(Tarr2, narr3)
    with pytest.warns(u.UnitsWarning):
        assert Debye_number(1.1, 1.1) == Debye_number(1.1 * u.K, 1.1 * u.m ** -3)
    with pytest.warns(u.UnitsWarning):
        assert Debye_number(1.1 * u.K, 1.1) == Debye_number(1.1, 1.1 * u.m ** -3)
    assert_can_handle_nparray(Debye_number)
def test_inertial_length():
    """Test `inertial_length` for ions and electrons: units, known values,
    and input validation."""
    assert inertial_length(n_i, particle="p").unit.is_equivalent(u.m)
    assert np.isclose(
        inertial_length(mu * u.cm ** -3, particle="p").cgs.value, 2.28e7, rtol=0.01
    )
    # Positrons and electrons share the same inertial length.
    inertial_length_electron_plus = inertial_length(5.351 * u.m ** -3, particle="e+")
    assert inertial_length_electron_plus == inertial_length(
        5.351 * u.m ** -3, particle="e"
    )
    assert inertial_length(n_i, particle="p") == inertial_length(n_i, particle="p")
    with pytest.warns(u.UnitsWarning):
        inertial_length(4, particle="p")
    with pytest.raises(u.UnitTypeError):
        inertial_length(4 * u.m ** -2, particle="p")
    with pytest.raises(ValueError):
        inertial_length(-5 * u.m ** -3, particle="p")
    with pytest.raises(InvalidParticleError):
        inertial_length(n_i, particle=-135)
    with pytest.warns(u.UnitsWarning):
        inertial_length_no_units = inertial_length(1e19, particle="p")
    assert inertial_length_no_units == inertial_length(
        1e19 * u.m ** -3, particle="p"
    )
    assert inertial_length(n_e, "e-").unit.is_equivalent(u.m)
    assert np.isclose(
        inertial_length(1 * u.cm ** -3, "e-").cgs.value, 5.31e5, rtol=1e-3
    )
    with pytest.warns(u.UnitsWarning):
        inertial_length(5, "e-")
    with pytest.raises(u.UnitTypeError):
        inertial_length(5 * u.m, "e-")
    with pytest.raises(ValueError):
        inertial_length(-5 * u.m ** -3, "e-")
    with pytest.warns(u.UnitsWarning):
        assert inertial_length(1e19, "e-") == inertial_length(1e19 * u.m ** -3, "e-")
    assert_can_handle_nparray(inertial_length)
def test_magnetic_pressure():
    """Test `magnetic_pressure`: units, equality with the magnetic energy
    density, a known value, and input validation."""
    assert magnetic_pressure(B_arr).unit.is_equivalent(u.Pa)
    assert magnetic_pressure(B).unit.is_equivalent(u.Pa)
    assert magnetic_pressure(B).unit.name == "Pa"
    # Magnetic pressure and magnetic energy density are numerically equal.
    assert magnetic_pressure(B).value == magnetic_energy_density(B).value
    assert magnetic_pressure(B) == magnetic_energy_density(B.to(u.G))
    assert np.isclose(magnetic_pressure(B).value, 397887.35772973835)
    with pytest.warns(u.UnitsWarning):
        magnetic_pressure(5)
    with pytest.raises(u.UnitTypeError):
        magnetic_pressure(5 * u.m)
    assert np.isnan(magnetic_pressure(np.nan * u.T))
    with pytest.raises(ValueError):
        magnetic_pressure(5j * u.T)
    assert np.isnan(magnetic_pressure(B_nanarr)[-1])
    with pytest.warns(u.UnitsWarning):
        assert magnetic_pressure(22.2) == magnetic_pressure(22.2 * u.T)
    assert_can_handle_nparray(magnetic_pressure)
def test_magnetic_energy_density():
    """Test `magnetic_energy_density`: units, B**2 scaling, a known value,
    and input validation."""
    assert magnetic_energy_density(B_arr).unit.is_equivalent(u.J / u.m ** 3)
    assert magnetic_energy_density(B).unit.is_equivalent("J / m3")
    assert magnetic_energy_density(B).value == magnetic_pressure(B).value
    # Energy density scales as B**2: doubling B quadruples the density.
    assert_quantity_allclose(
        magnetic_energy_density(2 * B), 4 * magnetic_energy_density(B)
    )
    assert_quantity_allclose(magnetic_energy_density(B).value, 397887.35772973835)
    assert_quantity_allclose(
        magnetic_energy_density(B), magnetic_energy_density(B.to(u.G))
    )
    assert isinstance(magnetic_energy_density(B_arr), u.Quantity)
    with pytest.warns(u.UnitsWarning):
        magnetic_energy_density(5)
    with pytest.raises(u.UnitTypeError):
        magnetic_energy_density(5 * u.m)
    assert np.isnan(magnetic_energy_density(np.nan * u.T))
    with pytest.raises(ValueError):
        magnetic_energy_density(5j * u.T)
    assert np.isnan(magnetic_energy_density(B_nanarr)[-1])
    with pytest.warns(u.UnitsWarning):
        assert magnetic_energy_density(22.2) == magnetic_energy_density(22.2 * u.T)
    assert_can_handle_nparray(magnetic_energy_density)
def test_upper_hybrid_frequency():
    """Test `upper_hybrid_frequency` against its defining relation
    omega_uh**2 == omega_ce**2 + omega_pe**2."""
    omega_uh = upper_hybrid_frequency(B, n_e=n_e)
    omega_uh_hz = upper_hybrid_frequency(B, n_e=n_e, to_hz=True)
    omega_ce = gyrofrequency(B, "e-")
    omega_pe = plasma_frequency(n=n_e, particle="e-")
    assert omega_ce.unit.is_equivalent(u.rad / u.s)
    assert omega_pe.unit.is_equivalent(u.rad / u.s)
    assert omega_uh.unit.is_equivalent(u.rad / u.s)
    assert omega_uh_hz.unit.is_equivalent(u.Hz)
    # Defining relation of the upper hybrid frequency.
    left_hand_side = omega_uh ** 2
    right_hand_side = omega_ce ** 2 + omega_pe ** 2
    assert np.isclose(left_hand_side.value, right_hand_side.value)
    assert np.isclose(omega_uh_hz.value, 69385868857.90918)
    with pytest.raises(ValueError):
        upper_hybrid_frequency(5 * u.T, n_e=-1 * u.m ** -3)
    with pytest.warns(u.UnitsWarning):
        assert upper_hybrid_frequency(1.2, 1.3) == upper_hybrid_frequency(
            1.2 * u.T, 1.3 * u.m ** -3
        )
    with pytest.warns(u.UnitsWarning):
        assert upper_hybrid_frequency(1.4 * u.T, 1.3) == upper_hybrid_frequency(
            1.4, 1.3 * u.m ** -3
        )
    assert_can_handle_nparray(upper_hybrid_frequency)
def test_lower_hybrid_frequency():
    """Test `lower_hybrid_frequency` against its defining relation
    1/omega_lh**2 == 1/(omega_ci**2 + omega_pi**2) + 1/(omega_ci * omega_ce)."""
    ion = "He-4 1+"
    omega_ci = gyrofrequency(B, particle=ion)
    omega_pi = plasma_frequency(n=n_i, particle=ion)
    omega_ce = gyrofrequency(B, "e-")
    omega_lh = lower_hybrid_frequency(B, n_i=n_i, ion=ion)
    omega_lh_hz = lower_hybrid_frequency(B, n_i=n_i, ion=ion, to_hz=True)
    assert omega_ci.unit.is_equivalent(u.rad / u.s)
    assert omega_pi.unit.is_equivalent(u.rad / u.s)
    assert omega_ce.unit.is_equivalent(u.rad / u.s)
    assert omega_lh.unit.is_equivalent(u.rad / u.s)
    # Defining relation of the lower hybrid frequency.
    left_hand_side = omega_lh ** -2
    right_hand_side = (
        1 / (omega_ci ** 2 + omega_pi ** 2) + omega_ci ** -1 * omega_ce ** -1
    )
    assert np.isclose(left_hand_side.value, right_hand_side.value)
    assert np.isclose(omega_lh_hz.value, 299878691.3223296)
    # NOTE(review): the following three cases combine an invalid ion name
    # with other bad inputs, so which check fires first is unspecified —
    # confirm that a ValueError (or subclass) is always the result.
    with pytest.raises(ValueError):
        lower_hybrid_frequency(0.2 * u.T, n_i=5e19 * u.m ** -3, ion="asdfasd")
    with pytest.raises(ValueError):
        lower_hybrid_frequency(0.2 * u.T, n_i=-5e19 * u.m ** -3, ion="asdfasd")
    with pytest.raises(ValueError):
        lower_hybrid_frequency(np.nan * u.T, n_i=-5e19 * u.m ** -3, ion="asdfasd")
    with pytest.warns(u.UnitsWarning):
        assert lower_hybrid_frequency(1.3, 1e19, "p+") == lower_hybrid_frequency(
            1.3 * u.T, 1e19 * u.m ** -3, "p+"
        )
    assert_can_handle_nparray(lower_hybrid_frequency)
def test_Bohm_diffusion():
    """The Bohm diffusion coefficient has units of m**2/s and validates units."""
    electron_temp = 5000 * u.K
    magnetic_field = 10 * u.T
    coefficient = Bohm_diffusion(electron_temp, magnetic_field)
    assert coefficient.unit == u.m ** 2 / u.s
    # A bare number for the temperature triggers a units warning.
    with pytest.warns(u.UnitsWarning):
        Bohm_diffusion(5000, magnetic_field)
    # A non-temperature quantity is rejected outright.
    with pytest.raises(u.UnitTypeError):
        Bohm_diffusion(2.2 * u.kg, magnetic_field)
@pytest.mark.parametrize(
    "alias, parent",
    [
        (rho_, mass_density),
        (va_, Alfven_speed),
        (cs_, ion_sound_speed),
        (pth_, thermal_pressure),
        (betaH_, Hall_parameter),
        (oc_, gyrofrequency),
        (wc_, gyrofrequency),
        (rc_, gyroradius),
        (rhoc_, gyroradius),
        (wp_, plasma_frequency),
        (lambdaD_, Debye_length),
        (nD_, Debye_number),
        (cwp_, inertial_length),
        (pmag_, magnetic_pressure),
        (ub_, magnetic_energy_density),
        (wuh_, upper_hybrid_frequency),
        (wlh_, lower_hybrid_frequency),
        (DB_, Bohm_diffusion),
    ],
)
def test_parameters_aliases(alias, parent):
    """Each short alias is the identical function object as its parent."""
    # `is`, not `==`: an alias must be the same object, not a wrapper.
    assert alias is parent
| true | true |
f72db11b2f7cbd06b26d3936d52ef5ae13ca8cca | 3,415 | py | Python | test/programytest/oob/defaults/test_email.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | test/programytest/oob/defaults/test_email.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | test/programytest/oob/defaults/test_email.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | import unittest
import unittest.mock
import xml.etree.ElementTree as ET
from programy.oob.callmom.email import EmailOutOfBandProcessor
from programytest.client import TestClient
class EmailOutOfBandProcessorTests(unittest.TestCase):
    """Unit tests for EmailOutOfBandProcessor OOB XML parsing/processing."""

    def setUp(self):
        # Fresh client context for each test.
        client = TestClient()
        self._client_context = client.create_client_context("testid")

    def test_processor_xml_parsing(self):
        """A complete <to>/<subject>/<body> triple parses successfully."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "to"
        oob[0].text = "fred@west.com"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "subject"
        oob[1].text = "Hello!"
        oob.append(unittest.mock.Mock())
        oob[2].tag = "body"
        oob[2].text = "Got any cement?"
        self.assertTrue(oob_processor.parse_oob_xml(oob))

    def test_processor_xml_parsing_no_to(self):
        """Parsing fails when the <to> element is missing."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "subject"
        oob[0].text = "Hello!"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "body"
        oob[1].text = "Got any cement?"
        self.assertFalse(oob_processor.parse_oob_xml(oob))

    def test_processor_xml_parsing_no_subject(self):
        """Parsing fails when the <subject> element is missing."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "to"
        oob[0].text = "fred@west.com"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "body"
        oob[1].text = "Got any cement?"
        self.assertFalse(oob_processor.parse_oob_xml(oob))

    def test_processor_xml_parsing_no_body(self):
        """Parsing fails when the <body> element is missing."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "to"
        oob[0].text = "fred@west.com"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "subject"
        oob[1].text = "Hello!"
        # Third element left as a bare Mock: its auto-generated tag is not
        # "body", so the processor must treat the body as missing.
        oob.append(unittest.mock.Mock())
        self.assertFalse(oob_processor.parse_oob_xml(oob))

    def test_email_processor_invalid(self):
        """An <email> element with no child elements fails to parse."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        oob_content = ET.fromstring("<email>process</email>")
        self.assertFalse(oob_processor.parse_oob_xml(oob_content))

    def test_email_processor_valid(self):
        """A fully populated <email> element is processed, returning 'EMAIL'."""
        # FIX: restored this method's final line, which had extraction junk
        # fused onto the end of the assert, breaking the file's syntax.
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        oob_content = ET.fromstring(
            "<email><to>me@me.com</to><subject>test</subject><body>test body</body></email>"
        )
        self.assertEqual(
            "EMAIL",
            oob_processor.process_out_of_bounds(self._client_context, oob_content),
        )
import unittest.mock
import xml.etree.ElementTree as ET
from programy.oob.callmom.email import EmailOutOfBandProcessor
from programytest.client import TestClient
class EmailOutOfBandProcessorTests(unittest.TestCase):
    """Unit tests for EmailOutOfBandProcessor OOB XML parsing/processing."""

    def setUp(self):
        # Fresh client context for each test.
        client = TestClient()
        self._client_context = client.create_client_context("testid")

    def test_processor_xml_parsing(self):
        """A complete <to>/<subject>/<body> triple parses successfully."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "to"
        oob[0].text = "fred@west.com"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "subject"
        oob[1].text = "Hello!"
        oob.append(unittest.mock.Mock())
        oob[2].tag = "body"
        oob[2].text = "Got any cement?"
        self.assertTrue(oob_processor.parse_oob_xml(oob))

    def test_processor_xml_parsing_no_to(self):
        """Parsing fails when the <to> element is missing."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "subject"
        oob[0].text = "Hello!"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "body"
        oob[1].text = "Got any cement?"
        self.assertFalse(oob_processor.parse_oob_xml(oob))

    def test_processor_xml_parsing_no_subject(self):
        """Parsing fails when the <subject> element is missing."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "to"
        oob[0].text = "fred@west.com"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "body"
        oob[1].text = "Got any cement?"
        self.assertFalse(oob_processor.parse_oob_xml(oob))

    def test_processor_xml_parsing_no_body(self):
        """Parsing fails when the <body> element is missing."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        self.assertFalse(oob_processor.parse_oob_xml(None))
        oob = []
        self.assertFalse(oob_processor.parse_oob_xml(oob))
        oob = []
        oob.append(unittest.mock.Mock())
        oob[0].tag = "to"
        oob[0].text = "fred@west.com"
        oob.append(unittest.mock.Mock())
        oob[1].tag = "subject"
        oob[1].text = "Hello!"
        # Third element left as a bare Mock: its auto-generated tag is not
        # "body", so the processor must treat the body as missing.
        oob.append(unittest.mock.Mock())
        self.assertFalse(oob_processor.parse_oob_xml(oob))

    def test_email_processor_invalid(self):
        """An <email> element with no child elements fails to parse."""
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        oob_content = ET.fromstring("<email>process</email>")
        self.assertFalse(oob_processor.parse_oob_xml(oob_content))

    def test_email_processor_valid(self):
        """A fully populated <email> element is processed, returning 'EMAIL'."""
        # FIX: restored this method's final line, which had extraction junk
        # fused onto the end of the assert, breaking the file's syntax.
        oob_processor = EmailOutOfBandProcessor()
        self.assertIsNotNone(oob_processor)
        oob_content = ET.fromstring(
            "<email><to>me@me.com</to><subject>test</subject><body>test body</body></email>"
        )
        self.assertEqual(
            "EMAIL",
            oob_processor.process_out_of_bounds(self._client_context, oob_content),
        )
f72db15d88506aca4c050bef3ef999424a0aa8af | 6,927 | py | Python | scrum/migrations/0002_rename_section_to_story.py | OpenSourceSag/django-dash2013 | 5646d3123547732507c9b83a55cfe54dcead9eef | [
"BSD-3-Clause"
] | 1 | 2016-01-09T03:14:43.000Z | 2016-01-09T03:14:43.000Z | scrum/migrations/0002_rename_section_to_story.py | OpenSourceSag/django-dash2013 | 5646d3123547732507c9b83a55cfe54dcead9eef | [
"BSD-3-Clause"
] | null | null | null | scrum/migrations/0002_rename_section_to_story.py | OpenSourceSag/django-dash2013 | 5646d3123547732507c9b83a55cfe54dcead9eef | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: rename table ``scrum_section`` to
    ``scrum_story`` (the Section model is being renamed to Story)."""

    def forwards(self, orm):
        # Rename the table in place; no column changes are required.
        db.rename_table('scrum_section', 'scrum_story')

    def backwards(self, orm):
        # Exact inverse of forwards(): restore the original table name.
        db.rename_table('scrum_story', 'scrum_section')

    # Frozen ORM snapshot used by South at migration time.
    # NOTE(review): the snapshot still declares 'scrum.section'; presumably the
    # model-level rename happens in a later migration — verify the sequence.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'scrum.project': {
            'Meta': {'ordering': "('id',)", 'object_name': 'Project'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'Project_users'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"})
        },
        u'scrum.section': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Section'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Section_project'", 'to': u"orm['scrum.Project']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'scrum.sprint': {
            'Meta': {'ordering': "('number',)", 'unique_together': "(('number', 'project'),)", 'object_name': 'Sprint'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Sprint_project'", 'to': u"orm['scrum.Project']"}),
            'tasks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['scrum.Task']", 'null': 'True', 'through': u"orm['scrum.SprintTasks']", 'blank': 'True'})
        },
        u'scrum.sprinttasks': {
            'Meta': {'object_name': 'SprintTasks'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sprint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sprints'", 'to': u"orm['scrum.Sprint']"}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['scrum.Task']"}),
            'task_end_status': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        u'scrum.task': {
            'Meta': {'ordering': "('id',)", 'object_name': 'Task'},
            'assigned_to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'Task_users'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Task_section'", 'to': u"orm['scrum.Section']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    # FIX: restored this line, which had extraction-stats junk fused onto it.
    complete_apps = ['scrum']
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: rename table ``scrum_section`` to ``scrum_story``."""
    def forwards(self, orm):
        """Apply the migration: rename the DB table to its new name."""
        db.rename_table('scrum_section', 'scrum_story')
    def backwards(self, orm):
        """Revert the migration: restore the original table name."""
        db.rename_table('scrum_story', 'scrum_section')
    # Frozen ORM snapshot auto-generated by South; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'scrum.project': {
            'Meta': {'ordering': "('id',)", 'object_name': 'Project'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'Project_users'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"})
        },
        u'scrum.section': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Section'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Section_project'", 'to': u"orm['scrum.Project']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'scrum.sprint': {
            'Meta': {'ordering': "('number',)", 'unique_together': "(('number', 'project'),)", 'object_name': 'Sprint'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Sprint_project'", 'to': u"orm['scrum.Project']"}),
            'tasks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['scrum.Task']", 'null': 'True', 'through': u"orm['scrum.SprintTasks']", 'blank': 'True'})
        },
        u'scrum.sprinttasks': {
            'Meta': {'object_name': 'SprintTasks'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sprint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sprints'", 'to': u"orm['scrum.Sprint']"}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'to': u"orm['scrum.Task']"}),
            'task_end_status': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        u'scrum.task': {
            'Meta': {'ordering': "('id',)", 'object_name': 'Task'},
            'assigned_to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'Task_users'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Task_section'", 'to': u"orm['scrum.Section']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
    # Tell South which app(s) this frozen model set belongs to.
    complete_apps = ['scrum']
f72db2150e33c99d77c2b2f4a04aba4277c87834 | 689 | py | Python | easy/ugly number/solution.py | i-redbyte/leetcode | b1569b06dd219495e7db9a8122f839bcaf9f6024 | [
"MIT"
] | 4 | 2021-06-03T22:19:13.000Z | 2021-10-05T18:14:12.000Z | easy/ugly number/solution.py | i-redbyte/leetcode | b1569b06dd219495e7db9a8122f839bcaf9f6024 | [
"MIT"
] | null | null | null | easy/ugly number/solution.py | i-redbyte/leetcode | b1569b06dd219495e7db9a8122f839bcaf9f6024 | [
"MIT"
] | null | null | null | class Solution:
def maxDiv(self, a: int, b: int) -> int:
while a % b == 0:
a = a / b
return a
def isUgly2(self, n: int) -> bool:
n = self.maxDiv(n, 2)
n = self.maxDiv(n, 3)
n = self.maxDiv(n, 5)
return n == 1
def isUgly(self, n: int) -> bool:
if n == 0:
return False
while n % 2 == 0:
n /= 2
while n % 3 == 0:
n /= 3
while n % 5 == 0:
n /= 5
return n == 1
# Ad-hoc smoke checks (printed, not asserted); expected values noted inline.
s = Solution()
print(s.isUgly(6))   # True: 6 = 2 * 3
print(s.isUgly(7))   # False: 7 is a prime outside {2, 3, 5}
print(s.isUgly(8))   # True: 8 = 2**3
print(s.isUgly(1))   # True: 1 has no prime factors
print(s.isUgly(14))  # False: 14 = 2 * 7
print(s.isUgly(21))  # False: 21 = 3 * 7
print(s.isUgly(0))   # False: guarded explicitly in isUgly
print(s.isUgly(1))   # True (duplicate of the earlier check)
| 20.264706 | 44 | 0.454282 | class Solution:
def maxDiv(self, a: int, b: int) -> int:
while a % b == 0:
a = a / b
return a
    def isUgly2(self, n: int) -> bool:
        """Variant ugly-number check that delegates factor-stripping to maxDiv.

        NOTE(review): unlike ``isUgly`` there is no n == 0 guard here, and
        ``maxDiv(0, b)`` never terminates -- ``isUgly2(0)`` hangs. Confirm
        callers never pass 0, or add the guard.
        """
        n = self.maxDiv(n, 2)
        n = self.maxDiv(n, 3)
        n = self.maxDiv(n, 5)
        return n == 1
def isUgly(self, n: int) -> bool:
if n == 0:
return False
while n % 2 == 0:
n /= 2
while n % 3 == 0:
n /= 3
while n % 5 == 0:
n /= 5
return n == 1
# Ad-hoc smoke checks (printed, not asserted); expected values noted inline.
s = Solution()
print(s.isUgly(6))   # True: 6 = 2 * 3
print(s.isUgly(7))   # False: prime outside {2, 3, 5}
print(s.isUgly(8))   # True: 2**3
print(s.isUgly(1))   # True: no prime factors
print(s.isUgly(14))  # False: 2 * 7
print(s.isUgly(21))  # False: 3 * 7
print(s.isUgly(0))   # False: guarded in isUgly
print(s.isUgly(1))   # True (repeat)
| true | true |
f72db2685b749298afabe9cef2e6ca318e0fa3f5 | 4,526 | py | Python | plenum/test/nodestack/test_resend_stashed_msgs.py | anikitinDSR/indy-plenum | f2a03344cdff4edbe0bf3528a8ddf34a8b1c6b85 | [
"Apache-2.0"
] | null | null | null | plenum/test/nodestack/test_resend_stashed_msgs.py | anikitinDSR/indy-plenum | f2a03344cdff4edbe0bf3528a8ddf34a8b1c6b85 | [
"Apache-2.0"
] | null | null | null | plenum/test/nodestack/test_resend_stashed_msgs.py | anikitinDSR/indy-plenum | f2a03344cdff4edbe0bf3528a8ddf34a8b1c6b85 | [
"Apache-2.0"
] | null | null | null | from copy import copy
import pytest
from stp_core.loop.eventually import eventually
from plenum.common.constants import OP_FIELD_NAME, BATCH
from plenum.common.messages.node_messages import Batch
from plenum.common.stacks import nodeStackClass
from plenum.common.types import f
from stp_core.network.auth_mode import AuthMode
from stp_core.network.port_dispenser import genHa
from stp_core.test.helper import Printer, connectStack
from stp_zmq.test.helper import genKeys
@pytest.fixture()
def registry():
    """Name -> HA (host/port) registry for the four test nodes."""
    node_names = ('Alpha', 'Beta', 'Gamma', 'Delta')
    return {name: genHa() for name in node_names}
@pytest.fixture(scope="function")
def func_create_stacks(tdir, registry):
    """Yield a factory that builds and starts up to ``count`` node stacks."""
    def create_stack(count):
        # One key pair per registry name, all under the shared base dir `tdir`.
        genKeys(tdir, registry.keys())
        stacks = []
        for name, ha in registry.items():
            printer = Printer(name)
            stackParams = dict(name=name, ha=ha, basedirpath=tdir,
                               auth_mode=AuthMode.RESTRICTED.value)
            # Each stack's remotes are every registry entry except itself.
            reg = copy(registry)
            reg.pop(name)
            stack = nodeStackClass(stackParams, printer.print, reg)
            stack.start()
            stacks.append(stack)
            if len(stacks) == count:
                break
        return stacks
    # NOTE(review): started stacks are not stopped on teardown here --
    # confirm cleanup is handled by the calling test/looper.
    yield create_stack
@pytest.mark.skip("INDY-2253: we don't stasj ping/pongs anymore")
def test_use_send_from_zstack_on_resend(func_create_stacks, looper):
    """Messages stashed while a peer is disconnected must be resent as-is
    (not re-wrapped into a batch-of-batches) once the peer reconnects.
    """
    aStack, bStack = func_create_stacks(2)
    connectStack(aStack, bStack)
    """
    Sending some pi msgs for creating a batch on flashOutBox
    This function just put 'pi ' message into outBoxes queue, not send
    """
    aStack.sendPingPong(bStack.name)
    aStack.sendPingPong(bStack.name)
    """
    Emulate batch creation and sending. Batch should be added into _stashed_to_disconnected queue
    """
    aStack.flushOutBoxes()
    assert len(aStack._stashed_to_disconnected[bStack.name]) == 1
    batch_to_disconnected = aStack.deserializeMsg(aStack._stashed_to_disconnected[bStack.name][0])
    assert OP_FIELD_NAME in batch_to_disconnected and batch_to_disconnected[OP_FIELD_NAME] == BATCH
    """
    This method call connect method for bStack and put 'pi' message into outBoxes queue
    """
    connectStack(bStack, aStack)
    """
    Wait for socket's connecting routines
    """
    looper.runFor(1)
    """
    This instruction get 'pi' message from outBoxes queue, create a batch if needed and send it to aStack
    """
    bStack.flushOutBoxes()
    """
    It needs for getting 'pi' message from bStack. It process 'pi' message and put 'po' message to outBoxes queue
    """
    looper.run(aStack.service())
    """
    Send 'po' message to bStack
    """
    aStack.flushOutBoxes()
    """
    Processing previous sending batch (zmq feature) and 'po'
    """
    looper.run(bStack.service())
    """
    For sending 'po' message to aStack
    """
    bStack.flushOutBoxes()
    """
    Append 'pi' msg for checking that batch into batch will not be included
    """
    aStack._stashed_to_disconnected[bStack.name].append('pi')
    """
    Emulate that aStack got 'po' message from bStack and it must run _resend_to_disconnected
    """
    looper.run(aStack.service())
    """
    Emulate batch creating and sending
    """
    aStack.flushOutBoxes()
    looper.run(bStack._serviceStack(bStack.age, None))
    """
    rxMsgs queue should contains only one 'pi' message from step 3 and batch
    which was failed to sending to disconnected stack from step 2
    """
    got_pi = False
    got_batch = False
    def rxMsgsNotEmpty():
        assert bStack.rxMsgs
    looper.run(eventually(rxMsgsNotEmpty))
    while bStack.rxMsgs:
        m, frm = bStack.rxMsgs.popleft()
        if m.encode() not in bStack.healthMessages:
            msg = bStack.deserializeMsg(m)
        else:
            got_pi = True
            continue
        if OP_FIELD_NAME in msg and msg[OP_FIELD_NAME] == BATCH:
            if msg == batch_to_disconnected:
                """
                Exactly the same batch which should be sent to disconnected node
                """
                got_batch = True
                continue
            else:
                """Check that there is no batches with batch as message"""
                # NOTE(review): the inner loop variable `m` shadows the outer
                # `m` from `bStack.rxMsgs.popleft()` above.
                batch = Batch(messages=msg[f.MSGS.nm],
                              signature=msg[f.SIG.nm])
                for m in batch.messages:
                    assert OP_FIELD_NAME not in m and BATCH not in m
    assert got_pi and got_batch
| 32.328571 | 113 | 0.643394 | from copy import copy
import pytest
from stp_core.loop.eventually import eventually
from plenum.common.constants import OP_FIELD_NAME, BATCH
from plenum.common.messages.node_messages import Batch
from plenum.common.stacks import nodeStackClass
from plenum.common.types import f
from stp_core.network.auth_mode import AuthMode
from stp_core.network.port_dispenser import genHa
from stp_core.test.helper import Printer, connectStack
from stp_zmq.test.helper import genKeys
@pytest.fixture()
def registry():
    """Four-node name -> HA (host/port) registry used by the stack tests."""
    return {name: genHa() for name in ('Alpha', 'Beta', 'Gamma', 'Delta')}
@pytest.fixture(scope="function")
def func_create_stacks(tdir, registry):
    """Yield a factory building and starting up to ``count`` node stacks,
    each configured with the full registry minus itself as its peers.
    """
    def create_stack(count):
        genKeys(tdir, registry.keys())
        stacks = []
        for name, ha in registry.items():
            printer = Printer(name)
            stackParams = dict(name=name, ha=ha, basedirpath=tdir,
                               auth_mode=AuthMode.RESTRICTED.value)
            reg = copy(registry)
            reg.pop(name)
            stack = nodeStackClass(stackParams, printer.print, reg)
            stack.start()
            stacks.append(stack)
            if len(stacks) == count:
                break
        return stacks
    yield create_stack
@pytest.mark.skip("INDY-2253: we don't stasj ping/pongs anymore")
def test_use_send_from_zstack_on_resend(func_create_stacks, looper):
    """Stashed-while-disconnected batches must be resent unchanged after
    reconnect; no batch may contain another batch as a message.
    """
    aStack, bStack = func_create_stacks(2)
    connectStack(aStack, bStack)
    # Queue two pings; flushing turns them into one batch that lands in
    # _stashed_to_disconnected because bStack is not yet connected back.
    aStack.sendPingPong(bStack.name)
    aStack.sendPingPong(bStack.name)
    aStack.flushOutBoxes()
    assert len(aStack._stashed_to_disconnected[bStack.name]) == 1
    batch_to_disconnected = aStack.deserializeMsg(aStack._stashed_to_disconnected[bStack.name][0])
    assert OP_FIELD_NAME in batch_to_disconnected and batch_to_disconnected[OP_FIELD_NAME] == BATCH
    # Bring bStack up and let both sides complete the ping/pong handshake.
    connectStack(bStack, aStack)
    looper.runFor(1)
    bStack.flushOutBoxes()
    looper.run(aStack.service())
    aStack.flushOutBoxes()
    looper.run(bStack.service())
    bStack.flushOutBoxes()
    # Extra 'pi' in the stash checks that resend does not wrap it in a batch.
    aStack._stashed_to_disconnected[bStack.name].append('pi')
    looper.run(aStack.service())
    aStack.flushOutBoxes()
    looper.run(bStack._serviceStack(bStack.age, None))
    got_pi = False
    got_batch = False
    def rxMsgsNotEmpty():
        assert bStack.rxMsgs
    looper.run(eventually(rxMsgsNotEmpty))
    while bStack.rxMsgs:
        m, frm = bStack.rxMsgs.popleft()
        if m.encode() not in bStack.healthMessages:
            msg = bStack.deserializeMsg(m)
        else:
            got_pi = True
            continue
        if OP_FIELD_NAME in msg and msg[OP_FIELD_NAME] == BATCH:
            if msg == batch_to_disconnected:
                # The very batch stashed while disconnected arrived intact.
                got_batch = True
                continue
            else:
                """Check that there is no batches with batch as message"""
                batch = Batch(messages=msg[f.MSGS.nm],
                              signature=msg[f.SIG.nm])
                for m in batch.messages:
                    assert OP_FIELD_NAME not in m and BATCH not in m
    assert got_pi and got_batch
| true | true |
f72db2f84a2d185ab76ee58dd425a4bf305847e5 | 1,628 | py | Python | src/secondaires/auberge/masques/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/secondaires/auberge/masques/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/secondaires/auberge/masques/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package des masques du module perso."""
from . import chambre
| 47.882353 | 79 | 0.778256 |
from . import chambre
| true | true |
f72db4a9438ec86a8a7ddb3a4b730c35c957db51 | 4,189 | py | Python | elvis/modeling/meta_arch/vl_pretrainer.py | seo-95/elvis | a89c759acdf6ce64c7e6863aeb68dc0ba3293fed | [
"Apache-2.0"
] | 1 | 2021-08-01T13:55:27.000Z | 2021-08-01T13:55:27.000Z | elvis/modeling/meta_arch/vl_pretrainer.py | seo-95/elvis | a89c759acdf6ce64c7e6863aeb68dc0ba3293fed | [
"Apache-2.0"
] | null | null | null | elvis/modeling/meta_arch/vl_pretrainer.py | seo-95/elvis | a89c759acdf6ce64c7e6863aeb68dc0ba3293fed | [
"Apache-2.0"
] | null | null | null | import copy
import os
import pdb
import random
from typing import Dict, List, Text, TypeVar
import torch
import torch.nn as nn
import torch.nn.functional as F
from elvis.modeling.models import build_net
from elvis.modeling.models.layers import FC, MLP
from elvis.utils.vlp_objectives import optimal_transport_dist
from .base import MetaArch
from .build import ARCH_REGISTRY
Tensor = TypeVar('torch.tensor')
__all__ = ['AlignmentVLP',
'build_align_vlp']
class AlignmentVLP(MetaArch):
"""Meta architecture for Visual Language Pretraining (VLP) based on image-caption alignment
"""
def __init__(self, model, max_visual, max_tokens, tasks_dict) -> None:
super().__init__()
self.model = model
self.max_visual = max_visual
self.max_tokens = max_tokens+2 #take into account [CLS] and [SEP]
self.tasks_dict = tasks_dict
self.lm_mlp = MLP(in_features=self.model.embed_dim,
hidden_dim=self.model.embed_dim,
out_features=len(self.model.tokenizer)-1,
dropout_p=.1)
self.itm_fc = FC(in_features=self.model.embed_dim, out_features=2)
def forward(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs) -> Dict:
cntx_emb = self.model(vis_in=vis_in, vis_mask=vis_mask, txt_in=txt_in, txt_mask=txt_mask)
txt_emb = cntx_emb[:, :self.max_tokens]
itm_logits = self.itm_fc(txt_emb[:, 0, :]) #pass everything but use only [CLS]: better parallelization of loss computation
lm_logits = self.lm_mlp(txt_emb[:, 1:, :])
#? exclude special tokens from ot computation
vis_mask = torch.cat(
(torch.ones((vis_mask.shape[0], 1), device=vis_mask.device), vis_mask),
dim=-1) #add attention for [IMG]
ot_dist = optimal_transport_dist(txt_emb=cntx_emb[:, :self.max_tokens, :].float(),
img_emb=cntx_emb[:, self.max_tokens:, :].float(),
txt_pad=~txt_mask.bool(),
img_pad=~vis_mask.bool()
)
return {'lm_logits': lm_logits, 'itm_logits': itm_logits, 'ot_dist': ot_dist}
def compute_loss(self, lm_logits, itm_logits, lm_targets, itm_targets, **kwargs) -> Dict:
B = lm_logits.shape[0]
n_mlm = sum([t == 'MLM' for t in kwargs['tasks']])
n_itm = len(kwargs['tasks']) - n_mlm
loss_dict = {}
#compute lm loss (compute it also if n_mlm > 0 otherwise the DDP will raise an exception)
lm_loss = F.cross_entropy(lm_logits.transpose(1, 2), lm_targets[:, 1:], reduction='sum')
if n_mlm > 0:
lm_loss /= n_mlm
loss_dict['lm_loss'] = lm_loss
#compute itm loss (compute it also if n_itm > 0 otherwise the DDP will raise an exception)
itm_loss = F.cross_entropy(itm_logits, itm_targets[:, 0], reduction='sum')
ot_pos = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 1)
ot_neg = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 0)
#we want to maximize the OT distance for negative pairs and minimize OT distance for positive ones
ot_loss = ot_pos.sum() - ot_neg.sum()
itm_loss = (itm_loss + 0.1 * ot_loss)
if n_itm > 0:
itm_loss /= n_itm
loss_dict['itm_loss'] = itm_loss
loss_dict['loss'] = sum(loss_dict.values())
return loss_dict
def save_on_disk(self, path):
state_dict = copy.deepcopy(self).cpu().state_dict()
ckp_file = os.path.join(path, 'state_dict.pt')
torch.save(state_dict, ckp_file)
@ARCH_REGISTRY.register()
def build_align_vlp(cfg):
    """Registry factory: build an AlignmentVLP and its data interface from *cfg*."""
    model_cfg = cfg.MODEL
    backbone, data_interface = build_net(model_cfg, get_interface='vlp')
    meta_arch = AlignmentVLP(
        backbone,
        max_visual=model_cfg.MAX_N_VISUAL,
        max_tokens=model_cfg.MAX_N_TOKENS,
        tasks_dict=model_cfg.TASKS.get_as_dict(),
    )
    return meta_arch, data_interface
| 41.475248 | 130 | 0.603008 | import copy
import os
import pdb
import random
from typing import Dict, List, Text, TypeVar
import torch
import torch.nn as nn
import torch.nn.functional as F
from elvis.modeling.models import build_net
from elvis.modeling.models.layers import FC, MLP
from elvis.utils.vlp_objectives import optimal_transport_dist
from .base import MetaArch
from .build import ARCH_REGISTRY
Tensor = TypeVar('torch.tensor')
__all__ = ['AlignmentVLP',
'build_align_vlp']
class AlignmentVLP(MetaArch):
    """Meta architecture for VLP pretraining via image-caption alignment:
    a cross-modal backbone plus MLM and image-text-matching (ITM) heads.
    """
    def __init__(self, model, max_visual, max_tokens, tasks_dict) -> None:
        """Store the backbone and build the MLM and ITM heads."""
        super().__init__()
        self.model = model
        self.max_visual = max_visual
        # +2 accounts for the [CLS] and [SEP] special tokens.
        self.max_tokens = max_tokens+2
        self.tasks_dict = tasks_dict
        self.lm_mlp = MLP(in_features=self.model.embed_dim,
                          hidden_dim=self.model.embed_dim,
                          out_features=len(self.model.tokenizer)-1,
                          dropout_p=.1)
        self.itm_fc = FC(in_features=self.model.embed_dim, out_features=2)
    def forward(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs) -> Dict:
        """Return LM logits, ITM logits and the optimal-transport distance."""
        cntx_emb = self.model(vis_in=vis_in, vis_mask=vis_mask, txt_in=txt_in, txt_mask=txt_mask)
        # Text tokens occupy the first max_tokens positions; visuals follow.
        txt_emb = cntx_emb[:, :self.max_tokens]
        itm_logits = self.itm_fc(txt_emb[:, 0, :])
        lm_logits = self.lm_mlp(txt_emb[:, 1:, :])
        # Prepend one attended position to the visual mask (the [IMG] slot).
        vis_mask = torch.cat(
            (torch.ones((vis_mask.shape[0], 1), device=vis_mask.device), vis_mask),
            dim=-1)
        ot_dist = optimal_transport_dist(txt_emb=cntx_emb[:, :self.max_tokens, :].float(),
                                         img_emb=cntx_emb[:, self.max_tokens:, :].float(),
                                         txt_pad=~txt_mask.bool(),
                                         img_pad=~vis_mask.bool()
                                         )
        return {'lm_logits': lm_logits, 'itm_logits': itm_logits, 'ot_dist': ot_dist}
    def compute_loss(self, lm_logits, itm_logits, lm_targets, itm_targets, **kwargs) -> Dict:
        """Combine MLM and ITM(+OT) losses; each normalized by its task count.

        Both losses are always computed (even when their task count is 0)
        so every parameter receives a gradient under DDP.
        """
        B = lm_logits.shape[0]
        n_mlm = sum([t == 'MLM' for t in kwargs['tasks']])
        n_itm = len(kwargs['tasks']) - n_mlm
        loss_dict = {}
        lm_loss = F.cross_entropy(lm_logits.transpose(1, 2), lm_targets[:, 1:], reduction='sum')
        if n_mlm > 0:
            lm_loss /= n_mlm
        loss_dict['lm_loss'] = lm_loss
        itm_loss = F.cross_entropy(itm_logits, itm_targets[:, 0], reduction='sum')
        ot_pos = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 1)
        ot_neg = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 0)
        # Minimize OT distance for matching pairs, maximize it for negatives.
        ot_loss = ot_pos.sum() - ot_neg.sum()
        itm_loss = (itm_loss + 0.1 * ot_loss)
        if n_itm > 0:
            itm_loss /= n_itm
        loss_dict['itm_loss'] = itm_loss
        loss_dict['loss'] = sum(loss_dict.values())
        return loss_dict
    def save_on_disk(self, path):
        """Save a CPU copy of the state dict to ``<path>/state_dict.pt``."""
        state_dict = copy.deepcopy(self).cpu().state_dict()
        ckp_file = os.path.join(path, 'state_dict.pt')
        torch.save(state_dict, ckp_file)
@ARCH_REGISTRY.register()
def build_align_vlp(cfg):
    """Registry factory: assemble an AlignmentVLP (and data interface) from *cfg*."""
    net, interface = build_net(cfg.MODEL, get_interface='vlp')
    arch_kwargs = {
        'max_visual': cfg.MODEL.MAX_N_VISUAL,
        'max_tokens': cfg.MODEL.MAX_N_TOKENS,
        'tasks_dict': cfg.MODEL.TASKS.get_as_dict(),
    }
    return AlignmentVLP(net, **arch_kwargs), interface
| true | true |
f72db4fe126c81c8a809a327d8b91edb1269602b | 5,434 | py | Python | tests/portfolio/test_portfolio.py | keisuke-umezawa/backlight | db49a966fdb38de693bb8157cec88d98620f9946 | [
"MIT"
] | 8 | 2018-11-06T16:48:45.000Z | 2021-02-14T18:02:27.000Z | tests/portfolio/test_portfolio.py | keisuke-umezawa/backlight | db49a966fdb38de693bb8157cec88d98620f9946 | [
"MIT"
] | 36 | 2018-11-02T23:21:59.000Z | 2021-02-08T10:27:29.000Z | tests/portfolio/test_portfolio.py | keisuke-umezawa/backlight | db49a966fdb38de693bb8157cec88d98620f9946 | [
"MIT"
] | 5 | 2018-11-07T06:05:24.000Z | 2021-11-20T08:57:39.000Z | import pytest
import pandas as pd
import numpy as np
import backlight
from backlight.portfolio.portfolio import create_portfolio as module
from backlight.portfolio.portfolio import _fusion_positions
import backlight.positions.positions
from backlight.trades.trades import make_trades
from backlight.asset.currency import Currency
@pytest.fixture
def trades():
    """Three Trades objects (USDJPY, EURJPY, USDJPY) sharing one amount series.

    NOTE(review): several timestamps below carry a trailing space;
    ``pd.to_datetime`` tolerates it, but it looks unintentional.
    """
    trades = []
    index = [
        "2018-06-06 00:00:00",
        "2018-06-06 00:01:00",
        "2018-06-06 00:02:00",
        "2018-06-06 00:03:00",
        "2018-06-06 00:03:00",
        "2018-06-06 00:04:00 ",
        "2018-06-06 00:05:00",
        "2018-06-06 00:05:00",
        "2018-06-06 00:06:00 ",
        "2018-06-06 00:06:00 ",
        "2018-06-06 00:07:00 ",
        "2018-06-06 00:08:00 ",
        "2018-06-06 00:09:00 ",
        "2018-06-06 00:09:00 ",
    ]
    # Signed amounts: +1 opens long / closes short, -1 the opposite.
    trade = pd.Series(
        index=pd.to_datetime(index),
        data=[1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1],
        name="amount",
    )
    # Trade ids pair entries: each id appears exactly twice (open + close).
    ids = [0, 1, 0, 1, 2, 3, 2, 4, 3, 5, 4, 5, 6, 6]
    currency_unit = Currency.JPY
    trades.append(make_trades("USDJPY", [trade], currency_unit, [ids]))
    trades.append(make_trades("EURJPY", [trade], currency_unit, [ids]))
    trades.append(make_trades("USDJPY", [trade], currency_unit, [ids]))
    return trades
@pytest.fixture
def markets():
    """Two constant-mid market feeds: USDJPY at 2 and EURJPY at 4.

    Both start a few minutes before the trades fixture so every trade
    timestamp has a quote.
    """
    markets = []
    symbol = "USDJPY"
    currency_unit = Currency.JPY
    quote_currency = Currency.USD
    periods = 13
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-05 23:57:00", freq="1min", periods=periods),
        data=np.repeat(2, periods)[:, None],
        columns=["mid"],
    )
    markets.append(
        backlight.datasource.from_dataframe(
            df, symbol, currency_unit, quote_currency=quote_currency
        )
    )
    symbol = "EURJPY"
    currency_unit = Currency.JPY
    quote_currency = Currency.EUR
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-05 23:57:00", freq="1min", periods=periods),
        data=np.repeat(4, periods)[:, None],
        columns=["mid"],
    )
    markets.append(
        backlight.datasource.from_dataframe(
            df, symbol, currency_unit, quote_currency=quote_currency
        )
    )
    return markets
@pytest.fixture
def principal():
    """Initial principal allocated to each traded symbol."""
    return dict.fromkeys(("USDJPY", "EURJPY"), 10)
@pytest.fixture
def lot_size():
    """Lot size used to scale trade amounts, per symbol."""
    return dict.fromkeys(("USDJPY", "EURJPY"), 2)
def test_create_portfolio(trades, markets, principal, lot_size):
    """create_portfolio must merge same-symbol trades and track
    amount/price/principal per position (golden-data comparison).
    """
    portfolio = module(trades, markets, principal, lot_size, Currency.USD)
    index = [
        "2018-06-05 23:59:00",
        "2018-06-06 00:00:00",
        "2018-06-06 00:01:00",
        "2018-06-06 00:02:00",
        "2018-06-06 00:03:00",
        "2018-06-06 00:04:00 ",
        "2018-06-06 00:05:00",
        "2018-06-06 00:06:00 ",
        "2018-06-06 00:07:00 ",
        "2018-06-06 00:08:00 ",
        "2018-06-06 00:09:00 ",
    ]
    # Expected EURJPY position rows: [amount, price, principal].
    data1 = [
        [0.0, 0.0, 5.0],
        [2.0, 2.0, 1.0],
        [0.0, 2.0, 5.0],
        [-2.0, 2.0, 9.0],
        [2.0, 2.0, 1.0],
        [4.0, 2.0, -3.0],
        [0.0, 2.0, 5.0],
        [-4.0, 2.0, 13.0],
        [-2.0, 2.0, 9.0],
        [0.0, 2.0, 5.0],
        [0.0, 2.0, 5.0],
    ]
    # Expected USDJPY rows: the two USDJPY trade sets are fused, doubling amounts.
    data2 = [
        [0.0, 0.0, 10.0],
        [4.0, 2.0, 6.0],
        [0.0, 2.0, 10.0],
        [-4.0, 2.0, 14.0],
        [4.0, 2.0, 6.0],
        [8.0, 2.0, 2.0],
        [0.0, 2.0, 10.0],
        [-8.0, 2.0, 18.0],
        [-4.0, 2.0, 14.0],
        [0.0, 2.0, 10.0],
        [0.0, 2.0, 10.0],
    ]
    data = [data1, data2]
    for (position, d) in zip(portfolio._positions, data):
        expected = pd.DataFrame(
            index=pd.to_datetime(index),
            data=d,
            columns=["amount", "price", "principal"],
        )
        assert ((expected == position).all()).all()
def test_fusion_positions():
    """_fusion_positions must sum same-symbol positions over the union of
    their indices and leave other symbols untouched.
    """
    periods = 3
    data = np.arange(periods * 3).reshape((periods, 3))
    columns = ["amount", "price", "principal"]
    currency_unit = Currency.JPY
    positions_list = []
    # Two USDJPY positions with overlapping (offset-by-one-day) indices...
    df = pd.DataFrame(
        data=data,
        index=pd.date_range("2012-1-1", periods=periods, freq="D"),
        columns=columns,
    )
    symbol = "USDJPY"
    positions_list.append(
        backlight.positions.positions.from_dataframe(df, symbol, currency_unit)
    )
    df = pd.DataFrame(
        data=data,
        index=pd.date_range("2012-1-2", periods=periods, freq="D"),
        columns=columns,
    )
    symbol = "USDJPY"
    positions_list.append(
        backlight.positions.positions.from_dataframe(df, symbol, currency_unit)
    )
    # ...and one EURJPY position that must remain standalone.
    df = pd.DataFrame(
        data=data,
        index=pd.date_range("2012-1-4", periods=periods, freq="D"),
        columns=columns,
    )
    symbol = "EURJPY"
    positions_list.append(
        backlight.positions.positions.from_dataframe(df, symbol, currency_unit)
    )
    fusioned = _fusion_positions(positions_list)
    data1 = np.arange(periods * 3).reshape((periods, 3))
    # Overlapping days add element-wise; edge days are carried through.
    data2 = [[0, 1, 2], [3, 5, 7], [9, 11, 13], [6, 7, 8]]
    df1 = pd.DataFrame(
        data=data1,
        index=pd.date_range("2012-1-1", periods=periods, freq="D"),
        columns=columns,
    )
    df2 = pd.DataFrame(
        data=data2,
        index=pd.date_range("2012-1-1", periods=periods + 1, freq="D"),
        columns=columns,
    )
    expected = [df1, df2]
    for exp, fus in zip(expected, fusioned):
        assert exp.all().all() == fus.all().all()
| 26.507317 | 87 | 0.550423 | import pytest
import pandas as pd
import numpy as np
import backlight
from backlight.portfolio.portfolio import create_portfolio as module
from backlight.portfolio.portfolio import _fusion_positions
import backlight.positions.positions
from backlight.trades.trades import make_trades
from backlight.asset.currency import Currency
@pytest.fixture
def trades():
    """Three Trades objects (USDJPY, EURJPY, USDJPY) built from one shared
    amount series; ids pair each open with its close.
    """
    trades = []
    index = [
        "2018-06-06 00:00:00",
        "2018-06-06 00:01:00",
        "2018-06-06 00:02:00",
        "2018-06-06 00:03:00",
        "2018-06-06 00:03:00",
        "2018-06-06 00:04:00 ",
        "2018-06-06 00:05:00",
        "2018-06-06 00:05:00",
        "2018-06-06 00:06:00 ",
        "2018-06-06 00:06:00 ",
        "2018-06-06 00:07:00 ",
        "2018-06-06 00:08:00 ",
        "2018-06-06 00:09:00 ",
        "2018-06-06 00:09:00 ",
    ]
    trade = pd.Series(
        index=pd.to_datetime(index),
        data=[1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1],
        name="amount",
    )
    ids = [0, 1, 0, 1, 2, 3, 2, 4, 3, 5, 4, 5, 6, 6]
    currency_unit = Currency.JPY
    trades.append(make_trades("USDJPY", [trade], currency_unit, [ids]))
    trades.append(make_trades("EURJPY", [trade], currency_unit, [ids]))
    trades.append(make_trades("USDJPY", [trade], currency_unit, [ids]))
    return trades
@pytest.fixture
def markets():
    """Constant-mid feeds: USDJPY pinned at 2, EURJPY pinned at 4."""
    markets = []
    symbol = "USDJPY"
    currency_unit = Currency.JPY
    quote_currency = Currency.USD
    periods = 13
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-05 23:57:00", freq="1min", periods=periods),
        data=np.repeat(2, periods)[:, None],
        columns=["mid"],
    )
    markets.append(
        backlight.datasource.from_dataframe(
            df, symbol, currency_unit, quote_currency=quote_currency
        )
    )
    symbol = "EURJPY"
    currency_unit = Currency.JPY
    quote_currency = Currency.EUR
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-05 23:57:00", freq="1min", periods=periods),
        data=np.repeat(4, periods)[:, None],
        columns=["mid"],
    )
    markets.append(
        backlight.datasource.from_dataframe(
            df, symbol, currency_unit, quote_currency=quote_currency
        )
    )
    return markets
@pytest.fixture
def principal():
    """Starting principal per symbol."""
    return {symbol: 10 for symbol in ("USDJPY", "EURJPY")}
@pytest.fixture
def lot_size():
    """Per-symbol lot size for scaling trade amounts."""
    return {symbol: 2 for symbol in ("USDJPY", "EURJPY")}
def test_create_portfolio(trades, markets, principal, lot_size):
    """Golden-data check of create_portfolio (same-symbol trades fused)."""
    portfolio = module(trades, markets, principal, lot_size, Currency.USD)
    index = [
        "2018-06-05 23:59:00",
        "2018-06-06 00:00:00",
        "2018-06-06 00:01:00",
        "2018-06-06 00:02:00",
        "2018-06-06 00:03:00",
        "2018-06-06 00:04:00 ",
        "2018-06-06 00:05:00",
        "2018-06-06 00:06:00 ",
        "2018-06-06 00:07:00 ",
        "2018-06-06 00:08:00 ",
        "2018-06-06 00:09:00 ",
    ]
    data1 = [
        [0.0, 0.0, 5.0],
        [2.0, 2.0, 1.0],
        [0.0, 2.0, 5.0],
        [-2.0, 2.0, 9.0],
        [2.0, 2.0, 1.0],
        [4.0, 2.0, -3.0],
        [0.0, 2.0, 5.0],
        [-4.0, 2.0, 13.0],
        [-2.0, 2.0, 9.0],
        [0.0, 2.0, 5.0],
        [0.0, 2.0, 5.0],
    ]
    data2 = [
        [0.0, 0.0, 10.0],
        [4.0, 2.0, 6.0],
        [0.0, 2.0, 10.0],
        [-4.0, 2.0, 14.0],
        [4.0, 2.0, 6.0],
        [8.0, 2.0, 2.0],
        [0.0, 2.0, 10.0],
        [-8.0, 2.0, 18.0],
        [-4.0, 2.0, 14.0],
        [0.0, 2.0, 10.0],
        [0.0, 2.0, 10.0],
    ]
    data = [data1, data2]
    for (position, d) in zip(portfolio._positions, data):
        expected = pd.DataFrame(
            index=pd.to_datetime(index),
            data=d,
            columns=["amount", "price", "principal"],
        )
        assert ((expected == position).all()).all()
def test_fusion_positions():
    """Positions on the same symbol are summed; distinct symbols stay separate."""
    periods = 3
    data = np.arange(periods * 3).reshape((periods, 3))
    columns = ["amount", "price", "principal"]
    currency_unit = Currency.JPY
    positions_list = []
    # Two USDJPY position frames on overlapping date ranges...
    df = pd.DataFrame(
        data=data,
        index=pd.date_range("2012-1-1", periods=periods, freq="D"),
        columns=columns,
    )
    symbol = "USDJPY"
    positions_list.append(
        backlight.positions.positions.from_dataframe(df, symbol, currency_unit)
    )
    df = pd.DataFrame(
        data=data,
        index=pd.date_range("2012-1-2", periods=periods, freq="D"),
        columns=columns,
    )
    symbol = "USDJPY"
    positions_list.append(
        backlight.positions.positions.from_dataframe(df, symbol, currency_unit)
    )
    # ...and one EURJPY frame that must stay on its own.
    df = pd.DataFrame(
        data=data,
        index=pd.date_range("2012-1-4", periods=periods, freq="D"),
        columns=columns,
    )
    symbol = "EURJPY"
    positions_list.append(
        backlight.positions.positions.from_dataframe(df, symbol, currency_unit)
    )
    fusioned = _fusion_positions(positions_list)
    # Expected: the lone EURJPY frame unchanged, and the two USDJPY frames
    # summed where their dates overlap (hence the 4-row merged table).
    data1 = np.arange(periods * 3).reshape((periods, 3))
    data2 = [[0, 1, 2], [3, 5, 7], [9, 11, 13], [6, 7, 8]]
    df1 = pd.DataFrame(
        data=data1,
        index=pd.date_range("2012-1-1", periods=periods, freq="D"),
        columns=columns,
    )
    df2 = pd.DataFrame(
        data=data2,
        index=pd.date_range("2012-1-1", periods=periods + 1, freq="D"),
        columns=columns,
    )
    expected = [df1, df2]
    for exp, fus in zip(expected, fusioned):
        # Element-wise comparison. The original assertion
        # `exp.all().all() == fus.all().all()` only compared aggregated
        # truthiness of the two frames and passed for almost any data.
        assert ((exp == fus).all()).all()
| true | true |
f72db51aa93fbd1cba44def9ee99ae95419c4210 | 4,932 | py | Python | deeppavlov/models/slotfill/slotfill.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
] | 1 | 2020-12-20T19:56:52.000Z | 2020-12-20T19:56:52.000Z | deeppavlov/models/slotfill/slotfill.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
] | 1 | 2020-06-15T19:58:25.000Z | 2020-06-17T17:48:49.000Z | deeppavlov/models/slotfill/slotfill.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
] | 1 | 2020-08-14T05:01:50.000Z | 2020-08-14T05:01:50.000Z | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from logging import getLogger
from rapidfuzz import process
from overrides import overrides
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.utils import download
from deeppavlov.core.models.component import Component
from deeppavlov.core.models.serializable import Serializable
log = getLogger(__name__)
@register('dstc_slotfilling')
class DstcSlotFillingNetwork(Component, Serializable):
    """Slot filling for the DSTC2 task.

    Maps BIO-tagged NER output onto normalized slot values by fuzzy-matching
    the extracted entity strings against a dictionary of known slot values
    (including common misspellings), downloaded on first use.

    Args:
        threshold: minimal fuzzy-match similarity in [0, 1] required to
            accept a candidate slot value (rapidfuzz scores are 0..100).
    """

    def __init__(self, threshold: float = 0.8, **kwargs):
        super().__init__(**kwargs)
        self.threshold = threshold
        self._slot_vals = None
        # Load slot -> canonical value -> spelling-variants mapping
        # (downloads the file first if it is missing).
        self.load()

    @overrides
    def __call__(self, tokens_batch, tags_batch, *args, **kwargs):
        """Extract slot values for a batch of tokenized, BIO-tagged utterances."""
        # Independent dicts per utterance; `[{}] * n` would alias one dict.
        slots = [{} for _ in tokens_batch]
        # Only non-empty utterances are processed; empty ones keep {}.
        for i, (tokens, tags) in enumerate(zip(tokens_batch, tags_batch)):
            if tokens:
                slots[i] = self.predict_slots(tokens, tags)
        return slots

    def predict_slots(self, tokens, tags):
        """Return {slot: normalized_value} for entities found in one utterance."""
        entities, slots = self._chunk_finder(tokens, tags)
        slot_values = {}
        for entity, slot in zip(entities, slots):
            match, score = self.ner2slot(entity, slot)
            # rapidfuzz similarity is on a 0..100 scale.
            if score >= self.threshold * 100:
                slot_values[slot] = match
        return slot_values

    def ner2slot(self, input_entity, slot):
        """Fuzzy-match an extracted entity against known values of ``slot``.

        Returns:
            Tuple of (normalized slot value, similarity score in [0, 100]).
        """
        if isinstance(input_entity, list):
            input_entity = ' '.join(input_entity)
        entities = []
        normalized_slot_vals = []
        for entity_name in self._slot_vals[slot]:
            for entity in self._slot_vals[slot][entity_name]:
                entities.append(entity)
                normalized_slot_vals.append(entity_name)
        # extractOne avoids sorting every candidate (the old code used
        # process.extract(..., limit=2 ** 20)[0]); indexing the result keeps
        # this working with both (match, score) and (match, score, index)
        # rapidfuzz return formats.
        result = process.extractOne(input_entity, entities)
        best_match, score = result[0], result[1]
        return normalized_slot_vals[entities.index(best_match)], score

    @staticmethod
    def _chunk_finder(tokens, tags):
        """Extract entity chunks and their slot types from a BIO tag sequence.

        Returns:
            Tuple of (entity strings, corresponding slot names), in order of
            appearance.
        """
        prev_tag = ''
        chunk_tokens = []
        entities = []
        slots = []
        for token, tag in zip(tokens, tags):
            current_tag = tag.split('-')[-1].strip()
            current_prefix = tag.split('-')[0]
            if tag.startswith('B-'):
                # A new chunk begins: flush any chunk in progress.
                if len(chunk_tokens) > 0:
                    entities.append(' '.join(chunk_tokens))
                    slots.append(prev_tag)
                    chunk_tokens = []
                chunk_tokens.append(token)
            if current_prefix == 'I':
                if current_tag != prev_tag:
                    # Inconsistent I- continuation: close the previous chunk
                    # and drop the orphan token.
                    if len(chunk_tokens) > 0:
                        entities.append(' '.join(chunk_tokens))
                        slots.append(prev_tag)
                        chunk_tokens = []
                else:
                    chunk_tokens.append(token)
            if current_prefix == 'O':
                if len(chunk_tokens) > 0:
                    entities.append(' '.join(chunk_tokens))
                    slots.append(prev_tag)
                    chunk_tokens = []
            prev_tag = current_tag
        # Flush a chunk that runs to the end of the utterance.
        if len(chunk_tokens) > 0:
            entities.append(' '.join(chunk_tokens))
            slots.append(prev_tag)
        return entities, slots

    def _download_slot_vals(self):
        """Download the slot-values dictionary to ``self.save_path``."""
        url = 'http://files.deeppavlov.ai/datasets/dstc_slot_vals.json'
        download(self.save_path, url)

    def save(self, *args, **kwargs):
        """Persist the slot-values dictionary as JSON to ``self.save_path``."""
        with open(self.save_path, 'w', encoding='utf8') as f:
            json.dump(self._slot_vals, f)

    def serialize(self):
        """Return the slot-values dictionary as a JSON string."""
        return json.dumps(self._slot_vals)

    def load(self, *args, **kwargs):
        """Load the slot-values dictionary, downloading it first if absent."""
        if not self.load_path.exists():
            self._download_slot_vals()
        with open(self.load_path, encoding='utf8') as f:
            self._slot_vals = json.load(f)

    def deserialize(self, data):
        """Restore the slot-values dictionary from a JSON string."""
        self._slot_vals = json.loads(data)
| 38.232558 | 97 | 0.618005 |
import json
from logging import getLogger
from rapidfuzz import process
from overrides import overrides
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.utils import download
from deeppavlov.core.models.component import Component
from deeppavlov.core.models.serializable import Serializable
log = getLogger(__name__)
@register('dstc_slotfilling')
class DstcSlotFillingNetwork(Component, Serializable):
    """Slot filling for the DSTC2 task: map BIO-tagged NER output onto
    normalized slot values by fuzzy-matching extracted entities against a
    slot-values dictionary loaded from ``load_path``."""

    def __init__(self, threshold: float = 0.8, **kwargs):
        super().__init__(**kwargs)
        # Minimal fuzzy-match similarity in [0, 1] to accept a slot value.
        self.threshold = threshold
        self._slot_vals = None
        # Loads the slot -> canonical value -> variants mapping (downloads
        # the file first if it is missing).
        self.load()
    @overrides
    def __call__(self, tokens_batch, tags_batch, *args, **kwargs):
        """Extract slot values for a batch of tokenized, BIO-tagged utterances."""
        # NOTE(review): [{}] * n shares ONE dict across all entries; safe
        # here only because entries are rebound below, never mutated.
        slots = [{}] * len(tokens_batch)
        # Indices of non-empty utterances; empty ones keep an empty dict.
        m = [i for i, v in enumerate(tokens_batch) if v]
        if m:
            tags_batch = [tags_batch[i] for i in m]
            tokens_batch = [tokens_batch[i] for i in m]
            for i, tokens, tags in zip(m, tokens_batch, tags_batch):
                slots[i] = self.predict_slots(tokens, tags)
        return slots
    def predict_slots(self, tokens, tags):
        """Return {slot: normalized_value} for entities found in one utterance."""
        entities, slots = self._chunk_finder(tokens, tags)
        slot_values = {}
        for entity, slot in zip(entities, slots):
            match, score = self.ner2slot(entity, slot)
            # rapidfuzz similarity is on a 0..100 scale.
            if score >= self.threshold * 100:
                slot_values[slot] = match
        return slot_values
    def ner2slot(self, input_entity, slot):
        """Fuzzy-match an extracted entity against the known values of ``slot``
        and return (normalized value, similarity score)."""
        if isinstance(input_entity, list):
            input_entity = ' '.join(input_entity)
        entities = []
        normalized_slot_vals = []
        for entity_name in self._slot_vals[slot]:
            for entity in self._slot_vals[slot][entity_name]:
                entities.append(entity)
                normalized_slot_vals.append(entity_name)
        # assumes process.extract yields (match, score) pairs — newer
        # rapidfuzz returns (match, score, index) 3-tuples; verify the
        # pinned rapidfuzz version, otherwise this unpack raises.
        best_match, score = process.extract(input_entity, entities, limit=2 ** 20)[0]
        return normalized_slot_vals[entities.index(best_match)], score
    @staticmethod
    def _chunk_finder(tokens, tags):
        """Extract entity chunks and their slot types from a BIO tag sequence;
        returns (entity strings, corresponding slot names)."""
        prev_tag = ''
        chunk_tokens = []
        entities = []
        slots = []
        for token, tag in zip(tokens, tags):
            curent_tag = tag.split('-')[-1].strip()
            current_prefix = tag.split('-')[0]
            if tag.startswith('B-'):
                # A new chunk begins: flush any chunk in progress.
                if len(chunk_tokens) > 0:
                    entities.append(' '.join(chunk_tokens))
                    slots.append(prev_tag)
                    chunk_tokens = []
                chunk_tokens.append(token)
            if current_prefix == 'I':
                if curent_tag != prev_tag:
                    # Inconsistent I- continuation: close the previous chunk.
                    if len(chunk_tokens) > 0:
                        entities.append(' '.join(chunk_tokens))
                        slots.append(prev_tag)
                        chunk_tokens = []
                else:
                    chunk_tokens.append(token)
            if current_prefix == 'O':
                if len(chunk_tokens) > 0:
                    entities.append(' '.join(chunk_tokens))
                    slots.append(prev_tag)
                    chunk_tokens = []
            prev_tag = curent_tag
        # Flush a chunk that runs to the end of the utterance.
        if len(chunk_tokens) > 0:
            entities.append(' '.join(chunk_tokens))
            slots.append(prev_tag)
        return entities, slots
    def _download_slot_vals(self):
        """Download the slot-values dictionary to ``self.save_path``."""
        url = 'http://files.deeppavlov.ai/datasets/dstc_slot_vals.json'
        download(self.save_path, url)
    def save(self, *args, **kwargs):
        """Persist the slot-values dictionary as JSON to ``self.save_path``."""
        with open(self.save_path, 'w', encoding='utf8') as f:
            json.dump(self._slot_vals, f)
    def serialize(self):
        """Return the slot-values dictionary as a JSON string."""
        return json.dumps(self._slot_vals)
    def load(self, *args, **kwargs):
        """Load the slot-values dictionary, downloading it first if absent."""
        if not self.load_path.exists():
            self._download_slot_vals()
        with open(self.load_path, encoding='utf8') as f:
            self._slot_vals = json.load(f)
    def deserialize(self, data):
        """Restore the slot-values dictionary from a JSON string."""
        self._slot_vals = json.loads(data)
| true | true |
f72db855da14e3dfcd6bb920a64df7a3b7b4b780 | 22,456 | py | Python | BigGAN-PyTorch/BigGAN_remove_condbn+++++.py | twice154/Spatial-Self-modulation-on-BigGAN | 6ca691231bf7e8fd388a08b5ce6b4e30a50dd57b | [
"ECL-2.0"
] | null | null | null | BigGAN-PyTorch/BigGAN_remove_condbn+++++.py | twice154/Spatial-Self-modulation-on-BigGAN | 6ca691231bf7e8fd388a08b5ce6b4e30a50dd57b | [
"ECL-2.0"
] | null | null | null | BigGAN-PyTorch/BigGAN_remove_condbn+++++.py | twice154/Spatial-Self-modulation-on-BigGAN | 6ca691231bf7e8fd388a08b5ce6b4e30a50dd57b | [
"ECL-2.0"
] | null | null | null | import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import layers
from sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
  """Return per-output-resolution architecture dicts for the Generator.

  Each entry maps a model resolution (32..512) to per-block in/out channel
  counts, upsampling flags, block output resolutions, and a table of which
  resolutions receive a self-attention block. ``ksize`` and ``dilation``
  are accepted for interface compatibility but unused.
  """
  attn_res = {int(item) for item in attention.split('_')}

  def build(in_mults, out_mults, resolutions, attn_range):
    # One architecture entry; attention is flagged per power-of-two res.
    return {'in_channels': [ch * m for m in in_mults],
            'out_channels': [ch * m for m in out_mults],
            'upsample': [True] * len(out_mults),
            'resolution': resolutions,
            'attention': {2 ** i: (2 ** i in attn_res) for i in attn_range}}

  return {
      512: build([16, 16, 8, 8, 4, 2, 1], [16, 8, 8, 4, 2, 1, 1],
                 [8, 16, 32, 64, 128, 256, 512], range(3, 10)),
      256: build([16, 16, 8, 8, 4, 2], [16, 8, 8, 4, 2, 1],
                 [8, 16, 32, 64, 128, 256], range(3, 9)),
      128: build([16, 16, 8, 4, 2], [16, 8, 4, 2, 1],
                 [8, 16, 32, 64, 128], range(3, 8)),
      64: build([16, 16, 8, 4], [16, 8, 4, 2],
                [8, 16, 32, 64], range(3, 7)),
      32: build([4, 4, 4], [4, 4, 4],
                [8, 16, 32], range(3, 6)),
  }
class Generator(nn.Module):
  """BigGAN Generator with per-block spatial self-modulation.

  On top of the standard BigGAN stack (linear stem -> GBlocks (+attention)
  -> BN-ReLU-conv-tanh output), this variant runs a parallel stack of
  SpatialModulationGBlocks from (class embedding, z) that emits voxelwise
  scale/shift maps; after each main layer the activations are normalized
  per-sample and re-modulated with those maps (see ``forward``).
  """
  def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
               G_kernel_size=3, G_attn='64', n_classes=1000,
               num_G_SVs=1, num_G_SV_itrs=1,
               G_shared=True, shared_dim=0, hier=False,
               cross_replica=False, mybn=False,
               G_activation=nn.ReLU(inplace=False),
               G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
               BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
               G_init='ortho', skip_init=False, no_optim=False,
               G_param='SN', norm_style='bn',
               **kwargs):
    super(Generator, self).__init__()
    # Channel width multiplier
    self.ch = G_ch
    # Dimensionality of the latent space
    self.dim_z = dim_z
    # The initial spatial dimensions
    self.bottom_width = bottom_width
    # Resolution of the output
    self.resolution = resolution
    # Kernel size?
    self.kernel_size = G_kernel_size
    # Attention?
    self.attention = G_attn
    # number of classes, for use in categorical conditional generation
    self.n_classes = n_classes
    # Use shared embeddings?
    self.G_shared = G_shared
    # Dimensionality of the shared embedding? Unused if not using G_shared
    self.shared_dim = shared_dim if shared_dim > 0 else dim_z
    # Hierarchical latent space?
    self.hier = hier
    # Cross replica batchnorm?
    self.cross_replica = cross_replica
    # Use my batchnorm?
    self.mybn = mybn
    # nonlinearity for residual blocks
    self.activation = G_activation
    # Initialization style
    self.init = G_init
    # Parameterization style
    self.G_param = G_param
    # Normalization style
    self.norm_style = norm_style
    # Epsilon for BatchNorm?
    self.BN_eps = BN_eps
    # Epsilon for Spectral Norm?
    self.SN_eps = SN_eps
    # fp16?
    self.fp16 = G_fp16
    # Architecture dict
    self.arch = G_arch(self.ch, self.attention)[resolution]

    # If using hierarchical latents, adjust z
    if self.hier:
      # Number of places z slots into
      self.num_slots = len(self.arch['in_channels']) + 1
      self.z_chunk_size = (self.dim_z // self.num_slots)
      # Recalculate latent dimensionality for even splitting into chunks
      self.dim_z = self.z_chunk_size * self.num_slots
    else:
      self.num_slots = 1
      self.z_chunk_size = 0

    # Which convs, batchnorms, and linear layers to use
    if self.G_param == 'SN':
      self.which_conv = functools.partial(layers.SNConv2d,
                          kernel_size=3, padding=1,
                          num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                          eps=self.SN_eps)
      self.which_linear = functools.partial(layers.SNLinear,
                          num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                          eps=self.SN_eps)
    else:
      self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
      self.which_linear = nn.Linear

    # We use a non-spectral-normed embedding here regardless;
    # For some reason applying SN to G's embedding seems to randomly cripple G
    self.which_embedding = nn.Embedding
    # bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
    #              else self.which_embedding)
    # NOTE: unlike stock BigGAN, bn here is unconditional — the class/latent
    # conditioning path was removed (kwargs left commented for reference).
    self.which_bn = functools.partial(layers.bn,
                          # which_linear=bn_linear,
                          cross_replica=self.cross_replica,
                          mybn=self.mybn,
                          # input_size=(self.shared_dim + self.z_chunk_size if self.G_shared
                          #             else self.n_classes),
                          # norm_style=self.norm_style,
                          eps=self.BN_eps)

    # Prepare model
    # If not using shared embeddings, self.shared is just a passthrough
    self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
                    else layers.identity())
    # First linear layer
    self.linear = self.which_linear(self.dim_z // self.num_slots,
                                    self.arch['in_channels'][0] * (self.bottom_width **2))

    # self.blocks is a doubly-nested list of modules, the outer loop intended
    # to be over blocks at a given resolution (resblocks and/or self-attention)
    # while the inner loop is over a given block
    self.blocks = []
    for index in range(len(self.arch['out_channels'])):
      self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
                             out_channels=self.arch['out_channels'][index],
                             which_conv=self.which_conv,
                             which_bn=self.which_bn,
                             activation=self.activation,
                             upsample=(functools.partial(F.interpolate, scale_factor=2)
                                       if self.arch['upsample'][index] else None))]]
      # If attention on this block, attach it to the end
      if self.arch['attention'][self.arch['resolution'][index]]:
        print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]

    # Turn self.blocks into a ModuleList so that it's all properly registered.
    self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])

    # output layer: batchnorm-relu-conv.
    # Consider using a non-spectral conv here
    self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
                                                cross_replica=self.cross_replica,
                                                mybn=self.mybn),
                                    self.activation,
                                    self.which_conv(self.arch['out_channels'][-1], 3))

    # Prepare spatial modulation model
    # Class embedding for the modulation path (always an embedding here).
    self.spatial_modulation_shared = (self.which_embedding(n_classes, self.shared_dim))
    # First linear layer: mixes (class embedding, z) into the initial
    # bottom_width x bottom_width modulation feature map.
    self.spatial_modulation_linear = self.which_linear(self.dim_z + self.shared_dim,
                                    self.arch['in_channels'][0] * (self.bottom_width **2))
    # One SpatialModulationGBlock per main GBlock stage, mirroring the
    # channel/upsample schedule so the modulation maps track h's shape.
    self.spatial_modulation_blocks = []
    for index in range(len(self.arch['out_channels'])):
      self.spatial_modulation_blocks += [[layers.SpatialModulationGBlock(in_channels=self.arch['in_channels'][index],
                             out_channels=self.arch['out_channels'][index],
                             which_conv=self.which_conv,
                             which_bn=self.which_bn,
                             activation=self.activation,
                             upsample=(functools.partial(F.interpolate, scale_factor=2)
                                       if self.arch['upsample'][index] else None))]]
    # Turn the list into a ModuleList so that it's all properly registered.
    self.spatial_modulation_blocks = nn.ModuleList([nn.ModuleList(block) for block in self.spatial_modulation_blocks])

    # Initialize weights. Optionally skip init for testing.
    if not skip_init:
      self.init_weights()

    # Set up optimizer
    # If this is an EMA copy, no need for an optim, so just return now
    if no_optim:
      return
    self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
    if G_mixed_precision:
      print('Using fp16 adam in G...')
      import utils
      self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                           betas=(self.B1, self.B2), weight_decay=0,
                           eps=self.adam_eps)
    else:
      self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                           betas=(self.B1, self.B2), weight_decay=0,
                           eps=self.adam_eps)

    # LR scheduling, left here for forward compatibility
    # self.lr_sched = {'itr' : 0}# if self.progressive else {}
    # self.j = 0

  # Initialize
  def init_weights(self):
    """Initialize Conv/Linear/Embedding weights per ``self.init`` and count params."""
    self.param_count = 0
    for module in self.modules():
      if (isinstance(module, nn.Conv2d)
          or isinstance(module, nn.Linear)
          or isinstance(module, nn.Embedding)):
        if self.init == 'ortho':
          init.orthogonal_(module.weight)
        elif self.init == 'N02':
          init.normal_(module.weight, 0, 0.02)
        elif self.init in ['glorot', 'xavier']:
          init.xavier_uniform_(module.weight)
        else:
          print('Init style not recognized...')
        self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print('Param count for G''s initialized parameters: %d' % self.param_count)

  # Note on this forward function: we pass in a y vector which has
  # already been passed through G.shared to enable easy class-wise
  # interpolation later. If we passed in the one-hot and then ran it through
  # G.shared in this forward function, it would be harder to handle.
  def forward(self, z, y):
    """Generate images from latent ``z`` and (pre-embedded) class vector ``y``.

    Runs the modulation stack in lockstep with the main stack: at each stage
    the SpatialModulationGBlock emits voxelwise (a, b) maps; after every main
    layer, h is normalized per-sample and rescaled as h*(1+a)+b.
    """
    # If hierarchical, concatenate zs and ys
    if self.hier:
      zs = torch.split(z, self.z_chunk_size, 1)
      z = zs[0]
      ys = [torch.cat([y, item], 1) for item in zs[1:]]

      # Class embedding layer
      # spatial_c = self.spatial_modulation_shared(y)
      # Mixing layer (y here is already an embedding, so it is used directly)
      spatial_h = self.spatial_modulation_linear(torch.cat([y, z], 1))
      # Reshape
      spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)
    else:
      ys = [y] * len(self.blocks)

      # Class embedding layer
      spatial_c = self.spatial_modulation_shared(y)
      # Mixing layer; squeeze handles an extra singleton dim from Embedding
      # when y arrives as (batch, 1) rather than (batch,)
      if len(spatial_c.shape) == 3:
        spatial_c = torch.squeeze(spatial_c, dim=1)
      spatial_h = self.spatial_modulation_linear(torch.cat([spatial_c, z], 1))
      # Reshape
      spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)

    # First linear layer
    h = self.linear(z)
    # Reshape
    h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)

    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      # Spatial modulation calculation — presumably (a, b) maps match h's
      # spatial/channel shape at this stage; confirm in layers.py
      spatial_h, voxelwise_a_mod, voxelwise_b_mod = self.spatial_modulation_blocks[index][0](spatial_h)
      # Second inner loop in case block has multiple layers
      for block in blocklist:
        # Main layer forward
        h = block(h, ys[index])

        # Most coarse modulation
        # h = (h - torch.mean(h, dim=(2, 3), keepdim=True)) / torch.std(h, dim=(2, 3), keepdim=True)
        # h = h * (1 + global_a_mod.repeat(1, 1, h.shape[2], h.shape[3])) + global_b_mod.repeat(1, 1, h.shape[2], h.shape[3])
        # Most fine modulation: per-sample normalization over (C, H, W),
        # then voxelwise scale/shift from the modulation stack.
        h = (h - torch.mean(h, dim=(1, 2, 3), keepdim=True)) / torch.std(h, dim=(1, 2, 3), keepdim=True)
        h = h * (1 + voxelwise_a_mod) + voxelwise_b_mod

    # Apply batchnorm-relu-conv-tanh at output
    return torch.tanh(self.output_layer(h))
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
  """Return per-input-resolution architecture dicts for the Discriminator.

  Each entry maps a model resolution (32..256) to per-block in/out channel
  counts (input always starts from 3 RGB channels), downsampling flags,
  block output resolutions, and attention placement. ``ksize`` and
  ``dilation`` are accepted for interface compatibility but unused.
  """
  attn_res = {int(item) for item in attention.split('_')}

  def build(in_mults, out_mults, downsample, resolutions, attn_range):
    # One architecture entry; the first block reads raw RGB input.
    return {'in_channels': [3] + [ch * m for m in in_mults],
            'out_channels': [ch * m for m in out_mults],
            'downsample': downsample,
            'resolution': resolutions,
            'attention': {2 ** i: 2 ** i in attn_res for i in attn_range}}

  return {
      256: build([1, 2, 4, 8, 8, 16], [1, 2, 4, 8, 8, 16, 16],
                 [True] * 6 + [False], [128, 64, 32, 16, 8, 4, 4], range(2, 8)),
      128: build([1, 2, 4, 8, 16], [1, 2, 4, 8, 16, 16],
                 [True] * 5 + [False], [64, 32, 16, 8, 4, 4], range(2, 8)),
      64: build([1, 2, 4, 8], [1, 2, 4, 8, 16],
                [True] * 4 + [False], [32, 16, 8, 4, 4], range(2, 7)),
      32: build([4, 4, 4], [4, 4, 4, 4],
                [True, True, False, False], [16, 16, 16, 16], range(2, 6)),
  }
class Discriminator(nn.Module):
  """BigGAN projection Discriminator (DBlocks + optional self-attention,
  global sum pooling, and a class-projection head)."""
  def __init__(self, D_ch=64, D_wide=True, resolution=128,
               D_kernel_size=3, D_attn='64', n_classes=1000,
               num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
               D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
               SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
               D_init='ortho', skip_init=False, D_param='SN', **kwargs):
    super(Discriminator, self).__init__()
    # Width multiplier
    self.ch = D_ch
    # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
    self.D_wide = D_wide
    # Resolution
    self.resolution = resolution
    # Kernel size
    self.kernel_size = D_kernel_size
    # Attention?
    self.attention = D_attn
    # Number of classes
    self.n_classes = n_classes
    # Activation
    self.activation = D_activation
    # Initialization style
    self.init = D_init
    # Parameterization style
    self.D_param = D_param
    # Epsilon for Spectral Norm?
    self.SN_eps = SN_eps
    # Fp16?
    self.fp16 = D_fp16
    # Architecture
    self.arch = D_arch(self.ch, self.attention)[resolution]

    # Which convs, batchnorms, and linear layers to use
    # No option to turn off SN in D right now
    if self.D_param == 'SN':
      self.which_conv = functools.partial(layers.SNConv2d,
                          kernel_size=3, padding=1,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_linear = functools.partial(layers.SNLinear,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_embedding = functools.partial(layers.SNEmbedding,
                              num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                              eps=self.SN_eps)
    # Prepare model
    # self.blocks is a doubly-nested list of modules, the outer loop intended
    # to be over blocks at a given resolution (resblocks and/or self-attention)
    self.blocks = []
    for index in range(len(self.arch['out_channels'])):
      self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
                       out_channels=self.arch['out_channels'][index],
                       which_conv=self.which_conv,
                       wide=self.D_wide,
                       activation=self.activation,
                       preactivation=(index > 0),
                       downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
      # If attention on this block, attach it to the end
      if self.arch['attention'][self.arch['resolution'][index]]:
        print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
                                             self.which_conv)]
    # Turn self.blocks into a ModuleList so that it's all properly registered.
    self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
    # Linear output layer. The output dimension is typically 1, but may be
    # larger if we're e.g. turning this into a VAE with an inference output
    self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
    # Embedding for projection discrimination
    self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])

    # Initialize weights
    if not skip_init:
      self.init_weights()

    # Set up optimizer
    self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
    if D_mixed_precision:
      print('Using fp16 adam in D...')
      import utils
      self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    else:
      self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    # LR scheduling, left here for forward compatibility
    # self.lr_sched = {'itr' : 0}# if self.progressive else {}
    # self.j = 0

  # Initialize
  def init_weights(self):
    """Initialize Conv/Linear/Embedding weights per ``self.init`` and count params."""
    self.param_count = 0
    for module in self.modules():
      if (isinstance(module, nn.Conv2d)
          or isinstance(module, nn.Linear)
          or isinstance(module, nn.Embedding)):
        if self.init == 'ortho':
          init.orthogonal_(module.weight)
        elif self.init == 'N02':
          init.normal_(module.weight, 0, 0.02)
        elif self.init in ['glorot', 'xavier']:
          init.xavier_uniform_(module.weight)
        else:
          print('Init style not recognized...')
        self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print('Param count for D''s initialized parameters: %d' % self.param_count)

  def forward(self, x, y=None):
    """Return per-sample logits: unconditional score plus class projection."""
    # Stick x into h for cleaner for loops without flow control
    h = x
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      for block in blocklist:
        h = block(h)
    # Apply global sum pooling as in SN-GAN
    h = torch.sum(self.activation(h), [2, 3])
    # Get initial class-unconditional output
    out = self.linear(h)
    # Get projection of final featureset onto class vectors and add to evidence
    out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
    return out
# Parallelized G_D to minimize cross-gpu communication
# Without this, Generator outputs would get all-gathered and then rebroadcast.
class G_D(nn.Module):
  """Fused Generator+Discriminator forward pass.

  Running G and D inside one module keeps fake samples local to each GPU
  under DataParallel instead of gathering and re-broadcasting them.
  """
  def __init__(self, G, D):
    super(G_D, self).__init__()
    self.G = G
    self.D = D

  def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
              split_D=False):
    """Sample fakes from G and score them (and optionally reals ``x``) with D.

    Returns D's fake scores, (fake, real) score pairs when ``x`` is given,
    or (scores, G_z) tuples when ``return_G_z`` is set.
    """
    # Record the autograd tape through G only when G itself is being trained.
    with torch.set_grad_enabled(train_G):
      G_z = self.G(z, self.G.shared(gy))
      # Match precisions if G and D run at different float widths.
      if self.G.fp16 and not self.D.fp16:
        G_z = G_z.float()
      if self.D.fp16 and not self.G.fp16:
        G_z = G_z.half()
    if split_D:
      # Run D separately on the fake batch (and the real one, when given).
      D_fake = self.D(G_z, gy)
      if x is not None:
        return D_fake, self.D(x, dy)
      return (D_fake, G_z) if return_G_z else D_fake
    # Otherwise feed fakes and reals through D as one concatenated batch,
    # which is more efficient than two separate D passes.
    D_input = G_z if x is None else torch.cat([G_z, x], 0)
    D_class = gy if dy is None else torch.cat([gy, dy], 0)
    D_out = self.D(D_input, D_class)
    if x is not None:
      # Split the joint output back into (D_fake, D_real).
      return torch.split(D_out, [G_z.shape[0], x.shape[0]])
    return (D_out, G_z) if return_G_z else D_out
| 44.822355 | 123 | 0.59316 | import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import layers
from sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
    """Return per-output-resolution architecture dicts for the Generator.

    Attention placement is given as underscore-separated resolutions, e.g.
    '32_64' adds attention at 32x32 and 64x64. ``ksize``/``dilation`` are
    accepted for interface compatibility but unused.
    """
    arch = {}
    arch[512] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2, 1]],
               'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1, 1]],
               'upsample' : [True] * 7,
               'resolution' : [8, 16, 32, 64, 128, 256, 512],
               'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
                              for i in range(3,10)}}
    arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],
               'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],
               'upsample' : [True] * 6,
               'resolution' : [8, 16, 32, 64, 128, 256],
               'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
                              for i in range(3,9)}}
    arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],
               'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],
               'upsample' : [True] * 5,
               'resolution' : [8, 16, 32, 64, 128],
               'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
                              for i in range(3,8)}}
    arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],
               'out_channels' : [ch * item for item in [16, 8, 4, 2]],
               'upsample' : [True] * 4,
               'resolution' : [8, 16, 32, 64],
               'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
                              for i in range(3,7)}}
    arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],
               'out_channels' : [ch * item for item in [4, 4, 4]],
               'upsample' : [True] * 3,
               'resolution' : [8, 16, 32],
               'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
                              for i in range(3,6)}}
    return arch
class Generator(nn.Module):
def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
G_kernel_size=3, G_attn='64', n_classes=1000,
num_G_SVs=1, num_G_SV_itrs=1,
G_shared=True, shared_dim=0, hier=False,
cross_replica=False, mybn=False,
G_activation=nn.ReLU(inplace=False),
G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
G_init='ortho', skip_init=False, no_optim=False,
G_param='SN', norm_style='bn',
**kwargs):
super(Generator, self).__init__()
self.ch = G_ch
self.dim_z = dim_z
self.bottom_width = bottom_width
self.resolution = resolution
self.kernel_size = G_kernel_size
self.attention = G_attn
self.n_classes = n_classes
self.G_shared = G_shared
self.shared_dim = shared_dim if shared_dim > 0 else dim_z
self.hier = hier
self.cross_replica = cross_replica
self.mybn = mybn
self.activation = G_activation
self.init = G_init
self.G_param = G_param
self.norm_style = norm_style
self.BN_eps = BN_eps
self.SN_eps = SN_eps
self.fp16 = G_fp16
self.arch = G_arch(self.ch, self.attention)[resolution]
if self.hier:
self.num_slots = len(self.arch['in_channels']) + 1
self.z_chunk_size = (self.dim_z // self.num_slots)
self.dim_z = self.z_chunk_size * self.num_slots
else:
self.num_slots = 1
self.z_chunk_size = 0
if self.G_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
else:
self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
self.which_linear = nn.Linear
self.which_embedding = nn.Embedding
# bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
# else self.which_embedding)
self.which_bn = functools.partial(layers.bn,
# which_linear=bn_linear,
cross_replica=self.cross_replica,
mybn=self.mybn,
# input_size=(self.shared_dim + self.z_chunk_size if self.G_shared
# else self.n_classes),
# norm_style=self.norm_style,
eps=self.BN_eps)
# Prepare model
# If not using shared embeddings, self.shared is just a passthrough
self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
else layers.identity())
# First linear layer
self.linear = self.which_linear(self.dim_z // self.num_slots,
self.arch['in_channels'][0] * (self.bottom_width **2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
cross_replica=self.cross_replica,
mybn=self.mybn),
self.activation,
self.which_conv(self.arch['out_channels'][-1], 3))
self.spatial_modulation_shared = (self.which_embedding(n_classes, self.shared_dim))
self.spatial_modulation_linear = self.which_linear(self.dim_z + self.shared_dim,
self.arch['in_channels'][0] * (self.bottom_width **2))
self.spatial_modulation_blocks = []
for index in range(len(self.arch['out_channels'])):
self.spatial_modulation_blocks += [[layers.SpatialModulationGBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
self.spatial_modulation_blocks = nn.ModuleList([nn.ModuleList(block) for block in self.spatial_modulation_blocks])
# Initialize weights. Optionally skip init for testing.
if not skip_init:
self.init_weights()
# Set up optimizer
# If this is an EMA copy, no need for an optim, so just return now
if no_optim:
return
self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
if G_mixed_precision:
print('Using fp16 adam in G...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
print('Param count for G''s initialized parameters: %d' % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
  def forward(self, z, y):
    """Map latent z and (pre-shared) class embedding y to an image in [-1, 1].

    y is expected to have already been passed through G.shared (see the
    comment above this method). Assumes z is (batch, dim_z) and y is
    (batch, shared_dim) -- TODO confirm against callers.
    """
    # If hierarchical, concatenate zs and ys
    if self.hier:
      # Split z into chunks; the first drives the input layer, the rest are
      # concatenated with y to condition each block.
      zs = torch.split(z, self.z_chunk_size, 1)
      z = zs[0]
      ys = [torch.cat([y, item], 1) for item in zs[1:]]
      # Class embedding layer
      # spatial_c = self.spatial_modulation_shared(y)
      # Mixing layer
      spatial_h = self.spatial_modulation_linear(torch.cat([y, z], 1))
      # Reshape
      spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)
    else:
      # Non-hierarchical: every block sees the same y.
      ys = [y] * len(self.blocks)
      # Class embedding layer
      spatial_c = self.spatial_modulation_shared(y)
      # Mixing layer
      # Embedding lookups can come back (batch, 1, dim); drop the middle axis.
      if len(spatial_c.shape) == 3:
        spatial_c = torch.squeeze(spatial_c, dim=1)
      spatial_h = self.spatial_modulation_linear(torch.cat([spatial_c, z], 1))
      # Reshape
      spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)
    # First linear layer
    h = self.linear(z)
    # Reshape
    h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      # Spatial modulation calculation
      spatial_h, voxelwise_a_mod, voxelwise_b_mod = self.spatial_modulation_blocks[index][0](spatial_h)
      # Second inner loop in case block has multiple layers
      for block in blocklist:
        # Main layer forward
        h = block(h, ys[index])
        # Most coarse modulation
        # h = (h - torch.mean(h, dim=(2, 3), keepdim=True)) / torch.std(h, dim=(2, 3), keepdim=True)
        # h = h * (1 + global_a_mod.repeat(1, 1, h.shape[2], h.shape[3])) + global_b_mod.repeat(1, 1, h.shape[2], h.shape[3])
        # Most fine modulation
        # Normalize over (C, H, W) per sample, then apply the elementwise
        # affine modulation predicted from the spatial-modulation branch.
        h = (h - torch.mean(h, dim=(1, 2, 3), keepdim=True)) / torch.std(h, dim=(1, 2, 3), keepdim=True)
        h = h * (1 + voxelwise_a_mod) + voxelwise_b_mod
    # Apply batchnorm-relu-conv-tanh at output
    return torch.tanh(self.output_layer(h))
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
  """Return per-resolution discriminator architecture tables.

  Each entry maps an image resolution (256/128/64/32) to the block channel
  counts, downsampling flags, per-block feature-map resolutions, and a dict
  saying at which power-of-two resolutions self-attention is inserted.
  ksize/dilation are accepted for interface compatibility but unused here.
  """
  def _attn(max_power):
    # True for each 2**p resolution named in the underscore-separated string.
    wanted = [int(token) for token in attention.split('_')]
    return {2 ** p: (2 ** p) in wanted for p in range(2, max_power)}

  arch = {}
  arch[256] = {'in_channels': [3] + [ch * m for m in [1, 2, 4, 8, 8, 16]],
               'out_channels': [ch * m for m in [1, 2, 4, 8, 8, 16, 16]],
               'downsample': [True] * 6 + [False],
               'resolution': [128, 64, 32, 16, 8, 4, 4],
               'attention': _attn(8)}
  arch[128] = {'in_channels': [3] + [ch * m for m in [1, 2, 4, 8, 16]],
               'out_channels': [ch * m for m in [1, 2, 4, 8, 16, 16]],
               'downsample': [True] * 5 + [False],
               'resolution': [64, 32, 16, 8, 4, 4],
               'attention': _attn(8)}
  arch[64] = {'in_channels': [3] + [ch * m for m in [1, 2, 4, 8]],
              'out_channels': [ch * m for m in [1, 2, 4, 8, 16]],
              'downsample': [True] * 4 + [False],
              'resolution': [32, 16, 8, 4, 4],
              'attention': _attn(7)}
  arch[32] = {'in_channels': [3] + [ch * m for m in [4, 4, 4]],
              'out_channels': [ch * m for m in [4, 4, 4, 4]],
              'downsample': [True, True, False, False],
              'resolution': [16, 16, 16, 16],
              'attention': _attn(6)}
  return arch
class Discriminator(nn.Module):
  """BigGAN-style discriminator.

  Built from DBlocks (optionally with self-attention) per D_arch, followed by
  global sum-pooling, a linear score head, and a class-conditional projection
  term (embed(y) . h) added to the score.

  Fix: the original source had a mangled line where `def init_weights(self):`
  should be -- the method header is restored here.
  """

  def __init__(self, D_ch=64, D_wide=True, resolution=128,
               D_kernel_size=3, D_attn='64', n_classes=1000,
               num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
               D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
               SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
               D_init='ortho', skip_init=False, D_param='SN', **kwargs):
    super(Discriminator, self).__init__()
    self.ch = D_ch                # channel-width multiplier
    self.D_wide = D_wide          # wide (BigGAN/SA-GAN) vs skinny (SN-GAN) blocks
    self.resolution = resolution
    self.kernel_size = D_kernel_size
    self.attention = D_attn
    self.n_classes = n_classes
    self.activation = D_activation
    self.init = D_init
    self.D_param = D_param
    self.SN_eps = SN_eps
    self.fp16 = D_fp16
    # Architecture table for the requested resolution.
    self.arch = D_arch(self.ch, self.attention)[resolution]
    # Spectrally-normalized layer constructors. NOTE(review): if D_param is
    # not 'SN' these attributes are never set and block construction fails.
    if self.D_param == 'SN':
      self.which_conv = functools.partial(layers.SNConv2d,
                          kernel_size=3, padding=1,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_linear = functools.partial(layers.SNLinear,
                          num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                          eps=self.SN_eps)
      self.which_embedding = functools.partial(layers.SNEmbedding,
                              num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                              eps=self.SN_eps)
    # Build the DBlock stack; append an Attention layer where requested.
    self.blocks = []
    for index in range(len(self.arch['out_channels'])):
      self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
                       out_channels=self.arch['out_channels'][index],
                       which_conv=self.which_conv,
                       wide=self.D_wide,
                       activation=self.activation,
                       preactivation=(index > 0),
                       downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
      if self.arch['attention'][self.arch['resolution'][index]]:
        print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
                                             self.which_conv)]
    # Register everything properly.
    self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
    # Linear output layer. The output dimension is typically 1, but may be
    # larger if we're e.g. turning this into a VAE with an inference output.
    self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
    # Class embedding for the projection term in forward().
    self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
    if not skip_init:
      self.init_weights()
    # Optimizer setup.
    self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
    if D_mixed_precision:
      print('Using fp16 adam in D...')
      import utils
      self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    else:
      self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                             betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)

  # Initialize weights (restored method header -- original line was corrupted).
  def init_weights(self):
    """Apply the self.init scheme to Conv2d/Linear/Embedding weights and
    record the total element count in self.param_count."""
    self.param_count = 0
    for module in self.modules():
      if (isinstance(module, nn.Conv2d)
          or isinstance(module, nn.Linear)
          or isinstance(module, nn.Embedding)):
        if self.init == 'ortho':
          init.orthogonal_(module.weight)
        elif self.init == 'N02':
          init.normal_(module.weight, 0, 0.02)
        elif self.init in ['glorot', 'xavier']:
          init.xavier_uniform_(module.weight)
        else:
          print('Init style not recognized...')
        self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print('Param count for D''s initialized parameters: %d' % self.param_count)

  def forward(self, x, y=None):
    """Score images x (with class labels y) -- higher means more real."""
    h = x
    for index, blocklist in enumerate(self.blocks):
      for block in blocklist:
        h = block(h)
    # Global sum pooling over spatial dims after the final activation.
    h = torch.sum(self.activation(h), [2, 3])
    out = self.linear(h)
    # Class-conditional projection term.
    out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
    return out
class G_D(nn.Module):
  """Parallelizable wrapper that runs G and D together in one forward pass."""

  def __init__(self, G, D):
    super(G_D, self).__init__()
    self.G = G
    self.D = D

  def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
              split_D=False):
    """Generate fakes from (z, gy) and score them (and optionally reals x/dy).

    Gradients flow into G only when train_G is True. With split_D, fakes and
    reals go through D in two separate passes; otherwise they are scored in
    one concatenated batch for efficiency.
    """
    with torch.set_grad_enabled(train_G):
      # Sample fakes; gy is first mapped through G's shared embedding.
      fake = self.G(z, self.G.shared(gy))
      # Reconcile half/float precision between the two networks.
      if self.G.fp16 and not self.D.fp16:
        fake = fake.float()
      if self.D.fp16 and not self.G.fp16:
        fake = fake.half()
      if split_D:
        score_fake = self.D(fake, gy)
        if x is not None:
          return score_fake, self.D(x, dy)
        if return_G_z:
          return score_fake, fake
        return score_fake
      # Single D pass over the concatenated fake/real batch.
      d_input = fake if x is None else torch.cat([fake, x], 0)
      d_class = gy if dy is None else torch.cat([gy, dy], 0)
      scores = self.D(d_input, d_class)
      if x is not None:
        return torch.split(scores, [fake.shape[0], x.shape[0]])  # D_fake, D_real
      if return_G_z:
        return scores, fake
      return scores
| true | true |
f72db881c82bb2d2eb69127cc586e7b8a820eb99 | 1,374 | py | Python | thym/timer.py | apgeorg/thyme | 47bf312c27f76c6392370a499a23738dcae6ad17 | [
"MIT"
] | 2 | 2018-12-10T11:31:50.000Z | 2018-12-10T14:44:14.000Z | thym/timer.py | apgeorg/thyme | 47bf312c27f76c6392370a499a23738dcae6ad17 | [
"MIT"
] | 1 | 2019-06-25T09:05:32.000Z | 2019-06-25T09:05:32.000Z | thym/timer.py | apgeorg/thyme | 47bf312c27f76c6392370a499a23738dcae6ad17 | [
"MIT"
] | null | null | null | import threading
import time
class Timer(object):
    """Callback timer built on threading.Timer.

    Fires ``callback_func(*args, **kwargs)`` after ``interval`` seconds.
    One-shot timers stop after firing; periodic timers (the default) rearm
    themselves. ``elapsed``/``remaining`` are snapshots taken at stop time.

    Improvements over the original: durations use time.monotonic() so they
    are immune to wall-clock adjustments, and restarting a running timer
    cancels the stale threading.Timer instead of leaking a second live one.
    """

    def __init__(self, interval, callback_func, oneshot=False, args=None, kwargs=None):
        self._interval = interval
        self._oneshot = oneshot
        self._f = callback_func
        # Avoid mutable default arguments by resolving None here.
        self._args = args if args is not None else []
        self._kwargs = kwargs if kwargs is not None else {}
        self._timer = None           # live threading.Timer, or None when idle
        self._start_time = None      # monotonic timestamp of the last start()
        self._elapsed_time = None    # seconds run when last stopped
        self._remaining_time = None  # seconds left on the interval when stopped

    def _callback(self):
        # Runs on the worker thread: invoke the user callback, then either
        # rearm (periodic) or finalize bookkeeping (one-shot).
        self._f(*self._args, **self._kwargs)
        if self._oneshot:
            self.stop()
        else:
            self.start()

    def stop(self):
        """Cancel any pending timer and record elapsed/remaining durations."""
        if self._timer is not None:
            self._timer.cancel()
            self._elapsed_time = time.monotonic() - self._start_time
            self._remaining_time = self._interval - self._elapsed_time
            self._timer = None

    def start(self, interval=None):
        """(Re)arm the timer, optionally overriding the interval in seconds."""
        if interval is not None:
            self._interval = interval
        if self._timer is not None:
            # Bug fix: without this, a second start() leaked a live timer.
            self._timer.cancel()
        self._timer = threading.Timer(self._interval, self._callback)
        self._start_time = time.monotonic()
        self._timer.start()

    @property
    def elapsed(self):
        """Seconds the timer had been running when it was last stopped."""
        return self._elapsed_time

    @property
    def remaining(self):
        """Seconds that were left on the interval when it was last stopped."""
        return self._remaining_time

    @property
    def running(self):
        """True while a threading.Timer is armed."""
        return self._timer is not None
| 28.040816 | 87 | 0.61936 | import threading
import time
class Timer(object):
    """One-shot or periodic callback timer wrapping threading.Timer."""

    def __init__(self, interval, callback_func, oneshot=False, args=None, kwargs=None):
        self._interval = interval
        self._oneshot = oneshot
        self._f = callback_func
        self._args = [] if args is None else args
        self._kwargs = {} if kwargs is None else kwargs
        self._timer = None
        self._start_time = None
        self._elapsed_time = None
        self._remaining_time = None

    def _callback(self):
        # Worker-thread entry: fire the callback, then rearm or finalize.
        self._f(*self._args, **self._kwargs)
        if self._oneshot:
            self.stop()
        else:
            self.start()

    def stop(self):
        """Cancel the pending timer and snapshot elapsed/remaining seconds."""
        if not self._timer:
            return
        self._timer.cancel()
        now = time.time()
        self._elapsed_time = now - self._start_time
        self._remaining_time = self._interval - self._elapsed_time
        self._timer = None

    def start(self, interval=None):
        """Arm the timer, optionally replacing the interval."""
        if interval is not None:
            self._interval = interval
        self._timer = threading.Timer(self._interval, self._callback)
        self._start_time = time.time()
        self._timer.start()

    @property
    def elapsed(self):
        """Seconds the timer had been running when last stopped."""
        return self._elapsed_time

    @property
    def remaining(self):
        """Seconds that were left on the interval when last stopped."""
        return self._remaining_time

    @property
    def running(self):
        """True while a threading.Timer is armed."""
        return bool(self._timer)
| true | true |
f72dba625d6c9634543fe15a9ee5be84ebf0ba31 | 22,250 | py | Python | basic_samples/SDS/Python/SDSPy/Python3/program.py | hanhossain/OCS-Samples | 6f0f7878e6d9bccc32b6d663446678e070859d14 | [
"Apache-2.0"
] | null | null | null | basic_samples/SDS/Python/SDSPy/Python3/program.py | hanhossain/OCS-Samples | 6f0f7878e6d9bccc32b6d663446678e070859d14 | [
"Apache-2.0"
] | null | null | null | basic_samples/SDS/Python/SDSPy/Python3/program.py | hanhossain/OCS-Samples | 6f0f7878e6d9bccc32b6d663446678e070859d14 | [
"Apache-2.0"
] | null | null | null | # program.py
#
# Copyright (C) 2018 OSIsoft, LLC. All rights reserved.
#
# THIS SOFTWARE CONTAINS CONFIDENTIAL INFORMATION AND TRADE SECRETS OF
# OSIsoft, LLC. USE, DISCLOSURE, OR REPRODUCTION IS PROHIBITED WITHOUT
# THE PRIOR EXPRESS WRITTEN PERMISSION OF OSIsoft, LLC.
#
# RESTRICTED RIGHTS LEGEND
# Use, duplication, or disclosure by the Government is subject to restrictions
# as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and
# Computer Software clause at DFARS 252.227.7013
#
# OSIsoft, LLC
# 1600 Alvarado St, San Leandro, CA 94577
from sdspy import *
import configparser
import datetime
import time
import math
import inspect
import collections
#returns a type that represents the WaveData data
def getWaveDataType(sampleTypeId):
    """Build and return the SdsType describing WaveData events.

    'Order' is the key (primary index, Int32); all remaining properties
    are doubles. Raises TypeError when sampleTypeId is not a string.
    """
    if sampleTypeId is None or not isinstance(sampleTypeId, str):
        raise TypeError("sampleTypeId is not an instantiated string")
    intType = SdsType()
    intType.Id = "intType"
    intType.SdsTypeCode = SdsTypeCode.Int32
    doubleType = SdsType()
    doubleType.Id = "doubleType"
    doubleType.SdsTypeCode = SdsTypeCode.Double
    # (property id, backing SdsType, is primary key)
    specs = [("Order", intType, True),
             ("Tau", doubleType, False),
             ("Radians", doubleType, False),
             ("Sin", doubleType, False),
             ("Cos", doubleType, False),
             ("Tan", doubleType, False),
             ("Sinh", doubleType, False),
             ("Cosh", doubleType, False),
             ("Tanh", doubleType, False)]
    properties = []
    for prop_id, prop_type, is_key in specs:
        prop = SdsTypeProperty()
        prop.Id = prop_id
        prop.SdsType = prop_type
        if is_key:
            prop.IsKey = True
        properties.append(prop)
    # Assemble the composite WaveData SdsType.
    wave = SdsType()
    wave.Id = sampleTypeId
    wave.Name = "WaveDataSample"
    wave.Description = "This is a sample Sds type for storing WaveData type events"
    wave.SdsTypeCode = SdsTypeCode.Object
    wave.Properties = properties
    return wave
#returns a type that represents the WaveDataTarget data
def getWaveDataTargetType(sampleTypeId):
    """Build and return the SdsType describing WaveDataTarget events.

    'OrderTarget' is the key (primary index, Int32); all remaining
    properties are doubles. Raises TypeError when sampleTypeId is not a
    string.

    Fix: the returned type's Id now comes from the sampleTypeId parameter
    instead of silently reading the module-level sampleTargetTypeId global
    (the existing caller passes that global, so behavior is unchanged).
    """
    if sampleTypeId is None or not isinstance(sampleTypeId, str):
        raise TypeError("sampleTypeId is not an instantiated string")
    intType = SdsType()
    intType.Id = "intType"
    intType.SdsTypeCode = SdsTypeCode.Int32
    doubleType = SdsType()
    doubleType.Id = "doubleType"
    doubleType.SdsTypeCode = SdsTypeCode.Double
    # (property id, backing SdsType, is primary key)
    specs = [("OrderTarget", intType, True),
             ("TauTarget", doubleType, False),
             ("RadiansTarget", doubleType, False),
             ("SinTarget", doubleType, False),
             ("CosTarget", doubleType, False),
             ("TanTarget", doubleType, False),
             ("SinhTarget", doubleType, False),
             ("CoshTarget", doubleType, False),
             ("TanhTarget", doubleType, False)]
    properties = []
    for prop_id, prop_type, is_key in specs:
        prop = SdsTypeProperty()
        prop.Id = prop_id
        prop.SdsType = prop_type
        if is_key:
            prop.IsKey = True
        properties.append(prop)
    wave = SdsType()
    # Bug fix: use the parameter rather than the sampleTargetTypeId global.
    wave.Id = sampleTypeId
    wave.Name = "WaveDataTargetSample"
    wave.Description = "This is a sample Sds type for storing WaveDataTarget type events"
    wave.SdsTypeCode = SdsTypeCode.Object
    wave.Properties = properties
    return wave
#returns a type that represents WaveDataInteger data
def getWaveDataIntegerType(sampleTypeId):
    """Build and return the SdsType describing WaveDataInteger events.

    'OrderTarget' is the key (primary index); all properties are Int32.
    Raises TypeError when sampleTypeId is not a string.

    Fix: the returned type's Id now comes from the sampleTypeId parameter
    instead of silently reading the module-level sampleIntegerTypeId global
    (the existing caller passes that global, so behavior is unchanged).
    """
    if sampleTypeId is None or not isinstance(sampleTypeId, str):
        raise TypeError("sampleTypeId is not an instantiated string")
    intType = SdsType()
    intType.Id = "intType"
    intType.SdsTypeCode = SdsTypeCode.Int32
    # (property id, is primary key)
    specs = [("OrderTarget", True),
             ("SinInt", False),
             ("CosInt", False),
             ("TanInt", False)]
    properties = []
    for prop_id, is_key in specs:
        prop = SdsTypeProperty()
        prop.Id = prop_id
        prop.SdsType = intType
        if is_key:
            prop.IsKey = True
        properties.append(prop)
    wave = SdsType()
    # Bug fix: use the parameter rather than the sampleIntegerTypeId global.
    wave.Id = sampleTypeId
    wave.Name = "WaveDataIntegerSample"
    wave.Description = "This is a sample Sds type for storing WaveDataInteger type events"
    wave.SdsTypeCode = SdsTypeCode.Object
    wave.Properties = properties
    return wave
# Generate a new WaveData event
def nextWave(now, interval, multiplier, order):
    """Generate one WaveData event whose phase derives from the time of day.

    now      -- datetime whose time-of-day sets the phase
    interval -- datetime whose time-of-day is the wave period
    multiplier -- amplitude applied to every trig value
    order    -- value stored as the event's key
    """
    def _ms_since_midnight(moment):
        midnight = moment.replace(hour=0, minute=0, second=0, microsecond=0)
        return (moment - midnight).total_seconds() * 1000

    total_ms = _ms_since_midnight(now)
    interval_ms = _ms_since_midnight(interval)
    # Fraction of the interval elapsed, mapped onto a full circle.
    radians = ((total_ms % interval_ms) / interval_ms) * 2 * math.pi

    event = WaveData()
    event.Order = order
    event.Radians = radians
    event.Tau = radians / (2 * math.pi)
    event.Sin = multiplier * math.sin(radians)
    event.Cos = multiplier * math.cos(radians)
    event.Tan = multiplier * math.tan(radians)
    event.Sinh = multiplier * math.sinh(radians)
    event.Cosh = multiplier * math.cosh(radians)
    event.Tanh = multiplier * math.tanh(radians)
    return event
# we'll use the following for cleanup, supressing errors
def supressError(sdsCall):
    """Run the zero-argument callable, printing (not propagating) any error.

    Used during cleanup so one failed delete does not abort the rest.
    """
    try:
        sdsCall()
    except Exception as err:
        print("Encountered Error: {error}".format(error=err))
def isprop(v):
    """Predicate for inspect.getmembers: True when v is a property descriptor."""
    return isinstance(v, property)
def toString(event):
    """Render an event as 'Name: value, Name: value, ...' in a fixed order."""
    string = ""
    # getmembers returns (name, property) pairs sorted alphabetically by name.
    props = inspect.getmembers(type(event), isprop)
    # Indices into the alphabetical list that yield the display order; this
    # assumes the event type exposes exactly the nine WaveData properties --
    # TODO confirm before using with any other event type.
    printOrder = [2,3,4,0,6,5,1,7,8]
    orderedProps = [props[i] for i in printOrder]
    for prop in orderedProps:
        value = prop[1].fget(event)
        if value is None:
            string += "{name}: , ".format(name = prop[0])
        else:
            string += "{name}: {value}, ".format(name = prop[0], value = value)
    # Drop the trailing ", ".
    return string[:-2]
def toWaveData(jsonObj):
    """Deserialize a dict of property values into a WaveData event.

    JSON serializers often omit default-valued fields, so every property is
    first zeroed and then overwritten with any non-None value present.
    """
    wave = WaveData()
    for name, descriptor in inspect.getmembers(type(wave), isprop):
        # Pre-assign the default so omitted fields are well-defined.
        descriptor.fset(wave, 0)
        if name in jsonObj:
            value = jsonObj[name]
            if value is not None:
                descriptor.fset(wave, value)
    return wave
######################################################################################################
# The following define the identifiers we'll use throughout
######################################################################################################
# Identifiers for the SDS artifacts created (and later deleted) by this sample.
sampleTypeId = "WaveData_SampleType"              # primary WaveData SdsType
sampleTargetTypeId = "WaveDataTarget_SampleType"  # same-shape target type for streamview mapping
sampleIntegerTypeId = "WaveData_IntegerType"      # integer-valued target type
sampleStreamId = "WaveData_SampleStream"          # stream holding the WaveData events
sampleBehaviorId = "WaveData_SampleBehavior"      # NOTE(review): appears unused in this sample
sampleStreamViewId = "WaveData_SampleStreamView"      # auto-mapped streamview
sampleStreamViewIntId = "WaveData_SampleIntStreamView"  # explicitly-mapped integer streamview
try:
    # Read connection settings; config.ini holds tenant and credential secrets.
    config = configparser.ConfigParser()
    config.read('config.ini')
    client = SdsClient(config.get('Access', 'ApiVersion'), config.get('Access', 'Tenant'), config.get('Access', 'Address'), config.get('Credentials', 'Resource'),
                       config.get('Credentials', 'Authority'), config.get('Credentials', 'ClientId'), config.get('Credentials', 'ClientSecret'))
    namespaceId = config.get('Configurations', 'Namespace')
    print("------------------------------------------")
    print(" _________ .___ __________ ")
    print(" / _____/ __| _/_____\______ \___.__.")
    print(" \_____ \ / __ |/ ___/| ___< | |")
    print(" / \/ /_/ |\___ \ | | \___ |")
    print("/_______ /\____ /____ >|____| / ____|")
    print(" \/ \/ \/ \/ ")
    print("------------------------------------------")
    print("Sds endpoint at {url}".format(url = client.Uri))
    print()
    ######################################################################################################
    # SdsType get or creation
    ######################################################################################################
    print("Creating an SdsType")
    waveType = getWaveDataType(sampleTypeId)
    waveType = client.getOrCreateType(namespaceId, waveType)
    ######################################################################################################
    # Sds Stream creation
    ######################################################################################################
    print("Creating an SdsStream")
    stream = SdsStream()
    stream.Id = sampleStreamId
    stream.Name = "WaveStreamPySample"
    stream.Description = "A Stream to store the WaveData events"
    stream.TypeId = waveType.Id
    stream.BehaviorId = None
    client.createOrUpdateStream(namespaceId, stream)
    ######################################################################################################
    # CRUD operations for events
    ######################################################################################################
    start = datetime.datetime.now()
    # span's time-of-day (one minute) is the wave period consumed by nextWave.
    span = datetime.datetime.strptime("0:1:0", "%H:%M:%S")
    print("Inserting data")
    # Insert a single event
    event = nextWave(start, span, 2.0, 0)
    client.insertValue(namespaceId, stream.Id, event)
    # Insert a list of events
    waves = []
    for i in range(2, 20, 2):
        waves.append(nextWave(start + datetime.timedelta(seconds = i * 0.2), span, 2.0, i))
    client.insertValues(namespaceId, stream.Id, waves)
    # Get the last inserted event in a stream
    print("Getting latest event")
    wave = client.getLastValue(namespaceId, stream.Id, WaveData)
    print(toString(wave))
    print()
    # Get all the events
    waves = client.getWindowValues(namespaceId, stream.Id, WaveData, 0, 40)
    print("Getting all events")
    print("Total events found: " + str(len(waves)))
    for wave in waves:
        print(toString(wave))
    print()
    print("Updating events")
    # Update the first event
    event = nextWave(start, span, 4.0, 0)
    client.updateValue(namespaceId, stream.Id, event)
    # Update the rest of the events, adding events that have no prior index entry
    updatedEvents = []
    for i in range(2, 40, 2):
        event = nextWave(start + datetime.timedelta(seconds = i * 0.2), span, 4.0, i)
        updatedEvents.append(event)
    client.updateValues(namespaceId, stream.Id, updatedEvents)
    # Get all the events
    waves = client.getWindowValues(namespaceId, stream.Id, WaveData, 0, 40)
    print("Getting updated events")
    print("Total events found: " + str(len(waves)))
    for wave in waves:
        print(toString(wave))
    print()
    print("Replacing events")
    # replace one value
    event = nextWave(start, span, 10.0, 0)
    client.replaceValue(namespaceId, stream.Id, event)
    # replace multiple values
    replacedEvents = []
    for i in range(2, 40, 2):
        event = nextWave(start + datetime.timedelta(seconds=i * 0.2), span, 10.0, i)
        replacedEvents.append(event)
    client.replaceValues(namespaceId, stream.Id, replacedEvents)
    # Get all the events
    waves = client.getWindowValues(namespaceId, stream.Id, WaveData, 0, 40)
    print("Getting replaced events")
    print("Total events found: " + str(len(waves)))
    for wave in waves:
        print(toString(wave))
    print()
    ######################################################################################################
    # Property Overrides
    ######################################################################################################
    print("Property Overrides")
    print("Sds can interpolate or extrapolate data at an index location where data does not explicitly exist:")
    print()
    # We will retrieve three events using the default behavior, Continuous
    waves = client.getRangeValues(namespaceId, stream.Id, WaveData, "1", 0, 3, False, SdsBoundaryType.ExactOrCalculated)
    print("Default (Continuous) requesting data starting at index location '1', where we have not entered data, Sds will interpolate a value for each property:")
    for wave in waves:
        print(("Order: {order}: Radians: {radians} Cos: {cos}".format(order = wave.Order, radians = wave.Radians, cos = wave.Cos)))
    # Create a Discrete stream PropertyOverride indicating that we do not want Sds to calculate a value for Radians and update our stream
    propertyOverride = SdsStreamPropertyOverride()
    propertyOverride.SdsTypePropertyId = 'Radians'
    # 3 presumably maps to the Discrete interpolation mode -- TODO confirm
    # against the SdsInterpolationMode enum in sdspy.
    propertyOverride.InterpolationMode = 3
    # update the stream
    props = [propertyOverride]
    stream.PropertyOverrides = props
    client.createOrUpdateStream(namespaceId, stream)
    waves = client.getRangeValues(namespaceId, stream.Id, WaveData, "1", 0, 3, False, SdsBoundaryType.ExactOrCalculated)
    print()
    print("We can override this behavior on a property by property basis, here we override the Radians property instructing Sds not to interpolate.")
    print("Sds will now return the default value for the data type:")
    for wave in waves:
        print(("Order: {order}: Radians: {radians} Cos: {cos}".format(order = wave.Order, radians = wave.Radians, cos = wave.Cos)))
    ######################################################################################################
    # Stream StreamViews
    ######################################################################################################
    #Create additional types to define our targets
    waveTargetType = getWaveDataTargetType(sampleTargetTypeId)
    waveTargetType = client.getOrCreateType(namespaceId, waveTargetType)
    waveIntegerType = getWaveDataIntegerType(sampleIntegerTypeId)
    waveIntegerType = client.getOrCreateType(namespaceId, waveIntegerType)
    #Create an SdsStreamViewProperty objects when we want to explicitly map one property to another
    vp1 = SdsStreamViewProperty()
    vp1.SourceId = "Order"
    vp1.TargetId = "OrderTarget"
    vp2 = SdsStreamViewProperty()
    vp2.SourceId = "Sin"
    vp2.TargetId = "SinInt"
    vp3 = SdsStreamViewProperty()
    vp3.SourceId = "Cos"
    vp3.TargetId = "CosInt"
    vp4 = SdsStreamViewProperty()
    vp4.SourceId = "Tan"
    vp4.TargetId = "TanInt"
    #Create a streamView mapping our original type to our target type, data shape is the same so let Sds handle the mapping
    streamView = SdsStreamView()
    streamView.Id = sampleStreamViewId
    streamView.Name = "SampleStreamView"
    streamView.TargetTypeId = waveTargetType.Id
    streamView.SourceTypeId = waveType.Id
    #Data shape and data types are different so include explicit mappings between properties
    manualStreamView = SdsStreamView()
    manualStreamView.Id = sampleStreamViewIntId
    manualStreamView.Name = "SampleIntStreamView"
    manualStreamView.TargetTypeId = waveIntegerType.Id
    manualStreamView.SourceTypeId = waveType.Id
    manualStreamView.Properties = [vp1, vp2, vp3, vp4]
    automaticStreamView = client.getOrCreateStreamView(namespaceId, streamView)
    manualStreamView = client.getOrCreateStreamView(namespaceId, manualStreamView)
    streamViewMap1 = SdsStreamViewMap()
    streamViewMap1 = client.getStreamViewMap(namespaceId, automaticStreamView.Id)
    streamViewMap2 = SdsStreamViewMap()
    streamViewMap2 = client.getStreamViewMap(namespaceId, manualStreamView.Id)
    rangeWaves = client.getRangeValues(namespaceId, stream.Id, WaveData, "1", 0, 3, False, SdsBoundaryType.ExactOrCalculated)
    print()
    print("SdsStreamViews")
    print("Here is some of our data as it is stored on the server:")
    for way in rangeWaves:
        print(("Sin: {sin}, Cos: {cos}, Tan: {tan}".format(sin = way.Sin, cos = way.Cos, tan = way.Tan)))
    #StreamView data when retrieved with a streamView
    rangeWaves = client.getRangeValues(namespaceId, stream.Id, WaveDataTarget, "1", 0, 3, False, SdsBoundaryType.ExactOrCalculated, automaticStreamView.Id)
    print()
    print("Specifying a streamView with an SdsType of the same shape returns values that are automatically mapped to the target SdsType's properties:")
    for way in rangeWaves:
        print(("SinTarget: {sinTarget}, CosTarget: {cosTarget}, TanTarget: {tanTarget}".format(sinTarget = way.SinTarget, cosTarget = way.CosTarget, tanTarget = way.TanTarget)))
    rangeWaves = client.getRangeValues(namespaceId, stream.Id, WaveDataInteger, "1", 0, 3, False, SdsBoundaryType.ExactOrCalculated, manualStreamView.Id)
    print()
    print("SdsStreamViews can also convert certain types of data, here we return integers where the original values were doubles:")
    for way in rangeWaves:
        print(("SinInt: {sinInt}, CosInt: {cosInt}, TanInt: {tanInt}".format(sinInt = way.SinInt, cosInt = way.CosInt, tanInt = way.TanInt)))
    print ()
    print ("We can query Sds to return the SdsStreamViewMap for our SdsStreamView, here is the one generated automatically:")
    for prop in streamViewMap1.Properties:
        print(("{source} => {dest}".format(source = prop.SourceId, dest = prop.TargetId)))
    print ()
    print ("Here is our explicit mapping, note SdsStreamViewMap will return all properties of the Source Type, even those without a corresponding Target property:")
    for prop in streamViewMap2.Properties:
        if hasattr(prop,'TargetId'):
            print(("{source} => {dest}".format(source = prop.SourceId, dest = prop.TargetId)))
        else:
            print(("{source} => {dest}".format(source = prop.SourceId, dest = 'Not mapped')))
    ######################################################################################################
    # Tags and Metadata
    ######################################################################################################
    print()
    print("Let's add some Tags and Metadata to our stream:")
    tags = ["waves", "periodic", "2018", "validated"]
    metadata = { "Region":"North America" , "Country":"Canada","Province":"Quebec" }
    client.createOrUpdateTags(namespaceId, stream.Id, tags)
    client.createOrUpdateMetadata(namespaceId, stream.Id, metadata)
    print()
    print("Tags now associated with ", stream.Id)
    print(client.getTags(namespaceId, stream.Id))
    region = client.getMetadata(namespaceId, stream.Id, "Region")
    country = client.getMetadata(namespaceId, stream.Id, "Country")
    province = client.getMetadata(namespaceId, stream.Id, "Province")
    print()
    print("Metadata now associated with", stream.Id, ":")
    print("Metadata key Region: ", region)
    print("Metadata key Country: ", country)
    print("Metadata key Province: ", province)
    print()
    ######################################################################################################
    # Delete events
    ######################################################################################################
    print()
    print('Deleting values from the SdsStream')
    # remove a single value from the stream
    client.removeValue(namespaceId, stream.Id, 0)
    # remove multiple values from the stream
    client.removeWindowValues(namespaceId, stream.Id, 0, 40)
    try:
        event = client.getLastValue(namespaceId, stream.Id, WaveData)
        if event != None:
            raise ValueError
    except TypeError:
        pass
    # NOTE(review): only TypeError is caught above; the ValueError raised when
    # an event survives deletion propagates to the outer handler -- verify intent.
    print("All values deleted successfully!")
except Exception as i:
    print(("Encountered Error: {error}".format(error = i)))
    print()
finally:
    ######################################################################################################
    # SdsType, SdsStream, SdsStreamView and SdsBehavior deletion
    ######################################################################################################
    # Clean up the remaining artifacts
    # NOTE(review): if SdsClient construction fails above, 'client' is
    # undefined here and the cleanup itself raises NameError -- verify.
    print("Cleaning up")
    print("Deleting the stream")
    supressError(lambda: client.deleteStream(namespaceId, sampleStreamId))
    print("Deleting the streamViews")
    supressError(lambda: client.deleteStreamView(namespaceId, sampleStreamViewId))
    supressError(lambda: client.deleteStreamView(namespaceId, sampleStreamViewIntId))
    print("Deleting the types")
    supressError(lambda: client.deleteType(namespaceId, sampleTypeId))
    supressError(lambda: client.deleteType(namespaceId, sampleTargetTypeId))
    supressError(lambda: client.deleteType(namespaceId, sampleIntegerTypeId))
    print("done")
| 40.307971 | 177 | 0.639011 |
from sdspy import *
import configparser
import datetime
import time
import math
import inspect
import collections
def getWaveDataType(sampleTypeId):
    """Build the SdsType describing WaveData events.

    The type is keyed by an Int32 "Order" property and carries eight Double
    properties (Tau, Radians, and six trig-derived values); it is registered
    under *sampleTypeId*.

    Raises:
        TypeError: when sampleTypeId is missing or not a string.
    """
    if sampleTypeId is None or not isinstance(sampleTypeId, str):
        raise TypeError("sampleTypeId is not an instantiated string")

    intType = SdsType()
    intType.Id = "intType"
    intType.SdsTypeCode = SdsTypeCode.Int32

    doubleType = SdsType()
    doubleType.Id = "doubleType"
    doubleType.SdsTypeCode = SdsTypeCode.Double

    def makeProperty(propId, sdsType, isKey=False):
        # Factory for the repetitive SdsTypeProperty boilerplate.
        prop = SdsTypeProperty()
        prop.Id = propId
        prop.SdsType = sdsType
        if isKey:
            prop.IsKey = True
        return prop

    # The key property comes first, followed by the value properties in the
    # same order the original sample declared them.
    properties = [makeProperty("Order", intType, isKey=True)]
    for propId in ("Tau", "Radians", "Sin", "Cos", "Tan", "Sinh", "Cosh", "Tanh"):
        properties.append(makeProperty(propId, doubleType))

    wave = SdsType()
    wave.Id = sampleTypeId
    wave.Name = "WaveDataSample"
    wave.Description = "This is a sample Sds type for storing WaveData type events"
    wave.SdsTypeCode = SdsTypeCode.Object
    wave.Properties = properties
    return wave
def getWaveDataTargetType(sampleTypeId):
    """Build the SdsType describing WaveDataTarget events.

    The type is keyed by an Int32 "OrderTarget" property and carries eight
    Double "*Target" value properties; it is registered under *sampleTypeId*.

    Raises:
        TypeError: when sampleTypeId is missing or not a string.
    """
    if sampleTypeId is None or not isinstance(sampleTypeId, str):
        raise TypeError("sampleTypeId is not an instantiated string")

    intType = SdsType()
    intType.Id = "intType"
    intType.SdsTypeCode = SdsTypeCode.Int32

    doubleType = SdsType()
    doubleType.Id = "doubleType"
    doubleType.SdsTypeCode = SdsTypeCode.Double

    def makeProperty(propId, sdsType, isKey=False):
        # Factory for the repetitive SdsTypeProperty boilerplate.
        prop = SdsTypeProperty()
        prop.Id = propId
        prop.SdsType = sdsType
        if isKey:
            prop.IsKey = True
        return prop

    properties = [makeProperty("OrderTarget", intType, isKey=True)]
    for propId in ("TauTarget", "RadiansTarget", "SinTarget", "CosTarget",
                   "TanTarget", "SinhTarget", "CoshTarget", "TanhTarget"):
        properties.append(makeProperty(propId, doubleType))

    wave = SdsType()
    # Bug fix: the original assigned the module-level ``sampleTargetTypeId``
    # global here, silently ignoring the validated parameter and making the
    # argument meaningless.  Use the parameter, as the sibling
    # getWaveDataType() does.
    wave.Id = sampleTypeId
    wave.Name = "WaveDataTargetSample"
    wave.Description = "This is a sample Sds type for storing WaveDataTarget type events"
    wave.SdsTypeCode = SdsTypeCode.Object
    wave.Properties = properties
    return wave
def getWaveDataIntegerType(sampleTypeId):
    """Build the SdsType describing WaveDataInteger events.

    The type is keyed by an Int32 "OrderTarget" property plus three Int32
    trig-value properties; it is registered under *sampleTypeId*.

    Raises:
        TypeError: when sampleTypeId is missing or not a string.
    """
    if sampleTypeId is None or not isinstance(sampleTypeId, str):
        raise TypeError("sampleTypeId is not an instantiated string")

    intType = SdsType()
    intType.Id = "intType"
    intType.SdsTypeCode = SdsTypeCode.Int32

    def makeProperty(propId, isKey=False):
        # Factory for the repetitive SdsTypeProperty boilerplate.
        prop = SdsTypeProperty()
        prop.Id = propId
        prop.SdsType = intType
        if isKey:
            prop.IsKey = True
        return prop

    properties = [makeProperty("OrderTarget", isKey=True)]
    for propId in ("SinInt", "CosInt", "TanInt"):
        properties.append(makeProperty(propId))

    wave = SdsType()
    # Bug fix: the original assigned the module-level ``sampleIntegerTypeId``
    # global here, silently ignoring the validated parameter.  Use the
    # parameter, as the sibling getWaveDataType() does.
    wave.Id = sampleTypeId
    wave.Name = "WaveDataIntegerSample"
    wave.Description = "This is a sample Sds type for storing WaveDataInteger type events"
    wave.SdsTypeCode = SdsTypeCode.Object
    wave.Properties = properties
    return wave
def nextWave(now, interval, multiplier, order):
    """Create one WaveData event for timestamp *now*.

    The phase is the fraction of *interval* elapsed within the current day
    (both measured as milliseconds past midnight), mapped onto [0, 2*pi).
    NOTE(review): assumes *interval* is not exactly midnight, otherwise the
    modulo divides by zero -- confirm with callers.
    """
    def millisSinceMidnight(moment):
        midnight = moment.replace(hour=0, minute=0, second=0, microsecond=0)
        return (moment - midnight).total_seconds() * 1000

    elapsedMs = millisSinceMidnight(now)
    periodMs = millisSinceMidnight(interval)
    radians = ((elapsedMs % periodMs) / periodMs) * 2 * math.pi

    event = WaveData()
    event.Order = order
    event.Radians = radians
    event.Tau = radians / (2 * math.pi)
    # All six trig values are the same scaled evaluation at ``radians``.
    for name, fn in (("Sin", math.sin), ("Cos", math.cos), ("Tan", math.tan),
                     ("Sinh", math.sinh), ("Cosh", math.cosh), ("Tanh", math.tanh)):
        setattr(event, name, multiplier * fn(radians))
    return event
def supressError(sdsCall):
    """Invoke *sdsCall* (a zero-argument callable), swallowing any exception.

    Failures are printed rather than raised, so cleanup code can keep going.
    """
    try:
        sdsCall()
    except Exception as err:
        print("Encountered Error: {error}".format(error=err))
def isprop(v):
    """Predicate for inspect.getmembers: is *v* a ``property`` descriptor?"""
    is_property_descriptor = isinstance(v, property)
    return is_property_descriptor
def toString(event):
    """Render an event's property values as "Name: value, Name: value, ..."."""
    string = ""
    # getmembers returns (name, property) pairs sorted alphabetically by name.
    props = inspect.getmembers(type(event), isprop)
    # Re-order the alphabetical listing into display order; assumes the event
    # type exposes exactly 9 properties.
    # NOTE(review): the index mapping depends on the event class's property
    # names (defined elsewhere) -- confirm against WaveData before reuse.
    printOrder = [2,3,4,0,6,5,1,7,8]
    orderedProps = [props[i] for i in printOrder]
    for prop in orderedProps:
        # prop is (name, property); read the value via the property getter.
        value = prop[1].fget(event)
        if value is None:
            string += "{name}: , ".format(name = prop[0])
        else:
            string += "{name}: {value}, ".format(name = prop[0], value = value)
    # Drop the trailing ", " separator.
    return string[:-2]
def toWaveData(jsonObj):
    """Deserialize a JSON dict into a WaveData event.

    Many JSON implementations omit default values, so every property missing
    from *jsonObj* (or present as null) is filled in with 0.
    """
    wave = WaveData()
    for name, descriptor in inspect.getmembers(type(wave), isprop):
        fill = jsonObj.get(name)
        # Missing key and explicit null both fall back to the default 0.
        descriptor.fset(wave, 0 if fill is None else fill)
    return wave
######################################################################################################
# The following define the identifiers we'll use throughout
| true | true |
f72dbb4b96808df2eff7d41ab6472c83c46f6b78 | 1,483 | py | Python | scripts/one-time/migrate_dynamo_to_pg.py | AndrewLorente/catsnap | 57427b8f61ef5185a41e49d55ffd7dd328777834 | [
"MIT"
] | 5 | 2015-11-23T18:40:00.000Z | 2019-03-22T06:54:04.000Z | scripts/one-time/migrate_dynamo_to_pg.py | AndrewLorente/catsnap | 57427b8f61ef5185a41e49d55ffd7dd328777834 | [
"MIT"
] | 5 | 2016-04-07T15:35:53.000Z | 2019-02-10T23:00:32.000Z | scripts/one-time/migrate_dynamo_to_pg.py | AndrewLorente/catsnap | 57427b8f61ef5185a41e49d55ffd7dd328777834 | [
"MIT"
] | 2 | 2015-12-02T16:44:05.000Z | 2017-09-29T23:17:33.000Z | #!/usr/bin/env python
import json
from sqlalchemy import Table, Column, String, MetaData
from catsnap import Client
from catsnap.table.tag import Tag
from catsnap.table.image import Image
from catsnap.table.image_tag import ImageTag

# One-time migration script: scan the key/value tables reached through
# Client().table(...) and replay their contents into the relational
# tag/image/image_tag tables.  (Presumably DynamoDB -> Postgres, per the
# script's name -- confirm against the Client implementation.)
tags = []
images = []
image_tags = []

# Each scanned 'tag' item becomes one row for the tag table.
tag_table = Client().table('tag')
for item in tag_table.scan():
    tags.append({
        'name': item['tag']})

# Image items store their filename under the 'tag' attribute and carry the
# tag list as a JSON-encoded string; unpack it into (filename, tag_name)
# link rows as we go.
image_table = Client().table('image')
for item in image_table.scan():
    images.append({
        'filename': item['tag'],
        'source_url': item.get('source_url', '')})
    for tag in json.loads(item['tags']):
        image_tags.append({
            'filename': item['tag'],
            'tag_name': tag})

session = Client().session()
metadata = MetaData(bind=Client()._engine)
# Session-scoped temporary staging table keyed by (tag_name, filename).
temp_image_tag = Table(
    'temp_image_tags',
    metadata,
    Column('tag_name', String, primary_key=True),
    Column('filename', String, primary_key=True),
    prefixes=['temporary'],
)
metadata.create_all()
session.execute(temp_image_tag.insert(image_tags))
session.execute(Image.__table__.insert(images))
session.execute(Tag.__table__.insert(tags))
# Resolve the staged (filename, tag_name) pairs to the freshly assigned
# image/tag ids.  (Original author's note: couldn't figure out how to make
# sqlalchemy generate this query, hence raw SQL.)
session.execute("""
    insert into image_tag (image_id, tag_id) (
    select
        image.image_id,
        tag.tag_id
    from
        temp_image_tags
        inner join image using (filename)
        inner join tag on tag.name = temp_image_tags.tag_name
    )
""")
session.commit()
| 25.135593 | 66 | 0.67296 |
import json
from sqlalchemy import Table, Column, String, MetaData
from catsnap import Client
from catsnap.table.tag import Tag
from catsnap.table.image import Image
from catsnap.table.image_tag import ImageTag
tags = []
images = []
image_tags = []
tag_table = Client().table('tag')
for item in tag_table.scan():
tags.append({
'name': item['tag']})
image_table = Client().table('image')
for item in image_table.scan():
images.append({
'filename': item['tag'],
'source_url': item.get('source_url', '')})
for tag in json.loads(item['tags']):
image_tags.append({
'filename': item['tag'],
'tag_name': tag})
session = Client().session()
metadata = MetaData(bind=Client()._engine)
temp_image_tag = Table(
'temp_image_tags',
metadata,
Column('tag_name', String, primary_key=True),
Column('filename', String, primary_key=True),
prefixes=['temporary'],
)
metadata.create_all()
session.execute(temp_image_tag.insert(image_tags))
session.execute(Image.__table__.insert(images))
session.execute(Tag.__table__.insert(tags))
session.execute("""
insert into image_tag (image_id, tag_id) (
select
image.image_id,
tag.tag_id
from
temp_image_tags
inner join image using (filename)
inner join tag on tag.name = temp_image_tags.tag_name
)
""")
session.commit()
| true | true |
f72dbbd37d6df2e935c448ffbc4add7814f020a9 | 750 | py | Python | altdeutsch/tests/test_reader.py | clemsciences/old_high_german_texts | 1fe458613da5f13760d743cee99fc2eaceb59298 | [
"MIT"
] | null | null | null | altdeutsch/tests/test_reader.py | clemsciences/old_high_german_texts | 1fe458613da5f13760d743cee99fc2eaceb59298 | [
"MIT"
] | null | null | null | altdeutsch/tests/test_reader.py | clemsciences/old_high_german_texts | 1fe458613da5f13760d743cee99fc2eaceb59298 | [
"MIT"
] | null | null | null | """
"""
import os
import unittest
from altdeutsch import PACKDIR
from altdeutsch.reader import read_export
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
class UnitTest(unittest.TestCase):
    """Smoke tests for altdeutsch.reader.read_export."""

    def test_hildebrandslied(self):
        """Parse the bundled Hildebrandslied export and spot-check the result."""
        res = read_export(os.path.join(PACKDIR, "tests", "data", "hildebrandslied.txt"))
        # The export is keyed by annotation layer; this check is order-sensitive.
        self.assertEqual(list(res.keys()),
                         ['tok', 'lemma', 'inflection', 'verse', 'edition', 'pos', 'text', 'translation',
                          'lang', 'clause', 'inflectionClass', 'posLemma', 'rhyme', 'document',
                          'inflectionClassLemma'])
        # First tokenized line of the poem.
        self.assertEqual(res["tok"][0], ['Ik', 'gihorta', 'ðat', 'seggen', 'ðat', 'sih', 'urhettun', 'ænon', 'muotin'])
| 30 | 119 | 0.594667 |
import os
import unittest
from altdeutsch import PACKDIR
from altdeutsch.reader import read_export
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
class UnitTest(unittest.TestCase):
def test_hildebrandslied(self):
res = read_export(os.path.join(PACKDIR, "tests", "data", "hildebrandslied.txt"))
self.assertEqual(list(res.keys()),
['tok', 'lemma', 'inflection', 'verse', 'edition', 'pos', 'text', 'translation',
'lang', 'clause', 'inflectionClass', 'posLemma', 'rhyme', 'document',
'inflectionClassLemma'])
self.assertEqual(res["tok"][0], ['Ik', 'gihorta', 'ðat', 'seggen', 'ðat', 'sih', 'urhettun', 'ænon', 'muotin'])
| true | true |
f72dbcea00d85fb6260c29c8f4cc837758fb55d5 | 8,118 | py | Python | rlkit/core/eval_util.py | Ericonaldo/ILSwiss | efd25d457fd1578005c6fbc45cae29e9ab64a99d | [
"MIT"
] | 46 | 2021-07-28T03:15:23.000Z | 2022-03-31T22:19:22.000Z | rlkit/core/eval_util.py | Ericonaldo/ILSwiss | efd25d457fd1578005c6fbc45cae29e9ab64a99d | [
"MIT"
] | 3 | 2021-07-30T09:55:01.000Z | 2022-03-17T17:21:26.000Z | rlkit/core/eval_util.py | Ericonaldo/ILSwiss | efd25d457fd1578005c6fbc45cae29e9ab64a99d | [
"MIT"
] | 5 | 2021-07-28T16:45:14.000Z | 2022-03-11T01:39:19.000Z | """
Common evaluation utilities.
"""
from collections import OrderedDict
from numbers import Number
import os
import json
import numpy as np
from rlkit.core.vistools import plot_returns_on_same_plot, save_plot
def get_generic_path_information(paths, stat_prefix=""):
"""
Get an OrderedDict with a bunch of statistic names and values.
"""
statistics = OrderedDict()
returns = [sum(path["rewards"]) for path in paths]
# rewards = np.vstack([path["rewards"] for path in paths])
rewards = np.concatenate([path["rewards"] for path in paths])
statistics.update(
create_stats_ordered_dict(
"Rewards", rewards, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Returns", returns, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
# print(paths[0]["env_infos"])
if "is_success" in paths[0]["env_infos"][0].keys():
acc_sum = [(np.sum([x['is_success'] for x in path["env_infos"]])>0).astype(float) for path in paths]
acc = np.sum(acc_sum) * 1.0 / len(paths)
statistics.update(
create_stats_ordered_dict(
"Success Num", np.sum(acc_sum), stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Traj Num", len(paths), stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Success Rate", acc, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
actions = [path["actions"] for path in paths]
# if isinstance(actions[0][0], np.ndarray):
# actions = np.vstack([path["actions"] for path in paths])
# else:
# actions = np.hstack([path["actions"] for path in paths])
statistics.update(
create_stats_ordered_dict(
"Actions", actions, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Ep. Len.",
np.array([len(path["terminals"]) for path in paths]),
stat_prefix=stat_prefix,
always_show_all_stats=True,
)
)
statistics["Num Paths"] = len(paths)
return statistics
def get_average_returns(paths, std=False):
    """Mean (and, when *std* is truthy, standard deviation) of per-path returns.

    A path's return is the sum of its "rewards" entry.
    """
    per_path_returns = [sum(p["rewards"]) for p in paths]
    mean_return = np.mean(per_path_returns)
    if not std:
        return mean_return
    return mean_return, np.std(per_path_returns)
def create_stats_ordered_dict(
    name,
    data,
    stat_prefix=None,
    always_show_all_stats=False,
    exclude_max_min=False,
):
    """Summarize *data* into an OrderedDict of named statistics.

    Scalars pass through unchanged; tuples are summarized element-by-element
    under indexed names; lists of iterables are concatenated first.  For
    array-like data the result holds Mean/Std (plus Max/Min unless
    *exclude_max_min*); single-element arrays collapse to a scalar unless
    *always_show_all_stats* is set.
    """
    if stat_prefix is not None:
        name = "{} {}".format(stat_prefix, name)

    if isinstance(data, Number):
        return OrderedDict({name: data})

    if len(data) == 0:
        return OrderedDict()

    if isinstance(data, tuple):
        # Recurse per element; the prefix is already baked into ``name``.
        combined = OrderedDict()
        for idx, element in enumerate(data):
            combined.update(create_stats_ordered_dict(
                "{0}_{1}".format(name, idx),
                element,
            ))
        return combined

    if isinstance(data, list):
        # A list whose first element is itself iterable gets flattened into
        # one array before computing statistics.
        try:
            iter(data[0])
        except TypeError:
            pass
        else:
            data = np.concatenate(data)

    if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:
        return OrderedDict({name: float(data)})

    stats = OrderedDict()
    stats[name + " Mean"] = np.mean(data)
    stats[name + " Std"] = np.std(data)
    if not exclude_max_min:
        stats[name + " Max"] = np.max(data)
        stats[name + " Min"] = np.min(data)
    return stats
# I (Kamyar) will be adding my own eval utils here too
def plot_experiment_returns(
    exp_path,
    title,
    save_path,
    column_name="Test_Returns_Mean",
    x_axis_lims=None,
    y_axis_lims=None,
    constraints=None,
    plot_mean=False,
    plot_horizontal_lines_at=None,
    horizontal_lines_names=None,
):
    """
    Plot the given progress.csv column(s) for every sub-experiment under
    *exp_path*, saving the figure to *save_path*.

    Each subdirectory is expected to hold a ``variant.json`` (hyperparameters)
    and a ``progress.csv``.  ``column_name`` may be a single column, a list of
    columns, or an expression like ``"A+B"`` / ``"A-B"`` combining two columns.
    ``constraints`` maps dotted variant keys to required values; runs that do
    not match are skipped.  With ``plot_mean`` the runs are truncated to the
    shortest length and plotted as mean +/- std; otherwise each run is plotted
    individually, optionally with horizontal reference lines.

    NOTE(review): unreadable runs are silently skipped via bare ``except``
    clauses, and ``column_name`` is rebound inside the loop -- intentional but
    fragile; keep in mind when modifying.
    """
    arr_list = []
    names = []

    # Make sure the directory that will hold the figure exists.
    dir_path = os.path.split(save_path)[0]
    os.makedirs(dir_path, exist_ok=True)

    # print(exp_path)
    for sub_exp_dir in os.listdir(exp_path):
        try:
            sub_exp_path = os.path.join(exp_path, sub_exp_dir)
            if not os.path.isdir(sub_exp_path):
                continue
            if constraints is not None:
                # Skip runs whose variant.json does not match every constraint.
                constraints_satisfied = True
                with open(os.path.join(sub_exp_path, "variant.json"), "r") as j:
                    d = json.load(j)
                for k, v in constraints.items():
                    # Dotted keys ("algo.lr") index into nested dicts.
                    k = k.split(".")
                    d_v = d[k[0]]
                    for sub_k in k[1:]:
                        d_v = d_v[sub_k]
                    if d_v != v:
                        constraints_satisfied = False
                        break
                if not constraints_satisfied:
                    # for debugging
                    # print('\nconstraints')
                    # print(constraints)
                    # print('\nthis dict')
                    # print(d)
                    continue

            csv_full_path = os.path.join(sub_exp_path, "progress.csv")
            # print(csv_full_path)
            try:
                progress_csv = np.genfromtxt(
                    csv_full_path, skip_header=0, delimiter=",", names=True
                )
                # print(progress_csv.dtype)
                if isinstance(column_name, str):
                    column_name = [column_name]
                for c_name in column_name:
                    # "A+B" / "A-B" combine two csv columns arithmetically.
                    if "+" in c_name:
                        first, second = c_name.split("+")
                        returns = progress_csv[first] + progress_csv[second]
                    elif "-" in c_name:
                        first, second = c_name.split("-")
                        returns = progress_csv[first] - progress_csv[second]
                    else:
                        returns = progress_csv[c_name]
                    arr_list.append(returns)
                    names.append(c_name + "_" + sub_exp_dir)
                # print(csv_full_path)
            except:
                # Missing/corrupt progress.csv: skip this run silently.
                pass
        except:
            # Unreadable run directory or variant.json: skip silently.
            pass

    if plot_mean:
        # Truncate every run to the shortest length so they can be stacked.
        min_len = min(map(lambda a: a.shape[0], arr_list))
        arr_list = list(map(lambda a: a[:min_len], arr_list))
        returns = np.stack(arr_list)
        mean = np.mean(returns, 0)
        std = np.std(returns, 0)
        x = np.arange(min_len)
        # save_plot(x, mean, title, save_path, color='cyan', x_axis_lims=x_axis_lims, y_axis_lims=y_axis_lims)
        plot_returns_on_same_plot(
            [mean, mean + std, mean - std],
            ["mean", "mean+std", "mean-std"],
            title,
            save_path,
            x_axis_lims=x_axis_lims,
            y_axis_lims=y_axis_lims,
        )
    else:
        if len(arr_list) == 0:
            print(0)
        if plot_horizontal_lines_at is not None:
            # Reference lines span the longest run.
            max_len = max(map(lambda a: a.shape[0], arr_list))
            arr_list += [np.ones(max_len) * y_val for y_val in plot_horizontal_lines_at]
            names += horizontal_lines_names
        try:
            # print(len(arr_list))
            plot_returns_on_same_plot(
                arr_list,
                names,
                title,
                save_path,
                x_axis_lims=x_axis_lims,
                y_axis_lims=y_axis_lims,
            )
        except Exception as e:
            # Best-effort plotting: log the context instead of crashing.
            print("Failed to plot:")
            print(arr_list)
            print(title)
            print(exp_path)
            print(constraints)
            # raise e
| 32.214286 | 110 | 0.54607 |
from collections import OrderedDict
from numbers import Number
import os
import json
import numpy as np
from rlkit.core.vistools import plot_returns_on_same_plot, save_plot
def get_generic_path_information(paths, stat_prefix=""):
statistics = OrderedDict()
returns = [sum(path["rewards"]) for path in paths]
rewards = np.concatenate([path["rewards"] for path in paths])
statistics.update(
create_stats_ordered_dict(
"Rewards", rewards, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Returns", returns, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
if "is_success" in paths[0]["env_infos"][0].keys():
acc_sum = [(np.sum([x['is_success'] for x in path["env_infos"]])>0).astype(float) for path in paths]
acc = np.sum(acc_sum) * 1.0 / len(paths)
statistics.update(
create_stats_ordered_dict(
"Success Num", np.sum(acc_sum), stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Traj Num", len(paths), stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Success Rate", acc, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
actions = [path["actions"] for path in paths]
statistics.update(
create_stats_ordered_dict(
"Actions", actions, stat_prefix=stat_prefix, always_show_all_stats=True
)
)
statistics.update(
create_stats_ordered_dict(
"Ep. Len.",
np.array([len(path["terminals"]) for path in paths]),
stat_prefix=stat_prefix,
always_show_all_stats=True,
)
)
statistics["Num Paths"] = len(paths)
return statistics
def get_average_returns(paths, std=False):
returns = [sum(path["rewards"]) for path in paths]
if std:
return np.mean(returns), np.std(returns)
return np.mean(returns)
def create_stats_ordered_dict(
name,
data,
stat_prefix=None,
always_show_all_stats=False,
exclude_max_min=False,
):
if stat_prefix is not None:
name = "{} {}".format(stat_prefix, name)
if isinstance(data, Number):
return OrderedDict({name: data})
if len(data) == 0:
return OrderedDict()
if isinstance(data, tuple):
ordered_dict = OrderedDict()
for number, d in enumerate(data):
sub_dict = create_stats_ordered_dict(
"{0}_{1}".format(name, number),
d,
)
ordered_dict.update(sub_dict)
return ordered_dict
if isinstance(data, list):
try:
iter(data[0])
except TypeError:
pass
else:
data = np.concatenate(data)
if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:
return OrderedDict({name: float(data)})
stats = OrderedDict(
[
(name + " Mean", np.mean(data)),
(name + " Std", np.std(data)),
]
)
if not exclude_max_min:
stats[name + " Max"] = np.max(data)
stats[name + " Min"] = np.min(data)
return stats
def plot_experiment_returns(
exp_path,
title,
save_path,
column_name="Test_Returns_Mean",
x_axis_lims=None,
y_axis_lims=None,
constraints=None,
plot_mean=False,
plot_horizontal_lines_at=None,
horizontal_lines_names=None,
):
arr_list = []
names = []
dir_path = os.path.split(save_path)[0]
os.makedirs(dir_path, exist_ok=True)
for sub_exp_dir in os.listdir(exp_path):
try:
sub_exp_path = os.path.join(exp_path, sub_exp_dir)
if not os.path.isdir(sub_exp_path):
continue
if constraints is not None:
constraints_satisfied = True
with open(os.path.join(sub_exp_path, "variant.json"), "r") as j:
d = json.load(j)
for k, v in constraints.items():
k = k.split(".")
d_v = d[k[0]]
for sub_k in k[1:]:
d_v = d_v[sub_k]
if d_v != v:
constraints_satisfied = False
break
if not constraints_satisfied:
continue
csv_full_path = os.path.join(sub_exp_path, "progress.csv")
try:
progress_csv = np.genfromtxt(
csv_full_path, skip_header=0, delimiter=",", names=True
)
if isinstance(column_name, str):
column_name = [column_name]
for c_name in column_name:
if "+" in c_name:
first, second = c_name.split("+")
returns = progress_csv[first] + progress_csv[second]
elif "-" in c_name:
first, second = c_name.split("-")
returns = progress_csv[first] - progress_csv[second]
else:
returns = progress_csv[c_name]
arr_list.append(returns)
names.append(c_name + "_" + sub_exp_dir)
except:
pass
except:
pass
if plot_mean:
min_len = min(map(lambda a: a.shape[0], arr_list))
arr_list = list(map(lambda a: a[:min_len], arr_list))
returns = np.stack(arr_list)
mean = np.mean(returns, 0)
std = np.std(returns, 0)
x = np.arange(min_len)
plot_returns_on_same_plot(
[mean, mean + std, mean - std],
["mean", "mean+std", "mean-std"],
title,
save_path,
x_axis_lims=x_axis_lims,
y_axis_lims=y_axis_lims,
)
else:
if len(arr_list) == 0:
print(0)
if plot_horizontal_lines_at is not None:
max_len = max(map(lambda a: a.shape[0], arr_list))
arr_list += [np.ones(max_len) * y_val for y_val in plot_horizontal_lines_at]
names += horizontal_lines_names
try:
plot_returns_on_same_plot(
arr_list,
names,
title,
save_path,
x_axis_lims=x_axis_lims,
y_axis_lims=y_axis_lims,
)
except Exception as e:
print("Failed to plot:")
print(arr_list)
print(title)
print(exp_path)
print(constraints)
| true | true |
f72dbcf7ce7f36619bc491a433758075f439932e | 3,873 | py | Python | model/nlp/topic.py | shelleyHLX/ai-server | 12c4a654a686462b8b725fa0641cc967d2f80e14 | [
"Apache-2.0"
] | 1 | 2021-11-09T08:02:01.000Z | 2021-11-09T08:02:01.000Z | model/nlp/topic.py | shelleyHLX/ai-server | 12c4a654a686462b8b725fa0641cc967d2f80e14 | [
"Apache-2.0"
] | null | null | null | model/nlp/topic.py | shelleyHLX/ai-server | 12c4a654a686462b8b725fa0641cc967d2f80e14 | [
"Apache-2.0"
] | 1 | 2021-11-09T08:02:03.000Z | 2021-11-09T08:02:03.000Z | # -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
import operator
import os
import tensorflow as tf
from keras.models import load_model
from model.nlp.keras_data_reader import load_dict
from model.nlp.keras_data_reader import pad_sequence
from model.nlp.keras_data_reader import vectorize_words
from utils.io_util import get_logger
logger = get_logger(__file__)
label_revserv_dict = {0: '人类作者',
1: '机器作者',
2: '机器翻译',
3: '自动摘要'}
class Topic(object):
    """Text classifier wrapping a Keras model over character-id sequences."""
    # Singleton slot used by get_instance().  NOTE(review): after the first
    # call this class attribute holds a *Topic instance*, while the instance
    # attribute of the same name holds the Keras model -- the shared name is
    # confusing; consider renaming one of them.
    topic_model = None

    def __init__(self, model_path, word_dict_path, maxlen=400):
        """Load the word-id dictionary and the Keras model.

        Paths that fail to open are retried relative to the project root
        (two directories above this file).  Raises Exception when
        *model_path* is falsy.
        """
        self.name = 'topic'
        self.maxlen = maxlen
        # load dict
        pwd_path = os.path.abspath(os.path.dirname(__file__))
        if word_dict_path:
            try:
                self.word_ids_dict = load_dict(word_dict_path)
            except IOError:
                # Fall back to a path relative to the repository root.
                word_dict_path = os.path.join(pwd_path, '../..', word_dict_path)
                self.word_ids_dict = load_dict(word_dict_path)
        # load parrots_model by file
        if model_path:
            try:
                self.topic_model = load_model(model_path)
            except IOError:
                model_path = os.path.join(pwd_path, '../..', model_path)
                self.topic_model = load_model(model_path)
            logger.info("Load topic model ok, path: " + model_path)
            # self.topic_model._make_predict_function()  # have to initialize before threading
            # Keep a handle to the TF graph so predict() can run under it
            # from other threads.
            self.graph = tf.get_default_graph()
        else:
            logger.warn('topic model file is need')
            raise Exception('topic model file need')

    @classmethod
    def get_instance(cls, model_path, word_dict_path, maxlen=400):
        """Return the process-wide Topic singleton, creating it on first use."""
        if cls.topic_model:
            return cls.topic_model
        else:
            obj = cls(model_path, word_dict_path, maxlen=maxlen)
            cls.topic_model = obj
            return obj

    def get_topic(self, text):
        """Return [(label_index, probability), ...] sorted by descending prob."""
        # read data to index: the text is split into characters.
        test_text_words = [list(text)]
        word_ids = vectorize_words(test_text_words, self.word_ids_dict)
        # pad sequence to the model's fixed input length
        word_seq = pad_sequence(word_ids, self.maxlen)
        with self.graph.as_default():
            # predict prob
            predict_probs = self.topic_model.predict(word_seq)
        # get prob for one line test text
        probs = predict_probs[0]
        probs_dict = dict((idx, prob) for idx, prob in enumerate(probs))
        probs_order_dict = sorted(probs_dict.items(), key=operator.itemgetter(1), reverse=True)
        return probs_order_dict

    def check(self, text):
        """Classify *text* and return its top-3 labels.

        Returns a dict of the form::

            {
                "text": <the input text>,
                "items": [{"score": <float>, "tag": <label name>}, ...]
            }

        where labels come from ``label_revserv_dict`` and at most three
        items are returned, highest score first.
        """
        result_dict = {"text": text}
        topics = self.get_topic(text)
        items_list = []
        for idx, prob in topics:
            # get top 3
            if len(items_list) > 2:
                continue
            items = dict()
            items["score"] = prob
            items["tag"] = label_revserv_dict[idx]
            items_list.append(items)
        result_dict['items'] = items_list
        return result_dict
| 32.008264 | 95 | 0.52001 |
import operator
import os
import tensorflow as tf
from keras.models import load_model
from model.nlp.keras_data_reader import load_dict
from model.nlp.keras_data_reader import pad_sequence
from model.nlp.keras_data_reader import vectorize_words
from utils.io_util import get_logger
logger = get_logger(__file__)
label_revserv_dict = {0: '人类作者',
1: '机器作者',
2: '机器翻译',
3: '自动摘要'}
class Topic(object):
topic_model = None
def __init__(self, model_path, word_dict_path, maxlen=400):
self.name = 'topic'
self.maxlen = maxlen
pwd_path = os.path.abspath(os.path.dirname(__file__))
if word_dict_path:
try:
self.word_ids_dict = load_dict(word_dict_path)
except IOError:
word_dict_path = os.path.join(pwd_path, '../..', word_dict_path)
self.word_ids_dict = load_dict(word_dict_path)
if model_path:
try:
self.topic_model = load_model(model_path)
except IOError:
model_path = os.path.join(pwd_path, '../..', model_path)
self.topic_model = load_model(model_path)
logger.info("Load topic model ok, path: " + model_path)
ult_graph()
else:
logger.warn('topic model file is need')
raise Exception('topic model file need')
@classmethod
def get_instance(cls, model_path, word_dict_path, maxlen=400):
if cls.topic_model:
return cls.topic_model
else:
obj = cls(model_path, word_dict_path, maxlen=maxlen)
cls.topic_model = obj
return obj
def get_topic(self, text):
test_text_words = [list(text)]
word_ids = vectorize_words(test_text_words, self.word_ids_dict)
word_seq = pad_sequence(word_ids, self.maxlen)
with self.graph.as_default():
predict_probs = self.topic_model.predict(word_seq)
probs = predict_probs[0]
probs_dict = dict((idx, prob) for idx, prob in enumerate(probs))
probs_order_dict = sorted(probs_dict.items(), key=operator.itemgetter(1), reverse=True)
return probs_order_dict
def check(self, text):
result_dict = {"text": text}
topics = self.get_topic(text)
items_list = []
for idx, prob in topics:
if len(items_list) > 2:
continue
items = dict()
items["score"] = prob
items["tag"] = label_revserv_dict[idx]
items_list.append(items)
result_dict['items'] = items_list
return result_dict
| true | true |
f72dbd21266974dd5071e253cb4ab05797c2c1b1 | 146,647 | py | Python | test/test_fx.py | jiyuanzFB/pytorch | d047e475f830631d8fcc877ea17eac8fb34748d7 | [
"Intel"
] | 1 | 2022-03-08T14:43:39.000Z | 2022-03-08T14:43:39.000Z | test/test_fx.py | jiyuanzFB/pytorch | d047e475f830631d8fcc877ea17eac8fb34748d7 | [
"Intel"
] | null | null | null | test/test_fx.py | jiyuanzFB/pytorch | d047e475f830631d8fcc877ea17eac8fb34748d7 | [
"Intel"
] | null | null | null | # Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
    """Tiny traceable module: ReLU applied to the input shifted by 3."""

    def forward(self, x):
        shifted = x + 3.0
        return torch.relu(shifted)
def a_non_torch_leaf(a, b):
    """Plain-Python helper (not a torch op) used when testing leaf tracing."""
    combined = a + b
    return combined
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
    """Truncate *x* to an int (module-level so it can be autowrapped)."""
    truncated = int(x)
    return truncated
def fx_int_x2(x: float) -> int:
    """Truncate *x* to an int and double it (module-level for autowrapping)."""
    doubled = 2 * int(x)
    return doubled
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
    """Leaf registered with wrap() by name; sums an indexable pair plus *b*.

    Indexing (rather than unpacking) is deliberate so FX Proxies trace through.
    """
    pair_total = a[0] + a[1]
    return pair_total + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
    """Leaf registered with wrap() by function object; same math as a_lifted_leaf."""
    elementwise_sum = a[0] + a[1]
    return elementwise_sum + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
    """Wrapped helper mixing a positional and a keyword-only argument."""
    left = p1.x
    right = p2.y
    return left + right
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
    """Leaf registered through the decorator form of ``wrap`` (not by name)."""
    bumped = a + 1
    return bumped
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
    """Wrapped free function that forwards through the supplied BatchNorm1d."""
    normalized = batchnorm1d(x)
    return normalized
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
    # NOTE(review): torch.foo does not exist at import time; presumably the
    # tests install it (or only symbolically trace this call) before invoking
    # -- confirm before reusing this helper.
    return torch.foo(x)
class Pair(NamedTuple):
    """Pair of tensors; _custom_fx_repr_fn customizes how FX formats it as an arg."""
    x : torch.Tensor
    y : torch.Tensor
    def _custom_fx_repr_fn(self) -> str:
        return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object):  # noqa: B209
    """Plain two-field container used as a pytree node in tests."""

    def __init__(self, a, b):
        self.a, self.b = a, b
class TestFX(JitTestCase):
    """Tests for torch.fx symbolic tracing, Graph/GraphModule manipulation,
    and interop with TorchScript."""
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
        if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
            lib_file_path = find_library_location('libtorchbind_test.so')
            torch.ops.load_library(str(lib_file_path))
    def tearDown(self):
        # Restore the global mutability-check flag so other suites are unaffected.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
        """Check that an nn.Module's results match the GraphModule version
        for a given set of args/kwargs.
        """
        kwargs = kwargs if kwargs else {}
        ref_outs = m(*args, **kwargs)
        gm = symbolic_trace(m)
        gm.graph.lint()
        test_outs = gm(*args, **kwargs)
        self.assertEqual(ref_outs, test_outs)
    def test_graph_module(self):
        """Smoke-test symbolic tracing and scripting across several module shapes
        (submodules, parameters, defaults, *args/**kwargs, re-instantiation)."""
        class MySub(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = torch.nn.Parameter(torch.rand(4, 3))
            def forward(self, x):
                return self.w + x
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.lin = torch.nn.Linear(4, 3)
                self.sub_mod = MySub()
                self.w = torch.nn.Parameter(torch.rand(3))
            def forward(self, A, B, c):
                t = torch.sigmoid(A) + self.lin(c)
                return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
        m = MyModule()
        gm = symbolic_trace(m)
        ms = torch.jit.script(gm)
        class M2(torch.nn.Module):
            def forward(self, A):
                m, idx = torch.max(A, 0)
                return m + 1, idx + 1
        m2 = M2()
        gm2 = symbolic_trace(m2)
        class T(torch.nn.Module):
            def forward(self, A, b=4, *args, c=5, **kwargs):
                x = A + 1 + args[0] + kwargs['3']
                return x
        t = T()
        symbolic_trace(t)
        # test for issue described at https://github.com/pytorch/pytorch/issues/63883
        class M3(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        m3 = M3()
        gm3 = symbolic_trace(m3)
        new_instance = gm3.__new__(type(gm3))
        new_instance.__init__(gm3, gm3.graph)
        x = torch.randn(5, 3)
        torch.testing.assert_allclose(new_instance(x), torch.relu(x))
    def test_custom_import(self):
        """A graph built by hand around a free function executes correctly."""
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(torch.sin(x + y), gm(x, y))
    def test_args_kwargs(self):
        """Tracing through *args/**kwargs forwarding preserves behavior."""
        class T(torch.nn.Module):
            def forward(self, *args, **kwargs):
                x = args[0] + kwargs['foo']
                return x
        t = T()
        self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
    def test_args_kwargs_no_self(self):
        """forward() taking self via *args is rejected with a clear error."""
        class T(torch.nn.Module):
            def forward(*args, **kwargs):  # noqa: B902
                self = args[0]
                return torch.relu(args[1])
        t = T()
        with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
            self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
    def test_fx_shifts(self):
        """Bit-shift operators trace correctly."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x << 3, x >> 3
        input = torch.LongTensor(10).random_(0, 1024)
        m = MyModule()
        self.checkGraphModule(m, (input,))
    def test_fx_and_or(self):
        """Bitwise and/or operators trace correctly."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x & x, x | x
        input = torch.LongTensor(10).random_(0, 1024)
        m = MyModule()
        self.checkGraphModule(m, (input,))
    def test_dict(self):
        """Dict inputs/outputs trace correctly."""
        class MyDictMod(torch.nn.Module):
            def forward(self, d):
                return d['3'].relu(), {'4' : d['3'].neg()}
        input_dict = {'3': torch.rand(3, 4)}
        m = MyDictMod()
        self.checkGraphModule(m, (input_dict,))
    def test_matmul_tracing(self):
        """@ and reflected-@ against a non-proxy constant trace correctly."""
        const = torch.randn(3)
        def matmul_f(x):
            return x @ const
        mod = symbolic_trace(matmul_f)
        inp = torch.randn(3)
        self.assertEqual(mod(inp), matmul_f(inp))
        def rmatmul_f(x):
            return const @ x
        mod = symbolic_trace(rmatmul_f)
        inp = torch.randn(3)
        self.assertEqual(mod(inp), rmatmul_f(inp))
    def test_disallow_override(self):
        """A custom Tracer can veto node creation (here: in-place ops)."""
        # Custom delegate to disallow in-place tensor operations
        class NoMutableCallTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                name = target if isinstance(target, str) else torch.typename(target)
                # Trailing-underscore names follow the torch in-place convention.
                if name[-1] == '_':
                    raise RuntimeError('In-place operations are not supported')
                return super().create_node(kind, target, args, kwargs, name)
        # Test method
        class MyInplaceMod(torch.nn.Module):
            def forward(self, x):
                x.add_(3.0)
                return x
        m = MyInplaceMod()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m)
        # Test free function
        class MyInplaceMod2(torch.nn.Module):
            def forward(self, x):
                torch.log_(x)
                return x
        m2 = MyInplaceMod2()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m2)
        # Test symbolic node as an arg
        class MyInplaceMod3(torch.nn.Module):
            def forward(self, x):
                y = torch.ones(3, 4)
                y.add_(x)
                return x
        m3 = MyInplaceMod3()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m3)
    def test_leaf_module(self):
        """A custom Tracer that declares no leaf modules traces through them all."""
        # Custom delegate to make it so that there are no leaf modules, everything
        # should get traced through
        class NoLeafModulesTracer(Tracer):
            def is_leaf_module(self, m, qualname):
                return False
        class MyReluMod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.relu = torch.nn.ReLU()
            def forward(self, x):
                return self.relu(x)
        mrm = MyReluMod()
        sym = NoLeafModulesTracer().trace(mrm)
        for node in sym.nodes:
            self.assertNotEqual(node.op, 'call_module')
        sym.lint()
    def test_wrap(self):
        """wrap() by name keeps the call opaque in the traced code."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
        def to_trace(y):
            return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
        m = symbolic_trace(to_trace)
        self.assertIn('a_lifted_leaf', m.code)
        self.assertEqual(27, m(2))
        # Wrapping must not replace the module-level global.
        self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
    def test_wrap_fn_directly(self):
        """wrap() with a direct function reference behaves like wrap-by-name."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
        def to_trace(y):
            return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
        m = symbolic_trace(to_trace)
        self.assertIn('a_lifted_leaf2', m.code)
        self.assertEqual(27, m(2))
        self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
    def test_wrapped_via_decorator(self):
        """@wrap decorator form keeps the call opaque and the global unpatched."""
        self.assertEqual(wrapped_via_decorator(0), 1)
        def to_trace(y):
            return wrapped_via_decorator(y)
        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_wrapped_via_decorator_and_transformed(self):
        """A wrapped call survives a Transformer round-trip."""
        self.assertEqual(wrapped_via_decorator(0), 1)
        def to_trace(y):
            return wrapped_via_decorator(y)
        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
        transformed = torch.fx.Transformer(m).transform()
        self.assertIn('wrapped_via_decorator', transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_wrap_with_submodule(self):
        """A wrapped function that receives a submodule argument still traces."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)
        m = symbolic_trace(M())
        self.assertIn("wrapped_with_submodule", m.code)
        input = torch.rand(3, 2)
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), m(input))
    def test_wrapped_retrace(self):
        """Re-tracing an already-traced module keeps wrapped calls opaque."""
        def to_trace(y):
            return wrapped_via_decorator(y)
        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        retraced = symbolic_trace(m)
        self.assertIn('wrapped_via_decorator', retraced.code)
        self.assertEqual(retraced(0), 1)
    def test_graph_edit_with_proxy(self):
        """Copy a traced graph, then extend it via Proxy arithmetic."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        m = M()
        g = symbolic_trace(m).graph
        new_g = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        output_val = new_g.graph_copy(g, val_map)
        t = Proxy(output_val)
        # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
        new_g.output((t + t).node)
        gm = GraphModule(m, new_g)
        gm.graph.lint()
        self.assertEqual(gm(3, 4), 14)
    def test_graph_unique_names(self):
        """Nodes in a copied-and-extended graph keep unique names."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        m = M()
        g = symbolic_trace(m).graph
        new_g = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        output_val = new_g.graph_copy(g, val_map)
        t = Proxy(output_val)
        # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
        new_g.output((t + t).node)
        gm = GraphModule(m, new_g)
        seen_names : Set[str] = set()
        for node in gm.graph.nodes:
            assert node.name not in seen_names
            seen_names.add(node.name)
    def test_stack_traces(self):
        """record_stack_traces attaches a stack trace to each non-output node,
        and node_copy preserves it."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        tracer = torch.fx.Tracer()
        tracer.record_stack_traces = True
        graph = tracer.trace(M())
        # saving the original list because we will insert new nodes as a part of a test
        orig_graph_nodes = list(graph.nodes)
        for node in orig_graph_nodes:
            if node.op == 'output':
                continue
            self.assertTrue(node.stack_trace is not None)
            assert 'test_fx.py' in node.stack_trace
            # verify that copying the node does not lose the stack trace
            new_node = graph.node_copy(node)
            self.assertTrue(new_node.stack_trace is not None)
            assert 'test_fx.py' in new_node.stack_trace
    def test_graph_unique_names_manual(self):
        """Manually-specified colliding names are deduplicated on graph_copy."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
        c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        graph2 = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        graph2.graph_copy(graph, val_map)
        seen_names : Set[str] = set()
        for node in graph2.nodes:
            assert node.name not in seen_names
            seen_names.add(node.name)
    def test_unpack(self):
        """Tuple unpacking inside forward() traces correctly."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                c, d = a
                return c + d + b
        a = (torch.rand(1), torch.rand(1))
        b = torch.rand(1)
        m = M()
        self.checkGraphModule(m, (a, b))
    def test_native_callable(self):
        """Lower FX IR to a C++ TorchBind interpreter and check parity with the
        original module, its scripted form, and a serialize/deserialize round-trip."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            raise unittest.SkipTest("non-portable load_library call used in test")
        # This test exercises the case where we use FX to translate from Python
        # code to some native callable object
        #
        # For the purposes of testing, we use ElementwiseInterpreter defined
        # in test_custom_class.cpp.
        #
        # We test that we can
        # 1) Construct a native callable from FX IR
        # 2) Construct a drop-in replacement module that delegates to the
        #    native callable rather than the original code
        # 3) Run both the original code and native callable wrapper with
        #    equivalent results
        # 4) TorchScript compile the native callable wrapper and confirm
        #    equivalent results with the reference
        # 5) TorchScript serialize and deserialize the native callable
        #    and confirm equivalent results with the reference
        # We use this simple Module as a reference computation
        class MySimpleMod(torch.nn.Module):
            def forward(self, x):
                return 3.0 * x + x
        msm = MySimpleMod()
        # This is what a lowering pass might look like: a function that takes
        # a valid nn.Module, symbolically traces it, lowers the Module to some
        # representation, and wraps that representation up into another
        # nn.Module instance that handles dispatch to the compiled/lowered code.
        def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
            # ===== Stage 1: Symbolic trace the module =====
            mod = symbolic_trace(orig_mod)
            # ===== Stage 2: Lower GraphModule representation to the C++
            #       interpreter's instruction format ======
            instructions = []
            constant_idx = 0
            constants = {}
            fn_input_names = []
            target_to_name = {
                operator.add : "add",
                operator.mul : "mul"
            }
            output_node : Optional[Node] = None
            # For each instruction, create a triple
            # (instruction_name : str, inputs : List[str], output : str)
            # to feed into the C++ interpreter
            for n in mod.graph.nodes:
                target, args, out_name = n.target, n.args, n.name
                assert len(n.kwargs) == 0, "kwargs currently not supported"
                if n.op == 'placeholder':
                    # Placeholders specify function argument names. Save these
                    # for later when we generate the wrapper GraphModule
                    fn_input_names.append(target)
                elif n.op == 'call_function':
                    assert target in target_to_name, "Unsupported call target " + target
                    arg_names = []
                    for arg in args:
                        if not isinstance(arg, Node):
                            # Pull out constants. These constants will later be
                            # fed to the interpreter C++ object via add_constant()
                            arg_name = f'constant_{constant_idx}'
                            constants[arg_name] = torch.tensor(
                                [arg] if isinstance(arg, numbers.Number) else arg)
                            arg_names.append(arg_name)
                            constant_idx += 1
                        else:
                            arg_names.append(arg.name)
                    instructions.append((target_to_name[target], arg_names, out_name))
                elif n.op == 'output':
                    if output_node is not None:
                        raise RuntimeError('Multiple output nodes!')
                    output_node = n
                else:
                    raise RuntimeError('Unsupported opcode ' + n.op)
            interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
            # Load constants
            for k, v in constants.items():
                interpreter.add_constant(k, v)
            # Specify names for positional input arguments
            interpreter.set_input_names(fn_input_names)
            # Load instructions
            interpreter.set_instructions(instructions)
            # Specify name for single output
            assert isinstance(output_node.args[0], torch.fx.Node)
            interpreter.set_output_name(output_node.args[0].name)
            # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
            class WrapperModule(torch.nn.Module):
                def __init__(self, interpreter):
                    super().__init__()
                    self.interpreter = interpreter
            wrapper = WrapperModule(interpreter)
            # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
            # FIXME: The following code could be greatly simplified by symbolic_trace'ing
            # the wrapper with a Tracer that considers the Wrapper instance a root
            # module, however, I can't get `__call__` exposed on TorchBind classes
            # without it messing up Python `hasattr` for some reason. More digging
            # into CPython's implementation of hasattr is probably in order...
            graph = torch.fx.Graph()
            # Add placeholders for fn inputs
            placeholder_nodes = []
            for name in fn_input_names:
                placeholder_nodes.append(graph.create_node('placeholder', name))
            # Get the interpreter object
            interpreter_node = graph.create_node('get_attr', 'interpreter')
            # Add a node to call the interpreter instance
            output_node = graph.create_node(
                op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
            # Register output
            graph.output(output_node)
            graph.lint()
            # Return final GraphModule!!!
            return GraphModule(wrapper, graph)
        # Lower GraphModule to C++ interpreter
        lowered = lower_to_elementwise_interpreter(msm)
        # Compare correctness with original module
        x = torch.rand(3, 4)
        ref_out = msm(x)
        test_out = lowered(x)
        torch.testing.assert_close(test_out, ref_out)
        # Test TorchScript compilation
        scripted_lowered = torch.jit.script(lowered)
        script_out = scripted_lowered(x)
        torch.testing.assert_close(script_out, ref_out)
        # Test TorchScript ser/de
        import_copy = self.getExportImportCopy(scripted_lowered)
        imported_out = import_copy(x)
        torch.testing.assert_close(imported_out, ref_out)
    def test_reserved_getattr(self):
        """Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
        class M(torch.nn.Module):
            def forward(self, a):
                return a.foo.bar.baz
        m = M()
        m_g = symbolic_trace(m)
        m_g.graph.lint()
        for node in m_g.graph.nodes:
            self.assertTrue(node.name != "getattr")
    def test_trace_buffer_slice(self):
        """Slicing a registered buffer with a proxy-derived length traces correctly."""
        bs, d_hid = 10, 23
        class ExampleCode(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
                self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
                self.lin = torch.nn.Linear(d_hid, d_hid)
                self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
            def forward(self, x):
                x = torch.mm(x, self.mm_param)
                skip_connection = x
                x = torch.relu(x)
                # Buffer is sliced by the (dynamic) batch dimension of x.
                x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
                x = self.lin(x)
                x = torch.relu(x)
                x = x + skip_connection
                x = torch.mm(x, self.mm_param2)
                x = self.lin(x)
                return x
        ec = ExampleCode()
        traced = torch.fx.symbolic_trace(ec)
        x = torch.randn(bs, d_hid)
        torch.testing.assert_allclose(ec(x), traced(x))
    def test_node_tagging(self):
        """A Tracer subclass can attach extra metadata to every created node."""
        class TaggingTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                n = super().create_node(kind, target, args, kwargs, name)
                n.tag = 'foo'
                return n
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        m = M()
        g = TaggingTracer().trace(m)
        g.lint()
        for n in g.nodes:
            self.assertTrue(hasattr(n, 'tag'))
            self.assertEqual(n.tag, 'foo')
    def test_tensor_attribute(self):
        """Plain tensor attributes (direct and via nested qualnames) trace correctly."""
        class TensorAttribute(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.tensor = torch.rand(3, 4)
            def forward(self, x):
                return torch.nn.functional.linear(x, self.tensor)
        ta = TensorAttribute()
        traced = symbolic_trace(ta)
        traced(torch.rand(4, 4))
        class WrapperForQualname(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.ta = TensorAttribute()
            def forward(self, x):
                return torch.nn.functional.linear(x, self.ta.tensor)
        wfq = WrapperForQualname()
        traced2 = symbolic_trace(wfq)
        traced2.graph.lint()
        traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in traced.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
    def test_symbolic_trace_sequential(self):
        """nn.Sequential containers trace through to their children."""
        class Simple(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)
        seq = torch.nn.Sequential(
            Simple(),
            Simple(),
            Simple()
        )
        traced = symbolic_trace(seq)
        traced.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(traced(x), seq(x))
    def test_tensor_constant(self):
        """An inline tensor literal in forward() becomes a traced constant."""
        class ConstTensor(torch.nn.Module):
            def forward(self, x):
                return torch.nn.functional.linear(x, torch.zeros(3, 4))
        ct = ConstTensor()
        traced = symbolic_trace(ct)
        traced.graph.lint()
        traced(torch.rand(4, 4))
    def test_pickle_graphmodule(self):
        """GraphModules survive a pickle round-trip with identical behavior."""
        class Nested(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.st = torch.nn.Linear(4, 4)
            def forward(self, x):
                return self.st(x)
        n = Nested()
        traced = symbolic_trace(n)
        traced.graph.lint()
        pickled = pickle.dumps(traced)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(loaded(x), traced(x))
    def test_pickle_custom_import(self):
        """Pickling resolves module-level free functions referenced by the graph."""
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(loaded(x, y), gm(x, y))
    def test_all_input_nodes(self):
        """Node.all_input_nodes lists exactly the Node-valued inputs of each node."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.placeholder('x')
        b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
        c : torch.fx.Node = graph.get_attr('y_attr')
        d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
        e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
        graph.output(e)
        graph.lint()
        self.assertEqual(b.all_input_nodes, [a])
        self.assertEqual(c.all_input_nodes, [])
        self.assertEqual(d.all_input_nodes, [b, c])
        self.assertEqual(e.all_input_nodes, [d])
    def test_deepcopy_graphmodule_with_transform(self):
        """deepcopy of a transformed GraphModule preserves behavior; the
        generated singleton class is not shared between copies."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        def transform(traced):
            new_graph = torch.fx.Graph()
            val_map : Dict[Node, Node] = {}
            output_value = new_graph.graph_copy(traced.graph, val_map)
            relu_out = new_graph.create_node(
                op='call_method', target='neg', args=(output_value,), kwargs={})
            new_graph.output(relu_out)
            return GraphModule(traced, new_graph)
        transformed = transform(traced)
        transformed.graph.lint()
        copied = copy.deepcopy(transformed)
        self.assertNotEqual(id(type(transformed)), id(type(copied)))
        x = torch.randn(3, 4)
        self.assertEqual(copied(x), transformed(x))
    def test_deepcopy_with_submods_params(self):
        """deepcopy handles nested submodules with their own parameters."""
        class Bar(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
            def forward(self, x):
                return torch.relu(x) + self.param
        class Baz(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.bar = Bar()
            def forward(self, x):
                return self.bar(x) - self.param
        baz = Baz()
        traced = symbolic_trace(baz)
        traced.graph.lint()
        copied = copy.deepcopy(traced)
        copied.graph.lint()
    def test_deepcopy_graph_with_tracer_cls(self):
        """deepcopy of a Graph preserves its associated tracer class."""
        class TestTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True
        g = Graph(tracer_cls=TestTracer)
        x = g.placeholder("x")
        g.output(x)
        h = copy.deepcopy(g)
        self.assertIsNotNone(h._tracer_cls)
        self.assertTrue(g._tracer_cls == h._tracer_cls)
    def test_unpack_list_better_error(self):
        """*-unpacking a Proxy raises a descriptive TraceError."""
        class SomeArgs(torch.nn.Module):
            def forward(self, a, b):
                return torch.rand(3, 4)
        class UnpacksList(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sa = SomeArgs()
            def forward(self, x : list):
                return self.sa(*x)
        ul = UnpacksList()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ul)
    def test_unpack_dict_better_error(self):
        """**-unpacking a Proxy raises a descriptive TraceError."""
        class SomeKwargs(torch.nn.Module):
            def forward(self, x=3, y=4):
                return torch.rand(3, 4)
        class UnpacksDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sk = SomeKwargs()
            def forward(self, x : dict):
                return self.sk(**x)
        ud = UnpacksDict()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ud)
    def test_pretty_print_targets(self):
        # Test that Graph pretty-print prints friendly name for targets
        # in `operator` and `builtins`
        class SomeMod(torch.nn.Module):
            def forward(self, x):
                return torch.add(x.foo + x.bar, 3.0)
        traced = symbolic_trace(SomeMod())
        graph_str = str(traced.graph)
        self.assertIn('builtins.getattr', graph_str)
        self.assertIn('operator.add', graph_str)
        self.assertIn('torch.add', graph_str)
    def test_pretty_print_node(self):
        """Node.format_node() emits op kind and target for every node type."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param: torch.nn.Parameter = torch.nn.Parameter(
                    torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x: torch.Tensor, y: int = 2):
                return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
        traced = symbolic_trace(M())
        all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
        FileCheck().check("x").check("placeholder") \
            .check("y").check("placeholder") \
            .check("getitem").check("call_function") \
            .check("param").check("get_attr") \
            .check("add").check("call_function") \
            .check("linear").check("call_module") \
            .check("clamp").check("call_method") \
            .run(all_formatted)
    def test_script_tensor_constant(self):
        # TorchScript seems to ignore attributes that start with `__`.
        # We used to call anonymous Tensor values `__tensor_constant*`, but
        # they were getting ignored by script. Now they're called
        # `_tensor_constant*`
        class IHaveATensorConstant(torch.nn.Module):
            def forward(self, x):
                return x + torch.rand(3, 4)
        traced = torch.fx.symbolic_trace(IHaveATensorConstant())
        torch.jit.script(traced)
    def test_autowrap_functions(self):
        """Tracer(autowrap_functions=...) keeps the listed functions opaque."""
        class AutowrapFnTest(torch.nn.Module):
            def forward(self, x):
                return fx_int(x.shape[0] / 2)
        class AutowrapFnTest2(torch.nn.Module):
            def forward(self, x):
                return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
        # Check function(s) are wrapped
        # `int` would normally throw a TypeError as argument can't be `Proxy`
        tracer = Tracer(autowrap_functions=(fx_int,))
        graph = tracer.trace(AutowrapFnTest())
        traced = GraphModule(tracer.root, graph, 'test')
        tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
        tracer_2.trace(AutowrapFnTest2())
        # Test scriptability
        traced_scripted = torch.jit.script(traced)
        self.assertEqual(traced_scripted(torch.rand(4)), 2)
    def test_torch_fx_len(self):
        """Wrapped len() traces, scripts, and still works on non-proxy values."""
        class FXLenTest(torch.nn.Module):
            def forward(self, x):
                return len(x)
        traced = symbolic_trace(FXLenTest())
        self.assertEqual(traced(torch.rand(3, 4)), 3)
        # Test scriptability
        scripted = torch.jit.script(FXLenTest())
        self.assertEqual(scripted(torch.rand(3)), 3)
        traced_scripted = torch.jit.script(traced)
        self.assertEqual(traced_scripted(torch.rand(3)), 3)
        # Test non-proxy len
        class FXLenTest2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.l = [3, 4, 5]
            def forward(self, x):
                return x + len(self.l)
        traced2 = symbolic_trace(FXLenTest2())
        inp = torch.rand(3, 4)
        self.assertEqual(traced2(inp), inp + 3.0)
        # Wrapping must not permanently patch the builtin.
        self.assertIs(len, builtins.len)
    def test_torch_fx_getattr(self):
        """Wrapped getattr() with a default traces correctly."""
        class FXGetattrTest(torch.nn.Module):
            def forward(self, x):
                return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
        traced = symbolic_trace(FXGetattrTest())
        self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
    def test_sqrt(self):
        """math.sqrt (direct and from-imported) traces without being patched."""
        class Sqrt1(torch.nn.Module):
            def forward(self, x):
                return sqrt(x.size(0))
        class Sqrt2(torch.nn.Module):
            def forward(self, x):
                return math.sqrt(x.size(0))
        class Sqrt3(torch.nn.Module):
            def forward(self, x):
                return x + math.sqrt(2) + sqrt(2)
        self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
        self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
        self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
        self.assertIs(sqrt, _sqrt)
        self.assertIs(math.sqrt, _sqrt)
    def test_torch_custom_ops(self):
        """torch.ops.aten.* calls trace and execute correctly."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        out = gm(input)
        self.assertEqual(out, ref_out)
    def test_pickle_torch_custom_ops(self):
        """Traced torch.ops.aten.* calls survive a pickle round-trip."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        self.assertEqual(loaded(input), gm(input))
    def test_pretty_print(self):
        """str(GraphModule) includes the module repr and generated code."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        printed = str(traced)
        assert 'SimpleTest()' in printed
        assert 'torch.relu' in printed
    def test_pretty_print_graph(self):
        """str(Graph) includes args, kwargs, and user counts."""
        class KwargPrintTest(torch.nn.Module):
            def forward(self, x):
                return torch.squeeze(x + 3.0, dim=2)
        st = KwargPrintTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        stringed = str(traced.graph)
        for s in ['args', 'kwargs', '#users']:
            assert s in stringed
    def test_custom_proxy_type(self):
        """A plain user class passed through tracing keeps working (duck-typed)."""
        class TensorPair:
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair(x : TensorPair, y : TensorPair):
            s = x.add(y)
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        ref_out = use_tensor_pair(x, y)
        traced = symbolic_trace(use_tensor_pair)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_type_literal(self):
        """ProxyableClassMeta classes can be constructed from literals mid-trace."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair_literal(x : TensorPair):
            s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        ref_out = use_tensor_pair_literal(x)
        traced = symbolic_trace(use_tensor_pair_literal)
        traced_out = traced(x)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_dynamic_value(self):
        """ProxyableClassMeta classes can be constructed from proxy values mid-trace."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
            s = x.add(TensorPair(y, y))
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = torch.randn(5, 3)
        ref_out = use_tensor_pair_ctor(x, y)
        traced = symbolic_trace(use_tensor_pair_ctor)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_input_dependent_control_flow(self):
        """ProxyableClassMeta __init__ with data-dependent branching still traces."""
        class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, inp):
                if inp.sum() == 0:
                    self.is_zero = True
                    self.tensor = torch.tensor([])
                else:
                    self.is_zero = False
                    self.tensor = inp
            def add(self, other):
                if self.is_zero:
                    return ZeroTensor(other.tensor)
                elif other.is_zero:
                    return self
        def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
            return ZeroTensor(x + y)
        x, y = torch.randn(5, 3), torch.randn(5, 3)
        ref_out = use_zero_tensor(x, y)
        traced = symbolic_trace(use_zero_tensor)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.is_zero, ref_out.is_zero)
        self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
    def test_return_tuple(self):
        """Tracing preserves a tuple return value."""
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
                return (x, x + x)
        original = M()
        traced = symbolic_trace(original)
        self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
    def test_construct_root_dict(self):
        """GraphModule can be built from a plain dict mapping dotted qualified
        names to submodules/attributes; the dotted targets must be reachable
        (and appear in the generated code) and the module must compute
        linear(x) + add_param."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
        c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
        add_param : torch.Tensor = torch.rand(3, 4)
        gm : torch.fx.GraphModule = torch.fx.GraphModule(
            {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
        gm.graph.lint()
        assert 'self.foo.bar.baz' in gm.code
        x : torch.Tensor = torch.rand(3, 3)
        out : torch.Tensor = gm(x)
        ref_out : torch.Tensor = linear_mod(x) + add_param
        self.assertEqual(out, ref_out)
    def test_symbolic_trace_assert(self):
        """torch._assert must be traceable, fire at runtime in the traced
        module, and survive torch.jit.script."""
        class AssertsTensorShape(torch.nn.Module):
            def forward(self, x):
                torch._assert(x.shape[1] > 4, "assert_foobar")
                return x
        m = AssertsTensorShape()
        # verify traceability
        traced = symbolic_trace(m)
        # verify assertion on traced model works correctly at runtime
        traced(torch.rand(4, 5))
        with self.assertRaisesRegex(AssertionError, "assert_foobar"):
            traced(torch.rand(4, 3))
        # verify the symbolically traced module is scriptable
        ms = torch.jit.script(m)
        with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
            ms(torch.rand(4, 3))
    def test_fx_create_arg(self):
        """An object implementing __fx_create_arg__ controls how it is turned
        into a Node when passed to a leaf module; the emitted code should
        contain a constructor call for the custom type."""
        class CustomArgObject:
            def __init__(self, x, y):
                self.x = x
                self.y = y
            def __fx_create_arg__(self, tracer: torch.fx.Tracer):
                # Re-materialize the object as a call_function node whose args
                # are the traced versions of its fields.
                return tracer.create_node(
                    "call_function",
                    CustomArgObject,
                    args=(
                        tracer.create_arg(self.x),
                        tracer.create_arg(self.y),
                    ),
                    kwargs={},
                )
        class HasCustomArgObjectWhenLeaf(torch.nn.Module):
            def forward(self, o: CustomArgObject):
                # Not normally traceable; good reason to make
                # this module a leaf.
                for x in o.x:
                    o.y += x
                return o.y
        class Root(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.inner = HasCustomArgObjectWhenLeaf()
            def forward(self, x, y):
                o = CustomArgObject(x, y)
                return self.inner(o)
        class CreateArgTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                return type(m) is HasCustomArgObjectWhenLeaf
        m = Root()
        graph = CreateArgTracer().trace(m)
        gm = torch.fx.GraphModule(m, graph)
        assert "CustomArgObject(" in gm.code
    def test_trace_fn_constant(self):
        """A free function closing over a tensor constant should trace
        correctly and match eager execution."""
        some_constant = torch.rand(3, 4)
        def add_const(x):
            return some_constant + x
        traced = symbolic_trace(add_const)
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), add_const(input))
    def test_copy_no_remap(self):
        """Copying nodes into a new Graph without remapping their arguments
        must be caught by lint() (args still point into the old Graph)."""
        traced = symbolic_trace(SimpleTest())
        g = traced.graph
        copied = torch.fx.Graph()
        for node in g.nodes:
            copied.node_copy(node)
        with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
            copied.lint()
    def test_wrong_topo(self):
        """lint() must reject a graph whose node order violates topological
        order (a use appearing before its definition)."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
        c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        nodes = list(graph.nodes)
        # Move node 2 after node 3, breaking def-before-use ordering.
        nodes[3].append(nodes[2])
        with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
            graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Shape] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propogation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
    def test_shape_prop_layout(self):
        """ShapeProp should record memory_format: contiguous for a contiguous
        input, and channels_last at least for the placeholder when the model
        and input are converted to channels_last."""
        class ConvTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv2d(5, 5, 3)
            def forward(self, x):
                return self.conv_mod(x)
        # contiguous layout
        test_mod = ConvTest()
        traced = symbolic_trace(test_mod)
        x = torch.randn(5, 5, 224, 224)
        shape_prop.ShapeProp(traced).propagate(x)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced.graph.nodes))
        x_channels_last = x.contiguous(memory_format=torch.channels_last)
        traced.to(memory_format=torch.channels_last)
        shape_prop.ShapeProp(traced).propagate(x_channels_last)
        for node in traced.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
    def test_shape_prop_aggregate(self):
        """ShapeProp on a leaf module returning an aggregate (int, tensor)
        should store the aggregate structurally in tensor_meta."""
        class ReturnTwo(torch.nn.Module):
            def forward(self, x):
                return (3, torch.sum(x))
        class UnderTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.rt = ReturnTwo()
            def forward(self, x):
                return self.rt(x)
        ut = UnderTest()
        class RTTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                # Keep ReturnTwo opaque so its tuple return stays aggregate.
                return type(m) is ReturnTwo
        graph = RTTracer().trace(ut)
        mod = torch.fx.GraphModule(ut, graph)
        shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
        for node in mod.graph.nodes:
            if node.op == 'call_module':
                assert 'tensor_meta' in node.meta
                tensor_meta = node.meta['tensor_meta']
                assert tensor_meta[0] == 3
                assert tensor_meta[1].shape == torch.Size([])
    def test_shape_prop_layout_3d(self):
        """Same as test_shape_prop_layout but for 3-D convolutions and
        channels_last_3d."""
        class ConvTest3d(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv3d(5, 5, 3)
            def forward(self, x):
                return self.conv_mod(x)
        test_mod_3d = ConvTest3d()
        traced_3d = symbolic_trace(test_mod_3d)
        x_3d = torch.randn(5, 5, 224, 224, 15)
        shape_prop.ShapeProp(traced_3d).propagate(x_3d)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced_3d.graph.nodes))
        x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
        traced_3d.to(memory_format=torch.channels_last_3d)
        shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
        for node in traced_3d.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
    def test_interpreter(self):
        """Interpreter.run on a traced module must match both the traced and
        the eager module outputs."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        interpreter = Interpreter(gm)
        input = torch.randn(3, 4)
        self.assertEqual(interpreter.run(input), gm(input))
        self.assertEqual(interpreter.run(input), m(input))
    def test_interpreter_run_node_override(self):
        """Overriding Interpreter.run_node lets a subclass observe (here:
        cache) every node's computed value during execution."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        class RunNodeInterpreter(Interpreter):
            def __init__(self, module):
                super().__init__(module)
            def run_node(self, n : Node) -> Any:
                result = super().run_node(n)
                # Stash the computed value on the node itself.
                n.cached_value = result
                return result
        input = torch.randn(3, 4)
        RunNodeInterpreter(gm).run(input)
        for node in gm.graph.nodes:
            assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
    def test_interpreter_partial_eval(self):
        """Seeding Interpreter.run with initial_env lets a node's value be
        pinned ahead of time; downstream nodes compute from the pinned value."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        gm = torch.fx.symbolic_trace(MyModule())
        interp = Interpreter(gm)
        env = {}
        for node in gm.graph.nodes:
            if node.op == 'call_module' and node.target == 'linear':
                # Pin the linear node's output to a known tensor.
                env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
                break
        assert len(env) == 1
        x = torch.randn(3, 4)
        result = interp.run(x, initial_env=env)
        self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
    def test_interpreter_star_args(self):
        """Interpreter must bind extra positional inputs to a *args
        parameter of the traced function."""
        def with_star_args(x, *args):
            return x + args[0]
        gm = torch.fx.symbolic_trace(with_star_args)
        interp = Interpreter(gm)
        result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
        self.assertEqual(result, torch.ones(3, 4) * 2.0)
    @skipIfNoTorchVision
    def test_interpreter_noop_resnet18(self):
        """A no-op Transformer pass over resnet18 must preserve outputs."""
        rn18 = torchvision_models.resnet18()
        transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
        inp = torch.randn(5, 3, 224, 224)
        self.assertEqual(transformed(inp), rn18(inp))
    @skipIfNoTorchVision
    def test_interpreter_gc_values(self):
        """After a run, Interpreter should have garbage-collected all
        intermediate values: only the output node remains in its env."""
        rn18 = torchvision_models.resnet18()
        interp = Interpreter(symbolic_trace(rn18))
        inp = torch.rand(5, 3, 224, 224)
        out = interp.run(inp)
        env_key_names = set(n.name for n in interp.env.keys())
        self.assertEqual(env_key_names, set(['output']))
    def test_interpreter_default_args(self):
        """Interpreter must fall back to a parameter's default value when the
        argument is not supplied."""
        class Model(torch.nn.Module):
            def forward(self, x, y=3.14159):
                return x + y
        model = Model()
        gm = torch.fx.symbolic_trace(model)
        interp = Interpreter(gm)
        x = torch.randn(5, 3)
        out = interp.run(x)
        torch.testing.assert_allclose(out, x + 3.14159)
    def test_interpreter_not_enough_args(self):
        """Interpreter must raise a descriptive error when a required
        positional argument is missing."""
        class Model(torch.nn.Module):
            def forward(self, x, y):
                return x + y
        model = Model()
        gm = torch.fx.symbolic_trace(model)
        interp = Interpreter(gm)
        x = torch.randn(5, 3)
        with self.assertRaisesRegex(RuntimeError,
                                    'Expected positional argument for parameter y, but one was not passed in'):
            out = interp.run(x)
    def test_transformer_noop(self):
        """A default Transformer (no overrides) must produce an equivalent
        module."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
    def test_transformer_multi_outputs(self):
        """Transformer must handle a forward returning multiple values."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                x = x + self.param
                out = self.linear(x)
                return x, out
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
    def test_fn_type_annotations(self):
        """Parameter/return annotations (including a NamedTuple parameter)
        must survive tracing so the traced module is still scriptable."""
        class Foo(torch.nn.Module):
            def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
                return {'a': p.x + p.y + z + i}
        foo_scripted = torch.jit.script(Foo())
        foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
        fxed = symbolic_trace(Foo())
        fxed_scripted = torch.jit.script(fxed)
        fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
    def test_fn_type_annotation_empty(self):
        """A traced function with a List annotation must remain scriptable."""
        def forward(a : List[torch.Tensor]):
            return a[0]
        torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
    def test_sequential(self):
        """A traced nn.Sequential must be deep-copyable without error."""
        m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
        gm = torch.fx.symbolic_trace(m)
        # The deepcopy itself is the assertion: it must not raise.
        gm_copy = copy.deepcopy(gm)
    def test_ctx_mgr(self):
        """A forward decorated with a context-manager-producing decorator must
        still trace correctly."""
        @contextlib.contextmanager
        def do_nothing():
            yield
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            @do_nothing()
            def forward(self, x):
                return torch.relu(x)
        m = M()
        self.checkGraphModule(m, (torch.rand(3, 4),))
    def test_typename_print(self):
        """A node's type_expr should be rendered (as a typing name) in the
        graph's string representation."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
                                              type_expr=List[float])
        output : torch.fx.Node = graph.output(b)
        self.assertTrue('typing.List[float]' in str(graph))
    def test_layout(self):
        """Tracing must preserve keyword arguments like layout/pin_memory on
        factory functions (empty_like)."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x):
                return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
        traced = symbolic_trace(M())
        x = torch.rand(5, 9, 3, 4)
        self.assertEqual(traced(x), torch.zeros_like(x))
    def test_ellipsis(self):
        """Tracing must handle Ellipsis (...) inside slicing expressions."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y):
                return x + y[:, 1:10, ...]
        traced = symbolic_trace(M())
        x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
        self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
    def test_inf_nan(self):
        """Tracing must handle inf/-inf/nan float constants in the forward."""
        class FooMod(torch.nn.Module):
            def forward(self, x):
                return x + float('inf'), x + float('-inf'), x + float('nan')
        fm = FooMod()
        self.checkGraphModule(fm, (torch.rand(3, 4),))
    def test_inf_nan_kwds(self):
        """Codegen must correctly emit inf/nan constants appearing directly in
        node args."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
        c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
        graph.output((b, c))
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        x = torch.rand(3, 4)
        self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
    def test_deepcopy_recursion_depth(self):
        """deepcopy of a graph deeper than the recursion limit must not blow
        the stack, and the copied user/use relationships must mirror the
        originals."""
        depth = sys.getrecursionlimit() + 20
        g = torch.fx.Graph()
        x = g.placeholder('x')
        for i in range(depth):
            x = g.call_function(torch.relu, (x,))
        g.output(x)
        copied_graph = copy.deepcopy(g)
        val_map = {}
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            val_map[orig_node] = new_node
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            orig_users = set(orig_node.users.keys())
            orig_users_equiv = set(val_map[u] for u in orig_users)
            new_users = set(new_node.users.keys())
            self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=node.kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
    def test_replace_input(self):
        """Node.replace_input_with must swap one input for another, changing
        which placeholder feeds the relu."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        b.replace_input_with(x, y)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input_x = torch.randn(33, 44)
        input_y = torch.randn(11, 22)
        self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
    def test_insertion_point(self):
        """graph.inserting_before places a new node ahead of an existing one
        so it can be wired in as that node's input."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        with graph.inserting_before(b):
            neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
            _, *relu_args = b.args
            b.args = (neg, *relu_args)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_update_args_api(self):
        """Node.update_arg must rewrite a positional argument in place,
        redirecting the relu from x to y."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        b.update_arg(0, y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_update_kwargs_api(self):
        """Node.update_kwarg must rewrite a keyword argument in place,
        redirecting the relu from x to y."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        b.update_kwarg('input', y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_immutable_list_pytree_ops(self):
        """immutable_list must round-trip through pytree flatten/unflatten,
        preserving both contents and type."""
        rand_tensor = torch.randn(5, 3)
        l = immutable_list([3, [rand_tensor, 42]])
        flattened, spec = pytree.tree_flatten(l)
        assert flattened == [3, rand_tensor, 42]
        unflattened = pytree.tree_unflatten(flattened, spec)
        assert unflattened == l
        assert isinstance(unflattened, immutable_list)
    def test_immutable_dict_pytree_ops(self):
        """immutable_dict must round-trip through pytree flatten/unflatten,
        preserving both contents and type."""
        rand_tensor = torch.randn(5, 3)
        d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
        flattened, spec = pytree.tree_flatten(d)
        assert flattened == [3, rand_tensor, 42]
        unflattened = pytree.tree_unflatten(flattened, spec)
        assert unflattened == d
        assert isinstance(unflattened, immutable_dict)
    def test_move_before(self):
        """Node.prepend must reposition an already-created node so the graph
        stays topologically valid after rewiring args."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
        _, *relu_args = b.args
        b.args = (neg, *relu_args)
        # neg was appended after output; move it before the relu.
        b.prepend(neg)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_prepend_self(self):
        """Prepending a node to itself and appending it after its input must
        be no-ops that leave the node count unchanged."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        b.prepend(b)
        x.append(b)
        self.assertEqual(len(graph.nodes), 3)
    def test_erase_node_error(self):
        """erase_node must refuse to delete a node that still has users."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        for node in traced.graph.nodes:
            # Test deleting with uses both in another Node and at the output
            if node.target in [operator.add, torch.relu]:
                with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
                    traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
    def test_get_torch_func_signature(self):
        """Smoke test: get_signature_for_torch_op must not raise for any
        callable attribute of the torch namespace (return value unchecked)."""
        for key in dir(torch):
            obj = getattr(torch, key)
            if callable(obj):
                schemas = get_signature_for_torch_op(obj)
    def test_find_uses(self):
        """Node.users must list every consumer of a value; x feeds relu, add,
        and neg here."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        y = torch.relu(x)
        z = x + x
        u = torch.neg(x)
        graph.output((y + z + u).node)
        graph.lint()
        users_of_x = x.node.users
        self.assertEqual(len(users_of_x), 3)
        expected_ops = set(['relu', 'add', 'neg'])
        for use in users_of_x:
            assert any(use.name.startswith(prefix) for prefix in expected_ops)
    def test_inline_graph(self):
        """graph_copy with a val_map can stitch one traced graph onto the
        output of another, composing relu then neg."""
        class InlineInto(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        class ToInline(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)
        inline_into = symbolic_trace(InlineInto())
        to_inline = symbolic_trace(ToInline())
        combined_graph = torch.fx.Graph()
        output_node = combined_graph.graph_copy(inline_into.graph, {})
        input_node = list(to_inline.graph.nodes)[0]
        assert input_node and input_node.op == 'placeholder'
        # Map the second graph's placeholder to the first graph's output.
        val_map = {input_node : output_node}
        output = combined_graph.graph_copy(to_inline.graph, val_map)
        combined_graph.output(output)
        combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
        input = torch.rand(3, 4)
        self.assertEqual(combined_module(input), input.relu().neg())
    def test_multi_insert_point(self):
        """Multiple nodes created inside one inserting_before context must all
        land before the anchor node, in creation order."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        relu = torch.relu(x)
        with graph.inserting_before(relu.node):
            y = torch.neg(x)
            z = torch.tanh(y)
        graph.output((relu.node, z.node))
        graph.lint()
        expected_ops = ['x', 'neg', 'tanh', 'relu']
        for node, expected in zip(graph.nodes, expected_ops):
            assert expected in node.name
    def test_reassign_args_kwargs_uses(self):
        """Assigning to node.args must keep the users bookkeeping consistent
        as edges are added and removed."""
        graph = torch.fx.Graph()
        x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
        z = x + y
        zed = z + z + z
        graph.output(zed.node)
        graph.lint()
        # zed = z + z + z -> zed = z + z + x
        zed.node.args = (zed.node.args[0], x.node)
        self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
        # z = x + y -> z = y + y
        z.node.args = (y.node, y.node)
        self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
    def test_trace_dict_int_keys(self):
        """A dict with int keys (values holding Proxies) may be passed to a
        leaf module during tracing."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[int, torch.Tensor]):
                return d[42]
        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()
            def forward(self, x):
                return self.m({42: x})
        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return isinstance(m, ModWithDictArg)
        traced_graph = MyTracer().trace(CallsModWithDict())
    def test_trace_dict_proxy_keys(self):
        """Using a Proxy as a dict *key* is not representable and must raise
        during tracing."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
                return d[42]
        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()
            def forward(self, x):
                return self.m({x: x})
        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return isinstance(m, ModWithDictArg)
        with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
            traced_graph = MyTracer().trace(CallsModWithDict())
    def test_module_deepcopy_edit_nodes(self):
        """Editing node targets on a deepcopy must not affect the original
        traced module."""
        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        traced1 = symbolic_trace(Foo())
        copied = copy.deepcopy(traced1)
        for node in copied.graph.nodes:
            if node.target == torch.relu:
                node.target = torch.neg
        copied.recompile()
        traced1.recompile()
        x = torch.randn(15, 15)
        torch.testing.assert_allclose(traced1(x), torch.relu(x))
        torch.testing.assert_allclose(copied(x), torch.neg(x))
    def test_direct_param_use(self):
        """Directly returning/using a submodule's parameter must trace to
        get_attr nodes, not lifted tensor constants."""
        class TransposeTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.b = torch.nn.Parameter(torch.rand(4, 3))
            def forward(self, x):
                return self.b
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.a = TransposeTest()
            def forward(self, x):
                return self.a.b, self.a.b.t(), self.a.b.view(12)
        traced = torch.fx.symbolic_trace(Foo())
        assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
    def test_multiple_default_args(self):
        """A forward with two defaulted arguments must trace correctly with
        zero, one, or two arguments supplied."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, y=1, z=2):
                return y + z
        m = M()
        self.checkGraphModule(m, ())
        self.checkGraphModule(m, (3,))
        self.checkGraphModule(m, (3, 4))
    def test_regular_and_default_args(self):
        """A forward mixing a required and a defaulted argument must trace
        correctly in both call forms."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y=1):
                return x + y
        m = M()
        self.checkGraphModule(m, (2,))
        self.checkGraphModule(m, (2, 3))
    def test_string_literal_return(self):
        """A forward returning a bare string literal must trace correctly."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self):
                return "foo"
        m = M()
        self.checkGraphModule(m, ())
    def test_namedtuple_return_qualname(self):
        """Returning a module-level NamedTuple (requiring qualified-name
        resolution in codegen) must trace correctly."""
        class NamedTupReturn(torch.nn.Module):
            def forward(self, x):
                return MyNamedTup(x, x)
        traced = symbolic_trace(NamedTupReturn())
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), MyNamedTup(input, input))
    def test_update_args_kwargs_yells_at_you(self):
        """The private __update_args_kwargs must not be reachable from outside
        the Node class (name mangling applies)."""
        symtraced = symbolic_trace(SimpleTest())
        node = next(iter(symtraced.graph.nodes))
        with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
            node.__update_args_kwargs((), {})
    def test_torchbind_class_attribute_in_fx(self):
        """A TorchBind custom-class attribute must be traceable via method
        calls on the bound object."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
        class FooBar1234(torch.nn.Module):
            def __init__(self):
                super(FooBar1234, self).__init__()
                self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
            def forward(self):
                return self.f.top()
        m = FooBar1234()
        self.checkGraphModule(m, ())
    def test_torchbind_class_attribute_in_fx_tensor_arg(self):
        """A TorchBind custom-class method taking a tensor argument must trace
        to a call_method node and match eager output."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
        class FooBar2341(torch.nn.Module):
            def __init__(self):
                super(FooBar2341, self).__init__()
                self.f = torch.classes._TorchScriptTesting._ReLUClass()
            def forward(self, x):
                return self.f.run(x)
        m = FooBar2341()
        traced = symbolic_trace(m)
        input = torch.randn(3, 4)
        self.assertEqual(traced(input), m(input))
        self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
    def test_script_method_trace(self):
        """A scripted submodule must be treated as an opaque callable: the
        trace produces a call_method node and matches eager output."""
        class Scripted(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        class Holder(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.s = torch.jit.script(Scripted())
            def forward(self, x):
                return self.s(x)
        h = Holder()
        traced = symbolic_trace(h)
        input = torch.randn(3, 4)
        self.assertEqual(traced(input), h(input))
        self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
    def test_namedtuple_return_trace(self):
        """Returning a NamedTuple from forward must trace correctly."""
        class NamedTupReturn(torch.nn.Module):
            def forward(self, x):
                return Pair(x, x)
        traced = symbolic_trace(NamedTupReturn())
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), Pair(input, input))
    def test_named_tuple_inlined(self):
        """NamedTuple instances placed directly in node args/kwargs (not as
        separate nodes) must be formatted, executed, and DCE'd correctly."""
        class NamedTupMod(torch.nn.Module):
            def forward(self, inp):
                return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
        m = NamedTupMod()
        input = torch.rand(3, 4)
        ref = m(input)
        traced = symbolic_trace(m)
        res = traced(input)
        self.assertEqual(ref, res)
        # Check Pair NamedTuple works when inlined into the function call.
        ph = call_func = None
        for node in traced.graph.nodes:
            if node.op == "placeholder":
                ph = node
            elif node.op == "call_function" and node.target == wrapped_named_tup:
                node.update_arg(0, Pair(ph, 1.2))
                node.update_kwarg("p2", Pair(3.4, ph))
                call_func = node
                break
        self.assertTrue(call_func is not None)
        self.assertTrue(isinstance(call_func.args[0], Pair))
        self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
        self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
        self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
        traced.graph.eliminate_dead_code()
        traced.recompile()
        res = traced(input)
        self.assertEqual(ref, res)
    def test_return_type_exists(self):
        """Return-type annotations must appear (sanitized) in generated code
        and survive scripting."""
        class ReturnTypeModule(torch.nn.Module):
            def other(self, x: List[str]) -> List[str]:
                return x
            def forward(self, x: List[str]) -> List[str]:
                return self.other(x)
        traced = symbolic_trace(ReturnTypeModule())
        self.assertIn("-> typing_List[str]", traced._code)
        scripted = torch.jit.script(traced)
        self.assertIn("-> List[str]", scripted.code)
    def getitem_inner(self):
        """Shared body for the getitem tests: buffer indexing with slices,
        Proxy-derived indices, and plain ints must all trace correctly.

        Not named ``test_*`` on purpose — it is driven by test_getitem /
        test_getitem_subproc depending on the FX_PATCH_GETITEM env var.
        """
        class GetItemBase(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer('pe', torch.randn(8, 8))
        class GetItem1(GetItemBase):
            def forward(self, x):
                return self.pe[:, :x.size(0)]
        class GetItem2(GetItemBase):
            def forward(self, x):
                return self.pe[x.size(0)]
        class GetItem3(GetItemBase):
            def forward(self, x):
                return self.pe[4]  # fx creates `self._tensor_constant0` here
        self.checkGraphModule(GetItem1(), [torch.zeros(4)])
        self.checkGraphModule(GetItem2(), [torch.zeros(4)])
        self.checkGraphModule(GetItem3(), [torch.zeros(4)])
    @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
                         "Will be checked in test_getitem_subproc")
    def test_getitem(self):
        """Run getitem_inner in-process when FX_PATCH_GETITEM is enabled."""
        self.getitem_inner()
    def test_getitem_subproc(self):
        """Run the getitem checks in a subprocess (exit code 0 == pass)."""
        # need to run this test in a subproc to work around:
        #   https://github.com/pytorch/pytorch/issues/50710
        proc = Process(target=run_getitem_target)
        proc.start()
        proc.join()
        self.assertEqual(proc.exitcode, 0)
    def test_user_friendly_call_provenance_with_function(self):
        """Scripting a traced free function that calls an unscriptable wrapped
        fn must raise an error naming the call site ('fn.forward')."""
        def fn(x):
            return wrapper_fn(x)
        traced = torch.fx.symbolic_trace(fn)
        with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                    "being compiled since it was called"
                                    " from 'fn.forward'"):
            scripted = torch.jit.script(traced)
    def test_user_friendly_call_provenance_with_module(self):
        """Same as the function variant, but the call site reported must be
        the module's forward ('M.forward')."""
        class M(torch.nn.Module):
            def forward(self, x):
                return wrapper_fn(x)
        traced = torch.fx.symbolic_trace(M())
        with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                    "being compiled since it was called"
                                    " from 'M.forward'"):
            scripted = torch.jit.script(traced)
    def test_snake_case(self):
        """Generated node names must be snake_cased versions of the (possibly
        PascalCase / ALL_CAPS) module-dict keys, while targets keep the
        original qualified names."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.activations = torch.nn.ModuleDict([
                    ["snake_case", torch.nn.ReLU()],
                    ["PascalCase", torch.nn.LeakyReLU()],
                    ["ALL_CAPS", torch.nn.PReLU()]
                ])
            def forward(self, x):
                a = self.activations["snake_case"](x)
                b = self.activations["PascalCase"](x)
                c = self.activations["ALL_CAPS"](x)
                return a, b, c
        traced = symbolic_trace(M())
        # (expected node name, expected node target) in graph order.
        check = [
            ("activations_snake_case", "activations.snake_case"),
            ("activations_pascal_case", "activations.PascalCase"),
            ("activations_all_caps", "activations.ALL_CAPS")
        ]
        i = 0
        for node in traced.graph.nodes:
            if node.op == "placeholder" or node.op == "output":
                continue
            name = check[i][0]
            target = check[i][1]
            self.assertEqual(name, node.name)
            self.assertEqual(target, node.target)
            i += 1
        self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
    def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
        # When the failure originates in the *generated* forward (here: a
        # hand-inserted `relu` call on a non-tensor input), the runtime must
        # print the FX-specific traceback banner pointing at the generated
        # code.
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.W = torch.nn.Parameter(torch.randn(5))
            def forward(self, x):
                return torch.dot(self.W, x)
        traced = torch.fx.symbolic_trace(M())
        # Splice a `relu` call in front of the output node.
        out = [n for n in traced.graph.nodes if n.op == "output"][-1]
        with traced.graph.inserting_before(out):
            relu_out = traced.graph.call_method(method_name='relu',
                                                args=(out.args[0],))
        out.args = (relu_out,)
        traced.recompile()
        # Calling with an int makes torch.dot raise inside generated code.
        with self.capture_stderr() as captured:
            with self.assertRaises(TypeError):
                traced(5)
        self.assertRegex(captured[0],
                         r"Call using an FX-traced Module, line .* of the "
                         r"traced Module's generated forward function:")
    def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
        # Counterpart to the test above: when the failure comes from inside
        # a real submodule (shape mismatch in nn.Linear), the FX banner must
        # NOT be printed — the ordinary traceback is the right one.
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(3, 4)
            def forward(self, x):
                return self.linear(x)
        traced = torch.fx.symbolic_trace(M())
        # Do not change this to `capture_stderr` or another context
        # manager without ensuring that the output is as expected
        try:
            traced(torch.rand(5, 5))  # 5x5 input into a 3->4 Linear: raises
        except RuntimeError:
            captured = traceback.format_exc()
        self.assertNotRegex(captured,
                            r"Call using an FX-traced Module, line .* of the "
                            r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
    def test_throw_out_variant(self):
        # A tracer that opts into `check_mutable_operations` must reject
        # tracing through an out= variant (here aten::sigmoid.out), since
        # it mutates its argument.
        def foo(x):
            y = torch.rand_like(x)
            torch.sigmoid(x, out=y)
            return y
        class MyTracer(torch.fx.Tracer):
            # Enable the mutability check for this tracer only.
            check_mutable_operations = True
        tracer = MyTracer()
        with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
            traced_graph = tracer.trace(foo)
    def test_ast_rewriter_reassigns_submodules(self):
        # NOTE(review): despite the name, `forward` never touches self.bn;
        # as written this only checks that tracing a module that *owns* a
        # submodule yields a lint-clean graph — confirm intent.
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.bn = torch.nn.BatchNorm2d(100)
            def forward(self, x: torch.Tensor):
                return torch.add(x, x)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_ast_rewriter_wrap(self):
        # A function registered via torch.fx.wrap must appear as a leaf call
        # in the rewritten graph (not be traced into), still compute the
        # right value, and the module-level symbol must be left unpatched
        # after tracing.
        self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
        def to_trace(y):
            return (
                a_lifted_leaf((4, y), 3)
                + a_lifted_leaf((3, 4), 5)
                + a_lifted_leaf((y, y), y)
            )
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("a_lifted_leaf", traced.code)
        self.assertEqual(27, traced(2))
        # Tracing must not leave the global permanently patched.
        self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
    def test_ast_rewriter_wrap_fn_directly(self):
        # Same as test_ast_rewriter_wrap, but for a function that was passed
        # directly to torch.fx.wrap (rather than registered by name).
        self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
        def to_trace(y):
            return (
                a_lifted_leaf2((4, y), 3)
                + a_lifted_leaf2((3, 4), 5)
                + a_lifted_leaf2((y, y), y)
            )
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("a_lifted_leaf2", traced.code)
        self.assertEqual(27, traced(2))
        # Tracing must not leave the global permanently patched.
        self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
    def test_ast_rewriter_wrapped_via_decorator(self):
        # A function wrapped with the @torch.fx.wrap decorator must show up
        # as a leaf call when a module using it is traced by the AST
        # rewriter, and the decorator must not leave a patch marker behind.
        class F(torch.nn.Module):
            def forward(self, x):
                return wrapped_via_decorator(x)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(F())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
        # The leaf status of a @torch.fx.wrap-decorated function must be
        # preserved not only through AST-rewrite tracing but also through a
        # subsequent Transformer pass.
        self.assertEqual(wrapped_via_decorator(0), 1)
        def to_trace(y):
            return wrapped_via_decorator(y)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
        # Round-trip through Transformer: same invariants must hold.
        transformed = torch.fx.Transformer(traced).transform()
        self.assertIn("wrapped_via_decorator", transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_ast_rewriter_wrap_with_submodule(self):
        # A wrapped function that takes a submodule as an argument must stay
        # a leaf call under the AST rewriter and still compute the same
        # result as applying the submodule directly.
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("wrapped_with_submodule", traced.code)
        input = torch.rand(3, 2)
        # affine=False + fresh module: reference BN is stateless-identical.
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), traced(input))
    def test_submodule_manipulation_API(self):
        # End-to-end exercise of GraphModule's submodule-manipulation
        # surface: add_submodule / delete_submodule / get_submodule /
        # get_parameter / get_buffer / delete_all_unused_submodules, plus
        # the warnings emitted by graph.call_module / graph.get_attr when
        # the referenced attribute does (or does not) exist on the owner.
        class C(torch.nn.Module):
            def __init__(self):
                super(C, self).__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
                self.param = torch.nn.Parameter(torch.rand(2, 3))
            def forward(self, x):
                return self.conv(torch.cat([self.param, x]))
        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.linear = torch.nn.Linear(100, 200)
                self.register_buffer("buf", torch.randn(2, 3))
                self.net_c = C()
            def forward(self, x):
                return self.linear(torch.cat([self.buf, self.net_c(x)]))
        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()
                self.net_b = B()
                self.param = torch.nn.Parameter(torch.rand(2, 3))
            def forward(self, x):
                return self.net_b(x) + self.param
        a = symbolic_trace(A())
        # Replace the nested conv with a freshly added dropout submodule.
        a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
        with a.graph.inserting_before(conv):
            # Since the dropout module exists on `a`, call_module must not
            # warn here.
            with warnings.catch_warnings(record=True) as w:
                dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                              args=conv.args)
                self.assertEqual(len(w), 0)
        conv.replace_all_uses_with(dropout)
        a.graph.erase_node(conv)
        a.recompile()
        def module_exists(gm: GraphModule, path: str) -> bool:
            # True iff `path` names a (possibly nested) submodule of `gm`.
            return any(path == name for name, _ in gm.named_modules())
        def parameter_exists(gm: GraphModule, path: str) -> bool:
            # Parameter must be visible both via named_parameters and in the
            # state_dict.
            return (any(path == name for name, _ in gm.named_parameters())
                    and any(path == name for name in gm.state_dict().keys()))
        def buffer_exists(gm: GraphModule, path: str) -> bool:
            # Same dual check for buffers.
            return (any(path == name for name, _ in gm.named_buffers())
                    and any(path == name for name in gm.state_dict().keys()))
        # Test that we added the "dropout" submodule
        self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
        # Test `get_submodule` with an added submodule
        self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
        # Test that the "conv" submodule is still there
        self.assertTrue(module_exists(a, "net_b.net_c.conv"))
        # Test `get_submodule` with an original module
        self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
        # Test that the "conv" node is NOT still there
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
        self.assertEqual(conv, [])
        a.delete_submodule("net_b.net_c.conv")
        # Test that the "conv" submodule is now gone
        self.assertFalse(module_exists(a, "net_b.net_c.conv"))
        # Test `get_submodule` with a deleted submodule
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`conv`"):
            self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
        # Test `get_attr` warnings
        cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
        with a.graph.inserting_before(cat):
            # Existing attribute: no warning expected.
            with warnings.catch_warnings(record=True) as w:
                param = a.graph.get_attr(qualified_name="net_b.net_c.param")
                self.assertEqual(len(w), 0)
            # Missing attribute: get_attr must warn about the dangling ref.
            with self.assertWarnsRegex(UserWarning, "Attempted to "
                                       "insert a get_attr Node with no "
                                       "underlying reference in the "
                                       "owning GraphModule"):
                bad_param = a.graph.get_attr(qualified_name="net_b.param")
                a.graph.erase_node(bad_param)
        cat.args = (*cat.args, param)
        a.recompile()
        a.graph.lint()
        # Test `get_parameter`
        a.get_parameter("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "is not an "
                                    "nn.Parameter"):
            a.get_parameter("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`param`"):
            a.get_parameter("net_b.param")
        # Test `get_buffer`
        a.get_buffer("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "is not a "
                                    "buffer"):
            a.get_buffer("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`buf`"):
            a.get_buffer("net_b.net_c.buf")
        # Test non-nested attributes
        a.get_submodule("")
        a.get_parameter("param")
        # Insert some unused submodules
        a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
        a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
        # Garbage collection
        a.delete_all_unused_submodules()
        # Test that all the unused submodules are gone
        self.assertFalse(module_exists(a, "net_b.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
        self.assertFalse(module_exists(a, "batch_norm_2d"))
        # Test that we didn't delete any unused Parameters or buffers
        self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
        self.assertTrue(buffer_exists(a, "net_b.buf"))
        a.graph.lint()
    def test_delete_unused_submodules_leaf(self):
        # delete_all_unused_submodules must NOT delete a module that a
        # custom tracer treated as a leaf (it is referenced as a single
        # call_module node, even though its inner linear/relu are "unused"
        # from the graph's point of view).
        class SubModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(10, 10)
                self.relu = torch.nn.ReLU()
            def forward(self, x):
                x = self.linear(x)
                x = self.relu(x)
                return x
        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.submod = SubModule()
            def forward(self, x):
                x = self.submod(x)
                return x
        model = Model()
        class MyCustomTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                # Keep `submod` opaque: one call_module node, not inlined.
                return module_qualified_name == "submod"
        inputs = torch.randn(1, 10)
        traced_graph = MyCustomTracer().trace(model)
        gm2 = torch.fx.GraphModule(model, traced_graph)
        gm2.delete_all_unused_submodules()
        torch.testing.assert_allclose(gm2(inputs), model(inputs))
    def test_tracing_graphmodules_as_leaf_submodules(self):
        # Covers three interactions between leaf-module policies and
        # GraphModule submodules:
        #   1. B traced as a non-leaf: it is inlined, and its custom
        #      __call__ wrapper IS honored (the subtraction path appears).
        #   2. B (plain module) as a leaf: kept as a call_module node and
        #      its __call__ runs at execution time.
        #   3. B pre-traced into a GraphModule, then used as a leaf:
        #      symbolic_trace invoked forward() directly, so the original
        #      __call__ side effects were bypassed during tracing.
        class A(torch.nn.Module):
            def forward(self, t):
                return t + t
        class B(torch.nn.Module):
            def __init__(self):
                super(type(self), self).__init__()
                self.calling = False
                self.called = False
            def forward(self, t):
                if self.calling:
                    return t - t
                else:
                    return t + t
            def __call__(self, *args):
                self.called = True
                self.calling = True
                return super(type(self), self).__call__(*args)
                self.calling = False  # NOTE(review): unreachable after return — confirm intent
        class M(torch.nn.Module):
            def __init__(self, a, b):
                super().__init__()
                self.a = a
                self.b = b
            def forward(self, t):
                x = self.a(t)
                y = self.b(t)
                return x + y
        class LeafTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True
        class LeafTracerNotB(Tracer):
            def is_leaf_module(self, module, name):
                return False if "b" in name else True
        # Recompile calls added "for fun", since they
        # chain __call__ wrappers.
        #
        # Test: B as a regular, non-leaf module
        #
        a = symbolic_trace(A())
        a.recompile()
        m = M(a, B())
        graph = LeafTracerNotB().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()
        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)
        # Test submodule b is not treated as leaf.
        self.assertFalse(hasattr(gm, "b"))
        # Test assert custom __call__ on submodule b was honored.
        match = [
            n
            for n in gm.graph.nodes
            if n.op == "call_function" and n.target == operator.sub
        ]
        self.assertTrue(len(match) == 1)
        #
        # Test: B as a regular, leaf module
        # symbolic_trace should only patch torch.nn.Module.__call__,
        # which means B.__call__ should still execute
        #
        a = symbolic_trace(A())
        a.recompile()
        b = B()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()
        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)
        # Test submodule b is leaf:
        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)
        # Test b.__call__ was run
        self.assertTrue(b.called)
        self.assertTrue(gm.get_submodule("b").called)
        #
        # Test: B as GraphModule leaf
        # __call__ not honored since symbolic_trace directly invokes forward()
        #
        a = symbolic_trace(A())
        a.recompile()
        b = symbolic_trace(B())
        b.recompile()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)
        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)
    def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
        # Shared body for the two tests below: constructing a GraphModule
        # from either a plain dict of attributes or from the root module
        # itself must carry over registered buffers and parameters, and the
        # rebuilt module must compute identically.
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("my_buff", torch.rand(3, 4))
                self.register_parameter(
                    "my_param", torch.nn.Parameter(torch.rand(3, 4))
                )
            def forward(self, x):
                return x + self.my_buff + self.my_param
        mod = MyModule()
        mod_traced = symbolic_trace(mod)
        # Create new GraphModule based on original, either w/ dict or root module.
        orig_buff = mod_traced.get_buffer("my_buff")
        orig_param = mod_traced.get_parameter("my_param")
        mod_traced_new = GraphModule(
            {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
            mod_traced.graph,
        )
        # Check that both my_buff and my_param are found and the same.
        try:
            new_buff = mod_traced_new.get_buffer("my_buff")
        except Exception:
            self.fail("Did not find my_buff")
        self.assertEqual(orig_buff, new_buff)
        try:
            new_param = mod_traced_new.get_parameter("my_param")
        except Exception:
            self.fail("Did not find my_param")
        self.assertEqual(orig_param, new_param)
        # Finally, the rebuilt module must be numerically identical.
        x = torch.rand(3, 4)
        orig_out = mod_traced(x)
        submodules_out = mod_traced_new(x)
        self.assertEqual(orig_out, submodules_out)
    def test_graph_module_init_buffer_param_copied_dict_init(self):
        # GraphModule built from a dict of attributes (see helper above).
        self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
    def test_graph_module_init_buffer_param_copied_mod_init(self):
        # GraphModule built from the root module itself (see helper above).
        self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
    def test_annotations_with_no_forward_references(self):
        # Tracing must tolerate a non-torch annotation (class A) used
        # directly (no string forward references) in the signature.
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
                return a(x)
        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    def test_annotations_with_forward_references(self):
        # Same as above, but every annotation is a string forward reference.
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)
        class M(torch.nn.Module):
            def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
                return a(x)
        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
        # Container annotation (List[...]) with a concrete inner type.
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)
        class M(torch.nn.Module):
            def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
                return a(x[0])
        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
        # Container annotation whose inner type is a string forward
        # reference (List['torch.Tensor']).
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)
        class M(torch.nn.Module):
            def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
                return a(x)[0]
        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                     "`annotations` is not defined in Python <3.7")
    def test_annotation_with_future(self):
        # fx.test_future uses `from __future__ import annotations`; merely
        # importing it must not break tracing machinery.
        try:
            import fx.test_future    # noqa: F401
        finally:
            # Clean up so later imports see a fresh __future__ state.
            del sys.modules["__future__"]
    def test_annotations_empty_tuple(self):
        # Empty-tuple annotations must survive FX codegen (as the mangled
        # `_Tuple[()]` spelling) and then script cleanly into TorchScript's
        # `Tuple[()]` form.
        class Foo(torch.nn.Module):
            def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
                return "foo"
        traced = torch.fx.symbolic_trace(Foo())
        x = ()
        y = ("bar", ())
        traced(x, y)
        FileCheck().check("_Tuple[()]") \
                   .check("typing_Tuple[str,typing_Tuple[()]]") \
                   .run(traced.code)
        scripted = torch.jit.script(traced)
        scripted(x, y)
        FileCheck().check("Tuple[()]") \
                   .check("Tuple[str, Tuple[()]]") \
                   .run(scripted.code)
    @unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
    def test_assert(self):
        # With TracerBase.trace_asserts enabled, a plain `assert` in the
        # traced function must be preserved: passing inputs behave
        # identically, failing inputs raise AssertionError at runtime.
        def f(x):
            assert x > 1
            return x + 1
        try:
            # Flip the global flag only for the duration of the trace.
            torch.fx.proxy.TracerBase.trace_asserts = True
            traced = symbolic_trace(f)
        finally:
            torch.fx.proxy.TracerBase.trace_asserts = False
        self.assertEqual(f(2), traced(2))
        with self.assertRaises(AssertionError):
            traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
    def test_custom_codegen(self):
        # A custom CodeGen can change the generated forward()'s calling
        # convention (here: take one list argument instead of positional
        # args). The resulting module must run, be swappable back to the
        # default convention via process_inputs/process_outputs, and be
        # scriptable.
        class ListCodeGen(CodeGen):
            def gen_fn_def(self, free_vars, maybe_return_annotation):
                # Emit a forward that unpacks its single list argument into
                # the original free variables.
                lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
    {', '.join(free_vars)} = args_list"""
                return lst_unpack
            def additional_globals(self):
                # `List` must be importable in the generated code's globals.
                return [('List', typing.List)]
            def process_inputs(self, *inputs):
                assert(len(inputs) == 1)
                return inputs[0]
        def f(a, b):
            return a + b
        nf = symbolic_trace(f)
        vals = [torch.randn(3), torch.randn(3)]
        self.assertEqual(nf(*vals), f(*vals))
        nf.graph.set_codegen(ListCodeGen())
        nf.recompile()
        # A deep-copied graph reset to default CodeGen must agree with the
        # list-convention module through the process_* adapters.
        bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
        bare_fx.graph.set_codegen(CodeGen())
        bare_fx.recompile()
        self.assertEqual(nf(vals), f(*vals))
        self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
        ts_f = torch.jit.script(nf)
        self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
    # Subprocess entry point for TestFX.test_getitem_subproc: runs
    # getitem_inner with Tensor.__getitem__ temporarily registered for
    # tracing (works around https://github.com/pytorch/pytorch/issues/50710).
    from torch.fx._symbolic_trace import _wrapped_methods_to_patch
    _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
    try:
        TestFX().getitem_inner()
    finally:
        # Always undo the global patch, even if the test body raises.
        _wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
    """For every builtin op in op_db, the schemas retrieved via
    get_signature_for_torch_op must be able to bind and successfully call
    each of the op's sample inputs."""
    def setUp(self):
        # Checking for mutable operations while tracing is feature-flagged.
        # Enable it in testing but not by default.
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        # Restore the global flag so other test classes are unaffected.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    @onlyCPU
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
        if not isinstance(op.op, types.BuiltinFunctionType):
            raise unittest.SkipTest("This path doesn't work on Python functions")
        sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
        schemas = get_signature_for_torch_op(op.op)
        if not schemas:
            raise RuntimeError('No Schemas Returned')
        for sample_input in sample_inputs_itr:
            # Iterate through overloads until we hit a match. If we exit this
            # loop via `else`, we haven't found a match
            for schema in schemas:
                try:
                    bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
                    bound_args.apply_defaults()
                    op(*bound_args.args, **bound_args.kwargs)
                    break
                except TypeError:
                    # This overload doesn't accept these args; try the next.
                    pass
            else:
                raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations whil tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
    def test_public_api_surface(self):
        """Verify every public symbol under torch.fx (excluding torch.fx.experimental)
        carries an explicit backwards-compatibility classification."""
        # Maps offending object -> None; used as an ordered set.
        non_back_compat_objects = {}
        def check_symbols_have_bc_designation(m, prefix):
            # Recursively walk submodules of torch.fx, collecting any public
            # class/function that lacks a @compatibility marking.
            if not m.__name__.startswith('torch.fx'):
                return
            if m.__name__.startswith('torch.fx.experimental'):
                return
            for k, v in m.__dict__.items():
                if v is m:
                    # Skip self-references to avoid infinite recursion.
                    continue
                if k.startswith('_'):
                    continue
                if isinstance(v, types.ModuleType):
                    check_symbols_have_bc_designation(v, prefix + [k])
                elif isinstance(v, type) or isinstance(v, types.FunctionType):
                    if v not in _MARKED_WITH_COMATIBLITY:
                        non_back_compat_objects.setdefault(v)
        check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
        check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
        non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
        # Only want objects in torch.fx
        non_back_compat_strs = [
            s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
        # Only want objects in public namespaces
        non_back_compat_strs = [
            s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
        non_back_compat_strs.sort()
        if len(non_back_compat_strs) != 0:
            raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
                                 f"backwards-compatibility classification! Please decorate these "
                                 f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
                                 f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
    """Auto-generates one test per torch.nn.functional entry (see generate_tests),
    checking that each functional either traces symbolically or fails with the
    exact exception class and message recorded in the tables below."""
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        # Restore the flag saved in setUp so other test classes are unaffected.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    # Supporting/introspection helpers in torch.nn.functional that should not
    # get a generated test of their own.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # Filled with the real functions in setUpClass (and restored in
    # tearDownClass); the names are patched to always return False while the
    # generated tests run.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}
    # Each constant below is an (expected exception type, message regex) pair
    # describing WHY a given functional cannot be symbolically traced.
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
    MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
    # Functionals known to be untraceable, mapped to the failure they produce.
    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "bilinear": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "native_channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "gelu": BUILT_IN_FUNC,
        "hardshrink": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "linear": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pairwise_distance": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "prelu": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,
        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,
        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,
        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,
        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,
        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,
        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
        "normalize" : MUTABLE,
    }
    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )
    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #                 internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,
        "group_norm": LEN_ERROR
    }
    @classmethod
    def _get_functional(cls):
        """Return (name, fn) pairs from torch.nn.functional worth testing:
        lowercase, public, callable, and (when annotated) taking a Tensor."""
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list
    @classmethod
    def generate_test_func(cls, func_name, fn):
        # Build one test function: either expect the recorded failure or
        # require that symbolic_trace succeeds.
        def functional_test(self):
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test
    @classmethod
    def generate_tests(cls):
        # Attach one generated test method per functional to this class.
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)
    @classmethod
    def setUpClass(cls):
        # Patch the has_torch_function* helpers to always report False so
        # tracing exercises the plain-tensor code paths.
        def no(*args, **kwargs):
            return False
        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)
    @classmethod
    def tearDownClass(cls):
        # Undo the patching from setUpClass.
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])
# Materialize the per-functional tests before unittest discovery runs.
TestFunctionalTracing.generate_tests()
# TestOperatorSignatures is defined earlier in this file; instantiate its
# device-parameterized variants into this module's namespace.
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Auto-generates tests that symbolically trace torchvision models and, when
    tracing succeeds, check the traced/scripted outputs against eager mode."""
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    # (expected exception type, message regex) pairs for known failures.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )
    # Detection models whose forward iterates over inputs -> untraceable.
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    # Models whose traced form cannot additionally be TorchScript-compiled.
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }
    # Adapters that pull a comparable Tensor out of each model's output format.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }
    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # NOTE(review): the unpacking names are swapped relative to the
                # (exception_type, regex) tuple contents; positional order passed
                # to assertRaisesRegex is still correct, so behavior is right.
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)
                if name in self.UNSCRIPTABLE_MODELS:
                    # Same swapped-name situation as above; see NOTE(review).
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)
        return run_test
    @classmethod
    def generate_classification_tests(cls):
        for k, v in torchvision_models.__dict__.items():
            # Public lowercase callables are model constructors.
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_tests(cls):
        # Attach generated tests for every torchvision model family.
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()
# Only materialize the torchvision tests when torchvision is importable.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()
if __name__ == '__main__':
    run_tests()
| 36.643428 | 128 | 0.585269 |
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter
from fx.test_dce_pass import TestDCE
from fx.test_fx_const_fold import TestConstFold
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
# torchvision is an optional dependency; record availability so the vision
# tracing tests can be skipped cleanly when it is absent.
try:
    from torchvision import models as torchvision_models
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
    """Minimal tracing fixture: shift the input by 3.0, then apply ReLU."""
    def forward(self, x):
        shifted = x + 3.0
        return torch.relu(shifted)
def a_non_torch_leaf(a, b):
    """Plain Python helper (not a torch op) used as a tracing target."""
    total = a + b
    return total
def fx_int(x: float) -> int:
    """Truncate *x* toward zero and return it as an int."""
    truncated = int(x)
    return truncated
def fx_int_x2(x: float) -> int:
    """Truncate *x* toward zero, then double the result."""
    doubled = 2 * int(x)
    return doubled
# Point must live at module scope: TorchScript compilation of a function
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
    """Sum the first two elements of *a* with *b* (lifted into traces via wrap)."""
    pair_sum = a[0] + a[1]
    return pair_sum + b

wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
    """Same contract as a_lifted_leaf; wrapped by passing the function object."""
    pair_sum = a[0] + a[1]
    return pair_sum + b

wrap(a_lifted_leaf2)
# Lift the builtins len() and getattr() so calls on Proxies become
# call_function nodes instead of executing eagerly during tracing.
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
    """Combine the .x field of *p1* with the .y field of keyword-only *p2*."""
    x_component = p1.x
    return x_component + p2.y

wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
    """Increment *a* by one; lifted into traces via the @wrap decorator form."""
    incremented = a + 1
    return incremented
wrap('wrapped_with_submodule')

def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
    """Apply *batchnorm1d* to *x*; wrapped so the call is lifted during tracing."""
    normalized = batchnorm1d(x)
    return normalized
# Stash references to the original functions so tests can later assert that
# symbolic tracing did not permanently replace the module-level names.
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
    # torch.foo does not exist; this is only ever traced (never executed),
    # exercising wrap() on a call that would fail eagerly.
    return torch.foo(x)
class Pair(NamedTuple):
    """Named 2-tuple of tensors with a custom FX argument-formatting hook."""
    x : torch.Tensor
    y : torch.Tensor
    def _custom_fx_repr_fn(self) -> str:
        # Hook recognized by torch.fx's _format_arg to customize how Pair
        # arguments are rendered in generated code.
        return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
class Foo(object):
    """Simple two-attribute container used as a tracing fixture."""
    def __init__(self, a, b):
        self.a, self.b = a, b
class TestFX(JitTestCase):
    """Core symbolic-tracing tests for torch.fx."""
    def setUp(self):
        # Tracing of mutable ops is feature-flagged; force it on for these tests
        # (tearDown restores the original setting).
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
        if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
            # The torchbind test library is only available on portable builds.
            lib_file_path = find_library_location('libtorchbind_test.so')
            torch.ops.load_library(str(lib_file_path))
    def tearDown(self):
        # Restore the mutable-operation flag saved in setUp.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
        """Symbolically trace *m*, lint the resulting graph, and assert the
        traced module produces the same output as eager execution."""
        kwargs = kwargs if kwargs else {}
        ref_outs = m(*args, **kwargs)
        gm = symbolic_trace(m)
        gm.graph.lint()
        test_outs = gm(*args, **kwargs)
        self.assertEqual(ref_outs, test_outs)
    def test_graph_module(self):
        """Smoke-test tracing and scripting over modules exercising submodules,
        parameters, operator soup, multiple returns, and *args/**kwargs."""
        class MySub(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = torch.nn.Parameter(torch.rand(4, 3))
            def forward(self, x):
                return self.w + x
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.lin = torch.nn.Linear(4, 3)
                self.sub_mod = MySub()
                self.w = torch.nn.Parameter(torch.rand(3))
            def forward(self, A, B, c):
                # Deliberately dense expression covering many operator forms.
                t = torch.sigmoid(A) + self.lin(c)
                return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
        m = MyModule()
        gm = symbolic_trace(m)
        ms = torch.jit.script(gm)
        class M2(torch.nn.Module):
            def forward(self, A):
                # Multiple return values from a single op.
                m, idx = torch.max(A, 0)
                return m + 1, idx + 1
        m2 = M2()
        gm2 = symbolic_trace(m2)
        class T(torch.nn.Module):
            def forward(self, A, b=4, *args, c=5, **kwargs):
                x = A + 1 + args[0] + kwargs['3']
                return x
        t = T()
        symbolic_trace(t)
        class M3(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        m3 = M3()
        gm3 = symbolic_trace(m3)
        # GraphModule must survive __new__ + explicit __init__ re-construction.
        new_instance = gm3.__new__(type(gm3))
        new_instance.__init__(gm3, gm3.graph)
        x = torch.randn(5, 3)
        torch.testing.assert_allclose(new_instance(x), torch.relu(x))
    def test_custom_import(self):
        """A manually-built Graph calling a non-torch free function must produce
        a runnable GraphModule (the import is resolved in generated code)."""
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(torch.sin(x + y), gm(x, y))
    def test_args_kwargs(self):
        """Tracing through *args/**kwargs access in forward."""
        class T(torch.nn.Module):
            def forward(self, *args, **kwargs):
                x = args[0] + kwargs['foo']
                return x
        t = T()
        self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
    def test_args_kwargs_no_self(self):
        """A forward that folds `self` into *args must be rejected by the tracer."""
        class T(torch.nn.Module):
            def forward(*args, **kwargs):  # noqa: B902
                # Intentionally pull self out of *args — unsupported pattern.
                self = args[0]
                return torch.relu(args[1])
        t = T()
        with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
            self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
    def test_fx_shifts(self):
        """Tracing of the << and >> operators."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x << 3, x >> 3
        input = torch.LongTensor(10).random_(0, 1024)
        m = MyModule()
        self.checkGraphModule(m, (input,))
    def test_fx_and_or(self):
        """Tracing of the bitwise & and | operators."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x & x, x | x
        input = torch.LongTensor(10).random_(0, 1024)
        m = MyModule()
        self.checkGraphModule(m, (input,))
    def test_dict(self):
        """Tracing through dict indexing on inputs and dict construction in outputs."""
        class MyDictMod(torch.nn.Module):
            def forward(self, d):
                return d['3'].relu(), {'4' : d['3'].neg()}
        input_dict = {'3': torch.rand(3, 4)}
        m = MyDictMod()
        self.checkGraphModule(m, (input_dict,))
    def test_matmul_tracing(self):
        """Tracing of @ (matmul) with the proxy on either side of the operator."""
        const = torch.randn(3)
        def matmul_f(x):
            return x @ const
        mod = symbolic_trace(matmul_f)
        inp = torch.randn(3)
        self.assertEqual(mod(inp), matmul_f(inp))
        def rmatmul_f(x):
            # Proxy appears on the right-hand side, exercising __rmatmul__.
            return const @ x
        mod = symbolic_trace(rmatmul_f)
        inp = torch.randn(3)
        self.assertEqual(mod(inp), rmatmul_f(inp))
    def test_disallow_override(self):
        """A custom Tracer overriding create_node can veto in-place operations
        (identified here by the trailing-underscore naming convention)."""
        # Custom tracer that rejects in-place ops at node-creation time.
        class NoMutableCallTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                name = target if isinstance(target, str) else torch.typename(target)
                if name[-1] == '_':
                    raise RuntimeError('In-place operations are not supported')
                return super().create_node(kind, target, args, kwargs, name)
        # Test method
        class MyInplaceMod(torch.nn.Module):
            def forward(self, x):
                x.add_(3.0)
                return x
        m = MyInplaceMod()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m)
        # Test free function form
        class MyInplaceMod2(torch.nn.Module):
            def forward(self, x):
                torch.log_(x)
                return x
        m2 = MyInplaceMod2()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m2)
        # Test in-place op on an intermediate value
        class MyInplaceMod3(torch.nn.Module):
            def forward(self, x):
                y = torch.ones(3, 4)
                y.add_(x)
                return x
        m3 = MyInplaceMod3()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m3)
    def test_leaf_module(self):
        """With is_leaf_module overridden to False, submodules are traced through,
        so no call_module nodes should appear in the graph."""
        # Custom tracer that makes all nn.Modules trace-through.
        class NoLeafModulesTracer(Tracer):
            def is_leaf_module(self, m, qualname):
                return False
        class MyReluMod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.relu = torch.nn.ReLU()
            def forward(self, x):
                return self.relu(x)
        mrm = MyReluMod()
        sym = NoLeafModulesTracer().trace(mrm)
        for node in sym.nodes:
            self.assertNotEqual(node.op, 'call_module')
        sym.lint()
    def test_wrap(self):
        """Functions lifted by wrap('name') become call_function nodes and the
        original module-level function object is left untouched."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
        def to_trace(y):
            return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
        m = symbolic_trace(to_trace)
        self.assertIn('a_lifted_leaf', m.code)
        self.assertEqual(27, m(2))
        # Tracing must not permanently patch the module-level name.
        self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
    def test_wrap_fn_directly(self):
        """Same as test_wrap, but for wrap(fn) called with the function object."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
        def to_trace(y):
            return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
        m = symbolic_trace(to_trace)
        self.assertIn('a_lifted_leaf2', m.code)
        self.assertEqual(27, m(2))
        self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
    def test_wrapped_via_decorator(self):
        """Functions lifted via the @wrap decorator trace as call_function nodes
        and remain callable/unpatched afterwards."""
        self.assertEqual(wrapped_via_decorator(0), 1)
        def to_trace(y):
            return wrapped_via_decorator(y)
        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_wrapped_via_decorator_and_transformed(self):
        """Running a Transformer over a trace must preserve wrapped call sites."""
        self.assertEqual(wrapped_via_decorator(0), 1)
        def to_trace(y):
            return wrapped_via_decorator(y)
        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
        # A Transformer pass (identity transform) must keep the wrapped call.
        transformed = torch.fx.Transformer(m).transform()
        self.assertIn('wrapped_via_decorator', transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_wrap_with_submodule(self):
        """A wrapped function that takes an nn.Module argument still traces and
        runs correctly when the module is an attribute of the traced root."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)
        m = symbolic_trace(M())
        self.assertIn("wrapped_with_submodule", m.code)
        input = torch.rand(3, 2)
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), m(input))
    def test_wrapped_retrace(self):
        """Re-tracing an already-traced module keeps wrapped call sites intact."""
        def to_trace(y):
            return wrapped_via_decorator(y)
        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        retraced = symbolic_trace(m)
        self.assertIn('wrapped_via_decorator', retraced.code)
        self.assertEqual(retraced(0), 1)
    def test_graph_edit_with_proxy(self):
        """Graphs can be extended after copy by wrapping a node in a Proxy and
        using normal Python operators: here (a + b) is doubled to (a+b)+(a+b)."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        m = M()
        g = symbolic_trace(m).graph
        new_g = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        # graph_copy returns the copied graph's output value.
        output_val = new_g.graph_copy(g, val_map)
        t = Proxy(output_val)
        # test that we can use proxy objects to generate more graph code later.
        new_g.output((t + t).node)
        gm = GraphModule(m, new_g)
        gm.graph.lint()
        self.assertEqual(gm(3, 4), 14)
    def test_graph_unique_names(self):
        """Node names must stay unique after graph_copy plus further edits."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        m = M()
        g = symbolic_trace(m).graph
        new_g = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        output_val = new_g.graph_copy(g, val_map)
        t = Proxy(output_val)
        # test that we can use proxy objects to generate more graph code later.
        new_g.output((t + t).node)
        gm = GraphModule(m, new_g)
        # NOTE(review): `Set` is not imported at module level; harmless because
        # local variable annotations are never evaluated (PEP 526), but consider
        # adding `Set` to the typing import.
        seen_names : Set[str] = set()
        for node in gm.graph.nodes:
            assert node.name not in seen_names
            seen_names.add(node.name)
    def test_stack_traces(self):
        """With record_stack_traces enabled, every non-output node carries a
        stack trace pointing into this test file, and node_copy preserves it."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        tracer = torch.fx.Tracer()
        tracer.record_stack_traces = True
        graph = tracer.trace(M())
        # saving the original list because we will insert new nodes as a part of a test
        orig_graph_nodes = list(graph.nodes)
        for node in orig_graph_nodes:
            if node.op == 'output':
                continue
            self.assertTrue(node.stack_trace is not None)
            assert 'test_fx.py' in node.stack_trace
            # verify that copying the node does not lose the stack trace
            new_node = graph.node_copy(node)
            self.assertTrue(new_node.stack_trace is not None)
            assert 'test_fx.py' in new_node.stack_trace
    def test_graph_unique_names_manual(self):
        """Copying a graph with manually-assigned (colliding-ish) node names
        must still yield unique names in the copy."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
        c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        graph2 = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        graph2.graph_copy(graph, val_map)
        # NOTE(review): `Set` is not imported at module level; harmless because
        # local variable annotations are never evaluated (PEP 526).
        seen_names : Set[str] = set()
        for node in graph2.nodes:
            assert node.name not in seen_names
            seen_names.add(node.name)
    def test_unpack(self):
        """Tracing through tuple unpacking of an input argument."""
        class M(torch.nn.Module):
            def forward(self, a, b):
                c, d = a
                return c + d + b
        a = (torch.rand(1), torch.rand(1))
        b = torch.rand(1)
        m = M()
        self.checkGraphModule(m, (a, b))
    def test_native_callable(self):
        """Lower a traced module to a custom C++ elementwise interpreter
        (a torchbind class), wrap it back into a GraphModule, and check eager,
        scripted, and export/import round-trip outputs all match."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            raise unittest.SkipTest("non-portable load_library call used in test")
        # This test exercises the case where we use FX to translate from Python
        # code to some native callable object
        class MySimpleMod(torch.nn.Module):
            def forward(self, x):
                return 3.0 * x + x
        msm = MySimpleMod()
        def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
            # ===== Stage 1: Symbolic trace the module =====
            mod = symbolic_trace(orig_mod)
            # ===== Stage 2: Lower GraphModule representation to the C++
            #       interpreter's instruction format =====
            instructions = []
            constant_idx = 0
            constants = {}
            fn_input_names = []
            target_to_name = {
                operator.add : "add",
                operator.mul : "mul"
            }
            output_node : Optional[Node] = None
            # For each instruction, create a triple
            # (instruction_name : str, inputs : List[str], output : str)
            # to feed into the C++ interpreter
            for n in mod.graph.nodes:
                target, args, out_name = n.target, n.args, n.name
                assert len(n.kwargs) == 0, "kwargs currently not supported"
                if n.op == 'placeholder':
                    # Placeholders specify function argument names. Save these
                    # for later when we generate the wrapper GraphModule
                    fn_input_names.append(target)
                elif n.op == 'call_function':
                    assert target in target_to_name, "Unsupported call target " + target
                    arg_names = []
                    for arg in args:
                        if not isinstance(arg, Node):
                            # Pull out constants. These constants will later be
                            # fed to the interpreter C++ object via add_constant()
                            arg_name = f'constant_{constant_idx}'
                            constants[arg_name] = torch.tensor(
                                [arg] if isinstance(arg, numbers.Number) else arg)
                            arg_names.append(arg_name)
                            constant_idx += 1
                        else:
                            arg_names.append(arg.name)
                    instructions.append((target_to_name[target], arg_names, out_name))
                elif n.op == 'output':
                    if output_node is not None:
                        raise RuntimeError('Multiple output nodes!')
                    output_node = n
                else:
                    raise RuntimeError('Unsupported opcode ' + n.op)
            interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
            # Load constants
            for k, v in constants.items():
                interpreter.add_constant(k, v)
            # Specify names for positional input arguments
            interpreter.set_input_names(fn_input_names)
            # Load instructions
            interpreter.set_instructions(instructions)
            # Specify name for single output
            assert isinstance(output_node.args[0], torch.fx.Node)
            interpreter.set_output_name(output_node.args[0].name)
            # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
            class WrapperModule(torch.nn.Module):
                def __init__(self, interpreter):
                    super().__init__()
                    self.interpreter = interpreter
            wrapper = WrapperModule(interpreter)
            # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
            # FIXME: The following code could be greatly simplified by symbolic_trace'ing
            # without it messing up Python `hasattr` for some reason. More digging
            # into CPython's implementation of hasattr is probably in order...
            graph = torch.fx.Graph()
            placeholder_nodes = []
            for name in fn_input_names:
                placeholder_nodes.append(graph.create_node('placeholder', name))
            # Get the interpreter object
            interpreter_node = graph.create_node('get_attr', 'interpreter')
            # Add a node to call the interpreter instance
            output_node = graph.create_node(
                op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
            # Register output
            graph.output(output_node)
            graph.lint()
            # Return final GraphModule!!!
            return GraphModule(wrapper, graph)
        # Lower GraphModule to C++ interpreter
        lowered = lower_to_elementwise_interpreter(msm)
        # Compare correctness with eager module
        x = torch.rand(3, 4)
        ref_out = msm(x)
        test_out = lowered(x)
        torch.testing.assert_close(test_out, ref_out)
        # Test TorchScript compilation
        scripted_lowered = torch.jit.script(lowered)
        script_out = scripted_lowered(x)
        torch.testing.assert_close(script_out, ref_out)
        # Test TorchScript serialization round-trip
        import_copy = self.getExportImportCopy(scripted_lowered)
        imported_out = import_copy(x)
        torch.testing.assert_close(imported_out, ref_out)
    def test_reserved_getattr(self):
        """Chained attribute access must not generate a node literally named
        "getattr", which is reserved."""
        class M(torch.nn.Module):
            def forward(self, a):
                return a.foo.bar.baz
        m = M()
        m_g = symbolic_trace(m)
        m_g.graph.lint()
        for node in m_g.graph.nodes:
            self.assertTrue(node.name != "getattr")
    def test_trace_buffer_slice(self):
        """Tracing through dynamic slicing of a registered buffer
        (self.buffer[:x.shape[0]]) and checking numerical parity."""
        bs, d_hid = 10, 23
        class ExampleCode(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
                self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
                self.lin = torch.nn.Linear(d_hid, d_hid)
                # Buffer is larger than the batch so slicing is non-trivial.
                self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
            def forward(self, x):
                x = torch.mm(x, self.mm_param)
                skip_connection = x
                x = torch.relu(x)
                x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
                x = self.lin(x)
                x = torch.relu(x)
                x = x + skip_connection
                x = torch.mm(x, self.mm_param2)
                x = self.lin(x)
                return x
        ec = ExampleCode()
        traced = torch.fx.symbolic_trace(ec)
        x = torch.randn(bs, d_hid)
        torch.testing.assert_allclose(ec(x), traced(x))
    def test_node_tagging(self):
        """A Tracer subclass can attach arbitrary metadata to nodes in
        create_node; the tags must survive onto the final graph."""
        class TaggingTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                n = super().create_node(kind, target, args, kwargs, name)
                n.tag = 'foo'
                return n
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b
        m = M()
        g = TaggingTracer().trace(m)
        g.lint()
        for n in g.nodes:
            self.assertTrue(hasattr(n, 'tag'))
            self.assertEqual(n.tag, 'foo')
    def test_tensor_attribute(self):
        """Plain tensor attributes (not Parameters/buffers) are usable during
        tracing, both directly and through a nested qualified name."""
        class TensorAttribute(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.tensor = torch.rand(3, 4)
            def forward(self, x):
                return torch.nn.functional.linear(x, self.tensor)
        ta = TensorAttribute()
        traced = symbolic_trace(ta)
        traced(torch.rand(4, 4))
        class WrapperForQualname(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.ta = TensorAttribute()
            def forward(self, x):
                # Access via a nested qualified path (self.ta.tensor).
                return torch.nn.functional.linear(x, self.ta.tensor)
        wfq = WrapperForQualname()
        traced2 = symbolic_trace(wfq)
        traced2.graph.lint()
        traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in traced.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
    def test_symbolic_trace_sequential(self):
        """nn.Sequential containers trace transparently and match eager output."""
        class Simple(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)
        seq = torch.nn.Sequential(
            Simple(),
            Simple(),
            Simple()
        )
        traced = symbolic_trace(seq)
        traced.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
    def test_pickle_graphmodule(self):
        """A traced GraphModule (with a submodule) round-trips through pickle
        and the loaded copy produces identical output."""
        class Nested(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.st = torch.nn.Linear(4, 4)
            def forward(self, x):
                return self.st(x)
        n = Nested()
        traced = symbolic_trace(n)
        traced.graph.lint()
        pickled = pickle.dumps(traced)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(loaded(x), traced(x))
    def test_pickle_custom_import(self):
        """Pickling a GraphModule whose graph calls a user-defined function
        (a_non_torch_leaf) preserves the reference across the round trip."""
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(loaded(x, y), gm(x, y))
    def test_all_input_nodes(self):
        """Node.all_input_nodes reports exactly the Node-valued inputs of each
        op across placeholder, call_module, get_attr, and call_function."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.placeholder('x')
        b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
        c : torch.fx.Node = graph.get_attr('y_attr')
        d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
        e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
        graph.output(e)
        graph.lint()
        self.assertEqual(b.all_input_nodes, [a])
        self.assertEqual(c.all_input_nodes, [])
        self.assertEqual(d.all_input_nodes, [b, c])
        # The int literal 0 is not a Node, so only d counts.
        self.assertEqual(e.all_input_nodes, [d])
    def test_deepcopy_graphmodule_with_transform(self):
        """Deep-copying a transformed GraphModule yields a distinct class whose
        behavior matches the transformed original."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        def transform(traced):
            # Copy the graph and append a .neg() call after its output value.
            new_graph = torch.fx.Graph()
            val_map : Dict[Node, Node] = {}
            output_value = new_graph.graph_copy(traced.graph, val_map)
            relu_out = new_graph.create_node(
                op='call_method', target='neg', args=(output_value,), kwargs={})
            new_graph.output(relu_out)
            return GraphModule(traced, new_graph)
        transformed = transform(traced)
        transformed.graph.lint()
        copied = copy.deepcopy(transformed)
        # GraphModule generates a per-instance class, so the copy's class
        # object is distinct from the original's.
        self.assertNotEqual(id(type(transformed)), id(type(copied)))
        x = torch.randn(3, 4)
        self.assertEqual(copied(x), transformed(x))
    def test_deepcopy_with_submods_params(self):
        """copy.deepcopy works on a traced module containing parameters both on
        itself and on a nested submodule; the copy's graph still lints."""
        class Bar(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
            def forward(self, x):
                return torch.relu(x) + self.param
        class Baz(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.bar = Bar()
            def forward(self, x):
                return self.bar(x) - self.param
        baz = Baz()
        traced = symbolic_trace(baz)
        traced.graph.lint()
        copied = copy.deepcopy(traced)
        copied.graph.lint()
    def test_deepcopy_graph_with_tracer_cls(self):
        """Graph._tracer_cls survives copy.deepcopy of the Graph."""
        class TestTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True
        g = Graph(tracer_cls=TestTracer)
        x = g.placeholder("x")
        g.output(x)
        h = copy.deepcopy(g)
        self.assertIsNotNone(h._tracer_cls)
        self.assertTrue(g._tracer_cls == h._tracer_cls)
    def test_unpack_list_better_error(self):
        """Star-unpacking a Proxy (``*x``) during tracing raises a TraceError
        with a clear "cannot be iterated" message rather than a cryptic one."""
        class SomeArgs(torch.nn.Module):
            def forward(self, a, b):
                return torch.rand(3, 4)
        class UnpacksList(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sa = SomeArgs()
            def forward(self, x : list):
                return self.sa(*x)
        ul = UnpacksList()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ul)
    def test_unpack_dict_better_error(self):
        """Double-star-unpacking a Proxy (``**x``) during tracing raises a
        TraceError with the same clear "cannot be iterated" message."""
        class SomeKwargs(torch.nn.Module):
            def forward(self, x=3, y=4):
                return torch.rand(3, 4)
        class UnpacksDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sk = SomeKwargs()
            def forward(self, x : dict):
                return self.sk(**x)
        ud = UnpacksDict()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ud)
    def test_pretty_print_targets(self):
        """Graph printing renders targets with qualified module names
        (builtins.getattr, operator.add, torch.add)."""
        class SomeMod(torch.nn.Module):
            def forward(self, x):
                return torch.add(x.foo + x.bar, 3.0)
        traced = symbolic_trace(SomeMod())
        graph_str = str(traced.graph)
        self.assertIn('builtins.getattr', graph_str)
        self.assertIn('operator.add', graph_str)
        self.assertIn('torch.add', graph_str)
    def test_pretty_print_node(self):
        """Node.format_node() output names every op kind produced by a module
        exercising all six opcodes (checked in graph order via FileCheck)."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param: torch.nn.Parameter = torch.nn.Parameter(
                    torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x: torch.Tensor, y: int = 2):
                return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
        traced = symbolic_trace(M())
        all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
        FileCheck().check("x").check("placeholder") \
            .check("y").check("placeholder") \
            .check("getitem").check("call_function") \
            .check("param").check("get_attr") \
            .check("add").check("call_function") \
            .check("linear").check("call_module") \
            .check("clamp").check("call_method") \
            .run(all_formatted)
    def test_script_tensor_constant(self):
        """TorchScript can compile a traced module that captured a tensor
        constant (stored as a `_tensor_constant*` attribute)."""
        # `_tensor_constant*`
        class IHaveATensorConstant(torch.nn.Module):
            def forward(self, x):
                return x + torch.rand(3, 4)
        traced = torch.fx.symbolic_trace(IHaveATensorConstant())
        torch.jit.script(traced)
    def test_autowrap_functions(self):
        """Tracer(autowrap_functions=...) wraps the listed functions so they
        record as graph nodes instead of being called on a Proxy eagerly."""
        class AutowrapFnTest(torch.nn.Module):
            def forward(self, x):
                return fx_int(x.shape[0] / 2)
        class AutowrapFnTest2(torch.nn.Module):
            def forward(self, x):
                return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
        # Check function(s) are wrapped
        # `int` would normally throw a TypeError as argument can't be `Proxy`
        tracer = Tracer(autowrap_functions=(fx_int,))
        graph = tracer.trace(AutowrapFnTest())
        traced = GraphModule(tracer.root, graph, 'test')
        tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
        tracer_2.trace(AutowrapFnTest2())
        traced_scripted = torch.jit.script(traced)
        self.assertEqual(traced_scripted(torch.rand(4)), 2)
    def test_torch_fx_len(self):
        """len() on a Proxy traces correctly, the result scripts, and tracing
        does not leave the builtin `len` patched afterwards."""
        class FXLenTest(torch.nn.Module):
            def forward(self, x):
                return len(x)
        traced = symbolic_trace(FXLenTest())
        self.assertEqual(traced(torch.rand(3, 4)), 3)
        scripted = torch.jit.script(FXLenTest())
        self.assertEqual(scripted(torch.rand(3)), 3)
        traced_scripted = torch.jit.script(traced)
        self.assertEqual(traced_scripted(torch.rand(3)), 3)
        class FXLenTest2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.l = [3, 4, 5]
            def forward(self, x):
                # len() of a concrete (non-Proxy) list is evaluated eagerly.
                return x + len(self.l)
        traced2 = symbolic_trace(FXLenTest2())
        inp = torch.rand(3, 4)
        self.assertEqual(traced2(inp), inp + 3.0)
        # Tracing must restore the original builtin after patching it.
        self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
    def test_sqrt(self):
        """math.sqrt (and the module-level `sqrt` alias) trace correctly on
        Proxy and concrete values, and tracing restores the patched originals."""
        class Sqrt1(torch.nn.Module):
            def forward(self, x):
                return sqrt(x.size(0))
        class Sqrt2(torch.nn.Module):
            def forward(self, x):
                return math.sqrt(x.size(0))
        class Sqrt3(torch.nn.Module):
            def forward(self, x):
                return x + math.sqrt(2) + sqrt(2)
        self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
        self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
        self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
        # Patched functions must be restored after tracing.
        self.assertIs(sqrt, _sqrt)
        self.assertIs(math.sqrt, _sqrt)
    def test_torch_custom_ops(self):
        """torch.ops.aten.* custom-op calls (with list and tuple args) trace to
        a graph whose output matches eager execution."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        out = gm(input)
        self.assertEqual(out, ref_out)
    def test_pickle_torch_custom_ops(self):
        """A graph containing torch.ops.aten.* calls survives a pickle round
        trip and the loaded module matches the original."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
    def test_custom_proxy_type(self):
        """A plain user class whose methods only combine traceable values can
        flow through symbolic tracing and reproduce eager results."""
        class TensorPair:
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair(x : TensorPair, y : TensorPair):
            s = x.add(y)
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        ref_out = use_tensor_pair(x, y)
        traced = symbolic_trace(use_tensor_pair)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_type_literal(self):
        """A ProxyableClassMeta class constructed with literal (non-Proxy)
        tensors inside the traced function still traces and runs correctly."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair_literal(x : TensorPair):
            # Constructor call with concrete tensors becomes a graph node.
            s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        ref_out = use_tensor_pair_literal(x)
        traced = symbolic_trace(use_tensor_pair_literal)
        traced_out = traced(x)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_dynamic_value(self):
        """A ProxyableClassMeta class constructed from a Proxy tensor argument
        (dynamic value) traces and reproduces eager results."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
            # `y` is a Proxy during tracing; the ctor becomes a graph node.
            s = x.add(TensorPair(y, y))
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = torch.randn(5, 3)
        ref_out = use_tensor_pair_ctor(x, y)
        traced = symbolic_trace(use_tensor_pair_ctor)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_input_dependent_control_flow(self):
        """A ProxyableClassMeta class whose __init__ branches on input data
        defers that control flow to runtime: the traced module re-runs the
        branch per call rather than baking in the traced path."""
        class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, inp):
                # Data-dependent branch: records whether the input is all-zero.
                if inp.sum() == 0:
                    self.is_zero = True
                    self.tensor = torch.tensor([])
                else:
                    self.is_zero = False
                    self.tensor = inp
            def add(self, other):
                if self.is_zero:
                    return ZeroTensor(other.tensor)
                elif other.is_zero:
                    return self
        def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
            return ZeroTensor(x + y)
        x, y = torch.randn(5, 3), torch.randn(5, 3)
        ref_out = use_zero_tensor(x, y)
        traced = symbolic_trace(use_zero_tensor)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.is_zero, ref_out.is_zero)
        self.assertEqual(traced_out.tensor, ref_out.tensor)
    def test_graph_fns(self):
        """The Graph convenience constructors (placeholder, call_module,
        get_attr, call_method, call_function) build a runnable GraphModule."""
        g = Graph()
        a = g.placeholder('a')
        b = g.call_module('linear', (a,))
        c = g.get_attr('bias')
        d = g.call_method('add', (b, c))
        e = g.call_function(torch.sin, (d,))
        g.output(e)
        mod = torch.nn.Module()
        mod.linear = torch.nn.Linear(3, 4)
        mod.bias = torch.rand(4)
        gm = GraphModule(mod, g)
        gm.graph.lint()
        input = torch.rand(3)
        r = gm(input)
        # Reference: sin(linear(x) + bias), matching the graph above.
        ref = torch.sin(mod.linear(input) + mod.bias)
        self.assertEqual(r, ref)
    def test_remove_uses(self):
        """replace_all_uses_with followed by erase_node removes the erased node
        from its former input's users map."""
        g : torch.fx.Graph = Graph()
        x : torch.fx.Node = g.placeholder('x')
        relu : torch.fx.Node = g.call_function(torch.relu, (x,))
        neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
        g.output(neg)
        neg.replace_all_uses_with(relu)
        g.erase_node(neg)
        self.assertTrue(neg not in relu.users)
    def test_remove_uses_with_custom_filter(self):
        """replace_all_uses_with honors its delete_user_cb filter: uses the
        callback rejects are left in place."""
        g : torch.fx.Graph = Graph()
        x : torch.fx.Node = g.placeholder('x')
        relu : torch.fx.Node = g.call_function(torch.relu, (x,))
        neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
        g.output(neg)
        # Filter excludes `neg` itself, so its use of `relu` is preserved.
        neg.replace_all_uses_with(relu, lambda x: x != neg)
        self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
    def test_pickle_nonetype_annotation(self):
        """A traced EmbeddingBag (whose signature carries Optional/None
        annotations) pickles, unpickles, lints, and matches the original."""
        eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
        traced = symbolic_trace(eb)
        pickled = pickle.dumps(traced)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.LongTensor([0, 4])
        self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
    def test_construct_root_dict(self):
        """GraphModule accepts a dict root mapping dotted qualified names to
        submodules/tensors, synthesizing the intermediate module hierarchy."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
        c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
        add_param : torch.Tensor = torch.rand(3, 4)
        gm : torch.fx.GraphModule = torch.fx.GraphModule(
            {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
        gm.graph.lint()
        # Generated code must reference the dotted path through `self`.
        assert 'self.foo.bar.baz' in gm.code
        x : torch.Tensor = torch.rand(3, 3)
        out : torch.Tensor = gm(x)
        ref_out : torch.Tensor = linear_mod(x) + add_param
        self.assertEqual(out, ref_out)
    def test_symbolic_trace_assert(self):
        """torch._assert traces into the graph, fires at runtime with its
        message, and the module remains TorchScript-compatible."""
        class AssertsTensorShape(torch.nn.Module):
            def forward(self, x):
                torch._assert(x.shape[1] > 4, "assert_foobar")
                return x
        m = AssertsTensorShape()
        traced = symbolic_trace(m)
        # Verify assertion on traced model works correctly at runtime.
        traced(torch.rand(4, 5))
        with self.assertRaisesRegex(AssertionError, "assert_foobar"):
            traced(torch.rand(4, 3))
        # Verify the traced assertion also survives scripting.
        ms = torch.jit.script(m)
        with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
            ms(torch.rand(4, 3))
    def test_fx_create_arg(self):
        """A type implementing __fx_create_arg__ controls how its instances are
        lowered to graph arguments when passed into a leaf module."""
        class CustomArgObject:
            def __init__(self, x, y):
                self.x = x
                self.y = y
            def __fx_create_arg__(self, tracer: torch.fx.Tracer):
                # Lower the object to a call_function node reconstructing it.
                return tracer.create_node(
                    "call_function",
                    CustomArgObject,
                    args=(
                        tracer.create_arg(self.x),
                        tracer.create_arg(self.y),
                    ),
                    kwargs={},
                )
        class HasCustomArgObjectWhenLeaf(torch.nn.Module):
            def forward(self, o: CustomArgObject):
                for x in o.x:
                    o.y += x
                return o.y
        class Root(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.inner = HasCustomArgObjectWhenLeaf()
            def forward(self, x, y):
                o = CustomArgObject(x, y)
                return self.inner(o)
        class CreateArgTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                # Keep the consumer opaque so the custom arg lowering is used.
                return type(m) is HasCustomArgObjectWhenLeaf
        m = Root()
        graph = CreateArgTracer().trace(m)
        gm = torch.fx.GraphModule(m, graph)
        assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
    def test_copy_no_remap(self):
        """node_copy without an arg-remapping function leaves copied nodes
        pointing at the source graph's nodes, which lint must reject."""
        traced = symbolic_trace(SimpleTest())
        g = traced.graph
        copied = torch.fx.Graph()
        for node in g.nodes:
            copied.node_copy(node)
        with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
            copied.lint()
    def test_wrong_topo(self):
        """lint() rejects a graph whose nodes are not in topological order
        (a use appearing before its definition)."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
        c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        nodes = list(graph.nodes)
        # Move `c` after `d`, so `d` now uses `c` before it is defined.
        nodes[3].append(nodes[2])
        with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
            graph.lint()
    def test_wrong_target_type(self):
        """Constructing a call_function Node with a string target (must be
        callable) raises ValueError."""
        graph : torch.fx.Graph = torch.fx.Graph()
        with self.assertRaises(ValueError):
            n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
                              args=(), kwargs={})
    def test_example_shape_prop(self):
        """ShapeProp annotates every node with tensor_meta; the output node's
        recorded shape and stride must match an actual forward pass."""
        class TestCase(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.attr = torch.randn(3, 4)
                self.submod = torch.nn.Linear(4, 4)
            def forward(self, x):
                return torch.neg(self.submod(x.relu() + self.attr))
        tc = TestCase()
        tc_traced = symbolic_trace(tc)
        ref_out = tc_traced(torch.rand(3, 4))
        shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
        # Make sure we're testing all opcodes
        opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
        for node in tc_traced.graph.nodes:
            opcodes.add(node.op)
            if node.op == 'output':
                output_shape = node.args[0].meta['tensor_meta'].shape
                output_stride = node.args[0].meta['tensor_meta'].stride
        self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
                                       'call_module', 'output']))
        # Test shape propagation and make sure results match actual
        self.assertEqual(output_shape, ref_out.shape)
        self.assertEqual(output_stride, ref_out.stride())
    def test_shape_prop_layout(self):
        """ShapeProp records memory_format in tensor_meta for both contiguous
        and channels_last inputs to a Conv2d module."""
        class ConvTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv2d(5, 5, 3)
            def forward(self, x):
                return self.conv_mod(x)
        # contiguous layout
        test_mod = ConvTest()
        traced = symbolic_trace(test_mod)
        x = torch.randn(5, 5, 224, 224)
        shape_prop.ShapeProp(traced).propagate(x)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced.graph.nodes))
        x_channels_last = x.contiguous(memory_format=torch.channels_last)
        traced.to(memory_format=torch.channels_last)
        shape_prop.ShapeProp(traced).propagate(x_channels_last)
        for node in traced.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
    def test_shape_prop_aggregate(self):
        """ShapeProp handles a leaf module returning an aggregate (tuple of an
        int and a tensor): tensor_meta mirrors the aggregate structure."""
        class ReturnTwo(torch.nn.Module):
            def forward(self, x):
                return (3, torch.sum(x))
        class UnderTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.rt = ReturnTwo()
            def forward(self, x):
                return self.rt(x)
        ut = UnderTest()
        class RTTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                # Keep ReturnTwo opaque so its aggregate return is a single node.
                return type(m) is ReturnTwo
        graph = RTTracer().trace(ut)
        mod = torch.fx.GraphModule(ut, graph)
        shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
        for node in mod.graph.nodes:
            if node.op == 'call_module':
                assert 'tensor_meta' in node.meta
                tensor_meta = node.meta['tensor_meta']
                assert tensor_meta[0] == 3
                # torch.sum over all elements yields a 0-dim tensor.
                assert tensor_meta[1].shape == torch.Size([])
    def test_shape_prop_layout_3d(self):
        """3-D analogue of test_shape_prop_layout: ShapeProp records
        contiguous_format and channels_last_3d for a Conv3d module."""
        class ConvTest3d(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv3d(5, 5, 3)
            def forward(self, x):
                return self.conv_mod(x)
        test_mod_3d = ConvTest3d()
        traced_3d = symbolic_trace(test_mod_3d)
        x_3d = torch.randn(5, 5, 224, 224, 15)
        shape_prop.ShapeProp(traced_3d).propagate(x_3d)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced_3d.graph.nodes))
        x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
        traced_3d.to(memory_format=torch.channels_last_3d)
        shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
        for node in traced_3d.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
    def test_interpreter(self):
        """Interpreter.run on a traced module matches both the GraphModule and
        the original eager module."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        interpreter = Interpreter(gm)
        input = torch.randn(3, 4)
        self.assertEqual(interpreter.run(input), gm(input))
        self.assertEqual(interpreter.run(input), m(input))
    def test_interpreter_run_node_override(self):
        """An Interpreter subclass overriding run_node sees every node and can
        attach per-node results (here caching each value on the node)."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        class RunNodeInterpreter(Interpreter):
            def __init__(self, module):
                super().__init__(module)
            def run_node(self, n : Node) -> Any:
                result = super().run_node(n)
                # Stash the computed value on the node for later inspection.
                n.cached_value = result
                return result
        input = torch.randn(3, 4)
        RunNodeInterpreter(gm).run(input)
        for node in gm.graph.nodes:
            assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
    def test_interpreter_partial_eval(self):
        """initial_env lets Interpreter.run short-circuit a node: pre-seeding
        the linear node's value makes execution resume from there."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        gm = torch.fx.symbolic_trace(MyModule())
        interp = Interpreter(gm)
        env = {}
        for node in gm.graph.nodes:
            if node.op == 'call_module' and node.target == 'linear':
                # Pre-compute a fixed value for the linear node.
                env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
                break
        assert len(env) == 1
        x = torch.randn(3, 4)
        result = interp.run(x, initial_env=env)
        # Only the clamp after `linear` should actually execute.
        self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
    def test_interpreter_star_args(self):
        """Interpreter.run forwards extra positional args into a traced
        function's *args parameter."""
        def with_star_args(x, *args):
            return x + args[0]
        gm = torch.fx.symbolic_trace(with_star_args)
        interp = Interpreter(gm)
        result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
        # Only args[0] participates; the third argument is ignored by the fn.
        self.assertEqual(result, torch.ones(3, 4) * 2.0)
    @skipIfNoTorchVision
    def test_interpreter_noop_resnet18(self):
        """A no-op Transformer pass over ResNet-18 preserves its output."""
        rn18 = torchvision_models.resnet18()
        transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
        inp = torch.randn(5, 3, 224, 224)
        self.assertEqual(transformed(inp), rn18(inp))
    @skipIfNoTorchVision
    def test_interpreter_gc_values(self):
        """Interpreter frees intermediate values as soon as their last use has
        run: after a full pass only the output's value remains in env."""
        rn18 = torchvision_models.resnet18()
        interp = Interpreter(symbolic_trace(rn18))
        inp = torch.rand(5, 3, 224, 224)
        out = interp.run(inp)
        env_key_names = set(n.name for n in interp.env.keys())
        self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
    def test_interpreter_not_enough_args(self):
        """Interpreter.run raises a descriptive RuntimeError when a required
        positional argument without a default is missing."""
        class Model(torch.nn.Module):
            def forward(self, x, y):
                return x + y
        model = Model()
        gm = torch.fx.symbolic_trace(model)
        interp = Interpreter(gm)
        x = torch.randn(5, 3)
        with self.assertRaisesRegex(RuntimeError,
                                    'Expected positional argument for parameter y, but one was not passed in'):
            out = interp.run(x)
    def test_transformer_noop(self):
        """A base Transformer pass (no overrides) yields a GraphModule with
        identical behavior."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
    def test_transformer_multi_outputs(self):
        """Transformer preserves behavior for a module returning multiple
        outputs (a tuple)."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                x = x + self.param
                out = self.linear(x)
                return x, out
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
    def test_fn_type_annotations(self):
        """forward() annotations (NamedTuple params, Dict return) survive
        tracing well enough for the traced module to be TorchScript-compiled."""
        class Foo(torch.nn.Module):
            def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
                return {'a': p.x + p.y + z + i}
        foo_scripted = torch.jit.script(Foo())
        foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
        fxed = symbolic_trace(Foo())
        fxed_scripted = torch.jit.script(fxed)
        fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
    def test_ctx_mgr(self):
        """A forward() decorated with a context-manager factory still traces
        and matches eager execution."""
        @contextlib.contextmanager
        def do_nothing():
            yield
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            @do_nothing()
            def forward(self, x):
                return torch.relu(x)
        m = M()
        self.checkGraphModule(m, (torch.rand(3, 4),))
    def test_typename_print(self):
        """A node's type_expr annotation is rendered in the graph printout."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
                                              type_expr=List[float])
        output : torch.fx.Node = graph.output(b)
        self.assertTrue('typing.List[float]' in str(graph))
    def test_layout(self):
        """torch.layout keyword arguments (layout=torch.strided) survive
        tracing and code generation."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x):
                return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
        traced = symbolic_trace(M())
        x = torch.rand(5, 9, 3, 4)
        self.assertEqual(traced(x), torch.zeros_like(x))
    def test_ellipsis(self):
        """Ellipsis (...) in subscript expressions traces and regenerates
        correctly."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y):
                return x + y[:, 1:10, ...]
        traced = symbolic_trace(M())
        x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
        self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
    def test_inf_nan(self):
        """float('inf'), float('-inf'), and float('nan') literals survive
        tracing and code generation."""
        class FooMod(torch.nn.Module):
            def forward(self, x):
                return x + float('inf'), x + float('-inf'), x + float('nan')
        fm = FooMod()
        self.checkGraphModule(fm, (torch.rand(3, 4),))
    def test_inf_nan_kwds(self):
        """inf/nan constants embedded directly as node args (not via tracing)
        also generate valid code."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
        c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
        graph.output((b, c))
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        x = torch.rand(3, 4)
        self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
    def test_deepcopy_recursion_depth(self):
        """Deep-copying a chain deeper than the recursion limit must not
        overflow, and the copy's user/use structure must mirror the original."""
        depth = sys.getrecursionlimit() + 20
        g = torch.fx.Graph()
        x = g.placeholder('x')
        for i in range(depth):
            x = g.call_function(torch.relu, (x,))
        g.output(x)
        copied_graph = copy.deepcopy(g)
        # Map each original node to its copy, then check that every node's
        # users set maps exactly onto the copy's users set.
        val_map = {}
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            val_map[orig_node] = new_node
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            orig_users = set(orig_node.users.keys())
            orig_users_equiv = set(val_map[u] for u in orig_users)
            new_users = set(new_node.users.keys())
            self.assertEqual(orig_users_equiv, new_users)
    @skipIfNoTorchVision
    def test_replace_uses(self):
        """End-to-end graph surgery on ResNet-18: insert neg nodes before each
        traced relu, redirect all uses, then erase the relu nodes."""
        rn18 = torchvision_models.resnet18()
        class LowerReluTracer(torch.fx.Tracer):
            def is_leaf_module(self, m : torch.nn.Module, qualname : str):
                # Trace into ReLU modules so relus appear as call_function nodes.
                if isinstance(m, torch.nn.ReLU):
                    return False
                return super().is_leaf_module(m, qualname)
        rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
        to_erase = []
        for node in rn18_traced.graph.nodes:
            if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
                kwargs = node.kwargs.copy()
                # Neg doesn't have in-place
                kwargs.pop('inplace')
                with rn18_traced.graph.inserting_before(node):
                    new_node = rn18_traced.graph.call_function(
                        the_function=torch.neg, args=node.args, kwargs=node.kwargs)
                node.replace_all_uses_with(replace_with=new_node)
                to_erase.append(node)
        # Erase after the loop: erasing while iterating would invalidate it.
        for node in to_erase:
            rn18_traced.graph.erase_node(node)
    def test_replace_input(self):
        """Node.replace_input_with swaps one input node for another; the relu
        then consumes y instead of x."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        b.replace_input_with(x, y)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input_x = torch.randn(33, 44)
        input_y = torch.randn(11, 22)
        self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
    def test_insertion_point(self):
        """graph.inserting_before places a new node ahead of an existing one,
        which can then be rewired to consume it."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        with graph.inserting_before(b):
            neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
            # Rewire relu to take the new neg node as its first argument.
            _, *relu_args = b.args
            b.args = (neg, *relu_args)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_update_args_api(self):
        """`Node.update_arg(i, new)` replaces the i-th positional argument,
        changing which placeholder the relu consumes."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        # Point argument 0 at y instead of x.
        b.update_arg(0, y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_update_kwargs_api(self):
        """`Node.update_kwarg(key, new)` replaces a keyword argument,
        changing which placeholder the relu consumes."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        # Rebind the 'input' kwarg from x to y.
        b.update_kwarg('input', y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_immutable_list_pytree_ops(self):
        """`immutable_list` round-trips through pytree flatten/unflatten and
        preserves its concrete type."""
        rand_tensor = torch.randn(5, 3)
        l = immutable_list([3, [rand_tensor, 42]])
        flattened, spec = pytree.tree_flatten(l)
        assert flattened == [3, rand_tensor, 42]
        unflattened = pytree.tree_unflatten(flattened, spec)
        assert unflattened == l
        # Type must survive the round trip, not decay to a plain list.
        assert isinstance(unflattened, immutable_list)
    def test_immutable_dict_pytree_ops(self):
        """`immutable_dict` round-trips through pytree flatten/unflatten and
        preserves its concrete type."""
        rand_tensor = torch.randn(5, 3)
        d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
        flattened, spec = pytree.tree_flatten(d)
        assert flattened == [3, rand_tensor, 42]
        unflattened = pytree.tree_unflatten(flattened, spec)
        assert unflattened == d
        # Type must survive the round trip, not decay to a plain dict.
        assert isinstance(unflattened, immutable_dict)
    def test_move_before(self):
        """`Node.prepend` moves an already-created node so it executes before
        its user; result is relu(neg(x))."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        # neg is appended after output; rewire relu to use it, then move it.
        neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
        _, *relu_args = b.args
        b.args = (neg, *relu_args)
        b.prepend(neg)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_prepend_self(self):
        """Prepending a node before itself and re-appending it must be no-ops
        that neither drop nor duplicate nodes."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        b.prepend(b)
        x.append(b)
        # Still exactly placeholder + relu + output.
        self.assertEqual(len(graph.nodes), 3)
    def test_erase_node_error(self):
        """Erasing a node that still has users must raise a RuntimeError
        mentioning the remaining user count."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        for node in traced.graph.nodes:
            # Test deleting with uses both in another Node and at the output
            if node.target in [operator.add, torch.relu]:
                with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
                    traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
    def test_get_torch_func_signature(self):
        """Smoke test: `get_signature_for_torch_op` must not crash on any
        callable attribute of the `torch` namespace."""
        for key in dir(torch):
            obj = getattr(torch, key)
            if callable(obj):
                # Return value intentionally unchecked; only absence of
                # exceptions is asserted here.
                schemas = get_signature_for_torch_op(obj)
    def test_find_uses(self):
        """`Node.users` must enumerate every consumer of a node: here x feeds
        relu, add, and neg."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        y = torch.relu(x)
        z = x + x
        u = torch.neg(x)
        graph.output((y + z + u).node)
        graph.lint()
        users_of_x = x.node.users
        self.assertEqual(len(users_of_x), 3)
        expected_ops = set(['relu', 'add', 'neg'])
        # Each user node's name should begin with one of the expected op names.
        for use in users_of_x:
            assert any(use.name.startswith(prefix) for prefix in expected_ops)
    def test_inline_graph(self):
        """`graph_copy` with a value map stitches two traced graphs together:
        the output of one becomes the input of the other (relu then neg)."""
        class InlineInto(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)

        class ToInline(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)

        inline_into = symbolic_trace(InlineInto())
        to_inline = symbolic_trace(ToInline())

        combined_graph = torch.fx.Graph()
        output_node = combined_graph.graph_copy(inline_into.graph, {})

        input_node = list(to_inline.graph.nodes)[0]
        assert input_node and input_node.op == 'placeholder'

        # Map the second graph's placeholder onto the first graph's output.
        val_map = {input_node : output_node}
        output = combined_graph.graph_copy(to_inline.graph, val_map)
        combined_graph.output(output)

        combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)

        input = torch.rand(3, 4)
        self.assertEqual(combined_module(input), input.relu().neg())
    def test_multi_insert_point(self):
        """Multiple nodes created inside one `inserting_before` context must
        land, in creation order, ahead of the anchor node."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        relu = torch.relu(x)

        with graph.inserting_before(relu.node):
            y = torch.neg(x)
            z = torch.tanh(y)

        graph.output((relu.node, z.node))
        graph.lint()

        # Expected topological order: x, neg, tanh, relu.
        expected_ops = ['x', 'neg', 'tanh', 'relu']
        for node, expected in zip(graph.nodes, expected_ops):
            assert expected in node.name
    def test_reassign_args_kwargs_uses(self):
        """Assigning to `node.args` must keep the users bookkeeping of all
        involved nodes consistent (adding and removing uses)."""
        graph = torch.fx.Graph()
        x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
        z = x + y
        zed = z + z + z
        graph.output(zed.node)
        graph.lint()

        # zed = z + z + x -> x gains zed as a user.
        zed.node.args = (zed.node.args[0], x.node)
        self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])

        # z = y + y -> x loses z as a user.
        z.node.args = (y.node, y.node)
        self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
    def test_trace_dict_int_keys(self):
        """Tracing may pass a dict with *constant* (int) keys to a leaf module;
        this must succeed (contrast with test_trace_dict_proxy_keys)."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[int, torch.Tensor]):
                return d[42]

        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()

            def forward(self, x):
                return self.m({42: x})

        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return isinstance(m, ModWithDictArg)

        traced_graph = MyTracer().trace(CallsModWithDict())
    def test_trace_dict_proxy_keys(self):
        """Tracing a dict whose *keys* are Proxy values must be rejected with
        a 'cannot contain a Node' RuntimeError."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
                return d[42]

        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()

            def forward(self, x):
                # x is a Proxy during tracing -> illegal dict key.
                return self.m({x: x})

        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return isinstance(m, ModWithDictArg)

        with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
            traced_graph = MyTracer().trace(CallsModWithDict())
    def test_module_deepcopy_edit_nodes(self):
        """Editing nodes of a deep-copied GraphModule must not affect the
        original: copy becomes neg, original stays relu."""
        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)

        traced1 = symbolic_trace(Foo())
        copied = copy.deepcopy(traced1)

        for node in copied.graph.nodes:
            if node.target == torch.relu:
                node.target = torch.neg

        copied.recompile()
        traced1.recompile()

        x = torch.randn(15, 15)
        torch.testing.assert_allclose(traced1(x), torch.relu(x))
        torch.testing.assert_allclose(copied(x), torch.neg(x))
    def test_direct_param_use(self):
        """Directly referencing a submodule's Parameter (and methods on it)
        must trace to get_attr nodes, not synthesized 'constant' attributes."""
        class TransposeTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.b = torch.nn.Parameter(torch.rand(4, 3))

            def forward(self, x):
                return self.b

        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.a = TransposeTest()

            def forward(self, x):
                return self.a.b, self.a.b.t(), self.a.b.view(12)

        traced = torch.fx.symbolic_trace(Foo())
        assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
    def test_namedtuple_return_qualname(self):
        """Returning a module-level NamedTuple (MyNamedTup) from forward must
        survive tracing and execution."""
        class NamedTupReturn(torch.nn.Module):
            def forward(self, x):
                return MyNamedTup(x, x)

        traced = symbolic_trace(NamedTupReturn())
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), MyNamedTup(input, input))
    def test_update_args_kwargs_yells_at_you(self):
        """The name-mangled `__update_args_kwargs` must not be callable from
        outside Node; direct access raises AttributeError."""
        symtraced = symbolic_trace(SimpleTest())
        node = next(iter(symtraced.graph.nodes))
        with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
            node.__update_args_kwargs((), {})
    def test_torchbind_class_attribute_in_fx(self):
        """A torchbind custom-class attribute (_StackString) must be traceable
        as a module attribute. Skipped on platforms where the test class is
        not registered."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")

        class FooBar1234(torch.nn.Module):
            def __init__(self):
                super(FooBar1234, self).__init__()
                self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])

            def forward(self):
                return self.f.top()

        m = FooBar1234()
        self.checkGraphModule(m, ())
    def test_torchbind_class_attribute_in_fx_tensor_arg(self):
        """A torchbind custom class taking a Tensor argument must trace to a
        call_method node and produce matching results."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")

        class FooBar2341(torch.nn.Module):
            def __init__(self):
                super(FooBar2341, self).__init__()
                self.f = torch.classes._TorchScriptTesting._ReLUClass()

            def forward(self, x):
                return self.f.run(x)

        m = FooBar2341()

        traced = symbolic_trace(m)
        input = torch.randn(3, 4)
        self.assertEqual(traced(input), m(input))

        self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
    def test_script_method_trace(self):
        """A jit.script-ed submodule must trace as a call_method node (not be
        inlined) and produce the same output as eager execution."""
        class Scripted(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)

        class Holder(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.s = torch.jit.script(Scripted())

            def forward(self, x):
                return self.s(x)

        h = Holder()
        traced = symbolic_trace(h)
        input = torch.randn(3, 4)
        self.assertEqual(traced(input), h(input))

        self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
    def test_namedtuple_return_trace(self):
        """Returning the module-level `Pair` NamedTuple from forward must
        survive tracing and execution."""
        class NamedTupReturn(torch.nn.Module):
            def forward(self, x):
                return Pair(x, x)

        traced = symbolic_trace(NamedTupReturn())
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), Pair(input, input))
    def test_named_tuple_inlined(self):
        """NamedTuple values may appear directly in node args/kwargs: after
        `update_arg`/`update_kwarg` with Pair literals, `_format_arg` must
        render them and the rewritten graph must still run correctly."""
        class NamedTupMod(torch.nn.Module):
            def forward(self, inp):
                return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))

        m = NamedTupMod()
        input = torch.rand(3, 4)
        ref = m(input)
        traced = symbolic_trace(m)

        res = traced(input)
        self.assertEqual(ref, res)

        # Check Pair NamedTuple works when inlined into the args of a node.
        ph = call_func = None
        for node in traced.graph.nodes:
            if node.op == "placeholder":
                ph = node
            elif node.op == "call_function" and node.target == wrapped_named_tup:
                node.update_arg(0, Pair(ph, 1.2))
                node.update_kwarg("p2", Pair(3.4, ph))
                call_func = node
                break
        self.assertTrue(call_func is not None)
        self.assertTrue(isinstance(call_func.args[0], Pair))
        self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
        self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
        self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")

        # DCE must not remove the (now directly-referenced) placeholder uses.
        traced.graph.eliminate_dead_code()
        traced.recompile()
        res = traced(input)
        self.assertEqual(ref, res)
    def test_return_type_exists(self):
        """Generated code must carry return annotations (mangled as
        `typing_List`) and remain scriptable with the real `List[str]`."""
        class ReturnTypeModule(torch.nn.Module):
            def other(self, x: List[str]) -> List[str]:
                return x

            def forward(self, x: List[str]) -> List[str]:
                return self.other(x)

        traced = symbolic_trace(ReturnTypeModule())
        self.assertIn("-> typing_List[str]", traced._code)
        scripted = torch.jit.script(traced)
        self.assertIn("-> List[str]", scripted.code)
    def getitem_inner(self):
        """Shared body for the getitem tests: buffer indexing with slices,
        proxy-valued indices, and constant indices must all trace correctly.
        (Driven directly and from a subprocess with FX_PATCH_GETITEM=1.)"""
        class GetItemBase(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer('pe', torch.randn(8, 8))

        class GetItem1(GetItemBase):
            def forward(self, x):
                # Slice bounded by a traced value.
                return self.pe[:, :x.size(0)]

        class GetItem2(GetItemBase):
            def forward(self, x):
                # Index is itself a traced value.
                return self.pe[x.size(0)]

        class GetItem3(GetItemBase):
            def forward(self, x):
                return self.pe[4]  # fx creates `self._tensor_constant0` here

        self.checkGraphModule(GetItem1(), [torch.zeros(4)])
        self.checkGraphModule(GetItem2(), [torch.zeros(4)])
        self.checkGraphModule(GetItem3(), [torch.zeros(4)])
    @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
                         "Will be checked in test_getitem_subproc")
    def test_getitem(self):
        """Run getitem_inner in-process; only active when FX_PATCH_GETITEM=1
        (otherwise covered by test_getitem_subproc)."""
        self.getitem_inner()
    def test_getitem_subproc(self):
        """Run the getitem checks in a child process (which sets
        FX_PATCH_GETITEM) and assert it exits cleanly."""
        proc = Process(target=run_getitem_target)
        proc.start()
        proc.join()
        self.assertEqual(proc.exitcode, 0)
    def test_user_friendly_call_provenance_with_function(self):
        """Scripting a traced free function that calls a wrapped fn must report
        the call site as 'fn.forward' in the error message."""
        def fn(x):
            return wrapper_fn(x)

        traced = torch.fx.symbolic_trace(fn)

        with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                    "being compiled since it was called"
                                    " from 'fn.forward'"):
            scripted = torch.jit.script(traced)
    def test_user_friendly_call_provenance_with_module(self):
        """Scripting a traced Module that calls a wrapped fn must report the
        call site as 'M.forward' in the error message."""
        class M(torch.nn.Module):
            def forward(self, x):
                return wrapper_fn(x)

        traced = torch.fx.symbolic_trace(M())

        with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                    "being compiled since it was called"
                                    " from 'M.forward'"):
            scripted = torch.jit.script(traced)
    def test_snake_case(self):
        """Node names are normalized to snake_case while targets keep the
        original ModuleDict key spelling (PascalCase / ALL_CAPS)."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.activations = torch.nn.ModuleDict([
                    ["snake_case", torch.nn.ReLU()],
                    ["PascalCase", torch.nn.LeakyReLU()],
                    ["ALL_CAPS", torch.nn.PReLU()]
                ])

            def forward(self, x):
                a = self.activations["snake_case"](x)
                b = self.activations["PascalCase"](x)
                c = self.activations["ALL_CAPS"](x)
                return a, b, c

        traced = symbolic_trace(M())

        # (normalized node name, original qualified target) per call_module.
        check = [
            ("activations_snake_case", "activations.snake_case"),
            ("activations_pascal_case", "activations.PascalCase"),
            ("activations_all_caps", "activations.ALL_CAPS")
        ]

        i = 0
        for node in traced.graph.nodes:
            if node.op == "placeholder" or node.op == "output":
                continue
            name = check[i][0]
            target = check[i][1]
            self.assertEqual(name, node.name)
            self.assertEqual(target, node.target)
            i += 1
        # All three call_module nodes must have been visited.
        self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
    def test_partial_trace(self):
        """`concrete_args` specializes a trace on a constant argument and
        inserts a torch._assert guard that fires when callers deviate; also
        works for callable concrete args (higher-order functions)."""
        class Foo(torch.nn.Module):
            def forward(self, x, y):
                if y:
                    return 2 * x
                else:
                    return x

        mod = Foo()
        mod_true = symbolic_trace(mod, concrete_args={'y': True})
        mod_false = symbolic_trace(mod, concrete_args={'y': False})
        self.assertEqual(mod_true(3, True), 6)
        # NOTE(review): debugging print left in upstream; kept byte-identical.
        print(mod_true.code)
        assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
        with self.assertRaises(AssertionError):
            mod_true(3, False)
        self.assertEqual(mod_false(3, False), 3)
        with self.assertRaises(AssertionError):
            mod_false(3, True)

        # Concrete args may also be callables: the trace specializes through
        # the provided function.
        def f_higher(a, f):
            return f(a)

        nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
        self.assertEqual(nf(3, lambda x: x * 2), 6)
    def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
        """When the failure originates in FX-generated forward code, the
        custom 'Call using an FX-traced Module' banner must be printed."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.W = torch.nn.Parameter(torch.randn(5))

            def forward(self, x):
                return torch.dot(self.W, x)

        traced = torch.fx.symbolic_trace(M())

        # Inject a relu on the output so the generated code has extra lines.
        out = [n for n in traced.graph.nodes if n.op == "output"][-1]
        with traced.graph.inserting_before(out):
            relu_out = traced.graph.call_method(method_name='relu',
                                                args=(out.args[0],))
        out.args = (relu_out,)

        traced.recompile()

        with self.capture_stderr() as captured:
            with self.assertRaises(TypeError):
                # torch.dot on an int -> TypeError raised inside generated code.
                traced(5)

        self.assertRegex(captured[0],
                         r"Call using an FX-traced Module, line .* of the "
                         r"traced Module's generated forward function:")
    def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
        """When the failure originates inside a real submodule (shape mismatch
        in Linear), the FX traceback banner must NOT appear."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(3, 4)

            def forward(self, x):
                return self.linear(x)

        traced = torch.fx.symbolic_trace(M())

        # Do not change this to `capture_stderr` or another context
        # manager without ensuring that the output is as expected
        try:
            # 5x5 input into a 3->4 Linear -> RuntimeError from the submodule.
            traced(torch.rand(5, 5))
        except RuntimeError:
            captured = traceback.format_exc()

        self.assertNotRegex(captured,
                            r"Call using an FX-traced Module, line .* of the "
                            r"traced Module's generated forward function:")
    def test_graph_module_replicate_for_dp(self):
        """`_replicate_for_data_parallel` must yield a replica that computes
        the same result as the original GraphModule."""
        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)

        gm = torch.fx.symbolic_trace(Foo())

        x = torch.randn(5, 3)
        out = gm(x)

        replica = gm._replicate_for_data_parallel()
        out_replica = replica(x)

        torch.testing.assert_allclose(out_replica, out)
    def test_ast_rewriter_rewrites_assert(self):
        """RewritingTracer must rewrite a bare `assert` into traceable form;
        the resulting graph must lint cleanly."""
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, y: int, z: int):
                assert y == z
                return torch.add(x, x)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_ast_rewriter_rewrites_assert_with_message(self):
        """Same as test_ast_rewriter_rewrites_assert, but the assert carries a
        message argument."""
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, y: int, z: int):
                assert y == z, "msg"
                return torch.add(x, x)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_throw_out_variant(self):
        """A tracer with `check_mutable_operations = True` must reject
        out-variant ops (here `torch.sigmoid(..., out=...)`)."""
        def foo(x):
            y = torch.rand_like(x)
            torch.sigmoid(x, out=y)
            return y

        class MyTracer(torch.fx.Tracer):
            check_mutable_operations = True

        tracer = MyTracer()
        with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
            traced_graph = tracer.trace(foo)
    def test_ast_rewriter_reassigns_submodules(self):
        """RewritingTracer must handle a module carrying a (unused here)
        stateful submodule; the resulting graph must lint cleanly."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.bn = torch.nn.BatchNorm2d(100)

            def forward(self, x: torch.Tensor):
                return torch.add(x, x)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_ast_rewriter_wrap(self):
        """A function registered via fx.wrap must stay a leaf under the AST
        rewriter (appear by name in generated code), still compute correctly,
        and the global binding must be left unpatched afterwards."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))

        def to_trace(y):
            return (
                a_lifted_leaf((4, y), 3)
                + a_lifted_leaf((3, 4), 5)
                + a_lifted_leaf((y, y), y)
            )

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("a_lifted_leaf", traced.code)
        self.assertEqual(27, traced(2))
        # Patching must be undone after tracing.
        self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
    def test_ast_rewriter_wrap_fn_directly(self):
        """Same as test_ast_rewriter_wrap, but for a function wrapped by
        passing the function object directly to fx.wrap."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))

        def to_trace(y):
            return (
                a_lifted_leaf2((4, y), 3)
                + a_lifted_leaf2((3, 4), 5)
                + a_lifted_leaf2((y, y), y)
            )

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("a_lifted_leaf2", traced.code)
        self.assertEqual(27, traced(2))
        # Patching must be undone after tracing.
        self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
    def test_profiler_ranges_side_effect(self):
        """Profiler range enter/exit calls are side-effectful: dead-code
        elimination must NOT remove them even though their results are
        unused."""
        g = torch.fx.Graph()
        handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
        g.call_function(torch.ops.profiler._record_function_exit, (handle,))
        g.output(None)

        # Dict used as an ordered set of observed call_function targets.
        found_targets = {}
        for node in g.nodes:
            if node.op == 'call_function':
                found_targets.setdefault(node.target)
        self.assertEqual(
            list(found_targets.keys()),
            [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
        )

        g.eliminate_dead_code()
        # Both profiler calls must survive DCE.
        found_targets = {}
        for node in g.nodes:
            if node.op == 'call_function':
                found_targets.setdefault(node.target)
        self.assertEqual(
            list(found_targets.keys()),
            [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
        )
    def test_ast_rewriter_wrapped_via_decorator(self):
        """A decorator-wrapped function must stay a leaf under the AST
        rewriter, compute correctly, and leave the global binding and its
        patch flag untouched afterwards."""
        class F(torch.nn.Module):
            def forward(self, x):
                return wrapped_via_decorator(x)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(F())
        traced = GraphModule(ast_rewriter.root, graph, "gm")

        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
        """A decorator-wrapped leaf must survive both the AST rewriter AND a
        subsequent fx.Transformer pass, with the global left unpatched."""
        self.assertEqual(wrapped_via_decorator(0), 1)

        def to_trace(y):
            return wrapped_via_decorator(y)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")

        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

        # Round-trip through Transformer must preserve the leaf call.
        transformed = torch.fx.Transformer(traced).transform()
        self.assertIn("wrapped_via_decorator", transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_ast_rewriter_wrap_with_submodule(self):
        """A wrapped function that takes a submodule argument must stay a leaf
        under the AST rewriter and compute the same as calling the submodule
        directly."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)

            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")

        self.assertIn("wrapped_with_submodule", traced.code)

        input = torch.rand(3, 2)
        # affine=False => fresh reference BN has identical (no) parameters.
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), traced(input))
    def test_submodule_manipulation_API(self):
        """End-to-end exercise of GraphModule submodule/parameter/buffer APIs:
        add_submodule, delete_submodule, get_submodule/parameter/buffer
        (including their error cases), get_attr warning behavior, and
        delete_all_unused_submodules."""
        class C(torch.nn.Module):
            def __init__(self):
                super(C, self).__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
                self.param = torch.nn.Parameter(torch.rand(2, 3))

            def forward(self, x):
                return self.conv(torch.cat([self.param, x]))

        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.linear = torch.nn.Linear(100, 200)
                self.register_buffer("buf", torch.randn(2, 3))
                self.net_c = C()

            def forward(self, x):
                return self.linear(torch.cat([self.buf, self.net_c(x)]))

        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()
                self.net_b = B()
                self.param = torch.nn.Parameter(torch.rand(2, 3))

            def forward(self, x):
                return self.net_b(x) + self.param

        a = symbolic_trace(A())

        # Add a dropout submodule and swap it in for the conv node.
        a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))

        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
        with a.graph.inserting_before(conv):
            with warnings.catch_warnings(record=True) as w:
                dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                              args=conv.args)
                # No warning: the referenced submodule exists.
                self.assertEqual(len(w), 0)

        conv.replace_all_uses_with(dropout)
        a.graph.erase_node(conv)
        a.recompile()

        def module_exists(gm: GraphModule, path: str) -> bool:
            return any(path == name for name, _ in gm.named_modules())

        def parameter_exists(gm: GraphModule, path: str) -> bool:
            return (any(path == name for name, _ in gm.named_parameters())
                    and any(path == name for name in gm.state_dict().keys()))

        def buffer_exists(gm: GraphModule, path: str) -> bool:
            return (any(path == name for name, _ in gm.named_buffers())
                    and any(path == name for name in gm.state_dict().keys()))

        # Test that we added the "dropout" submodule
        self.assertTrue(module_exists(a, "net_b.net_c.dropout"))

        # Test `get_submodule` with an added submodule
        self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))

        # Test that the "conv" submodule is still there
        self.assertTrue(module_exists(a, "net_b.net_c.conv"))

        # Test `get_submodule` with an original module
        self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))

        # Test that the "conv" node is NOT still there
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
        self.assertEqual(conv, [])

        a.delete_submodule("net_b.net_c.conv")

        # Test that the "conv" submodule is now gone
        self.assertFalse(module_exists(a, "net_b.net_c.conv"))

        # Test `get_submodule` with a deleted submodule
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`conv`"):
            self.assertIsNone(a.get_submodule("net_b.net_c.conv"))

        # get_attr on an existing attribute must not warn; on a nonexistent
        # one it must warn, and the bad node is erased again.
        cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
        with a.graph.inserting_before(cat):
            with warnings.catch_warnings(record=True) as w:
                param = a.graph.get_attr(qualified_name="net_b.net_c.param")
                self.assertEqual(len(w), 0)

            with self.assertWarnsRegex(UserWarning, "Attempted to "
                                       "insert a get_attr Node with no "
                                       "underlying reference in the "
                                       "owning GraphModule"):
                bad_param = a.graph.get_attr(qualified_name="net_b.param")
                a.graph.erase_node(bad_param)

        cat.args = (*cat.args, param)

        a.recompile()

        a.graph.lint()

        # Accessor error cases: wrong kind and missing attribute.
        a.get_parameter("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "is not an "
                                    "nn.Parameter"):
            a.get_parameter("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`param`"):
            a.get_parameter("net_b.param")
        a.get_buffer("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "is not a "
                                    "buffer"):
            a.get_buffer("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`buf`"):
            a.get_buffer("net_b.net_c.buf")

        # Root-level accessors.
        a.get_submodule("")
        a.get_parameter("param")

        # Unused submodules must be pruned; used params/buffers must survive.
        a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
        a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))

        a.delete_all_unused_submodules()

        self.assertFalse(module_exists(a, "net_b.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
        self.assertFalse(module_exists(a, "batch_norm_2d"))

        self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
        self.assertTrue(buffer_exists(a, "net_b.buf"))

        a.graph.lint()
    def test_delete_unused_submodules_leaf(self):
        """`delete_all_unused_submodules` must keep a leaf submodule (and its
        children, e.g. linear/relu) that IS referenced by the graph."""
        class SubModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(10, 10)
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x = self.linear(x)
                x = self.relu(x)
                return x

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.submod = SubModule()

            def forward(self, x):
                x = self.submod(x)
                return x

        model = Model()

        class MyCustomTracer(torch.fx.Tracer):
            # Keep `submod` opaque so it appears as a single call_module node.
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return module_qualified_name == "submod"

        inputs = torch.randn(1, 10)
        traced_graph = MyCustomTracer().trace(model)
        gm2 = torch.fx.GraphModule(model, traced_graph)
        gm2.delete_all_unused_submodules()
        torch.testing.assert_allclose(gm2(inputs), model(inputs))
    def test_tracing_graphmodules_as_leaf_submodules(self):
        """Tracing behavior of GraphModule submodules as leaves vs non-leaves:
        (1) non-leaf B is inlined and its custom __call__ honored;
        (2) leaf plain-Module B keeps its custom __call__;
        (3) leaf GraphModule B bypasses __call__ (forward invoked directly)."""
        class A(torch.nn.Module):
            def forward(self, t):
                return t + t

        class B(torch.nn.Module):
            def __init__(self):
                super(type(self), self).__init__()
                self.calling = False
                self.called = False

            def forward(self, t):
                if self.calling:
                    return t - t
                else:
                    return t + t

            def __call__(self, *args):
                self.called = True
                self.calling = True
                return super(type(self), self).__call__(*args)
                # NOTE(review): line below is unreachable (after return);
                # kept byte-identical, `calling` is never reset.
                self.calling = False

        class M(torch.nn.Module):
            def __init__(self, a, b):
                super().__init__()
                self.a = a
                self.b = b

            def forward(self, t):
                x = self.a(t)
                y = self.b(t)
                return x + y

        class LeafTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True

        class LeafTracerNotB(Tracer):
            def is_leaf_module(self, module, name):
                return False if "b" in name else True

        # Recompile calls added "for fun", since they
        # chain __call__ wrappers.

        #
        # Test: B as a regular, non-leaf module
        #
        a = symbolic_trace(A())
        a.recompile()
        m = M(a, B())
        graph = LeafTracerNotB().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()

        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)

        # Test submodule b is not treated as leaf.
        self.assertFalse(hasattr(gm, "b"))

        # Test assert custom __call__ on submodule b was honored.
        match = [
            n
            for n in gm.graph.nodes
            if n.op == "call_function" and n.target == operator.sub
        ]
        self.assertTrue(len(match) == 1)

        #
        # Test: B as a regular, leaf module
        # symbolic_trace should only patch torch.nn.Module.__call__,
        # which means B.__call__ should still execute
        #
        a = symbolic_trace(A())
        a.recompile()
        b = B()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()

        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)

        # Test submodule b is leaf:
        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)

        # Test b.__call__ was run
        self.assertTrue(b.called)
        self.assertTrue(gm.get_submodule("b").called)

        #
        # Test: B as GraphModule leaf
        # __call__ not honored since symbolic_trace directly invokes forward()
        #
        a = symbolic_trace(A())
        a.recompile()
        b = symbolic_trace(B())
        b.recompile()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()

        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)

        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)
    def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
        """Shared driver: constructing a GraphModule either from a plain dict
        of attributes or from a root module must copy buffers and parameters
        referenced by the graph, and the copy must compute identically.

        Args:
            use_dict_init: if True, init from {'my_buff': ..., 'my_param': ...};
                otherwise init from the original root module.
        """
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("my_buff", torch.rand(3, 4))
                self.register_parameter(
                    "my_param", torch.nn.Parameter(torch.rand(3, 4))
                )

            def forward(self, x):
                return x + self.my_buff + self.my_param

        mod = MyModule()
        mod_traced = symbolic_trace(mod)

        # Create new GraphModule based on original, either w/ dict or root module.
        orig_buff = mod_traced.get_buffer("my_buff")
        orig_param = mod_traced.get_parameter("my_param")
        mod_traced_new = GraphModule(
            {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
            mod_traced.graph,
        )

        # Check that both my_buff and my_param are found and the same.
        try:
            new_buff = mod_traced_new.get_buffer("my_buff")
        except Exception:
            self.fail("Did not find my_buff")
        self.assertEqual(orig_buff, new_buff)

        try:
            new_param = mod_traced_new.get_parameter("my_param")
        except Exception:
            self.fail("Did not find my_param")
        self.assertEqual(orig_param, new_param)

        x = torch.rand(3, 4)
        orig_out = mod_traced(x)
        submodules_out = mod_traced_new(x)

        self.assertEqual(orig_out, submodules_out)
    def test_graph_module_init_buffer_param_copied_dict_init(self):
        """GraphModule init from a dict must copy buffers/params (see driver)."""
        self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
    def test_graph_module_init_buffer_param_copied_mod_init(self):
        """GraphModule init from a root module must copy buffers/params (see driver)."""
        self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
    def test_annotations_with_no_forward_references(self):
        """Tracing must handle plain (non-string) annotations, including a
        user-defined callable type."""
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
                return a(x)

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    def test_annotations_with_forward_references(self):
        """Tracing must handle string ('forward reference') annotations."""
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
                return a(x)

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
        """Tracing must handle typing-container annotations (List[...]) whose
        element annotation is not a forward reference."""
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
                return a(x[0])

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
        """Tracing must handle typing-container annotations whose element
        annotation IS a forward reference (List['torch.Tensor'])."""
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
                return a(x)[0]

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                     "`annotations` is not defined in Python <3.7")
    def test_annotation_with_future(self):
        """Importing a module that uses `from __future__ import annotations`
        must not break FX; the cached __future__ module is dropped afterwards
        so other tests see a clean state."""
        try:
            import fx.test_future    # noqa: F401
        finally:
            del sys.modules["__future__"]
    def test_annotations_empty_tuple(self):
        """Empty-tuple annotations (Tuple[()]) must survive codegen (mangled
        spelling in FX code) and scripting (canonical spelling)."""
        class Foo(torch.nn.Module):
            def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
                return "foo"

        traced = torch.fx.symbolic_trace(Foo())

        x = ()
        y = ("bar", ())

        traced(x, y)

        FileCheck().check("_Tuple[()]") \
                   .check("typing_Tuple[str,typing_Tuple[()]]") \
                   .run(traced.code)

        scripted = torch.jit.script(traced)

        scripted(x, y)

        FileCheck().check("Tuple[()]") \
                   .check("Tuple[str, Tuple[()]]") \
                   .run(scripted.code)
    @unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
    def test_assert(self):
        # With ``trace_asserts`` enabled, a plain ``assert`` in the traced
        # function must be captured into the graph and still fire at runtime.
        def f(x):
            assert x > 1
            return x + 1
        try:
            # Global tracer flag: set, trace, and always restore in finally so
            # a tracing failure cannot leak the flag into other tests.
            torch.fx.proxy.TracerBase.trace_asserts = True
            traced = symbolic_trace(f)
        finally:
            torch.fx.proxy.TracerBase.trace_asserts = False
        self.assertEqual(f(2), traced(2))
        with self.assertRaises(AssertionError):
            traced(0)
    def test_pytree(self):
        # End-to-end checks for tracing with pytree-structured concrete_args:
        # lists, dicts, custom registered nodes (Foo) and namedtuples (Point).
        def f_sum(x):
            return sum(x)
        def f_sum_dict(x):
            out = 0
            for k, v in x.items():
                out += v
            return out
        def f_dict_list_map(x):
            new_dict = {}
            for k, v in x.items():
                new_dict[k] = [i + 1 for i in v]
            return new_dict
        def f_dict_add(x):
            return x['a'] + sum(x['z'])
        def f_namedtuple_add(x):
            return x.x + x.y
        # Register Foo as a pytree node for both torch.utils._pytree and the
        # FX flatten-spec registry. NOTE: this registration is process-global
        # and is not undone afterwards.
        pytree._register_pytree_node(
            Foo,
            lambda x: ([x.a, x.b], None),
            lambda x, _: Foo(x[0], x[1]),
        )
        fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
        def f_custom(x):
            return x.a + x.b
        def f_custom_dict(x):
            return f_sum_dict(x.a) + x.b
        def f_return_custom(x):
            return Foo(x.b, x.a)
        # (function, input-structure) pairs; PH leaves are replaced by random
        # tensors in verify_pytree below.
        tests = [
            (f_sum, [PH, PH, PH]),
            (f_sum, []),
            (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
            (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
            (f_dict_list_map, {5: (PH, PH, PH)}),
            (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
            (f_dict_add, {'a': PH, 'z': []}),
            (f_custom, Foo(PH, PH)),
            (f_custom, Foo(PH, 3)),
            (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
            # (f_return_custom, Foo(PH, PH)),  # Don't currently support output pytrees
            (f_namedtuple_add, Point(PH, PH)),
        ]
        def verify_pytree(f, inp):
            # Materialize the structure: each PH leaf becomes a random tensor.
            val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
            # NOTE(review): len() of this list counts *all* flattened leaves
            # (the ``i == PH`` result is discarded); the placeholder-count
            # asserts below rely on that total. Confirm whether sum() of the
            # PH matches was intended instead.
            num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
            orig_out = f(val)
            nf = symbolic_trace(f, concrete_args={'x': inp})
            self.assertEqual(nf(val), orig_out)
            # Re-run the same graph through the default CodeGen to check that
            # process_inputs/process_outputs round-trip the pytree handling.
            bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
            bare_fx.graph.set_codegen(CodeGen())
            bare_fx.recompile()
            self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
            assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
            # Retracing without concrete_args collapses inputs back to one arg.
            nf = symbolic_trace(nf)
            self.assertEqual(nf(val), orig_out)
            assert "tree_flatten_spec" not in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
            # Retracing with concrete_args restores the flattened form.
            nf = symbolic_trace(nf, concrete_args={'x': inp})
            self.assertEqual(nf(val), orig_out)
            assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
            # The traced module must survive pickling round-trips.
            pickled = pickle.dumps(nf)
            nf = pickle.loads(pickled)
            self.assertEqual(nf(val), orig_out)
        for f, inp in tests:
            verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
    def test_custom_codegen(self):
        # A custom CodeGen that changes the calling convention: the traced
        # module takes a single list argument instead of positional tensors.
        class ListCodeGen(CodeGen):
            def gen_fn_def(self, free_vars, maybe_return_annotation):
                # NOTE: this template is column-sensitive — it is emitted as
                # module-level source, so the def must start at column 0.
                lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
    {', '.join(free_vars)} = args_list"""
                return lst_unpack
            def additional_globals(self):
                # ``List`` must be importable by the generated code.
                return [('List', typing.List)]
            def process_inputs(self, *inputs):
                # Callers pass exactly one list; unpack it for the graph.
                assert(len(inputs) == 1)
                return inputs[0]
        def f(a, b):
            return a + b
        nf = symbolic_trace(f)
        vals = [torch.randn(3), torch.randn(3)]
        self.assertEqual(nf(*vals), f(*vals))
        nf.graph.set_codegen(ListCodeGen())
        nf.recompile()
        # A copy of the graph with the default CodeGen should reproduce the
        # same results when fed through process_inputs/process_outputs.
        bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
        bare_fx.graph.set_codegen(CodeGen())
        bare_fx.recompile()
        # After recompiling, nf is called with the list convention.
        self.assertEqual(nf(vals), f(*vals))
        self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
        # The custom convention must also survive TorchScript compilation.
        ts_f = torch.jit.script(nf)
        self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
    # Run the getitem tests with Tensor.__getitem__ temporarily added to the
    # tracer's method-patch list. The list is module-global, so the entry is
    # always popped in the finally block to avoid leaking into other tests.
    from torch.fx._symbolic_trace import _wrapped_methods_to_patch
    _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
    try:
        TestFX().getitem_inner()
    finally:
        _wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
    """Checks that ``get_signature_for_torch_op`` returns schemas that can
    bind every OpInfo sample input for the corresponding builtin torch op."""
    def setUp(self):
        # Mutable-op checking while tracing is feature-flagged; enable it for
        # these tests and restore the previous value in tearDown.
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    @onlyCPU
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
        # Schemas only exist for C-level builtins, not Python-defined ops.
        if not isinstance(op.op, types.BuiltinFunctionType):
            raise unittest.SkipTest("This path doesn't work on Python functions")
        sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
        schemas = get_signature_for_torch_op(op.op)
        if not schemas:
            raise RuntimeError('No Schemas Returned')
        for sample_input in sample_inputs_itr:
            # Iterate through overloads until we hit a match. If we exit this
            # loop via `else`, we haven't found a match
            for schema in schemas:
                try:
                    bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
                    bound_args.apply_defaults()
                    op(*bound_args.args, **bound_args.kwargs)
                    break
                except TypeError:
                    # This overload cannot bind/execute the sample; try the next.
                    continue
            else:
                raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
    """Guards the backwards-compatibility surface of torch.fx.

    Renders deterministic string signatures for every object marked
    BC-compatible and compares them against checked-in expect files; also
    verifies every public torch.fx symbol carries a BC designation.
    """
    def setUp(self):
        # Show full diffs when an expect-file comparison fails.
        self.maxDiff = None
        # Mutable-op checking while tracing is feature-flagged; enable it for
        # tests and restore the previous value in tearDown.
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    def _fn_to_stable_annotation_str(self, obj):
        # Build a machine-independent signature string for ``obj`` suitable
        # for expect-file comparison in test_function_back_compat.
        fn_name = torch.typename(obj)
        signature = inspect.signature(obj)
        # Raw signature string, used only in error messages for context.
        sig_str = f'{fn_name}{signature}'
        arg_strs = []
        for k, v in signature.parameters.items():
            maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
                if v.annotation is not inspect.Signature.empty else ''
            def default_val_str(val):
                # Render containers recursively, preserving tuple-vs-list
                # brackets and the trailing comma of a 1-element tuple.
                if isinstance(val, (tuple, list)):
                    str_pieces = ['(' if isinstance(val, tuple) else '[']
                    str_pieces.append(', '.join(default_val_str(v) for v in val))
                    if isinstance(val, tuple) and len(str_pieces) == 2:
                        str_pieces.append(',')
                    str_pieces.append(')' if isinstance(val, tuple) else ']')
                    return ''.join(str_pieces)
                # Modules render with a filesystem path in repr(); use the
                # module name only so the string is machine-independent.
                if isinstance(val, types.ModuleType):
                    return f'<module {val.__name__}>'
                # Second case: callables. Callables (such as lambdas) encode their address in
                # their string repr. Don't do that
                if callable(val):
                    return f'<function {val.__name__}>'
                return str(val)
            if v.default is not inspect.Signature.empty:
                # NOTE: rebinds ``default_val_str`` from the helper function to
                # its string result. Safe (if confusing) because the def above
                # re-executes on every loop iteration.
                default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
                maybe_default = f' = {default_val_str}'
            else:
                maybe_default = ''
            maybe_stars = ''
            if v.kind == inspect.Parameter.VAR_POSITIONAL:
                maybe_stars = '*'
            elif v.kind == inspect.Parameter.VAR_KEYWORD:
                maybe_stars = '**'
            arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
        return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
            if signature.return_annotation is not inspect.Signature.empty else ''
        return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
    def _annotation_type_to_stable_str(self, t, sig_str):
        # Stable textual rendering of a single annotation; ``sig_str`` only
        # appears in the error raised for unrecognized types.
        if t is inspect.Signature.empty:
            return ''
        # Forward references arrive as raw strings or typing.ForwardRef
        # (``_ForwardRef`` on older Python versions).
        if isinstance(t, str):
            return f"'{t}'"
        if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
            return t.__forward_arg__
        if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
            return t.__forward_arg__
        trivial_mappings = {
            str : 'str',
            int : 'int',
            float: 'float',
            bool: 'bool',
            torch.dtype: 'torch.dtype',
            torch.Tensor: 'torch.Tensor',
            torch.device: 'torch.device',
            torch.memory_format: 'torch.memory_format',
            slice: 'slice',
            torch.nn.Module: 'torch.nn.modules.module.Module',
            torch.fx.Graph : 'torch.fx.graph.Graph',
            torch.fx.Node : 'torch.fx.node.Node',
            torch.fx.Proxy : 'torch.fx.proxy.Proxy',
            torch.fx.node.Target : 'torch.fx.node.Target',
            torch.fx.node.Argument : 'torch.fx.node.Argument',
            torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
            torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
            torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
            Ellipsis : '...',
            typing.Any: 'Any',
            type(None): 'NoneType',
            None: 'None',
            typing.Iterator: 'Iterator',
        }
        mapping = trivial_mappings.get(t, None)
        if mapping:
            return mapping
        # Generic types: render contained type args recursively, dropping
        # unbound TypeVars.
        contained = getattr(t, '__args__', None) or []
        contained = t if isinstance(t, list) else contained
        if all(isinstance(ct, typing.TypeVar) for ct in contained):
            contained = []
        contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
        contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
        origin = getattr(t, '__origin__', None)
        if origin is None:
            # Unparametrized typing aliases (e.g. bare ``typing.Tuple``) have
            # no __origin__; treat the alias itself as the origin.
            origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
        if origin in {tuple, typing.Tuple}:
            return f'Tuple{contained_type_str}'
        if origin in {typing.Union}:
            # Annoying hack to detect Optional
            if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
                not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
                return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
            return f'Union{contained_type_str}'
        if origin in {dict, typing.Dict}:
            return f'Dict{contained_type_str}'
        if origin in {list, typing.List}:
            return f'List{contained_type_str}'
        if origin in {type, typing.Type}:
            return f'Type{contained_type_str}'
        if isinstance(t, typing.Callable):
            if len(contained) > 0 and contained[0] is not Ellipsis:
                return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
            else:
                return f'Callable{contained_type_str}'
        raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
                           f'Please add support for this type and confirm with the '
                           f'FX team that your signature change is valid.')
    def test_function_back_compat(self):
        # Compare sorted signature strings of all BC-marked functions against
        # the checked-in expect file.
        signature_strs = []
        for obj in _BACK_COMPAT_OBJECTS:
            if not isinstance(obj, type):
                signature_strs.append(self._fn_to_stable_annotation_str(obj))
        signature_strs.sort()
        try:
            self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
        except AssertionError as e:
            msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
                  f"as backwards-compatible has experienced a signature change. See the " \
                  f"above exception context for more information. If this change was " \
                  f"unintended, please revert it. If it was intended, check with the FX " \
                  f"team to ensure that the proper deprecation protocols have been followed " \
                  f"and subsequently --accept the change."
            raise AssertionError(msg)
    def test_class_member_back_compat(self):
        # Compare the sorted public member lists of all BC-marked classes
        # against the checked-in expect file.
        class_method_strs = []
        for obj in _BACK_COMPAT_OBJECTS:
            if isinstance(obj, type):
                public_members = [name for name in obj.__dict__ if not name.startswith('_')]
                class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
        class_method_strs.sort()
        try:
            self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
        except AssertionError as e:
            msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
                  f"as backwards-compatible has experienced change in its public members. See the " \
                  f"above exception context for more information. If this change was " \
                  f"unintended, please revert it. If it was intended, check with the FX " \
                  f"team to ensure that the proper deprecation protocols have been followed " \
                  f"and subsequently --accept the change."
            raise AssertionError(msg)
    def test_public_api_surface(self):
        # Walk torch.fx's public module tree and collect every class/function
        # that has no explicit BC-compatibility marking.
        non_back_compat_objects = {}
        def check_symbols_have_bc_designation(m, prefix):
            if not m.__name__.startswith('torch.fx'):
                return
            if m.__name__.startswith('torch.fx.experimental'):
                return
            for k, v in m.__dict__.items():
                # Skip self-references and private names.
                if v is m:
                    continue
                if k.startswith('_'):
                    continue
                if isinstance(v, types.ModuleType):
                    check_symbols_have_bc_designation(v, prefix + [k])
                elif isinstance(v, type) or isinstance(v, types.FunctionType):
                    if v not in _MARKED_WITH_COMATIBLITY:
                        non_back_compat_objects.setdefault(v)
        check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
        check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
        non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
        # Only want objects in torch.fx
        non_back_compat_strs = [
            s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
        # Only want objects in public namespaces
        non_back_compat_strs = [
            s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
        non_back_compat_strs.sort()
        if len(non_back_compat_strs) != 0:
            raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
                                 f"backwards-compatibility classification! Please decorate these "
                                 f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
                                 f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
    """Auto-generated tests recording which ``torch.nn.functional`` entry
    points can be symbolically traced, and the exact exception class and
    message expected for each untraceable one."""
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    # Dispatch/support helpers that should not get a generated test.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # Filled in by setUpClass with the real functions, which are replaced by
    # a stub returning False for the duration of this class's tests.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}
    # (exception class, message regex) pairs describing why tracing fails.
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
    MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
    # Functionals that are expected NOT to trace, mapped to the expected
    # failure mode from the constants above.
    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "bilinear": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "native_channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "gelu": BUILT_IN_FUNC,
        "hardshrink": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "linear": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pairwise_distance": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "prelu": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,
        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,
        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,
        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,
        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,
        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,
        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
        "normalize" : MUTABLE,
    }
    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )
    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #   internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,
        "group_norm": LEN_ERROR
    }
    @classmethod
    def _get_functional(cls):
        # Collect (name, fn) pairs from torch.nn.functional worth testing:
        # public, callable, and taking at least one annotated Tensor argument
        # (or explicitly listed as lacking annotations).
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list
    @classmethod
    def generate_test_func(cls, func_name, fn):
        # Build one test: trace the functional and check it either succeeds
        # or fails with exactly the expected exception class/message.
        def functional_test(self):
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test
    @classmethod
    def generate_tests(cls):
        # Attach one generated test method per eligible functional.
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)
    @classmethod
    def setUpClass(cls):
        # Replace the has_torch_function* dispatch helpers with a stub that
        # always returns False; originals are saved for tearDownClass.
        def no(*args, **kwargs):
            return False
        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)
    @classmethod
    def tearDownClass(cls):
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])
# Materialize the per-functional tests and the device-parameterized operator
# signature tests at import time, before the test runner collects them.
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Generated smoke tests over torchvision models: symbolically trace each
    model, compare traced vs. eager outputs, then TorchScript-compile the
    traced module and compare again."""
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True
    def tearDown(self):
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
    # (exception class, message regex) pairs describing expected failures.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }
    # Maps model name -> callable that extracts the comparable tensor from
    # the model's (dict or tuple) output structure.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }
    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # NOTE(review): local names are swapped relative to meaning —
                # the tuple is (exception_type, regex), so ``err`` holds the
                # type and ``exc`` the pattern. The assertRaisesRegex argument
                # order below is nevertheless correct.
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)
                if name in self.UNSCRIPTABLE_MODELS:
                    # Same swapped naming as above; call order is correct.
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)
        return run_test
    @classmethod
    def generate_classification_tests(cls):
        for k, v in torchvision_models.__dict__.items():
            # Model factories are lowercase public callables.
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires 299x299 inputs.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                # Detection models take a list of 3D image tensors.
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_tests(cls):
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()
# Vision tests are only generated when torchvision is importable.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()
if __name__ == '__main__':
    run_tests()
| true | true |
f72dbe45902a8df7047b55fff39c740679077883 | 2,057 | py | Python | tests/st/ops/cpu/test_smooth_l1_loss_grad_op.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | tests/st/ops/cpu/test_smooth_l1_loss_grad_op.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | tests/st/ops/cpu/test_smooth_l1_loss_grad_op.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
# Run the whole file in graph mode on CPU.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class Net(nn.Cell):
    """Cell wrapper computing SmoothL1Loss(pred, gt) with configurable sigma."""

    def __init__(self, sigma=1.0):
        super().__init__()
        # Keep the primitive as a member so graph mode can capture it.
        self.loss_fn = P.SmoothL1Loss(sigma)

    def construct(self, pred, gt):
        return self.loss_fn(pred, gt)
class Grad(nn.Cell):
    """Computes input gradients of ``network`` given an output sensitivity."""

    def __init__(self, network):
        super().__init__()
        self.network = network
        # get_all=True: gradients w.r.t. all inputs; sens_param=True: the
        # output sensitivity (dout) is passed explicitly by the caller.
        self.grad_op = GradOperation(get_all=True, sens_param=True)

    def construct(self, pred, gt, dout):
        return self.grad_op(self.network)(pred, gt, dout)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net():
    """Smoke test: SmoothL1Loss input gradients on random float32 data."""
    pred = np.random.randn(2, 4).astype(np.float32)
    gt = np.random.randn(2, 4).astype(np.float32)
    dout = np.random.randn(2, 4).astype(np.float32)
    smooth_l1_loss_grad = Grad(Net())
    output = smooth_l1_loss_grad(Tensor(pred), Tensor(gt), Tensor(dout))
    print("------------- input ---------------")
    print("predict:\n", pred)
    # Fixed typo in the printed label ("grount" -> "ground").
    print("ground truth:\n", gt)
    print("dout:\n", dout)
    print("------------- output ---------------")
    # output[0] is the gradient w.r.t. the first input (pred).
    print("predict grad:\n", output[0].asnumpy())
| 33.177419 | 78 | 0.665046 |
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
# Run the whole file in graph mode on CPU.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class Net(nn.Cell):
    # Thin Cell wrapper around the SmoothL1Loss primitive.
    def __init__(self, sigma=1.0):
        super(Net, self).__init__()
        self.SmoothL1Loss = P.SmoothL1Loss(sigma)
    def construct(self, pred, gt):
        # Elementwise smooth-L1 loss between prediction and ground truth.
        return self.SmoothL1Loss(pred, gt)
class Grad(nn.Cell):
    # Wraps ``network`` to return gradients w.r.t. all of its inputs;
    # sens_param=True means the output sensitivity (dout) is passed in.
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = GradOperation(get_all=True, sens_param=True)
        self.network = network
    def construct(self, pred, gt, dout):
        return self.grad(self.network)(pred, gt, dout)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net():
    # Smoke test: SmoothL1Loss input gradients on random float32 data.
    pred = np.random.randn(2, 4).astype(np.float32)
    gt = np.random.randn(2, 4).astype(np.float32)
    dout = np.random.randn(2, 4).astype(np.float32)
    smooth_l1_loss_grad = Grad(Net())
    output = smooth_l1_loss_grad(Tensor(pred), Tensor(gt), Tensor(dout))
    print("------------- input ---------------")
    print("predict:\n", pred)
    # NOTE(review): label typo "grount truth" (should be "ground truth").
    print("grount truth:\n", gt)
    print("dout:\n", dout)
    print("------------- output ---------------")
    # output[0] is the gradient w.r.t. the first input (pred).
    print("predict grad:\n", output[0].asnumpy())
| true | true |
f72dbe61d537c913f6fac5a7bb4dfb15ab49600c | 693 | py | Python | everyday_wechat/control/onewords/acib.py | youcailize/EverydayWechat | 59f0b1185ad1f84480bd1f2540e18271cc3d9185 | [
"MIT"
] | 1 | 2019-07-15T12:56:11.000Z | 2019-07-15T12:56:11.000Z | everyday_wechat/control/onewords/acib.py | youcailize/EverydayWechat | 59f0b1185ad1f84480bd1f2540e18271cc3d9185 | [
"MIT"
] | null | null | null | everyday_wechat/control/onewords/acib.py | youcailize/EverydayWechat | 59f0b1185ad1f84480bd1f2540e18271cc3d9185 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
从词霸中获取每日一句,带英文。
"""
import requests
from everyday_wechat.utils.common import (
is_json
)
def get_acib_info():
    """Fetch the iCIBA daily sentence (Chinese note + English content).

    :return: str concatenating the English ``content`` and the Chinese
        ``note``, or ``None`` when the request fails or yields no usable JSON.
    """
    print('获取格言信息(双语)...')
    try:
        response = requests.get('http://open.iciba.com/dsapi')
        ok = response.status_code == 200 and is_json(response)
        if ok:
            payload = response.json()
            return '{}{}'.format(payload.get('content'), payload.get('note'))
        print('没有获取到格言数据。')
    except requests.exceptions.RequestException as exc:
        print(exc)
    return None


# Public alias used by the dispatch table of one-word providers.
get_one_words = get_acib_info
| 21.65625 | 61 | 0.608947 |
import requests
from everyday_wechat.utils.common import (
is_json
)
def get_acib_info():
    # Fetch the iCIBA daily sentence (English content + Chinese note);
    # returns the concatenated string, or None on any failure.
    print('获取格言信息(双语)...')
    try:
        resp = requests.get('http://open.iciba.com/dsapi')
        if resp.status_code == 200 and is_json(resp):
            content_dict = resp.json()
            content = content_dict.get('content')
            note = content_dict.get('note')
            return '{}{}'.format(content, note)
        # Non-200 or non-JSON payload: report and fall through to None.
        print('没有获取到格言数据。')
    except requests.exceptions.RequestException as exception:
        print(exception)
    return None
# Public alias used by the dispatch table of one-word providers.
get_one_words = get_acib_info
| true | true |
f72dbf2909a71f1c80158d7c3570317e8c6a98fc | 1,132 | py | Python | examples/javascript/force.py | KmolYuan/networkx | 6bdcdcf07b9d84911697012a2c5833c62fa25992 | [
"BSD-3-Clause"
] | null | null | null | examples/javascript/force.py | KmolYuan/networkx | 6bdcdcf07b9d84911697012a2c5833c62fa25992 | [
"BSD-3-Clause"
] | null | null | null | examples/javascript/force.py | KmolYuan/networkx | 6bdcdcf07b9d84911697012a2c5833c62fa25992 | [
"BSD-3-Clause"
] | null | null | null | """
==========
Javascript
==========
Example of writing JSON format graph data and using the D3 Javascript library to produce an HTML/Javascript drawing.
"""
# Author: Aric Hagberg <aric.hagberg@gmail.com>
# Copyright (C) 2011-2018 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import json
import flask
import networkx as nx
from networkx.readwrite import json_graph
# Build an example graph: two K_6 cliques joined by a 3-node path.
G = nx.barbell_graph(6, 3)
# this d3 example uses the name attribute for the mouse-hover value,
# so add a name to each node
for n in G:
    G.nodes[n]['name'] = n
# write json formatted data
d = json_graph.node_link_data(G)  # node-link format to serialize
# Use a context manager so the file is flushed and closed deterministically
# (the original leaked the handle returned by open()).
with open('force/force.json', 'w') as f:
    json.dump(d, f)
print('Wrote node-link JSON data to force/force.json')
# Serve the file over http to allow for cross origin requests
app = flask.Flask(__name__, static_folder="force")


@app.route('/')
def static_proxy():
    """Serve the static D3 page that renders force/force.json."""
    return app.send_static_file('force.html')


print('\nGo to http://localhost:8000 to see the example\n')
app.run(port=8000)
| 26.952381 | 116 | 0.711131 |
import json
import flask
import networkx as nx
from networkx.readwrite import json_graph
G = nx.barbell_graph(6, 3)
for n in G:
G.nodes[n]['name'] = n
d = json_graph.node_link_data(G)
json.dump(d, open('force/force.json', 'w'))
print('Wrote node-link JSON data to force/force.json')
app = flask.Flask(__name__, static_folder="force")
@app.route('/')
def static_proxy():
return app.send_static_file('force.html')
print('\nGo to http://localhost:8000 to see the example\n')
app.run(port=8000)
| true | true |
f72dbf9e2aef9ed659858bbdb150d757a67b6d8a | 1,648 | py | Python | config/wsgi.py | sahilpysquad/SMT | b03d5d2e32fcda26cdbae35588cfd0f785c02d3a | [
"MIT"
] | null | null | null | config/wsgi.py | sahilpysquad/SMT | b03d5d2e32fcda26cdbae35588cfd0f785c02d3a | [
"MIT"
] | 1 | 2022-03-30T20:23:58.000Z | 2022-03-30T20:23:58.000Z | config/wsgi.py | sahilpysquad/SMT | b03d5d2e32fcda26cdbae35588cfd0f785c02d3a | [
"MIT"
] | null | null | null | """
WSGI config for SMT project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# smt directory.
# ROOT_DIR resolves to the repository root (two levels above this file).
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "smt"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 42.25641 | 79 | 0.800971 | import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "smt"))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true | true |
f72dc035759b2fe9fbacf17e53f4c080e1eee636 | 277 | py | Python | swd_bot/editor/sprites/wonder_sprite.py | dfomin/7wd-bot | a152229a1758cab90d70fe893e2978516a38bacd | [
"MIT"
] | null | null | null | swd_bot/editor/sprites/wonder_sprite.py | dfomin/7wd-bot | a152229a1758cab90d70fe893e2978516a38bacd | [
"MIT"
] | null | null | null | swd_bot/editor/sprites/wonder_sprite.py | dfomin/7wd-bot | a152229a1758cab90d70fe893e2978516a38bacd | [
"MIT"
] | null | null | null | from pyglet.sprite import Sprite
from swd_bot.editor.sprite_loader import SpriteLoader
class WonderSprite(Sprite):
    """Pyglet sprite that renders the wonder card identified by ``wonder_id``."""
    def __init__(self, wonder_id: int):
        # The texture comes from the shared sprite cache.
        super().__init__(SpriteLoader.wonder(wonder_id))
        self.scale = 0.5  # wonder cards are drawn at half their native size
        self.wonder_id = wonder_id  # retained so the editor can map sprite -> wonder
| 21.307692 | 56 | 0.718412 | from pyglet.sprite import Sprite
from swd_bot.editor.sprite_loader import SpriteLoader
class WonderSprite(Sprite):
def __init__(self, wonder_id: int):
super().__init__(SpriteLoader.wonder(wonder_id))
self.scale = 0.5
self.wonder_id = wonder_id
| true | true |
f72dc0cd9eb65702353e75aef2a7ffc49d8d868f | 3,613 | py | Python | scrape_mars.py | skohtz1/web-scrapingHW | 11cf4686286a4fa51ef23a9e0afc5adca21f40c1 | [
"MIT"
] | null | null | null | scrape_mars.py | skohtz1/web-scrapingHW | 11cf4686286a4fa51ef23a9e0afc5adca21f40c1 | [
"MIT"
] | null | null | null | scrape_mars.py | skohtz1/web-scrapingHW | 11cf4686286a4fa51ef23a9e0afc5adca21f40c1 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
from splinter import Browser
import pandas as pd
import time
def init_browser():
    """Return a visible (non-headless) splinter Chrome browser."""
    # @NOTE: Replace the path with your actual path to the chromedriver
    executable_path = {"executable_path": "./chromedriver"}
    return Browser("chrome", **executable_path, headless=False)
def scrape():
    """Scrape several Mars-related sites and bundle the results in one dict.

    Returned keys: weather, mars_facts (HTML table string), hemisphere
    (list of {title, img_url} dicts), feature_image, title_feature,
    summary_feature.

    NOTE(review): depends on live page structure and a visible Chrome
    session; any selector below breaks if the target sites change.
    """
    browser = init_browser()
    url_nasa = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
    # Retrieve page with the requests module
    response_nasa = requests.get(url_nasa)
    # Create BeautifulSoup object; parse with 'html.parser'
    soup_nasa = BeautifulSoup(response_nasa.text, 'html.parser')
    ##finding the title and summary of first article
    results_titles = soup_nasa.find_all('div', class_='content_title')
    summaries = soup_nasa.find_all("div", class_ = "rollover_description_inner")
    title_first = results_titles[0].text.strip()
    summaries_first = summaries[0].text.strip()
    ##finding feature image url
    url_mars_img = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url_mars_img)
    browser.click_link_by_partial_text('FULL IMAGE')
    time.sleep(5)  # crude fixed waits let the JS viewer finish loading
    browser.click_link_by_partial_text('more info')
    time.sleep(5)
    browser.click_link_by_partial_href('spaceimages/images')
    # After the final click the browser URL is the full-size image itself.
    feature_image_url = browser.url
    time.sleep(5)
    ##getting the twitter weather
    url_twitter = "https://twitter.com/marswxreport?lang=en"
    # Retrieve page with the requests module
    response_twitter = requests.get(url_twitter)
    # Create BeautifulSoup object; parse with 'html.parser'
    soup3 = BeautifulSoup(response_twitter.text, 'html.parser')
    # First tweet on the page is assumed to be the latest weather report.
    mars_weather = soup3.find_all("p",class_ = "TweetTextSize TweetTextSize--normal js-tweet-text tweet-text")[0].text
    ##scraping Mars facts
    url_facts = "https://space-facts.com/mars/"
    tables = pd.read_html(url_facts)
    df = tables[0]
    df.columns = ["Parameter", "Values"]
    mars_data_df = df.set_index(["Parameter"])
    mars_data_df.to_html("mars_facts.html")
    mars_data_html = mars_data_df.to_html()
    mars_data_html = mars_data_html.replace("\n", "")
    ##hemisphere
    url_hemis = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(url_hemis)
    time.sleep(5)
    html4 = browser.html
    soup4 = BeautifulSoup(html4, 'html.parser')
    links = []
    # Collect the detail-page links for each hemisphere result.
    for link in soup4.find_all('a'):
        finds = link.get("href")
        if ("/search/map/Mars" in finds):
            links.append(finds)
    links = list(set(links))  # de-duplicate; order is not preserved
    hemisphere_image_urls = []
    # Visit each hemisphere page (by its <h3> title) and grab the full.jpg link.
    for i in range(len(links)):
        dicts1 = {}
        dicts1["title"] = soup4.find_all("h3")[i].text
        browser.click_link_by_partial_text(soup4.find_all("h3")[i].text)
        time.sleep(5)
        n_html = browser.html
        soup5 = BeautifulSoup(n_html, "html.parser")
        for link in soup5.find_all("a"):
            finds = link.get("href")
            if ("/full.jpg" in finds):
                dicts1["img_url"] = finds
        hemisphere_image_urls.append(dicts1)
        browser.back()
    print(hemisphere_image_urls)
    mars_data_dict = {"weather":mars_weather,"mars_facts":mars_data_html,"hemisphere":hemisphere_image_urls,"feature_image": feature_image_url,"title_feature":title_first,"summary_feature":summaries_first}
    return mars_data_dict
| 34.409524 | 205 | 0.673678 | from bs4 import BeautifulSoup
import requests
from splinter import Browser
import pandas as pd
import time
def init_browser():
executable_path = {"executable_path": "./chromedriver"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
url_nasa = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
response_nasa = requests.get(url_nasa)
soup_nasa = BeautifulSoup(response_nasa.text, 'html.parser')
class_='content_title')
summaries = soup_nasa.find_all("div", class_ = "rollover_description_inner")
title_first = results_titles[0].text.strip()
summaries_first = summaries[0].text.strip()
://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url_mars_img)
browser.click_link_by_partial_text('FULL IMAGE')
time.sleep(5)
browser.click_link_by_partial_text('more info')
time.sleep(5)
browser.click_link_by_partial_href('spaceimages/images')
feature_image_url = browser.url
time.sleep(5)
twitter.com/marswxreport?lang=en"
response_twitter = requests.get(url_twitter)
soup3 = BeautifulSoup(response_twitter.text, 'html.parser')
mars_weather = soup3.find_all("p",class_ = "TweetTextSize TweetTextSize--normal js-tweet-text tweet-text")[0].text
tps://space-facts.com/mars/"
tables = pd.read_html(url_facts)
df = tables[0]
df.columns = ["Parameter", "Values"]
mars_data_df = df.set_index(["Parameter"])
mars_data_df.to_html("mars_facts.html")
mars_data_html = mars_data_df.to_html()
mars_data_html = mars_data_html.replace("\n", "")
mis = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url_hemis)
time.sleep(5)
html4 = browser.html
soup4 = BeautifulSoup(html4, 'html.parser')
links = []
for link in soup4.find_all('a'):
finds = link.get("href")
if ("/search/map/Mars" in finds):
links.append(finds)
links = list(set(links))
hemisphere_image_urls = []
for i in range(len(links)):
dicts1 = {}
dicts1["title"] = soup4.find_all("h3")[i].text
browser.click_link_by_partial_text(soup4.find_all("h3")[i].text)
time.sleep(5)
n_html = browser.html
soup5 = BeautifulSoup(n_html, "html.parser")
for link in soup5.find_all("a"):
finds = link.get("href")
if ("/full.jpg" in finds):
dicts1["img_url"] = finds
hemisphere_image_urls.append(dicts1)
browser.back()
print(hemisphere_image_urls)
mars_data_dict = {"weather":mars_weather,"mars_facts":mars_data_html,"hemisphere":hemisphere_image_urls,"feature_image": feature_image_url,"title_feature":title_first,"summary_feature":summaries_first}
return mars_data_dict
| true | true |
f72dc0e9d331a618fb5b836f132c29eba750b4fb | 2,331 | py | Python | tests/smee/test_smee.py | andrew-mcgrath/zeroae-goblet | 711fb5f63714525bf20f98b2092ca89c77eb6c9a | [
"Apache-2.0"
] | 1 | 2021-03-11T23:04:21.000Z | 2021-03-11T23:04:21.000Z | tests/smee/test_smee.py | andrew-mcgrath/zeroae-goblet | 711fb5f63714525bf20f98b2092ca89c77eb6c9a | [
"Apache-2.0"
] | 12 | 2020-03-31T00:43:22.000Z | 2020-05-01T22:35:07.000Z | tests/smee/test_smee.py | andrew-mcgrath/zeroae-goblet | 711fb5f63714525bf20f98b2092ca89c77eb6c9a | [
"Apache-2.0"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) 2020 Zero A.E., LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import pytest
from click.testing import CliRunner
from zeroae.smee import cli
@pytest.fixture()
def smee_server_mock(requests_mock):
    """Mock a smee.io channel replaying a ready/ping/payload SSE stream."""
    url = "mock://smee.io/new"
    requests_mock.get(
        url,
        text="\n".join(
            [
                "event:ready\ndata:{}\n",
                "event:ping\ndata:{}\n",
                'data:{"body":{},"timestamp":1,"query":{}}\n\n',
            ]
        ),
    )
    return url
def test_command_line_interface(smee_server_mock, requests_mock):
    """smee forwards the mocked channel's events to an explicit --target URL."""
    runner = CliRunner()
    args = [f"--url={smee_server_mock}"]
    target_url = "mock://target.io/events"
    requests_mock.post(target_url)  # capture the forwarded POST
    args += [f"--target={target_url}"]
    help_result = runner.invoke(cli.smee, args)
    assert help_result.exit_code == 0
    assert f"Connected {smee_server_mock}" in help_result.output
@pytest.mark.parametrize(
    "port,path", [(None, None), (6000, None), (None, "/events"), (6000, "/events")]
)
def test_command_line_interface_port_path(port, path, smee_server_mock, requests_mock):
    """smee forwards to 127.0.0.1 honoring --port/--path (defaults 3000 and /)."""
    runner = CliRunner()
    args = [f"--url={smee_server_mock}"]
    if port is None:
        port = 3000  # CLI default port when --port is omitted
    else:
        args += [f"--port={port}"]
    if path is None:
        path = "/"  # CLI default path when --path is omitted
    else:
        args += [f"--path={path}"]
    target_url = f"http://127.0.0.1:{port}{path}"
    requests_mock.post(target_url)  # the forwarded event must POST here
    help_result = runner.invoke(cli.smee, args)
    assert help_result.exit_code == 0
    assert f"Connected {smee_server_mock}" in help_result.output
| 30.671053 | 87 | 0.597169 |
import pytest
from click.testing import CliRunner
from zeroae.smee import cli
@pytest.fixture()
def smee_server_mock(requests_mock):
url = "mock://smee.io/new"
requests_mock.get(
url,
text="\n".join(
[
"event:ready\ndata:{}\n",
"event:ping\ndata:{}\n",
'data:{"body":{},"timestamp":1,"query":{}}\n\n',
]
),
)
return url
def test_command_line_interface(smee_server_mock, requests_mock):
runner = CliRunner()
args = [f"--url={smee_server_mock}"]
target_url = "mock://target.io/events"
requests_mock.post(target_url)
args += [f"--target={target_url}"]
help_result = runner.invoke(cli.smee, args)
assert help_result.exit_code == 0
assert f"Connected {smee_server_mock}" in help_result.output
@pytest.mark.parametrize(
"port,path", [(None, None), (6000, None), (None, "/events"), (6000, "/events")]
)
def test_command_line_interface_port_path(port, path, smee_server_mock, requests_mock):
runner = CliRunner()
args = [f"--url={smee_server_mock}"]
if port is None:
port = 3000
else:
args += [f"--port={port}"]
if path is None:
path = "/"
else:
args += [f"--path={path}"]
target_url = f"http://127.0.0.1:{port}{path}"
requests_mock.post(target_url)
help_result = runner.invoke(cli.smee, args)
assert help_result.exit_code == 0
assert f"Connected {smee_server_mock}" in help_result.output
| true | true |
f72dc0f385c5eddcac7526a8a648ad7ab027af1b | 319 | py | Python | src/rtdb/admin.py | UNINETT/django-rtdb | a8af13c1756581fee0a02a9da9cbb4d252d77dab | [
"MIT"
] | null | null | null | src/rtdb/admin.py | UNINETT/django-rtdb | a8af13c1756581fee0a02a9da9cbb4d252d77dab | [
"MIT"
] | null | null | null | src/rtdb/admin.py | UNINETT/django-rtdb | a8af13c1756581fee0a02a9da9cbb4d252d77dab | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import (
Customfield,
CustomfieldValue,
Queue,
TicketCustomfieldValue,
Ticket,
)
# Expose the RT mirror tables in the Django admin with the default ModelAdmin.
admin.site.register(Customfield)
admin.site.register(CustomfieldValue)
admin.site.register(Queue)
admin.site.register(TicketCustomfieldValue)
admin.site.register(Ticket)
| 19.9375 | 43 | 0.780564 | from django.contrib import admin
from .models import (
Customfield,
CustomfieldValue,
Queue,
TicketCustomfieldValue,
Ticket,
)
admin.site.register(Customfield)
admin.site.register(CustomfieldValue)
admin.site.register(Queue)
admin.site.register(TicketCustomfieldValue)
admin.site.register(Ticket)
| true | true |
f72dc137ecb61c7cbc3546578c7ef0aae7aa9729 | 779 | py | Python | deps/src/boost_1_65_1/libs/parameter/test/python_test.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | deps/src/boost_1_65_1/libs/parameter/test/python_test.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | deps/src/boost_1_65_1/libs/parameter/test/python_test.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | # Copyright Daniel Wallin 2006. Distributed under the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from python_test_ext import X
>>> x = X(y = 'baz')
>>> x.value
'foobaz'
>>> x.f(1,2)
3
>>> x.f(1,2,3)
6
>>> x.f(1,2, z = 3)
6
>>> x.f(z = 3, y = 2, x = 1)
6
>>> x.g()
'foobar'
>>> x.g(y = "baz")
'foobaz'
>>> x.g(x = "baz")
'bazbar'
>>> x.g(y = "foo", x = "bar")
'barfoo'
>>> y = x.h(x = "bar", y = "foo")
>>> assert x == y
>>> y = x(0)
>>> assert x == y
'''
def run(args = None):
    """Execute this module's doctests.

    If *args* is given it replaces ``sys.argv`` before the tests run.
    Returns the ``doctest.testmod`` result for the python_test module.
    """
    import doctest
    import python_test
    if args is not None:
        import sys
        sys.argv = args
    return doctest.testmod(python_test)
if __name__ == '__main__':
    import sys
    # testmod returns (failure_count, test_count); exit non-zero on failures.
    sys.exit(run()[0])
| 18.547619 | 66 | 0.557125 |
def run(args = None):
if args is not None:
import sys
sys.argv = args
import doctest, python_test
return doctest.testmod(python_test)
if __name__ == '__main__':
import sys
sys.exit(run()[0])
| true | true |
f72dc1c2dc76d85adb0735b6801cc6df39987105 | 734 | py | Python | 11/test_cells.py | euribates/advent_of_code_2018 | 133fac9b01cf46edf9ec8c6ad9cef785d6858c70 | [
"MIT"
] | null | null | null | 11/test_cells.py | euribates/advent_of_code_2018 | 133fac9b01cf46edf9ec8c6ad9cef785d6858c70 | [
"MIT"
] | null | null | null | 11/test_cells.py | euribates/advent_of_code_2018 | 133fac9b01cf46edf9ec8c6ad9cef785d6858c70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pytest
from tools import Cell
class Sample:
    """One fuel-cell test case: grid serial, coordinates and expected power."""

    def __init__(self, serial, x, y, target):
        self.serial = serial
        self.x = x
        self.y = y
        self.target = target

    def __str__(self):
        # Human-readable id; pytest uses it via ids=str on the fixture.
        return (
            f'Cell at {self.x}x{self.y}, '
            f'grid serial number {self.serial}:'
            f'power level is {self.target}'
        )
# Known (serial, x, y) -> power-level cases from the AoC 2018 day 11 statement;
# `target` is the value Cell(serial)(x, y) must produce.
samples = [
    Sample(8, 3, 5, 4),
    Sample(57, 122,79, -5),
    Sample(39, 217, 196, 0),
    Sample(71, 101, 153, 4),
]
@pytest.fixture(params=samples, ids=str)
def sample(request):
    """Yield each Sample case in turn; test ids come from Sample.__str__."""
    return request.param
def test_cell(sample):
    """Cell(serial) evaluated at (x, y) must match the published power level."""
    cell = Cell(sample.serial)
    assert cell(sample.x, sample.y) == sample.target
| 19.315789 | 54 | 0.572207 |
import pytest
from tools import Cell
class Sample:
def __init__(self, serial, x, y, target):
self.serial = serial
self.x = x
self.y = y
self.target = target
def __str__(self):
return f'Cell at {self.x}x{self.y}, ' \
f'grid serial number {self.serial}:' \
f'power level is {self.target}'
samples = [
Sample(8, 3, 5, 4),
Sample(57, 122,79, -5),
Sample(39, 217, 196, 0),
Sample(71, 101, 153, 4),
]
@pytest.fixture(params=samples, ids=str)
def sample(request):
return request.param
def test_cell(sample):
cell = Cell(sample.serial)
assert cell(sample.x, sample.y) == sample.target
| true | true |
f72dc2158c5a2554b93e37af62704d6600ade621 | 7,011 | py | Python | Class Projects/CS545(MachineLearning)/qLearning/qlearn.py | Zosit/Useful-Reusable-Code | e5eab12f1ebcc6f16e456a7515ff8cc068b5ab16 | [
"MIT"
] | null | null | null | Class Projects/CS545(MachineLearning)/qLearning/qlearn.py | Zosit/Useful-Reusable-Code | e5eab12f1ebcc6f16e456a7515ff8cc068b5ab16 | [
"MIT"
] | null | null | null | Class Projects/CS545(MachineLearning)/qLearning/qlearn.py | Zosit/Useful-Reusable-Code | e5eab12f1ebcc6f16e456a7515ff8cc068b5ab16 | [
"MIT"
] | 1 | 2020-06-07T15:37:57.000Z | 2020-06-07T15:37:57.000Z | print(__doc__)
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import math
from decimal import *
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import csv
from random import randint
#init data
EPISODE_COUNT = 5000
ACTION_COUNT = 200
trainingreward = np.zeros(50)
#init Q array (5 state value dimensions, 5 action dimension)
Qarr = np.zeros((3, 3, 3, 3, 3, 5))
#greedy selection variable (multiplied by 100 for simplicity)
epsilon = 100
for i in range(0, EPISODE_COUNT):
#init board (0 wall, 1 blank, 2 can)
board = np.zeros((12, 12))
for j in range(0, 10):
for k in range(0, 10):
board[j+1, k+1] = randint(1, 2)
#init bot location (horizontal 0, vertical 1 from top left)
botloc = np.zeros(2)
botloc[0] = randint(1, 10)
botloc[1] = randint(1, 10)
episodereward = 0
for j in range(0, ACTION_COUNT):
#observestate (self, up, left, right, down)
state = np.zeros(5)
#self
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
#greedy action selection
if (randint(0, 100) > epsilon):
#do greedy
randoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])
else:
#do random action (0 can, 1 up, 2 left, 3 right, 4 down)
randoma = randint(0, 4)
#save qtable location
oldq = np.zeros(6)
oldq[0] = state[0]
oldq[1] = state[1]
oldq[2] = state[2]
oldq[3] = state[3]
oldq[4] = state[4]
oldq[5] = randoma
#take action get reward
#can grab
if(randoma == 0):
#can grabbed
if(state[0] == 2):
#remove can
board[int(botloc[0]), int(botloc[1])] = 1
reward = 10
#can not grabbed
else:
reward = -1
#move up
if(randoma == 1):
#wall
if(state[1] == 0):
reward = -5
#no wall
else:
botloc[1] = botloc[1] - 1
reward = 0
#move left
if(randoma == 2):
#wall
if(state[2] == 0):
reward = -5
#no wall
else:
botloc[0] = botloc[0] - 1
reward = 0
#move right
if(randoma == 3):
#wall
if(state[3] == 0):
reward = -5
#no wall
else:
botloc[0] = botloc[0] + 1
reward = 0
#move down
if(randoma == 4):
#wall
if(state[4] == 0):
reward = -5
#no wall
else:
botloc[1] = botloc[1] + 1
reward = 0
#print "movement data"
#print state
#print randoma
#updatestate
state = np.zeros(5)
#self
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
#calculate best Qtable action value in new state
maxq = Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:].max()
#update Q table
#if(oldq[0] == 1 and Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), 0] == Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), :].max()):
# print "ERROR"
Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.2 * (reward + 0.5 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])
episodereward = episodereward + reward
#decrement epsilon
if(i % 50 == 49 and epsilon > 10):
epsilon = epsilon - 1
if(i % 100 == 99 ):
trainingreward[(int(i / 100))] = int(episodereward)
#save Training reward data
#trainingreward.to_csv('TrainReward.csv')
np.savetxt('TrainReward.csv', trainingreward, delimiter=',')
Qold = Qarr
#Test runs
testrewards = np.zeros(EPISODE_COUNT)
for i in range(0, EPISODE_COUNT):
#init board (0 wall, 1 blank, 2 can)
board = np.zeros((12, 12))
for j in range(0, 10):
for k in range(0, 10):
board[j+1, k+1] = randint(1, 2)
#init bot location (horizontal 0, vertical 1 from top left)
botloc = np.zeros(2)
botloc[0] = randint(1, 10)
botloc[1] = randint(1, 10)
episodereward = 0
for j in range(0, ACTION_COUNT):
#observestate (self, up, left, right, down)
state = np.zeros(5)
#self
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
#greedy action selection
randoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])
#save qtable location
oldq = np.zeros(6)
oldq[0] = state[0]
oldq[1] = state[1]
oldq[2] = state[2]
oldq[3] = state[3]
oldq[4] = state[4]
oldq[5] = randoma
#take action get reward
#can grab
if(randoma == 0):
#can grabbed
if(state[0] == 2):
#remove can
board[int(botloc[0]), int(botloc[1])] = 1
reward = 10
#can not grabbed
else:
reward = -1
#move up
elif(randoma == 1):
#wall
if(state[1] == 0):
reward = -5
#no wall
else:
botloc[1] = botloc[1] - 1
reward = 0
#move left
elif(randoma == 2):
#wall
if(state[2] == 0):
reward = -5
#no wall
else:
botloc[0] = botloc[0] - 1
reward = 0
#move right
elif(randoma == 3):
#wall
if(state[3] == 0):
reward = -5
#no wall
else:
botloc[0] = botloc[0] + 1
reward = 0
#move down
elif(randoma == 4):
#wall
if(state[4] == 0):
reward = -5
#no wall
else:
botloc[1] = botloc[1] + 1
reward = 0
else:
print("ERROR")
#print "movement data"
#print state
#print randoma
#updatestate
state = np.zeros(5)
#self
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
#maxq = max(Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:])
#update Q table
#Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.01 * (reward + 0.9 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])
episodereward = episodereward + reward
testrewards[i] = episodereward
print(np.mean(testrewards))
print(np.std(testrewards))
| 28.616327 | 305 | 0.602482 | print(__doc__)
import matplotlib
matplotlib.use('Agg')
import math
from decimal import *
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import csv
from random import randint
EPISODE_COUNT = 5000
ACTION_COUNT = 200
trainingreward = np.zeros(50)
Qarr = np.zeros((3, 3, 3, 3, 3, 5))
epsilon = 100
for i in range(0, EPISODE_COUNT):
board = np.zeros((12, 12))
for j in range(0, 10):
for k in range(0, 10):
board[j+1, k+1] = randint(1, 2)
botloc = np.zeros(2)
botloc[0] = randint(1, 10)
botloc[1] = randint(1, 10)
episodereward = 0
for j in range(0, ACTION_COUNT):
state = np.zeros(5)
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
if (randint(0, 100) > epsilon):
randoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])
else:
randoma = randint(0, 4)
oldq = np.zeros(6)
oldq[0] = state[0]
oldq[1] = state[1]
oldq[2] = state[2]
oldq[3] = state[3]
oldq[4] = state[4]
oldq[5] = randoma
if(randoma == 0):
if(state[0] == 2):
board[int(botloc[0]), int(botloc[1])] = 1
reward = 10
else:
reward = -1
if(randoma == 1):
if(state[1] == 0):
reward = -5
else:
botloc[1] = botloc[1] - 1
reward = 0
if(randoma == 2):
if(state[2] == 0):
reward = -5
else:
botloc[0] = botloc[0] - 1
reward = 0
if(randoma == 3):
if(state[3] == 0):
reward = -5
else:
botloc[0] = botloc[0] + 1
reward = 0
if(randoma == 4):
if(state[4] == 0):
reward = -5
else:
botloc[1] = botloc[1] + 1
reward = 0
state = np.zeros(5)
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
maxq = Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:].max()
Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.2 * (reward + 0.5 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])
episodereward = episodereward + reward
if(i % 50 == 49 and epsilon > 10):
epsilon = epsilon - 1
if(i % 100 == 99 ):
trainingreward[(int(i / 100))] = int(episodereward)
np.savetxt('TrainReward.csv', trainingreward, delimiter=',')
Qold = Qarr
testrewards = np.zeros(EPISODE_COUNT)
for i in range(0, EPISODE_COUNT):
board = np.zeros((12, 12))
for j in range(0, 10):
for k in range(0, 10):
board[j+1, k+1] = randint(1, 2)
botloc = np.zeros(2)
botloc[0] = randint(1, 10)
botloc[1] = randint(1, 10)
episodereward = 0
for j in range(0, ACTION_COUNT):
state = np.zeros(5)
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
randoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])
oldq = np.zeros(6)
oldq[0] = state[0]
oldq[1] = state[1]
oldq[2] = state[2]
oldq[3] = state[3]
oldq[4] = state[4]
oldq[5] = randoma
if(randoma == 0):
if(state[0] == 2):
board[int(botloc[0]), int(botloc[1])] = 1
reward = 10
else:
reward = -1
elif(randoma == 1):
if(state[1] == 0):
reward = -5
else:
botloc[1] = botloc[1] - 1
reward = 0
elif(randoma == 2):
if(state[2] == 0):
reward = -5
else:
botloc[0] = botloc[0] - 1
reward = 0
elif(randoma == 3):
if(state[3] == 0):
reward = -5
else:
botloc[0] = botloc[0] + 1
reward = 0
elif(randoma == 4):
if(state[4] == 0):
reward = -5
else:
botloc[1] = botloc[1] + 1
reward = 0
else:
print("ERROR")
state = np.zeros(5)
state[0] = board[int(botloc[0]), int(botloc[1])]
state[1] = board[int(botloc[0]), int(botloc[1] - 1)]
state[2] = board[int(botloc[0] - 1), int(botloc[1])]
state[3] = board[int(botloc[0] + 1), int(botloc[1])]
state[4] = board[int(botloc[0]), int(botloc[1] + 1)]
episodereward = episodereward + reward
testrewards[i] = episodereward
print(np.mean(testrewards))
print(np.std(testrewards))
| true | true |
f72dc348256276279d0b58d7bb73672d0b303c5a | 99 | py | Python | releasenotes_project/releasenotes/apps.py | ajduncan/releasenotes | 9c97222a554f01c253e1ba07b3dbd67382edd5a9 | [
"MIT"
] | null | null | null | releasenotes_project/releasenotes/apps.py | ajduncan/releasenotes | 9c97222a554f01c253e1ba07b3dbd67382edd5a9 | [
"MIT"
] | null | null | null | releasenotes_project/releasenotes/apps.py | ajduncan/releasenotes | 9c97222a554f01c253e1ba07b3dbd67382edd5a9 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ReleasenotesConfig(AppConfig):
name = 'releasenotes'
| 16.5 | 36 | 0.777778 | from django.apps import AppConfig
class ReleasenotesConfig(AppConfig):
name = 'releasenotes'
| true | true |
f72dc3ca8aa596b8bc7e93a42cd965a4678d9bb0 | 1,626 | py | Python | rates_classify/rdf.py | xhades/rates_classify | 225627dad22c162023bc6b5e4d8f5881c5a6f354 | [
"MIT"
] | 7 | 2017-12-23T05:34:01.000Z | 2021-01-03T10:10:03.000Z | rates_classify/rdf.py | xhades/rates_classify | 225627dad22c162023bc6b5e4d8f5881c5a6f354 | [
"MIT"
] | null | null | null | rates_classify/rdf.py | xhades/rates_classify | 225627dad22c162023bc6b5e4d8f5881c5a6f354 | [
"MIT"
] | 3 | 2019-05-23T20:15:44.000Z | 2020-01-14T07:27:58.000Z | # !/usr/bin/env python
# -*-coding:utf-8-*-
"""
@author: xhades
@Date: 2017/12/28
"""
# 随机森林分类器
import numpy as np
from numpy import *
from numpy import array, argmax
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pickle
from sklearn.ensemble import RandomForestClassifier as RDF
np.set_printoptions(threshold=np.inf)
# 训练集测试集 3/7分割
def train(xFile, yFile):
    """Train and evaluate a random-forest classifier on character embeddings.

    Parameters
    ----------
    xFile : str
        Path to a pickle file holding the embedded samples (one 30x128
        embedding per sample).
    yFile : str
        Path to a text file with one class label per line, aligned with
        the samples in ``xFile``.

    Prints the accuracy on a held-out 30% test split.
    """
    # Load the pre-computed character embeddings.
    with open(xFile, "rb") as file_r:
        X = pickle.load(file_r)

    # Read labels (one per line) and integer-encode them.
    with open(yFile, "r") as yFile_r:
        labelLines = [line.strip("\n") for line in yFile_r.readlines()]
    values = array(labelLines)
    labelEncoder = LabelEncoder()
    # fit_transform already returns a 1-D integer array; the original code
    # reshaped it to (n, 1) and back to (n,) for no effect.
    Y = labelEncoder.fit_transform(values)

    # Flatten each sample to 1-D; infer the sample count from the labels
    # instead of hard-coding 212841.
    X = reshape(X, (len(values), -1))

    # 70/30 train/test split with a fixed seed for reproducibility.
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.3, random_state=42)

    # "gini" is the CART impurity criterion; "entropy" would use
    # information gain (as in ID3/C4.5) instead.
    clf = RDF(criterion="gini")
    clf.fit(X_train, Y_train)

    # Vectorized accuracy instead of the original manual counting loop.
    predict = clf.predict(X_test)
    accuracy = np.mean(predict == Y_test)
    print("RandomForest Accuracy is:", accuracy)
if __name__ == "__main__":
    # Default input locations, relative to the project root.
    xFile = "Res/char_embedded.pkl"
    yFile = "data/label.txt"
    print("Start Training.....")
    train(xFile, yFile)
    print("End.....")
| 25.015385 | 114 | 0.673432 |
import numpy as np
from numpy import *
from numpy import array, argmax
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pickle
from sklearn.ensemble import RandomForestClassifier as RDF
np.set_printoptions(threshold=np.inf)
def train(xFile, yFile):
with open(xFile, "rb") as file_r:
X = pickle.load(file_r)
X = reshape(X, (212841, -1))
with open(yFile, "r") as yFile_r:
labelLines = [_.strip("\n") for _ in yFile_r.readlines()]
values = array(labelLines)
labelEncoder = LabelEncoder()
integerEncoded = labelEncoder.fit_transform(values)
integerEncoded = integerEncoded.reshape(len(integerEncoded), 1)
Y = integerEncoded.reshape(212841, )
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
clf = RDF(criterion="gini")
clf.fit(X_train, Y_train)
predict = clf.predict(X_test)
count = 0
for p, t in zip(predict, Y_test):
if p == t:
count += 1
print("RandomForest Accuracy is:", count/len(Y_test))
if __name__ == "__main__":
xFile = "Res/char_embedded.pkl"
yFile = "data/label.txt"
print("Start Training.....")
train(xFile, yFile)
print("End.....")
| true | true |
f72dc43a5ca4a9318d7545458b4eef42123d8e82 | 1,647 | py | Python | test/test_static_user_group.py | jensenbox/python-jamf | 85213085b1064a00375a7aa7df5e33c19f5178eb | [
"RSA-MD"
] | 1 | 2021-04-20T15:28:57.000Z | 2021-04-20T15:28:57.000Z | test/test_static_user_group.py | jensenbox/python-jamf | 85213085b1064a00375a7aa7df5e33c19f5178eb | [
"RSA-MD"
] | null | null | null | test/test_static_user_group.py | jensenbox/python-jamf | 85213085b1064a00375a7aa7df5e33c19f5178eb | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import jamf
from jamf.models.static_user_group import StaticUserGroup # noqa: E501
from jamf.rest import ApiException
class TestStaticUserGroup(unittest.TestCase):
    """Unit-test stubs for the StaticUserGroup model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a StaticUserGroup fixture.

        With ``include_optional=True`` the instance carries both the
        required and the optional parameters; otherwise only the
        required ones are set.
        """
        if not include_optional:
            return StaticUserGroup(
            )
        return StaticUserGroup(
            id = 1,
            name = 'Grade School Teachers'
        )

    def testStaticUserGroup(self):
        """Instantiate StaticUserGroup both with and without optionals."""
        required_only = self.make_instance(include_optional=False)
        with_optionals = self.make_instance(include_optional=True)


if __name__ == '__main__':
    unittest.main()
| 30.5 | 342 | 0.681846 |
from __future__ import absolute_import
import unittest
import datetime
import jamf
from jamf.models.static_user_group import StaticUserGroup
from jamf.rest import ApiException
class TestStaticUserGroup(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
include_optional :
return StaticUserGroup(
id = 1,
name = 'Grade School Teachers'
)
else :
return StaticUserGroup(
)
def testStaticUserGroup(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true | true |
f72dc5b5a14de2068f7204af961ecf443feaa41d | 2,652 | py | Python | IMLearn/learners/regressors/linear_regression.py | TomeRozen/IML.HUJI | 84b0d835a2a4dd4f52ea415e36382cb25a9eebdc | [
"MIT"
] | null | null | null | IMLearn/learners/regressors/linear_regression.py | TomeRozen/IML.HUJI | 84b0d835a2a4dd4f52ea415e36382cb25a9eebdc | [
"MIT"
] | null | null | null | IMLearn/learners/regressors/linear_regression.py | TomeRozen/IML.HUJI | 84b0d835a2a4dd4f52ea415e36382cb25a9eebdc | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import pinv
class LinearRegression(BaseEstimator):
    """
    Linear Regression Estimator

    Solving Ordinary Least Squares optimization problem
    """

    def __init__(self, include_intercept: bool = True) -> LinearRegression:
        """
        Instantiate a linear regression estimator

        Parameters
        ----------
        include_intercept: bool, default=True
            Should fitted model include an intercept or not

        Attributes
        ----------
        include_intercept_: bool
            Should fitted model include an intercept or not

        coefs_: ndarray of shape (n_features,) or (n_features+1,)
            Coefficients vector fitted by linear regression. To be set in
            `LinearRegression.fit` function.
        """
        super().__init__()
        self.include_intercept_, self.coefs_ = include_intercept, None

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """
        Fit Least Squares model to given samples

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for

        y : ndarray of shape (n_samples, )
            Responses of input data to fit to

        Notes
        -----
        Fits model with or without an intercept depending on value of `self.include_intercept_`
        """
        if self.include_intercept_:
            # Prepend a column of ones so the first coefficient acts as the intercept.
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        # BUG FIX: previously assigned to `self.coefs`, leaving the documented
        # `coefs_` attribute (set to None in __init__) forever unset.
        self.coefs_ = pinv(X) @ y

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predict responses for given samples using fitted estimator

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        if self.include_intercept_:
            # Must mirror the design-matrix augmentation performed in `_fit`.
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        return X @ self.coefs_

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """
        Evaluate performance under MSE loss function

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples

        y : ndarray of shape (n_samples, )
            True labels of test samples

        Returns
        -------
        loss : float
            Performance under MSE loss function
        """
        # `mean_square_error` was referenced but never imported in this module;
        # compute the mean squared error inline instead.
        return float(np.mean((y - self.predict(X)) ** 2))
| 29.142857 | 95 | 0.585596 | from __future__ import annotations
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import pinv
class LinearRegression(BaseEstimator):
def __init__(self, include_intercept: bool = True) -> LinearRegression:
super().__init__()
self.include_intercept_, self.coefs_ = include_intercept, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
if self.include_intercept_:
X = np.hstack((np.ones((X.shape[0], 1)), X))
self.coefs = pinv(X)@y
def _predict(self, X: np.ndarray) -> np.ndarray:
if self.include_intercept_:
X = np.hstack((np.ones((X.shape[0], 1)), X))
return X @ self.coefs
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
return mean_square_error(y, self.predict(X))
| true | true |
f72dc75f35b28a2c6889223dac8aa63a7be15727 | 2,462 | py | Python | functional_tests/test_all_users.py | davidwurster/taskbuster_project | c9d624ac6cae20d2cd1dedec0236731a2c9e1822 | [
"MIT"
] | null | null | null | functional_tests/test_all_users.py | davidwurster/taskbuster_project | c9d624ac6cae20d2cd1dedec0236731a2c9e1822 | [
"MIT"
] | null | null | null | functional_tests/test_all_users.py | davidwurster/taskbuster_project | c9d624ac6cae20d2cd1dedec0236731a2c9e1822 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import date
from selenium import webdriver
from django.urls import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils.translation import activate
from django.utils import formats
class HomeNewVisitorTest(StaticLiveServerTestCase):
    """Functional (browser-driven) tests for the public home page.

    NOTE(review): uses `find_element_by_*`, which was removed in Selenium 4 —
    presumably the project pins Selenium 3; confirm.
    """

    def setUp(self):
        # Fresh Firefox session per test; implicit wait gives pages 3s to load.
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(3)
        activate('en')  # default translation language for each test

    def tearDown(self):
        self.browser.quit()

    def get_full_url(self, namespace):
        # Resolve a URL name against the live test server's base URL.
        return self.live_server_url + reverse(namespace)

    def test_home_title(self):
        # Page <title> should brand the site.
        self.browser.get(self.get_full_url("home"))
        self.assertIn("TaskBuster", self.browser.title)

    def test_h1_css(self):
        # The main heading carries the expected brand color.
        self.browser.get(self.get_full_url("home"))
        h1 = self.browser.find_element_by_tag_name("h1")
        self.assertEqual(h1.value_of_css_property("color"),
                         "rgb(200, 50, 255)")

    def test_home_files(self):
        # robots.txt and humans.txt must be served (not 404).
        self.browser.get(self.live_server_url + "/robots.txt")
        self.assertNotIn("Not Found", self.browser.title)
        self.browser.get(self.live_server_url + "/humans.txt")
        self.assertNotIn("Not Found", self.browser.title)

    def test_internationalization(self):
        # English and Catalan translations of the heading.
        for lang, h1_text in [('en', 'Welcome to TaskBuster!'),
                              ('ca', 'Benvingut a TaskBuster!')]:
            activate(lang)
            self.browser.get(self.get_full_url("home"))
            h1 = self.browser.find_element_by_tag_name("h1")
            self.assertEqual(h1.text, h1_text)

    def test_localization(self):
        # The localized date element must match Django's l10n formatting
        # for each active language.
        today = date.today()
        for lang in ['en', 'ca']:
            activate(lang)
            self.browser.get(self.get_full_url("home"))
            local_date = self.browser.find_element_by_id("local-date")
            non_local_date = self.browser.find_element_by_id("non-local-date")
            self.assertEqual(formats.date_format(today, use_l10n=True), local_date.text)
            # self.assertEqual(today.strftime('%b. %d, %Y'), non_local_date.text)

    def test_time_zone(self):
        # The three rendered clocks (server tz, UTC, New York) should differ
        # where expected.
        self.browser.get(self.get_full_url("home"))
        tz = self.browser.find_element_by_id("time-tz").text
        utc = self.browser.find_element_by_id("time-utc").text
        ny = self.browser.find_element_by_id("time-ny").text
        self.assertNotEqual(tz, utc)
        self.assertNotIn(ny, [tz, utc])
| 38.46875 | 88 | 0.652315 |
from datetime import date
from selenium import webdriver
from django.urls import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils.translation import activate
from django.utils import formats
class HomeNewVisitorTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
activate('en')
def tearDown(self):
self.browser.quit()
def get_full_url(self, namespace):
return self.live_server_url + reverse(namespace)
def test_home_title(self):
self.browser.get(self.get_full_url("home"))
self.assertIn("TaskBuster", self.browser.title)
def test_h1_css(self):
self.browser.get(self.get_full_url("home"))
h1 = self.browser.find_element_by_tag_name("h1")
self.assertEqual(h1.value_of_css_property("color"),
"rgb(200, 50, 255)")
def test_home_files(self):
self.browser.get(self.live_server_url + "/robots.txt")
self.assertNotIn("Not Found", self.browser.title)
self.browser.get(self.live_server_url + "/humans.txt")
self.assertNotIn("Not Found", self.browser.title)
def test_internationalization(self):
for lang, h1_text in [('en', 'Welcome to TaskBuster!'),
('ca', 'Benvingut a TaskBuster!')]:
activate(lang)
self.browser.get(self.get_full_url("home"))
h1 = self.browser.find_element_by_tag_name("h1")
self.assertEqual(h1.text, h1_text)
def test_localization(self):
today = date.today()
for lang in ['en', 'ca']:
activate(lang)
self.browser.get(self.get_full_url("home"))
local_date = self.browser.find_element_by_id("local-date")
non_local_date = self.browser.find_element_by_id("non-local-date")
self.assertEqual(formats.date_format(today, use_l10n=True), local_date.text)
def test_time_zone(self):
self.browser.get(self.get_full_url("home"))
tz = self.browser.find_element_by_id("time-tz").text
utc = self.browser.find_element_by_id("time-utc").text
ny = self.browser.find_element_by_id("time-ny").text
self.assertNotEqual(tz, utc)
self.assertNotIn(ny, [tz, utc])
| true | true |
f72dc763ddb11135ead84d926783faef8b242635 | 1,318 | py | Python | sleepless/tests/test_sleepless.py | Shagrat/eye-of-sauron | a80072f7af0fd6d324532eb405e831e34267d797 | [
"MIT"
] | null | null | null | sleepless/tests/test_sleepless.py | Shagrat/eye-of-sauron | a80072f7af0fd6d324532eb405e831e34267d797 | [
"MIT"
] | null | null | null | sleepless/tests/test_sleepless.py | Shagrat/eye-of-sauron | a80072f7af0fd6d324532eb405e831e34267d797 | [
"MIT"
] | null | null | null | import pytest
import os
from app import create_app
@pytest.fixture
def app(monkeypatch):
    """Flask app fixture whose site store is a throw-away YAML file.

    Exposes the path both via the DATA_PATH env var and app config, and
    deletes the file after the test.
    """
    app = create_app()
    # Compute the fixture path once instead of repeating the expression
    # three times as the original did.
    data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sites.yml')
    monkeypatch.setenv('DATA_PATH', data_path)
    app.config.update(
        DATA_PATH=data_path
    )
    yield app
    try:
        os.remove(data_path)
    except OSError:
        # File may not exist if the test never created a site; a bare
        # `except: pass` previously swallowed every error here.
        pass
@pytest.fixture
def client(app):
    # Flask test client bound to the configured `app` fixture.
    return app.test_client()
def test_dashboard(app, client):
    # The dashboard root page should render successfully.
    rv = client.get('/')
    assert rv.status == '200 OK'
def test_sites_list(app, client):
    # The API index should respond with 200.
    rv = client.get('/api/')
    assert rv.status == '200 OK'
def test_sites_add(app, client):
    # Registering a new monitored site returns 201 Created.
    rv = client.post('/api/sites/', json={'url': 'https://google.com'})
    assert rv.status == '201 CREATED'
def test_modify_modify(app, client):
    # Renaming a site's URL returns 204.
    # NOTE(review): presumably relies on the site existing in the fresh
    # fixture store — confirm the API tolerates a missing entry.
    rv = client.put('/api/sites/', json={'url': 'https://google.com', 'new_url': 'https://amazon.com'})
    assert rv.status == '204 NO CONTENT'
def test_modify_delete(app, client):
    # Deleting a site returns 204.
    # NOTE(review): same state-dependency caveat as test_modify_modify.
    rv = client.delete('/api/sites/', json={'url': 'https://amazon.com'})
    assert rv.status == '204 NO CONTENT'
def test_monitor(app, client):
    # The monitoring endpoint should respond with 200.
    rv = client.get('/api/monitoring/')
    assert rv.status == '200 OK'
| 25.346154 | 106 | 0.649469 | import pytest
import os
from app import create_app
@pytest.fixture
def app(monkeypatch):
app = create_app()
monkeypatch.setenv('DATA_PATH', os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sites.yml'))
app.config.update(
DATA_PATH=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sites.yml')
)
yield app
try:
os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sites.yml'))
except:
pass
@pytest.fixture
def client(app):
return app.test_client()
def test_dashboard(app, client):
rv = client.get('/')
assert rv.status == '200 OK'
def test_sites_list(app, client):
rv = client.get('/api/')
assert rv.status == '200 OK'
def test_sites_add(app, client):
rv = client.post('/api/sites/', json={'url': 'https://google.com'})
assert rv.status == '201 CREATED'
def test_modify_modify(app, client):
rv = client.put('/api/sites/', json={'url': 'https://google.com', 'new_url': 'https://amazon.com'})
assert rv.status == '204 NO CONTENT'
def test_modify_delete(app, client):
rv = client.delete('/api/sites/', json={'url': 'https://amazon.com'})
assert rv.status == '204 NO CONTENT'
def test_monitor(app, client):
rv = client.get('/api/monitoring/')
assert rv.status == '200 OK'
| true | true |
f72dc7d80a31456b6dd4baa949bea3ba0088d9b2 | 1,913 | py | Python | task_templates/pipelines/python3_calibrated_anomaly_detection/custom.py | amperie/user-models | 5236c50d0f20a7bac81acc5d1936a3502de2f5f3 | [
"Apache-2.0"
] | null | null | null | task_templates/pipelines/python3_calibrated_anomaly_detection/custom.py | amperie/user-models | 5236c50d0f20a7bac81acc5d1936a3502de2f5f3 | [
"Apache-2.0"
] | 9 | 2021-11-10T20:16:41.000Z | 2022-03-12T00:59:05.000Z | task_templates/pipelines/python3_calibrated_anomaly_detection/custom.py | amperie/user-models | 5236c50d0f20a7bac81acc5d1936a3502de2f5f3 | [
"Apache-2.0"
] | 1 | 2021-06-17T22:05:33.000Z | 2021-06-17T22:05:33.000Z | import pickle
from build_pipeline import make_anomaly
def fit(
    X, output_dir, class_order=None, row_weights=None, **kwargs,
):
    """DRUM `fit` hook: train the anomaly-detection pipeline and serialize it.

    This hook is required for custom tasks run with `drum fit`; inference
    models may leave it unimplemented.

    Parameters
    ----------
    X: pd.DataFrame - training data to perform fit on
    output_dir: path given by the '--output' parameter of the 'drum fit'
        command; the serialized model artifact is written here.
    class_order: order of modeling classes — unused for anomaly detection,
        kept only to satisfy the DRUM hook signature.
    row_weights: optional array of non-negative per-row importance weights
        (smart downsampling or user-provided); unused here.
    kwargs: accepted for forwards compatibility.

    Returns
    -------
    Nothing
    """
    # Anomaly detection is unsupervised: no labels are passed to fit.
    model = make_anomaly()
    model.fit(X)

    # Serialize the fitted pipeline into output_dir so DRUM can reload it
    # at prediction time. See the custom-model-runner docs for details;
    # serialized artifacts are limited to 10GB.
    with open(f"{output_dir}/artifact.pkl", "wb") as artifact:
        pickle.dump(model, artifact)
| 46.658537 | 100 | 0.719289 | import pickle
from build_pipeline import make_anomaly
def fit(
X, output_dir, class_order=None, row_weights=None, **kwargs,
):
estimator = make_anomaly()
estimator.fit(X)
with open("{}/artifact.pkl".format(output_dir), "wb") as fp:
pickle.dump(estimator, fp)
| true | true |
f72dc8316a82c5f73e59a6498145758dff2e6fa1 | 345 | py | Python | python/ABC/2020-07-25_m-solutions2020/c.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | python/ABC/2020-07-25_m-solutions2020/c.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | python/ABC/2020-07-25_m-solutions2020/c.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline
def main():
    """Read N, K and sequence A; for each i print whether A[i] < A[i+K]."""
    n, k = map(int, input().split())
    a = tuple(map(int, input().split()))
    # Compare each element with its partner K positions ahead.
    answers = ["Yes" if a[i] < a[k + i] else "No" for i in range(n - k)]
    print("\n".join(answers))


if __name__ == "__main__":
    main()
| 15.681818 | 40 | 0.449275 | import sys
input = sys.stdin.readline
def main():
N, K = map(int, input().split())
A = tuple(map(int, input().split()))
ans = [0] * (N - K)
for i in range(N - K):
if A[i] < A[K + i]:
ans[i] = "Yes"
else:
ans[i] = "No"
print("\n".join(ans))
if __name__ == "__main__":
main()
| true | true |
f72dc8a228a12c422d1e6298dc6947ac192397d0 | 1,682 | py | Python | idpay.py | amirhossein4778/idPay-python | f98ce8fb11c5e6d4a9f657032c98a4124106afc6 | [
"MIT"
] | 1 | 2021-01-18T10:22:27.000Z | 2021-01-18T10:22:27.000Z | idpay.py | amirhossein4778/idPay-python | f98ce8fb11c5e6d4a9f657032c98a4124106afc6 | [
"MIT"
] | null | null | null | idpay.py | amirhossein4778/idPay-python | f98ce8fb11c5e6d4a9f657032c98a4124106afc6 | [
"MIT"
] | null | null | null | import requests
import json
APIURL = "https://api.idpay.ir/v1.1/"
# You can get your Token from this url => https://idpay.ir/dashboard/web-services
TOKEN = "Your Token Here"
SANDBOX = str(1) # 1 or 0
Headers = {
"Content-Type": "application/json",
"X-SANDBOX":SANDBOX,
"X-API-KEY":TOKEN
}
def Payment(OrderId, Amount, Callback, Name = "", Mail="", Phone = "", Description=""):
    """Create a new payment on the IDPay gateway.

    Parameters
    ----------
    OrderId : any - merchant-side order identifier (coerced to str)
    Amount : any - amount in Rials (coerced to int; raises on bad input)
    Callback : str - URL IDPay redirects to after payment
    Name, Mail, Phone, Description : str - optional payer details

    Returns
    -------
    dict with the gateway response (always contains 'id' on success),
    or False when the request fails or the gateway rejects the payment.
    """
    OrderId = str(OrderId)
    Amount = int(Amount)
    Posts = {
        "order_id": OrderId,
        "amount": Amount,
        "callback": Callback,
        "name": Name,
        "mail": Mail,
        "phone": Phone,
        "desc": Description
    }
    Posts = json.dumps(Posts)
    try:
        Response = requests.post(f"{APIURL}payment", data=Posts, headers=Headers).text
        Response = json.loads(Response)
        # A successful creation always carries an 'id'; KeyError means the
        # gateway returned an error payload instead.
        Response['id']
    except (requests.RequestException, json.JSONDecodeError, KeyError):
        # Previously a bare `except` hid *every* failure; only network,
        # decode, and gateway-error cases are expected here.
        return False
    return Response
def Verify(Id, OrderId):
    """Verify a completed payment with the IDPay gateway.

    Parameters
    ----------
    Id : any - payment id returned by `Payment` (coerced to str)
    OrderId : any - merchant-side order identifier (coerced to str)

    Returns
    -------
    dict with the decoded verification response, or False when the HTTP
    request or response decoding fails.
    """
    OrderId = str(OrderId)
    Id = str(Id)
    Posts = {
        "id": Id,
        "order_id": OrderId
    }
    Posts = json.dumps(Posts)
    try:
        Response = requests.post(f"{APIURL}payment/verify", data=Posts, headers=Headers).text
        Response = json.loads(Response)
    except (requests.RequestException, json.JSONDecodeError):
        # Narrowed from a bare `except` that swallowed every error.
        return False
    return Response
def Inquiry(Id, OrderId):
    """Query the status of a payment on the IDPay gateway.

    Parameters
    ----------
    Id : any - payment id returned by `Payment` (coerced to str)
    OrderId : any - merchant-side order identifier (coerced to str)

    Returns
    -------
    dict with the decoded inquiry response, or False when the HTTP
    request or response decoding fails.
    """
    OrderId = str(OrderId)
    Id = str(Id)
    Posts = {
        "id": Id,
        "order_id": OrderId
    }
    Posts = json.dumps(Posts)
    try:
        Response = requests.post(f"{APIURL}payment/inquiry", data=Posts, headers=Headers).text
        Response = json.loads(Response)
    except (requests.RequestException, json.JSONDecodeError):
        # Narrowed from a bare `except` that swallowed every error.
        return False
    return Response
| 23.361111 | 93 | 0.565398 | import requests
import json
APIURL = "https://api.idpay.ir/v1.1/"
TOKEN = "Your Token Here"
SANDBOX = str(1)
Headers = {
"Content-Type": "application/json",
"X-SANDBOX":SANDBOX,
"X-API-KEY":TOKEN
}
def Payment(OrderId, Amount, Callback, Name = "", Mail="", Phone = "", Description=""):
OrderId = str(OrderId)
Amount = int(Amount)
Posts = {
"order_id":OrderId,
"amount":Amount,
"callback":Callback,
"name": Name,
"mail": Mail,
"phone": Phone,
"desc": Description
}
Posts = json.dumps(Posts)
try:
Response = requests.post(f"{APIURL}payment",data=Posts,headers=Headers).text
Response = json.loads(Response)
Response['id']
except:
return False
return Response
def Verify(Id,OrderId):
OrderId = str(OrderId)
Id = str(Id)
Posts = {
"id": Id,
"order_id": OrderId
}
Posts = json.dumps(Posts)
try:
Response = requests.post(f"{APIURL}payment/verify",data=Posts,headers=Headers).text
Response = json.loads(Response)
except:
return False
return Response
def Inquiry(Id,OrderId):
OrderId = str(OrderId)
Id = str(Id)
Posts = {
"id": Id,
"order_id": OrderId
}
Posts = json.dumps(Posts)
try:
Response = requests.post(f"{APIURL}payment/inquiry",data=Posts,headers=Headers).text
Response = json.loads(Response)
except:
return False
return Response
| true | true |
f72dc9fdc54892421b383d5dd9ff38be42fc2864 | 741 | py | Python | leadreader/db.py | raindrift/leadreader | c360630a75a149e20c9cf64bcb2c14646effb9ae | [
"MIT"
] | null | null | null | leadreader/db.py | raindrift/leadreader | c360630a75a149e20c9cf64bcb2c14646effb9ae | [
"MIT"
] | null | null | null | leadreader/db.py | raindrift/leadreader | c360630a75a149e20c9cf64bcb2c14646effb9ae | [
"MIT"
] | null | null | null | """
db.py
"""
from pymongo import MongoClient
class Db: # pylint: disable=too-few-public-methods
    """
    Database.
    Singleton pattern, from Bruce Eckel

    All `Db(...)` instances share one underlying `__Db` connection; attribute
    access is delegated to it via `__getattr__`.
    """
    class __Db: # pylint: disable=invalid-name
        # The single real connection holder (name-mangled to Db._Db__Db).
        def __init__(self, dbname):
            self.val = dbname
            # Connects to the local MongoDB and selects 'leadreader_<dbname>'.
            self.client = MongoClient('mongodb://localhost:27017/')
            self.conn = self.client['leadreader_' + dbname]

        def __str__(self):
            return repr(self) + self.val

    # Class-level holder for the one shared __Db instance.
    instance = None

    def __init__(self, dbname='prod'):
        # First construction creates the connection; later constructions only
        # retarget `val` (the connection itself is NOT re-pointed at dbname).
        # NOTE(review): not thread-safe — concurrent first calls may race.
        if not Db.instance:
            Db.instance = Db.__Db(dbname)
        else:
            Db.instance.val = dbname

    def __getattr__(self, name):
        # Delegate unknown attribute lookups to the shared instance.
        return getattr(self.instance, name)
| 23.903226 | 67 | 0.588394 | from pymongo import MongoClient
class Db:
class __Db:
def __init__(self, dbname):
self.val = dbname
self.client = MongoClient('mongodb://localhost:27017/')
self.conn = self.client['leadreader_' + dbname]
def __str__(self):
return repr(self) + self.val
instance = None
def __init__(self, dbname='prod'):
if not Db.instance:
Db.instance = Db.__Db(dbname)
else:
Db.instance.val = dbname
def __getattr__(self, name):
return getattr(self.instance, name)
| true | true |
f72dcb234b3e5c6c2045220639db1c2f9a81206a | 4,633 | py | Python | userbot/plugins/mega_downloader.py | staxx1/TurhanUser | 555e7e7a781104981b92e33bb9ad583b062bc14d | [
"MIT"
] | 5 | 2020-08-17T08:05:53.000Z | 2020-09-11T18:27:41.000Z | userbot/plugins/mega_downloader.py | staxx1/TurhanUser | 555e7e7a781104981b92e33bb9ad583b062bc14d | [
"MIT"
] | null | null | null | userbot/plugins/mega_downloader.py | staxx1/TurhanUser | 555e7e7a781104981b92e33bb9ad583b062bc14d | [
"MIT"
] | null | null | null | # Copyright (C) 2020 Adek Maulana.
# All rights reserved.
import json
import logging
import os
import re
import time
from os.path import exists
from subprocess import PIPE, Popen
from urllib.error import HTTPError
from pySmartDL import SmartDL
from uniborg.util import admin_cmd, humanbytes
logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
level=logging.WARNING)
logger = logging.getLogger(__name__)
def subprocess_run(cmd):
    """Run *cmd* through the shell and capture its output.

    Returns the (stdout, stderr) tuple on success, or a single diagnostic
    string when the command exits with a non-zero status.
    """
    process = Popen(cmd, stdout=PIPE, stderr=PIPE,
                    shell=True, universal_newlines=True)
    stdout, stderr = process.communicate()
    status = process.returncode
    if status != 0:
        return ('An error was detected while running the subprocess:\n'
                f'exit code: {status}\n'
                f'stdout: {stdout}\n'
                f'stderr: {stderr}')
    return (stdout, stderr)
@borg.on(admin_cmd(pattern="mega ?(.*)"))
async def mega_downloader(megadl):
    """Handler for the `.mega` command.

    Takes the MEGA.nz URL either from the command argument or from the
    replied-to message, then delegates to `mega_download`.
    """
    await megadl.edit("`Processing...`")
    textx = await megadl.get_reply_message()
    link = megadl.pattern_match.group(1)
    if link:
        pass
    elif textx:
        # Fall back to the text of the message being replied to.
        link = textx.text
    else:
        await megadl.edit("`Usage: .mega <mega url>`")
        return
    if not link:
        await megadl.edit("`No MEGA.nz link found!`")
        # BUG FIX: previously fell through and called mega_download with an
        # empty link anyway.
        return
    await mega_download(link, megadl)
async def mega_download(url, megadl):
    """Resolve a MEGA.nz link via `./bin/megadirect`, download the file with
    pySmartDL while reporting progress, then decrypt the result.

    Parameters
    ----------
    url : str - text expected to contain a MEGA.nz URL
    megadl : telethon event - used to edit the status message in-place
    """
    try:
        link = re.findall(r'\bhttps?://.*mega.*\.nz\S+', url)[0]
    except IndexError:
        await megadl.edit("`No MEGA.nz link found`\n")
        return
    # NOTE(review): `link` is interpolated into a shell command and
    # subprocess_run uses shell=True — potential command injection from a
    # user-supplied URL; confirm megadirect input is sanitized.
    cmd = f'./bin/megadirect {link}'
    result = subprocess_run(cmd)
    try:
        # megadirect prints a JSON document on stdout (result[0]).
        data = json.loads(result[0])
    except json.JSONDecodeError:
        await megadl.edit("`Error: Can't extract the link`\n")
        return
    file_name = data['file_name']
    file_url = data['url']
    file_hex = data['hex']          # AES key for decrypt_file
    file_raw_hex = data['raw_hex']  # AES IV for decrypt_file
    # Remove any stale copy from a previous run.
    if exists(file_name):
        os.remove(file_name)
    # Always true after the removal above; kept as a guard.
    if not exists(file_name):
        temp_file_name = file_name + ".temp"
        downloaded_file_name = "./" + "" + temp_file_name
        downloader = SmartDL(
            file_url, downloaded_file_name, progress_bar=False)
        display_message = None
        try:
            # Non-blocking start; progress is polled in the loop below.
            downloader.start(blocking=False)
        except HTTPError as e:
            await megadl.edit("`" + str(e) + "`")
            logger.info(str(e))
            return
        # Poll the downloader and mirror its progress into the Telegram message.
        while not downloader.isFinished():
            status = downloader.get_status().capitalize()
            total_length = downloader.filesize if downloader.filesize else None
            downloaded = downloader.get_dl_size()
            percentage = int(downloader.get_progress() * 100)
            progress = downloader.get_progress_bar()
            speed = downloader.get_speed(human=True)
            estimated_total_time = downloader.get_eta(human=True)
            try:
                current_message = (
                    f"**{status}**..."
                    f"\nFile Name: `{file_name}`\n"
                    f"\n{progress} `{percentage}%`"
                    f"\n{humanbytes(downloaded)} of {humanbytes(total_length)}"
                    f" @ {speed}"
                    f"\nETA: {estimated_total_time}"
                )
                if status == "Downloading":
                    await megadl.edit(current_message)
                    # NOTE(review): time.sleep blocks the event loop inside an
                    # async handler — asyncio.sleep would be non-blocking.
                    time.sleep(0.2)
                elif status == "Combining":
                    # Only re-edit when the text actually changed, to avoid
                    # redundant Telegram API calls.
                    if display_message != current_message:
                        await megadl.edit(current_message)
                        display_message = current_message
            except Exception as e:
                # Telegram edit failures (e.g. flood wait) are logged and ignored.
                logger.info(str(e))
        if downloader.isSuccessful():
            download_time = downloader.get_dl_time(human=True)
            if exists(temp_file_name):
                await megadl.edit("Decrypting file...")
                decrypt_file(file_name, temp_file_name, file_hex, file_raw_hex)
            await megadl.edit(f"`{file_name}`\n\n"
                              "Successfully downloaded\n"
                              f"Download took: {download_time}")
        else:
            await megadl.edit("Failed to download...")
            for e in downloader.get_errors():
                logger.info(str(e))
            return
def decrypt_file(file_name, temp_file_name, file_hex, file_raw_hex):
    """Decrypt the downloaded MEGA temp file with openssl, then delete it.

    NOTE(review): file names are interpolated into a shell pipeline —
    injection risk if they contain quotes or shell metacharacters.
    """
    command = (
        f"cat '{temp_file_name}' | openssl enc -d -aes-128-ctr "
        f"-K {file_hex} -iv {file_raw_hex} > '{file_name}'"
    )
    subprocess_run(command)
    os.remove(temp_file_name)
| 35.638462 | 81 | 0.575221 |
import json
import logging
import os
import re
import time
from os.path import exists
from subprocess import PIPE, Popen
from urllib.error import HTTPError
from pySmartDL import SmartDL
from uniborg.util import admin_cmd, humanbytes
logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
level=logging.WARNING)
logger = logging.getLogger(__name__)
def subprocess_run(cmd):
reply = ''
subproc = Popen(cmd, stdout=PIPE, stderr=PIPE,
shell=True, universal_newlines=True)
talk = subproc.communicate()
exitCode = subproc.returncode
if exitCode != 0:
reply += ('An error was detected while running the subprocess:\n'
f'exit code: {exitCode}\n'
f'stdout: {talk[0]}\n'
f'stderr: {talk[1]}')
return reply
return talk
@borg.on(admin_cmd(pattern="mega ?(.*)"))
async def mega_downloader(megadl):
await megadl.edit("`Processing...`")
textx = await megadl.get_reply_message()
link = megadl.pattern_match.group(1)
if link:
pass
elif textx:
link = textx.text
else:
await megadl.edit("`Usage: .mega <mega url>`")
return
if not link:
await megadl.edit("`No MEGA.nz link found!`")
await mega_download(link, megadl)
async def mega_download(url, megadl):
try:
link = re.findall(r'\bhttps?://.*mega.*\.nz\S+', url)[0]
except IndexError:
await megadl.edit("`No MEGA.nz link found`\n")
return
cmd = f'./bin/megadirect {link}'
result = subprocess_run(cmd)
try:
data = json.loads(result[0])
except json.JSONDecodeError:
await megadl.edit("`Error: Can't extract the link`\n")
return
file_name = data['file_name']
file_url = data['url']
file_hex = data['hex']
file_raw_hex = data['raw_hex']
if exists(file_name):
os.remove(file_name)
if not exists(file_name):
temp_file_name = file_name + ".temp"
downloaded_file_name = "./" + "" + temp_file_name
downloader = SmartDL(
file_url, downloaded_file_name, progress_bar=False)
display_message = None
try:
downloader.start(blocking=False)
except HTTPError as e:
await megadl.edit("`" + str(e) + "`")
logger.info(str(e))
return
while not downloader.isFinished():
status = downloader.get_status().capitalize()
total_length = downloader.filesize if downloader.filesize else None
downloaded = downloader.get_dl_size()
percentage = int(downloader.get_progress() * 100)
progress = downloader.get_progress_bar()
speed = downloader.get_speed(human=True)
estimated_total_time = downloader.get_eta(human=True)
try:
current_message = (
f"**{status}**..."
f"\nFile Name: `{file_name}`\n"
f"\n{progress} `{percentage}%`"
f"\n{humanbytes(downloaded)} of {humanbytes(total_length)}"
f" @ {speed}"
f"\nETA: {estimated_total_time}"
)
if status == "Downloading":
await megadl.edit(current_message)
time.sleep(0.2)
elif status == "Combining":
if display_message != current_message:
await megadl.edit(current_message)
display_message = current_message
except Exception as e:
logger.info(str(e))
if downloader.isSuccessful():
download_time = downloader.get_dl_time(human=True)
if exists(temp_file_name):
await megadl.edit("Decrypting file...")
decrypt_file(file_name, temp_file_name, file_hex, file_raw_hex)
await megadl.edit(f"`{file_name}`\n\n"
"Successfully downloaded\n"
f"Download took: {download_time}")
else:
await megadl.edit("Failed to download...")
for e in downloader.get_errors():
logger.info(str(e))
return
def decrypt_file(file_name, temp_file_name, file_hex, file_raw_hex):
cmd = ("cat '{}' | openssl enc -d -aes-128-ctr -K {} -iv {} > '{}'"
.format(temp_file_name, file_hex, file_raw_hex, file_name))
subprocess_run(cmd)
os.remove(r"{}".format(temp_file_name))
| true | true |
f72dcb4220e85fb412fc4977dc976eacf47d6db8 | 2,737 | py | Python | tx_salaries/utils/transformers/beaumont_isd.py | texastribune/tx_salaries | 197d8da4e1783216830b8d0a5adb23c0200fd3e8 | [
"Apache-2.0"
] | 6 | 2016-05-18T05:53:44.000Z | 2019-06-13T18:27:50.000Z | tx_salaries/utils/transformers/beaumont_isd.py | texastribune/tx_salaries | 197d8da4e1783216830b8d0a5adb23c0200fd3e8 | [
"Apache-2.0"
] | 64 | 2015-02-13T18:29:04.000Z | 2018-06-15T19:48:56.000Z | tx_salaries/utils/transformers/beaumont_isd.py | texastribune/tx_salaries | 197d8da4e1783216830b8d0a5adb23c0200fd3e8 | [
"Apache-2.0"
] | 2 | 2015-05-08T19:22:12.000Z | 2016-07-11T16:57:49.000Z | from . import base
from . import mixins
from datetime import date
class TransformedRecord(
        mixins.GenericCompensationMixin,
        mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
        mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
        mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
        mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
    """Maps one row of the Beaumont ISD payroll spreadsheet onto the
    generic salary-record schema consumed by the loader."""

    # Spreadsheet column header -> canonical field name.
    MAP = {
        'last_name': 'PER_LAST_NAME',
        'first_name': 'PER_FIRST_NAME',
        'middle_name': 'PER_MIDDLE_NAME',
        'department': 'Organization',
        'job_title': 'ROLE_NAME',
        'hire_date': 'EMP_HIRE_DT',
        'compensation': 'EMP_ASGN_PAY_HIST_A_NRML_PAY',
        'gender': 'PER_GENDER',
        'nationality': 'PRIMARY_ETHNICITY_CODE',
        'employee_type': 'Status'
    }

    NAME_FIELDS = ('first_name', 'last_name', )

    ORGANIZATION_NAME = 'Beaumont ISD'
    ORGANIZATION_CLASSIFICATION = 'School District'
    DATE_PROVIDED = date(2018, 6, 14)

    # Location of the raw FOIA spreadsheet in the S3 bucket.
    URL = ('https://s3.amazonaws.com/raw.texastribune.org/beaumont_isd/'
           'salaries/2018-06/foia.xlsx')

    # Raw ethnicity codes from the spreadsheet -> display names.
    race_map = {
        'AFRICAN AM': 'African American',
        'WHITE': 'White',
        'HISPANIC': 'Hispanic',
        'ASIAN': 'Asian',
        'AMER IND': 'American Indian'
    }

    @property
    def is_valid(self):
        # A row counts as a real person whenever it carries a last name.
        return bool(self.last_name.strip())

    @property
    def compensation_type(self):
        # Only explicit part-timers are 'PT'; every other status
        # (including the empty string) is treated as full-time.
        return 'PT' if self.employee_type == 'Part-Time' else 'FT'

    @property
    def description(self):
        if self.employee_type == 'Part-Time':
            return "Part-time, hourly rate"
        return "Yearly salary"

    @property
    def hire_date(self):
        # Keep only the leading YYYY-MM-DD portion of the raw timestamp.
        return self.get_mapped_value('hire_date')[:10]

    @property
    def race(self):
        return {'name': self.race_map[self.nationality.strip()]}

    @property
    def person(self):
        name = self.get_name()
        return {
            'family_name': name.last,
            'given_name': name.first,
            'additional_name': name.middle,
            'name': unicode(name),
            'gender': self.gender,
        }
# Module-level entry point: the loader imports `transform` and feeds it rows,
# which are wrapped in this file's TransformedRecord.
transform = base.transform_factory(TransformedRecord)
| 26.317308 | 106 | 0.606503 | from . import base
from . import mixins
from datetime import date
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'PER_LAST_NAME',
'first_name': 'PER_FIRST_NAME',
'middle_name': 'PER_MIDDLE_NAME',
'department': 'Organization',
'job_title': 'ROLE_NAME',
'hire_date': 'EMP_HIRE_DT',
'compensation': 'EMP_ASGN_PAY_HIST_A_NRML_PAY',
'gender': 'PER_GENDER',
'nationality': 'PRIMARY_ETHNICITY_CODE',
'employee_type': 'Status'
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'Beaumont ISD'
ORGANIZATION_CLASSIFICATION = 'School District'
DATE_PROVIDED = date(2018, 6, 14)
URL = ('https://s3.amazonaws.com/raw.texastribune.org/beaumont_isd/'
'salaries/2018-06/foia.xlsx')
race_map = {
'AFRICAN AM': 'African American',
'WHITE': 'White',
'HISPANIC': 'Hispanic',
'ASIAN': 'Asian',
'AMER IND': 'American Indian'
}
@property
def is_valid(self):
return self.last_name.strip() != ''
@property
def compensation_type(self):
employee_type = self.employee_type
if employee_type == 'Part-Time':
return 'PT'
if employee_type == '':
return 'FT'
return 'FT'
@property
def description(self):
employee_type = self.employee_type
if employee_type == '':
return "Yearly salary"
if employee_type == 'Part-Time':
return "Part-time, hourly rate"
return "Yearly salary"
@property
def hire_date(self):
raw_date = self.get_mapped_value('hire_date')[:10]
return raw_date
@property
def race(self):
return {
'name': self.race_map[self.nationality.strip()]
}
@property
def person(self):
name = self.get_name()
r = {
'family_name': name.last,
'given_name': name.first,
'additional_name': name.middle,
'name': unicode(name),
'gender': self.gender,
}
return r
transform = base.transform_factory(TransformedRecord)
| true | true |
f72dcb7858ad4a4c4164a23285c83061503d7a74 | 15,494 | py | Python | plenum/test/consensus/order_service/test_ordering_service_on_view_change.py | jandayanan/indy-plenum | 2815e994404c77ad87eddcfd09062d5fe6efc1c5 | [
"Apache-2.0"
] | null | null | null | plenum/test/consensus/order_service/test_ordering_service_on_view_change.py | jandayanan/indy-plenum | 2815e994404c77ad87eddcfd09062d5fe6efc1c5 | [
"Apache-2.0"
] | null | null | null | plenum/test/consensus/order_service/test_ordering_service_on_view_change.py | jandayanan/indy-plenum | 2815e994404c77ad87eddcfd09062d5fe6efc1c5 | [
"Apache-2.0"
] | null | null | null | import pytest
from plenum.common.messages.internal_messages import ViewChangeStarted, NewViewAccepted, NewViewCheckpointsApplied
from plenum.common.messages.node_messages import OldViewPrePrepareRequest, OldViewPrePrepareReply
from plenum.common.util import updateNamedTuple
from plenum.server.consensus.batch_id import BatchID
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.consensus.utils import preprepare_to_batch_id
from plenum.server.replica_helper import generateName
from plenum.test.consensus.helper import copy_shared_data, create_batches, \
check_service_changed_only_owned_fields_in_shared_data, create_new_view, \
create_pre_prepares, create_batches_from_preprepares
from plenum.test.consensus.order_service.helper import check_prepares_sent, check_request_old_view_preprepares_sent, \
check_reply_old_view_preprepares_sent
from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root, create_prepare, create_commit_no_bls_sig
from plenum.test.consensus.order_service.conftest import orderer as _orderer
applied_pre_prepares = 0
@pytest.fixture(params=[True, False], ids=['Primary', 'Non-Primary'])
def is_primary(request):
    """Whether the replica under test acts as primary for this run.

    BUG FIX: the params are the booleans True/False (the strings
    'Primary'/'Non-Primary' are only display ids), so the original
    ``request.param == 'Primary'`` was always False and the 'Primary'
    parametrization never actually exercised the primary code path.
    """
    return request.param
@pytest.fixture()
def orderer(_orderer, is_primary, ):
    """Ordering service stub named 'Alpha:0', primary or not per `is_primary`,
    with PrePrepare validation/application short-circuited for speed."""
    _orderer.name = 'Alpha:0'
    # BUG FIX: the original read `orderer.name` here, but at this point
    # `orderer` is this fixture function itself (which has no `.name`
    # attribute) — the replica instance `_orderer` was intended.
    _orderer._data.primary_name = 'some_node:0' if not is_primary else _orderer.name

    def _apply_and_validate_applied_pre_prepare_fake(pp, sender):
        # Count applications instead of doing real 3PC work.
        global applied_pre_prepares
        applied_pre_prepares += 1

    _orderer._can_process_pre_prepare = lambda pp, sender: None
    _orderer._apply_and_validate_applied_pre_prepare = _apply_and_validate_applied_pre_prepare_fake
    return _orderer
@pytest.fixture()
def initial_view_no():
    # View number the replicas are in before the simulated view change.
    return 3
@pytest.fixture()
def pre_prepares(initial_view_no):
    # PrePrepare messages created in the pre-view-change view.
    return create_pre_prepares(view_no=initial_view_no)
@pytest.fixture(params=['all', 'first', 'last', 'no'])
def stored_old_view_pre_prepares(request, pre_prepares):
    """Subset of old-view PrePrepares the replica already holds locally."""
    subsets = {
        'all': pre_prepares,
        'first': pre_prepares[:1],
        'last': pre_prepares[-1:],
        'no': [],
    }
    return subsets[request.param]
@pytest.fixture(params=['all', 'first', 'last', 'no'])
def requested_old_view_pre_prepares(request, pre_prepares):
    """Subset of old-view PrePrepares another node asks this replica for."""
    subsets = {
        'all': pre_prepares,
        'first': pre_prepares[:1],
        'last': pre_prepares[-1:],
        'no': [],
    }
    return subsets[request.param]
def test_update_shared_data_on_view_change_started(internal_bus, orderer):
    """ViewChangeStarted must empty the orderer-owned fields of the shared
    consensus data (preprepared/prepared) and leave everything else intact."""
    orderer._data.preprepared = create_batches(view_no=3)
    orderer._data.prepared = create_batches(view_no=3)
    old_data = copy_shared_data(orderer._data)
    internal_bus.send(ViewChangeStarted(view_no=4))
    new_data = copy_shared_data(orderer._data)
    # Only fields owned by OrderingService may differ between the snapshots.
    check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)
    assert orderer._data.preprepared == []
    assert orderer._data.prepared == []
def test_clear_data_on_view_change_started(internal_bus, orderer):
    """ViewChangeStarted must purge every per-view 3PC collection of the
    orderer (PrePrepares, Prepares, Commits, pending queues, batches, ...)."""
    # Populate one full 3PC entry (PrePrepare + Prepare + Commit) plus every
    # auxiliary collection keyed by (view_no, pp_seq_no).
    pp = create_pre_prepare_no_bls(generate_state_root(),
                                   view_no=0, pp_seq_no=10, inst_id=0,
                                   audit_txn_root="HSai3sMHKeAva4gWMabDrm1yNhezvPHfXnGyHf2ex1L4")
    prepare = create_prepare(req_key=(0, 10),
                             state_root=generate_state_root(), inst_id=0)
    commit = create_commit_no_bls_sig(req_key=(0, 10), inst_id=0)
    key = (pp.viewNo, pp.ppSeqNo)
    orderer.prePrepares[key] = pp
    orderer.prepares[key] = prepare
    orderer.commits[key] = commit
    orderer.pre_prepare_tss[key][pp.auditTxnRootHash, "Node1"] = 1234
    orderer.prePreparesPendingFinReqs.append(pp)
    orderer.prePreparesPendingPrevPP[key] = pp
    orderer.sent_preprepares[key] = pp
    orderer.batches[key] = [pp.ledgerId, pp.discarded,
                            pp.ppTime, generate_state_root(), len(pp.reqIdr)]
    orderer.ordered.add(*key)
    internal_bus.send(ViewChangeStarted(view_no=4))
    # Every collection must be empty after the view change kicks off.
    assert not orderer.prePrepares
    assert not orderer.prepares
    assert not orderer.commits
    assert not orderer.pre_prepare_tss
    assert not orderer.prePreparesPendingFinReqs
    assert not orderer.prePreparesPendingPrevPP
    assert not orderer.sent_preprepares
    assert not orderer.batches
    assert not orderer.ordered
def test_stores_old_pre_prepares_on_view_change_started(internal_bus, orderer):
    """ViewChangeStarted must archive both received (prePrepares) and sent
    (sent_preprepares) PrePrepares into old_view_preprepares, keyed by
    (view_no, pp_seq_no, digest), and accumulate across repeated events."""
    pp1 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=0, pp_seq_no=1, inst_id=0)
    pp2 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=0, pp_seq_no=2, inst_id=0)
    pp3 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=1, pp_seq_no=3, inst_id=0)
    pp4 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=2, pp_seq_no=4, inst_id=0)
    pp5 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=3, pp_seq_no=5, inst_id=0)
    pp6 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=3, pp_seq_no=6, inst_id=0)
    orderer.prePrepares[(pp1.viewNo, pp1.ppSeqNo)] = pp1
    orderer.prePrepares[(pp3.viewNo, pp3.ppSeqNo)] = pp3
    orderer.sent_preprepares[(pp2.viewNo, pp2.ppSeqNo)] = pp2
    orderer.sent_preprepares[(pp4.viewNo, pp4.ppSeqNo)] = pp4
    assert not orderer.old_view_preprepares
    internal_bus.send(ViewChangeStarted(view_no=4))
    assert orderer.old_view_preprepares[(pp1.viewNo, pp1.ppSeqNo, pp1.digest)] == pp1
    assert orderer.old_view_preprepares[(pp2.viewNo, pp2.ppSeqNo, pp2.digest)] == pp2
    assert orderer.old_view_preprepares[(pp3.viewNo, pp3.ppSeqNo, pp3.digest)] == pp3
    assert orderer.old_view_preprepares[(pp4.viewNo, pp4.ppSeqNo, pp4.digest)] == pp4
    # next calls append to existing data (pp1..pp4 must survive a second event)
    orderer.prePrepares[(pp5.viewNo, pp5.ppSeqNo)] = pp5
    orderer.sent_preprepares[(pp6.viewNo, pp6.ppSeqNo)] = pp6
    internal_bus.send(ViewChangeStarted(view_no=4))
    assert orderer.old_view_preprepares[(pp1.viewNo, pp1.ppSeqNo, pp1.digest)] == pp1
    assert orderer.old_view_preprepares[(pp2.viewNo, pp2.ppSeqNo, pp2.digest)] == pp2
    assert orderer.old_view_preprepares[(pp3.viewNo, pp3.ppSeqNo, pp3.digest)] == pp3
    assert orderer.old_view_preprepares[(pp4.viewNo, pp4.ppSeqNo, pp4.digest)] == pp4
    assert orderer.old_view_preprepares[(pp5.viewNo, pp5.ppSeqNo, pp5.digest)] == pp5
    assert orderer.old_view_preprepares[(pp6.viewNo, pp6.ppSeqNo, pp6.digest)] == pp6
def test_do_nothing_on_new_view_accepted(internal_bus, orderer):
    """NewViewAccepted alone must leave the shared consensus data untouched;
    the orderer only reacts later to NewViewCheckpointsApplied."""
    orderer._data.preprepared = create_batches(view_no=0)
    orderer._data.prepared = create_batches(view_no=0)
    old_data = copy_shared_data(orderer._data)
    initial_view_no = 3
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(NewViewAccepted(view_no=initial_view_no + 1,
                                      view_changes=new_view.viewChanges,
                                      checkpoint=new_view.checkpoint,
                                      batches=new_view.batches))
    new_data = copy_shared_data(orderer._data)
    assert old_data == new_data
def test_update_shared_data_on_new_view_checkpoint_applied(internal_bus, orderer):
    """NewViewCheckpointsApplied may only touch orderer-owned shared fields,
    and must not pre-populate preprepared/prepared for unordered batches."""
    initial_view_no = 3
    orderer._data.preprepared = []
    orderer._data.prepared = []
    orderer._data.view_no = initial_view_no + 1
    old_data = copy_shared_data(orderer._data)
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                                view_changes=new_view.viewChanges,
                                                checkpoint=new_view.checkpoint,
                                                batches=new_view.batches))
    new_data = copy_shared_data(orderer._data)
    check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)
    # Since we didn't order the PrePrepare from Batches, it should not be added into shared data
    # (we will request the PrePrepares instead, see next tests)
    assert orderer._data.preprepared == []
    assert orderer._data.prepared == []
@pytest.mark.parametrize('all_ordered', [True, False], ids=['All-ordered', 'All-non-ordered'])
def test_process_preprepare_on_new_view_checkpoint_applied(internal_bus, external_bus,
                                                           orderer, is_primary,
                                                           all_ordered,
                                                           initial_view_no,
                                                           pre_prepares, stored_old_view_pre_prepares):
    """After NewViewCheckpointsApplied the master re-applies the old-view
    PrePrepares it already holds (sending Prepares if non-primary) and
    requests the missing ones; non-master replicas do nothing."""
    # !!!SETUP!!!
    orderer._data.view_no = initial_view_no + 1
    batches = create_batches_from_preprepares(pre_prepares)
    orderer._data.prev_view_prepare_cert = batches[-1].pp_seq_no
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200,
                               batches=batches)
    # emulate that we received all PrePrepares before View Change
    orderer._update_old_view_preprepares(stored_old_view_pre_prepares)
    # emulate that we've already ordered the PrePrepares
    if all_ordered and stored_old_view_pre_prepares:
        orderer.last_ordered_3pc = (initial_view_no, stored_old_view_pre_prepares[-1].ppSeqNo)
    # !!!EXECUTE!!!
    # send NewViewCheckpointsApplied
    internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                                view_changes=new_view.viewChanges,
                                                checkpoint=new_view.checkpoint,
                                                batches=new_view.batches))
    # !!!CHECK!!!
    if not orderer.is_master:
        # no re-ordering is expected on non-master
        assert orderer._data.preprepared == []
        assert orderer._data.prepared == []
        return
    # check that PPs were added
    stored_batch_ids = [preprepare_to_batch_id(pp) for pp in stored_old_view_pre_prepares]
    assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_view_no=initial_view_no,
                                                 pp_seq_no=batch_id.pp_seq_no, pp_digest=batch_id.pp_digest)
                                         for batch_id in new_view.batches if batch_id in stored_batch_ids]
    # check that sentPrePrepares is updated in case of Primary and prePrepares in case of non-primary
    updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
    non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
    for pp in stored_old_view_pre_prepares:
        new_pp = updateNamedTuple(pp, viewNo=initial_view_no + 1, originalViewNo=pp.viewNo)
        assert (initial_view_no + 1, new_pp.ppSeqNo) in updated_prepares_collection
        assert updated_prepares_collection[(initial_view_no + 1, new_pp.ppSeqNo)] == new_pp
    assert not non_updated_prepares_collection
    # check that Prepare is sent in case of non primary
    if not is_primary:
        check_prepares_sent(external_bus, stored_old_view_pre_prepares, initial_view_no + 1)
    else:
        # only MessageReqs are sent
        assert len(external_bus.sent_messages) == len(pre_prepares) - len(stored_old_view_pre_prepares)
    # we don't have a quorum of Prepares yet
    assert orderer._data.prepared == []
    # check that missing PrePrepares have been requested
    expected_requested_batches = [batch_id for batch_id in new_view.batches if batch_id not in stored_batch_ids]
    check_request_old_view_preprepares_sent(external_bus, expected_requested_batches)
def test_send_reply_on_old_view_pre_prepares_request(external_bus, orderer,
                                                     initial_view_no,
                                                     stored_old_view_pre_prepares,
                                                     requested_old_view_pre_prepares):
    """A master replica answers OldViewPrePrepareRequest with the requested
    PrePrepares it actually has stored; non-masters stay silent."""
    # Setup
    orderer._data.view_no = initial_view_no + 2
    orderer._update_old_view_preprepares(stored_old_view_pre_prepares)
    # Receive OldViewPrePrepareRequest req
    batches = [preprepare_to_batch_id(pp) for pp in requested_old_view_pre_prepares]
    req = OldViewPrePrepareRequest(0, batches)
    frm = "node1"
    orderer._network.process_incoming(req, generateName(frm, orderer._data.inst_id))
    # Check that OldViewPrePrepareReply is sent for all requested PrePrepares
    if not orderer.is_master:
        assert len(external_bus.sent_messages) == 0
        return
    # intersection of stored and requested: only PrePrepares we both hold and
    # were asked for can be replied with, ordered by pp_seq_no
    expected_pps = [i for i in stored_old_view_pre_prepares if i in requested_old_view_pre_prepares]
    expected_pps = sorted(expected_pps, key=lambda pp: pp.ppSeqNo)
    check_reply_old_view_preprepares_sent(external_bus, frm, expected_pps)
def test_process_preprepare_on_old_view_pre_prepares_reply(external_bus, internal_bus,
                                                           orderer, is_primary,
                                                           initial_view_no,
                                                           pre_prepares):
    """When an OldViewPrePrepareReply arrives, the master adopts the carried
    PrePrepares into the new view (sending Prepares if non-primary);
    non-master replicas ignore it."""
    # !!!SETUP!!!
    orderer._data.view_no = initial_view_no + 1
    orderer._data.prev_view_prepare_cert = orderer.lastPrePrepareSeqNo + 1
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200,
                               batches=create_batches_from_preprepares(pre_prepares))
    orderer._data.new_view = new_view
    # !!!EXECUTE!!!
    rep = OldViewPrePrepareReply(0, [pp._asdict() for pp in pre_prepares])
    orderer._network.process_incoming(rep, generateName("node1", orderer._data.inst_id))
    # !!!CHECK!!!
    if not orderer.is_master:
        # no re-ordering is expected on non-master
        assert orderer._data.preprepared == []
        assert orderer._data.prepared == []
        return
    # check that PPs were added
    assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_view_no=pp.viewNo,
                                                 pp_seq_no=pp.ppSeqNo, pp_digest=pp.digest)
                                         for pp in pre_prepares]
    # check that sent_preprepares is updated in case of Primary and prePrepares in case of non-primary
    updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
    non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
    for pp in pre_prepares:
        new_pp = updateNamedTuple(pp, viewNo=initial_view_no + 1, originalViewNo=pp.viewNo)
        assert (initial_view_no + 1, new_pp.ppSeqNo) in updated_prepares_collection
        assert updated_prepares_collection[(initial_view_no + 1, new_pp.ppSeqNo)] == new_pp
    assert not non_updated_prepares_collection
    # check that Prepare is sent in case of non primary
    if not is_primary:
        check_prepares_sent(external_bus, pre_prepares, initial_view_no + 1)
    else:
        assert len(external_bus.sent_messages) == 0
    # we don't have a quorum of Prepares yet
    assert orderer._data.prepared == []
| 46.528529 | 119 | 0.692526 | import pytest
from plenum.common.messages.internal_messages import ViewChangeStarted, NewViewAccepted, NewViewCheckpointsApplied
from plenum.common.messages.node_messages import OldViewPrePrepareRequest, OldViewPrePrepareReply
from plenum.common.util import updateNamedTuple
from plenum.server.consensus.batch_id import BatchID
from plenum.server.consensus.ordering_service import OrderingService
from plenum.server.consensus.utils import preprepare_to_batch_id
from plenum.server.replica_helper import generateName
from plenum.test.consensus.helper import copy_shared_data, create_batches, \
check_service_changed_only_owned_fields_in_shared_data, create_new_view, \
create_pre_prepares, create_batches_from_preprepares
from plenum.test.consensus.order_service.helper import check_prepares_sent, check_request_old_view_preprepares_sent, \
check_reply_old_view_preprepares_sent
from plenum.test.helper import create_pre_prepare_no_bls, generate_state_root, create_prepare, create_commit_no_bls_sig
from plenum.test.consensus.order_service.conftest import orderer as _orderer
applied_pre_prepares = 0
@pytest.fixture(params=[True, False], ids=['Primary', 'Non-Primary'])
def is_primary(request):
return request.param == 'Primary'
@pytest.fixture()
def orderer(_orderer, is_primary, ):
_orderer.name = 'Alpha:0'
_orderer._data.primary_name = 'some_node:0' if not is_primary else orderer.name
def _apply_and_validate_applied_pre_prepare_fake(pp, sender):
global applied_pre_prepares
applied_pre_prepares += 1
_orderer._can_process_pre_prepare = lambda pp, sender: None
_orderer._apply_and_validate_applied_pre_prepare = _apply_and_validate_applied_pre_prepare_fake
return _orderer
@pytest.fixture()
def initial_view_no():
return 3
@pytest.fixture()
def pre_prepares(initial_view_no):
return create_pre_prepares(view_no=initial_view_no)
@pytest.fixture(params=['all', 'first', 'last', 'no'])
def stored_old_view_pre_prepares(request, pre_prepares):
if request.param == 'all':
return pre_prepares
if request.param == 'first':
return [pre_prepares[0]]
if request.param == 'last':
return [pre_prepares[-1]]
return []
@pytest.fixture(params=['all', 'first', 'last', 'no'])
def requested_old_view_pre_prepares(request, pre_prepares):
if request.param == 'all':
return pre_prepares
if request.param == 'first':
return [pre_prepares[0]]
if request.param == 'last':
return [pre_prepares[-1]]
return []
def test_update_shared_data_on_view_change_started(internal_bus, orderer):
orderer._data.preprepared = create_batches(view_no=3)
orderer._data.prepared = create_batches(view_no=3)
old_data = copy_shared_data(orderer._data)
internal_bus.send(ViewChangeStarted(view_no=4))
new_data = copy_shared_data(orderer._data)
check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
def test_clear_data_on_view_change_started(internal_bus, orderer):
pp = create_pre_prepare_no_bls(generate_state_root(),
view_no=0, pp_seq_no=10, inst_id=0,
audit_txn_root="HSai3sMHKeAva4gWMabDrm1yNhezvPHfXnGyHf2ex1L4")
prepare = create_prepare(req_key=(0, 10),
state_root=generate_state_root(), inst_id=0)
commit = create_commit_no_bls_sig(req_key=(0, 10), inst_id=0)
key = (pp.viewNo, pp.ppSeqNo)
orderer.prePrepares[key] = pp
orderer.prepares[key] = prepare
orderer.commits[key] = commit
orderer.pre_prepare_tss[key][pp.auditTxnRootHash, "Node1"] = 1234
orderer.prePreparesPendingFinReqs.append(pp)
orderer.prePreparesPendingPrevPP[key] = pp
orderer.sent_preprepares[key] = pp
orderer.batches[key] = [pp.ledgerId, pp.discarded,
pp.ppTime, generate_state_root(), len(pp.reqIdr)]
orderer.ordered.add(*key)
internal_bus.send(ViewChangeStarted(view_no=4))
assert not orderer.prePrepares
assert not orderer.prepares
assert not orderer.commits
assert not orderer.pre_prepare_tss
assert not orderer.prePreparesPendingFinReqs
assert not orderer.prePreparesPendingPrevPP
assert not orderer.sent_preprepares
assert not orderer.batches
assert not orderer.ordered
def test_stores_old_pre_prepares_on_view_change_started(internal_bus, orderer):
pp1 = create_pre_prepare_no_bls(generate_state_root(),
view_no=0, pp_seq_no=1, inst_id=0)
pp2 = create_pre_prepare_no_bls(generate_state_root(),
view_no=0, pp_seq_no=2, inst_id=0)
pp3 = create_pre_prepare_no_bls(generate_state_root(),
view_no=1, pp_seq_no=3, inst_id=0)
pp4 = create_pre_prepare_no_bls(generate_state_root(),
view_no=2, pp_seq_no=4, inst_id=0)
pp5 = create_pre_prepare_no_bls(generate_state_root(),
view_no=3, pp_seq_no=5, inst_id=0)
pp6 = create_pre_prepare_no_bls(generate_state_root(),
view_no=3, pp_seq_no=6, inst_id=0)
orderer.prePrepares[(pp1.viewNo, pp1.ppSeqNo)] = pp1
orderer.prePrepares[(pp3.viewNo, pp3.ppSeqNo)] = pp3
orderer.sent_preprepares[(pp2.viewNo, pp2.ppSeqNo)] = pp2
orderer.sent_preprepares[(pp4.viewNo, pp4.ppSeqNo)] = pp4
assert not orderer.old_view_preprepares
internal_bus.send(ViewChangeStarted(view_no=4))
assert orderer.old_view_preprepares[(pp1.viewNo, pp1.ppSeqNo, pp1.digest)] == pp1
assert orderer.old_view_preprepares[(pp2.viewNo, pp2.ppSeqNo, pp2.digest)] == pp2
assert orderer.old_view_preprepares[(pp3.viewNo, pp3.ppSeqNo, pp3.digest)] == pp3
assert orderer.old_view_preprepares[(pp4.viewNo, pp4.ppSeqNo, pp4.digest)] == pp4
orderer.prePrepares[(pp5.viewNo, pp5.ppSeqNo)] = pp5
orderer.sent_preprepares[(pp6.viewNo, pp6.ppSeqNo)] = pp6
internal_bus.send(ViewChangeStarted(view_no=4))
assert orderer.old_view_preprepares[(pp1.viewNo, pp1.ppSeqNo, pp1.digest)] == pp1
assert orderer.old_view_preprepares[(pp2.viewNo, pp2.ppSeqNo, pp2.digest)] == pp2
assert orderer.old_view_preprepares[(pp3.viewNo, pp3.ppSeqNo, pp3.digest)] == pp3
assert orderer.old_view_preprepares[(pp4.viewNo, pp4.ppSeqNo, pp4.digest)] == pp4
assert orderer.old_view_preprepares[(pp5.viewNo, pp5.ppSeqNo, pp5.digest)] == pp5
assert orderer.old_view_preprepares[(pp6.viewNo, pp6.ppSeqNo, pp6.digest)] == pp6
def test_do_nothing_on_new_view_accepted(internal_bus, orderer):
orderer._data.preprepared = create_batches(view_no=0)
orderer._data.prepared = create_batches(view_no=0)
old_data = copy_shared_data(orderer._data)
initial_view_no = 3
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
internal_bus.send(NewViewAccepted(view_no=initial_view_no + 1,
view_changes=new_view.viewChanges,
checkpoint=new_view.checkpoint,
batches=new_view.batches))
new_data = copy_shared_data(orderer._data)
assert old_data == new_data
def test_update_shared_data_on_new_view_checkpoint_applied(internal_bus, orderer):
initial_view_no = 3
orderer._data.preprepared = []
orderer._data.prepared = []
orderer._data.view_no = initial_view_no + 1
old_data = copy_shared_data(orderer._data)
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
view_changes=new_view.viewChanges,
checkpoint=new_view.checkpoint,
batches=new_view.batches))
new_data = copy_shared_data(orderer._data)
check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)
# (we will request the PrePrepares instead, see next tests)
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
@pytest.mark.parametrize('all_ordered', [True, False], ids=['All-ordered', 'All-non-ordered'])
def test_process_preprepare_on_new_view_checkpoint_applied(internal_bus, external_bus,
orderer, is_primary,
all_ordered,
initial_view_no,
pre_prepares, stored_old_view_pre_prepares):
# !!!SETUP!!!
orderer._data.view_no = initial_view_no + 1
batches = create_batches_from_preprepares(pre_prepares)
orderer._data.prev_view_prepare_cert = batches[-1].pp_seq_no
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200,
batches=batches)
# emulate that we received all PrePrepares before View Change
orderer._update_old_view_preprepares(stored_old_view_pre_prepares)
# emulate that we've already ordered the PrePrepares
if all_ordered and stored_old_view_pre_prepares:
orderer.last_ordered_3pc = (initial_view_no, stored_old_view_pre_prepares[-1].ppSeqNo)
internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
view_changes=new_view.viewChanges,
checkpoint=new_view.checkpoint,
batches=new_view.batches))
if not orderer.is_master:
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
return
stored_batch_ids = [preprepare_to_batch_id(pp) for pp in stored_old_view_pre_prepares]
assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_view_no=initial_view_no,
pp_seq_no=batch_id.pp_seq_no, pp_digest=batch_id.pp_digest)
for batch_id in new_view.batches if batch_id in stored_batch_ids]
updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
for pp in stored_old_view_pre_prepares:
new_pp = updateNamedTuple(pp, viewNo=initial_view_no + 1, originalViewNo=pp.viewNo)
assert (initial_view_no + 1, new_pp.ppSeqNo) in updated_prepares_collection
assert updated_prepares_collection[(initial_view_no + 1, new_pp.ppSeqNo)] == new_pp
assert not non_updated_prepares_collection
if not is_primary:
check_prepares_sent(external_bus, stored_old_view_pre_prepares, initial_view_no + 1)
else:
assert len(external_bus.sent_messages) == len(pre_prepares) - len(stored_old_view_pre_prepares)
assert orderer._data.prepared == []
# check that missing PrePrepares have been requested
expected_requested_batches = [batch_id for batch_id in new_view.batches if batch_id not in stored_batch_ids]
check_request_old_view_preprepares_sent(external_bus, expected_requested_batches)
def test_send_reply_on_old_view_pre_prepares_request(external_bus, orderer,
initial_view_no,
stored_old_view_pre_prepares,
requested_old_view_pre_prepares):
# Setup
orderer._data.view_no = initial_view_no + 2
orderer._update_old_view_preprepares(stored_old_view_pre_prepares)
# Receive OldViewPrePrepareRequest req
batches = [preprepare_to_batch_id(pp) for pp in requested_old_view_pre_prepares]
req = OldViewPrePrepareRequest(0, batches)
frm = "node1"
orderer._network.process_incoming(req, generateName(frm, orderer._data.inst_id))
# Check that OldViewPrePrepareReply is sent for all requested PrePrepares
if not orderer.is_master:
assert len(external_bus.sent_messages) == 0
return
# equal to set's union operation
expected_pps = [i for i in stored_old_view_pre_prepares if i in requested_old_view_pre_prepares]
expected_pps = sorted(expected_pps, key=lambda pp: pp.ppSeqNo)
check_reply_old_view_preprepares_sent(external_bus, frm, expected_pps)
def test_process_preprepare_on_old_view_pre_prepares_reply(external_bus, internal_bus,
orderer, is_primary,
initial_view_no,
pre_prepares):
orderer._data.view_no = initial_view_no + 1
orderer._data.prev_view_prepare_cert = orderer.lastPrePrepareSeqNo + 1
new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200,
batches=create_batches_from_preprepares(pre_prepares))
orderer._data.new_view = new_view
rep = OldViewPrePrepareReply(0, [pp._asdict() for pp in pre_prepares])
orderer._network.process_incoming(rep, generateName("node1", orderer._data.inst_id))
if not orderer.is_master:
assert orderer._data.preprepared == []
assert orderer._data.prepared == []
return
assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_view_no=pp.viewNo,
pp_seq_no=pp.ppSeqNo, pp_digest=pp.digest)
for pp in pre_prepares]
updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
for pp in pre_prepares:
new_pp = updateNamedTuple(pp, viewNo=initial_view_no + 1, originalViewNo=pp.viewNo)
assert (initial_view_no + 1, new_pp.ppSeqNo) in updated_prepares_collection
assert updated_prepares_collection[(initial_view_no + 1, new_pp.ppSeqNo)] == new_pp
assert not non_updated_prepares_collection
if not is_primary:
check_prepares_sent(external_bus, pre_prepares, initial_view_no + 1)
else:
assert len(external_bus.sent_messages) == 0
assert orderer._data.prepared == []
| true | true |
f72dcdf889578279b6860c581fa614da112b719a | 8,112 | py | Python | fromage/utils/run_sequence.py | Yulin832/fromage | f6c84d5684ca5abfcc979540bb97cc8f105f963d | [
"MIT"
] | 14 | 2018-11-19T09:36:59.000Z | 2021-05-12T13:46:20.000Z | fromage/utils/run_sequence.py | Yulin832/fromage | f6c84d5684ca5abfcc979540bb97cc8f105f963d | [
"MIT"
] | 3 | 2020-01-22T17:26:38.000Z | 2020-07-21T14:23:52.000Z | fromage/utils/run_sequence.py | Yulin832/fromage | f6c84d5684ca5abfcc979540bb97cc8f105f963d | [
"MIT"
] | 9 | 2019-04-22T14:56:08.000Z | 2022-02-02T13:30:58.000Z | """Defines the Ewa object which interfaces with Ewald"""
import os
import subprocess
import time
import sys
import fromage.io.edit_file as ef
import fromage.io.read_file as rf
from fromage.scripts.fro_assign_charges import assign_charges
class RunSeq(object):
    """
    Class which sets up the order of operations for preparing calculation

    Attributes
    ----------
    region_1 : Mol object
        The atoms in the central molecule
    cell : Mol object
        The atoms in the unit cell
    inputs : dict
        The input keywords
    mode : str
        String summarising the kind of run sequence required. options are:

        noew_nosc : EC
        noew_sc : SC-EC
        ew_nosc : EEC
        ew_sc : SC-EEC

    """

    def __init__(self, region_1, cell, inputs):
        self.region_1 = region_1
        self.cell = cell
        self.inputs = inputs
        # Derive the run mode string from the ewald / self_consistent flags
        if self.inputs["ewald"]:
            pref = "ew_"
        else:
            pref = "noew_"
        if self.inputs["self_consistent"]:
            post = "sc"
        else:
            post = "nosc"
        self.mode = pref + post
        # dirs
        self.here = os.getcwd()
        self.ewald_path = os.path.join(self.here, "ewald/")
        # Progress log; closed at the end of run()
        self.out_file = open("prep.out", "a")
        return

    def write_out(self, string):
        """Append a message to prep.out and flush so progress is visible live."""
        self.out_file.write(string)
        self.out_file.flush()
        return

    def make_region_2(self):
        """
        Get region 2 Mols with different charges

        Returns
        -------
        shell_high : Mol object
            Region 2 molecules with high level of theory charges
        shell_low : Mole object
            Region 2 molecules with low level of theory charges

        """
        if self.inputs["target_shell"]:
            # The outer shell geometry was supplied explicitly by the user
            shell_high = rf.mol_from_file(self.inputs["target_shell"])
            self.write_out("Outer region read in with " + str(len(shell_high)) + " atoms.\n")
            high_level_pop_mol = rf.mol_from_gauss(self.inputs["high_pop_file"], pop=self.inputs["high_pop_method"])
            shell_high.populate(high_level_pop_mol)
        else:
            # Build the shell as a cluster around region 1, then strip out any
            # atom that coincides with a region 1 atom
            shell_high = self.cell.make_cluster(self.inputs["clust_rad"], central_mol = self.region_1, mode = self.inputs["clust_mode"])
            for atom_i in self.region_1:
                for atom_j in shell_high:
                    if atom_i.very_close(atom_j):
                        shell_high.remove(atom_j)
                        break
            self.write_out("Outer region generated with " + str(len(shell_high)) + " atoms.\n")
        low_level_pop_mol = rf.mol_from_gauss(self.inputs["low_pop_file"], pop=self.inputs["low_pop_method"])
        shell_low = shell_high.copy()
        shell_low.populate(low_level_pop_mol)
        return shell_low, shell_high

    def run_ewald(self, calc_name=None):
        """Run an Ewald point-charge fit in ./ewald/ and return the fitted points.

        Parameters
        ----------
        calc_name : str, optional
            Base name for the Ewald input/output files. Defaults to the run name.

        Returns
        -------
        points : Mol object
            The fitted background charges read from <calc_name>.pts-fro

        """
        if calc_name is None:
            calc_name = self.inputs["name"]
        if not os.path.exists(self.ewald_path):
            os.makedirs(self.ewald_path)
        os.chdir(self.ewald_path)
        ef.write_uc(calc_name + ".uc", self.inputs["vectors"], self.inputs["an"], self.inputs["bn"], self.inputs["cn"], self.cell)
        ef.write_qc(calc_name + ".qc", self.region_1)
        ef.write_ew_in(calc_name, "ewald.in." + calc_name, self.inputs["nchk"], self.inputs["nat"])
        ef.write_seed()
        # run Ewald, discarding its stdout. subprocess.DEVNULL avoids leaking
        # an open handle to os.devnull on every call.
        self.write_out("Ewald calculation started\n")
        ew_start = time.time()
        subprocess.call("${FRO_EWALD} < ewald.in." + calc_name, stdout=subprocess.DEVNULL, shell=True)
        ew_end = time.time()
        self.write_out("Ewald calculation finished after "+str(round(ew_end - ew_start,3))+" s\n")
        points = rf.read_points(calc_name + ".pts-fro")
        if len(points) == 0:
            self.write_out("Something went wrong with the Ewald calculation, stopping...\n")
            sys.exit()
        os.chdir(self.here)
        return points

    def run(self):
        """
        Run the calculation for the corresponding self.mode

        Returns
        -------
        region_2 : Mol object
            Region 2 atoms with low level of theory charges
        high_points : Mol object
            Points that will embed mh, regardless of self.mode

        """
        run_types = {"noew_nosc":self.run_ec,
                    "noew_sc":self.run_scec,
                    "ew_nosc":self.run_eec,
                    "ew_sc":self.run_sceec}
        # execute the appropriate run type
        region_2, high_points = run_types[self.mode]()
        self.out_file.close()
        return region_2, high_points

    def run_ec(self):
        """EC: plain electrostatic embedding, no Ewald, no self-consistency."""
        region_2_low , region_2_high = self.make_region_2()
        return region_2_low, region_2_high

    def run_scec(self):
        """SC-EC: iterate region 1 charges to self-consistency against the shell."""
        region_2_low , region_2_high = self.make_region_2()
        self.self_consistent(region_2_high)
        return region_2_low, region_2_high

    def run_eec(self):
        """EEC: embed region 1 in Ewald-fitted point charges."""
        region_2_low , region_2_high = self.make_region_2()
        ew_points = self.run_ewald()
        return region_2_low, ew_points

    def run_sceec(self):
        """SC-EEC: self-consistent charges with an Ewald-fitted background."""
        region_2_low , region_2_high = self.make_region_2()
        self.self_consistent(None) # here, the None argument means that the initial background has yet to be computed
        ew_points = self.run_ewald()
        return region_2_low, ew_points

    def single_sc_loop(self, sc_loop, initial_bg):
        """Run a single iteration of the sc loop, with or without Ewald"""
        sc_name = "sc_" + self.inputs["name"]
        # Initial charges in mol
        old_charges = self.region_1.charges()
        # if sc_eec then there is no initial_bg so it needs to be computed
        if self.mode == "ew_sc":
            points = self.run_ewald(calc_name = sc_name)
            initial_bg = points
        ef.write_gauss(sc_name + ".com", self.region_1, initial_bg, self.inputs["sc_temp"])
        subprocess.call("${FRO_GAUSS} " + sc_name + ".com", shell=True)
        # Calculate new charges
        intact_charges, new_energy, char_self, char_int = rf.read_g_char(sc_name + ".log", self.inputs["high_pop_method"], debug=True)
        # Correct charges if they are not perfectly neutral
        if sum(intact_charges) != 0.0:
            temp_correct = sum(intact_charges) / len(intact_charges)
            intact_charges = [i - temp_correct for i in intact_charges]
        dummy_mol = self.region_1.copy()
        dummy_mol.raw_assign_charges(intact_charges)
        self.region_1.populate(dummy_mol)
        # Damp the change in charges to stabilise the iteration
        new_charges = [new * (1 - self.inputs["damping"]) + old * self.inputs["damping"] for new, old in zip(self.region_1.charges(), old_charges)]
        # Correct charges again (due to damping)
        if sum(new_charges) != 0.0:
            temp_correct = sum(new_charges) / len(new_charges)
            new_charges = [i - temp_correct for i in new_charges]
        # assign damped charges
        self.region_1.raw_assign_charges(new_charges)
        self.cell.populate(self.region_1)
        if self.mode == "noew_sc":
            assign_charges(self.region_1, initial_bg)
        # Calculate mean absolute deviation between initial and new charges
        deviation = sum([abs(i - j)
                         for (i, j) in zip(self.region_1.charges(), old_charges)]) / len(self.region_1)
        out_str = ("Iteration:", sc_loop, "Deviation:",
                   deviation, "Energy:", new_energy, "Charge self energy:", char_self, "Total - charge self:", new_energy - char_self)
        self.write_out("{:<6} {:<5} {:<6} {:10.6f} {:<6} {:10.6f} {:<6} {:10.6f} {:<6} {:10.6f}\n".format(*out_str))
        return deviation

    def self_consistent(self, initial_bg):
        """Run single iterations until the charge deviation is below the tol"""
        sc_iter = 0
        dev = float("inf")
        while dev > self.inputs["dev_tol"]:
            sc_iter += 1
            dev = self.single_sc_loop(sc_iter, initial_bg)
        self.write_out("Tolerance reached: " + str(dev) + " < " + str(self.inputs["dev_tol"]) + "\n")
        return
| 37.382488 | 147 | 0.608358 | import os
import subprocess
import time
import sys
import fromage.io.edit_file as ef
import fromage.io.read_file as rf
from fromage.scripts.fro_assign_charges import assign_charges
class RunSeq(object):
def __init__(self, region_1, cell, inputs):
self.region_1 = region_1
self.cell = cell
self.inputs = inputs
if self.inputs["ewald"]:
pref = "ew_"
else:
pref = "noew_"
if self.inputs["self_consistent"]:
post = "sc"
else:
post = "nosc"
self.mode = pref + post
self.here = os.getcwd()
self.ewald_path = os.path.join(self.here,"ewald/")
self.out_file = open("prep.out","a")
return
def write_out(self,string):
self.out_file.write(string)
self.out_file.flush()
return
def make_region_2(self):
if self.inputs["target_shell"]:
shell_high = rf.mol_from_file(self.inputs["target_shell"])
self.write_out("Outer region read in with " + str(len(shell_high)) + " atoms.\n")
high_level_pop_mol = rf.mol_from_gauss(self.inputs["high_pop_file"], pop=self.inputs["high_pop_method"])
shell_high.populate(high_level_pop_mol)
else:
shell_high = self.cell.make_cluster(self.inputs["clust_rad"], central_mol = self.region_1, mode = self.inputs["clust_mode"])
for atom_i in self.region_1:
for atom_j in shell_high:
if atom_i.very_close(atom_j):
shell_high.remove(atom_j)
break
self.write_out("Outer region generated with " + str(len(shell_high)) + " atoms.\n")
low_level_pop_mol = rf.mol_from_gauss(self.inputs["low_pop_file"], pop=self.inputs["low_pop_method"])
shell_low = shell_high.copy()
shell_low.populate(low_level_pop_mol)
return shell_low, shell_high
def run_ewald(self, calc_name=None):
if calc_name is None:
calc_name = self.inputs["name"]
if not os.path.exists(self.ewald_path):
os.makedirs(self.ewald_path)
os.chdir(self.ewald_path)
FNULL = open(os.devnull, 'w')
ef.write_uc(calc_name + ".uc", self.inputs["vectors"], self.inputs["an"], self.inputs["bn"], self.inputs["cn"], self.cell)
ef.write_qc(calc_name + ".qc", self.region_1)
ef.write_ew_in(calc_name, "ewald.in." + calc_name, self.inputs["nchk"], self.inputs["nat"])
ef.write_seed()
self.write_out("Ewald calculation started\n")
ew_start = time.time()
subprocess.call("${FRO_EWALD} < ewald.in." + calc_name, stdout=FNULL, shell=True)
ew_end = time.time()
self.write_out("Ewald calculation finished after "+str(round(ew_end - ew_start,3))+" s\n")
points = rf.read_points(calc_name + ".pts-fro")
if len(points) == 0:
self.write_out("Something went wrong with the Ewald calculation, stopping...\n")
sys.exit()
os.chdir(self.here)
return points
def run(self):
run_types = {"noew_nosc":self.run_ec,
"noew_sc":self.run_scec,
"ew_nosc":self.run_eec,
"ew_sc":self.run_sceec}
region_2, high_points = run_types[self.mode]()
self.out_file.close()
return region_2, high_points
def run_ec(self):
region_2_low , region_2_high = self.make_region_2()
return region_2_low, region_2_high
def run_scec(self):
region_2_low , region_2_high = self.make_region_2()
self.self_consistent(region_2_high)
return region_2_low, region_2_high
def run_eec(self):
region_2_low , region_2_high = self.make_region_2()
ew_points = self.run_ewald()
return region_2_low, ew_points
def run_sceec(self):
region_2_low , region_2_high = self.make_region_2()
self.self_consistent(None)
ew_points = self.run_ewald()
return region_2_low, ew_points
def single_sc_loop(self, sc_loop, initial_bg):
sc_name = "sc_" + self.inputs["name"]
old_charges = self.region_1.charges()
if self.mode == "ew_sc":
points = self.run_ewald(calc_name = sc_name)
initial_bg = points
ef.write_gauss(sc_name + ".com", self.region_1, initial_bg, self.inputs["sc_temp"])
subprocess.call("${FRO_GAUSS} " + sc_name + ".com", shell=True)
intact_charges, new_energy, char_self, char_int = rf.read_g_char(sc_name + ".log", self.inputs["high_pop_method"], debug=True)
if sum(intact_charges) != 0.0:
temp_correct = sum(intact_charges) / len(intact_charges)
intact_charges = [i - temp_correct for i in intact_charges]
dummy_mol = self.region_1.copy()
dummy_mol.raw_assign_charges(intact_charges)
self.region_1.populate(dummy_mol)
new_charges = [new * (1 - self.inputs["damping"]) + old * self.inputs["damping"] for new, old in zip(self.region_1.charges(), old_charges)]
if sum(new_charges) != 0.0:
temp_correct = sum(new_charges) / len(new_charges)
new_charges = [i - temp_correct for i in new_charges]
self.region_1.raw_assign_charges(new_charges)
self.cell.populate(self.region_1)
if self.mode == "noew_sc":
assign_charges(self.region_1, initial_bg)
deviation = sum([abs(i - j)
for (i, j) in zip(self.region_1.charges(), old_charges)]) / len(self.region_1)
out_str = ("Iteration:", sc_loop, "Deviation:",
deviation, "Energy:", new_energy, "Charge self energy:", char_self, "Total - charge self:", new_energy - char_self)
self.write_out("{:<6} {:<5} {:<6} {:10.6f} {:<6} {:10.6f} {:<6} {:10.6f} {:<6} {:10.6f}\n".format(*out_str))
return deviation
def self_consistent(self, initial_bg):
sc_iter = 0
dev = float("inf")
while dev > self.inputs["dev_tol"]:
sc_iter += 1
dev = self.single_sc_loop(sc_iter, initial_bg)
self.write_out("Tolerance reached: " + str(dev) + " < " + str(self.inputs["dev_tol"]) + "\n")
return
| true | true |
f72dcefb884ae7a37171f07d40e607e0a565ea72 | 803 | py | Python | HW6/VeronyWise/task6.2.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW6/VeronyWise/task6.2.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW6/VeronyWise/task6.2.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | 6 | 2022-02-22T22:30:49.000Z | 2022-03-28T12:51:19.000Z | PI = 3.14
def rectangle_funk():
a = float(input("Please, enter first side of rectangle: "))
b = float(input("Please, enter second side of rectangle: "))
return a * b
def triangle_funk():
a = float(input("Please, enter side of triangle: "))
h = float(input("Please, enter height of triangle: "))
return 0.5 * a * h
def circle_funk():
radius = float(input("Please, enter radius of circle: "))
return (PI * radius**2)
area = input("What area do you want: rectangle - 1, triangle - 2, circle - 3? ")
if area == "1":
print(f"{rectangle_funk()} is area of rectangle")
elif area == "2":
print(f"{triangle_funk()} is area of triangle")
elif area == "3":
print(f"{circle_funk()} is area of circle")
else:
print("Sorry, you entered wrong number")
| 30.884615 | 80 | 0.622665 | PI = 3.14
def rectangle_funk():
a = float(input("Please, enter first side of rectangle: "))
b = float(input("Please, enter second side of rectangle: "))
return a * b
def triangle_funk():
a = float(input("Please, enter side of triangle: "))
h = float(input("Please, enter height of triangle: "))
return 0.5 * a * h
def circle_funk():
radius = float(input("Please, enter radius of circle: "))
return (PI * radius**2)
area = input("What area do you want: rectangle - 1, triangle - 2, circle - 3? ")
if area == "1":
print(f"{rectangle_funk()} is area of rectangle")
elif area == "2":
print(f"{triangle_funk()} is area of triangle")
elif area == "3":
print(f"{circle_funk()} is area of circle")
else:
print("Sorry, you entered wrong number")
| true | true |
f72dcf7abb7904d8b177a84c9aed4d7d56af6ce1 | 2,746 | py | Python | tests/test_vizplugin.py | dummyindex/viztracer | 73d7b9a1cdcd8f91ad6ef9dd522b6be94de22b4d | [
"Apache-2.0"
] | 1,798 | 2020-08-13T05:10:54.000Z | 2022-03-31T16:43:17.000Z | tests/test_vizplugin.py | dummyindex/viztracer | 73d7b9a1cdcd8f91ad6ef9dd522b6be94de22b4d | [
"Apache-2.0"
] | 185 | 2020-08-15T15:37:42.000Z | 2022-03-31T19:31:23.000Z | tests/test_vizplugin.py | dummyindex/viztracer | 73d7b9a1cdcd8f91ad6ef9dd522b6be94de22b4d | [
"Apache-2.0"
] | 100 | 2020-08-20T06:11:20.000Z | 2022-03-29T03:29:00.000Z | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from contextlib import redirect_stdout
import io
from .cmdline_tmpl import CmdlineTmpl
from viztracer import VizTracer
from viztracer.vizplugin import VizPluginBase, VizPluginError
class MyPlugin(VizPluginBase):
    """Minimal plugin used to observe the tracer's plugin callbacks."""

    def __init__(self, terminate_well=True):
        self.event_counter = 0          # total messages received
        self.handler_triggered = False  # set once the pre-save handler runs
        self.terminate_well = terminate_well

    def support_version(self):
        """Report the viztracer version this plugin was written against."""
        return "0.10.5"

    def message(self, m_type, payload):
        """Count every message; react to pre-save events and terminate commands."""
        self.event_counter += 1

        def mark_handled(data):
            self.handler_triggered = True

        if m_type == "event":
            if payload["when"] == "pre-save":
                return {"action": "handle_data", "handler": mark_handled}
        elif m_type == "command" and payload["cmd_type"] == "terminate":
            return {"success": self.terminate_well}
        return {}
class MyPluginIncomplete(VizPluginBase):
    # Deliberately omits support_version() so constructing a tracer with this
    # plugin exercises the NotImplementedError path (see test_invalid).
    pass
class MyPluginFuture(VizPluginBase):
    # Claims a version far beyond any release so the tracer prints its
    # "support version is higher" warning (see test_version).
    def support_version(self):
        return "9999.999.99"
class TestVizPlugin(CmdlineTmpl):
    """End-to-end checks of the VizTracer plugin protocol."""
    def test_basic(self):
        # A full start/stop/save cycle should deliver 4 messages to the
        # plugin and invoke the pre-save data handler.
        pl = MyPlugin()
        tracer = VizTracer(plugins=[pl])
        tracer.start()
        tracer.stop()
        tracer.save()
        self.assertEqual(pl.event_counter, 4)
        self.assertEqual(pl.handler_triggered, True)
    def test_invalid(self):
        # Non-plugin objects are rejected with TypeError; plugins missing
        # support_version() are rejected with NotImplementedError.
        invalid_pl = []
        with self.assertRaises(TypeError):
            _ = VizTracer(plugins=[invalid_pl])
        with self.assertRaises(NotImplementedError):
            _ = VizTracer(plugins=[MyPluginIncomplete()])
    def test_terminate(self):
        # A plugin that reports an unsuccessful terminate should make the
        # tracer raise VizPluginError on context-manager exit.
        pl = MyPlugin()
        with VizTracer(plugins=[pl]):
            _ = []
        pl = MyPlugin(terminate_well=False)
        with self.assertRaises(VizPluginError):
            with VizTracer(plugins=[pl]):
                _ = []
    def test_version(self):
        # A plugin demanding a future version should produce a warning on
        # stdout rather than fail.
        pl = MyPluginFuture()
        s = io.StringIO()
        with redirect_stdout(s):
            with VizTracer(plugins=[pl]):
                _ = []
        self.assertIn("support version is higher", s.getvalue())
    def test_cmdline(self):
        # --plugin accepts a valid module path; wrong/nonexistent modules and
        # modules without a plugin entry point must fail cleanly.
        self.template(["viztracer", "--plugin", "tests.modules.dummy_vizplugin", "--", "cmdline_test.py"])
        self.template(["viztracer", "--plugin", "tests.modules.dummy_vizplugin_wrong", "--", "cmdline_test.py"], success=False)
        self.template(["viztracer", "--plugin", "tests.modules", "--", "cmdline_test.py"], success=False)
        self.template(["viztracer", "--plugin", "invalid", "--", "cmdline_test.py"], success=False)
| 31.204545 | 127 | 0.616533 |
from contextlib import redirect_stdout
import io
from .cmdline_tmpl import CmdlineTmpl
from viztracer import VizTracer
from viztracer.vizplugin import VizPluginBase, VizPluginError
class MyPlugin(VizPluginBase):
def __init__(self, terminate_well=True):
self.event_counter = 0
self.handler_triggered = False
self.terminate_well = terminate_well
def support_version(self):
return "0.10.5"
def message(self, m_type, payload):
def f(data):
self.handler_triggered = True
self.event_counter += 1
if m_type == "event" and payload["when"] == "pre-save":
return {
"action": "handle_data",
"handler": f
}
if m_type == "command":
if payload["cmd_type"] == "terminate":
return {"success": self.terminate_well}
return {}
class MyPluginIncomplete(VizPluginBase):
pass
class MyPluginFuture(VizPluginBase):
def support_version(self):
return "9999.999.99"
class TestVizPlugin(CmdlineTmpl):
def test_basic(self):
pl = MyPlugin()
tracer = VizTracer(plugins=[pl])
tracer.start()
tracer.stop()
tracer.save()
self.assertEqual(pl.event_counter, 4)
self.assertEqual(pl.handler_triggered, True)
def test_invalid(self):
invalid_pl = []
with self.assertRaises(TypeError):
_ = VizTracer(plugins=[invalid_pl])
with self.assertRaises(NotImplementedError):
_ = VizTracer(plugins=[MyPluginIncomplete()])
def test_terminate(self):
pl = MyPlugin()
with VizTracer(plugins=[pl]):
_ = []
pl = MyPlugin(terminate_well=False)
with self.assertRaises(VizPluginError):
with VizTracer(plugins=[pl]):
_ = []
def test_version(self):
pl = MyPluginFuture()
s = io.StringIO()
with redirect_stdout(s):
with VizTracer(plugins=[pl]):
_ = []
self.assertIn("support version is higher", s.getvalue())
def test_cmdline(self):
self.template(["viztracer", "--plugin", "tests.modules.dummy_vizplugin", "--", "cmdline_test.py"])
self.template(["viztracer", "--plugin", "tests.modules.dummy_vizplugin_wrong", "--", "cmdline_test.py"], success=False)
self.template(["viztracer", "--plugin", "tests.modules", "--", "cmdline_test.py"], success=False)
self.template(["viztracer", "--plugin", "invalid", "--", "cmdline_test.py"], success=False)
| true | true |
f72dd1d6165a3a3ef372a83b1913c16b5e3470a6 | 3,636 | py | Python | pdm/models/caches.py | pohlt/pdm | f39be04b876f1fae572df33990a9325c79d8679f | [
"MIT"
] | null | null | null | pdm/models/caches.py | pohlt/pdm | f39be04b876f1fae572df33990a9325c79d8679f | [
"MIT"
] | null | null | null | pdm/models/caches.py | pohlt/pdm | f39be04b876f1fae572df33990a9325c79d8679f | [
"MIT"
] | null | null | null | import hashlib
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Optional
from pdm._types import CandidateInfo
from pdm.exceptions import CorruptedCacheError
from pdm.models import pip_shims
from pdm.utils import open_file
if TYPE_CHECKING:
from pip._vendor import requests
from pdm.models.candidates import Candidate
class CandidateInfoCache:
    """JSON-file-backed cache of (dependencies, requires_python, summary) info.

    The whole mapping is held in memory and flushed to ``cache_file`` after
    every mutation. Cache keys are ``name[extras]-version`` strings, so only
    candidates with a resolved name and version can be cached.
    """

    def __init__(self, cache_file: Path) -> None:
        self.cache_file = cache_file
        self._cache = {}  # type: Dict[str, CandidateInfo]
        self._read_cache()

    def _read_cache(self) -> None:
        # Missing file just means an empty cache; broken JSON is fatal.
        if not self.cache_file.exists():
            self._cache = {}
            return
        try:
            with self.cache_file.open() as fp:
                self._cache = json.load(fp)
        except json.JSONDecodeError:
            raise CorruptedCacheError("The dependencies cache seems to be broken.")

    def _write_cache(self) -> None:
        # Persist the full in-memory mapping; called after every mutation.
        with self.cache_file.open("w") as fp:
            json.dump(self._cache, fp)

    @staticmethod
    def _get_key(candidate):
        # type: (Candidate) -> str
        # Name and version are set when dependencies are resolved,
        # so use them for cache key. Local directories won't be cached.
        if not candidate.name or not candidate.version:
            raise KeyError
        extras = ""
        if candidate.req.extras:
            extras = "[{}]".format(",".join(sorted(candidate.req.extras)))
        return f"{candidate.name}{extras}-{candidate.version}"

    def get(self, candidate):
        # type: (Candidate) -> CandidateInfo
        """Return the cached info for *candidate*; KeyError if absent."""
        return self._cache[self._get_key(candidate)]

    def set(self, candidate, value):
        # type: (Candidate, CandidateInfo) -> None
        """Store *value* for *candidate* and flush to disk."""
        self._cache[self._get_key(candidate)] = value
        self._write_cache()

    def delete(self, candidate):
        # type: (Candidate) -> None
        """Drop *candidate*'s entry if present, then flush to disk."""
        try:
            del self._cache[self._get_key(candidate)]
        except KeyError:
            # Either the key was absent or the candidate has no name/version.
            pass
        self._write_cache()

    def clear(self) -> None:
        """Empty the cache in memory and on disk."""
        self._cache.clear()
        self._write_cache()
class HashCache(pip_shims.SafeFileCache):
    """Caches hashes of PyPI artifacts so we do not need to re-download them.

    Hashes are only cached when the URL appears to contain a hash in it and the
    cache key includes the hash value returned from the server). This ought to
    avoid issues where the location on the server changes.
    """

    def __init__(self, *args, **kwargs):
        # Session used to download artifacts when a hash must be computed
        # locally; presumably assigned by the owner after construction —
        # TODO confirm against callers.
        self.session = None  # type: Optional[requests.Session]
        super(HashCache, self).__init__(*args, **kwargs)

    def get_hash(self, link: pip_shims.Link) -> str:
        # Return "<algo>:<digest>" for the artifact at *link*, consulting the
        # on-disk cache first, then a strong hash embedded in the link, and
        # only downloading as a last resort.
        # If there is no link hash (i.e., md5, sha256, etc.), we don't want
        # to store it.
        hash_value = self.get(link.url)
        if not hash_value:
            if link.hash and link.hash_name in pip_shims.STRONG_HASHES:
                # Trust a strong hash already present in the link URL.
                hash_value = f"{link.hash_name}:{link.hash}"
            else:
                # Fall back to downloading the file and hashing it ourselves.
                hash_value = self._get_file_hash(link)
            # The underlying cache stores bytes.
            hash_value = hash_value.encode()
            self.set(link.url, hash_value)
        return hash_value.decode("utf8")

    def _get_file_hash(self, link: pip_shims.Link) -> str:
        # Stream the artifact in chunks so large files are never fully
        # loaded into memory.
        h = hashlib.new(pip_shims.FAVORITE_HASH)
        with open_file(link.url, self.session) as fp:
            for chunk in iter(lambda: fp.read(8096), b""):
                h.update(chunk)
        return ":".join([h.name, h.hexdigest()])
| 33.357798 | 87 | 0.621837 | import hashlib
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Optional
from pdm._types import CandidateInfo
from pdm.exceptions import CorruptedCacheError
from pdm.models import pip_shims
from pdm.utils import open_file
if TYPE_CHECKING:
from pip._vendor import requests
from pdm.models.candidates import Candidate
class CandidateInfoCache:
def __init__(self, cache_file: Path) -> None:
self.cache_file = cache_file
self._cache = {}
self._read_cache()
def _read_cache(self) -> None:
if not self.cache_file.exists():
self._cache = {}
return
with self.cache_file.open() as fp:
try:
self._cache = json.load(fp)
except json.JSONDecodeError:
raise CorruptedCacheError("The dependencies cache seems to be broken.")
def _write_cache(self) -> None:
with self.cache_file.open("w") as fp:
json.dump(self._cache, fp)
@staticmethod
def _get_key(candidate):
if not candidate.name or not candidate.version:
raise KeyError
extras = (
"[{}]".format(",".join(sorted(candidate.req.extras)))
if candidate.req.extras
else ""
)
return f"{candidate.name}{extras}-{candidate.version}"
def get(self, candidate):
# type: (Candidate) -> CandidateInfo
key = self._get_key(candidate)
return self._cache[key]
def set(self, candidate, value):
# type: (Candidate, CandidateInfo) -> None
key = self._get_key(candidate)
self._cache[key] = value
self._write_cache()
def delete(self, candidate):
# type: (Candidate) -> None
try:
del self._cache[self._get_key(candidate)]
except KeyError:
pass
self._write_cache()
def clear(self) -> None:
self._cache.clear()
self._write_cache()
class HashCache(pip_shims.SafeFileCache):
def __init__(self, *args, **kwargs):
self.session = None # type: Optional[requests.Session]
super(HashCache, self).__init__(*args, **kwargs)
def get_hash(self, link: pip_shims.Link) -> str:
# If there is no link hash (i.e., md5, sha256, etc.), we don't want
hash_value = self.get(link.url)
if not hash_value:
if link.hash and link.hash_name in pip_shims.STRONG_HASHES:
hash_value = f"{link.hash_name}:{link.hash}"
else:
hash_value = self._get_file_hash(link)
hash_value = hash_value.encode()
self.set(link.url, hash_value)
return hash_value.decode("utf8")
def _get_file_hash(self, link: pip_shims.Link) -> str:
h = hashlib.new(pip_shims.FAVORITE_HASH)
with open_file(link.url, self.session) as fp:
for chunk in iter(lambda: fp.read(8096), b""):
h.update(chunk)
return ":".join([h.name, h.hexdigest()])
| true | true |
f72dd40cd6572923ea57e8bfdaec31d5b10e268a | 1,581 | py | Python | taskdo/urls.py | reven-tang/ITMP | 8d6686edb19fcc26c9cf1f7e14037f9d38a6e702 | [
"BSD-2-Clause"
] | null | null | null | taskdo/urls.py | reven-tang/ITMP | 8d6686edb19fcc26c9cf1f7e14037f9d38a6e702 | [
"BSD-2-Clause"
] | 11 | 2020-06-05T19:40:52.000Z | 2022-03-11T23:38:17.000Z | taskdo/urls.py | reven-tang/ITMP | 8d6686edb19fcc26c9cf1f7e14037f9d38a6e702 | [
"BSD-2-Clause"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
from django.conf.urls.static import static
from django.conf import settings
from mysite.utils.uploadfile import upload_file
app_name = 'taskdo'
urlpatterns = [
url(r'^$', views.taskdo, name='taskdo'),
url(r'^deploy', views.deploy, name='deploy'),
url(r'^adhoc', views.adhoc, name='adhoc'),
url(r'^playbook', views.playbook, name='playbook'),
url(r'^history', views.HistoryView.as_view(), name='history'),
url(r'^exec_result', views.exec_result, name='exec_result'),
url(r'^search', views.SearchView.as_view(), name='search'),
url(r'^addmc', views.addmc, name='addmc'),
url(r'^editmc', views.editmc, name='editmc'),
url(r'^lookmc', views.lookmc, name='lookmc'),
url(r'^uploadfile/(?P<dir_name>[^/]+)$', upload_file, name='upload_image'),
url(r'^upload$', views.upload, name='upload'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 43.916667 | 79 | 0.693865 | from django.conf.urls import url
from . import views
from django.conf.urls.static import static
from django.conf import settings
from mysite.utils.uploadfile import upload_file
app_name = 'taskdo'
urlpatterns = [
url(r'^$', views.taskdo, name='taskdo'),
url(r'^deploy', views.deploy, name='deploy'),
url(r'^adhoc', views.adhoc, name='adhoc'),
url(r'^playbook', views.playbook, name='playbook'),
url(r'^history', views.HistoryView.as_view(), name='history'),
url(r'^exec_result', views.exec_result, name='exec_result'),
url(r'^search', views.SearchView.as_view(), name='search'),
url(r'^addmc', views.addmc, name='addmc'),
url(r'^editmc', views.editmc, name='editmc'),
url(r'^lookmc', views.lookmc, name='lookmc'),
url(r'^uploadfile/(?P<dir_name>[^/]+)$', upload_file, name='upload_image'),
url(r'^upload$', views.upload, name='upload'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | true | true |
f72dd5d068e536c1623eb55e84023e8a824941d4 | 686 | py | Python | app/core/migrations/0003_ingredient.py | kyuds/recipe-app | b697b4d471d5352144e0b2549904c09ea125e47e | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | kyuds/recipe-app | b697b4d471d5352144e0b2549904c09ea125e47e | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | kyuds/recipe-app | b697b4d471d5352144e0b2549904c09ea125e47e | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2022-01-18 04:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Ingredient model with a name and
    # an owning user (rows are cascade-deleted with their user). Do not edit
    # by hand once applied.

    dependencies = [
        ('core', '0002_tag'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.583333 | 118 | 0.618076 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f72dd6a26dd85a10f064ecb0503c18adbf76e449 | 12,860 | py | Python | src/awsstepfuncs/choice.py | suzil/awsstepfuncs | dd195b54bf8eaa381ea07244b276db6a1e82007b | [
"MIT"
] | 3 | 2020-11-29T18:31:50.000Z | 2021-01-14T07:46:40.000Z | src/awsstepfuncs/choice.py | suzil/aws-step-functions | dd195b54bf8eaa381ea07244b276db6a1e82007b | [
"MIT"
] | 54 | 2020-10-17T13:30:05.000Z | 2020-10-28T01:46:59.000Z | src/awsstepfuncs/choice.py | suzil/aws-step-functions | dd195b54bf8eaa381ea07244b276db6a1e82007b | [
"MIT"
] | 1 | 2021-08-04T04:40:27.000Z | 2021-08-04T04:40:27.000Z | from __future__ import annotations
from abc import ABC
from enum import Enum
from typing import Any, List, Union
from awsstepfuncs.abstract_state import AbstractState
from awsstepfuncs.errors import AWSStepFuncsValueError
from awsstepfuncs.reference_path import ReferencePath
class DataTestExpressionType(Enum):
    """All the different types of data-test expressions.

    Check section "Data-test expression" for a full list:
    https://states-language.net/#choice-state
    """

    # String comparisons. *_PATH variants compare against a value resolved
    # from a Reference Path in the state input rather than a literal.
    STRING_EQUALS = "string_equals"
    STRING_EQUALS_PATH = "string_equals_path"
    STRING_LESS_THAN = "string_less_than"
    STRING_LESS_THAN_PATH = "string_less_than_path"
    STRING_GREATER_THAN = "string_greater_than"
    STRING_GREATER_THAN_PATH = "string_greater_than_path"
    STRING_LESS_THAN_EQUALS = "string_less_than_equals"
    STRING_LESS_THAN_EQUALS_PATH = "string_less_than_equals_path"
    STRING_GREATER_THAN_EQUALS = "string_greater_than_equals"
    STRING_GREATER_THAN_EQUALS_PATH = "string_greater_than_equals_path"
    # Wildcard match against a pattern string.
    STRING_MATCHES = "string_matches"
    # Numeric comparisons.
    NUMERIC_EQUALS = "numeric_equals"
    NUMERIC_EQUALS_PATH = "numeric_equals_path"
    NUMERIC_LESS_THAN = "numeric_less_than"
    NUMERIC_LESS_THAN_PATH = "numeric_less_than_path"
    NUMERIC_GREATER_THAN = "numeric_greater_than"
    NUMERIC_GREATER_THAN_PATH = "numeric_greater_than_path"
    NUMERIC_LESS_THAN_EQUALS = "numeric_less_than_equals"
    NUMERIC_LESS_THAN_EQUALS_PATH = "numeric_less_than_equals_path"
    NUMERIC_GREATER_THAN_EQUALS = "numeric_greater_than_equals"
    NUMERIC_GREATER_THAN_EQUALS_PATH = "numeric_greater_than_equals_path"
    # Boolean comparisons.
    BOOLEAN_EQUALS = "boolean_equals"
    BOOLEAN_EQUALS_PATH = "boolean_equals_path"
    # Timestamp comparisons.
    TIMESTAMP_EQUALS = "timestamp_equals"
    TIMESTAMP_EQUALS_PATH = "timestamp_equals_path"
    TIMESTAMP_LESS_THAN = "timestamp_less_than"
    TIMESTAMP_LESS_THAN_PATH = "timestamp_less_than_path"
    TIMESTAMP_GREATER_THAN = "timestamp_greater_than"
    TIMESTAMP_GREATER_THAN_PATH = "timestamp_greater_than_path"
    TIMESTAMP_LESS_THAN_EQUALS = "timestamp_less_than_equals"
    TIMESTAMP_LESS_THAN_EQUALS_PATH = "timestamp_less_than_equals_path"
    TIMESTAMP_GREATER_THAN_EQUALS = "timestamp_greater_than_equals"
    TIMESTAMP_GREATER_THAN_EQUALS_PATH = "timestamp_greater_than_equals_path"
    # Type/presence tests; these take a boolean rather than a comparison value.
    IS_NULL = "is_null"
    IS_PRESENT = "is_present"
    IS_NUMERIC = "is_numeric"
    IS_STRING = "is_string"
    IS_BOOLEAN = "is_boolean"
    IS_TIMESTAMP = "is_timestamp"
class DataTestExpression:
    """A single data-test expression: an operator name plus a comparison value."""

    def __init__(self, type: str, expression: Any):  # noqa: A002
        """Store a validated operator name and its comparison value.

        Args:
            type: The type of data-test expression, such as string_equals.
            expression: The expression to use when evaluating based on the type.
        """
        # Constructing the enum raises ValueError for unknown operator names;
        # only the validated string value is kept.
        self.type = DataTestExpressionType(type).value
        # ``*_path`` operators compare against a Reference Path into the
        # state input, so wrap the raw expression accordingly.
        if "path" in type:
            self.expression = ReferencePath(expression)
        else:
            self.expression = expression

    def __repr__(self) -> str:
        """Show the operator name and its comparison value."""
        return f"{self.__class__.__name__}({self.type}={self.expression!r})"
class ChoiceRule:
    """Choice Rules are used in Choices.

    When initializing a Choice Rule, a data-test expression must be
    provided. A Choice Rule evaluates to `True` or `False` based on the
    data-test expression on some data.
    """

    def __init__(self, variable: str, **data_test_expression: Any):
        """Initialize a Choice Rule.

        Args:
            variable: The Reference Path to a variable in the state input.
            data_test_expression: The data-test expression to use.

        Raises:
            AWSStepFuncsValueError: Raised when there is not exactly one
                data-test expression defined.
        """
        self.variable = ReferencePath(variable)
        if len(data_test_expression) != 1:
            raise AWSStepFuncsValueError(
                "Exactly one data-test expression must be defined"
            )
        self.data_test_expression = DataTestExpression(
            *list(data_test_expression.items())[0]
        )

    def __repr__(self) -> str:
        """Return a string representation of the Choice Rule."""
        return f"{self.__class__.__name__}({self.variable!r}, {self.data_test_expression.type}={self.data_test_expression.expression!r})"

    def evaluate(self, data: Any) -> bool:
        """Evaluate the Choice Rule with a data-test expression on some data.

        Args:
            data: Input data to evaluate.

        Returns:
            True or false based on the data and the Choice Rule.
        """
        variable_value = self.variable.apply(data)
        if variable_value is None:
            return False
        # Dispatch to the matching handler method via getattr rather than
        # eval(): identical behavior (the type string was validated by the
        # enum in DataTestExpression), but no dynamic code evaluation.
        handler = getattr(self, f"_{self.data_test_expression.type}")
        if "path" in self.data_test_expression.type:
            return handler(data, variable_value)
        return handler(variable_value)

    def _is_present(self, variable_value: Any) -> bool:
        return variable_value is not None

    def _string_equals(self, variable_value: str) -> bool:
        return variable_value == self.data_test_expression.expression

    def _string_equals_path(self, data: Any, variable_value: str) -> bool:
        string_equals = self.data_test_expression.expression.apply(data)  # type: ignore
        if not (isinstance(string_equals, str)):
            raise AWSStepFuncsValueError(
                "string_equals_path must evaluate to a string value"
            )
        return variable_value == string_equals

    def _string_greater_than(self, variable_value: str) -> bool:
        return variable_value > self.data_test_expression.expression  # type: ignore

    def _string_greater_than_path(self, data: Any, variable_value: str) -> bool:
        string_greater_than = self.data_test_expression.expression.apply(data)  # type: ignore
        if not (isinstance(string_greater_than, str)):  # pragma: no cover
            raise AWSStepFuncsValueError(
                "string_greater_than_path must evaluate to a string value"
            )
        return variable_value > string_greater_than

    def _string_less_than(self, variable_value: str) -> bool:
        return variable_value < self.data_test_expression.expression  # type: ignore

    def _string_less_than_path(self, data: Any, variable_value: str) -> bool:
        string_less_than = self.data_test_expression.expression.apply(data)  # type: ignore
        if not (isinstance(string_less_than, str)):  # pragma: no cover
            raise AWSStepFuncsValueError(
                "string_less_than_path must evaluate to a string value"
            )
        return variable_value < string_less_than

    def _string_greater_than_equals(self, variable_value: str) -> bool:
        return variable_value >= self.data_test_expression.expression  # type: ignore

    def _string_greater_than_equals_path(self, data: Any, variable_value: str) -> bool:
        string_greater_than_equals = self.data_test_expression.expression.apply(data)  # type: ignore
        if not (isinstance(string_greater_than_equals, str)):  # pragma: no cover
            raise AWSStepFuncsValueError(
                "string_greater_than_equals_path must evaluate to a string value"
            )
        return variable_value >= string_greater_than_equals

    def _string_less_than_equals(self, variable_value: str) -> bool:
        return variable_value <= self.data_test_expression.expression  # type: ignore

    def _string_less_than_equals_path(self, data: Any, variable_value: str) -> bool:
        string_less_than_equals = self.data_test_expression.expression.apply(data)  # type: ignore
        if not (isinstance(string_less_than_equals, str)):  # pragma: no cover
            raise AWSStepFuncsValueError(
                "string_less_than_equals_path must evaluate to a string value"
            )
        return variable_value <= string_less_than_equals

    def _numeric_greater_than_equals(self, variable_value: Union[float, int]) -> bool:
        return variable_value >= self.data_test_expression.expression  # type: ignore

    def _numeric_greater_than_path(
        self, data: Any, variable_value: Union[float, int]
    ) -> bool:
        numeric_greater_than = self.data_test_expression.expression.apply(data)  # type: ignore
        if not (
            isinstance(numeric_greater_than, int)
            or isinstance(numeric_greater_than, float)
        ):
            raise AWSStepFuncsValueError(
                "numeric_greater_than_path must evaluate to a numeric value"
            )
        return variable_value > numeric_greater_than

    def _numeric_less_than(self, variable_value: Union[float, int]) -> bool:
        return variable_value < self.data_test_expression.expression  # type: ignore
class AbstractChoice(ABC):
    """Base class for the choices used by a Choice State."""

    def __init__(self, next_state: AbstractState):
        """Remember the state to transition to when the choice is true.

        Args:
            next_state: The state entered if this choice evaluates to true.
        """
        self.next_state = next_state

    def evaluate(self, data: Any) -> bool:
        """Evaluate the choice against some input data.

        Args:
            data: Input data to evaluate.

        Raises:
            NotImplementedError: Always; concrete subclasses must override.
        """
        raise NotImplementedError
class NotChoice(AbstractChoice):
    """Negating choice for the Choice State.

    Evaluates to true exactly when its underlying Choice Rule evaluates
    to false on the given input data.
    """

    def __init__(
        self,
        variable: str,
        *,
        next_state: AbstractState,
        **data_test_expression: Any,
    ):
        """Build the Choice Rule to be negated.

        Args:
            variable: The Reference Path to a variable in the state input.
            next_state: The state to transition to if evaluated to true.
            data_test_expression: The data-test expression to use.
        """
        super().__init__(next_state)
        self.choice_rule = ChoiceRule(variable, **data_test_expression)

    def evaluate(self, data: Any) -> bool:
        """Return the inverse of the wrapped Choice Rule's result.

        Args:
            data: Input data to evaluate.

        Returns:
            Whether the choice evaluates to true based on the input data.
        """
        return not self.choice_rule.evaluate(data)
class AndChoice(AbstractChoice):
    """Conjunction choice for the Choice State.

    Evaluates to true only when every contained Choice Rule is true.
    """

    def __init__(
        self,
        choice_rules: List[ChoiceRule],
        *,
        next_state: AbstractState,
    ):
        """Store the Choice Rules that must all hold.

        Args:
            choice_rules: A list of Choice Rules which must ALL evaluate
                to true.
            next_state: The state to transition to if true.
        """
        super().__init__(next_state)
        self.choice_rules = choice_rules

    def evaluate(self, data: Any) -> bool:
        """Check every Choice Rule against the input data.

        Args:
            data: Input data to evaluate.

        Returns:
            Whether the choice evaluates to true based on the input data.
        """
        # Short-circuits on the first rule that fails, like all().
        for rule in self.choice_rules:
            if not rule.evaluate(data):
                return False
        return True
class VariableChoice(AbstractChoice):
    """Single-rule choice for the Choice State.

    Evaluates to true exactly when its Choice Rule is true. Be careful
    when using a Reference Path that it evaluates to the correct type.
    """

    def __init__(
        self,
        variable: str,
        *,
        next_state: AbstractState,
        **data_test_expression: Any,
    ):
        """Build the Choice Rule that this choice delegates to.

        Args:
            variable: The Reference Path to a variable in the state input.
            next_state: The state to transition to if evaluated to true.
            data_test_expression: The data-test expression to use.
        """
        super().__init__(next_state)
        self.choice_rule = ChoiceRule(variable, **data_test_expression)

    def evaluate(self, data: Any) -> bool:
        """Delegate evaluation to the wrapped Choice Rule.

        Args:
            data: Input data to evaluate.

        Returns:
            Whether the choice evaluates to true based on the input data.
        """
        return self.choice_rule.evaluate(data)
| 36.742857 | 137 | 0.676594 | from __future__ import annotations
from abc import ABC
from enum import Enum
from typing import Any, List, Union
from awsstepfuncs.abstract_state import AbstractState
from awsstepfuncs.errors import AWSStepFuncsValueError
from awsstepfuncs.reference_path import ReferencePath
class DataTestExpressionType(Enum):
STRING_EQUALS = "string_equals"
STRING_EQUALS_PATH = "string_equals_path"
STRING_LESS_THAN = "string_less_than"
STRING_LESS_THAN_PATH = "string_less_than_path"
STRING_GREATER_THAN = "string_greater_than"
STRING_GREATER_THAN_PATH = "string_greater_than_path"
STRING_LESS_THAN_EQUALS = "string_less_than_equals"
STRING_LESS_THAN_EQUALS_PATH = "string_less_than_equals_path"
STRING_GREATER_THAN_EQUALS = "string_greater_than_equals"
STRING_GREATER_THAN_EQUALS_PATH = "string_greater_than_equals_path"
STRING_MATCHES = "string_matches"
NUMERIC_EQUALS = "numeric_equals"
NUMERIC_EQUALS_PATH = "numeric_equals_path"
NUMERIC_LESS_THAN = "numeric_less_than"
NUMERIC_LESS_THAN_PATH = "numeric_less_than_path"
NUMERIC_GREATER_THAN = "numeric_greater_than"
NUMERIC_GREATER_THAN_PATH = "numeric_greater_than_path"
NUMERIC_LESS_THAN_EQUALS = "numeric_less_than_equals"
NUMERIC_LESS_THAN_EQUALS_PATH = "numeric_less_than_equals_path"
NUMERIC_GREATER_THAN_EQUALS = "numeric_greater_than_equals"
NUMERIC_GREATER_THAN_EQUALS_PATH = "numeric_greater_than_equals_path"
BOOLEAN_EQUALS = "boolean_equals"
BOOLEAN_EQUALS_PATH = "boolean_equals_path"
TIMESTAMP_EQUALS = "timestamp_equals"
TIMESTAMP_EQUALS_PATH = "timestamp_equals_path"
TIMESTAMP_LESS_THAN = "timestamp_less_than"
TIMESTAMP_LESS_THAN_PATH = "timestamp_less_than_path"
TIMESTAMP_GREATER_THAN = "timestamp_greater_than"
TIMESTAMP_GREATER_THAN_PATH = "timestamp_greater_than_path"
TIMESTAMP_LESS_THAN_EQUALS = "timestamp_less_than_equals"
TIMESTAMP_LESS_THAN_EQUALS_PATH = "timestamp_less_than_equals_path"
TIMESTAMP_GREATER_THAN_EQUALS = "timestamp_greater_than_equals"
TIMESTAMP_GREATER_THAN_EQUALS_PATH = "timestamp_greater_than_equals_path"
IS_NULL = "is_null"
IS_PRESENT = "is_present"
IS_NUMERIC = "is_numeric"
IS_STRING = "is_string"
IS_BOOLEAN = "is_boolean"
IS_TIMESTAMP = "is_timestamp"
class DataTestExpression:
def __init__(self, type: str, expression: Any):
self.type = DataTestExpressionType(type).value
self.expression = ReferencePath(expression) if "path" in type else expression
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.type}={self.expression!r})"
class ChoiceRule:
def __init__(self, variable: str, **data_test_expression: Any):
self.variable = ReferencePath(variable)
if len(data_test_expression) != 1:
raise AWSStepFuncsValueError(
"Exactly one data-test expression must be defined"
)
self.data_test_expression = DataTestExpression(
*list(data_test_expression.items())[0]
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.variable!r}, {self.data_test_expression.type}={self.data_test_expression.expression!r})"
def evaluate(self, data: Any) -> bool:
variable_value = self.variable.apply(data)
if variable_value is None:
return False
if "path" in self.data_test_expression.type:
return eval(f"self._{self.data_test_expression.type}(data, variable_value)")
else:
return eval(f"self._{self.data_test_expression.type}(variable_value)")
def _is_present(self, variable_value: Any) -> bool:
return variable_value is not None
def _string_equals(self, variable_value: str) -> bool:
return variable_value == self.data_test_expression.expression
def _string_equals_path(self, data: Any, variable_value: str) -> bool:
string_equals = self.data_test_expression.expression.apply(data)
if not (isinstance(string_equals, str)):
raise AWSStepFuncsValueError(
"string_equals_path must evaluate to a string value"
)
return variable_value == string_equals
def _string_greater_than(self, variable_value: str) -> bool:
return variable_value > self.data_test_expression.expression
def _string_greater_than_path(self, data: Any, variable_value: str) -> bool:
string_greater_than = self.data_test_expression.expression.apply(data)
if not (isinstance(string_greater_than, str)):
raise AWSStepFuncsValueError(
"string_greater_than_path must evaluate to a string value"
)
return variable_value > string_greater_than
def _string_less_than(self, variable_value: str) -> bool:
return variable_value < self.data_test_expression.expression
def _string_less_than_path(self, data: Any, variable_value: str) -> bool:
string_less_than = self.data_test_expression.expression.apply(data)
if not (isinstance(string_less_than, str)):
raise AWSStepFuncsValueError(
"string_less_than_path must evaluate to a string value"
)
return variable_value < string_less_than
def _string_greater_than_equals(self, variable_value: str) -> bool:
return variable_value >= self.data_test_expression.expression
def _string_greater_than_equals_path(self, data: Any, variable_value: str) -> bool:
string_greater_than_equals = self.data_test_expression.expression.apply(data)
if not (isinstance(string_greater_than_equals, str)):
raise AWSStepFuncsValueError(
"string_greater_than_equals_path must evaluate to a string value"
)
return variable_value >= string_greater_than_equals
def _string_less_than_equals(self, variable_value: str) -> bool:
return variable_value <= self.data_test_expression.expression
def _string_less_than_equals_path(self, data: Any, variable_value: str) -> bool:
string_less_than_equals = self.data_test_expression.expression.apply(data)
if not (isinstance(string_less_than_equals, str)):
raise AWSStepFuncsValueError(
"string_less_than_equals_path must evaluate to a string value"
)
return variable_value <= string_less_than_equals
def _numeric_greater_than_equals(self, variable_value: Union[float, int]) -> bool:
return variable_value >= self.data_test_expression.expression
def _numeric_greater_than_path(
self, data: Any, variable_value: Union[float, int]
) -> bool:
numeric_greater_than = self.data_test_expression.expression.apply(data)
if not (
isinstance(numeric_greater_than, int)
or isinstance(numeric_greater_than, float)
):
raise AWSStepFuncsValueError(
"numeric_greater_than_path must evaluate to a numeric value"
)
return variable_value > numeric_greater_than
def _numeric_less_than(self, variable_value: Union[float, int]) -> bool:
return variable_value < self.data_test_expression.expression
class AbstractChoice(ABC):
def __init__(self, next_state: AbstractState):
self.next_state = next_state
def evaluate(self, data: Any) -> bool:
raise NotImplementedError
class NotChoice(AbstractChoice):
def __init__(
self,
variable: str,
*,
next_state: AbstractState,
**data_test_expression: Any,
):
super().__init__(next_state)
self.choice_rule = ChoiceRule(
variable,
**data_test_expression,
)
def evaluate(self, data: Any) -> bool:
return not self.choice_rule.evaluate(data)
class AndChoice(AbstractChoice):
def __init__(
self,
choice_rules: List[ChoiceRule],
*,
next_state: AbstractState,
):
super().__init__(next_state)
self.choice_rules = choice_rules
def evaluate(self, data: Any) -> bool:
return all(choice_rule.evaluate(data) for choice_rule in self.choice_rules)
class VariableChoice(AbstractChoice):
def __init__(
self,
variable: str,
*,
next_state: AbstractState,
**data_test_expression: Any,
):
super().__init__(next_state)
self.choice_rule = ChoiceRule(
variable,
**data_test_expression,
)
def evaluate(self, data: Any) -> bool:
return self.choice_rule.evaluate(data)
| true | true |
f72dd6ad9f38ef2c2c9a2d3c9ba175b13446494f | 296 | py | Python | app/user/urls.py | sadijrp/recipe-api | 015b47850e351d932737b16158fda38e5b4f97a0 | [
"MIT"
] | null | null | null | app/user/urls.py | sadijrp/recipe-api | 015b47850e351d932737b16158fda38e5b4f97a0 | [
"MIT"
] | null | null | null | app/user/urls.py | sadijrp/recipe-api | 015b47850e351d932737b16158fda38e5b4f97a0 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL namespace for this app's routes (reverse as "user:<route name>").
app_name = 'user'
urlpatterns = [
    # User-creation endpoint, handled by CreateUserView.
    path('create/', views.CreateUserView.as_view(), name='create'),
    # Auth-token creation endpoint, handled by CreateTokenView.
    path('token/', views.CreateTokenView.as_view(), name='token'),
    # Endpoint for managing one's own user record, handled by
    # ManageUserView (presumably the authenticated user -- see the view).
    path('me/', views.ManageUserView.as_view(), name='me'),
]
| 22.769231 | 68 | 0.64527 | from django.urls import path
from . import views
app_name = 'user'
urlpatterns = [
path('create/', views.CreateUserView.as_view(), name='create'),
path('token/', views.CreateTokenView.as_view(), name='token'),
path('me/', views.ManageUserView.as_view(), name='me'),
]
| true | true |
f72dd71c29167635a4412fa663eb4990775e416a | 25,028 | py | Python | packages/Qpyl/core/qtopology.py | mpurg/qtools | 48fed120449b2cb288a55489313ab2d13bc28b57 | [
"MIT"
] | 14 | 2017-03-14T10:44:07.000Z | 2022-03-08T14:27:39.000Z | packages/Qpyl/core/qtopology.py | mpurg/qtools | 48fed120449b2cb288a55489313ab2d13bc28b57 | [
"MIT"
] | 26 | 2017-03-10T12:34:03.000Z | 2020-09-15T13:46:58.000Z | packages/Qpyl/core/qtopology.py | mpurg/qtools | 48fed120449b2cb288a55489313ab2d13bc28b57 | [
"MIT"
] | 9 | 2017-05-20T04:53:53.000Z | 2021-11-04T01:13:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <miha.purg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
"""
This module implements an internal topology builder class QTopology.
QTopology creates a mapping between the system's structure (QStruct),
bonding patterns/charges (QLib), and the parameters (QPrm), allowing
evaluation of individual topological components of the system.
"""
from __future__ import absolute_import, unicode_literals, division
from Qpyl.core import qlibrary
from Qpyl.core import qparameter
from Qpyl.core import qstructure
from Qpyl.core import qpotential
from six.moves import range
class QTopologyError(Exception):
    """Raised when library, parameter and structure data cannot be
    combined into a consistent topology (missing residues, atoms or
    parameters, incompatible force fields, bad impropers, ...)."""
    pass
class QTopology(object):
    """
    Class for storing topology information.
    (mashup of library, parameter and structure data)

    Contains lists of atoms, bonds, angles, torsions
    and impropers, along with their parameters.

    The constructor takes in three arguments:
    qlib (qlibrary.QLib object)
    qprm (qparameter.QPrm object)
    qstruct (qstructure.QStruct object)

    Typical usage:

    qlib = qlibrary.QLib("amber")
    qprm = qparameter.QPrm("amber")
    qstruct = qstructure.QStruct()
    qlib.read_lib(".../qamber14.lib")
    qprm.read_prm(".../qamber14.prm")
    qstruct.read_pdb(".../14u3.pdb")

    try:
        qtopo = QTopology(qlib, qprm, qstruct)
    except QTopologyError as e:
        print "Failed to make topology: " + str(e)

    for bond in qtopo.bonds:
        print "%s: fk %.2f, r0 %.2f, r %.2f, E(r) %.2f" %
              (bond, bond.prm.fc, bond.prm.r0, bond.r, bond.energy)
    """

    def __init__(self, qlib, qprm, qstruct):
        # do some type checking to prevent bad things from happening
        for arg, _type in ((qlib, qlibrary.QLib),
                           (qprm, qparameter.QPrm),
                           (qstruct, qstructure.QStruct)):
            if not isinstance(arg, _type):
                raise QTopologyError("{} not of type {}".format(arg, _type))
        if qlib.ff_type != qprm.ff_type:
            raise QTopologyError("QLib FF ({}) not "
                                 "compatible with QPrm FF ({})"
                                 .format(qlib.ff_type, qprm.ff_type))
        self.qlib = qlib
        self.qprm = qprm
        self.qstruct = qstruct
        try:
            self.qlib.check_valid()  # check if lib entries are good
        except qlibrary.QLibError as e:
            raise QTopologyError(e)

        self.residues = []
        self.atoms = []
        self.bonds = []
        self.angles = []
        self.torsions = []
        self.impropers = []

        for residue_struct in self.qstruct.residues:
            # see if it is defined in the library
            try:
                residue_lib = self.qlib.residue_dict[residue_struct.name]
            except KeyError:
                raise QTopologyError("Residue '{}' not found in library"
                                     .format(residue_struct.name))

            # create new object for the residue
            res_index = len(self.residues) + 1
            residue_top = _TopoResidue(res_index, residue_struct, residue_lib)
            self.residues.append(residue_top)

            # get atoms and bonds within the residue
            self._get_atoms(residue_top)
            self._get_bonds(residue_top)

        # get angles, torsions and impropers from the bonds
        self._get_angles_torsions()
        # get impropers (from the lib entries)
        self._get_impropers()

    def _get_atoms(self, residue_top):
        """
        Creates _TopoAtom objects and adds them to
        _TopoResidue and QTopology.atoms
        """
        # iterate over the atoms in the library
        for atom in residue_top.lib.atoms:
            aname, atype, charge = atom.name, atom.atom_type, atom.charge
            # check if atoms exist in the structure
            try:
                atom_struct = [a for a in residue_top.struct.atoms
                               if a.name == aname][0]
            except IndexError:
                raise QTopologyError("Atom '{}' in residue '{}.{}'"
                                     " missing in the structure"
                                     .format(aname,
                                             residue_top.struct.index_struct,
                                             residue_top.struct.name))
            # check if atom parameters exist
            try:
                atom_prm = self.qprm.atom_types[atype]
            except KeyError:
                raise QTopologyError("Atom type '{}' not found!"
                                     .format(atype))
            # add new atom to list (and to the residue_top)
            atom_index = len(self.atoms) + 1
            a = _TopoAtom(atom_index, aname, charge, atom_prm,
                          atom_struct, residue_top)
            self.atoms.append(a)
            residue_top.add_atom(a)

    def _get_bonds(self, residue_top):
        """
        Creates _TopoBond objects and adds them to QTopology.bonds
        """
        # iterate over the bonds in the library
        for bond in residue_top.lib.bonds:
            # find the atom objects with those names
            atoms = [a for a in residue_top.atoms if a.name in bond]
            # find parameters
            atom_types = [a.prm.atom_type for a in atoms]
            prm_id = qparameter._PrmBond.get_id(atom_types)
            try:
                bond_prm = self.qprm.bonds[prm_id]
            except KeyError:
                raise QTopologyError("Bond type '{}' not found!"
                                     .format(prm_id))
            # create _TopoBond object
            self.bonds.append(_TopoBond(atoms, bond_prm))

        try:
            # -2 is assuming that the current one was just added (-1)
            prev_res = self.residues[-2]
        except IndexError:
            # first residue
            pass
        # don't check separate chains
        else:
            if residue_top.struct.molecule == prev_res.struct.molecule:
                for conn in residue_top.lib.connections:
                    for conn_prev in prev_res.lib.connections:
                        if "head" in conn and "tail" in conn_prev:
                            ahead = [a for a in residue_top.atoms if
                                     a.name == conn.split()[1]][0]
                            atail = [a for a in prev_res.atoms if
                                     a.name == conn_prev.split()[1]][0]
                            atoms = [atail, ahead]
                            atom_types = [a.prm.atom_type for a in atoms]
                            prm_id = qparameter._PrmBond.get_id(atom_types)
                            try:
                                bond_prm = self.qprm.bonds[prm_id]
                            except KeyError:
                                raise QTopologyError("Bond type '{}'"
                                                     "not found!"
                                                     .format(prm_id))
                            # create _TopoBond object
                            self.bonds.append(_TopoBond(atoms, bond_prm))

    def _get_angles_torsions(self):
        """
        Creates _TopoAngle and _TopoTorsion objects and
        adds them to QTopology.angles and QTopology.torsions
        """
        # to prevent backtracking
        processed_bonds = set()
        # iterate over all bonds and find the angles
        for bond1 in self.bonds:
            processed_angle_bonds = set()
            for bond2 in bond1.atoms[0].bonds + bond1.atoms[1].bonds:
                if bond2 == bond1 or bond2 in processed_bonds:
                    continue
                atoms1 = set(bond1.atoms)
                atoms2 = set(bond2.atoms)
                common_atom = atoms1 & atoms2
                side_atoms = atoms1 ^ atoms2
                angle_atoms = [side_atoms.pop(),
                               common_atom.pop(),
                               side_atoms.pop()]

                # find the angle parameter
                angle_atypes = [a.prm.atom_type for a in angle_atoms]
                prm_id = qparameter._PrmAngle.get_id(angle_atypes)
                try:
                    angle_prm = self.qprm.angles[prm_id]
                except KeyError:
                    raise QTopologyError("Angle type '{}' not found!"
                                         .format(prm_id))
                # create _TopoAngle object
                self.angles.append(_TopoAngle(angle_atoms, angle_prm))

                # find the torsions by looking at the bonds
                # of the angle's side atoms
                for side_atom_index in [0, 2]:
                    for bond3 in angle_atoms[side_atom_index].bonds:
                        if bond3 in processed_bonds or \
                                bond3 in processed_angle_bonds:
                            continue
                        try:
                            atom4 = [a for a in bond3.atoms
                                     if a not in angle_atoms][0]
                        except IndexError:
                            # both atoms are part of the angle
                            continue

                        if side_atom_index == 0:
                            torsion_atoms = [atom4] + angle_atoms
                        else:
                            torsion_atoms = angle_atoms + [atom4]

                        # TODO: QPrm.find_type() would be better
                        #
                        # find parameters
                        atom_types = [a.prm.atom_type for a in torsion_atoms]
                        prm_id = qparameter._PrmTorsion.get_id(atom_types)
                        try:
                            torsion_prm = self.qprm.torsions[prm_id]
                        except KeyError:
                            # see if generic parameters exist
                            gen_atypes = ["?"] + prm_id.split()[1:3] + ["?"]
                            prm_id_gen = qparameter._PrmTorsion.get_id(gen_atypes)
                            try:
                                torsion_prm = \
                                    self.qprm.generic_torsions[prm_id_gen]
                            except KeyError:
                                raise QTopologyError("Torsions type '{}' "
                                                     "for torsion '{}'"
                                                     "not found!"
                                                     .format(prm_id,
                                                             " ".join([a.name for a in
                                                                       torsion_atoms])))
                        # create _TopoTorsion object
                        self.torsions.append(_TopoTorsion(torsion_atoms,
                                                          torsion_prm))
                # remove the 'angle' bond from the torsion search
                # (otherwise you get forward and reverse duplicates)
                processed_angle_bonds.add(bond2)
            # remove the bond from the search (to prevent backtracking)
            processed_bonds.add(bond1)

    def _get_impropers(self):
        # create impropers -
        # only those that are explicitly defined in the library
        for residue_index, residue in enumerate(self.residues):
            for improper in residue.lib.impropers:
                # find _TopoAtom-s involved
                atoms = []
                for aname in improper:
                    res = residue
                    # some impropers span to next or
                    # previous residues (-C, +N)
                    if "+" in aname:
                        if residue_index+1 == len(self.residues):
                            continue
                        res = self.residues[residue_index+1]
                    if "-" in aname:
                        if residue_index == 0:
                            continue
                        res = self.residues[residue_index-1]
                    # if separate chains, skip
                    if residue.struct.molecule != res.struct.molecule:
                        continue
                    aname = aname.strip("+-")
                    try:
                        atoms.append([a for a in res.atoms
                                      if a.name == aname][0])
                    except IndexError:
                        if not res.lib.connections:
                            # no connectivity between residues
                            # (end of protein - ligands or water)
                            continue
                        else:
                            raise QTopologyError("Bad improper '{}' between "
                                                 "residues '{}' and '{}'"
                                                 .format(" ".join(improper),
                                                         residue.index,
                                                         res.index))
                if len(atoms) != 4:
                    continue

                # find parameters
                other_atypes = [a.prm.atom_type for a in atoms]
                center_atype = other_atypes.pop(1)
                prm_id = qparameter._PrmImproper.get_id(center_atype, other_atypes)
                try:
                    improper_prm = self.qprm.impropers[prm_id]
                except KeyError:
                    improper_prm = None

                # see if general parameters exist - a lot of options here, example:
                # CA O2 CB CN
                #
                # Single wildcard:
                # ? O2 CA CN
                # ? O2 CA CB
                # ? O2 CB CN
                if improper_prm is None:
                    for i in range(3):
                        ots = [other_atypes[i], other_atypes[(i+1)%3], "?"]
                        prm_id_gen = qparameter._PrmImproper.get_id(center_atype, ots)
                        try:
                            improper_prm = \
                                self.qprm.generic_impropers[prm_id_gen]
                            break
                        except KeyError:
                            improper_prm = None

                # Two wildcards:
                # ? O2 CB ?
                # ? O2 CA ?
                # ? O2 CN ?
                if improper_prm is None:
                    for i in range(3):
                        otypes = [other_atypes[i], "?", "?"]
                        prm_id_gen = qparameter._PrmImproper.get_id(center_atype, otypes)
                        try:
                            improper_prm = \
                                self.qprm.generic_impropers[prm_id_gen]
                            break
                        except KeyError:
                            improper_prm = None

                if improper_prm is None:
                    raise QTopologyError("Improper type '{}' "
                                         "not found!"
                                         .format(prm_id))

                # create _TopoImproper object (same order as library)
                self.impropers.append(_TopoImproper(atoms, improper_prm))
class _TopoAtom(object):
"""
Class containing topological information for an atom.
Arguments:
index (int): topology index of atom (1-based)
name (string): atom name as defined in the library/structure
charge (float): charge as defined in the QLib library
prm (_PrmAtom): atom parameter as defined in QPrm
struct (_StructAtom): atom structure object (stuff from PDB)
residue (_TopoResidue): reference to its parent residue
All these arguments are stored as object properties with the same name.
Additionaly, bonds, angles, torsions and impropers are lists that contain
references to _TopoBond, _TopoAngles etc. objects. These are filled in
automatically when creating aforementioned objects.
"""
def __init__(self, index, name, charge, prm, struct, residue):
self.index = index
self.name = name
self.charge = charge
self.prm = prm
self.struct = struct
self.residue = residue
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
self.bati_map = {_TopoBond: self.bonds,
_TopoAngle: self.angles,
_TopoTorsion: self.torsions,
_TopoImproper: self.impropers}
def __repr__(self):
return "_TopoAtom: {}_{}.{}".format(self.residue.name,
self.residue.index,
self.name)
def add_ref(self, bond_angle_tor_imp):
"""
Add bond, angle, torsion and improper references.
See class docstring.
"""
_type = type(bond_angle_tor_imp)
try:
self.bati_map[_type].append(bond_angle_tor_imp)
except KeyError:
raise TypeError("bond_agle_tor_imp of unsupported "
"type: {}".format(_type))
class _TopoResidue(object):
"""
Class containing topological information for a residue.
Arguments:
index (int): topology index of residue (1-based)
struct (_StructResidue): object with structure information
lib (_LibResidue): object with library information
Besides the two properties above, it contains a list of its atoms
as _TopoAtom objects in the 'atoms' property. This list is filled
automatically as atoms are created with this residue passed in as an
argument.
"""
def __init__(self, index, struct, lib):
self.index = index
self.struct = struct
self.lib = lib
self.name = struct.name
self.atoms = []
def add_atom(self, atom):
"""Append a _TopoAtom object to the 'atoms' list"""
self.atoms.append(atom)
class _TopoBonding(object):
"""Abstract class for topology bonds, angles, torsions and impropers.
Arguments:
atoms (list): list of _TopoAtom objects
prm (_PrmBond): object with bond parameters
"""
def __init__(self, atoms, prm):
self.prm = prm
atom_indexes = [(a.index, a) for a in atoms]
if isinstance(self, _TopoBond):
atom_indexes.sort()
elif isinstance(self, _TopoAngle):
atom_indexes = min(atom_indexes, list(reversed(atom_indexes)))
elif isinstance(self, _TopoTorsion):
atom_indexes = min(atom_indexes, list(reversed(atom_indexes)))
elif isinstance(self, _TopoImproper):
# order is defined in the library
pass
self.atoms = [a for (i,a) in atom_indexes]
for a in self.atoms:
a.add_ref(self)
def __repr__(self):
atoms_str = "-".join([a.name for a in self.atoms])
return "{}: ({})".format(self.__class__.__name__, atoms_str)
class _TopoBond(_TopoBonding):
    """Contains topological information for a bond.

    Extends _TopoBonding, implements calc().
    """
    # NOTE: no __init__ override is needed -- _TopoBonding.__init__ already
    # handles atom ordering. The previous trivial override used
    # super(self.__class__, self), which breaks further subclassing.

    def calc(self, r=None):
        """Calculate bond distance and energy.

        Args:
            r (float, optional): use this bond distance instead of
                calculating it from the structure

        Returns:
            tuple: (E [kcal/mol], r [angstrom])
        """
        # 'is None' instead of 'not r', so an explicitly given r == 0.0
        # is not silently recomputed; also consistent with the siblings
        if r is None:
            ac1, ac2 = [a.struct.coordinates for a in self.atoms]
            r = qpotential.bond_distance(ac1, ac2)
        e = qpotential.bond_energy(r, self.prm.fc, self.prm.r0)
        return (e, r)
class _TopoAngle(_TopoBonding):
    """Contains topological information for an angle.

    Extends _TopoBonding, implements calc().
    """
    # NOTE: no __init__ override is needed -- _TopoBonding.__init__ already
    # handles atom ordering. The previous trivial override used
    # super(self.__class__, self), which breaks further subclassing.

    def calc(self, theta=None):
        """Calculate angle and energy.

        Args:
            theta (float, optional): use this angle instead of calculating
                it from the structure

        Returns:
            tuple: (E [kcal/mol], theta [degrees])
        """
        if theta is None:  # 'is None' per PEP 8 (was '== None')
            ac1, ac2, ac3 = [a.struct.coordinates for a in self.atoms]
            theta = qpotential.angle_angle(ac1, ac2, ac3)
        e = qpotential.angle_energy(theta,
                                    self.prm.fc,
                                    self.prm.theta0)
        return (e, theta)
class _TopoTorsion(_TopoBonding):
    """Contains topological information for a torsion.

    Extends _TopoBonding, implements calc(), prm_full.
    """
    # NOTE: no __init__ override is needed -- _TopoBonding.__init__ already
    # handles atom ordering. The previous trivial override used
    # super(self.__class__, self), which breaks further subclassing.

    def calc(self, phi=None):
        """Calculate torsion angle and energy.

        Args:
            phi (float, optional): use this angle instead of calculating
                it from the structure

        Returns:
            tuple: (E [kcal/mol], phi [degrees])
        """
        if phi is None:  # 'is None' per PEP 8 (was '== None')
            ac1, ac2, ac3, ac4 = [a.struct.coordinates for a in self.atoms]
            phi = qpotential.torsion_angle(ac1, ac2, ac3, ac4)
        energy = 0
        # a torsion can have several periodic components; sum them up
        for fc, multiplicity, phase, npaths in self.prm.get_prms():
            energy += qpotential.torsion_energy(phi,
                                                fc, multiplicity,
                                                npaths, phase)
        return (energy, phi)

    @property
    def prm_full(self):
        """Return full parameter in case it is generic.

        Basically, make a copy of the generic parameter,
        but use actual atom-types instead of X's.
        """
        if self.prm.is_generic:
            atypes = [a.prm.prm_id for a in self.atoms]
            comment = "Generic: {}".format(self.prm.prm_id)
            full_prm = type(self.prm)(atypes, comment=comment)
            for p in self.prm.get_prms():
                full_prm.add_prm(*p)
            return full_prm
        else:
            return self.prm
class _TopoImproper(_TopoBonding):
"""Contains topological information for an improper.
Extends _TopoBonding, implements calc(), prm_full
"""
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, phi=None):
"""Calculate improper angle and energy
Args:
phi (float, optional): define the angle instead of calculating it
from the structure
Returns tuple (E [kcal/mol], phi [degrees])
"""
if phi == None:
ac1, ac2, ac3, ac4 = [a.struct.coordinates for a in self.atoms]
phi = qpotential.improper_angle(ac1, ac2, ac3, ac4)
e = qpotential.improper_energy_periodic(phi,
self.prm.fc,
self.prm.multiplicity,
self.prm.phi0)
return (e, phi)
@property
def prm_full(self):
"""Return full parameter in case it is generic.
Basically, make a copy of the generic parameter,
but use actual atom-types instead of X's.
"""
if self.prm.is_generic:
atypes = [a.prm.prm_id for a in self.atoms]
center = atypes.pop(1)
comment = "Generic: {}".format(self.prm.prm_id)
full_prm = type(self.prm)(center, atypes, self.prm.fc,
self.prm.phi0, self.prm.multiplicity,
comment=comment)
return full_prm
else:
return self.prm
| 37.63609 | 89 | 0.51798 |
from __future__ import absolute_import, unicode_literals, division
from Qpyl.core import qlibrary
from Qpyl.core import qparameter
from Qpyl.core import qstructure
from Qpyl.core import qpotential
from six.moves import range
class QTopologyError(Exception):
pass
class QTopology(object):
def __init__(self, qlib, qprm, qstruct):
for arg, _type in ((qlib, qlibrary.QLib),
(qprm, qparameter.QPrm),
(qstruct, qstructure.QStruct)):
if not isinstance(arg, _type):
raise QTopologyError("{} not of type {}".format(arg, _type))
if qlib.ff_type != qprm.ff_type:
raise QTopologyError("QLib FF ({}) not "
"compatible with QPrm FF ({})"
.format(qlib.ff_type, qprm.ff_type))
self.qlib = qlib
self.qprm = qprm
self.qstruct = qstruct
try:
self.qlib.check_valid()
except qlibrary.QLibError as e:
raise QTopologyError(e)
self.residues = []
self.atoms = []
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
for residue_struct in self.qstruct.residues:
try:
residue_lib = self.qlib.residue_dict[residue_struct.name]
except KeyError:
raise QTopologyError("Residue '{}' not found in library"
.format(residue_struct.name))
res_index = len(self.residues) + 1
residue_top = _TopoResidue(res_index, residue_struct, residue_lib)
self.residues.append(residue_top)
self._get_atoms(residue_top)
self._get_bonds(residue_top)
self._get_angles_torsions()
self._get_impropers()
def _get_atoms(self, residue_top):
for atom in residue_top.lib.atoms:
aname, atype, charge = atom.name, atom.atom_type, atom.charge
try:
atom_struct = [a for a in residue_top.struct.atoms
if a.name == aname][0]
except IndexError:
raise QTopologyError("Atom '{}' in residue '{}.{}'"
" missing in the structure"
.format(aname,
residue_top.struct.index_struct,
residue_top.struct.name))
try:
atom_prm = self.qprm.atom_types[atype]
except KeyError:
raise QTopologyError("Atom type '{}' not found!"
.format(atype))
atom_index = len(self.atoms) + 1
a = _TopoAtom(atom_index, aname, charge, atom_prm,
atom_struct, residue_top)
self.atoms.append(a)
residue_top.add_atom(a)
def _get_bonds(self, residue_top):
for bond in residue_top.lib.bonds:
atoms = [a for a in residue_top.atoms if a.name in bond]
atom_types = [a.prm.atom_type for a in atoms]
prm_id = qparameter._PrmBond.get_id(atom_types)
try:
bond_prm = self.qprm.bonds[prm_id]
except KeyError:
raise QTopologyError("Bond type '{}' not found!"
.format(prm_id))
self.bonds.append(_TopoBond(atoms, bond_prm))
try:
prev_res = self.residues[-2]
except IndexError:
pass
else:
if residue_top.struct.molecule == prev_res.struct.molecule:
for conn in residue_top.lib.connections:
for conn_prev in prev_res.lib.connections:
if "head" in conn and "tail" in conn_prev:
ahead = [a for a in residue_top.atoms if
a.name == conn.split()[1]][0]
atail = [a for a in prev_res.atoms if
a.name == conn_prev.split()[1]][0]
atoms = [atail, ahead]
atom_types = [a.prm.atom_type for a in atoms]
prm_id = qparameter._PrmBond.get_id(atom_types)
try:
bond_prm = self.qprm.bonds[prm_id]
except KeyError:
raise QTopologyError("Bond type '{}'"
"not found!"
.format(prm_id))
# create _TopoBond object
self.bonds.append(_TopoBond(atoms, bond_prm))
def _get_angles_torsions(self):
# to prevent backtracking
processed_bonds = set()
# iterate over all bonds and find the angles
for bond1 in self.bonds:
processed_angle_bonds = set()
for bond2 in bond1.atoms[0].bonds + bond1.atoms[1].bonds:
if bond2 == bond1 or bond2 in processed_bonds:
continue
atoms1 = set(bond1.atoms)
atoms2 = set(bond2.atoms)
common_atom = atoms1 & atoms2
side_atoms = atoms1 ^ atoms2
angle_atoms = [side_atoms.pop(),
common_atom.pop(),
side_atoms.pop()]
# find the angle parameter
angle_atypes = [a.prm.atom_type for a in angle_atoms]
prm_id = qparameter._PrmAngle.get_id(angle_atypes)
try:
angle_prm = self.qprm.angles[prm_id]
except KeyError:
raise QTopologyError("Angle type '{}' not found!"
.format(prm_id))
# create _TopoAngle object
self.angles.append(_TopoAngle(angle_atoms, angle_prm))
# find the torsions by looking at the bonds
# of the angle's side atoms
for side_atom_index in [0, 2]:
for bond3 in angle_atoms[side_atom_index].bonds:
if bond3 in processed_bonds or \
bond3 in processed_angle_bonds:
continue
try:
atom4 = [a for a in bond3.atoms
if a not in angle_atoms][0]
except IndexError:
continue
if side_atom_index == 0:
torsion_atoms = [atom4] + angle_atoms
else:
torsion_atoms = angle_atoms + [atom4]
atom_types = [a.prm.atom_type for a in torsion_atoms]
prm_id = qparameter._PrmTorsion.get_id(atom_types)
try:
torsion_prm = self.qprm.torsions[prm_id]
except KeyError:
gen_atypes = ["?"] + prm_id.split()[1:3] + ["?"]
prm_id_gen = qparameter._PrmTorsion.get_id(gen_atypes)
try:
torsion_prm = \
self.qprm.generic_torsions[prm_id_gen]
except KeyError:
raise QTopologyError("Torsions type '{}' "
"for torsion '{}'"
"not found!"
.format(prm_id,
" ".join([a.name for a in
torsion_atoms])))
self.torsions.append(_TopoTorsion(torsion_atoms,
torsion_prm))
processed_angle_bonds.add(bond2)
processed_bonds.add(bond1)
def _get_impropers(self):
for residue_index, residue in enumerate(self.residues):
for improper in residue.lib.impropers:
atoms = []
for aname in improper:
res = residue
if "+" in aname:
if residue_index+1 == len(self.residues):
continue
res = self.residues[residue_index+1]
if "-" in aname:
if residue_index == 0:
continue
res = self.residues[residue_index-1]
if residue.struct.molecule != res.struct.molecule:
continue
aname = aname.strip("+-")
try:
atoms.append([a for a in res.atoms
if a.name == aname][0])
except IndexError:
if not res.lib.connections:
continue
else:
raise QTopologyError("Bad improper '{}' between "
"residues '{}' and '{}'"
.format(" ".join(improper),
residue.index,
res.index))
if len(atoms) != 4:
continue
other_atypes = [a.prm.atom_type for a in atoms]
center_atype = other_atypes.pop(1)
prm_id = qparameter._PrmImproper.get_id(center_atype, other_atypes)
try:
improper_prm = self.qprm.impropers[prm_id]
except KeyError:
improper_prm = None
if improper_prm == None:
for i in range(3):
ots = [other_atypes[i], other_atypes[(i+1)%3], "?"]
prm_id_gen = qparameter._PrmImproper.get_id(center_atype, ots)
try:
improper_prm = \
self.qprm.generic_impropers[prm_id_gen]
break
except KeyError:
improper_prm = None
if improper_prm == None:
for i in range(3):
otypes = [other_atypes[i], "?", "?"]
prm_id_gen = qparameter._PrmImproper.get_id(center_atype, otypes)
try:
improper_prm = \
self.qprm.generic_impropers[prm_id_gen]
break
except KeyError:
improper_prm = None
if improper_prm == None:
raise QTopologyError("Improper type '{}' "
"not found!"
.format(prm_id))
self.impropers.append(_TopoImproper(atoms, improper_prm))
class _TopoAtom(object):
def __init__(self, index, name, charge, prm, struct, residue):
self.index = index
self.name = name
self.charge = charge
self.prm = prm
self.struct = struct
self.residue = residue
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
self.bati_map = {_TopoBond: self.bonds,
_TopoAngle: self.angles,
_TopoTorsion: self.torsions,
_TopoImproper: self.impropers}
def __repr__(self):
return "_TopoAtom: {}_{}.{}".format(self.residue.name,
self.residue.index,
self.name)
def add_ref(self, bond_angle_tor_imp):
_type = type(bond_angle_tor_imp)
try:
self.bati_map[_type].append(bond_angle_tor_imp)
except KeyError:
raise TypeError("bond_agle_tor_imp of unsupported "
"type: {}".format(_type))
class _TopoResidue(object):
def __init__(self, index, struct, lib):
self.index = index
self.struct = struct
self.lib = lib
self.name = struct.name
self.atoms = []
def add_atom(self, atom):
self.atoms.append(atom)
class _TopoBonding(object):
def __init__(self, atoms, prm):
self.prm = prm
atom_indexes = [(a.index, a) for a in atoms]
if isinstance(self, _TopoBond):
atom_indexes.sort()
elif isinstance(self, _TopoAngle):
atom_indexes = min(atom_indexes, list(reversed(atom_indexes)))
elif isinstance(self, _TopoTorsion):
atom_indexes = min(atom_indexes, list(reversed(atom_indexes)))
elif isinstance(self, _TopoImproper):
pass
self.atoms = [a for (i,a) in atom_indexes]
for a in self.atoms:
a.add_ref(self)
def __repr__(self):
atoms_str = "-".join([a.name for a in self.atoms])
return "{}: ({})".format(self.__class__.__name__, atoms_str)
class _TopoBond(_TopoBonding):
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, r=None):
if not r:
ac1, ac2 = [a.struct.coordinates for a in self.atoms]
r = qpotential.bond_distance(ac1, ac2)
e = qpotential.bond_energy(r, self.prm.fc, self.prm.r0)
return (e,r)
class _TopoAngle(_TopoBonding):
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, theta=None):
if theta == None:
ac1, ac2, ac3 = [a.struct.coordinates for a in self.atoms]
theta = qpotential.angle_angle(ac1, ac2, ac3)
e = qpotential.angle_energy(theta,
self.prm.fc,
self.prm.theta0)
return (e, theta)
class _TopoTorsion(_TopoBonding):
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, phi=None):
if phi == None:
ac1, ac2, ac3, ac4 = [a.struct.coordinates for a in self.atoms]
phi = qpotential.torsion_angle(ac1, ac2, ac3, ac4)
energy = 0
for fc, multiplicity, phase, npaths in self.prm.get_prms():
energy += qpotential.torsion_energy(phi,
fc, multiplicity,
npaths, phase)
return (energy, phi)
@property
def prm_full(self):
if self.prm.is_generic:
atypes = [a.prm.prm_id for a in self.atoms]
comment = "Generic: {}".format(self.prm.prm_id)
full_prm = type(self.prm)(atypes, comment=comment)
for p in self.prm.get_prms():
full_prm.add_prm(*p)
return full_prm
else:
return self.prm
class _TopoImproper(_TopoBonding):
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, phi=None):
if phi == None:
ac1, ac2, ac3, ac4 = [a.struct.coordinates for a in self.atoms]
phi = qpotential.improper_angle(ac1, ac2, ac3, ac4)
e = qpotential.improper_energy_periodic(phi,
self.prm.fc,
self.prm.multiplicity,
self.prm.phi0)
return (e, phi)
@property
def prm_full(self):
if self.prm.is_generic:
atypes = [a.prm.prm_id for a in self.atoms]
center = atypes.pop(1)
comment = "Generic: {}".format(self.prm.prm_id)
full_prm = type(self.prm)(center, atypes, self.prm.fc,
self.prm.phi0, self.prm.multiplicity,
comment=comment)
return full_prm
else:
return self.prm
| true | true |
f72dd7ee45a1a5417dd550741777c145ca6add1c | 5,936 | py | Python | isensus/.warnings/warnings.py | MPI-IS/isensus | 23171afc7f5b1d2b322a4ab2ef274d5bd3457fdc | [
"BSD-3-Clause"
] | null | null | null | isensus/.warnings/warnings.py | MPI-IS/isensus | 23171afc7f5b1d2b322a4ab2ef274d5bd3457fdc | [
"BSD-3-Clause"
] | null | null | null | isensus/.warnings/warnings.py | MPI-IS/isensus | 23171afc7f5b1d2b322a4ab2ef274d5bd3457fdc | [
"BSD-3-Clause"
] | null | null | null | import inspect
class Warnings:
def __init__(self, config, user_data):
self._config = config
self._user_data = user_data
def _get_warnings(warnings_instance):
"""
returns a list of the returned value of
all private functions of warnings_instance
"""
# all functions of this class
warning_functions = inspect.getmembers(
warnings_instance.__class__, predicate=inspect.isfunction
)
# all private functions of this class, with the
# exclusion of __init__
warning_functions = [
wf
for wf in warning_functions
if not wf[0].startswith("__") and wf[0].startswith("_")
]
warnings = []
for _, function in warning_functions:
warnings.append(function(warnings_instance))
return [str(w) for w in warning if w is not None]
class CoreWarnings(Warnings):
"""
Warnings for all users (alumni or not)
"""
def __init__(self, config, user_data):
super().__init__(config, user_data)
expiration = self.user_data["expiration"]
date = datetime.date(expiration.year, expiration.month, expiration.day)
now = datetime.datetime.now()
now = datetime.date(now.year, now.month, now.day)
self._expired = (date - now).days > 0
def _not_set_as_alumni(self):
if self._expired and not self._user_data["contract"] == "alumni":
return str("contract expired but not set " "alumni")
return []
def get(self):
"""
returns the list of warnings of the user
"""
return get_warnings(self)
class ActiveWarnings(Warnings):
"""
Warnings for "active" user (i.e. not alumni)
"""
def __init__(self, config, user_data):
super().__init__(config, user_data)
def _attribute_not_set(self, attr):
user_value = self._user_data[attr]
if user_value is None:
return "{} is not set".format(attr)
if user_value == "false":
return "{} is not set".format(attr)
return None
def _firstname_not_set(self):
return self._attribute_not_set("firstname")
def _lastname_not_set(self):
return self._attribute_not_set("lastname")
def _ldap_not_set(self):
return self._attribute_not_set("ldap")
def _forms_status(self):
forms_sent = self._attribute_not_set("forms_sent")
if forms_sent is not None:
return forms_sent
forms_received = self._attribute_not_set("forms_received")
if forms_received is not None:
return forms_received
is_website = self._attribute_not_set("is_website")
return is_website
def _expiration_not_set(self):
return self._attribute_not_set("expiration")
def _contract_not_set(self):
return self._attribute_not_set("contract")
def _type_not_set(self):
return self._attribute_not_set("type")
def get(self):
"""
returns the list of warnings of the user
"""
# this execute all private functions of
# this class, each returning either None
# (no warning) or a string (warning message)
if self._user_data["contract"] == "alumni":
return []
return _get_warnings(self)
class TransitionWarnings(Warnings):
"""
Warnings for active users for which
the contract will soon expire
"""
def __init__(self, config, user_data, threshold_days=10):
super().__init__(config, user_data)
expiration = self.user_data["expiration"]
date = datetime.date(expiration.year, expiration.month, expiration.day)
now = datetime.datetime.now()
now = datetime.date(now.year, now.month, now.day)
self._expire_in = (date - now).days
self._threshold_days = threshold_days
def _no_closure_mail(self):
if not user_data["closure_mail"]:
return str(
"contract expires soon, " "but no closure contract has been sent"
)
return None
def get(self):
"""
returns the list of warnings of the user
"""
if self._expire_in > self.threshold_days:
return []
return _get_warnings(self)
class AlumniWarnings(Warnings):
"""
Warnings for "inactive" user (i.e alumni)
"""
def __init__(self, config, user_data):
super().__init__(config, user_data)
def _not_vaulted(self):
if not self._user_data["vaulted"]:
return "user is not (ldap) vaulted"
return None
def _no_forwarder(self):
if not self._user_data["forwarder"]:
return str("user email has not been replaced " "by a forwarder")
return None
def _not_set_as_alumni(self):
if not self._user_data["website_alumni"]:
return str("user not set as alumni " "in the website")
return None
def _has_hardware(self):
if self._user_data["hardware"]:
return str("user still has some" "hardware")
return None
def _has_licenses(self):
if self._user_data["licenses"]:
return str("user still has some" "licenses")
return None
def _assets_in_is_snipe(self):
if not self._user_data["is_snipe_cleared"]:
return str("user may still have some " "assets deployed to in is-snipe")
def get(self):
"""
returns the list of warnings of the user
"""
if not self._user_data["contract"] == "alumni":
return []
return _get_warnings(self)
def all_warnings(self, config, user_data):
"""
returns the list of warnings (str)
of the user
"""
warnings_classes = list(Warnings.__subclasses__())
instances = [wc(config, user_data) for wc in warning_classes]
warnings = []
for instance in instances:
warnings.extend(instance.get())
return warnings
| 29.386139 | 84 | 0.626179 | import inspect
class Warnings:
def __init__(self, config, user_data):
self._config = config
self._user_data = user_data
def _get_warnings(warnings_instance):
warning_functions = inspect.getmembers(
warnings_instance.__class__, predicate=inspect.isfunction
)
warning_functions = [
wf
for wf in warning_functions
if not wf[0].startswith("__") and wf[0].startswith("_")
]
warnings = []
for _, function in warning_functions:
warnings.append(function(warnings_instance))
return [str(w) for w in warning if w is not None]
class CoreWarnings(Warnings):
def __init__(self, config, user_data):
super().__init__(config, user_data)
expiration = self.user_data["expiration"]
date = datetime.date(expiration.year, expiration.month, expiration.day)
now = datetime.datetime.now()
now = datetime.date(now.year, now.month, now.day)
self._expired = (date - now).days > 0
def _not_set_as_alumni(self):
if self._expired and not self._user_data["contract"] == "alumni":
return str("contract expired but not set " "alumni")
return []
def get(self):
return get_warnings(self)
class ActiveWarnings(Warnings):
def __init__(self, config, user_data):
super().__init__(config, user_data)
def _attribute_not_set(self, attr):
user_value = self._user_data[attr]
if user_value is None:
return "{} is not set".format(attr)
if user_value == "false":
return "{} is not set".format(attr)
return None
def _firstname_not_set(self):
return self._attribute_not_set("firstname")
def _lastname_not_set(self):
return self._attribute_not_set("lastname")
def _ldap_not_set(self):
return self._attribute_not_set("ldap")
def _forms_status(self):
forms_sent = self._attribute_not_set("forms_sent")
if forms_sent is not None:
return forms_sent
forms_received = self._attribute_not_set("forms_received")
if forms_received is not None:
return forms_received
is_website = self._attribute_not_set("is_website")
return is_website
def _expiration_not_set(self):
return self._attribute_not_set("expiration")
def _contract_not_set(self):
return self._attribute_not_set("contract")
def _type_not_set(self):
return self._attribute_not_set("type")
def get(self):
if self._user_data["contract"] == "alumni":
return []
return _get_warnings(self)
class TransitionWarnings(Warnings):
def __init__(self, config, user_data, threshold_days=10):
super().__init__(config, user_data)
expiration = self.user_data["expiration"]
date = datetime.date(expiration.year, expiration.month, expiration.day)
now = datetime.datetime.now()
now = datetime.date(now.year, now.month, now.day)
self._expire_in = (date - now).days
self._threshold_days = threshold_days
def _no_closure_mail(self):
if not user_data["closure_mail"]:
return str(
"contract expires soon, " "but no closure contract has been sent"
)
return None
def get(self):
if self._expire_in > self.threshold_days:
return []
return _get_warnings(self)
class AlumniWarnings(Warnings):
def __init__(self, config, user_data):
super().__init__(config, user_data)
def _not_vaulted(self):
if not self._user_data["vaulted"]:
return "user is not (ldap) vaulted"
return None
def _no_forwarder(self):
if not self._user_data["forwarder"]:
return str("user email has not been replaced " "by a forwarder")
return None
def _not_set_as_alumni(self):
if not self._user_data["website_alumni"]:
return str("user not set as alumni " "in the website")
return None
def _has_hardware(self):
if self._user_data["hardware"]:
return str("user still has some" "hardware")
return None
def _has_licenses(self):
if self._user_data["licenses"]:
return str("user still has some" "licenses")
return None
def _assets_in_is_snipe(self):
if not self._user_data["is_snipe_cleared"]:
return str("user may still have some " "assets deployed to in is-snipe")
def get(self):
if not self._user_data["contract"] == "alumni":
return []
return _get_warnings(self)
def all_warnings(self, config, user_data):
warnings_classes = list(Warnings.__subclasses__())
instances = [wc(config, user_data) for wc in warning_classes]
warnings = []
for instance in instances:
warnings.extend(instance.get())
return warnings
| true | true |
f72dd948e5a154151ffe73b2af697605d2a92f7f | 61,972 | py | Python | mmdet3d/datasets/pipelines/transforms_3d.py | xizaoqu/mmdetection3d | 1809f9650de95d7bc80035787b09e3b69390b702 | [
"Apache-2.0"
] | null | null | null | mmdet3d/datasets/pipelines/transforms_3d.py | xizaoqu/mmdetection3d | 1809f9650de95d7bc80035787b09e3b69390b702 | [
"Apache-2.0"
] | null | null | null | mmdet3d/datasets/pipelines/transforms_3d.py | xizaoqu/mmdetection3d | 1809f9650de95d7bc80035787b09e3b69390b702 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import warnings
from mmcv import is_tuple_of
from mmcv.utils import build_from_cfg
from mmdet3d.core import VoxelGenerator
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
LiDARInstance3DBoxes, box_np_ops)
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import RandomFlip
from ..builder import OBJECTSAMPLERS
from .data_augment_utils import noise_per_object_v3_
@PIPELINES.register_module()
class RandomDropPointsColor(object):
    r"""Randomly zero out the color attribute of all points in a scene.

    When the transform is triggered, every point's color is set to zeros.
    Refer to `PAConv <https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/
    util/transform.py#L223>`_ for more details.

    Args:
        drop_ratio (float): The probability of dropping point colors.
            Defaults to 0.2.
    """

    def __init__(self, drop_ratio=0.2):
        valid = isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1
        assert valid, f'invalid drop_ratio value {drop_ratio}'
        self.drop_ratio = drop_ratio

    def __call__(self, input_dict):
        """Call function to drop point colors.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after color dropping, \
                'points' key is updated in the result dict.
        """
        points = input_dict['points']
        assert points.attribute_dims is not None and \
            'color' in points.attribute_dims, \
            'Expect points have color attribute'

        # NOTE: the comparison is intentionally written as
        # `rand() > 1.0 - drop_ratio` rather than the equivalent-looking
        # `rand() < drop_ratio`: experiments training the PAConv 3D segmentor
        # consistently gave better results with this exact form, so the hack
        # is kept on purpose.
        if np.random.rand() > 1.0 - self.drop_ratio:
            points.color = points.color * 0.0
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return f'{self.__class__.__name__}(drop_ratio={self.drop_ratio})'
@PIPELINES.register_module()
class RandomFlip3D(RandomFlip):
    """Flip the points & bbox.

    If the input dict contains the key "flip", that flag is reused;
    otherwise flipping is decided randomly according to the ratios given
    at construction time.

    Args:
        sync_2d (bool, optional): Whether to apply flip according to the 2D
            images. If True, it will apply the same flip as that to 2D images.
            If False, it will decide whether to flip randomly and independently
            to that of 2D images. Defaults to True.
        flip_ratio_bev_horizontal (float, optional): The flipping probability
            in horizontal direction. Defaults to 0.0.
        flip_ratio_bev_vertical (float, optional): The flipping probability
            in vertical direction. Defaults to 0.0.
    """

    def __init__(self,
                 sync_2d=True,
                 flip_ratio_bev_horizontal=0.0,
                 flip_ratio_bev_vertical=0.0,
                 **kwargs):
        # the parent class handles 2D image flipping with the horizontal ratio
        super(RandomFlip3D, self).__init__(
            flip_ratio=flip_ratio_bev_horizontal, **kwargs)
        self.sync_2d = sync_2d
        self.flip_ratio_bev_vertical = flip_ratio_bev_vertical
        for ratio in (flip_ratio_bev_horizontal, flip_ratio_bev_vertical):
            if ratio is not None:
                assert isinstance(ratio, (int, float)) and 0 <= ratio <= 1

    def random_flip_data_3d(self, input_dict, direction='horizontal'):
        """Flip 3D data randomly.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            direction (str): Flip direction. Default: horizontal.

        Returns:
            dict: Flipped results, 'points', 'bbox3d_fields' keys are \
                updated in the result dict.
        """
        assert direction in ['horizontal', 'vertical']
        bbox_keys = input_dict['bbox3d_fields']
        if len(bbox_keys) == 0:  # test mode: insert a placeholder empty box
            bbox_keys.append('empty_box3d')
            input_dict['empty_box3d'] = input_dict['box_type_3d'](
                np.array([], dtype=np.float32))
        assert len(bbox_keys) == 1
        for key in bbox_keys:
            if 'points' in input_dict:
                # flipping the boxes also flips the accompanying points
                input_dict['points'] = input_dict[key].flip(
                    direction, points=input_dict['points'])
            else:
                input_dict[key].flip(direction)
        if 'centers2d' in input_dict:
            assert self.sync_2d is True and direction == 'horizontal', \
                'Only support sync_2d=True and horizontal flip with images'
            w = input_dict['ori_shape'][1]
            input_dict['centers2d'][..., 0] = \
                w - input_dict['centers2d'][..., 0]
            # mirror the camera principal point c_u (= ['cam2img'][0][2])
            # along the image u-axis, consistent with the flipped centers2d;
            # see https://github.com/open-mmlab/mmdetection3d/pull/744
            input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2]

    def __call__(self, input_dict):
        """Call function to flip points, values in the ``bbox3d_fields`` and \
        also flip 2D image and its annotations.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction', \
                'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added \
                into result dict.
        """
        # flip the 2D image and its annotations first; this sets 'flip'
        super(RandomFlip3D, self).__call__(input_dict)

        if self.sync_2d:
            # follow the 2D decision; vertical flip is never synced
            input_dict['pcd_horizontal_flip'] = input_dict['flip']
            input_dict['pcd_vertical_flip'] = False
        else:
            # decide independently of the 2D images, unless already decided
            if 'pcd_horizontal_flip' not in input_dict:
                input_dict['pcd_horizontal_flip'] = bool(
                    np.random.rand() < self.flip_ratio)
            if 'pcd_vertical_flip' not in input_dict:
                input_dict['pcd_vertical_flip'] = bool(
                    np.random.rand() < self.flip_ratio_bev_vertical)

        input_dict.setdefault('transformation_3d_flow', [])

        if input_dict['pcd_horizontal_flip']:
            self.random_flip_data_3d(input_dict, 'horizontal')
            input_dict['transformation_3d_flow'].append('HF')
        if input_dict['pcd_vertical_flip']:
            self.random_flip_data_3d(input_dict, 'vertical')
            input_dict['transformation_3d_flow'].append('VF')
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return (f'{self.__class__.__name__}(sync_2d={self.sync_2d},'
                f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})')
@PIPELINES.register_module()
class RandomJitterPoints(object):
    """Randomly jitter point coordinates.

    Different from the global translation in ``GlobalRotScaleTrans``, every
    point in the scene receives its own independently sampled noise.

    Args:
        jitter_std (list[float]): The standard deviation of jittering noise.
            The noise added to each point is drawn from a Gaussian
            distribution with this per-axis standard deviation.
            Defaults to [0.01, 0.01, 0.01]
        clip_range (list[float] | None): Clip the randomly generated jitter \
            noise into this range. If None is given, don't perform clipping.
            Defaults to [-0.05, 0.05]

    Note:
        This transform should only be used in point cloud segmentation tasks \
        because we don't transform ground-truth bboxes accordingly.
        For similar transform in detection task, please refer to `ObjectNoise`.
    """

    def __init__(self,
                 jitter_std=[0.01, 0.01, 0.01],
                 clip_range=[-0.05, 0.05]):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(jitter_std, seq_types):
            assert isinstance(jitter_std, (int, float)), \
                f'unsupported jitter_std type {type(jitter_std)}'
            # broadcast a scalar std to all three coordinate axes
            jitter_std = [jitter_std] * 3
        self.jitter_std = jitter_std

        if clip_range is not None and not isinstance(clip_range, seq_types):
            assert isinstance(clip_range, (int, float)), \
                f'unsupported clip_range type {type(clip_range)}'
            # a scalar c denotes the symmetric range [-c, c]
            clip_range = [-clip_range, clip_range]
        self.clip_range = clip_range

    def __call__(self, input_dict):
        """Call function to jitter all the points in the scene.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after adding noise to each point, \
                'points' key is updated in the result dict.
        """
        points = input_dict['points']
        std = np.array(self.jitter_std, dtype=np.float32)
        noise = np.random.randn(points.shape[0], 3) * std[None, :]
        if self.clip_range is not None:
            noise = np.clip(noise, self.clip_range[0], self.clip_range[1])
        points.translate(noise)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return (f'{self.__class__.__name__}(jitter_std={self.jitter_std},'
                f' clip_range={self.clip_range})')
@PIPELINES.register_module()
class ObjectSample(object):
    """Sample GT objects to the data.

    Args:
        db_sampler (dict): Config dict of the database sampler.
        sample_2d (bool): Whether to also paste 2D image patch to the images
            This should be true when applying multi-modality cut-and-paste.
            Defaults to False.
    """

    def __init__(self, db_sampler, sample_2d=False):
        self.sampler_cfg = db_sampler
        self.sample_2d = sample_2d
        if 'type' not in db_sampler.keys():
            db_sampler['type'] = 'DataBaseSampler'
        self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)

    @staticmethod
    def remove_points_in_boxes(points, boxes):
        """Remove the points in the sampled bounding boxes.

        Args:
            points (:obj:`BasePoints`): Input point cloud array.
            boxes (np.ndarray): Sampled ground truth boxes.

        Returns:
            np.ndarray: Points with those in the boxes removed.
        """
        masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)
        # keep only points that fall inside none of the sampled boxes
        points = points[np.logical_not(masks.any(-1))]
        return points

    def __call__(self, input_dict):
        """Call function to sample ground truth objects to the data.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after object sampling augmentation, \
                'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated \
                in the result dict.
        """
        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']

        # change to float for blending operation
        points = input_dict['points']
        if self.sample_2d:
            img = input_dict['img']
            gt_bboxes_2d = input_dict['gt_bboxes']
            # Assume for now 3D & 2D bboxes are the same
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(),
                gt_labels_3d,
                gt_bboxes_2d=gt_bboxes_2d,
                img=img)
        else:
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None)

        if sampled_dict is not None:
            sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d']
            sampled_points = sampled_dict['points']
            sampled_gt_labels = sampled_dict['gt_labels_3d']

            gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels],
                                          axis=0)
            gt_bboxes_3d = gt_bboxes_3d.new_box(
                np.concatenate(
                    [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d]))

            points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d)
            # check the points dimension
            points = points.cat([sampled_points, points])

            if self.sample_2d:
                sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d']
                gt_bboxes_2d = np.concatenate(
                    [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32)

                input_dict['gt_bboxes'] = gt_bboxes_2d
                input_dict['img'] = sampled_dict['img']

        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        # np.long (alias of Python int) was deprecated in NumPy 1.20 and
        # removed in 1.24; np.int64 yields the same label dtype on the
        # 64-bit platforms mmdet3d targets without relying on the alias.
        input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.int64)

        input_dict['points'] = points

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f' sample_2d={self.sample_2d},'
        repr_str += f' data_root={self.sampler_cfg.data_root},'
        repr_str += f' info_path={self.sampler_cfg.info_path},'
        repr_str += f' rate={self.sampler_cfg.rate},'
        repr_str += f' prepare={self.sampler_cfg.prepare},'
        repr_str += f' classes={self.sampler_cfg.classes},'
        repr_str += f' sample_groups={self.sampler_cfg.sample_groups}'
        return repr_str
@PIPELINES.register_module()
class ObjectNoise(object):
    """Apply random noise independently to each GT object in the scene.

    Args:
        translation_std (list[float], optional): Standard deviation of the
            distribution the translation noise is drawn from.
            Defaults to [0.25, 0.25, 0.25].
        global_rot_range (list[float], optional): Global rotation to the scene.
            Defaults to [0.0, 0.0].
        rot_range (list[float], optional): Per-object rotation range.
            Defaults to [-0.15707963267, 0.15707963267].
        num_try (int, optional): Number of attempts when the sampled noise is
            invalid. Defaults to 100.
    """

    def __init__(self,
                 translation_std=[0.25, 0.25, 0.25],
                 global_rot_range=[0.0, 0.0],
                 rot_range=[-0.15707963267, 0.15707963267],
                 num_try=100):
        self.translation_std = translation_std
        self.global_rot_range = global_rot_range
        self.rot_range = rot_range
        self.num_try = num_try

    def __call__(self, input_dict):
        """Apply noise to every ground truth box and its contained points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after object noise augmentation; 'points' and
                'gt_bboxes_3d' keys are updated in the result dict.
        """
        boxes = input_dict['gt_bboxes_3d']
        pts = input_dict['points']

        # noise_per_object_v3_ mutates the numpy arrays in place.
        boxes_np = boxes.tensor.numpy()
        pts_np = pts.tensor.numpy()
        noise_per_object_v3_(
            boxes_np,
            pts_np,
            rotation_perturb=self.rot_range,
            center_noise_std=self.translation_std,
            global_random_rot_range=self.global_rot_range,
            num_try=self.num_try)

        input_dict['gt_bboxes_3d'] = boxes.new_box(boxes_np)
        input_dict['points'] = pts.new_point(pts_np)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return (f'{self.__class__.__name__}(num_try={self.num_try},'
                f' translation_std={self.translation_std},'
                f' global_rot_range={self.global_rot_range},'
                f' rot_range={self.rot_range})')
@PIPELINES.register_module()
class GlobalAlignment(object):
    """Apply global alignment to 3D scene points by rotation and translation.

    Args:
        rotation_axis (int): Rotation axis for points and bboxes rotation.

    Note:
        We do not record the applied rotation and translation as in
        GlobalRotScaleTrans, because usually we do not need to reverse
        the alignment step. For example, the ScanNet 3D detection task
        uses aligned ground-truth bounding boxes for evaluation.
    """

    def __init__(self, rotation_axis):
        self.rotation_axis = rotation_axis

    def _trans_points(self, input_dict, trans_factor):
        """Translate the scene points in place by ``trans_factor``.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            trans_factor (np.ndarray): Translation vector to be applied.
        """
        input_dict['points'].translate(trans_factor)

    def _rot_points(self, input_dict, rot_mat):
        """Rotate the scene points in place by ``rot_mat``.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            rot_mat (np.ndarray): Rotation matrix to be applied.
        """
        # Points.rotate expects the transposed matrix, hence rot_mat.T.
        input_dict['points'].rotate(rot_mat.T)

    def _check_rot_mat(self, rot_mat):
        """Assert ``rot_mat`` is a pure rotation about ``self.rotation_axis``.

        Args:
            rot_mat (np.ndarray): Rotation matrix to be checked.
        """
        axis = self.rotation_axis
        unit = np.zeros(3)
        unit[axis] = 1.0
        # determinant must be 1 and the rotation axis row/column untouched
        ok = np.allclose(np.linalg.det(rot_mat), 1.0)
        ok &= (rot_mat[axis, :] == unit).all()
        ok &= (rot_mat[:, axis] == unit).all()
        assert ok, f'invalid rotation matrix {rot_mat}'

    def __call__(self, input_dict):
        """Rotate then translate points by the annotated alignment matrix.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after global alignment; 'points' is updated
                in the result dict.
        """
        assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \
            'axis_align_matrix is not provided in GlobalAlignment'

        align_mat = input_dict['ann_info']['axis_align_matrix']
        assert align_mat.shape == (4, 4), \
            f'invalid shape {align_mat.shape} for axis_align_matrix'
        rot_mat = align_mat[:3, :3]
        trans_vec = align_mat[:3, -1]

        self._check_rot_mat(rot_mat)
        self._rot_points(input_dict, rot_mat)
        self._trans_points(input_dict, trans_vec)

        return input_dict

    def __repr__(self):
        return f'{self.__class__.__name__}(rotation_axis={self.rotation_axis})'
@PIPELINES.register_module()
class GlobalRotScaleTrans(object):
    """Apply global rotation, scaling and translation to a 3D scene.

    Args:
        rot_range (list[float]): Range of rotation angle.
            Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]).
        scale_ratio_range (list[float]): Range of scale ratio.
            Defaults to [0.95, 1.05].
        translation_std (list[float]): The standard deviation of translation
            noise. This applies random translation to a scene by a noise, which
            is sampled from a gaussian distribution whose standard deviation
            is set by ``translation_std``. Defaults to [0, 0, 0]
        shift_height (bool): Whether to shift height.
            (the fourth dimension of indoor points) when scaling.
            Defaults to False.
    """

    def __init__(self,
                 rot_range=[-0.78539816, 0.78539816],
                 scale_ratio_range=[0.95, 1.05],
                 translation_std=[0, 0, 0],
                 shift_height=False):
        seq_types = (list, tuple, np.ndarray)
        # A scalar rot_range r is interpreted as the symmetric range [-r, r].
        if not isinstance(rot_range, seq_types):
            assert isinstance(rot_range, (int, float)), \
                f'unsupported rot_range type {type(rot_range)}'
            rot_range = [-rot_range, rot_range]
        self.rot_range = rot_range

        assert isinstance(scale_ratio_range, seq_types), \
            f'unsupported scale_ratio_range type {type(scale_ratio_range)}'
        self.scale_ratio_range = scale_ratio_range

        # A scalar translation_std is broadcast to all three axes.
        if not isinstance(translation_std, seq_types):
            assert isinstance(translation_std, (int, float)), \
                f'unsupported translation_std type {type(translation_std)}'
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        assert all([std >= 0 for std in translation_std]), \
            'translation_std should be positive'
        self.translation_std = translation_std
        self.shift_height = shift_height

    def _trans_bbox_points(self, input_dict):
        """Private function to translate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after translation, 'points', 'pcd_trans' \
                and keys in input_dict['bbox3d_fields'] are updated \
                in the result dict.
        """
        translation_std = np.array(self.translation_std, dtype=np.float32)
        # One gaussian offset per axis, shared by points and all box fields.
        trans_factor = np.random.normal(scale=translation_std, size=3).T

        input_dict['points'].translate(trans_factor)
        input_dict['pcd_trans'] = trans_factor
        for key in input_dict['bbox3d_fields']:
            input_dict[key].translate(trans_factor)

    def _rot_bbox_points(self, input_dict):
        """Private function to rotate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after rotation, 'points', 'pcd_rotation' \
                and keys in input_dict['bbox3d_fields'] are updated \
                in the result dict.
        """
        rotation = self.rot_range
        noise_rotation = np.random.uniform(rotation[0], rotation[1])

        # if no bbox in input_dict, only rotate points
        if len(input_dict['bbox3d_fields']) == 0:
            rot_mat_T = input_dict['points'].rotate(noise_rotation)
            input_dict['pcd_rotation'] = rot_mat_T
            return

        # rotate points with bboxes
        # NOTE(review): if several non-empty bbox fields exist, the points
        # are rotated once per field — presumably only one field is ever
        # populated here; verify against the dataset configs.
        for key in input_dict['bbox3d_fields']:
            if len(input_dict[key].tensor) != 0:
                points, rot_mat_T = input_dict[key].rotate(
                    noise_rotation, input_dict['points'])
                input_dict['points'] = points
                input_dict['pcd_rotation'] = rot_mat_T

    def _scale_bbox_points(self, input_dict):
        """Private function to scale bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points'and keys in \
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        scale = input_dict['pcd_scale_factor']
        points = input_dict['points']
        points.scale(scale)
        if self.shift_height:
            # the stored height attribute must be scaled consistently
            assert 'height' in points.attribute_dims.keys(), \
                'setting shift_height=True but points have no height attribute'
            points.tensor[:, points.attribute_dims['height']] *= scale
        input_dict['points'] = points

        for key in input_dict['bbox3d_fields']:
            input_dict[key].scale(scale)

    def _random_scale(self, input_dict):
        """Private function to randomly set the scale factor.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'pcd_scale_factor' are updated \
                in the result dict.
        """
        scale_factor = np.random.uniform(self.scale_ratio_range[0],
                                         self.scale_ratio_range[1])
        input_dict['pcd_scale_factor'] = scale_factor

    def __call__(self, input_dict):
        """Private function to rotate, scale and translate bounding boxes and \
        points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points', 'pcd_rotation',
                'pcd_scale_factor', 'pcd_trans' and keys in \
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        self._rot_bbox_points(input_dict)

        # a pre-set 'pcd_scale_factor' (e.g. from test-time aug) is honored
        if 'pcd_scale_factor' not in input_dict:
            self._random_scale(input_dict)
        self._scale_bbox_points(input_dict)

        self._trans_bbox_points(input_dict)

        # record the applied order: Rotation -> Scaling -> Translation
        input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(rot_range={self.rot_range},'
        repr_str += f' scale_ratio_range={self.scale_ratio_range},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' shift_height={self.shift_height})'
        return repr_str
@PIPELINES.register_module()
class RotFlipScaleTrans(object):
    """Apply global rotation, scaling and translation to a 3D scene.

    Mirrors :class:`GlobalRotScaleTrans` but is registered under its own
    pipeline name.

    Args:
        scale_ratio_range (list[float]): Range of scale ratio.
            Defaults to [0.95, 1.05].
        translation_std (list[float]): Standard deviation of the gaussian
            translation noise applied to the whole scene.
            Defaults to [0, 0, 0].
        rot_range (list[float]): Range of rotation angle.
            Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]).
        shift_height (bool): Whether to also scale the height attribute
            (the fourth dimension of indoor points) when scaling.
            Defaults to False.
    """

    def __init__(self,
                 scale_ratio_range=[0.95, 1.05],
                 translation_std=[0, 0, 0],
                 rot_range=[-0.78539816, 0.78539816],
                 shift_height=False):
        # BUG FIX: the original signature omitted `rot_range` and
        # `shift_height` (left behind a '#TODO') although the body used
        # both, so instantiating the class raised NameError. They are
        # restored as trailing parameters with the same defaults as
        # GlobalRotScaleTrans, keeping existing positional callers working.
        seq_types = (list, tuple, np.ndarray)
        # A scalar rot_range r is interpreted as the symmetric range [-r, r].
        if not isinstance(rot_range, seq_types):
            assert isinstance(rot_range, (int, float)), \
                f'unsupported rot_range type {type(rot_range)}'
            rot_range = [-rot_range, rot_range]
        self.rot_range = rot_range

        assert isinstance(scale_ratio_range, seq_types), \
            f'unsupported scale_ratio_range type {type(scale_ratio_range)}'
        self.scale_ratio_range = scale_ratio_range

        # A scalar translation_std is broadcast to all three axes.
        if not isinstance(translation_std, seq_types):
            assert isinstance(translation_std, (int, float)), \
                f'unsupported translation_std type {type(translation_std)}'
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        assert all([std >= 0 for std in translation_std]), \
            'translation_std should be positive'
        self.translation_std = translation_std
        self.shift_height = shift_height

    def _trans_bbox_points(self, input_dict):
        """Private function to translate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after translation, 'points', 'pcd_trans' \
                and keys in input_dict['bbox3d_fields'] are updated \
                in the result dict.
        """
        translation_std = np.array(self.translation_std, dtype=np.float32)
        trans_factor = np.random.normal(scale=translation_std, size=3).T

        input_dict['points'].translate(trans_factor)
        input_dict['pcd_trans'] = trans_factor
        for key in input_dict['bbox3d_fields']:
            input_dict[key].translate(trans_factor)

    def _rot_bbox_points(self, input_dict):
        """Private function to rotate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after rotation, 'points', 'pcd_rotation' \
                and keys in input_dict['bbox3d_fields'] are updated \
                in the result dict.
        """
        rotation = self.rot_range
        noise_rotation = np.random.uniform(rotation[0], rotation[1])

        # if no bbox in input_dict, only rotate points
        if len(input_dict['bbox3d_fields']) == 0:
            rot_mat_T = input_dict['points'].rotate(noise_rotation)
            input_dict['pcd_rotation'] = rot_mat_T
            return

        # rotate points with bboxes
        for key in input_dict['bbox3d_fields']:
            if len(input_dict[key].tensor) != 0:
                points, rot_mat_T = input_dict[key].rotate(
                    noise_rotation, input_dict['points'])
                input_dict['points'] = points
                input_dict['pcd_rotation'] = rot_mat_T

    def _scale_bbox_points(self, input_dict):
        """Private function to scale bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points'and keys in \
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        scale = input_dict['pcd_scale_factor']
        points = input_dict['points']
        points.scale(scale)
        if self.shift_height:
            # the stored height attribute must be scaled consistently
            assert 'height' in points.attribute_dims.keys(), \
                'setting shift_height=True but points have no height attribute'
            points.tensor[:, points.attribute_dims['height']] *= scale
        input_dict['points'] = points

        for key in input_dict['bbox3d_fields']:
            input_dict[key].scale(scale)

    def _random_scale(self, input_dict):
        """Private function to randomly set the scale factor.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'pcd_scale_factor' are updated \
                in the result dict.
        """
        scale_factor = np.random.uniform(self.scale_ratio_range[0],
                                         self.scale_ratio_range[1])
        input_dict['pcd_scale_factor'] = scale_factor

    def __call__(self, input_dict):
        """Rotate, scale and translate bounding boxes and points.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after scaling, 'points', 'pcd_rotation',
                'pcd_scale_factor', 'pcd_trans' and keys in \
                input_dict['bbox3d_fields'] are updated in the result dict.
        """
        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        self._rot_bbox_points(input_dict)

        # a pre-set 'pcd_scale_factor' (e.g. from test-time aug) is honored
        if 'pcd_scale_factor' not in input_dict:
            self._random_scale(input_dict)
        self._scale_bbox_points(input_dict)

        self._trans_bbox_points(input_dict)

        # record the applied order: Rotation -> Scaling -> Translation
        input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(rot_range={self.rot_range},'
        repr_str += f' scale_ratio_range={self.scale_ratio_range},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' shift_height={self.shift_height})'
        return repr_str
@PIPELINES.register_module()
class PointShuffle(object):
    """Shuffle the order of the input points."""

    def __call__(self, input_dict):
        """Shuffle points and keep per-point masks aligned.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with shuffled 'points'; 'pts_instance_mask' and
                'pts_semantic_mask' (when present) are permuted with the
                same index order.
        """
        order = input_dict['points'].shuffle().numpy()

        for key in ('pts_instance_mask', 'pts_semantic_mask'):
            mask = input_dict.get(key, None)
            if mask is not None:
                input_dict[key] = mask[order]

        return input_dict

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class ObjectRangeFilter(object):
    """Filter objects by the range.

    Args:
        point_cloud_range (list[float]): Point cloud range
            as [x_min, y_min, z_min, x_max, y_max, z_max].
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Call function to filter objects by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \
                keys are updated in the result dict.
        """
        # Check points instance type and initialise bev_range:
        # LiDAR/Depth boxes use x/y as the BEV plane, Camera boxes x/z.
        if isinstance(input_dict['gt_bboxes_3d'],
                      (LiDARInstance3DBoxes, DepthInstance3DBoxes)):
            bev_range = self.pcd_range[[0, 1, 3, 4]]
        elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes):
            bev_range = self.pcd_range[[0, 2, 3, 5]]

        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']
        mask = gt_bboxes_3d.in_range_bev(bev_range)
        gt_bboxes_3d = gt_bboxes_3d[mask]
        # mask is a torch tensor but gt_labels_3d is still numpy array
        # using mask to index gt_labels_3d will cause bug when
        # len(gt_labels_3d) == 1, where mask=1 will be interpreted
        # as gt_labels_3d[1] and cause out of index error
        # BUG FIX: `np.bool` was removed in NumPy 1.24; the builtin `bool`
        # is the documented replacement and yields the same boolean dtype.
        gt_labels_3d = gt_labels_3d[mask.numpy().astype(bool)]

        # limit rad to [-pi, pi]
        gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)
        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        input_dict['gt_labels_3d'] = gt_labels_3d

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'
        return repr_str
@PIPELINES.register_module()
class PointsRangeFilter(object):
    """Filter points by the range.

    Args:
        point_cloud_range (list[float]): Point cloud range.
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Drop points (and their aligned masks) outside the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering; 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        keep = points.in_range_3d(self.pcd_range)
        input_dict['points'] = points[keep]

        keep_np = keep.numpy()
        for key in ('pts_instance_mask', 'pts_semantic_mask'):
            mask = input_dict.get(key, None)
            if mask is not None:
                input_dict[key] = mask[keep_np]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return (f'{self.__class__.__name__}'
                f'(point_cloud_range={self.pcd_range.tolist()})')
@PIPELINES.register_module()
class ObjectNameFilter(object):
    """Filter GT objects by their names.

    Args:
        classes (list[str]): List of class names to be kept for training.
    """

    def __init__(self, classes):
        self.classes = classes
        # labels are simply the indices of the kept classes
        self.labels = list(range(len(self.classes)))

    def __call__(self, input_dict):
        """Keep only the annotations whose label is among ``self.labels``.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering; 'gt_bboxes_3d' and 'gt_labels_3d'
                keys are updated in the result dict.
        """
        labels = input_dict['gt_labels_3d']
        keep = np.array([lbl in self.labels for lbl in labels],
                        dtype=np.bool_)
        input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][keep]
        input_dict['gt_labels_3d'] = labels[keep]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return f'{self.__class__.__name__}(classes={self.classes})'
@PIPELINES.register_module()
class PointSample(object):
    """Point sample.

    Sampling data to a certain number.

    Args:
        num_points (int): Number of points to be sampled.
        sample_range (float, optional): The range where to sample points.
            If not None, the points with depth larger than `sample_range` are
            prior to be sampled. Defaults to None.
        replace (bool, optional): Whether the sampling is with or without
            replacement. Defaults to False.
    """

    def __init__(self, num_points, sample_range=None, replace=False):
        self.num_points = num_points
        self.sample_range = sample_range
        self.replace = replace

    def _points_random_sampling(self,
                                points,
                                num_samples,
                                sample_range=None,
                                replace=False,
                                return_choices=False):
        """Randomly sample ``num_samples`` of the given points.

        Args:
            points (np.ndarray | :obj:`BasePoints`): 3D Points.
            num_samples (int): Number of samples to be sampled.
            sample_range (float, optional): Range from which near points are
                preferred. Defaults to None.
            replace (bool, optional): Sampling with or without replacement.
                Defaults to False.
            return_choices (bool, optional): Whether to also return the
                chosen indices. Defaults to False.

        Returns:
            tuple[np.ndarray] | np.ndarray:
                - points (np.ndarray | :obj:`BasePoints`): 3D Points.
                - choices (np.ndarray, optional): The generated random samples.
        """
        if not replace:
            # fall back to replacement when fewer points exist than requested
            replace = (points.shape[0] < num_samples)
        candidates = range(len(points))
        if sample_range is not None and not replace:
            # Far points (depth > sample_range) are taken first, capped at
            # num_samples; the remainder is drawn from the near points.
            depth = np.linalg.norm(points.tensor, axis=1)
            far_inds = np.where(depth > sample_range)[0]
            near_inds = np.where(depth <= sample_range)[0]
            if len(far_inds) > num_samples:
                far_inds = np.random.choice(
                    far_inds, num_samples, replace=False)
            candidates = near_inds
            num_samples -= len(far_inds)
        picked = np.random.choice(candidates, num_samples, replace=replace)
        if sample_range is not None and not replace:
            picked = np.concatenate((far_inds, picked))
            # Shuffle points after sampling
            np.random.shuffle(picked)
        return (points[picked], picked) if return_choices else points[picked]

    def __call__(self, results):
        """Sample the scene points down to ``self.num_points``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling; 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        # Points in Camera coord can provide the depth information.
        # TODO: Need to suport distance-based sampling for other coord system.
        if self.sample_range is not None:
            from mmdet3d.core.points import CameraPoints
            assert isinstance(points, CameraPoints), \
                'Sampling based on distance is only appliable for CAMERA coord'
        sampled, choices = self._points_random_sampling(
            points,
            self.num_points,
            self.sample_range,
            self.replace,
            return_choices=True)
        results['points'] = sampled

        for key in ('pts_instance_mask', 'pts_semantic_mask'):
            mask = results.get(key, None)
            if mask is not None:
                results[key] = mask[choices]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        return (f'{self.__class__.__name__}(num_points={self.num_points},'
                f' sample_range={self.sample_range},'
                f' replace={self.replace})')
@PIPELINES.register_module()
class IndoorPointSample(PointSample):
    """Indoor point sample.

    Sampling data to a certain number.

    NOTE: IndoorPointSample is deprecated in favor of PointSample

    Args:
        num_points (int): Number of points to be sampled.
    """

    def __init__(self, *args, **kwargs):
        # Deprecated alias: warn once, then defer entirely to PointSample.
        warnings.warn(
            'IndoorPointSample is deprecated in favor of PointSample')
        super().__init__(*args, **kwargs)
@PIPELINES.register_module()
class IndoorPatchPointSample(object):
    r"""Indoor point sample within a patch. Modified from `PointNet++ <https://
    github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py>`_.

    Sampling data to a certain number for semantic segmentation.

    Args:
        num_points (int): Number of points to be sampled.
        block_size (float, optional): Size of a block to sample points from.
            Defaults to 1.5.
        sample_rate (float, optional): Stride used in sliding patch generation.
            This parameter is unused in `IndoorPatchPointSample` and thus has
            been deprecated. We plan to remove it in the future.
            Defaults to None.
        ignore_index (int, optional): Label index that won't be used for the
            segmentation task. This is set in PointSegClassMapping as neg_cls.
            If not None, will be used as a patch selection criterion.
            Defaults to None.
        use_normalized_coord (bool, optional): Whether to use normalized xyz as
            additional features. Defaults to False.
        num_try (int, optional): Number of times to try if the patch selected
            is invalid. Defaults to 10.
        enlarge_size (float | None, optional): Enlarge the sampled patch to
            [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as
            an augmentation. If None, set it as 0. Defaults to 0.2.
        min_unique_num (int | None, optional): Minimum number of unique points
            the sampled patch should contain. If None, use PointNet++'s method
            to judge uniqueness. Defaults to None.
        eps (float, optional): A value added to patch boundary to guarantee
            points coverage. Defaults to 1e-2.

    Note:
        This transform should only be used in the training process of point
        cloud segmentation tasks. For the sliding patch generation and
        inference process in testing, please refer to the `slide_inference`
        function of `EncoderDecoder3D` class.
    """

    def __init__(self,
                 num_points,
                 block_size=1.5,
                 sample_rate=None,
                 ignore_index=None,
                 use_normalized_coord=False,
                 num_try=10,
                 enlarge_size=0.2,
                 min_unique_num=None,
                 eps=1e-2):
        self.num_points = num_points
        self.block_size = block_size
        self.ignore_index = ignore_index
        self.use_normalized_coord = use_normalized_coord
        self.num_try = num_try
        # None is normalized to 0.0 so the patch bounds need no special case
        self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0
        self.min_unique_num = min_unique_num
        self.eps = eps

        if sample_rate is not None:
            warnings.warn(
                "'sample_rate' has been deprecated and will be removed in "
                'the future. Please remove them from your code.')

    def _input_generation(self, coords, patch_center, coord_max, attributes,
                          attribute_dims, point_type):
        """Generating model input.

        Generate input by subtracting patch center and adding additional \
        features. Currently support colors and normalized xyz as features.

        Args:
            coords (np.ndarray): Sampled 3D Points.
            patch_center (np.ndarray): Center coordinate of the selected patch.
            coord_max (np.ndarray): Max coordinate of all 3D Points.
            attributes (np.ndarray): features of input points.
            attribute_dims (dict): Dictionary to indicate the meaning of extra
                dimension.
            point_type (type): class of input points inherited from BasePoints.

        Returns:
            :obj:`BasePoints`: The generated input data.
        """
        # subtract patch center, the z dimension is not centered
        centered_coords = coords.copy()
        centered_coords[:, 0] -= patch_center[0]
        centered_coords[:, 1] -= patch_center[1]

        if self.use_normalized_coord:
            # normalized xyz in [0, 1] appended as three extra feature dims
            normalized_coord = coords / coord_max
            attributes = np.concatenate([attributes, normalized_coord], axis=1)
            if attribute_dims is None:
                attribute_dims = dict()
            attribute_dims.update(
                dict(normalized_coord=[
                    attributes.shape[1], attributes.shape[1] +
                    1, attributes.shape[1] + 2
                ]))

        points = np.concatenate([centered_coords, attributes], axis=1)
        points = point_type(
            points, points_dim=points.shape[1], attribute_dims=attribute_dims)

        return points

    def _patch_points_sampling(self, points, sem_mask):
        """Patch points sampling.

        First sample a valid patch.
        Then sample points within that patch to a certain number.

        Args:
            points (:obj:`BasePoints`): 3D Points.
            sem_mask (np.ndarray): semantic segmentation mask for input points.

        Returns:
            tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`:

                - points (:obj:`BasePoints`): 3D Points.
                - choices (np.ndarray): The generated random samples.
        """
        coords = points.coord.numpy()
        attributes = points.tensor[:, 3:].numpy()
        attribute_dims = points.attribute_dims
        point_type = type(points)

        coord_max = np.amax(coords, axis=0)
        coord_min = np.amin(coords, axis=0)

        # NOTE(review): if all num_try attempts fail both criteria below,
        # the loop falls through and the last sampled patch is used as-is.
        for _ in range(self.num_try):
            # random sample a point as patch center
            cur_center = coords[np.random.choice(coords.shape[0])]

            # boundary of a patch, which would be enlarged by
            # `self.enlarge_size` as an augmentation
            cur_max = cur_center + np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_min = cur_center - np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            # the patch spans the full z-extent of the scene
            cur_max[2] = coord_max[2]
            cur_min[2] = coord_min[2]
            cur_choice = np.sum(
                (coords >= (cur_min - self.enlarge_size)) *
                (coords <= (cur_max + self.enlarge_size)),
                axis=1) == 3

            if not cur_choice.any():  # no points in this patch
                continue

            cur_coords = coords[cur_choice, :]
            cur_sem_mask = sem_mask[cur_choice]
            point_idxs = np.where(cur_choice)[0]
            # mask restricts to the un-enlarged patch (plus eps tolerance)
            mask = np.sum(
                (cur_coords >= (cur_min - self.eps)) * (cur_coords <=
                                                        (cur_max + self.eps)),
                axis=1) == 3

            # two criteria for patch sampling, adopted from PointNet++
            # 1. selected patch should contain enough unique points
            if self.min_unique_num is None:
                # use PointNet++'s method as default
                # [31, 31, 62] are just some big values used to transform
                # coords from 3d array to 1d and then check their uniqueness
                # this is used in all the ScanNet code following PointNet++
                vidx = np.ceil(
                    (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) *
                    np.array([31.0, 31.0, 62.0]))
                vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 +
                                 vidx[:, 2])
                flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
            else:
                # if `min_unique_num` is provided, directly compare with it
                flag1 = mask.sum() >= self.min_unique_num

            # 2. selected patch should contain enough annotated points
            if self.ignore_index is None:
                flag2 = True
            else:
                # at least 70% of the patch points must carry a valid label
                flag2 = np.sum(cur_sem_mask != self.ignore_index) / \
                    len(cur_sem_mask) >= 0.7

            if flag1 and flag2:
                break

        # sample idx to `self.num_points`
        if point_idxs.size >= self.num_points:
            # no duplicate in sub-sampling
            choices = np.random.choice(
                point_idxs, self.num_points, replace=False)
        else:
            # do not use random choice here to avoid some points not counted
            dup = np.random.choice(point_idxs.size,
                                   self.num_points - point_idxs.size)
            idx_dup = np.concatenate(
                [np.arange(point_idxs.size),
                 np.array(dup)], 0)
            choices = point_idxs[idx_dup]

        # construct model input
        points = self._input_generation(coords[choices], cur_center, coord_max,
                                        attributes[choices], attribute_dims,
                                        point_type)

        return points, choices

    def __call__(self, results):
        """Call function to sample points to in indoor scenes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask' \
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']

        assert 'pts_semantic_mask' in results.keys(), \
            'semantic mask should be provided in training and evaluation'
        pts_semantic_mask = results['pts_semantic_mask']

        points, choices = self._patch_points_sampling(points,
                                                      pts_semantic_mask)

        results['points'] = points
        results['pts_semantic_mask'] = pts_semantic_mask[choices]
        pts_instance_mask = results.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            results['pts_instance_mask'] = pts_instance_mask[choices]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' block_size={self.block_size},'
        repr_str += f' ignore_index={self.ignore_index},'
        repr_str += f' use_normalized_coord={self.use_normalized_coord},'
        repr_str += f' num_try={self.num_try},'
        repr_str += f' enlarge_size={self.enlarge_size},'
        repr_str += f' min_unique_num={self.min_unique_num},'
        repr_str += f' eps={self.eps})'
        return repr_str
@PIPELINES.register_module()
class BackgroundPointsFilter(object):
    """Filter background points near the bounding box.

    Args:
        bbox_enlarge_range (tuple[float], float): Bbox enlarge range.
    """

    def __init__(self, bbox_enlarge_range):
        assert (is_tuple_of(bbox_enlarge_range, float)
                and len(bbox_enlarge_range) == 3) \
            or isinstance(bbox_enlarge_range, float), \
            f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}'

        # a scalar is broadcast to all three box dimensions
        if isinstance(bbox_enlarge_range, float):
            bbox_enlarge_range = [bbox_enlarge_range] * 3
        self.bbox_enlarge_range = np.array(
            bbox_enlarge_range, dtype=np.float32)[np.newaxis, :]

    def __call__(self, input_dict):
        """Drop points inside the enlarged boxes but outside the boxes.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering; 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        gt_bboxes_3d = input_dict['gt_bboxes_3d']

        # work on copies so the ground truth boxes are not modified
        boxes_np = gt_bboxes_3d.tensor.clone().numpy()
        boxes_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy()
        enlarged_boxes = boxes_np.copy()
        enlarged_boxes[:, 3:6] += self.bbox_enlarge_range

        points_np = points.tensor.clone().numpy()
        in_box = box_np_ops.points_in_rbbox(
            points_np, boxes_np, origin=(0.5, 0.5, 0.5)).max(1)
        in_enlarged = box_np_ops.points_in_rbbox(
            points_np, enlarged_boxes, origin=(0.5, 0.5, 0.5)).max(1)
        # keep a point unless it lies only in the enlarged margin
        valid_masks = ~np.logical_and(~in_box, in_enlarged)

        input_dict['points'] = points[valid_masks]
        for key in ('pts_instance_mask', 'pts_semantic_mask'):
            mask = input_dict.get(key, None)
            if mask is not None:
                input_dict[key] = mask[valid_masks]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return (f'{self.__class__.__name__}'
                f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})')
@PIPELINES.register_module()
class VoxelBasedPointSampler(object):
    """Voxel based point sampler.

    Apply voxel sampling to multiple sweep points.

    Args:
        cur_sweep_cfg (dict): Config for sampling current points.
        prev_sweep_cfg (dict): Config for sampling previous points.
        time_dim (int): Index that indicate the time dimention
            for input points.
    """

    def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3):
        self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg)
        self.cur_voxel_num = self.cur_voxel_generator._max_voxels
        self.time_dim = time_dim
        if prev_sweep_cfg is not None:
            # both samplers must keep the same points-per-voxel so their
            # outputs can be concatenated along the voxel axis in __call__
            assert prev_sweep_cfg['max_num_points'] == \
                cur_sweep_cfg['max_num_points']
            self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg)
            self.prev_voxel_num = self.prev_voxel_generator._max_voxels
        else:
            self.prev_voxel_generator = None
            self.prev_voxel_num = 0

    def _sample_points(self, points, sampler, point_dim):
        """Sample points for each points subset.

        Args:
            points (np.ndarray): Points subset to be sampled.
            sampler (VoxelGenerator): Voxel based sampler for
                each points subset.
            point_dim (int): The dimention of each points

        Returns:
            np.ndarray: Sampled points.
        """
        voxels, coors, num_points_per_voxel = sampler.generate(points)
        if voxels.shape[0] < sampler._max_voxels:
            # pad to a fixed voxel count with copies of the first voxel so
            # the output array always has a constant shape
            padding_points = np.zeros([
                sampler._max_voxels - voxels.shape[0], sampler._max_num_points,
                point_dim
            ],
                                      dtype=points.dtype)
            padding_points[:] = voxels[0]
            sample_points = np.concatenate([voxels, padding_points], axis=0)
        else:
            sample_points = voxels

        return sample_points

    def __call__(self, results):
        """Call function to sample points from multiple sweeps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask' \
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        original_dim = points.shape[1]

        # TODO: process instance and semantic mask while _max_num_points
        # is larger than 1
        # Extend points with seg and mask fields
        map_fields2dim = []
        start_dim = original_dim
        points_numpy = points.tensor.numpy()
        extra_channel = [points_numpy]
        for idx, key in enumerate(results['pts_mask_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])

        start_dim += len(results['pts_mask_fields'])
        for idx, key in enumerate(results['pts_seg_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])

        points_numpy = np.concatenate(extra_channel, axis=-1)

        # Split points into two part, current sweep points and
        # previous sweeps points.
        # TODO: support different sampling methods for next sweeps points
        # and previous sweeps points.
        cur_points_flag = (points_numpy[:, self.time_dim] == 0)
        cur_sweep_points = points_numpy[cur_points_flag]
        prev_sweeps_points = points_numpy[~cur_points_flag]
        if prev_sweeps_points.shape[0] == 0:
            # no previous sweeps available: reuse the current sweep so the
            # previous-sweep sampler still receives valid input
            prev_sweeps_points = cur_sweep_points

        # Shuffle points before sampling
        np.random.shuffle(cur_sweep_points)
        np.random.shuffle(prev_sweeps_points)

        cur_sweep_points = self._sample_points(cur_sweep_points,
                                               self.cur_voxel_generator,
                                               points_numpy.shape[1])
        if self.prev_voxel_generator is not None:
            prev_sweeps_points = self._sample_points(prev_sweeps_points,
                                                     self.prev_voxel_generator,
                                                     points_numpy.shape[1])

            points_numpy = np.concatenate(
                [cur_sweep_points, prev_sweeps_points], 0)
        else:
            points_numpy = cur_sweep_points

        if self.cur_voxel_generator._max_num_points == 1:
            points_numpy = points_numpy.squeeze(1)
        results['points'] = points.new_point(points_numpy[..., :original_dim])

        # Restore the correspoinding seg and mask fields
        for key, dim_index in map_fields2dim:
            results[key] = points_numpy[..., dim_index]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""

        def _auto_indent(repr_str, indent):
            repr_str = repr_str.split('\n')
            repr_str = [' ' * indent + t + '\n' for t in repr_str]
            repr_str = ''.join(repr_str)[:-1]
            return repr_str

        repr_str = self.__class__.__name__
        indent = 4
        repr_str += '(\n'
        repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\n'
        repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\n'
        repr_str += ' ' * indent + f'time_dim={self.time_dim},\n'
        repr_str += ' ' * indent + 'cur_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\n'
        repr_str += ' ' * indent + 'prev_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})'
        return repr_str
| 39.522959 | 79 | 0.613793 |
import numpy as np
import warnings
from mmcv import is_tuple_of
from mmcv.utils import build_from_cfg
from mmdet3d.core import VoxelGenerator
from mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,
LiDARInstance3DBoxes, box_np_ops)
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import RandomFlip
from ..builder import OBJECTSAMPLERS
from .data_augment_utils import noise_per_object_v3_
@PIPELINES.register_module()
class RandomDropPointsColor(object):
    """Randomly set the color of points to all zeros.

    Args:
        drop_ratio (float): The probability of dropping point colors.
            Defaults to 0.2.
    """

    def __init__(self, drop_ratio=0.2):
        valid = isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1
        assert valid, f'invalid drop_ratio value {drop_ratio}'
        self.drop_ratio = drop_ratio

    def __call__(self, input_dict):
        """Drop point colors with probability ``drop_ratio``.

        Args:
            input_dict (dict): Result dict from the loading pipeline.

        Returns:
            dict: Results with the 'points' color possibly zeroed.
        """
        points = input_dict['points']
        assert points.attribute_dims is not None and \
            'color' in points.attribute_dims, \
            'Expect points have color attribute'

        # rand() > 1 - p is true with probability p
        if np.random.rand() > 1.0 - self.drop_ratio:
            points.color = points.color * 0.0
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__ + f'(drop_ratio={self.drop_ratio})'
@PIPELINES.register_module()
class RandomFlip3D(RandomFlip):
    """Flip the points & bboxes, optionally in sync with 2D image flipping.

    Args:
        sync_2d (bool): Whether to apply the 3D horizontal flip according to
            the 2D image flip decided by the parent class. Defaults to True.
        flip_ratio_bev_horizontal (float): Probability of a horizontal BEV
            flip (passed to the parent as ``flip_ratio``). Defaults to 0.0.
        flip_ratio_bev_vertical (float): Probability of a vertical BEV flip.
            Defaults to 0.0.
    """

    def __init__(self,
                 sync_2d=True,
                 flip_ratio_bev_horizontal=0.0,
                 flip_ratio_bev_vertical=0.0,
                 **kwargs):
        super(RandomFlip3D, self).__init__(
            flip_ratio=flip_ratio_bev_horizontal, **kwargs)
        self.sync_2d = sync_2d
        self.flip_ratio_bev_vertical = flip_ratio_bev_vertical
        if flip_ratio_bev_horizontal is not None:
            assert isinstance(
                flip_ratio_bev_horizontal,
                (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1
        if flip_ratio_bev_vertical is not None:
            assert isinstance(
                flip_ratio_bev_vertical,
                (int, float)) and 0 <= flip_ratio_bev_vertical <= 1

    def random_flip_data_3d(self, input_dict, direction='horizontal'):
        """Flip 3D data (points, boxes, 2D centers) in the given direction.

        Args:
            input_dict (dict): Result dict from loading pipeline.
            direction (str): 'horizontal' or 'vertical'.
                Defaults to 'horizontal'.
        """
        assert direction in ['horizontal', 'vertical']
        if len(input_dict['bbox3d_fields']) == 0:
            # no 3D boxes present: register an empty box so the points can
            # still be flipped through the box API below
            input_dict['bbox3d_fields'].append('empty_box3d')
            input_dict['empty_box3d'] = input_dict['box_type_3d'](
                np.array([], dtype=np.float32))
        assert len(input_dict['bbox3d_fields']) == 1
        for key in input_dict['bbox3d_fields']:
            if 'points' in input_dict:
                input_dict['points'] = input_dict[key].flip(
                    direction, points=input_dict['points'])
            else:
                input_dict[key].flip(direction)
        if 'centers2d' in input_dict:
            assert self.sync_2d is True and direction == 'horizontal', \
                'Only support sync_2d=True and horizontal flip with images'
            w = input_dict['ori_shape'][1]
            # mirror the projected 2D centers and the camera principal point
            input_dict['centers2d'][..., 0] = \
                w - input_dict['centers2d'][..., 0]
            input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2]

    def __call__(self, input_dict):
        """Flip points, 3D boxes and (via the parent class) 2D annotations.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'pcd_horizontal_flip', 'pcd_vertical_flip'
                added and 'transformation_3d_flow' extended with 'HF'/'VF'.
        """
        # the parent decides and applies the 2D image flip first
        super(RandomFlip3D, self).__call__(input_dict)

        if self.sync_2d:
            # reuse the 2D flip decision; vertical flip is disabled
            input_dict['pcd_horizontal_flip'] = input_dict['flip']
            input_dict['pcd_vertical_flip'] = False
        else:
            if 'pcd_horizontal_flip' not in input_dict:
                flip_horizontal = True if np.random.rand(
                ) < self.flip_ratio else False
                input_dict['pcd_horizontal_flip'] = flip_horizontal
            if 'pcd_vertical_flip' not in input_dict:
                flip_vertical = True if np.random.rand(
                ) < self.flip_ratio_bev_vertical else False
                input_dict['pcd_vertical_flip'] = flip_vertical

        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        if input_dict['pcd_horizontal_flip']:
            self.random_flip_data_3d(input_dict, 'horizontal')
            input_dict['transformation_3d_flow'].extend(['HF'])
        if input_dict['pcd_vertical_flip']:
            self.random_flip_data_3d(input_dict, 'vertical')
            input_dict['transformation_3d_flow'].extend(['VF'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(sync_2d={self.sync_2d},'
        repr_str += f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})'
        return repr_str
@PIPELINES.register_module()
class RandomJitterPoints(object):
    """Randomly jitter each point with a small gaussian noise.

    Args:
        jitter_std (list[float] | float): Standard deviation of the jitter
            noise per axis; a scalar is broadcast to all three axes.
            Defaults to [0.01, 0.01, 0.01].
        clip_range (list[float] | float | None): Range the noise is clipped
            into; a scalar ``c`` is expanded to ``[-c, c]``; None disables
            clipping. Defaults to [-0.05, 0.05].
    """

    def __init__(self,
                 jitter_std=[0.01, 0.01, 0.01],
                 clip_range=[-0.05, 0.05]):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(jitter_std, seq_types):
            assert isinstance(jitter_std, (int, float)), \
                f'unsupported jitter_std type {type(jitter_std)}'
            jitter_std = [jitter_std] * 3
        self.jitter_std = jitter_std

        if clip_range is not None and not isinstance(clip_range, seq_types):
            assert isinstance(clip_range, (int, float)), \
                f'unsupported clip_range type {type(clip_range)}'
            clip_range = [-clip_range, clip_range]
        self.clip_range = clip_range

    def __call__(self, input_dict):
        """Translate every point by an independent gaussian noise.

        Args:
            input_dict (dict): Result dict from the loading pipeline.

        Returns:
            dict: Results with the key 'points' updated.
        """
        points = input_dict['points']
        std = np.array(self.jitter_std, dtype=np.float32)
        noise = np.random.randn(points.shape[0], 3) * std[None, :]
        if self.clip_range is not None:
            noise = np.clip(noise, self.clip_range[0], self.clip_range[1])

        points.translate(noise)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__ + \
            f'(jitter_std={self.jitter_std},' + \
            f' clip_range={self.clip_range})'
@PIPELINES.register_module()
class ObjectSample(object):
    """Sample GT objects from a database and paste them into the scene.

    Args:
        db_sampler (dict): Config dict of the database sampler; a 'type'
            key defaulting to 'DataBaseSampler' is injected if missing.
        sample_2d (bool): Whether to also paste the sampled objects' 2D
            boxes and image patches. Defaults to False.
    """

    def __init__(self, db_sampler, sample_2d=False):
        self.sampler_cfg = db_sampler
        self.sample_2d = sample_2d
        if 'type' not in db_sampler.keys():
            db_sampler['type'] = 'DataBaseSampler'
        self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)

    @staticmethod
    def remove_points_in_boxes(points, boxes):
        """Remove the points lying inside any of the given boxes.

        Args:
            points: Input point cloud (project point type with ``.coord``).
            boxes (np.ndarray): Boxes to carve out of the scene.

        Returns:
            The points with those inside ``boxes`` removed.
        """
        masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)
        points = points[np.logical_not(masks.any(-1))]
        return points

    def __call__(self, input_dict):
        """Sample extra GT objects and merge them into the scene.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'points', 'gt_bboxes_3d', 'gt_labels_3d'
                (and, when ``sample_2d``, 'gt_bboxes' and 'img') updated.
        """
        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']
        points = input_dict['points']
        if self.sample_2d:
            img = input_dict['img']
            gt_bboxes_2d = input_dict['gt_bboxes']
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(),
                gt_labels_3d,
                gt_bboxes_2d=gt_bboxes_2d,
                img=img)
        else:
            sampled_dict = self.db_sampler.sample_all(
                gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None)

        if sampled_dict is not None:
            sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d']
            sampled_points = sampled_dict['points']
            sampled_gt_labels = sampled_dict['gt_labels_3d']

            gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels],
                                          axis=0)
            gt_bboxes_3d = gt_bboxes_3d.new_box(
                np.concatenate(
                    [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d]))

            # carve out scene points occupied by the pasted objects and
            # prepend the sampled object points
            points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d)
            points = points.cat([sampled_points, points])

            if self.sample_2d:
                sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d']
                gt_bboxes_2d = np.concatenate(
                    [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32)

                input_dict['gt_bboxes'] = gt_bboxes_2d
                input_dict['img'] = sampled_dict['img']

        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        # FIX: `np.long` was removed from NumPy (deprecated in 1.20,
        # removed in 1.24); `np.int64` is the equivalent concrete dtype.
        input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.int64)
        input_dict['points'] = points
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f' sample_2d={self.sample_2d},'
        repr_str += f' data_root={self.sampler_cfg.data_root},'
        repr_str += f' info_path={self.sampler_cfg.info_path},'
        repr_str += f' rate={self.sampler_cfg.rate},'
        repr_str += f' prepare={self.sampler_cfg.prepare},'
        repr_str += f' classes={self.sampler_cfg.classes},'
        repr_str += f' sample_groups={self.sampler_cfg.sample_groups}'
        return repr_str
@PIPELINES.register_module()
class ObjectNoise(object):
    """Apply independent noise to each GT object in the scene.

    Args:
        translation_std (list[float]): Standard deviation of the per-object
            translation noise. Defaults to [0.25, 0.25, 0.25].
        global_rot_range (list[float]): Global rotation range applied to the
            objects. Defaults to [0.0, 0.0].
        rot_range (list[float]): Per-object rotation perturbation range.
            Defaults to [-0.15707963267, 0.15707963267].
        num_try (int): Number of noise attempts. Defaults to 100.
    """

    def __init__(self,
                 translation_std=[0.25, 0.25, 0.25],
                 global_rot_range=[0.0, 0.0],
                 rot_range=[-0.15707963267, 0.15707963267],
                 num_try=100):
        self.translation_std = translation_std
        self.global_rot_range = global_rot_range
        self.rot_range = rot_range
        self.num_try = num_try

    def __call__(self, input_dict):
        """Perturb GT boxes and the points they contain.

        Args:
            input_dict (dict): Result dict from the loading pipeline.

        Returns:
            dict: Results with 'points' and 'gt_bboxes_3d' updated.
        """
        boxes_3d = input_dict['gt_bboxes_3d']
        points = input_dict['points']

        # noise_per_object_v3_ mutates both arrays in place
        boxes_np = boxes_3d.tensor.numpy()
        points_np = points.tensor.numpy()
        noise_per_object_v3_(
            boxes_np,
            points_np,
            rotation_perturb=self.rot_range,
            center_noise_std=self.translation_std,
            global_random_rot_range=self.global_rot_range,
            num_try=self.num_try)

        input_dict['gt_bboxes_3d'] = boxes_3d.new_box(boxes_np)
        input_dict['points'] = points.new_point(points_np)
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__ + \
            f'(num_try={self.num_try},' + \
            f' translation_std={self.translation_std},' + \
            f' global_rot_range={self.global_rot_range},' + \
            f' rot_range={self.rot_range})'
@PIPELINES.register_module()
class GlobalAlignment(object):
    """Apply a global alignment matrix to all points of a 3D scene.

    The 4x4 matrix is read from ``ann_info['axis_align_matrix']`` and only
    the points are transformed.

    Args:
        rotation_axis (int): The axis the alignment rotation is about.
    """

    def __init__(self, rotation_axis):
        self.rotation_axis = rotation_axis

    def _trans_points(self, input_dict, trans_factor):
        """Translate 'points' by ``trans_factor``."""
        input_dict['points'].translate(trans_factor)

    def _rot_points(self, input_dict, rot_mat):
        """Rotate 'points' using the transposed rotation matrix."""
        input_dict['points'].rotate(rot_mat.T)

    def _check_rot_mat(self, rot_mat):
        """Assert ``rot_mat`` is a pure rotation about ``rotation_axis``."""
        expected = np.zeros(3)
        expected[self.rotation_axis] = 1.0
        ok = np.allclose(np.linalg.det(rot_mat), 1.0)
        ok &= (rot_mat[self.rotation_axis, :] == expected).all()
        ok &= (rot_mat[:, self.rotation_axis] == expected).all()
        assert ok, f'invalid rotation matrix {rot_mat}'

    def __call__(self, input_dict):
        """Align the scene points with the annotated alignment matrix.

        Args:
            input_dict (dict): Result dict from the loading pipeline.

        Returns:
            dict: Results with 'points' rotated and translated.
        """
        assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \
            'axis_align_matrix is not provided in GlobalAlignment'

        align_mat = input_dict['ann_info']['axis_align_matrix']
        assert align_mat.shape == (4, 4), \
            f'invalid shape {align_mat.shape} for axis_align_matrix'
        rot_mat = align_mat[:3, :3]
        trans_vec = align_mat[:3, -1]

        self._check_rot_mat(rot_mat)
        self._rot_points(input_dict, rot_mat)
        self._trans_points(input_dict, trans_vec)

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__ + \
            f'(rotation_axis={self.rotation_axis})'
@PIPELINES.register_module()
class GlobalRotScaleTrans(object):
    """Apply global rotation, scaling and translation to a 3D scene.

    Args:
        rot_range (list[float] | float): Range of the rotation angle; a
            scalar ``r`` is expanded to ``[-r, r]``.
            Defaults to [-0.78539816, 0.78539816].
        scale_ratio_range (list[float]): Range of the scale ratio.
            Defaults to [0.95, 1.05].
        translation_std (list[float] | float): Standard deviation of the
            gaussian translation noise; a scalar is broadcast to all axes.
            Defaults to [0, 0, 0].
        shift_height (bool): Whether to also scale the points' 'height'
            attribute when scaling. Defaults to False.
    """

    def __init__(self,
                 rot_range=[-0.78539816, 0.78539816],
                 scale_ratio_range=[0.95, 1.05],
                 translation_std=[0, 0, 0],
                 shift_height=False):
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(rot_range, seq_types):
            assert isinstance(rot_range, (int, float)), \
                f'unsupported rot_range type {type(rot_range)}'
            rot_range = [-rot_range, rot_range]
        self.rot_range = rot_range

        assert isinstance(scale_ratio_range, seq_types), \
            f'unsupported scale_ratio_range type {type(scale_ratio_range)}'
        self.scale_ratio_range = scale_ratio_range

        if not isinstance(translation_std, seq_types):
            assert isinstance(translation_std, (int, float)), \
                f'unsupported translation_std type {type(translation_std)}'
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        assert all([std >= 0 for std in translation_std]), \
            'translation_std should be positive'
        self.translation_std = translation_std
        self.shift_height = shift_height

    def _trans_bbox_points(self, input_dict):
        """Apply one random translation to points and all box fields.

        Records the sampled factor under 'pcd_trans'.
        """
        translation_std = np.array(self.translation_std, dtype=np.float32)
        trans_factor = np.random.normal(scale=translation_std, size=3).T

        input_dict['points'].translate(trans_factor)
        input_dict['pcd_trans'] = trans_factor
        for key in input_dict['bbox3d_fields']:
            input_dict[key].translate(trans_factor)

    def _rot_bbox_points(self, input_dict):
        """Apply one random rotation to points and all box fields.

        Records the rotation matrix under 'pcd_rotation'.
        """
        rotation = self.rot_range
        noise_rotation = np.random.uniform(rotation[0], rotation[1])

        # with no box fields only the points are rotated
        if len(input_dict['bbox3d_fields']) == 0:
            rot_mat_T = input_dict['points'].rotate(noise_rotation)
            input_dict['pcd_rotation'] = rot_mat_T
            return

        # otherwise the box objects rotate the points alongside themselves
        for key in input_dict['bbox3d_fields']:
            if len(input_dict[key].tensor) != 0:
                points, rot_mat_T = input_dict[key].rotate(
                    noise_rotation, input_dict['points'])
                input_dict['points'] = points
                input_dict['pcd_rotation'] = rot_mat_T

    def _scale_bbox_points(self, input_dict):
        """Scale points (and optionally their height attribute) and boxes
        by the previously sampled 'pcd_scale_factor'."""
        scale = input_dict['pcd_scale_factor']
        points = input_dict['points']
        points.scale(scale)
        if self.shift_height:
            assert 'height' in points.attribute_dims.keys(), \
                'setting shift_height=True but points have no height attribute'
            points.tensor[:, points.attribute_dims['height']] *= scale
        input_dict['points'] = points

        for key in input_dict['bbox3d_fields']:
            input_dict[key].scale(scale)

    def _random_scale(self, input_dict):
        """Sample a scale factor and store it under 'pcd_scale_factor'."""
        scale_factor = np.random.uniform(self.scale_ratio_range[0],
                                         self.scale_ratio_range[1])
        input_dict['pcd_scale_factor'] = scale_factor

    def __call__(self, input_dict):
        """Apply rotation, scaling, then translation (in that order).

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'points', box fields, 'pcd_rotation',
                'pcd_scale_factor', 'pcd_trans' updated and the flow
                extended with ['R', 'S', 'T'].
        """
        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        self._rot_bbox_points(input_dict)

        # reuse an externally provided scale factor when present
        if 'pcd_scale_factor' not in input_dict:
            self._random_scale(input_dict)
        self._scale_bbox_points(input_dict)

        self._trans_bbox_points(input_dict)

        input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(rot_range={self.rot_range},'
        repr_str += f' scale_ratio_range={self.scale_ratio_range},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' shift_height={self.shift_height})'
        return repr_str
@PIPELINES.register_module()
class RotFlipScaleTrans(object):
    """Apply global rotation, scaling and translation to a 3D scene.

    Args:
        scale_ratio_range (list[float]): Range of the scale ratio.
            Defaults to [0.95, 1.05].
        translation_std (list[float] | float): Standard deviation of the
            gaussian translation noise; a scalar is broadcast to all axes.
            Defaults to [0, 0, 0].
        rot_range (list[float] | float): Range of the rotation angle; a
            scalar ``r`` is expanded to ``[-r, r]``.
            Defaults to [-0.78539816, 0.78539816].
        shift_height (bool): Whether to also scale the points' 'height'
            attribute when scaling. Defaults to False.
    """

    def __init__(self,
                 scale_ratio_range=[0.95, 1.05],
                 translation_std=[0, 0, 0],
                 rot_range=[-0.78539816, 0.78539816],
                 shift_height=False):
        # FIX: `rot_range` and `shift_height` were used below but missing
        # from the signature, so any instantiation raised NameError. They
        # are added as trailing defaulted parameters (backward-compatible).
        seq_types = (list, tuple, np.ndarray)
        if not isinstance(rot_range, seq_types):
            assert isinstance(rot_range, (int, float)), \
                f'unsupported rot_range type {type(rot_range)}'
            rot_range = [-rot_range, rot_range]
        self.rot_range = rot_range

        assert isinstance(scale_ratio_range, seq_types), \
            f'unsupported scale_ratio_range type {type(scale_ratio_range)}'
        self.scale_ratio_range = scale_ratio_range

        if not isinstance(translation_std, seq_types):
            assert isinstance(translation_std, (int, float)), \
                f'unsupported translation_std type {type(translation_std)}'
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        assert all([std >= 0 for std in translation_std]), \
            'translation_std should be positive'
        self.translation_std = translation_std
        self.shift_height = shift_height

    def _trans_bbox_points(self, input_dict):
        """Apply one random translation to points and all box fields.

        Records the sampled factor under 'pcd_trans'.
        """
        translation_std = np.array(self.translation_std, dtype=np.float32)
        trans_factor = np.random.normal(scale=translation_std, size=3).T

        input_dict['points'].translate(trans_factor)
        input_dict['pcd_trans'] = trans_factor
        for key in input_dict['bbox3d_fields']:
            input_dict[key].translate(trans_factor)

    def _rot_bbox_points(self, input_dict):
        """Apply one random rotation to points and all box fields.

        Records the rotation matrix under 'pcd_rotation'.
        """
        rotation = self.rot_range
        noise_rotation = np.random.uniform(rotation[0], rotation[1])

        # with no box fields only the points are rotated
        if len(input_dict['bbox3d_fields']) == 0:
            rot_mat_T = input_dict['points'].rotate(noise_rotation)
            input_dict['pcd_rotation'] = rot_mat_T
            return

        # otherwise the box objects rotate the points alongside themselves
        for key in input_dict['bbox3d_fields']:
            if len(input_dict[key].tensor) != 0:
                points, rot_mat_T = input_dict[key].rotate(
                    noise_rotation, input_dict['points'])
                input_dict['points'] = points
                input_dict['pcd_rotation'] = rot_mat_T

    def _scale_bbox_points(self, input_dict):
        """Scale points (and optionally their height attribute) and boxes
        by the previously sampled 'pcd_scale_factor'."""
        scale = input_dict['pcd_scale_factor']
        points = input_dict['points']
        points.scale(scale)
        if self.shift_height:
            assert 'height' in points.attribute_dims.keys(), \
                'setting shift_height=True but points have no height attribute'
            points.tensor[:, points.attribute_dims['height']] *= scale
        input_dict['points'] = points

        for key in input_dict['bbox3d_fields']:
            input_dict[key].scale(scale)

    def _random_scale(self, input_dict):
        """Sample a scale factor and store it under 'pcd_scale_factor'."""
        scale_factor = np.random.uniform(self.scale_ratio_range[0],
                                         self.scale_ratio_range[1])
        input_dict['pcd_scale_factor'] = scale_factor

    def __call__(self, input_dict):
        """Apply rotation, scaling, then translation (in that order).

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'points', box fields, 'pcd_rotation',
                'pcd_scale_factor', 'pcd_trans' updated and the flow
                extended with ['R', 'S', 'T'].
        """
        if 'transformation_3d_flow' not in input_dict:
            input_dict['transformation_3d_flow'] = []

        self._rot_bbox_points(input_dict)

        if 'pcd_scale_factor' not in input_dict:
            self._random_scale(input_dict)
        self._scale_bbox_points(input_dict)

        self._trans_bbox_points(input_dict)

        input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(rot_range={self.rot_range},'
        repr_str += f' scale_ratio_range={self.scale_ratio_range},'
        repr_str += f' translation_std={self.translation_std},'
        repr_str += f' shift_height={self.shift_height})'
        return repr_str
@PIPELINES.register_module()
class PointShuffle(object):
    """Shuffle the points and reorder per-point masks consistently."""

    def __call__(self, input_dict):
        """Shuffle 'points' and apply the same permutation to the masks.

        Args:
            input_dict (dict): Result dict from the loading pipeline.

        Returns:
            dict: Results with 'points' (and mask keys, if present) shuffled.
        """
        idx = input_dict['points'].shuffle().numpy()

        for key in ('pts_instance_mask', 'pts_semantic_mask'):
            mask = input_dict.get(key, None)
            if mask is not None:
                input_dict[key] = mask[idx]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__
@PIPELINES.register_module()
class ObjectRangeFilter(object):
    """Filter GT objects by the point cloud range.

    Args:
        point_cloud_range (list[float]): Point cloud range
            [x_min, y_min, z_min, x_max, y_max, z_max].
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Drop GT boxes falling outside the BEV range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'gt_bboxes_3d' and 'gt_labels_3d' filtered.
        """
        # the BEV plane is (x, y) for LiDAR/depth boxes and (x, z) for
        # camera boxes
        if isinstance(input_dict['gt_bboxes_3d'],
                      (LiDARInstance3DBoxes, DepthInstance3DBoxes)):
            bev_range = self.pcd_range[[0, 1, 3, 4]]
        elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes):
            bev_range = self.pcd_range[[0, 2, 3, 5]]

        gt_bboxes_3d = input_dict['gt_bboxes_3d']
        gt_labels_3d = input_dict['gt_labels_3d']
        mask = gt_bboxes_3d.in_range_bev(bev_range)
        gt_bboxes_3d = gt_bboxes_3d[mask]
        # FIX: `np.bool` was removed from NumPy (deprecated in 1.20,
        # removed in 1.24); the builtin `bool` is the equivalent dtype.
        gt_labels_3d = gt_labels_3d[mask.numpy().astype(bool)]

        # limit yaw to a canonical range after filtering
        gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)
        input_dict['gt_bboxes_3d'] = gt_bboxes_3d
        input_dict['gt_labels_3d'] = gt_labels_3d

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'
        return repr_str
@PIPELINES.register_module()
class PointsRangeFilter(object):
    """Filter points by the point cloud range.

    Args:
        point_cloud_range (list[float]): Point cloud range
            [x_min, y_min, z_min, x_max, y_max, z_max].
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def __call__(self, input_dict):
        """Keep only the points lying inside ``self.pcd_range``.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'points' (and mask keys, if present) filtered.
        """
        points = input_dict['points']
        keep = points.in_range_3d(self.pcd_range)
        input_dict['points'] = points[keep]

        keep_np = keep.numpy()
        for key in ('pts_instance_mask', 'pts_semantic_mask'):
            mask = input_dict.get(key, None)
            if mask is not None:
                input_dict[key] = mask[keep_np]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__ + \
            f'(point_cloud_range={self.pcd_range.tolist()})'
@PIPELINES.register_module()
class ObjectNameFilter(object):
    """Filter GT objects by their classes.

    Args:
        classes (list[str]): List of class names to be kept; label ids
            0..len(classes)-1 are retained.
    """

    def __init__(self, classes):
        self.classes = classes
        self.labels = list(range(len(self.classes)))

    def __call__(self, input_dict):
        """Keep only the GT boxes whose labels belong to ``self.labels``.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results with 'gt_bboxes_3d' and 'gt_labels_3d' filtered.
        """
        labels = input_dict['gt_labels_3d']
        keep = np.array([label in self.labels for label in labels],
                        dtype=np.bool_)
        input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][keep]
        input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][keep]

        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        return self.__class__.__name__ + f'(classes={self.classes})'
@PIPELINES.register_module()
class PointSample(object):
    """Sample points to a fixed number.

    Args:
        num_points (int): Number of points to be sampled.
        sample_range (float, optional): If not None, points with depth no
            larger than ``sample_range`` are preferred when sub-sampling.
            Defaults to None.
        replace (bool, optional): Whether sampling is with replacement.
            Defaults to False.
    """

    def __init__(self, num_points, sample_range=None, replace=False):
        self.num_points = num_points
        self.sample_range = sample_range
        self.replace = replace

    def _points_random_sampling(self,
                                points,
                                num_samples,
                                sample_range=None,
                                replace=False,
                                return_choices=False):
        """Randomly sample ``num_samples`` points.

        Args:
            points: Input point cloud (project point type).
            num_samples (int): Number of samples to draw.
            sample_range (float, optional): Depth threshold separating
                near points (preferred) from far points. Defaults to None.
            replace (bool, optional): Sample with replacement.
                Defaults to False.
            return_choices (bool, optional): Whether to also return the
                chosen indices. Defaults to False.

        Returns:
            Sampled points, or a (points, choices) tuple when
            ``return_choices`` is True.
        """
        if not replace:
            # fall back to replacement when fewer points than samples exist
            replace = (points.shape[0] < num_samples)
        point_range = range(len(points))
        if sample_range is not None and not replace:
            # keep at most num_samples far points, then fill the remaining
            # quota from the near points only
            depth = np.linalg.norm(points.tensor, axis=1)
            far_inds = np.where(depth > sample_range)[0]
            near_inds = np.where(depth <= sample_range)[0]
            if len(far_inds) > num_samples:
                far_inds = np.random.choice(
                    far_inds, num_samples, replace=False)
            point_range = near_inds
            num_samples -= len(far_inds)
        choices = np.random.choice(point_range, num_samples, replace=replace)
        if sample_range is not None and not replace:
            # merge far and near picks and shuffle so order carries no bias
            choices = np.concatenate((far_inds, choices))
            np.random.shuffle(choices)
        if return_choices:
            return points[choices], choices
        else:
            return points[choices]

    def __call__(self, results):
        """Sample 'points' (and the per-point masks) to ``num_points``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = results['points']
        if self.sample_range is not None:
            from mmdet3d.core.points import CameraPoints
            # depth-based preference relies on camera-frame depth
            assert isinstance(points, CameraPoints), \
                'Sampling based on distance is only appliable for CAMERA coord'
        points, choices = self._points_random_sampling(
            points,
            self.num_points,
            self.sample_range,
            self.replace,
            return_choices=True)
        results['points'] = points

        pts_instance_mask = results.get('pts_instance_mask', None)
        pts_semantic_mask = results.get('pts_semantic_mask', None)
        if pts_instance_mask is not None:
            pts_instance_mask = pts_instance_mask[choices]
            results['pts_instance_mask'] = pts_instance_mask
        if pts_semantic_mask is not None:
            pts_semantic_mask = pts_semantic_mask[choices]
            results['pts_semantic_mask'] = pts_semantic_mask

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' sample_range={self.sample_range},'
        repr_str += f' replace={self.replace})'
        return repr_str
@PIPELINES.register_module()
class IndoorPointSample(PointSample):
    """Deprecated alias of :class:`PointSample`."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'IndoorPointSample is deprecated in favor of PointSample')
        super().__init__(*args, **kwargs)
@PIPELINES.register_module()
class IndoorPatchPointSample(object):
    """Sample points within a randomly chosen patch of an indoor scene.

    Args:
        num_points (int): Number of points to be sampled.
        block_size (float, optional): XY size of the patch to sample from.
            Defaults to 1.5.
        sample_rate (float, optional): Deprecated; only triggers a warning.
            Defaults to None.
        ignore_index (int, optional): Semantic label treated as unannotated
            when judging patch validity. Defaults to None.
        use_normalized_coord (bool, optional): Whether to append normalized
            xyz as extra point attributes. Defaults to False.
        num_try (int, optional): Number of attempts at finding a valid
            patch. Defaults to 10.
        enlarge_size (float, optional): Amount the patch boundary is
            enlarged by when selecting candidate points; None means 0.
            Defaults to 0.2.
        min_unique_num (int, optional): Minimum number of unique points a
            patch must contain; None falls back to the PointNet++ voxel
            heuristic. Defaults to None.
        eps (float, optional): Tolerance added to the patch boundary when
            counting covered points. Defaults to 1e-2.
    """

    def __init__(self,
                 num_points,
                 block_size=1.5,
                 sample_rate=None,
                 ignore_index=None,
                 use_normalized_coord=False,
                 num_try=10,
                 enlarge_size=0.2,
                 min_unique_num=None,
                 eps=1e-2):
        self.num_points = num_points
        self.block_size = block_size
        self.ignore_index = ignore_index
        self.use_normalized_coord = use_normalized_coord
        self.num_try = num_try
        self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0
        self.min_unique_num = min_unique_num
        self.eps = eps

        if sample_rate is not None:
            warnings.warn(
                "'sample_rate' has been deprecated and will be removed in "
                'the future. Please remove them from your code.')

    def _input_generation(self, coords, patch_center, coord_max, attributes,
                          attribute_dims, point_type):
        """Build the model-input point object for a sampled patch.

        Centers the XY coordinates on the patch center and optionally
        appends normalized coordinates as extra attributes.

        Args:
            coords (np.ndarray): Sampled point coordinates.
            patch_center (np.ndarray): Center of the sampled patch.
            coord_max (np.ndarray): Per-axis maximum of the scene coords,
                used for normalization.
            attributes (np.ndarray): Non-coordinate point features.
            attribute_dims (dict | None): Mapping of attribute names to
                feature columns.
            point_type (type): Class used to wrap the resulting array.

        Returns:
            The constructed point object.
        """
        # centralization: shift XY so the patch center becomes the origin
        centered_coords = coords.copy()
        centered_coords[:, 0] -= patch_center[0]
        centered_coords[:, 1] -= patch_center[1]

        if self.use_normalized_coord:
            normalized_coord = coords / coord_max
            attributes = np.concatenate([attributes, normalized_coord], axis=1)
            if attribute_dims is None:
                attribute_dims = dict()
            attribute_dims.update(
                dict(normalized_coord=[
                    attributes.shape[1], attributes.shape[1] +
                    1, attributes.shape[1] + 2
                ]))

        points = np.concatenate([centered_coords, attributes], axis=1)
        points = point_type(
            points, points_dim=points.shape[1], attribute_dims=attribute_dims)

        return points

    def _patch_points_sampling(self, points, sem_mask):
        """Sample a valid patch and draw ``num_points`` points from it.

        Args:
            points: Input point cloud (project point type).
            sem_mask (np.ndarray): Per-point semantic labels.

        Returns:
            tuple: (sampled points, chosen indices into the original cloud).
        """
        coords = points.coord.numpy()
        attributes = points.tensor[:, 3:].numpy()
        attribute_dims = points.attribute_dims
        point_type = type(points)

        coord_max = np.amax(coords, axis=0)
        coord_min = np.amin(coords, axis=0)

        for _ in range(self.num_try):
            # pick a random point as the patch center; the patch spans
            # block_size in XY and the full scene extent in Z
            cur_center = coords[np.random.choice(coords.shape[0])]
            cur_max = cur_center + np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_min = cur_center - np.array(
                [self.block_size / 2.0, self.block_size / 2.0, 0.0])
            cur_max[2] = coord_max[2]
            cur_min[2] = coord_min[2]
            # candidate points: inside the enlarged patch on all 3 axes
            cur_choice = np.sum(
                (coords >= (cur_min - self.enlarge_size)) *
                (coords <= (cur_max + self.enlarge_size)),
                axis=1) == 3

            if not cur_choice.any():
                continue

            cur_coords = coords[cur_choice, :]
            cur_sem_mask = sem_mask[cur_choice]
            point_idxs = np.where(cur_choice)[0]
            # points inside the (non-enlarged) patch, with eps tolerance
            mask = np.sum(
                (cur_coords >= (cur_min - self.eps)) * (cur_coords <=
                                                        (cur_max + self.eps)),
                axis=1) == 3

            # criterion 1: the patch contains enough unique points
            if self.min_unique_num is None:
                # [31, 31, 62] are just some big values used to transform
                # coords from 3d array to 1d and then check their uniqueness
                # this is used in all the ScanNet code following PointNet++
                vidx = np.ceil(
                    (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) *
                    np.array([31.0, 31.0, 62.0]))
                vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 +
                                 vidx[:, 2])
                flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
            else:
                # if `min_unique_num` is provided, directly compare with it
                flag1 = mask.sum() >= self.min_unique_num

            # 2. selected patch should contain enough annotated points
            if self.ignore_index is None:
                flag2 = True
            else:
                flag2 = np.sum(cur_sem_mask != self.ignore_index) / \
                    len(cur_sem_mask) >= 0.7

            if flag1 and flag2:
                break

        # NOTE: if all tries fail the last candidate patch is used, relying
        # on Python's for-loop variables remaining bound after the loop

        # sample idx to `self.num_points`
        if point_idxs.size >= self.num_points:
            # no duplicate in sub-sampling
            choices = np.random.choice(
                point_idxs, self.num_points, replace=False)
        else:
            # do not use random choice here to avoid some points not counted
            dup = np.random.choice(point_idxs.size,
                                   self.num_points - point_idxs.size)
            idx_dup = np.concatenate(
                [np.arange(point_idxs.size),
                 np.array(dup)], 0)
            choices = point_idxs[idx_dup]

        # construct model input
        points = self._input_generation(coords[choices], cur_center, coord_max,
                                        attributes[choices], attribute_dims,
                                        point_type)

        return points, choices

    def __call__(self, results):
        """Call function to sample points to in indoor scenes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after sampling, 'points', 'pts_semantic_mask'
                and 'pts_instance_mask' keys are updated in the result dict.
        """
        points = results['points']

        assert 'pts_semantic_mask' in results.keys(), \
            'semantic mask should be provided in training and evaluation'
        pts_semantic_mask = results['pts_semantic_mask']

        points, choices = self._patch_points_sampling(points,
                                                      pts_semantic_mask)

        results['points'] = points
        results['pts_semantic_mask'] = pts_semantic_mask[choices]
        pts_instance_mask = results.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            results['pts_instance_mask'] = pts_instance_mask[choices]

        return results

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(num_points={self.num_points},'
        repr_str += f' block_size={self.block_size},'
        repr_str += f' ignore_index={self.ignore_index},'
        repr_str += f' use_normalized_coord={self.use_normalized_coord},'
        repr_str += f' num_try={self.num_try},'
        repr_str += f' enlarge_size={self.enlarge_size},'
        repr_str += f' min_unique_num={self.min_unique_num},'
        repr_str += f' eps={self.eps})'
        return repr_str
@PIPELINES.register_module()
class BackgroundPointsFilter(object):
    """Filter background points near the bounding box.

    Args:
        bbox_enlarge_range (tuple[float], float): Bbox enlarge range.
    """

    def __init__(self, bbox_enlarge_range):
        assert (is_tuple_of(bbox_enlarge_range, float)
                and len(bbox_enlarge_range) == 3) \
            or isinstance(bbox_enlarge_range, float), \
            f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}'

        if isinstance(bbox_enlarge_range, float):
            # a scalar enlarges all three box dimensions equally
            bbox_enlarge_range = [bbox_enlarge_range] * 3
        self.bbox_enlarge_range = np.array(
            bbox_enlarge_range, dtype=np.float32)[np.newaxis, :]

    def __call__(self, input_dict):
        """Call function to filter points by the range.

        Args:
            input_dict (dict): Result dict from loading pipeline.

        Returns:
            dict: Results after filtering, 'points', 'pts_instance_mask'
                and 'pts_semantic_mask' keys are updated in the result dict.
        """
        points = input_dict['points']
        gt_bboxes_3d = input_dict['gt_bboxes_3d']

        # avoid groundtruth being modified
        gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy()
        gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy()

        enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy()
        enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range
        points_numpy = points.tensor.clone().numpy()
        foreground_masks = box_np_ops.points_in_rbbox(
            points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5))
        enlarge_foreground_masks = box_np_ops.points_in_rbbox(
            points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5))
        foreground_masks = foreground_masks.max(1)
        enlarge_foreground_masks = enlarge_foreground_masks.max(1)

        # drop points inside the enlarged boxes but outside the originals
        valid_masks = ~np.logical_and(~foreground_masks,
                                      enlarge_foreground_masks)

        input_dict['points'] = points[valid_masks]
        pts_instance_mask = input_dict.get('pts_instance_mask', None)
        if pts_instance_mask is not None:
            input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks]

        pts_semantic_mask = input_dict.get('pts_semantic_mask', None)
        if pts_semantic_mask is not None:
            input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks]
        return input_dict

    def __repr__(self):
        """str: Return a string that describes the module."""
        repr_str = self.__class__.__name__
        repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})'
        return repr_str
@PIPELINES.register_module()
class VoxelBasedPointSampler(object):
    """Sample points via voxelization, with separate voxel budgets for the
    current sweep and (optionally) previous sweeps.

    Args:
        cur_sweep_cfg (dict): Keyword arguments for the current-sweep
            ``VoxelGenerator``.
        prev_sweep_cfg (dict, optional): Keyword arguments for the
            previous-sweeps ``VoxelGenerator``; must use the same
            ``max_num_points`` as ``cur_sweep_cfg``.
        time_dim (int): Index of the point feature holding the sweep
            timestamp; points with value 0 in that column are treated as
            the current sweep (see ``__call__``).
    """

    def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3):
        self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg)
        self.cur_voxel_num = self.cur_voxel_generator._max_voxels
        self.time_dim = time_dim
        if prev_sweep_cfg is not None:
            # both generators must agree on points-per-voxel so their
            # outputs can be concatenated along the voxel axis
            assert prev_sweep_cfg['max_num_points'] == \
                cur_sweep_cfg['max_num_points']
            self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg)
            self.prev_voxel_num = self.prev_voxel_generator._max_voxels
        else:
            self.prev_voxel_generator = None
            self.prev_voxel_num = 0

    def _sample_points(self, points, sampler, point_dim):
        """Voxelize ``points`` and pad to ``sampler._max_voxels`` voxels.

        Padding repeats the first voxel so the output shape is fixed.
        """
        voxels, coors, num_points_per_voxel = sampler.generate(points)
        if voxels.shape[0] < sampler._max_voxels:
            padding_points = np.zeros([
                sampler._max_voxels - voxels.shape[0], sampler._max_num_points,
                point_dim
            ],
                                      dtype=points.dtype)
            # fill the padding with copies of the first voxel
            padding_points[:] = voxels[0]
            sample_points = np.concatenate([voxels, padding_points], axis=0)
        else:
            sample_points = voxels
        return sample_points

    def __call__(self, results):
        """Sample points while keeping mask/seg fields aligned with them."""
        points = results['points']
        original_dim = points.shape[1]
        # TODO: process instance and semantic mask while _max_num_points
        # is larger than 1
        # Extend points with seg and mask fields as extra trailing channels,
        # remembering which channel each field landed in.
        map_fields2dim = []
        start_dim = original_dim
        points_numpy = points.tensor.numpy()
        extra_channel = [points_numpy]
        for idx, key in enumerate(results['pts_mask_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])
        start_dim += len(results['pts_mask_fields'])
        for idx, key in enumerate(results['pts_seg_fields']):
            map_fields2dim.append((key, idx + start_dim))
            extra_channel.append(results[key][..., None])
        points_numpy = np.concatenate(extra_channel, axis=-1)
        # Split points into two parts: current sweep points and
        # previous sweeps points.
        # TODO: support different sampling methods for next sweeps points
        # and previous sweeps points.
        cur_points_flag = (points_numpy[:, self.time_dim] == 0)
        cur_sweep_points = points_numpy[cur_points_flag]
        prev_sweeps_points = points_numpy[~cur_points_flag]
        if prev_sweeps_points.shape[0] == 0:
            # no history available: reuse the current sweep as a stand-in
            prev_sweeps_points = cur_sweep_points
        # Shuffle points before sampling
        np.random.shuffle(cur_sweep_points)
        np.random.shuffle(prev_sweeps_points)
        cur_sweep_points = self._sample_points(cur_sweep_points,
                                               self.cur_voxel_generator,
                                               points_numpy.shape[1])
        if self.prev_voxel_generator is not None:
            prev_sweeps_points = self._sample_points(prev_sweeps_points,
                                                     self.prev_voxel_generator,
                                                     points_numpy.shape[1])
            points_numpy = np.concatenate(
                [cur_sweep_points, prev_sweeps_points], 0)
        else:
            points_numpy = cur_sweep_points
        if self.cur_voxel_generator._max_num_points == 1:
            # one point per voxel: drop the redundant middle axis
            points_numpy = points_numpy.squeeze(1)
        results['points'] = points.new_point(points_numpy[..., :original_dim])
        # Restore the corresponding seg and mask fields from the extra
        # channels recorded above.
        for key, dim_index in map_fields2dim:
            results[key] = points_numpy[..., dim_index]
        return results

    def __repr__(self):
        """Return a pretty multi-line summary including nested generators."""
        def _auto_indent(repr_str, indent):
            # indent every line of a (possibly multi-line) repr
            repr_str = repr_str.split('\n')
            repr_str = [' ' * indent + t + '\n' for t in repr_str]
            repr_str = ''.join(repr_str)[:-1]
            return repr_str
        repr_str = self.__class__.__name__
        indent = 4
        repr_str += '(\n'
        repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\n'
        repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\n'
        repr_str += ' ' * indent + f'time_dim={self.time_dim},\n'
        repr_str += ' ' * indent + 'cur_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\n'
        repr_str += ' ' * indent + 'prev_voxel_generator=\n'
        repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})'
        return repr_str
| true | true |
f72dd94c7d1ce9df355532696ed084fa0a0a4f4d | 3,081 | py | Python | src/opencv_backend/ui.py | AidenPearce7/python-tictactoe | 1f5aaaca87bfb8487a1366b4bc2bd567df8feb5e | [
"Apache-1.1"
] | null | null | null | src/opencv_backend/ui.py | AidenPearce7/python-tictactoe | 1f5aaaca87bfb8487a1366b4bc2bd567df8feb5e | [
"Apache-1.1"
] | 8 | 2021-05-14T10:24:43.000Z | 2021-07-04T21:40:03.000Z | src/opencv_backend/ui.py | AidenPearce7/python-tictactoe | 1f5aaaca87bfb8487a1366b4bc2bd567df8feb5e | [
"Apache-1.1"
] | 2 | 2021-05-27T16:59:57.000Z | 2021-06-07T11:03:30.000Z | """UI class"""
import cv2 as cv
import numpy as np
class UI:
    """Handles drawing and managing the tic-tac-toe overlay layer."""

    def __init__(self, frame):
        height, width, channels = frame.shape
        self.width = width
        self.height = height
        # pixel offsets where each of the 3 board columns/rows begins
        self.separators = {
            "y": (0, height // 3, 2 * height // 3),
            "x": (0, width // 3, 2 * width // 3),
        }
        self.figure = np.zeros((height, width, channels), dtype=np.uint8)
        self.grid_drawn = False

    def draw_grid(self, color=(255, 0, 0), thickness=9):
        """Draws a 3 by 3 grid on the overlay (only the first time)."""
        if self.grid_drawn:
            return
        for i in (1, 2):
            y_sep = self.separators["y"][i]
            x_sep = self.separators["x"][i]
            self.figure = cv.line(
                self.figure, (0, y_sep), (self.width, y_sep), color, thickness)
            self.figure = cv.line(
                self.figure, (x_sep, 0), (x_sep, self.height), color, thickness)
        self.grid_drawn = True

    def _draw_x(self, x, y, color, thickness):
        """Draws an X inside the grid cell at column ``x``, row ``y``."""
        quarter_w = self.separators["x"][1] * 0.25
        quarter_h = self.separators["y"][1] * 0.25
        left = int(self.separators["x"][x] + quarter_w)
        up = int(self.separators["y"][y] + quarter_h)
        right = int(self.separators["x"][x] + quarter_w * 3)
        down = int(self.separators["y"][y] + quarter_h * 3)
        self.figure = cv.line(self.figure, (left, up), (right, down), color, thickness)
        self.figure = cv.line(self.figure, (left, down), (right, up), color, thickness)

    def _draw_circle(self, x, y, color, thickness):
        """Draws a circle inside the grid cell at column ``x``, row ``y``."""
        half_w = self.separators["x"][1] * 0.5
        half_h = self.separators["y"][1] * 0.5
        center = (
            int(self.separators["x"][x] + half_w),
            int(self.separators["y"][y] + half_h),
        )
        self.figure = cv.circle(
            self.figure, center, int(half_h * 0.75), color, thickness)

    def draw_move(self, coords, color=(0, 0, 255), thickness=7):
        """Draws an X or a circle depending on ``coords.symbol``."""
        painter = self._draw_x if coords.symbol == "x" else self._draw_circle
        painter(coords.x, coords.y, color, thickness)

    def get_separators(self):
        """Returns the separators used for the processing"""
        return self.separators

    def overlay(self, frame):
        """Returns the frame with the figure layer added on top."""
        return cv.add(frame, self.figure)
| 41.08 | 87 | 0.576761 | import cv2 as cv
import numpy as np
class UI:
    """Handles UI drawing and managing for the tic-tac-toe overlay."""
    def __init__(self, frame):
        height, width, channels = frame.shape
        self.width = width
        self.height = height
        # pixel offsets where each of the 3 board columns/rows begins
        self.separators = {
            "y": (0, height // 3, 2 * height // 3),
            "x": (0, width // 3, 2 * width // 3),
        }
        self.figure = np.zeros((height, width, channels), dtype=np.uint8)
        self.grid_drawn = False
    def draw_grid(self, color=(255, 0, 0), thickness=9):
        """Draws a 3 by 3 grid on the frame (idempotent)."""
        if not self.grid_drawn:
            for i in range(1, 3):
                startpoint_height = (0, self.separators["y"][i])
                startpoint_width = (self.separators["x"][i], 0)
                endpoint_height = (self.width, self.separators["y"][i])
                endpoint_width = (self.separators["x"][i], self.height)
                self.figure = cv.line(
                    self.figure, startpoint_height, endpoint_height, color, thickness
                )
                self.figure = cv.line(
                    self.figure, startpoint_width, endpoint_width, color, thickness
                )
            self.grid_drawn = True
    def _draw_x(self, x, y, color, thickness):
        """Draws X in the grid cell at column ``x``, row ``y``."""
        width_offset = self.separators["x"][1] * 0.25
        height_offset = self.separators["y"][1] * 0.25
        left = int(self.separators["x"][x] + width_offset)
        up = int(self.separators["y"][y] + height_offset)
        right = int(self.separators["x"][x] + width_offset * 3)
        down = int(self.separators["y"][y] + height_offset * 3)
        self.figure = cv.line(self.figure, (left, up), (right, down), color, thickness)
        self.figure = cv.line(self.figure, (left, down), (right, up), color, thickness)
    def _draw_circle(self, x, y, color, thickness):
        """Draws a circle in the grid cell at column ``x``, row ``y``."""
        width_offset = self.separators["x"][1] * 0.5
        height_offset = self.separators["y"][1] * 0.5
        center = (
            int(self.separators["x"][x] + width_offset),
            int(self.separators["y"][y] + height_offset),
        )
        radius = int(height_offset * 0.75)
        self.figure = cv.circle(self.figure, center, radius, color, thickness)
    def draw_move(self, coords, color=(0, 0, 255), thickness=7):
        """Draws a shape (X or circle) based on the coordinate object."""
        if coords.symbol == "x":
            self._draw_x(coords.x, coords.y, color, thickness)
        else:
            self._draw_circle(coords.x, coords.y, color, thickness)
    def get_separators(self):
        """Returns the separators used for the processing."""
        return self.separators
    def overlay(self, frame):
        """Returns the frame with the added figure array."""
        return cv.add(frame, self.figure)
| true | true |
f72dda423a62d469f02bca3f143c9ab93ba3aec2 | 3,558 | py | Python | src/qna.py | ithaaswin/TeachersPetBot | e826083b64ea6ab78a223635fcb1d7881ba8b082 | [
"MIT"
] | null | null | null | src/qna.py | ithaaswin/TeachersPetBot | e826083b64ea6ab78a223635fcb1d7881ba8b082 | [
"MIT"
] | 57 | 2021-10-31T23:15:20.000Z | 2021-11-15T18:10:41.000Z | src/qna.py | chandur626/TeachersPetBot | ad1fd36be5bd3690949d0e3a6e29c9100bf43e15 | [
"MIT"
] | 5 | 2021-11-17T23:21:01.000Z | 2021-11-30T03:44:05.000Z | ###########################
# Implements Q and A functionality
###########################
from discord import NotFound
import db
# next sequential number to assign to a posted question
QUESTION_NUMBER = 1
# maps question number -> QuestionsAnswers object for posted questions
QNA = {}
###########################
# Class: QuestionsAnswers
# Description: object with question details
# Inputs:
# - q: question text
# - number: question number
# - message: id of the message associated with question
# - ans: answers to the question
# Outputs: None
###########################
class QuestionsAnswers:
    ''' Holds one posted question: text, number, message id and answers '''
    def __init__(self, qs, number, message, ans):
        # qs: question text; number: sequential question number;
        # message: id of the Discord message displaying the question;
        # ans: accumulated answer text (newline separated)
        self.question, self.number = qs, number
        self.msg, self.answer = message, ans
###########################
# Function: question
# Description: takes question from user and reposts anonymously and numbered
# Inputs:
# - ctx: context of the command
# - q: question text
# Outputs:
# - User question in new post
###########################
async def question(ctx, qs):
    ''' add a question '''
    global QUESTION_NUMBER
    # repost the question anonymously, numbered for later answers
    posted = await ctx.send(f'Q{QUESTION_NUMBER}: {qs}\n')
    # remember the question so answers can be attached to its message
    QNA[QUESTION_NUMBER] = QuestionsAnswers(qs, QUESTION_NUMBER, posted.id, '')
    # persist to the DB (guild id, author name, empty string, question number)
    db.mutation_query(
        'INSERT INTO qna VALUES (?, ?, ?, ?)',
        [ctx.guild.id, ctx.author.name, '', QUESTION_NUMBER]
    )
    QUESTION_NUMBER += 1
    # remove the asker's original message to keep them anonymous
    await ctx.message.delete()
###########################
# Function: answer
# Description: adds user answer to specific question and post anonymously
# Inputs:
# - ctx: context of the command
# - num: question number being answered
# - ans: answer text to question specified in num
# Outputs:
# - User answer added to question post
###########################
async def answer(ctx, num, ans):
    ''' add an answer to question number num and repost it anonymously '''

    async def reject():
        ''' privately notify the user and delete their message '''
        await ctx.author.send('Invalid question number: ' + str(num))
        await ctx.message.delete()

    if int(num) not in QNA.keys():
        await reject()
        return
    # get question
    q_answered = QNA[int(num)]
    # check if message exists
    try:
        message = await ctx.fetch_message(q_answered.msg)
    except NotFound:
        await reject()
        return
    # generate and edit msg with answer
    if "instructor" in [y.name.lower() for y in ctx.author.roles]:
        role = 'Instructor'
    else:
        role = 'Student'
    new_answer = role + ' Ans: ' + ans
    db.mutation_query(
        'UPDATE qna SET answer = ? WHERE qnumber = ?', (ans, int(num)),
    )
    # store new answer and update the new answer to the database
    if not q_answered.answer == '':
        q_answered.answer += '\n'
    q_answered.answer += new_answer
    # edit the question message to include the accumulated answers
    q_str = 'Q' + str(q_answered.number) + ': ' + q_answered.question
    content = q_str + '\n' + q_answered.answer
    try:
        await message.edit(content=content)
    except NotFound:
        await reject()
        return
    # delete the answering user's message on success as well, so answers
    # stay anonymous (previously only the error paths deleted it)
    await ctx.message.delete()
| 29.89916 | 82 | 0.59837 | true | true | |
f72ddac9ba01a301e98f3c344bf12ac75b9ae4c9 | 5,610 | py | Python | gammapy/astro/population/tests/test_simulate.py | gabemery/gammapy | 99e5c5d38e4920dddd7bca41fb1539ccda8bea2d | [
"BSD-3-Clause"
] | null | null | null | gammapy/astro/population/tests/test_simulate.py | gabemery/gammapy | 99e5c5d38e4920dddd7bca41fb1539ccda8bea2d | [
"BSD-3-Clause"
] | null | null | null | gammapy/astro/population/tests/test_simulate.py | gabemery/gammapy | 99e5c5d38e4920dddd7bca41fb1539ccda8bea2d | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from astropy.table import Table
import astropy.units as u
from ....utils.testing import requires_dependency
from ...population import (
make_base_catalog_galactic,
make_catalog_random_positions_cube,
make_catalog_random_positions_sphere,
add_snr_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_observed_parameters,
add_observed_source_parameters,
)
def test_make_catalog_random_positions_cube():
    """The cube catalog contains exactly the requested number of sources."""
    n_sources = 100
    catalog = make_catalog_random_positions_cube(size=n_sources)
    assert len(catalog) == n_sources
def test_make_catalog_random_positions_sphere():
    """The sphere catalog contains exactly the requested number of sources."""
    n_sources = 100
    catalog = make_catalog_random_positions_sphere(
        size=n_sources, center='Milky Way')
    assert len(catalog) == n_sources
def test_make_base_catalog_galactic():
    """Test that make_base_catalog_galactic uses random_state correctly.

    Calling with a given seed should always give the same output.

    Regression test for https://github.com/gammapy/gammapy/issues/959
    """
    table = make_base_catalog_galactic(n_sources=10, random_state=0)

    assert len(table) == 10
    assert table.colnames == [
        'age', 'n_ISM', 'spiralarm',
        'x_birth', 'y_birth', 'z_birth',
        'x', 'y', 'z',
        'vx', 'vy', 'vz', 'v_abs',
    ]

    first = table[0]
    assert first['spiralarm'] == 'Crux Scutum'
    # seed-0 reference values for the remaining (numeric) columns
    expected = {
        'age': 548813.50392732478,
        'n_ISM': 1.0,
        'x_birth': 0.58513884292018437,
        'y_birth': -11.682838052120154,
        'z_birth': 0.15710279448905115,
        'x': 0.5828226720259867,
        'y': -11.658959390801584,
        'z': 0.35098629652725671,
        'vx': -4.1266001441394655,
        'vy': 42.543357869627776,
        'vz': 345.43206179709432,
        'v_abs': 348.06648135803658,
    }
    for name, value in expected.items():
        assert_allclose(first[name], value)
def test_add_observed_parameters():
    """Observed columns (distance, sky position) are added deterministically."""
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    table = add_observed_parameters(table)

    assert len(table) == 10
    assert set(table.colnames).issuperset([
        'distance', 'GLON', 'GLAT', 'VGLON', 'VGLAT', 'RA', 'DEC',
    ])

    first = table[0]
    # seed-0 reference values
    expected = {
        'distance': 3231.392591455106,
        'GLON': 169.54657778189639,
        'GLAT': 6.2356357665816162,
        'VGLON': 0.066778795313076678,
        'VGLAT': 5.6115948931932174,
        'RA': 86.308826288823127,
        'DEC': 41.090120056648828,
    }
    for name, value in expected.items():
        assert_allclose(first[name], value)
def test_add_snr_parameters():
    """SNR columns are computed from the age and ISM density columns."""
    table = Table()
    table['age'] = [100, 1000] * u.yr
    table['n_ISM'] = u.Quantity(1, 'cm-3')
    table = add_snr_parameters(table)

    assert len(table) == 2
    assert table.colnames == ['age', 'n_ISM', 'E_SN', 'r_out', 'r_in', 'L_SNR']

    expected = {
        'E_SN': 1e51,
        'r_out': [1, 3.80730787743],
        'r_in': [0.9086, 3.45931993743],
        'L_SNR': [0, 1.0768e+33],
    }
    for name, values in expected.items():
        assert_allclose(table[name], values)
def test_add_pulsar_parameters():
    """Pulsar columns are appended deterministically for a fixed seed."""
    table = Table()
    table['age'] = [100, 1000] * u.yr
    table = add_pulsar_parameters(table, random_state=0)

    assert len(table) == 2
    assert table.colnames == ['age', 'P0', 'P1', 'P0_birth', 'P1_birth', 'CharAge',
                              'Tau0', 'L_PSR', 'L0_PSR', 'logB']

    # seed-0 reference values for both rows
    expected = {
        'P0': [0.322829453422, 0.51352778881],
        'P1': [4.54295751161e-14, 6.98423128444e-13],
        'P0_birth': [0.322254715288, 0.388110930459],
        'P1_birth': [4.55105983192e-14, 9.24116423053e-13],
        'CharAge': [2.32368825638e-22, 5.6826197937e-21],
        'Tau0': [112189.64476, 6654.19039158],
        'L_PSR': [5.37834069771e+34, 8.25708734631e+35],
        'L0_PSR': [5.36876555682e+34, 6.24049160082e+35],
        'logB': [12.5883058913, 13.2824912596],
    }
    for name, values in expected.items():
        assert_allclose(table[name], values)
@requires_dependency('scipy')
def test_add_pwn_parameters():
    """PWN columns can be added once SNR and pulsar columns exist."""
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    # To compute PWN parameters we need PSR and SNR parameters first
    table = add_snr_parameters(table)
    table = add_pulsar_parameters(table, random_state=0)
    table = add_pwn_parameters(table)

    assert len(table) == 10
    first = table[0]
    assert_allclose(first['r_out_PWN'], 0.5892196771927385, atol=1e-3)
    assert_allclose(first['L_PWN'], 7.057857699785925e+45)
@requires_dependency('scipy')
def test_chain_all():
    """
    Test that running the simulation functions in chain works
    """
    # Note: the individual functions are tested above.
    # Here we just run them in a chain and do very basic asserts
    # on the output so that we make sure we notice changes.
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    table = add_snr_parameters(table)
    table = add_pulsar_parameters(table, random_state=0)
    for step in (add_pwn_parameters, add_observed_parameters,
                 add_observed_source_parameters):
        table = step(table)

    assert len(table) == 10
    assert len(table.colnames) == 43
    first = table[0]
    assert_allclose(first['r_out_PWN'], 0.5892196771927385, atol=1e-3)
    assert_allclose(first['RA'], 86.308826288823127)
| 35.732484 | 83 | 0.694474 |
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from astropy.table import Table
import astropy.units as u
from ....utils.testing import requires_dependency
from ...population import (
make_base_catalog_galactic,
make_catalog_random_positions_cube,
make_catalog_random_positions_sphere,
add_snr_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_observed_parameters,
add_observed_source_parameters,
)
def test_make_catalog_random_positions_cube():
    """The cube catalog contains exactly ``size`` sources."""
    size = 100
    table = make_catalog_random_positions_cube(size=size)
    assert len(table) == size
def test_make_catalog_random_positions_sphere():
    """The sphere catalog contains exactly ``size`` sources."""
    size = 100
    table = make_catalog_random_positions_sphere(size=size,
                                                 center='Milky Way')
    assert len(table) == size
def test_make_base_catalog_galactic():
    """A fixed random_state must always reproduce the same catalog."""
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    assert len(table) == 10
    assert table.colnames == [
        'age', 'n_ISM', 'spiralarm',
        'x_birth', 'y_birth', 'z_birth',
        'x', 'y', 'z',
        'vx', 'vy', 'vz', 'v_abs',
    ]
    # spot-check the first row against seed-0 reference values
    d = table[0]
    assert_allclose(d['age'], 548813.50392732478)
    assert_allclose(d['n_ISM'], 1.0)
    assert d['spiralarm'] == 'Crux Scutum'
    assert_allclose(d['x_birth'], 0.58513884292018437)
    assert_allclose(d['y_birth'], -11.682838052120154)
    assert_allclose(d['z_birth'], 0.15710279448905115)
    assert_allclose(d['x'], 0.5828226720259867)
    assert_allclose(d['y'], -11.658959390801584)
    assert_allclose(d['z'], 0.35098629652725671)
    assert_allclose(d['vx'], -4.1266001441394655)
    assert_allclose(d['vy'], 42.543357869627776)
    assert_allclose(d['vz'], 345.43206179709432)
    assert_allclose(d['v_abs'], 348.06648135803658)
def test_add_observed_parameters():
    """Observed columns (distance, sky position) are added deterministically."""
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    table = add_observed_parameters(table)
    assert len(table) == 10
    assert set(table.colnames).issuperset([
        'distance', 'GLON', 'GLAT', 'VGLON', 'VGLAT', 'RA', 'DEC',
    ])
    # spot-check the first row against seed-0 reference values
    d = table[0]
    assert_allclose(d['distance'], 3231.392591455106)
    assert_allclose(d['GLON'], 169.54657778189639)
    assert_allclose(d['GLAT'], 6.2356357665816162)
    assert_allclose(d['VGLON'], 0.066778795313076678)
    assert_allclose(d['VGLAT'], 5.6115948931932174)
    assert_allclose(d['RA'], 86.308826288823127)
    assert_allclose(d['DEC'], 41.090120056648828)
def test_add_snr_parameters():
    """SNR columns are computed from the age and ISM density columns."""
    table = Table()
    table['age'] = [100, 1000] * u.yr
    table['n_ISM'] = u.Quantity(1, 'cm-3')
    table = add_snr_parameters(table)
    assert len(table) == 2
    assert table.colnames == ['age', 'n_ISM', 'E_SN', 'r_out', 'r_in', 'L_SNR']
    assert_allclose(table['E_SN'], 1e51)
    assert_allclose(table['r_out'], [1, 3.80730787743])
    assert_allclose(table['r_in'], [0.9086, 3.45931993743])
    assert_allclose(table['L_SNR'], [0, 1.0768e+33])
def test_add_pulsar_parameters():
    """Pulsar columns are appended deterministically for a fixed seed."""
    table = Table()
    table['age'] = [100, 1000] * u.yr
    table = add_pulsar_parameters(table, random_state=0)
    assert len(table) == 2
    assert table.colnames == ['age', 'P0', 'P1', 'P0_birth', 'P1_birth', 'CharAge',
                              'Tau0', 'L_PSR', 'L0_PSR', 'logB']
    # seed-0 reference values for both rows
    assert_allclose(table['P0'], [0.322829453422, 0.51352778881])
    assert_allclose(table['P1'], [4.54295751161e-14, 6.98423128444e-13])
    assert_allclose(table['P0_birth'], [0.322254715288, 0.388110930459])
    assert_allclose(table['P1_birth'], [4.55105983192e-14, 9.24116423053e-13])
    assert_allclose(table['CharAge'], [2.32368825638e-22, 5.6826197937e-21])
    assert_allclose(table['Tau0'], [112189.64476, 6654.19039158])
    assert_allclose(table['L_PSR'], [5.37834069771e+34, 8.25708734631e+35])
    assert_allclose(table['L0_PSR'], [5.36876555682e+34, 6.24049160082e+35])
    assert_allclose(table['logB'], [12.5883058913, 13.2824912596])
@requires_dependency('scipy')
def test_add_pwn_parameters():
    """PWN columns can be added once SNR and pulsar columns exist."""
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    # PWN parameters require the SNR and pulsar columns to exist first
    table = add_snr_parameters(table)
    table = add_pulsar_parameters(table, random_state=0)
    table = add_pwn_parameters(table)
    assert len(table) == 10
    d = table[0]
    assert_allclose(d['r_out_PWN'], 0.5892196771927385, atol=1e-3)
    assert_allclose(d['L_PWN'], 7.057857699785925e+45)
@requires_dependency('scipy')
def test_chain_all():
    """Running all simulation steps in a chain works end to end."""
    table = make_base_catalog_galactic(n_sources=10, random_state=0)
    table = add_snr_parameters(table)
    table = add_pulsar_parameters(table, random_state=0)
    table = add_pwn_parameters(table)
    table = add_observed_parameters(table)
    table = add_observed_source_parameters(table)
    # only basic checks here; individual steps are tested above
    assert len(table) == 10
    assert len(table.colnames) == 43
    d = table[0]
    assert_allclose(d['r_out_PWN'], 0.5892196771927385, atol=1e-3)
    assert_allclose(d['RA'], 86.308826288823127)
| true | true |
f72ddb30bb43ae6eef900ba6d2bac985e88bdce3 | 329 | py | Python | Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 6/Hurdle 4.py | edaaydinea/365-days-of-coding-challenge | baf06a9bef75ff45194e57357e20085b9cde2498 | [
"MIT"
] | 4 | 2022-01-05T12:14:13.000Z | 2022-01-08T16:03:32.000Z | Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 6/Hurdle 4.py | edaaydinea/365-days-of-code | baf06a9bef75ff45194e57357e20085b9cde2498 | [
"MIT"
] | null | null | null | Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 6/Hurdle 4.py | edaaydinea/365-days-of-code | baf06a9bef75ff45194e57357e20085b9cde2498 | [
"MIT"
] | null | null | null | def turn_right():
turn_left()
turn_left()
turn_left()
def jump():
    """Climb over the obstacle currently in front of the robot.

    NOTE(review): relies on Karel-style primitives (turn_left, move,
    wall_on_right, front_is_clear) provided by the course environment.
    """
    turn_left()
    # ascend while the wall is still on our right-hand side
    while wall_on_right():
        move()
    turn_right()
    # step across the top
    move()
    turn_right()
    # descend until something blocks the way (the floor)
    while front_is_clear():
        move()
    turn_left()
# advance toward the goal, jumping whenever a wall blocks the way
while not at_goal():
    if wall_in_front():
        jump()
    else:
        move()
| 13.708333 | 27 | 0.534954 | def turn_right():
turn_left()
turn_left()
turn_left()
def jump():
    """Climb over the obstacle currently in front of the robot.

    NOTE(review): relies on Karel-style primitives (turn_left, move,
    wall_on_right, front_is_clear) provided by the course environment.
    """
    turn_left()
    # ascend while the wall is still on our right-hand side
    while wall_on_right():
        move()
    turn_right()
    # step across the top
    move()
    turn_right()
    # descend until something blocks the way (the floor)
    while front_is_clear():
        move()
    turn_left()
# advance toward the goal, jumping whenever a wall blocks the way
while not at_goal():
    if wall_in_front():
        jump()
    else:
        move()
| true | true |
f72ddc020a4220657c0534fd213d5960651ecb30 | 1,367 | py | Python | prismriver/plugin/lyrster.py | anlar/prismriver-lyrics | b9adb7f860fc5bf0fc41029a8e9e472aa262a0f4 | [
"MIT"
] | 16 | 2016-11-08T03:31:37.000Z | 2021-11-17T20:00:56.000Z | prismriver/plugin/lyrster.py | odrling/prismriver-lyrics | 995b687b1e795298ce5e31348120c510f99a378f | [
"MIT"
] | 5 | 2016-06-14T19:59:57.000Z | 2021-01-24T10:35:43.000Z | prismriver/plugin/lyrster.py | odrling/prismriver-lyrics | 995b687b1e795298ce5e31348120c510f99a378f | [
"MIT"
] | 5 | 2016-06-14T18:01:20.000Z | 2021-02-24T05:52:31.000Z | from prismriver.plugin.common import Plugin
from prismriver.struct import Song
class LyrsterPlugin(Plugin):
    """Lyrics provider that scrapes song pages from lyrster.com."""

    ID = 'lyrster'

    def __init__(self, config):
        super(LyrsterPlugin, self).__init__('Lyrster', config)

    def search_song(self, artist, title):
        """Look up lyrics for (artist, title); return a Song or None."""
        # characters lyrster strips when building its URL slugs
        to_delete = ["'", '!', '(', ')', '[', ']']
        title_slug = self.prepare_url_parameter(title, to_delete=to_delete)
        artist_slug = self.prepare_url_parameter(artist, to_delete=to_delete)
        link = "http://www.lyrster.com/lyrics/{}-lyrics-{}.html".format(
            title_slug, artist_slug)

        page = self.download_webpage(link)
        if not page:
            return None

        soup = self.prepare_soup(page)
        head_pane = soup.find("div", {"id": "lyrics-info"})
        if head_pane is None:
            # song wasn't found and we're redirected to main page
            return None

        song_title = head_pane.find("h1").text.replace(" Lyrics", "")
        song_artist = head_pane.find("a").text

        lyric = self.parse_verse_block(soup.find("div", {"id": "lyrics"}))
        missing_lyric = (
            lyric == "We do not have the complete song's lyrics just yet."
            or lyric.startswith('Shortcut to '))
        if missing_lyric:
            # empty song page without lyric
            return None
        return Song(song_artist, song_title, self.sanitize_lyrics([lyric]))
| 35.973684 | 114 | 0.588881 | from prismriver.plugin.common import Plugin
from prismriver.struct import Song
class LyrsterPlugin(Plugin):
    """Lyrics provider that scrapes song pages from lyrster.com."""
    ID = 'lyrster'
    def __init__(self, config):
        super(LyrsterPlugin, self).__init__('Lyrster', config)
    def search_song(self, artist, title):
        """Look up lyrics for (artist, title); return a Song or None."""
        # characters lyrster strips when building its URL slugs
        to_delete = ["'", '!', '(', ')', '[', ']']
        link = "http://www.lyrster.com/lyrics/{}-lyrics-{}.html".format(
            self.prepare_url_parameter(title, to_delete=to_delete),
            self.prepare_url_parameter(artist, to_delete=to_delete))
        page = self.download_webpage(link)
        if page:
            soup = self.prepare_soup(page)
            head_pane = soup.find("div", {"id": "lyrics-info"})
            if head_pane is None:
                # song wasn't found and we're redirected to main page
                return None
            song_title = head_pane.find("h1").text.replace(" Lyrics", "")
            song_artist = head_pane.find("a").text
            lyric_pane = soup.find("div", {"id": "lyrics"})
            lyric = self.parse_verse_block(lyric_pane)
            if lyric == "We do not have the complete song's lyrics just yet." or lyric.startswith('Shortcut to '):
                # empty song page without lyric
                return None
            else:
                return Song(song_artist, song_title, self.sanitize_lyrics([lyric]))
| true | true |
f72ddc8c25b9c4148bc113d6447b20acd94bf3ee | 100,225 | py | Python | manila/tests/db/migrations/alembic/migrations_data_checks.py | inspur-storage/manila | 0f8cc58e9454643b492b18c6284f6b0bc4aa311b | [
"Apache-2.0"
] | 3 | 2016-06-06T13:05:00.000Z | 2021-05-05T04:29:24.000Z | manila/tests/db/migrations/alembic/migrations_data_checks.py | ljzjohnson/manila | 7f990ffa16117769f7616779dd94f81c8d676511 | [
"Apache-2.0"
] | null | null | null | manila/tests/db/migrations/alembic/migrations_data_checks.py | ljzjohnson/manila | 7f990ffa16117769f7616779dd94f81c8d676511 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests data for database migrations.
All database migrations with data manipulation
(like moving data from column to the table) should have data check class:
@map_to_migration('1f0bd302c1a6') # Revision of checked db migration
class FooMigrationChecks(BaseMigrationChecks):
def setup_upgrade_data(self, engine):
...
def check_upgrade(self, engine, data):
...
def check_downgrade(self, engine):
...
See BaseMigrationChecks class for more information.
"""
import abc
import datetime
from oslo_db import exception as oslo_db_exc
from oslo_utils import uuidutils
import six
from sqlalchemy import exc as sa_exc
from manila.common import constants
from manila.db.migrations import utils
class DbMigrationsData(object):
    """Dispatches ``_<stage>_<revision>`` attribute access to the checks
    object registered for that migration revision.
    """

    # revision id -> BaseMigrationChecks instance (filled by decorator)
    migration_mappings = {}

    # stage prefix -> name of the check method to call on the checks object
    methods_mapping = {
        'pre': 'setup_upgrade_data',
        'check': 'check_upgrade',
        'post': 'check_downgrade',
    }

    def __getattr__(self, item):
        parts = item.split('_')

        # expected form: '' / stage / ... / revision,
        # e.g. '_check_38e632621e5a'
        if (len(parts) <= 2 or parts[0] != ''
                or parts[1] not in self.methods_mapping):
            return super(DbMigrationsData, self).__getattribute__(item)

        checks = self.migration_mappings.get(parts[-1], None)
        if checks is None:
            raise AttributeError

        checks.set_test_case(self)
        return getattr(checks, self.methods_mapping.get(parts[1]))
def map_to_migration(revision):
    """Return a class decorator registering the decorated checks class
    (instantiated once) under the given migration ``revision`` id."""
    def register(cls):
        DbMigrationsData.migration_mappings[revision] = cls()
        return cls
    return register
# NOTE: six.add_metaclass must be applied as a class decorator; calling it
# as a bare statement inside the class body discards the returned decorator
# and leaves ABCMeta unapplied, so the abstract methods were not enforced.
@six.add_metaclass(abc.ABCMeta)
class BaseMigrationChecks(object):
    """Base class for per-migration data checks (see module docstring)."""

    def __init__(self):
        # unit test case set via set_test_case(); used for assertions
        self.test_case = None

    def set_test_case(self, test_case):
        """Attach the test case whose assert methods the checks will use."""
        self.test_case = test_case

    @abc.abstractmethod
    def setup_upgrade_data(self, engine):
        """This method should be used to insert test data for migration.

        :param engine: SQLAlchemy engine
        :return: any data which will be passed to 'check_upgrade' as 'data' arg
        """

    @abc.abstractmethod
    def check_upgrade(self, engine, data):
        """This method should be used to do assertions after upgrade method.

        To perform assertions use 'self.test_case' instance property:

            self.test_case.assertTrue(True)

        :param engine: SQLAlchemy engine
        :param data: data returned by 'setup_upgrade_data'
        """

    @abc.abstractmethod
    def check_downgrade(self, engine):
        """This method should be used to do assertions after downgrade method.

        To perform assertions use 'self.test_case' instance property:

            self.test_case.assertTrue(True)

        :param engine: SQLAlchemy engine
        """
def fake_share(**kwargs):
    """Build a fake share row for migration tests.

    Any default field may be overridden (or new fields added) via kwargs.

    NOTE: the dict literal previously listed 'share_proto' twice ('nfs'
    then 'fake_proto'); the later value always won, so only 'fake_proto'
    is kept here — behavior is unchanged.
    """
    share = {
        'id': uuidutils.generate_uuid(),
        'display_name': 'fake_share',
        'display_description': 'my fake share',
        'snapshot_id': uuidutils.generate_uuid(),
        'is_public': True,
        'size': 1,
        'deleted': 'False',
        'share_proto': 'fake_proto',
        'user_id': uuidutils.generate_uuid(),
        'project_id': uuidutils.generate_uuid(),
        'snapshot_support': True,
        'task_state': None,
    }
    share.update(kwargs)
    return share
def fake_instance(share_id=None, **kwargs):
    """Build a fake row for insertion into the 'share_instances' table.

    :param share_id: parent share id; a fresh UUID is generated when omitted
    :param kwargs: column overrides merged over the defaults
    :return: dict suitable for a share_instances-table insert
    """
    stamp = datetime.datetime(2015, 8, 10, 0, 5, 58)
    instance = {
        'id': uuidutils.generate_uuid(),
        'share_id': share_id or uuidutils.generate_uuid(),
        'deleted': 'False',
        'host': 'openstack@BackendZ#PoolA',
        'status': 'available',
        'scheduled_at': stamp,
        'launched_at': stamp,
        'terminated_at': None,
        'access_rules_status': 'active',
    }
    instance.update(kwargs)
    return instance
@map_to_migration('38e632621e5a')
class ShareTypeMigrationChecks(BaseMigrationChecks):
    """Checks for the migration renaming volume_types to share_types."""
    def _get_fake_data(self):
        """Return (volume_types, extra_specs) fixtures.

        Creates two volume types with two extra specs each; as a side
        effect records the generated type ids in ``self.share_type_ids``.
        """
        extra_specs = []
        self.share_type_ids = []
        volume_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'vol-type-A',
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'vol-type-B',
            },
        ]
        for idx, volume_type in enumerate(volume_types):
            extra_specs.append({
                'volume_type_id': volume_type['id'],
                'key': 'foo',
                'value': 'bar%s' % idx,
                'deleted': False,
            })
            extra_specs.append({
                'volume_type_id': volume_type['id'],
                'key': 'xyzzy',
                'value': 'spoon_%s' % idx,
                'deleted': False,
            })
            self.share_type_ids.append(volume_type['id'])
        return volume_types, extra_specs
    def setup_upgrade_data(self, engine):
        """Insert the volume-type and extra-spec fixtures pre-upgrade."""
        (self.volume_types, self.extra_specs) = self._get_fake_data()
        volume_types_table = utils.load_table('volume_types', engine)
        engine.execute(volume_types_table.insert(self.volume_types))
        extra_specs_table = utils.load_table('volume_type_extra_specs',
                                             engine)
        engine.execute(extra_specs_table.insert(self.extra_specs))
    def check_upgrade(self, engine, data):
        """Tables are renamed to share_* and every row survives."""
        # Verify table transformations
        share_types_table = utils.load_table('share_types', engine)
        share_types_specs_table = utils.load_table(
            'share_type_extra_specs', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'volume_types', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'volume_type_extra_specs', engine)
        # Verify presence of data
        share_type_ids = [
            st['id'] for st in engine.execute(share_types_table.select())
            if st['id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(sorted(self.share_type_ids),
                                   sorted(share_type_ids))
        extra_specs = [
            {'type': es['share_type_id'], 'key': es['spec_key']}
            for es in engine.execute(share_types_specs_table.select())
            if es['share_type_id'] in self.share_type_ids
        ]
        # Two fixture types with two specs each.
        self.test_case.assertEqual(4, len(extra_specs))
    def check_downgrade(self, engine):
        """Tables are renamed back to volume_* and every row survives."""
        # Verify table transformations
        volume_types_table = utils.load_table('volume_types', engine)
        volume_types_specs_table = utils.load_table(
            'volume_type_extra_specs', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'share_types', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'share_type_extra_specs', engine)
        # Verify presence of data
        volume_type_ids = [
            vt['id'] for vt in engine.execute(volume_types_table.select())
            if vt['id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(sorted(self.share_type_ids),
                                   sorted(volume_type_ids))
        extra_specs = [
            {'type': es['volume_type_id'], 'key': es['key']}
            for es in engine.execute(volume_types_specs_table.select())
            if es['volume_type_id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(4, len(extra_specs))
@map_to_migration('5077ffcc5f1c')
class ShareInstanceMigrationChecks(BaseMigrationChecks):
    """Checks for the migration that introduces share/snapshot instances.

    Instance-level columns move out of 'shares'/'share_snapshots' into the
    new 'share_instances'/'share_snapshot_instances' tables, and the export
    location table is re-keyed on share instances.
    """
    def _prepare_fake_data(self):
        """Store fake share, snapshot and export-location rows on self."""
        time = datetime.datetime(2017, 1, 12, 12, 12, 12)
        self.share = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'fake_status',
            'scheduled_at': time,
            'launched_at': time,
            'terminated_at': time,
            'availability_zone': 'fake_az'}
        self.share_snapshot = {
            'id': uuidutils.generate_uuid(),
            'status': 'fake_status',
            'share_id': self.share['id'],
            'progress': 'fake_progress'}
        self.share_export_location = {
            'id': 1001,
            'share_id': self.share['id']}
    def setup_upgrade_data(self, engine):
        """Insert the share, snapshot and export-location fixtures."""
        self._prepare_fake_data()
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(self.share))
        snapshot_table = utils.load_table('share_snapshots', engine)
        engine.execute(snapshot_table.insert(self.share_snapshot))
        el_table = utils.load_table('share_export_locations', engine)
        engine.execute(el_table.insert(self.share_export_location))
    def check_upgrade(self, engine, data):
        """Instance columns migrated into the new *_instances tables."""
        share_table = utils.load_table('shares', engine)
        s_instance_table = utils.load_table('share_instances', engine)
        ss_instance_table = utils.load_table('share_snapshot_instances',
                                             engine)
        snapshot_table = utils.load_table('share_snapshots', engine)
        instance_el_table = utils.load_table('share_instance_export_locations',
                                             engine)
        # Check shares table
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'share_network_id', 'share_server_id',
                       'availability_zone']:
            rows = engine.execute(share_table.select())
            for row in rows:
                self.test_case.assertFalse(hasattr(row, column))
        # Check share instance table
        s_instance_record = engine.execute(s_instance_table.select().where(
            s_instance_table.c.share_id == self.share['id'])).first()
        self.test_case.assertTrue(s_instance_record is not None)
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'availability_zone']:
            self.test_case.assertEqual(self.share[column],
                                       s_instance_record[column])
        # Check snapshot table
        for column in ['status', 'progress']:
            rows = engine.execute(snapshot_table.select())
            for row in rows:
                self.test_case.assertFalse(hasattr(row, column))
        # Check snapshot instance table
        ss_instance_record = engine.execute(ss_instance_table.select().where(
            ss_instance_table.c.snapshot_id == self.share_snapshot['id'])
        ).first()
        self.test_case.assertEqual(s_instance_record['id'],
                                   ss_instance_record['share_instance_id'])
        for column in ['status', 'progress']:
            self.test_case.assertEqual(self.share_snapshot[column],
                                       ss_instance_record[column])
        # Check share export location table
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_export_locations', engine)
        # Check share instance export location table
        el_record = engine.execute(instance_el_table.select().where(
            instance_el_table.c.share_instance_id == s_instance_record['id'])
        ).first()
        self.test_case.assertFalse(el_record is None)
        self.test_case.assertTrue(hasattr(el_record, 'share_instance_id'))
        self.test_case.assertFalse(hasattr(el_record, 'share_id'))
    def check_downgrade(self, engine):
        """Instance tables are dropped; columns return to the base tables."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_snapshot_instances', engine)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_instances', engine)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_instance_export_locations', engine)
        share_table = utils.load_table('shares', engine)
        snapshot_table = utils.load_table('share_snapshots', engine)
        share_el_table = utils.load_table('share_export_locations',
                                          engine)
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'share_network_id', 'share_server_id',
                       'availability_zone']:
            rows = engine.execute(share_table.select())
            for row in rows:
                self.test_case.assertTrue(hasattr(row, column))
        for column in ['status', 'progress']:
            rows = engine.execute(snapshot_table.select())
            for row in rows:
                self.test_case.assertTrue(hasattr(row, column))
        rows = engine.execute(share_el_table.select())
        for row in rows:
            self.test_case.assertFalse(hasattr(row, 'share_instance_id'))
            self.test_case.assertTrue(
                hasattr(row, 'share_id'))
@map_to_migration('1f0bd302c1a6')
class AvailabilityZoneMigrationChecks(BaseMigrationChecks):
    """Checks for the migration adding the availability_zones table."""

    valid_az_names = ('az1', 'az2')

    def _get_service_data(self, options):
        """Return a 'services' row built from defaults merged with *options*."""
        row = {
            'binary': 'manila-share',
            'topic': 'share',
            'disabled': False,
            'report_count': '100',
        }
        row.update(options)
        return row

    def setup_upgrade_data(self, engine):
        """Insert three service rows spanning both availability zones."""
        overrides = (
            {'deleted': 0, 'host': 'fake1', 'availability_zone': 'az1'},
            {'deleted': 0, 'host': 'fake2', 'availability_zone': 'az1'},
            {'deleted': 1, 'host': 'fake3', 'availability_zone': 'az2'},
        )
        services_table = utils.load_table('services', engine)
        for options in overrides:
            engine.execute(
                services_table.insert(self._get_service_data(options)))

    def check_upgrade(self, engine, _):
        """AZs moved to their own table; services reference them by UUID."""
        az_table = utils.load_table('availability_zones', engine)
        for az in engine.execute(az_table.select()):
            self.test_case.assertTrue(uuidutils.is_uuid_like(az.id))
            self.test_case.assertIn(az.name, self.valid_az_names)
            self.test_case.assertEqual('False', az.deleted)
        services_table = utils.load_table('services', engine)
        for service in engine.execute(services_table.select()):
            self.test_case.assertTrue(
                uuidutils.is_uuid_like(service.availability_zone_id)
            )

    def check_downgrade(self, engine):
        """Services carry the AZ name inline again after downgrade."""
        services_table = utils.load_table('services', engine)
        for service in engine.execute(services_table.select()):
            self.test_case.assertIn(
                service.availability_zone, self.valid_az_names
            )
@map_to_migration('dda6de06349')
class ShareInstanceExportLocationMetadataChecks(BaseMigrationChecks):
    """Checks for export-location metadata and new export-location columns."""
    # Table that gains 'is_admin_only' and 'uuid' columns in this migration.
    el_table_name = 'share_instance_export_locations'
    # Metadata table created by this migration.
    elm_table_name = 'share_instance_export_locations_metadata'
    def setup_upgrade_data(self, engine):
        """Create two shares, their instances, and one export location each."""
        # Setup shares
        share_fixture = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}]
        share_table = utils.load_table('shares', engine)
        for fixture in share_fixture:
            engine.execute(share_table.insert(fixture))
        # Setup share instances
        si_fixture = [
            {'id': 'foo_share_instance_id_oof',
             'share_id': share_fixture[0]['id']},
            {'id': 'bar_share_instance_id_rab',
             'share_id': share_fixture[1]['id']},
        ]
        si_table = utils.load_table('share_instances', engine)
        for fixture in si_fixture:
            engine.execute(si_table.insert(fixture))
        # Setup export locations
        el_fixture = [
            {'id': 1, 'path': '/1', 'share_instance_id': si_fixture[0]['id']},
            {'id': 2, 'path': '/2', 'share_instance_id': si_fixture[1]['id']},
        ]
        el_table = utils.load_table(self.el_table_name, engine)
        for fixture in el_fixture:
            engine.execute(el_table.insert(fixture))
    def check_upgrade(self, engine, data):
        """New columns are populated and metadata rows round-trip."""
        el_table = utils.load_table(
            'share_instance_export_locations', engine)
        for el in engine.execute(el_table.select()):
            self.test_case.assertTrue(hasattr(el, 'is_admin_only'))
            self.test_case.assertTrue(hasattr(el, 'uuid'))
            self.test_case.assertEqual(False, el.is_admin_only)
            self.test_case.assertTrue(uuidutils.is_uuid_like(el.uuid))
        # Write export location metadata
        el_metadata = [
            {'key': 'foo_key', 'value': 'foo_value', 'export_location_id': 1},
            {'key': 'bar_key', 'value': 'bar_value', 'export_location_id': 2},
        ]
        elm_table = utils.load_table(self.elm_table_name, engine)
        engine.execute(elm_table.insert(el_metadata))
        # Verify values of written metadata
        for el_meta_datum in el_metadata:
            el_id = el_meta_datum['export_location_id']
            records = engine.execute(elm_table.select().where(
                elm_table.c.export_location_id == el_id))
            self.test_case.assertEqual(1, records.rowcount)
            record = records.first()
            # Schema sanity: exactly these columns, no extras.
            expected_keys = (
                'id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
                'export_location_id', 'key', 'value',
            )
            self.test_case.assertEqual(len(expected_keys), len(record.keys()))
            for key in expected_keys:
                self.test_case.assertIn(key, record.keys())
            for k, v in el_meta_datum.items():
                self.test_case.assertTrue(hasattr(record, k))
                self.test_case.assertEqual(v, getattr(record, k))
    def check_downgrade(self, engine):
        """New columns and the metadata table are removed on downgrade."""
        el_table = utils.load_table(
            'share_instance_export_locations', engine)
        for el in engine.execute(el_table.select()):
            self.test_case.assertFalse(hasattr(el, 'is_admin_only'))
            self.test_case.assertFalse(hasattr(el, 'uuid'))
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.elm_table_name, engine)
@map_to_migration('344c1ac4747f')
class AccessRulesStatusMigrationChecks(BaseMigrationChecks):
    """Checks for the migration adding per-instance access_rules_status."""

    def _get_instance_data(self, data):
        """Return a shallow copy of *data* merged over (empty) defaults."""
        base_dict = {}
        base_dict.update(data)
        return base_dict

    def setup_upgrade_data(self, engine):
        """Insert one share plus four instances with differing rule states."""
        share_table = utils.load_table('shares', engine)
        share = {
            'id': 1,
            'share_proto': "NFS",
            'size': 0,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake',
        }
        engine.execute(share_table.insert(share))
        # Instance 1: all rules healthy -> expected 'active'.
        rules1 = [
            {'id': 'r1', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r2', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting',
             'deleted': 'False'},
        ]
        # Instance 2: one rule in 'error' -> expected 'error'.
        rules2 = [
            {'id': 'r4', 'share_instance_id': 2, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r5', 'share_instance_id': 2, 'state': 'error',
             'deleted': 'False'},
        ]
        # Instance 3: a 'new' rule -> expected 'out_of_sync'.
        rules3 = [
            {'id': 'r6', 'share_instance_id': 3, 'state': 'new',
             'deleted': 'False'},
        ]
        instance_fixtures = [
            {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1,
             'status': 'available', 'rules': rules1},
            {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1,
             'status': 'available', 'rules': rules2},
            {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1,
             'status': 'available', 'rules': rules3},
            {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1,
             'status': 'deleting', 'rules': []},
        ]
        share_instances_table = utils.load_table('share_instances', engine)
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', engine)
        for fixture in instance_fixtures:
            rules = fixture.pop('rules')
            engine.execute(share_instances_table.insert(fixture))
            for rule in rules:
                engine.execute(share_instances_rules_table.insert(rule))

    def check_upgrade(self, engine, _):
        """Each instance gets an access_rules_status derived from its rules."""
        instances_table = utils.load_table('share_instances', engine)
        valid_statuses = {
            '1': 'active',
            '2': 'error',
            '3': 'out_of_sync',
            '4': None,
        }
        # NOTE: must use Column.in_() here.  The previous code applied the
        # Python 'in' operator to a SQLAlchemy Column, which evaluates to a
        # constant, so the WHERE clause matched nothing and the assertions
        # below never ran.
        instances = engine.execute(instances_table.select().where(
            instances_table.c.id.in_(list(valid_statuses))))
        for instance in instances:
            self.test_case.assertEqual(valid_statuses[instance['id']],
                                       instance['access_rules_status'])

    def check_downgrade(self, engine):
        """Per-rule state is restored from the instance-level status."""
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', engine)
        valid_statuses = {
            '1': 'active',
            '2': 'error',
            '3': 'error',
            '4': None,
        }
        for rule in engine.execute(share_instances_rules_table.select()):
            valid_state = valid_statuses[rule['share_instance_id']]
            self.test_case.assertEqual(valid_state, rule['state'])
@map_to_migration('293fac1130ca')
class ShareReplicationMigrationChecks(BaseMigrationChecks):
    """Checks for the migration adding share replication attributes."""
    valid_share_display_names = ('FAKE_SHARE_1', 'FAKE_SHARE_2',
                                 'FAKE_SHARE_3')
    # Populated in setup_upgrade_data with the generated share ids.
    valid_share_ids = []
    valid_replication_types = ('writable', 'readable', 'dr')
    def _load_tables_and_get_data(self, engine):
        """Return (shares, share_instances) rows for the fixture ids."""
        share_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        shares = engine.execute(
            share_table.select().where(share_table.c.id.in_(
                self.valid_share_ids))
        ).fetchall()
        share_instances = engine.execute(share_instances_table.select().where(
            share_instances_table.c.share_id.in_(self.valid_share_ids))
        ).fetchall()
        return shares, share_instances
    def setup_upgrade_data(self, engine):
        """Insert three fake shares, each with one share instance."""
        shares_data = []
        instances_data = []
        self.valid_share_ids = []
        for share_display_name in self.valid_share_display_names:
            share_ref = fake_share(display_name=share_display_name)
            shares_data.append(share_ref)
            instances_data.append(fake_instance(share_id=share_ref['id']))
        shares_table = utils.load_table('shares', engine)
        for share in shares_data:
            self.valid_share_ids.append(share['id'])
            engine.execute(shares_table.insert(share))
        shares_instances_table = utils.load_table('share_instances', engine)
        for share_instance in instances_data:
            engine.execute(shares_instances_table.insert(share_instance))
    def check_upgrade(self, engine, _):
        """Shares gain 'replication_type'; instances gain 'replica_state'."""
        shares, share_instances = self._load_tables_and_get_data(engine)
        share_ids = [share['id'] for share in shares]
        share_instance_share_ids = [share_instance['share_id'] for
                                    share_instance in share_instances]
        # Assert no data is lost
        for sid in self.valid_share_ids:
            self.test_case.assertIn(sid, share_ids)
            self.test_case.assertIn(sid, share_instance_share_ids)
        for share in shares:
            self.test_case.assertIn(share['display_name'],
                                    self.valid_share_display_names)
            self.test_case.assertEqual('False', share.deleted)
            self.test_case.assertTrue(hasattr(share, 'replication_type'))
        for share_instance in share_instances:
            self.test_case.assertTrue(hasattr(share_instance, 'replica_state'))
    def check_downgrade(self, engine):
        """Replication columns are removed; fixture data is preserved."""
        shares, share_instances = self._load_tables_and_get_data(engine)
        share_ids = [share['id'] for share in shares]
        share_instance_share_ids = [share_instance['share_id'] for
                                    share_instance in share_instances]
        # Assert no data is lost
        for sid in self.valid_share_ids:
            self.test_case.assertIn(sid, share_ids)
            self.test_case.assertIn(sid, share_instance_share_ids)
        for share in shares:
            self.test_case.assertEqual('False', share.deleted)
            self.test_case.assertIn(share.display_name,
                                    self.valid_share_display_names)
            self.test_case.assertFalse(hasattr(share, 'replication_type'))
        for share_instance in share_instances:
            self.test_case.assertEqual('False', share_instance.deleted)
            self.test_case.assertIn(share_instance.share_id,
                                    self.valid_share_ids)
            self.test_case.assertFalse(
                hasattr(share_instance, 'replica_state'))
@map_to_migration('5155c7077f99')
class NetworkAllocationsNewLabelColumnChecks(BaseMigrationChecks):
    """Checks for adding the 'label' column to network_allocations."""
    table_name = 'network_allocations'
    ids = ['fake_network_allocation_id_%d' % i for i in (1, 2, 3)]
    def setup_upgrade_data(self, engine):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'foo_share_server_id'
        # Create share network
        share_network_data = {
            'id': 'foo_share_network_id',
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', engine)
        engine.execute(sn_table.insert(share_network_data))
        # Create share server
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))
        # Create network allocations
        network_allocations = [
            {'id': self.ids[0],
             'share_server_id': share_server_id,
             'ip_address': '1.1.1.1'},
            {'id': self.ids[1],
             'share_server_id': share_server_id,
             'ip_address': '2.2.2.2'},
        ]
        na_table = utils.load_table(self.table_name, engine)
        for network_allocation in network_allocations:
            engine.execute(na_table.insert(network_allocation))
    def check_upgrade(self, engine, data):
        """Existing rows are labeled 'user'; 'admin' rows can be added."""
        na_table = utils.load_table(self.table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual(na.label, 'user')
        # Create admin network allocation
        network_allocations = [
            {'id': self.ids[2],
             'share_server_id': na.share_server_id,
             'ip_address': '3.3.3.3',
             'label': 'admin',
             'network_type': 'vlan',
             'segmentation_id': 1005,
             'ip_version': 4,
             'cidr': '240.0.0.0/16'},
        ]
        engine.execute(na_table.insert(network_allocations))
        # Select admin network allocations
        for na in engine.execute(
                na_table.select().where(na_table.c.label == 'admin')):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual('admin', na.label)
            for col_name in ('network_type', 'segmentation_id', 'ip_version',
                             'cidr'):
                self.test_case.assertTrue(hasattr(na, col_name))
                self.test_case.assertEqual(
                    network_allocations[0][col_name], getattr(na, col_name))
    def check_downgrade(self, engine):
        """The label (and related) columns are dropped; rows survive."""
        na_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(na_table.select())
        self.test_case.assertTrue(db_result.rowcount >= len(self.ids))
        for na in db_result:
            for col_name in ('label', 'network_type', 'segmentation_id',
                             'ip_version', 'cidr'):
                self.test_case.assertFalse(hasattr(na, col_name))
@map_to_migration('eb6d5544cbbd')
class ShareSnapshotInstanceNewProviderLocationColumnChecks(
        BaseMigrationChecks):
    """Checks for adding 'provider_location' to share_snapshot_instances."""

    table_name = 'share_snapshot_instances'

    def setup_upgrade_data(self, engine):
        """Create a share -> instance -> snapshot -> snapshot-instance chain."""
        fixtures = (
            ('shares',
             {'id': 'new_share_id'}),
            ('share_instances',
             {'id': 'new_share_instance_id',
              'share_id': 'new_share_id'}),
            ('share_snapshots',
             {'id': 'new_snapshot_id',
              'share_id': 'new_share_id'}),
            ('share_snapshot_instances',
             {'id': 'new_snapshot_instance_id',
              'snapshot_id': 'new_snapshot_id',
              'share_instance_id': 'new_share_instance_id'}),
        )
        for table_name, row in fixtures:
            table = utils.load_table(table_name, engine)
            engine.execute(table.insert(row))

    def check_upgrade(self, engine, data):
        """The new column exists and the fixture row survived the upgrade."""
        table = utils.load_table(self.table_name, engine)
        result = engine.execute(table.select().where(
            table.c.id == 'new_snapshot_instance_id'))
        self.test_case.assertTrue(result.rowcount > 0)
        for row in result:
            self.test_case.assertTrue(hasattr(row, 'provider_location'))
            self.test_case.assertEqual('new_snapshot_id', row.snapshot_id)

    def check_downgrade(self, engine):
        """The column is gone after downgrade but the row remains."""
        table = utils.load_table(self.table_name, engine)
        result = engine.execute(table.select().where(
            table.c.id == 'new_snapshot_instance_id'))
        self.test_case.assertTrue(result.rowcount > 0)
        for row in result:
            self.test_case.assertFalse(hasattr(row, 'provider_location'))
            self.test_case.assertEqual('new_snapshot_id', row.snapshot_id)
@map_to_migration('221a83cfd85b')
class ShareNetworksFieldLengthChecks(BaseMigrationChecks):
    """Checks for widening user_id/project_id columns from 36 to 255."""

    def setup_upgrade_data(self, engine):
        """Insert one share network and one security service row."""
        sn_table = utils.load_table('share_networks', engine)
        engine.execute(sn_table.insert({
            'id': 'foo_share_network_id_2',
            'user_id': '123456789123456789',
            'project_id': 'project_id',
        }))
        ss_table = utils.load_table('security_services', engine)
        engine.execute(ss_table.insert({
            'id': 'foo_security_services_id',
            'type': 'foo_type',
            'project_id': 'project_id',
        }))

    def _check_length_for_table_columns(self, table_name, engine,
                                        cols, length):
        """Assert every column in *cols* has the given VARCHAR length."""
        table = utils.load_table(table_name, engine)
        result = engine.execute(table.select())
        self.test_case.assertTrue(result.rowcount > 0)
        for col in cols:
            self.test_case.assertEqual(table.columns.get(col).type.length,
                                       length)

    def check_upgrade(self, engine, data):
        """Columns widened to 255 characters."""
        self._check_length_for_table_columns('share_networks', engine,
                                             ('user_id', 'project_id'), 255)
        self._check_length_for_table_columns('security_services', engine,
                                             ('project_id',), 255)

    def check_downgrade(self, engine):
        """Columns back to the original 36 characters."""
        self._check_length_for_table_columns('share_networks', engine,
                                             ('user_id', 'project_id'), 36)
        self._check_length_for_table_columns('security_services', engine,
                                             ('project_id',), 36)
@map_to_migration('fdfb668d19e1')
class NewGatewayColumnChecks(BaseMigrationChecks):
    """Checks for adding 'gateway' to network_allocations/share_networks."""
    na_table_name = 'network_allocations'
    sn_table_name = 'share_networks'
    na_ids = ['network_allocation_id_fake_%d' % i for i in (1, 2, 3)]
    sn_ids = ['share_network_id_fake_%d' % i for i in (1, 2)]
    def setup_upgrade_data(self, engine):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'share_server_id_foo'
        # Create share network
        share_network_data = {
            'id': self.sn_ids[0],
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table(self.sn_table_name, engine)
        engine.execute(sn_table.insert(share_network_data))
        # Create share server
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))
        # Create network allocations
        network_allocations = [
            {
                'id': self.na_ids[0],
                'share_server_id': share_server_id,
                'ip_address': '1.1.1.1',
            },
            {
                'id': self.na_ids[1],
                'share_server_id': share_server_id,
                'ip_address': '2.2.2.2',
            },
        ]
        na_table = utils.load_table(self.na_table_name, engine)
        engine.execute(na_table.insert(network_allocations))
    def check_upgrade(self, engine, data):
        """Both tables gain 'gateway'; new rows can populate it."""
        na_table = utils.load_table(self.na_table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
        # Create network allocation
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
            },
        ]
        engine.execute(na_table.insert(network_allocations))
        # Select network allocations with gateway info
        for na in engine.execute(
                na_table.select().where(na_table.c.gateway == '3.3.3.1')):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
            self.test_case.assertEqual(network_allocations[0]['gateway'],
                                       getattr(na, 'gateway'))
        sn_table = utils.load_table(self.sn_table_name, engine)
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
        # Create share network
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo',
            },
        ]
        engine.execute(sn_table.insert(share_networks))
        # Select share network
        for sn in engine.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo')):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
            self.test_case.assertEqual(share_networks[0]['gateway'],
                                       getattr(sn, 'gateway'))
    def check_downgrade(self, engine):
        """The 'gateway' column is dropped from both tables; rows survive."""
        for table_name, ids in ((self.na_table_name, self.na_ids),
                                (self.sn_table_name, self.sn_ids)):
            table = utils.load_table(table_name, engine)
            db_result = engine.execute(table.select())
            self.test_case.assertTrue(db_result.rowcount >= len(ids))
            for record in db_result:
                self.test_case.assertFalse(hasattr(record, 'gateway'))
@map_to_migration('e8ea58723178')
class RemoveHostFromDriverPrivateDataChecks(BaseMigrationChecks):
    """Checks for dropping the 'host' column from drivers_private_data."""

    table_name = 'drivers_private_data'
    host_column_name = 'host'

    def setup_upgrade_data(self, engine):
        """Insert one private-data row that still carries a host value."""
        row = {
            'created_at': datetime.datetime(2016, 7, 14, 22, 31, 22),
            'deleted': 0,
            'host': 'host1',
            'entity_uuid': 'entity_uuid1',
            'key': 'key1',
            'value': 'value1',
        }
        table = utils.load_table(self.table_name, engine)
        engine.execute(table.insert(row))

    def check_upgrade(self, engine, data):
        """The host column is gone after upgrade."""
        table = utils.load_table(self.table_name, engine)
        for row in engine.execute(table.select()):
            self.test_case.assertFalse(hasattr(row, self.host_column_name))

    def check_downgrade(self, engine):
        """The column is restored and backfilled with 'unknown'."""
        table = utils.load_table(self.table_name, engine)
        for row in engine.execute(table.select()):
            self.test_case.assertTrue(hasattr(row, self.host_column_name))
            self.test_case.assertEqual('unknown', row[self.host_column_name])
@map_to_migration('493eaffd79e1')
class NewMTUColumnChecks(BaseMigrationChecks):
    """Checks for adding 'mtu' to network_allocations and share_networks."""

    na_table_name = 'network_allocations'
    sn_table_name = 'share_networks'
    na_ids = ['network_allocation_id_fake_3_%d' % i for i in (1, 2, 3)]
    sn_ids = ['share_network_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, engine):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'share_server_id_foo_2'
        # Create share network
        share_network_data = {
            'id': self.sn_ids[0],
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table(self.sn_table_name, engine)
        engine.execute(sn_table.insert(share_network_data))
        # Create share server
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))
        # Create network allocations (pre-upgrade schema, no MTU)
        network_allocations = [
            {
                'id': self.na_ids[0],
                'share_server_id': share_server_id,
                'ip_address': '1.1.1.1',
            },
            {
                'id': self.na_ids[1],
                'share_server_id': share_server_id,
                'ip_address': '2.2.2.2',
            },
        ]
        na_table = utils.load_table(self.na_table_name, engine)
        engine.execute(na_table.insert(network_allocations))

    def check_upgrade(self, engine, data):
        """Both tables gain 'mtu'; pre-existing rows have it NULL."""
        na_table = utils.load_table(self.na_table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
        # Create network allocation that sets the new column
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
                'mtu': 1509,
            },
        ]
        engine.execute(na_table.insert(network_allocations))
        # Select network allocations with mtu info.
        # NOTE: compare against the integer value; the previous string
        # literal '1509' only matched through SQLite type coercion.
        for na in engine.execute(
                na_table.select().where(na_table.c.mtu == 1509)):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
            self.test_case.assertEqual(network_allocations[0]['mtu'],
                                       getattr(na, 'mtu'))
        # Select all entries and check for the value
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
            if na['id'] == self.na_ids[2]:
                self.test_case.assertEqual(network_allocations[0]['mtu'],
                                           getattr(na, 'mtu'))
            else:
                self.test_case.assertIsNone(na['mtu'])
        sn_table = utils.load_table(self.sn_table_name, engine)
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
        # Create share network that sets the new column
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo_2',
                'mtu': 1509,
            },
        ]
        engine.execute(sn_table.insert(share_networks))
        # Select share network with MTU set
        for sn in engine.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo_2')):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
            self.test_case.assertEqual(share_networks[0]['mtu'],
                                       getattr(sn, 'mtu'))
        # Select all entries and check for the value.
        # NOTE: compare against the share-network fixture here; the previous
        # code compared against the network-allocation fixture (copy/paste)
        # and only passed because both fixtures use the same MTU value.
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
            if sn['id'] == self.sn_ids[1]:
                self.test_case.assertEqual(share_networks[0]['mtu'],
                                           getattr(sn, 'mtu'))
            else:
                self.test_case.assertIsNone(sn['mtu'])

    def check_downgrade(self, engine):
        """The 'mtu' column is dropped from both tables; rows survive."""
        for table_name, ids in ((self.na_table_name, self.na_ids),
                                (self.sn_table_name, self.sn_ids)):
            table = utils.load_table(table_name, engine)
            db_result = engine.execute(table.select())
            self.test_case.assertTrue(db_result.rowcount >= len(ids))
            for record in db_result:
                self.test_case.assertFalse(hasattr(record, 'mtu'))
@map_to_migration('63809d875e32')
class AddAccessKeyToShareAccessMapping(BaseMigrationChecks):
    """Checks for adding 'access_key' to the share_access_map table."""
    table_name = 'share_access_map'
    access_key_column_name = 'access_key'
    def setup_upgrade_data(self, engine):
        """Create a share, instance, access rule and instance-access row."""
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "CEPHFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(share_data))
        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_data['id'],
            'status': 'available',
            'access_rules_status': 'active'
        }
        share_instance_table = utils.load_table('share_instances', engine)
        engine.execute(share_instance_table.insert(share_instance_data))
        share_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_data['id'],
            'access_type': 'cephx',
            'access_to': 'alice',
            'deleted': 'False'
        }
        share_access_table = utils.load_table(self.table_name, engine)
        engine.execute(share_access_table.insert(share_access_data))
        share_instance_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': share_instance_data['id'],
            'access_id': share_access_data['id'],
            'deleted': 'False'
        }
        share_instance_access_table = utils.load_table(
            'share_instance_access_map', engine)
        engine.execute(share_instance_access_table.insert(
            share_instance_access_data))
    def check_upgrade(self, engine, data):
        """Access rows expose the new access_key column after upgrade."""
        share_access_table = utils.load_table(self.table_name, engine)
        rows = engine.execute(share_access_table.select())
        for row in rows:
            self.test_case.assertTrue(hasattr(row,
                                              self.access_key_column_name))
    def check_downgrade(self, engine):
        """The access_key column is removed on downgrade."""
        share_access_table = utils.load_table(self.table_name, engine)
        rows = engine.execute(share_access_table.select())
        for row in rows:
            self.test_case.assertFalse(hasattr(row,
                                               self.access_key_column_name))
@map_to_migration('48a7beae3117')
class MoveShareTypeIdToInstancesCheck(BaseMigrationChecks):
    """Check migration 48a7beae3117.

    The upgrade moves the 'share_type_id' column from the 'shares' table
    onto 'share_instances' (each instance inherits its parent share's
    value); the downgrade moves it back to 'shares'.
    """

    some_shares = [
        {
            'id': 's1',
            'share_type_id': 't1',
        },
        {
            'id': 's2',
            'share_type_id': 't2',
        },
        {
            'id': 's3',
            'share_type_id': 't3',
        },
    ]
    share_ids = [x['id'] for x in some_shares]

    some_instances = [
        {
            'id': 'i1',
            'share_id': 's3',
        },
        {
            'id': 'i2',
            'share_id': 's2',
        },
        {
            'id': 'i3',
            'share_id': 's2',
        },
        {
            'id': 'i4',
            'share_id': 's1',
        },
    ]
    instance_ids = [x['id'] for x in some_instances]

    some_share_types = [
        {'id': 't1'},
        {'id': 't2'},
        {'id': 't3'},
    ]

    def setup_upgrade_data(self, engine):
        """Insert the share types, shares and instances defined above."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        share_types_table = utils.load_table('share_types', engine)
        for stype in self.some_share_types:
            engine.execute(share_types_table.insert(stype))
        for share in self.some_shares:
            engine.execute(shares_table.insert(share))
        for instance in self.some_instances:
            engine.execute(share_instances_table.insert(instance))

    def check_upgrade(self, engine, data):
        """Instances carry share_type_id; shares no longer expose it."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        # NOTE: Column.in_() builds a SQL IN clause; the Python 'in'
        # operator on a SQLAlchemy Column does not produce a usable filter.
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            share = engine.execute(shares_table.select().where(
                shares_table.c.id == instance['share_id'])).first()
            expected_share = next(
                (x for x in self.some_shares if share['id'] == x['id']), None)
            # Each instance must have inherited its parent share's type.
            self.test_case.assertEqual(expected_share['share_type_id'],
                                       instance['share_type_id'])
        # The shares themselves must not expose 'share_type_id' anymore.
        for share in engine.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            self.test_case.assertNotIn('share_type_id', share)

    def check_downgrade(self, engine):
        """share_type_id is back on shares and gone from instances."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertNotIn('share_type_id', instance)
        for share in engine.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            expected_share = next(
                (x for x in self.some_shares if share['id'] == x['id']), None)
            self.test_case.assertEqual(expected_share['share_type_id'],
                                       share['share_type_id'])
@map_to_migration('3e7d62517afa')
class CreateFromSnapshotExtraSpecAndShareColumn(BaseMigrationChecks):
    """Check migration 3e7d62517afa.

    The upgrade adds a 'create_share_from_snapshot_support' attribute to
    shares (mirroring each share's 'snapshot_support' value) and a matching
    extra spec to share types (mirroring the 'snapshot_support' extra
    spec); the downgrade removes both again.
    """
    # Attribute / extra-spec key introduced by the migration.
    expected_attr = constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT
    # Pre-existing attribute whose value the new one must mirror.
    snap_support_attr = constants.ExtraSpecs.SNAPSHOT_SUPPORT
    def _get_fake_data(self):
        """Build two share types with extra specs, plus matching shares
        and share instances (one share/instance per type)."""
        extra_specs = []
        shares = []
        share_instances = []
        share_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'share-type-1',
                'is_public': False,
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'share-type-2',
                'is_public': True,
            },
        ]
        # One type with snapshot support disabled, one enabled.
        snapshot_support = (False, True)
        dhss = ('True', 'False')
        for idx, share_type in enumerate(share_types):
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'snapshot_support',
                'spec_value': snapshot_support[idx],
                'deleted': 0,
            })
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'driver_handles_share_servers',
                'spec_value': dhss[idx],
                'deleted': 0,
            })
            share = fake_share(snapshot_support=snapshot_support[idx])
            shares.append(share)
            share_instances.append(
                fake_instance(share_id=share['id'],
                              share_type_id=share_type['id'])
            )
        return share_types, extra_specs, shares, share_instances
    def setup_upgrade_data(self, engine):
        """Insert the fake share types, extra specs, shares and instances."""
        (self.share_types, self.extra_specs, self.shares,
         self.share_instances) = self._get_fake_data()
        share_types_table = utils.load_table('share_types', engine)
        engine.execute(share_types_table.insert(self.share_types))
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        engine.execute(extra_specs_table.insert(self.extra_specs))
        shares_table = utils.load_table('shares', engine)
        engine.execute(shares_table.insert(self.shares))
        share_instances_table = utils.load_table('share_instances', engine)
        engine.execute(share_instances_table.insert(self.share_instances))
    def check_upgrade(self, engine, data):
        """The new attribute/extra spec exists and mirrors snapshot
        support on every share and share type."""
        share_type_ids = [st['id'] for st in self.share_types]
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        share_types_table = utils.load_table('share_types', engine)
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        # Pre-existing Shares must be present
        shares_in_db = engine.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        # new shares attr must match snapshot support
        for share in shares_in_db:
            self.test_case.assertTrue(hasattr(share, self.expected_attr))
            self.test_case.assertEqual(share[self.snap_support_attr],
                                       share[self.expected_attr])
        # Pre-existing Share types must be present
        share_types_in_db = (
            engine.execute(share_types_table.select()).fetchall())
        share_type_ids_in_db = [s['id'] for s in share_types_in_db]
        for share_type_id in share_type_ids:
            self.test_case.assertIn(share_type_id, share_type_ids_in_db)
        # Pre-existing extra specs must be present
        extra_specs_in_db = (
            engine.execute(extra_specs_table.select().where(
                extra_specs_table.c.deleted == 0)).fetchall())
        self.test_case.assertGreaterEqual(len(extra_specs_in_db),
                                          len(self.extra_specs))
        # New Extra spec for share types must match snapshot support
        for share_type_id in share_type_ids:
            new_extra_spec = [x for x in extra_specs_in_db
                              if x['spec_key'] == self.expected_attr
                              and x['share_type_id'] == share_type_id]
            snapshot_support_spec = [
                x for x in extra_specs_in_db
                if x['spec_key'] == self.snap_support_attr
                and x['share_type_id'] == share_type_id]
            # Exactly one of each spec per share type, with equal values.
            self.test_case.assertEqual(1, len(new_extra_spec))
            self.test_case.assertEqual(1, len(snapshot_support_spec))
            self.test_case.assertEqual(
                snapshot_support_spec[0]['spec_value'],
                new_extra_spec[0]['spec_value'])
    def check_downgrade(self, engine):
        """The attribute and extra spec are gone; original data remains."""
        share_type_ids = [st['id'] for st in self.share_types]
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        share_types_table = utils.load_table('share_types', engine)
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        # Pre-existing Shares must be present
        shares_in_db = engine.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        # Shares should have no attr to create share from snapshot
        for share in shares_in_db:
            self.test_case.assertFalse(hasattr(share, self.expected_attr))
        # Pre-existing Share types must be present
        share_types_in_db = (
            engine.execute(share_types_table.select()).fetchall())
        share_type_ids_in_db = [s['id'] for s in share_types_in_db]
        for share_type_id in share_type_ids:
            self.test_case.assertIn(share_type_id, share_type_ids_in_db)
        # Pre-existing extra specs must be present
        extra_specs_in_db = (
            engine.execute(extra_specs_table.select().where(
                extra_specs_table.c.deleted == 0)).fetchall())
        self.test_case.assertGreaterEqual(len(extra_specs_in_db),
                                          len(self.extra_specs))
        # Share types must not have create share from snapshot extra spec
        for share_type_id in share_type_ids:
            new_extra_spec = [x for x in extra_specs_in_db
                              if x['spec_key'] == self.expected_attr
                              and x['share_type_id'] == share_type_id]
            self.test_case.assertEqual(0, len(new_extra_spec))
@map_to_migration('87ce15c59bbe')
class RevertToSnapshotShareColumn(BaseMigrationChecks):
    """Check migration 87ce15c59bbe.

    The upgrade adds a 'revert_to_snapshot_support' attribute to shares,
    defaulting to False; the downgrade removes it again.
    """
    # Attribute introduced by the migration.
    expected_attr = constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT
    def _get_fake_data(self):
        """Build two share types with extra specs, plus matching shares
        and share instances (one share/instance per type)."""
        extra_specs = []
        shares = []
        share_instances = []
        share_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'revert-1',
                'is_public': False,
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'revert-2',
                'is_public': True,
            },
        ]
        # One type with snapshot support disabled, one enabled.
        snapshot_support = (False, True)
        dhss = ('True', 'False')
        for idx, share_type in enumerate(share_types):
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'snapshot_support',
                'spec_value': snapshot_support[idx],
                'deleted': 0,
            })
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'driver_handles_share_servers',
                'spec_value': dhss[idx],
                'deleted': 0,
            })
            share = fake_share(snapshot_support=snapshot_support[idx])
            shares.append(share)
            share_instances.append(
                fake_instance(share_id=share['id'],
                              share_type_id=share_type['id'])
            )
        return share_types, extra_specs, shares, share_instances
    def setup_upgrade_data(self, engine):
        """Insert the fake share types, extra specs, shares and instances."""
        (self.share_types, self.extra_specs, self.shares,
         self.share_instances) = self._get_fake_data()
        share_types_table = utils.load_table('share_types', engine)
        engine.execute(share_types_table.insert(self.share_types))
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        engine.execute(extra_specs_table.insert(self.extra_specs))
        shares_table = utils.load_table('shares', engine)
        engine.execute(shares_table.insert(self.shares))
        share_instances_table = utils.load_table('share_instances', engine)
        engine.execute(share_instances_table.insert(self.share_instances))
    def check_upgrade(self, engine, data):
        """Every non-deleted share has the new attribute set to False."""
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        # Pre-existing Shares must be present
        shares_in_db = engine.execute(shares_table.select().where(
            shares_table.c.deleted == 'False')).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        # New shares attr must be present and set to False
        for share in shares_in_db:
            self.test_case.assertTrue(hasattr(share, self.expected_attr))
            self.test_case.assertEqual(False, share[self.expected_attr])
    def check_downgrade(self, engine):
        """The attribute is gone; the original shares remain."""
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        # Pre-existing Shares must be present
        shares_in_db = engine.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        # Shares should have no attr to revert share to snapshot
        for share in shares_in_db:
            self.test_case.assertFalse(hasattr(share, self.expected_attr))
@map_to_migration('95e3cf760840')
class RemoveNovaNetIdColumnFromShareNetworks(BaseMigrationChecks):
    """Check migration 95e3cf760840.

    The upgrade drops the 'nova_net_id' column from share_networks; the
    downgrade re-creates it (emptied, i.e. NULL for every row).
    """

    table_name = 'share_networks'
    nova_net_column_name = 'nova_net_id'

    def setup_upgrade_data(self, engine):
        """Insert one share network that still carries a nova_net_id."""
        network_row = {
            'id': 'foo_share_network_id_3',
            'user_id': 'user_id',
            'project_id': 'project_id',
            'nova_net_id': 'foo_nova_net_id',
        }
        table = utils.load_table(self.table_name, engine)
        engine.execute(table.insert(network_row))

    def check_upgrade(self, engine, data):
        """No share network row exposes the column after upgrade."""
        result = engine.execute(
            utils.load_table(self.table_name, engine).select())
        self.test_case.assertGreater(result.rowcount, 0)
        for network in result:
            self.test_case.assertFalse(
                hasattr(network, self.nova_net_column_name))

    def check_downgrade(self, engine):
        """Column is back after downgrade but NULL for every row."""
        result = engine.execute(
            utils.load_table(self.table_name, engine).select())
        self.test_case.assertGreater(result.rowcount, 0)
        for network in result:
            self.test_case.assertTrue(
                hasattr(network, self.nova_net_column_name))
            self.test_case.assertIsNone(
                network[self.nova_net_column_name])
@map_to_migration('54667b9cade7')
class RestoreStateToShareInstanceAccessMap(BaseMigrationChecks):
    """Check migration 54667b9cade7.

    The upgrade adds a per-rule 'state' column to
    share_instance_access_map (derived from each instance's legacy
    'access_rules_status') and rewrites the legacy transitional statuses
    on share instances to the new vocabulary; the downgrade drops the
    column and restores the legacy statuses.
    """
    # Expected legacy access_rules_status -> new per-rule 'state' value.
    new_instance_mapping_state = {
        constants.STATUS_ACTIVE: constants.STATUS_ACTIVE,
        constants.SHARE_INSTANCE_RULES_SYNCING:
            constants.ACCESS_STATE_QUEUED_TO_APPLY,
        constants.STATUS_OUT_OF_SYNC: constants.ACCESS_STATE_QUEUED_TO_APPLY,
        'updating': constants.ACCESS_STATE_QUEUED_TO_APPLY,
        'updating_multiple': constants.ACCESS_STATE_QUEUED_TO_APPLY,
        constants.SHARE_INSTANCE_RULES_ERROR: constants.ACCESS_STATE_ERROR,
    }
    # Expected legacy -> new instance 'access_rules_status' value.
    new_access_rules_status = {
        constants.STATUS_ACTIVE: constants.STATUS_ACTIVE,
        constants.STATUS_OUT_OF_SYNC: constants.SHARE_INSTANCE_RULES_SYNCING,
        'updating': constants.SHARE_INSTANCE_RULES_SYNCING,
        'updating_multiple': constants.SHARE_INSTANCE_RULES_SYNCING,
        constants.SHARE_INSTANCE_RULES_ERROR:
            constants.SHARE_INSTANCE_RULES_ERROR,
    }
    @staticmethod
    def generate_share_instance(sid, access_rules_status):
        """Return a share instance row dict for share *sid* with the
        given access_rules_status."""
        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': sid,
            'status': constants.STATUS_AVAILABLE,
            'access_rules_status': access_rules_status
        }
        return share_instance_data
    @staticmethod
    def generate_share_instance_access_map(share_access_data_id,
                                           share_instance_id):
        """Return a rule-to-instance mapping row dict."""
        share_instance_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': share_instance_id,
            'access_id': share_access_data_id,
            'deleted': 'False'
        }
        return share_instance_access_data
    def setup_upgrade_data(self, engine):
        """Create one share, five instances covering the legacy statuses,
        one access rule, and a mapping of that rule to every instance."""
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': 'fake',
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(share_data))
        share_instances = [
            self.generate_share_instance(
                share_data['id'], constants.STATUS_ACTIVE),
            self.generate_share_instance(
                share_data['id'], constants.STATUS_OUT_OF_SYNC),
            self.generate_share_instance(
                share_data['id'], constants.STATUS_ERROR),
            self.generate_share_instance(
                share_data['id'], 'updating'),
            self.generate_share_instance(
                share_data['id'], 'updating_multiple'),
        ]
        # Remembered so the checks can assert the rewritten statuses.
        self.updating_share_instance = share_instances[3]
        self.updating_multiple_share_instance = share_instances[4]
        share_instance_table = utils.load_table('share_instances', engine)
        for share_instance_data in share_instances:
            engine.execute(share_instance_table.insert(share_instance_data))
        share_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_data['id'],
            'access_type': 'fake',
            'access_to': 'alice',
            'deleted': 'False'
        }
        share_access_table = utils.load_table('share_access_map', engine)
        engine.execute(share_access_table.insert(share_access_data))
        share_instance_access_data = []
        for share_instance in share_instances:
            sia_map = self.generate_share_instance_access_map(
                share_access_data['id'], share_instance['id'])
            share_instance_access_data.append(sia_map)
        share_instance_access_table = utils.load_table(
            'share_instance_access_map', engine)
        for sia_map in share_instance_access_data:
            engine.execute(share_instance_access_table.insert(sia_map))
    def check_upgrade(self, engine, data):
        """Each mapping gained a 'state' derived from its instance's
        access_rules_status; legacy statuses were rewritten."""
        share_instance_table = utils.load_table('share_instances', engine)
        sia_table = utils.load_table('share_instance_access_map', engine)
        for rule in engine.execute(sia_table.select()):
            self.test_case.assertTrue(hasattr(rule, 'state'))
            correlated_share_instances = engine.execute(
                share_instance_table.select().where(
                    share_instance_table.c.id == rule['share_instance_id']))
            access_rules_status = getattr(correlated_share_instances.first(),
                                          'access_rules_status')
            self.test_case.assertEqual(
                self.new_instance_mapping_state[access_rules_status],
                rule['state'])
        for instance in engine.execute(share_instance_table.select()):
            # Legacy transitional statuses must no longer appear anywhere.
            self.test_case.assertTrue(instance['access_rules_status']
                                      not in ('updating',
                                              'updating_multiple',
                                              constants.STATUS_OUT_OF_SYNC))
            if instance['id'] in (self.updating_share_instance['id'],
                                  self.updating_multiple_share_instance['id']):
                self.test_case.assertEqual(
                    constants.SHARE_INSTANCE_RULES_SYNCING,
                    instance['access_rules_status'])
    def check_downgrade(self, engine):
        """The 'state' column is dropped and legacy statuses restored."""
        share_instance_table = utils.load_table('share_instances', engine)
        sia_table = utils.load_table('share_instance_access_map', engine)
        for rule in engine.execute(sia_table.select()):
            self.test_case.assertFalse(hasattr(rule, 'state'))
        for instance in engine.execute(share_instance_table.select()):
            if instance['id'] in (self.updating_share_instance['id'],
                                  self.updating_multiple_share_instance['id']):
                self.test_case.assertEqual(
                    constants.STATUS_OUT_OF_SYNC,
                    instance['access_rules_status'])
@map_to_migration('e9f79621d83f')
class AddCastRulesToReadonlyToInstances(BaseMigrationChecks):
    """Check migration e9f79621d83f.

    The upgrade adds a 'cast_rules_to_readonly' column to share
    instances; it must be True only for in-sync, non-migrating secondary
    replicas of shares with the 'readable' replication type, and False
    everywhere else. The downgrade drops the column.
    """

    share_type = {
        'id': uuidutils.generate_uuid(),
    }
    shares = [
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_READABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_READABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_WRITABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
        },
    ]
    share_ids = [x['id'] for x in shares]
    # The only instance expected to end up with cast_rules_to_readonly=True:
    # an in-sync replica of a 'readable' share that is not mid-migration.
    correct_instance = {
        'id': uuidutils.generate_uuid(),
        'share_id': share_ids[1],
        'replica_state': constants.REPLICA_STATE_IN_SYNC,
        'status': constants.STATUS_AVAILABLE,
        'share_type_id': share_type['id'],
    }
    instances = [
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[0],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[0],
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[1],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        correct_instance,
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[2],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[2],
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[3],
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
    ]
    # NOTE: only bind instance_ids here. The original chained assignment
    # 'instance_ids = share_ids = [...]' clobbered the share_ids attribute
    # above with INSTANCE ids.
    instance_ids = [x['id'] for x in instances]

    def setup_upgrade_data(self, engine):
        """Insert the share type, shares and instances defined above."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        share_types_table = utils.load_table('share_types', engine)
        engine.execute(share_types_table.insert(self.share_type))
        for share in self.shares:
            engine.execute(shares_table.insert(share))
        for instance in self.instances:
            engine.execute(share_instances_table.insert(instance))

    def check_upgrade(self, engine, data):
        """Only the expected replica has cast_rules_to_readonly=True."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        # Column.in_() builds a SQL IN clause; the Python 'in' operator
        # on a SQLAlchemy Column does not produce a usable filter.
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertIn('cast_rules_to_readonly', instance)
            share = engine.execute(shares_table.select().where(
                instance['share_id'] == shares_table.c.id)).first()
            if (instance['replica_state'] != constants.REPLICA_STATE_ACTIVE and
                    share['replication_type'] ==
                    constants.REPLICATION_TYPE_READABLE and
                    instance['status'] != constants.STATUS_REPLICATION_CHANGE):
                self.test_case.assertTrue(instance['cast_rules_to_readonly'])
                self.test_case.assertEqual(instance['id'],
                                           self.correct_instance['id'])
            else:
                self.test_case.assertEqual(
                    False, instance['cast_rules_to_readonly'])

    def check_downgrade(self, engine):
        """The column is gone from every instance after downgrade."""
        share_instances_table = utils.load_table('share_instances', engine)
        for instance in engine.execute(share_instances_table.select()):
            self.test_case.assertNotIn('cast_rules_to_readonly', instance)
@map_to_migration('03da71c0e321')
class ShareGroupMigrationChecks(BaseMigrationChecks):
    """Check migration 03da71c0e321.

    The upgrade renames the consistency-group tables and columns to their
    share-group equivalents (consistency_groups -> share_groups,
    cgsnapshots -> share_group_snapshots, cgsnapshot_members ->
    share_group_snapshot_members, and the mapping table); the downgrade
    restores the original names. All rows must survive in both directions.
    """
    def setup_upgrade_data(self, engine):
        """Create one row in every table affected by the rename."""
        # Create share type
        self.share_type_id = uuidutils.generate_uuid()
        st_fixture = {
            'deleted': "False",
            'id': self.share_type_id,
        }
        st_table = utils.load_table('share_types', engine)
        engine.execute(st_table.insert(st_fixture))
        # Create CG
        self.cg_id = uuidutils.generate_uuid()
        cg_fixture = {
            'deleted': "False",
            'id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cg_table = utils.load_table('consistency_groups', engine)
        engine.execute(cg_table.insert(cg_fixture))
        # Create share_type group mapping
        self.mapping_id = uuidutils.generate_uuid()
        mapping_fixture = {
            'deleted': "False",
            'id': self.mapping_id,
            'consistency_group_id': self.cg_id,
            'share_type_id': self.share_type_id,
        }
        mapping_table = utils.load_table(
            'consistency_group_share_type_mappings', engine)
        engine.execute(mapping_table.insert(mapping_fixture))
        # Create share
        self.share_id = uuidutils.generate_uuid()
        share_fixture = {
            'deleted': "False",
            'id': self.share_id,
            'consistency_group_id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(share_fixture))
        # Create share instance
        self.share_instance_id = uuidutils.generate_uuid()
        share_instance_fixture = {
            'deleted': "False",
            'share_type_id': self.share_type_id,
            'id': self.share_instance_id,
            'share_id': self.share_id,
            'cast_rules_to_readonly': False,
        }
        share_instance_table = utils.load_table('share_instances', engine)
        engine.execute(share_instance_table.insert(share_instance_fixture))
        # Create cgsnapshot
        self.cgsnapshot_id = uuidutils.generate_uuid()
        cg_snap_fixture = {
            'deleted': "False",
            'id': self.cgsnapshot_id,
            'consistency_group_id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cgsnapshots_table = utils.load_table('cgsnapshots', engine)
        engine.execute(cgsnapshots_table.insert(cg_snap_fixture))
        # Create cgsnapshot member
        self.cgsnapshot_member_id = uuidutils.generate_uuid()
        cg_snap_member_fixture = {
            'deleted': "False",
            'id': self.cgsnapshot_member_id,
            'cgsnapshot_id': self.cgsnapshot_id,
            'share_type_id': self.share_type_id,
            'share_instance_id': self.share_instance_id,
            'share_id': self.share_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cgsnapshot_members_table = utils.load_table(
            'cgsnapshot_members', engine)
        engine.execute(cgsnapshot_members_table.insert(cg_snap_member_fixture))
    def check_upgrade(self, engine, data):
        """The renamed tables/columns contain the rows created above."""
        sg_table = utils.load_table("share_groups", engine)
        db_result = engine.execute(sg_table.select().where(
            sg_table.c.id == self.cg_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sg = db_result.first()
        self.test_case.assertIsNone(sg['source_share_group_snapshot_id'])
        share_table = utils.load_table("shares", engine)
        share_result = engine.execute(share_table.select().where(
            share_table.c.id == self.share_id))
        self.test_case.assertEqual(1, share_result.rowcount)
        share = share_result.first()
        # consistency_group_id was renamed to share_group_id.
        self.test_case.assertEqual(self.cg_id, share['share_group_id'])
        self.test_case.assertIsNone(
            share['source_share_group_snapshot_member_id'])
        mapping_table = utils.load_table(
            "share_group_share_type_mappings", engine)
        mapping_result = engine.execute(mapping_table.select().where(
            mapping_table.c.id == self.mapping_id))
        self.test_case.assertEqual(1, mapping_result.rowcount)
        mapping_record = mapping_result.first()
        self.test_case.assertEqual(
            self.cg_id, mapping_record['share_group_id'])
        self.test_case.assertEqual(
            self.share_type_id, mapping_record['share_type_id'])
        sgs_table = utils.load_table("share_group_snapshots", engine)
        db_result = engine.execute(sgs_table.select().where(
            sgs_table.c.id == self.cgsnapshot_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sgs = db_result.first()
        self.test_case.assertEqual(self.cg_id, sgs['share_group_id'])
        sgsm_table = utils.load_table("share_group_snapshot_members", engine)
        db_result = engine.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.cgsnapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sgsm = db_result.first()
        self.test_case.assertEqual(
            self.cgsnapshot_id, sgsm['share_group_snapshot_id'])
        # share_type_id was dropped from the member table on upgrade.
        self.test_case.assertNotIn('share_type_id', sgsm)
    def check_downgrade(self, engine):
        """The original table and column names are back, data intact."""
        cg_table = utils.load_table("consistency_groups", engine)
        db_result = engine.execute(cg_table.select().where(
            cg_table.c.id == self.cg_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        cg = db_result.first()
        self.test_case.assertIsNone(cg['source_cgsnapshot_id'])
        share_table = utils.load_table("shares", engine)
        share_result = engine.execute(share_table.select().where(
            share_table.c.id == self.share_id))
        self.test_case.assertEqual(1, share_result.rowcount)
        share = share_result.first()
        self.test_case.assertEqual(self.cg_id, share['consistency_group_id'])
        self.test_case.assertIsNone(
            share['source_cgsnapshot_member_id'])
        mapping_table = utils.load_table(
            "consistency_group_share_type_mappings", engine)
        mapping_result = engine.execute(mapping_table.select().where(
            mapping_table.c.id == self.mapping_id))
        self.test_case.assertEqual(1, mapping_result.rowcount)
        cg_st_mapping = mapping_result.first()
        self.test_case.assertEqual(
            self.cg_id, cg_st_mapping['consistency_group_id'])
        self.test_case.assertEqual(
            self.share_type_id, cg_st_mapping['share_type_id'])
        cg_snapshots_table = utils.load_table("cgsnapshots", engine)
        db_result = engine.execute(cg_snapshots_table.select().where(
            cg_snapshots_table.c.id == self.cgsnapshot_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        cgsnap = db_result.first()
        self.test_case.assertEqual(self.cg_id, cgsnap['consistency_group_id'])
        cg_snap_member_table = utils.load_table("cgsnapshot_members", engine)
        db_result = engine.execute(cg_snap_member_table.select().where(
            cg_snap_member_table.c.id == self.cgsnapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        member = db_result.first()
        self.test_case.assertEqual(
            self.cgsnapshot_id, member['cgsnapshot_id'])
        # share_type_id is restored on the member table on downgrade.
        self.test_case.assertIn('share_type_id', member)
        self.test_case.assertEqual(self.share_type_id, member['share_type_id'])
@map_to_migration('927920b37453')
class ShareGroupSnapshotMemberNewProviderLocationColumnChecks(
        BaseMigrationChecks):
    """Check migration 927920b37453.

    The upgrade adds a 'provider_location' string column (255 chars) to
    share_group_snapshot_members; the downgrade removes it.
    """

    table_name = 'share_group_snapshot_members'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Create the full chain of rows a snapshot member depends on."""
        fixtures = (
            ('share_group_types', {
                'id': self.share_group_type_id,
                'name': uuidutils.generate_uuid(),
            }),
            ('share_groups', {
                'id': self.share_group_id,
                'project_id': 'fake_project_id',
                'user_id': 'fake_user_id',
                'share_group_type_id': self.share_group_type_id,
            }),
            ('shares', {
                'id': self.share_id,
                'share_group_id': self.share_group_id,
            }),
            ('share_instances', {
                'id': self.share_instance_id,
                'share_id': self.share_id,
                'cast_rules_to_readonly': False,
            }),
            ('share_group_snapshots', {
                'id': self.share_group_snapshot_id,
                'share_group_id': self.share_group_id,
                'project_id': 'fake_project_id',
                'user_id': 'fake_user_id',
            }),
            (self.table_name, {
                'id': self.share_group_snapshot_member_id,
                'share_group_snapshot_id': self.share_group_snapshot_id,
                'share_id': self.share_id,
                'share_instance_id': self.share_instance_id,
                'project_id': 'fake_project_id',
                'user_id': 'fake_user_id',
            }),
        )
        for table_name, row in fixtures:
            engine.execute(utils.load_table(table_name, engine).insert(row))

    def _fetch_member(self, engine, table):
        # Select only the snapshot member created by this check.
        return engine.execute(table.select().where(
            table.c.id == self.share_group_snapshot_member_id))

    def check_upgrade(self, engine, data):
        """Member has 'provider_location' and accepts a 255-char string."""
        table = utils.load_table(self.table_name, engine)
        result = self._fetch_member(engine, table)
        self.test_case.assertEqual(1, result.rowcount)
        for member in result:
            self.test_case.assertTrue(hasattr(member, 'provider_location'))
        # The new column must hold a string of the full declared length.
        engine.execute(table.update().where(
            table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'provider_location': ('z' * 255),
        }))

    def check_downgrade(self, engine):
        """The 'provider_location' attribute is gone after downgrade."""
        table = utils.load_table(self.table_name, engine)
        result = self._fetch_member(engine, table)
        self.test_case.assertEqual(1, result.rowcount)
        for member in result:
            self.test_case.assertFalse(hasattr(member, 'provider_location'))
@map_to_migration('d5db24264f5c')
class ShareGroupNewConsistentSnapshotSupportColumnChecks(BaseMigrationChecks):
    """Check migration d5db24264f5c.

    The upgrade adds an enum 'consistent_snapshot_support' column to
    share_groups accepting only NULL, 'pool' or 'host'; the downgrade
    drops it again.
    """

    table_name = 'share_groups'
    new_attr_name = 'consistent_snapshot_support'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert a share group and the group type it references."""
        group_type_row = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        engine.execute(
            utils.load_table('share_group_types', engine).insert(
                group_type_row))

        group_row = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        engine.execute(
            utils.load_table('share_groups', engine).insert(group_row))

    def _fetch_group(self, engine, table):
        # Select only the share group created by this check.
        return engine.execute(table.select().where(
            table.c.id == self.share_group_id))

    def check_upgrade(self, engine, data):
        """Column exists and enforces the NULL/'pool'/'host' enum."""
        table = utils.load_table(self.table_name, engine)
        result = self._fetch_group(engine, table)
        self.test_case.assertEqual(1, result.rowcount)
        for group in result:
            self.test_case.assertTrue(hasattr(group, self.new_attr_name))
        # Allowed enum values must be writable.
        for allowed in (None, 'pool', 'host'):
            engine.execute(table.update().where(
                table.c.id == self.share_group_id,
            ).values({self.new_attr_name: allowed}))
        # Any value outside the enum must be rejected by the database.
        for forbidden in ('', 'fake', 'pool1', 'host1', '1pool', '1host'):
            self.test_case.assertRaises(
                oslo_db_exc.DBError,
                engine.execute,
                table.update().where(
                    table.c.id == self.share_group_id
                ).values({self.new_attr_name: forbidden})
            )

    def check_downgrade(self, engine):
        """The column is gone after downgrade."""
        table = utils.load_table(self.table_name, engine)
        result = self._fetch_group(engine, table)
        self.test_case.assertEqual(1, result.rowcount)
        for group in result:
            self.test_case.assertFalse(hasattr(group, self.new_attr_name))
@map_to_migration('7d142971c4ef')
class ReservationExpireIndexChecks(BaseMigrationChecks):
    """Check migration 7d142971c4ef.

    The upgrade creates a composite index over (deleted, expire) on the
    reservations table; the downgrade removes it.
    """

    def setup_upgrade_data(self, engine):
        # No fixture rows are needed to inspect table indexes.
        pass

    def _get_reservations_expire_delete_index(self, engine):
        """Return the index covering exactly {deleted, expire}, if any."""
        table = utils.load_table('reservations', engine)
        wanted_columns = ['deleted', 'expire']
        for index in table.indexes:
            if sorted(index.columns.keys()) == wanted_columns:
                return index

    def check_upgrade(self, engine, data):
        """The composite index must exist after upgrade."""
        self.test_case.assertTrue(
            self._get_reservations_expire_delete_index(engine))

    def check_downgrade(self, engine):
        """The composite index must be gone after downgrade."""
        self.test_case.assertFalse(
            self._get_reservations_expire_delete_index(engine))
@map_to_migration('5237b6625330')
class ShareGroupNewAvailabilityZoneIDColumnChecks(BaseMigrationChecks):
    """Check migration 5237b6625330.

    The upgrade adds an 'availability_zone_id' column to share_groups
    that accepts NULL or an existing availability zone id; the downgrade
    drops it.
    """

    table_name = 'share_groups'
    new_attr_name = 'availability_zone_id'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    availability_zone_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert an AZ, a share group type, and a share group."""
        fixtures = (
            ('availability_zones', {
                'id': self.availability_zone_id,
                'name': uuidutils.generate_uuid(),
            }),
            ('share_group_types', {
                'id': self.share_group_type_id,
                'name': uuidutils.generate_uuid(),
            }),
            ('share_groups', {
                'id': self.share_group_id,
                'project_id': 'fake_project_id',
                'user_id': 'fake_user_id',
                'share_group_type_id': self.share_group_type_id,
            }),
        )
        for table_name, row in fixtures:
            engine.execute(utils.load_table(table_name, engine).insert(row))

    def _fetch_group(self, engine, table):
        # Select only the share group created by this check.
        return engine.execute(table.select().where(
            table.c.id == self.share_group_id))

    def check_upgrade(self, engine, data):
        """Column exists and accepts NULL or a valid AZ id."""
        table = utils.load_table(self.table_name, engine)
        result = self._fetch_group(engine, table)
        self.test_case.assertEqual(1, result.rowcount)
        for group in result:
            self.test_case.assertTrue(hasattr(group, self.new_attr_name))
        # Both NULL and a real availability zone id must be writable.
        for value in (None, self.availability_zone_id):
            engine.execute(table.update().where(
                table.c.id == self.share_group_id,
            ).values({self.new_attr_name: value}))

    def check_downgrade(self, engine):
        """The column is gone after downgrade."""
        table = utils.load_table(self.table_name, engine)
        result = self._fetch_group(engine, table)
        self.test_case.assertEqual(1, result.rowcount)
        for group in result:
            self.test_case.assertFalse(hasattr(group, self.new_attr_name))
@map_to_migration('31252d671ae5')
class SquashSGSnapshotMembersAndSSIModelsChecks(BaseMigrationChecks):
    """Check squashing of 'share_group_snapshot_members' rows into the
    'share_snapshot_instances' table (and the reverse on downgrade).
    """
    old_table_name = 'share_group_snapshot_members'
    new_table_name = 'share_snapshot_instances'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()
    # Columns expected to move between the old member table and SSI rows.
    keys = (
        'user_id', 'project_id', 'size', 'share_proto',
        'share_group_snapshot_id',
    )
    def setup_upgrade_data(self, engine):
        """Create the full dependency chain for one SG snapshot member."""
        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', engine)
        engine.execute(sgt_table.insert(sgt_data))
        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', engine)
        engine.execute(sg_table.insert(sg_data))
        # Setup shares
        share_data = {
            'id': self.share_id,
            'share_group_id': self.share_group_id,
        }
        s_table = utils.load_table('shares', engine)
        engine.execute(s_table.insert(share_data))
        # Setup share instances
        share_instance_data = {
            'id': self.share_instance_id,
            'share_id': share_data['id'],
            'cast_rules_to_readonly': False,
        }
        si_table = utils.load_table('share_instances', engine)
        engine.execute(si_table.insert(share_instance_data))
        # Setup share group snapshot
        sgs_data = {
            'id': self.share_group_snapshot_id,
            'share_group_id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgs_table = utils.load_table('share_group_snapshots', engine)
        engine.execute(sgs_table.insert(sgs_data))
        # Setup share group snapshot member
        sgsm_data = {
            'id': self.share_group_snapshot_member_id,
            'share_group_snapshot_id': self.share_group_snapshot_id,
            'share_id': self.share_id,
            'share_instance_id': self.share_instance_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgsm_table = utils.load_table(self.old_table_name, engine)
        engine.execute(sgsm_table.insert(sgsm_data))
    def check_upgrade(self, engine, data):
        """The member row must now live in SSI with the migrated columns."""
        ssi_table = utils.load_table(self.new_table_name, engine)
        db_result = engine.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for ssi in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(ssi, key))
        # Check that we can write string data to the new fields
        engine.execute(ssi_table.update().where(
            ssi_table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'user_id': ('u' * 255),
            'project_id': ('p' * 255),
            'share_proto': ('s' * 255),
            'size': 123456789,
            'share_group_snapshot_id': self.share_group_snapshot_id,
        }))
        # Check that table 'share_group_snapshot_members' does not
        # exist anymore
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_group_snapshot_members', engine)
    def check_downgrade(self, engine):
        """The member row must be back in the old table and out of SSI."""
        sgsm_table = utils.load_table(self.old_table_name, engine)
        db_result = engine.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(sgsm, key))
        # Check that create SGS member is absent in SSI table
        ssi_table = utils.load_table(self.new_table_name, engine)
        db_result = engine.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(0, db_result.rowcount)
@map_to_migration('238720805ce1')
class MessagesTableChecks(BaseMigrationChecks):
    """Validate creation and removal of the 'messages' table."""
    new_table_name = 'messages'

    def setup_upgrade_data(self, engine):
        # The migration itself creates the table; nothing to seed.
        pass

    def check_upgrade(self, engine, data):
        # Inserting a fully populated row proves the table exists and that
        # its columns accept values of the expected widths.
        row = {
            'id': uuidutils.generate_uuid(),
            'project_id': 'x' * 255,
            'request_id': 'x' * 255,
            'resource_type': 'x' * 255,
            'resource_id': 'y' * 36,
            'action_id': 'y' * 10,
            'detail_id': 'y' * 10,
            'message_level': 'x' * 255,
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'expires_at': datetime.datetime(2017, 7, 11, 18, 5, 58),
        }
        messages_table = utils.load_table(self.new_table_name, engine)
        engine.execute(messages_table.insert(row))

    def check_downgrade(self, engine):
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'messages', engine)
@map_to_migration('b516de97bfee')
class ProjectShareTypesQuotasChecks(BaseMigrationChecks):
    """Check the new 'project_share_type_quotas' table and the
    'share_type_id' column added to quota usages and reservations.
    """
    new_table_name = 'project_share_type_quotas'
    usages_table = 'quota_usages'
    reservations_table = 'reservations'
    st_record_id = uuidutils.generate_uuid()
    def setup_upgrade_data(self, engine):
        """Create a share type that the quota rows will reference."""
        # Create share type
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        st_table = utils.load_table('share_types', engine)
        engine.execute(st_table.insert(self.st_data))
    def check_upgrade(self, engine, data):
        """Insert rows exercising the new table and the new columns."""
        # Create share type quota
        self.quota_data = {
            'project_id': 'x' * 255,
            'resource': 'y' * 255,
            'hard_limit': 987654321,
            'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'share_type_id': self.st_record_id,
        }
        new_table = utils.load_table(self.new_table_name, engine)
        engine.execute(new_table.insert(self.quota_data))
        # Create usage record
        self.usages_data = {
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'in_use': 13,
            'reserved': 15,
        }
        usages_table = utils.load_table(self.usages_table, engine)
        engine.execute(usages_table.insert(self.usages_data))
        # Create reservation record
        self.reservations_data = {
            'uuid': uuidutils.generate_uuid(),
            'usage_id': 1,
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'delta': 13,
            'expire': datetime.datetime(2399, 4, 11, 18, 5, 58),
        }
        reservations_table = utils.load_table(self.reservations_table, engine)
        engine.execute(reservations_table.insert(self.reservations_data))
    def check_downgrade(self, engine):
        """New table removed; share_type_id dropped from both tables while
        their existing rows survive."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.new_table_name, engine)
        for table_name in (self.usages_table, self.reservations_table):
            table = utils.load_table(table_name, engine)
            db_result = engine.execute(table.select())
            self.test_case.assertGreater(db_result.rowcount, 0)
            for row in db_result:
                self.test_case.assertFalse(hasattr(row, 'share_type_id'))
@map_to_migration('829a09b0ddd4')
class FixProjectShareTypesQuotasUniqueConstraintChecks(BaseMigrationChecks):
    """Check that quota rows differing only in project_id can coexist
    after the unique constraint fix."""
    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        # A share type for the quota rows to reference.
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        st_table = utils.load_table('share_types', engine)
        engine.execute(st_table.insert(self.st_data))

    def check_upgrade(self, engine, data):
        # Both project ids must insert cleanly once the constraint is fixed.
        for project_id in ('x' * 255, 'x'):
            self.quota_data = {
                'project_id': project_id,
                'resource': 'y' * 255,
                'hard_limit': 987654321,
                'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
                'updated_at': None,
                'deleted_at': None,
                'deleted': 0,
                'share_type_id': self.st_record_id,
            }
            quotas_table = utils.load_table('project_share_type_quotas',
                                            engine)
            engine.execute(quotas_table.insert(self.quota_data))

    def check_downgrade(self, engine):
        # Nothing to verify after reverting the constraint change.
        pass
@map_to_migration('27cb96d991fa')
class NewDescriptionColumnChecks(BaseMigrationChecks):
    """Check addition/removal of the share_types.description column."""
    st_table_name = 'share_types'
    st_ids = ['share_type_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, engine):
        # One pre-existing share type without a description.
        types_table = utils.load_table(self.st_table_name, engine)
        engine.execute(types_table.insert({
            'id': self.st_ids[0],
            'name': 'name_1',
        }))

    def check_upgrade(self, engine, data):
        types_table = utils.load_table(self.st_table_name, engine)
        # Pre-existing rows gained the column...
        for row in engine.execute(types_table.select()):
            self.test_case.assertTrue(hasattr(row, 'description'))
        # ...and a new row can populate it and read it back.
        new_type = {
            'id': self.st_ids[1],
            'name': 'name_1',
            'description': 'description_1',
        }
        engine.execute(types_table.insert(new_type))
        fetched = engine.execute(types_table.select().where(
            types_table.c.id == new_type['id'])).first()
        self.test_case.assertEqual(
            new_type['description'], fetched['description'])

    def check_downgrade(self, engine):
        types_table = utils.load_table(self.st_table_name, engine)
        for row in engine.execute(types_table.select()):
            self.test_case.assertFalse(hasattr(row, 'description'))
@map_to_migration('4a482571410f')
class BackenInfoTableChecks(BaseMigrationChecks):
    """Check creation/removal of the 'backend_info' table.

    NOTE(review): the class name is missing a 'd' ('BackendInfo...').
    Kept as-is because the class is registered by revision id, but it is
    worth renaming in a follow-up.
    """
    new_table_name = 'backend_info'

    def setup_upgrade_data(self, engine):
        # The migration creates a brand new table; nothing to seed.
        pass

    def check_upgrade(self, engine, data):
        # A successful insert proves the table and its columns exist.
        row = {
            'host': 'test_host',
            'info_hash': 'test_hash',
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
        }
        backend_info_table = utils.load_table(self.new_table_name, engine)
        engine.execute(backend_info_table.insert(row))

    def check_downgrade(self, engine):
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, engine)
| 39.074074 | 79 | 0.606835 |
import abc
import datetime
from oslo_db import exception as oslo_db_exc
from oslo_utils import uuidutils
import six
from sqlalchemy import exc as sa_exc
from manila.common import constants
from manila.db.migrations import utils
class DbMigrationsData(object):
    """Dispatch ``_<stage>_..._<revision>`` attribute lookups to the
    per-revision check objects registered in ``migration_mappings``.
    """

    # revision id -> BaseMigrationChecks instance (filled by the
    # map_to_migration class decorator).
    migration_mappings = {}

    # stage prefix -> method name on the check object.
    methods_mapping = {
        'pre': 'setup_upgrade_data',
        'check': 'check_upgrade',
        'post': 'check_downgrade',
    }

    def __getattr__(self, item):
        # Dispatchable names look like '_pre_upgrade_<revision>': a leading
        # underscore (empty first split piece), a known stage, then the
        # revision id last.
        pieces = item.split('_')
        looks_dispatchable = (
            len(pieces) > 2 and pieces[0] == ''
            and pieces[1] in self.methods_mapping
        )
        if not looks_dispatchable:
            return super(DbMigrationsData, self).__getattribute__(item)
        checker = self.migration_mappings.get(pieces[-1], None)
        if checker is None:
            raise AttributeError
        checker.set_test_case(self)
        return getattr(checker, self.methods_mapping.get(pieces[1]))
def map_to_migration(revision):
    """Class decorator registering a checks class for an alembic revision.

    The class is instantiated once and stored under *revision* in
    DbMigrationsData.migration_mappings; the class itself is returned
    unchanged.
    """
    def register(cls):
        DbMigrationsData.migration_mappings[revision] = cls()
        return cls
    return register
@six.add_metaclass(abc.ABCMeta)
class BaseMigrationChecks(object):
    """Base class for per-migration data checks.

    Subclasses implement the three hooks invoked around a migration:
    seed data before upgrading, verify state after upgrading, and verify
    state after downgrading.

    Fixes: ``six.add_metaclass(abc.ABCMeta)`` was a no-op bare call inside
    the class body instead of a class decorator, and the abstract methods
    had no bodies (a syntax error); docstring bodies are added.
    """

    def __init__(self):
        # Populated later via set_test_case(); gives hooks access to the
        # running TestCase's assert* methods.
        self.test_case = None

    def set_test_case(self, test_case):
        """Attach the running TestCase so hooks can use its assertions."""
        self.test_case = test_case

    @abc.abstractmethod
    def setup_upgrade_data(self, engine):
        """Seed rows needed by check_upgrade(), before the migration runs."""

    @abc.abstractmethod
    def check_upgrade(self, engine, data):
        """Assert the schema/data state after upgrading."""

    @abc.abstractmethod
    def check_downgrade(self, engine):
        """Assert the schema/data state after downgrading."""
def fake_share(**kwargs):
    """Return a plain dict representing a 'shares' row.

    Any field can be overridden (or added) via keyword arguments.

    Fixes: the dict literal listed 'share_proto' twice ('nfs' followed by
    'fake_proto'); the duplicate is removed, keeping 'fake_proto', which
    was the value that actually took effect.
    """
    share = {
        'id': uuidutils.generate_uuid(),
        'display_name': 'fake_share',
        'display_description': 'my fake share',
        'snapshot_id': uuidutils.generate_uuid(),
        'share_proto': 'fake_proto',
        'is_public': True,
        'size': 1,
        'deleted': 'False',
        'user_id': uuidutils.generate_uuid(),
        'project_id': uuidutils.generate_uuid(),
        'snapshot_support': True,
        'task_state': None,
    }
    share.update(kwargs)
    return share
def fake_instance(share_id=None, **kwargs):
    """Return a dict representing a 'share_instances' row.

    *share_id* defaults to a freshly generated uuid; any other field can
    be overridden via keyword arguments.
    """
    timestamp = datetime.datetime(2015, 8, 10, 0, 5, 58)
    values = {
        'id': uuidutils.generate_uuid(),
        'share_id': share_id if share_id else uuidutils.generate_uuid(),
        'deleted': 'False',
        'host': 'openstack@BackendZ#PoolA',
        'status': 'available',
        'scheduled_at': timestamp,
        'launched_at': timestamp,
        'terminated_at': None,
        'access_rules_status': 'active',
    }
    values.update(kwargs)
    return values
@map_to_migration('38e632621e5a')
class ShareTypeMigrationChecks(BaseMigrationChecks):
    """Check the rename of volume_types/volume_type_extra_specs into
    share_types/share_type_extra_specs (and the reverse on downgrade).
    """
    def _get_fake_data(self):
        """Return two volume types, each with two extra specs; the type
        ids are also recorded in self.share_type_ids."""
        extra_specs = []
        self.share_type_ids = []
        volume_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'vol-type-A',
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'vol-type-B',
            },
        ]
        for idx, volume_type in enumerate(volume_types):
            extra_specs.append({
                'volume_type_id': volume_type['id'],
                'key': 'foo',
                'value': 'bar%s' % idx,
                'deleted': False,
            })
            extra_specs.append({
                'volume_type_id': volume_type['id'],
                'key': 'xyzzy',
                'value': 'spoon_%s' % idx,
                'deleted': False,
            })
            self.share_type_ids.append(volume_type['id'])
        return volume_types, extra_specs
    def setup_upgrade_data(self, engine):
        """Insert the volume type and extra spec fixtures."""
        (self.volume_types, self.extra_specs) = self._get_fake_data()
        volume_types_table = utils.load_table('volume_types', engine)
        engine.execute(volume_types_table.insert(self.volume_types))
        extra_specs_table = utils.load_table('volume_type_extra_specs',
                                             engine)
        engine.execute(extra_specs_table.insert(self.extra_specs))
    def check_upgrade(self, engine, data):
        """Old tables are gone; rows live under the share_type names."""
        share_types_table = utils.load_table('share_types', engine)
        share_types_specs_table = utils.load_table(
            'share_type_extra_specs', engine)
        # The renamed-away tables must no longer be loadable.
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'volume_types', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'volume_type_extra_specs', engine)
        # All fixture ids survived the rename...
        share_type_ids = [
            st['id'] for st in engine.execute(share_types_table.select())
            if st['id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(sorted(self.share_type_ids),
                                   sorted(share_type_ids))
        # ...along with all four extra specs (2 types x 2 specs each).
        extra_specs = [
            {'type': es['share_type_id'], 'key': es['spec_key']}
            for es in engine.execute(share_types_specs_table.select())
            if es['share_type_id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(4, len(extra_specs))
    def check_downgrade(self, engine):
        """Symmetric check: rows are back under the volume_type names."""
        volume_types_table = utils.load_table('volume_types', engine)
        volume_types_specs_table = utils.load_table(
            'volume_type_extra_specs', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'share_types', engine)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'share_type_extra_specs', engine)
        volume_type_ids = [
            vt['id'] for vt in engine.execute(volume_types_table.select())
            if vt['id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(sorted(self.share_type_ids),
                                   sorted(volume_type_ids))
        extra_specs = [
            {'type': es['volume_type_id'], 'key': es['key']}
            for es in engine.execute(volume_types_specs_table.select())
            if es['volume_type_id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(4, len(extra_specs))
@map_to_migration('5077ffcc5f1c')
class ShareInstanceMigrationChecks(BaseMigrationChecks):
    """Check the split of shares, snapshots and export locations into
    per-instance tables (share_instances, share_snapshot_instances and
    share_instance_export_locations).
    """
    def _prepare_fake_data(self):
        """Build one share with one snapshot and one export location; the
        migration must re-home their instance-level data."""
        time = datetime.datetime(2017, 1, 12, 12, 12, 12)
        self.share = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'fake_status',
            'scheduled_at': time,
            'launched_at': time,
            'terminated_at': time,
            'availability_zone': 'fake_az'}
        self.share_snapshot = {
            'id': uuidutils.generate_uuid(),
            'status': 'fake_status',
            'share_id': self.share['id'],
            'progress': 'fake_progress'}
        self.share_export_location = {
            'id': 1001,
            'share_id': self.share['id']}
    def setup_upgrade_data(self, engine):
        """Insert the share, snapshot and export location fixtures."""
        self._prepare_fake_data()
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(self.share))
        snapshot_table = utils.load_table('share_snapshots', engine)
        engine.execute(snapshot_table.insert(self.share_snapshot))
        el_table = utils.load_table('share_export_locations', engine)
        engine.execute(el_table.insert(self.share_export_location))
    def check_upgrade(self, engine, data):
        """Instance columns moved off the parent tables onto *_instances."""
        share_table = utils.load_table('shares', engine)
        s_instance_table = utils.load_table('share_instances', engine)
        ss_instance_table = utils.load_table('share_snapshot_instances',
                                             engine)
        snapshot_table = utils.load_table('share_snapshots', engine)
        instance_el_table = utils.load_table('share_instance_export_locations',
                                             engine)
        # Instance-specific columns are gone from 'shares'...
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'share_network_id', 'share_server_id',
                       'availability_zone']:
            rows = engine.execute(share_table.select())
            for row in rows:
                self.test_case.assertFalse(hasattr(row, column))
        # ...and their values were copied onto the new instance row.
        s_instance_record = engine.execute(s_instance_table.select().where(
            s_instance_table.c.share_id == self.share['id'])).first()
        self.test_case.assertTrue(s_instance_record is not None)
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'availability_zone']:
            self.test_case.assertEqual(self.share[column],
                                       s_instance_record[column])
        # Same split for snapshots -> snapshot instances.
        for column in ['status', 'progress']:
            rows = engine.execute(snapshot_table.select())
            for row in rows:
                self.test_case.assertFalse(hasattr(row, column))
        ss_instance_record = engine.execute(ss_instance_table.select().where(
            ss_instance_table.c.snapshot_id == self.share_snapshot['id'])
        ).first()
        self.test_case.assertEqual(s_instance_record['id'],
                                   ss_instance_record['share_instance_id'])
        for column in ['status', 'progress']:
            self.test_case.assertEqual(self.share_snapshot[column],
                                       ss_instance_record[column])
        # Export locations are now keyed by share instance, not by share.
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_export_locations', engine)
        el_record = engine.execute(instance_el_table.select().where(
            instance_el_table.c.share_instance_id == s_instance_record['id'])
        ).first()
        self.test_case.assertFalse(el_record is None)
        self.test_case.assertTrue(hasattr(el_record, 'share_instance_id'))
        self.test_case.assertFalse(hasattr(el_record, 'share_id'))
    def check_downgrade(self, engine):
        """Instance tables are gone and columns are back on the parents."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_snapshot_instances', engine)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_instances', engine)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_instance_export_locations', engine)
        share_table = utils.load_table('shares', engine)
        snapshot_table = utils.load_table('share_snapshots', engine)
        share_el_table = utils.load_table('share_export_locations',
                                          engine)
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'share_network_id', 'share_server_id',
                       'availability_zone']:
            rows = engine.execute(share_table.select())
            for row in rows:
                self.test_case.assertTrue(hasattr(row, column))
        for column in ['status', 'progress']:
            rows = engine.execute(snapshot_table.select())
            for row in rows:
                self.test_case.assertTrue(hasattr(row, column))
        rows = engine.execute(share_el_table.select())
        for row in rows:
            self.test_case.assertFalse(hasattr(row, 'share_instance_id'))
            self.test_case.assertTrue(
                hasattr(row, 'share_id'))
@map_to_migration('1f0bd302c1a6')
class AvailabilityZoneMigrationChecks(BaseMigrationChecks):
    """Check migration of service AZ names into the availability_zones
    table, with services pointing at them by id."""
    valid_az_names = ('az1', 'az2')

    def _get_service_data(self, options):
        # Merge the per-service *options* over the common service fields.
        service = {
            'binary': 'manila-share',
            'topic': 'share',
            'disabled': False,
            'report_count': '100',
        }
        service.update(options)
        return service

    def setup_upgrade_data(self, engine):
        """Create three services spread over two AZ names."""
        services_table = utils.load_table('services', engine)
        fixtures = (
            {'deleted': 0, 'host': 'fake1', 'availability_zone': 'az1'},
            {'deleted': 0, 'host': 'fake2', 'availability_zone': 'az1'},
            {'deleted': 1, 'host': 'fake3', 'availability_zone': 'az2'},
        )
        for options in fixtures:
            engine.execute(services_table.insert(
                self._get_service_data(options)))

    def check_upgrade(self, engine, _):
        """AZ rows exist with uuid ids; services reference them by id."""
        az_table = utils.load_table('availability_zones', engine)
        for az in engine.execute(az_table.select()):
            self.test_case.assertTrue(uuidutils.is_uuid_like(az.id))
            self.test_case.assertIn(az.name, self.valid_az_names)
            self.test_case.assertEqual('False', az.deleted)
        services_table = utils.load_table('services', engine)
        for service in engine.execute(services_table.select()):
            self.test_case.assertTrue(
                uuidutils.is_uuid_like(service.availability_zone_id)
            )

    def check_downgrade(self, engine):
        """Services carry the plain AZ name again."""
        services_table = utils.load_table('services', engine)
        for service in engine.execute(services_table.select()):
            self.test_case.assertIn(
                service.availability_zone, self.valid_az_names
            )
@map_to_migration('dda6de06349')
class ShareInstanceExportLocationMetadataChecks(BaseMigrationChecks):
    """Check the new is_admin_only/uuid columns on export locations and
    the new export-location metadata table."""
    el_table_name = 'share_instance_export_locations'
    elm_table_name = 'share_instance_export_locations_metadata'
    def setup_upgrade_data(self, engine):
        """Create two shares, two instances and one export location each."""
        # Create shares
        share_fixture = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}]
        share_table = utils.load_table('shares', engine)
        for fixture in share_fixture:
            engine.execute(share_table.insert(fixture))
        # Create share instances
        si_fixture = [
            {'id': 'foo_share_instance_id_oof',
             'share_id': share_fixture[0]['id']},
            {'id': 'bar_share_instance_id_rab',
             'share_id': share_fixture[1]['id']},
        ]
        si_table = utils.load_table('share_instances', engine)
        for fixture in si_fixture:
            engine.execute(si_table.insert(fixture))
        # Create export locations
        el_fixture = [
            {'id': 1, 'path': '/1', 'share_instance_id': si_fixture[0]['id']},
            {'id': 2, 'path': '/2', 'share_instance_id': si_fixture[1]['id']},
        ]
        el_table = utils.load_table(self.el_table_name, engine)
        for fixture in el_fixture:
            engine.execute(el_table.insert(fixture))
    def check_upgrade(self, engine, data):
        """New columns have sane defaults; metadata rows round-trip."""
        el_table = utils.load_table(
            'share_instance_export_locations', engine)
        for el in engine.execute(el_table.select()):
            self.test_case.assertTrue(hasattr(el, 'is_admin_only'))
            self.test_case.assertTrue(hasattr(el, 'uuid'))
            self.test_case.assertEqual(False, el.is_admin_only)
            self.test_case.assertTrue(uuidutils.is_uuid_like(el.uuid))
        # Write one metadata row per export location and verify each is
        # stored with the expected full column set and values.
        el_metadata = [
            {'key': 'foo_key', 'value': 'foo_value', 'export_location_id': 1},
            {'key': 'bar_key', 'value': 'bar_value', 'export_location_id': 2},
        ]
        elm_table = utils.load_table(self.elm_table_name, engine)
        engine.execute(elm_table.insert(el_metadata))
        for el_meta_datum in el_metadata:
            el_id = el_meta_datum['export_location_id']
            records = engine.execute(elm_table.select().where(
                elm_table.c.export_location_id == el_id))
            self.test_case.assertEqual(1, records.rowcount)
            record = records.first()
            expected_keys = (
                'id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
                'export_location_id', 'key', 'value',
            )
            self.test_case.assertEqual(len(expected_keys), len(record.keys()))
            for key in expected_keys:
                self.test_case.assertIn(key, record.keys())
            for k, v in el_meta_datum.items():
                self.test_case.assertTrue(hasattr(record, k))
                self.test_case.assertEqual(v, getattr(record, k))
    def check_downgrade(self, engine):
        """Columns and the metadata table are removed again."""
        el_table = utils.load_table(
            'share_instance_export_locations', engine)
        for el in engine.execute(el_table.select()):
            self.test_case.assertFalse(hasattr(el, 'is_admin_only'))
            self.test_case.assertFalse(hasattr(el, 'uuid'))
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.elm_table_name, engine)
@map_to_migration('344c1ac4747f')
class AccessRulesStatusMigrationChecks(BaseMigrationChecks):
    """Check migration of per-rule state onto the instance-level
    'access_rules_status' field (and back onto rules on downgrade).
    """

    def _get_instance_data(self, data):
        # Mirrors the helper shape used by sibling checks; currently there
        # are no common instance defaults to merge in.
        base_dict = {}
        base_dict.update(data)
        return base_dict

    def setup_upgrade_data(self, engine):
        """Create one share with four instances whose rule sets map to the
        four interesting aggregated statuses."""
        share_table = utils.load_table('shares', engine)
        share = {
            'id': 1,
            'share_proto': "NFS",
            'size': 0,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake',
        }
        engine.execute(share_table.insert(share))
        # All rules healthy -> instance becomes 'active'.
        rules1 = [
            {'id': 'r1', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r2', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting',
             'deleted': 'False'},
        ]
        # One errored rule -> instance becomes 'error'.
        rules2 = [
            {'id': 'r4', 'share_instance_id': 2, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r5', 'share_instance_id': 2, 'state': 'error',
             'deleted': 'False'},
        ]
        # A pending ('new') rule -> instance becomes 'out_of_sync'.
        rules3 = [
            {'id': 'r6', 'share_instance_id': 3, 'state': 'new',
             'deleted': 'False'},
        ]
        # Instance 4 has no rules and is deleting -> no status expected.
        instance_fixtures = [
            {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1,
             'status': 'available', 'rules': rules1},
            {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1,
             'status': 'available', 'rules': rules2},
            {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1,
             'status': 'available', 'rules': rules3},
            {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1,
             'status': 'deleting', 'rules': []},
        ]
        share_instances_table = utils.load_table('share_instances', engine)
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', engine)
        for fixture in instance_fixtures:
            rules = fixture.pop('rules')
            engine.execute(share_instances_table.insert(fixture))
            for rule in rules:
                engine.execute(share_instances_rules_table.insert(rule))

    def check_upgrade(self, engine, _):
        """Each instance's aggregated access_rules_status is correct."""
        instances_table = utils.load_table('share_instances', engine)
        valid_statuses = {
            '1': 'active',
            '2': 'error',
            '3': 'out_of_sync',
            '4': None,
        }
        # NOTE: this previously used the Python `in` operator on a Column
        # (`instances_table.c.id in valid_statuses.keys()`), which is a
        # hash-based containment test evaluating to False, so the WHERE
        # clause matched nothing and the assertions below never ran.
        # `.in_()` builds the intended SQL IN clause.
        instances = engine.execute(instances_table.select().where(
            instances_table.c.id.in_(valid_statuses.keys())))
        for instance in instances:
            self.test_case.assertEqual(valid_statuses[instance['id']],
                                       instance['access_rules_status'])

    def check_downgrade(self, engine):
        """Rule rows regain a per-rule 'state' consistent with the
        aggregated status they were squashed into."""
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', engine)
        valid_statuses = {
            '1': 'active',
            '2': 'error',
            '3': 'error',
            '4': None,
        }
        for rule in engine.execute(share_instances_rules_table.select()):
            valid_state = valid_statuses[rule['share_instance_id']]
            self.test_case.assertEqual(valid_state, rule['state'])
@map_to_migration('293fac1130ca')
class ShareReplicationMigrationChecks(BaseMigrationChecks):
    """Check the replication columns: 'replication_type' on shares and
    'replica_state' on share instances."""
    valid_share_display_names = ('FAKE_SHARE_1', 'FAKE_SHARE_2',
                                 'FAKE_SHARE_3')
    valid_share_ids = []
    valid_replication_types = ('writable', 'readable', 'dr')
    def _load_tables_and_get_data(self, engine):
        """Return the fixture shares and their instances as row lists."""
        share_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        shares = engine.execute(
            share_table.select().where(share_table.c.id.in_(
                self.valid_share_ids))
        ).fetchall()
        share_instances = engine.execute(share_instances_table.select().where(
            share_instances_table.c.share_id.in_(self.valid_share_ids))
        ).fetchall()
        return shares, share_instances
    def setup_upgrade_data(self, engine):
        """Create three shares, each with a single instance."""
        shares_data = []
        instances_data = []
        self.valid_share_ids = []
        for share_display_name in self.valid_share_display_names:
            share_ref = fake_share(display_name=share_display_name)
            shares_data.append(share_ref)
            instances_data.append(fake_instance(share_id=share_ref['id']))
        shares_table = utils.load_table('shares', engine)
        for share in shares_data:
            self.valid_share_ids.append(share['id'])
            engine.execute(shares_table.insert(share))
        shares_instances_table = utils.load_table('share_instances', engine)
        for share_instance in instances_data:
            engine.execute(shares_instances_table.insert(share_instance))
    def check_upgrade(self, engine, _):
        """All fixtures survive and both new columns exist."""
        shares, share_instances = self._load_tables_and_get_data(engine)
        share_ids = [share['id'] for share in shares]
        share_instance_share_ids = [share_instance['share_id'] for
                                    share_instance in share_instances]
        # No fixture rows were lost by the migration.
        for sid in self.valid_share_ids:
            self.test_case.assertIn(sid, share_ids)
            self.test_case.assertIn(sid, share_instance_share_ids)
        for share in shares:
            self.test_case.assertIn(share['display_name'],
                                    self.valid_share_display_names)
            self.test_case.assertEqual('False', share.deleted)
            self.test_case.assertTrue(hasattr(share, 'replication_type'))
        for share_instance in share_instances:
            self.test_case.assertTrue(hasattr(share_instance, 'replica_state'))
    def check_downgrade(self, engine):
        """All fixtures survive and both new columns are gone."""
        shares, share_instances = self._load_tables_and_get_data(engine)
        share_ids = [share['id'] for share in shares]
        share_instance_share_ids = [share_instance['share_id'] for
                                    share_instance in share_instances]
        for sid in self.valid_share_ids:
            self.test_case.assertIn(sid, share_ids)
            self.test_case.assertIn(sid, share_instance_share_ids)
        for share in shares:
            self.test_case.assertEqual('False', share.deleted)
            self.test_case.assertIn(share.display_name,
                                    self.valid_share_display_names)
            self.test_case.assertFalse(hasattr(share, 'replication_type'))
        for share_instance in share_instances:
            self.test_case.assertEqual('False', share_instance.deleted)
            self.test_case.assertIn(share_instance.share_id,
                                    self.valid_share_ids)
            self.test_case.assertFalse(
                hasattr(share_instance, 'replica_state'))
@map_to_migration('5155c7077f99')
class NetworkAllocationsNewLabelColumnChecks(BaseMigrationChecks):
    """Check the new 'label' column (plus admin-network columns) on the
    network_allocations table."""
    table_name = 'network_allocations'
    ids = ['fake_network_allocation_id_%d' % i for i in (1, 2, 3)]
    def setup_upgrade_data(self, engine):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'foo_share_server_id'
        # Share network the server belongs to.
        share_network_data = {
            'id': 'foo_share_network_id',
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', engine)
        engine.execute(sn_table.insert(share_network_data))
        # Share server owning the allocations.
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))
        # Two pre-existing allocations; they should default to label 'user'.
        network_allocations = [
            {'id': self.ids[0],
             'share_server_id': share_server_id,
             'ip_address': '1.1.1.1'},
            {'id': self.ids[1],
             'share_server_id': share_server_id,
             'ip_address': '2.2.2.2'},
        ]
        na_table = utils.load_table(self.table_name, engine)
        for network_allocation in network_allocations:
            engine.execute(na_table.insert(network_allocation))
    def check_upgrade(self, engine, data):
        """Existing rows default to 'user'; an 'admin' row can be added
        with the new network columns populated and read back."""
        na_table = utils.load_table(self.table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual(na.label, 'user')
        # Insert an admin allocation exercising every new column.
        network_allocations = [
            {'id': self.ids[2],
             'share_server_id': na.share_server_id,
             'ip_address': '3.3.3.3',
             'label': 'admin',
             'network_type': 'vlan',
             'segmentation_id': 1005,
             'ip_version': 4,
             'cidr': '240.0.0.0/16'},
        ]
        engine.execute(na_table.insert(network_allocations))
        for na in engine.execute(
                na_table.select().where(na_table.c.label == 'admin')):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual('admin', na.label)
            for col_name in ('network_type', 'segmentation_id', 'ip_version',
                             'cidr'):
                self.test_case.assertTrue(hasattr(na, col_name))
                self.test_case.assertEqual(
                    network_allocations[0][col_name], getattr(na, col_name))
    def check_downgrade(self, engine):
        """All new columns are dropped; allocations themselves survive."""
        na_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(na_table.select())
        self.test_case.assertTrue(db_result.rowcount >= len(self.ids))
        for na in db_result:
            for col_name in ('label', 'network_type', 'segmentation_id',
                             'ip_version', 'cidr'):
                self.test_case.assertFalse(hasattr(na, col_name))
@map_to_migration('eb6d5544cbbd')
class ShareSnapshotInstanceNewProviderLocationColumnChecks(
        BaseMigrationChecks):
    """Check the provider_location column on share_snapshot_instances."""
    table_name = 'share_snapshot_instances'

    def setup_upgrade_data(self, engine):
        """Build the share -> instance -> snapshot -> snapshot-instance
        dependency chain."""
        engine.execute(utils.load_table('shares', engine).insert(
            {'id': 'new_share_id'}))
        engine.execute(utils.load_table('share_instances', engine).insert(
            {'id': 'new_share_instance_id',
             'share_id': 'new_share_id'}))
        engine.execute(utils.load_table('share_snapshots', engine).insert(
            {'id': 'new_snapshot_id',
             'share_id': 'new_share_id'}))
        engine.execute(
            utils.load_table('share_snapshot_instances', engine).insert(
                {'id': 'new_snapshot_instance_id',
                 'snapshot_id': 'new_snapshot_id',
                 'share_instance_id': 'new_share_instance_id'}))

    def _fetch_fixture_rows(self, engine):
        # Select the single snapshot instance created in setup.
        table = utils.load_table(self.table_name, engine)
        return engine.execute(table.select().where(
            table.c.id == 'new_snapshot_instance_id'))

    def check_upgrade(self, engine, data):
        rows = self._fetch_fixture_rows(engine)
        self.test_case.assertTrue(rows.rowcount > 0)
        for row in rows:
            self.test_case.assertTrue(hasattr(row, 'provider_location'))
            self.test_case.assertEqual('new_snapshot_id', row.snapshot_id)

    def check_downgrade(self, engine):
        rows = self._fetch_fixture_rows(engine)
        self.test_case.assertTrue(rows.rowcount > 0)
        for row in rows:
            self.test_case.assertFalse(hasattr(row, 'provider_location'))
            self.test_case.assertEqual('new_snapshot_id', row.snapshot_id)
@map_to_migration('221a83cfd85b')
class ShareNetworksFieldLengthChecks(BaseMigrationChecks):
    """Check widening of user/project id columns to 255 characters."""

    def setup_upgrade_data(self, engine):
        """Insert one share network and one security service fixture."""
        user_id = '123456789123456789'
        project_id = 'project_id'

        sn_table = utils.load_table('share_networks', engine)
        engine.execute(sn_table.insert({
            'id': 'foo_share_network_id_2',
            'user_id': user_id,
            'project_id': project_id,
        }))

        ss_table = utils.load_table('security_services', engine)
        engine.execute(ss_table.insert({
            'id': 'foo_security_services_id',
            'type': 'foo_type',
            'project_id': project_id,
        }))

    def _check_length_for_table_columns(self, table_name, engine,
                                        cols, length):
        """Assert every column in *cols* has the given VARCHAR length."""
        table = utils.load_table(table_name, engine)
        rows = engine.execute(table.select())
        self.test_case.assertTrue(rows.rowcount > 0)
        for column in cols:
            self.test_case.assertEqual(table.columns.get(column).type.length,
                                       length)

    def check_upgrade(self, engine, data):
        """Columns must be widened to 255 characters."""
        self._check_length_for_table_columns('share_networks', engine,
                                             ('user_id', 'project_id'), 255)
        self._check_length_for_table_columns('security_services', engine,
                                             ('project_id',), 255)

    def check_downgrade(self, engine):
        """Columns must shrink back to their original 36 characters."""
        self._check_length_for_table_columns('share_networks', engine,
                                             ('user_id', 'project_id'), 36)
        self._check_length_for_table_columns('security_services', engine,
                                             ('project_id',), 36)
@map_to_migration('fdfb668d19e1')
class NewGatewayColumnChecks(BaseMigrationChecks):
    """Check the 'gateway' column added to allocations and share networks.

    The migration adds a nullable 'gateway' column to both the
    'network_allocations' and 'share_networks' tables.
    """
    na_table_name = 'network_allocations'
    sn_table_name = 'share_networks'
    # Fixture row ids: two allocations inserted before the upgrade plus one
    # inserted after it; one share network before plus one after.
    na_ids = ['network_allocation_id_fake_%d' % i for i in (1, 2, 3)]
    sn_ids = ['share_network_id_fake_%d' % i for i in (1, 2)]
    def setup_upgrade_data(self, engine):
        """Insert a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'share_server_id_foo'
        # Share network the server (and indirectly the allocations) hang off.
        share_network_data = {
            'id': self.sn_ids[0],
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table(self.sn_table_name, engine)
        engine.execute(sn_table.insert(share_network_data))
        # Share server referenced by the allocations below.
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))
        # Two allocations created before the 'gateway' column exists.
        network_allocations = [
            {
                'id': self.na_ids[0],
                'share_server_id': share_server_id,
                'ip_address': '1.1.1.1',
            },
            {
                'id': self.na_ids[1],
                'share_server_id': share_server_id,
                'ip_address': '2.2.2.2',
            },
        ]
        na_table = utils.load_table(self.na_table_name, engine)
        engine.execute(na_table.insert(network_allocations))
    def check_upgrade(self, engine, data):
        """Both tables expose 'gateway' and accept explicit values for it."""
        na_table = utils.load_table(self.na_table_name, engine)
        # Pre-existing allocations must now carry the attribute.
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
        # A new allocation can be inserted with an explicit gateway.
        # NOTE: 'na' deliberately leaks from the loop above - the new row
        # reuses the last fixture row's share_server_id.
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
            },
        ]
        engine.execute(na_table.insert(network_allocations))
        for na in engine.execute(
                na_table.select().where(na_table.c.gateway == '3.3.3.1')):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
            self.test_case.assertEqual(network_allocations[0]['gateway'],
                                       getattr(na, 'gateway'))
        # Same checks against the share_networks table.
        sn_table = utils.load_table(self.sn_table_name, engine)
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
        # 'sn' likewise leaks from the loop above to reuse user/project ids.
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo',
            },
        ]
        engine.execute(sn_table.insert(share_networks))
        for sn in engine.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo')):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
            self.test_case.assertEqual(share_networks[0]['gateway'],
                                       getattr(sn, 'gateway'))
    def check_downgrade(self, engine):
        """'gateway' must be gone from both tables; fixture rows remain."""
        for table_name, ids in ((self.na_table_name, self.na_ids),
                                (self.sn_table_name, self.sn_ids)):
            table = utils.load_table(table_name, engine)
            db_result = engine.execute(table.select())
            self.test_case.assertTrue(db_result.rowcount >= len(ids))
            for record in db_result:
                self.test_case.assertFalse(hasattr(record, 'gateway'))
@map_to_migration('e8ea58723178')
class RemoveHostFromDriverPrivateDataChecks(BaseMigrationChecks):
    """Check removal of the 'host' column from driver private data."""

    table_name = 'drivers_private_data'
    host_column_name = 'host'

    def setup_upgrade_data(self, engine):
        """Store one private-data record that still carries a host."""
        table = utils.load_table(self.table_name, engine)
        engine.execute(table.insert({
            'created_at': datetime.datetime(2016, 7, 14, 22, 31, 22),
            'deleted': 0,
            'host': 'host1',
            'entity_uuid': 'entity_uuid1',
            'key': 'key1',
            'value': 'value1'
        }))

    def check_upgrade(self, engine, data):
        """The 'host' column must be dropped by the upgrade."""
        table = utils.load_table(self.table_name, engine)
        for record in engine.execute(table.select()):
            self.test_case.assertFalse(
                hasattr(record, self.host_column_name))

    def check_downgrade(self, engine):
        """Downgrade restores 'host', populated with 'unknown'."""
        table = utils.load_table(self.table_name, engine)
        for record in engine.execute(table.select()):
            self.test_case.assertTrue(
                hasattr(record, self.host_column_name))
            self.test_case.assertEqual(
                'unknown', record[self.host_column_name])
@map_to_migration('493eaffd79e1')
class NewMTUColumnChecks(BaseMigrationChecks):
    """Check the 'mtu' column added to allocations and share networks."""

    na_table_name = 'network_allocations'
    sn_table_name = 'share_networks'
    # Fixture ids: two allocations created before the upgrade plus one
    # after; one share network before plus one after.
    na_ids = ['network_allocation_id_fake_3_%d' % i for i in (1, 2, 3)]
    sn_ids = ['share_network_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, engine):
        """Insert a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'share_server_id_foo_2'
        # Share network the server hangs off.
        share_network_data = {
            'id': self.sn_ids[0],
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table(self.sn_table_name, engine)
        engine.execute(sn_table.insert(share_network_data))
        # Share server referenced by the allocations below.
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', engine)
        engine.execute(ss_table.insert(share_server_data))
        # Two allocations that predate the 'mtu' column.
        network_allocations = [
            {
                'id': self.na_ids[0],
                'share_server_id': share_server_id,
                'ip_address': '1.1.1.1',
            },
            {
                'id': self.na_ids[1],
                'share_server_id': share_server_id,
                'ip_address': '2.2.2.2',
            },
        ]
        na_table = utils.load_table(self.na_table_name, engine)
        engine.execute(na_table.insert(network_allocations))

    def check_upgrade(self, engine, data):
        """Both tables expose 'mtu'; old rows get NULL, new rows may set it."""
        na_table = utils.load_table(self.na_table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
        # A new allocation can be inserted with an explicit MTU.
        # NOTE: 'na' deliberately leaks from the loop above - the new row
        # reuses the last fixture row's share_server_id.
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
                'mtu': 1509,
            },
        ]
        engine.execute(na_table.insert(network_allocations))
        # NOTE: compare against the integer value; the column holds an
        # integer and the previous string comparison ('1509') only worked
        # via implicit backend type coercion.
        for na in engine.execute(
                na_table.select().where(na_table.c.mtu == 1509)):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
            self.test_case.assertEqual(network_allocations[0]['mtu'],
                                       getattr(na, 'mtu'))
        # Pre-existing allocations must have a NULL mtu.
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
            if na['id'] == self.na_ids[2]:
                self.test_case.assertEqual(network_allocations[0]['mtu'],
                                           getattr(na, 'mtu'))
            else:
                self.test_case.assertIsNone(na['mtu'])
        # Same checks against the share_networks table.
        sn_table = utils.load_table(self.sn_table_name, engine)
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
        # 'sn' likewise leaks from the loop above to reuse user/project ids.
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo_2',
                'mtu': 1509,
            },
        ]
        engine.execute(sn_table.insert(share_networks))
        for sn in engine.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo_2')):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
            self.test_case.assertEqual(share_networks[0]['mtu'],
                                       getattr(sn, 'mtu'))
        # Pre-existing share networks must have a NULL mtu.
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
            if sn['id'] == self.sn_ids[1]:
                # NOTE: verify against the share-network fixture; the code
                # previously referenced network_allocations[0]['mtu'] here,
                # which only passed because both fixtures use 1509.
                self.test_case.assertEqual(share_networks[0]['mtu'],
                                           getattr(sn, 'mtu'))
            else:
                self.test_case.assertIsNone(sn['mtu'])

    def check_downgrade(self, engine):
        """'mtu' must be gone from both tables; fixture rows remain."""
        for table_name, ids in ((self.na_table_name, self.na_ids),
                                (self.sn_table_name, self.sn_ids)):
            table = utils.load_table(table_name, engine)
            db_result = engine.execute(table.select())
            self.test_case.assertTrue(db_result.rowcount >= len(ids))
            for record in db_result:
                self.test_case.assertFalse(hasattr(record, 'mtu'))
@map_to_migration('63809d875e32')
class AddAccessKeyToShareAccessMapping(BaseMigrationChecks):
    """Check the 'access_key' column added to share access rules."""

    table_name = 'share_access_map'
    access_key_column_name = 'access_key'

    def setup_upgrade_data(self, engine):
        """Create a share, an instance, a rule and its instance mapping."""
        share = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "CEPHFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        engine.execute(utils.load_table('shares', engine).insert(share))

        instance = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share['id'],
            'status': 'available',
            'access_rules_status': 'active'
        }
        engine.execute(
            utils.load_table('share_instances', engine).insert(instance))

        access_rule = {
            'id': uuidutils.generate_uuid(),
            'share_id': share['id'],
            'access_type': 'cephx',
            'access_to': 'alice',
            'deleted': 'False'
        }
        engine.execute(
            utils.load_table(self.table_name, engine).insert(access_rule))

        instance_mapping = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': instance['id'],
            'access_id': access_rule['id'],
            'deleted': 'False'
        }
        engine.execute(
            utils.load_table(
                'share_instance_access_map', engine).insert(instance_mapping))

    def check_upgrade(self, engine, data):
        """Every access rule gains an 'access_key' attribute."""
        table = utils.load_table(self.table_name, engine)
        for record in engine.execute(table.select()):
            self.test_case.assertTrue(
                hasattr(record, self.access_key_column_name))

    def check_downgrade(self, engine):
        """The 'access_key' attribute disappears on downgrade."""
        table = utils.load_table(self.table_name, engine)
        for record in engine.execute(table.select()):
            self.test_case.assertFalse(
                hasattr(record, self.access_key_column_name))
@map_to_migration('48a7beae3117')
class MoveShareTypeIdToInstancesCheck(BaseMigrationChecks):
    """Check moving 'share_type_id' from shares onto share instances."""

    some_shares = [
        {
            'id': 's1',
            'share_type_id': 't1',
        },
        {
            'id': 's2',
            'share_type_id': 't2',
        },
        {
            'id': 's3',
            'share_type_id': 't3',
        },
    ]
    share_ids = [x['id'] for x in some_shares]

    some_instances = [
        {
            'id': 'i1',
            'share_id': 's3',
        },
        {
            'id': 'i2',
            'share_id': 's2',
        },
        {
            'id': 'i3',
            'share_id': 's2',
        },
        {
            'id': 'i4',
            'share_id': 's1',
        },
    ]
    instance_ids = [x['id'] for x in some_instances]

    some_share_types = [
        {'id': 't1'},
        {'id': 't2'},
        {'id': 't3'},
    ]

    def setup_upgrade_data(self, engine):
        """Insert the share types, shares and instances defined above."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        share_types_table = utils.load_table('share_types', engine)

        for stype in self.some_share_types:
            engine.execute(share_types_table.insert(stype))

        for share in self.some_shares:
            engine.execute(shares_table.insert(share))

        for instance in self.some_instances:
            engine.execute(share_instances_table.insert(instance))

    def check_upgrade(self, engine, data):
        """Instances inherit their share's type; shares lose the column."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)

        # NOTE: use a SQL IN clause. The previous python expression
        # "column in list" always evaluated to False, so the query matched
        # no rows and these assertions were never exercised.
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            share = engine.execute(shares_table.select().where(
                instance['share_id'] == shares_table.c.id)).first()

            self.test_case.assertEqual(
                next((x for x in self.some_shares if share['id'] == x['id']),
                     None)['share_type_id'],
                instance['share_type_id'])

        # NOTE: query the shares table (the original queried
        # share_instances by mistake) - shares must no longer carry a
        # 'share_type_id' after the upgrade.
        for share in engine.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            self.test_case.assertNotIn('share_type_id', share)

    def check_downgrade(self, engine):
        """The column moves back from instances onto shares."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)

        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertNotIn('share_type_id', instance)

        for share in engine.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            self.test_case.assertEqual(
                next((x for x in self.some_shares if share['id'] == x['id']),
                     None)['share_type_id'],
                share['share_type_id'])
@map_to_migration('3e7d62517afa')
class CreateFromSnapshotExtraSpecAndShareColumn(BaseMigrationChecks):
    """Check the 'create_share_from_snapshot_support' spec and column.

    The migration adds the extra spec to every share type (mirroring the
    type's 'snapshot_support' value) and a matching column to shares.
    """
    expected_attr = constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT
    snap_support_attr = constants.ExtraSpecs.SNAPSHOT_SUPPORT
    def _get_fake_data(self):
        """Build two share types (with extra specs), shares and instances."""
        extra_specs = []
        shares = []
        share_instances = []
        share_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'share-type-1',
                'is_public': False,
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'share-type-2',
                'is_public': True,
            },
        ]
        # One type without and one with snapshot support; opposite DHSS.
        snapshot_support = (False, True)
        dhss = ('True', 'False')
        for idx, share_type in enumerate(share_types):
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'snapshot_support',
                'spec_value': snapshot_support[idx],
                'deleted': 0,
            })
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'driver_handles_share_servers',
                'spec_value': dhss[idx],
                'deleted': 0,
            })
            share = fake_share(snapshot_support=snapshot_support[idx])
            shares.append(share)
            share_instances.append(
                fake_instance(share_id=share['id'],
                              share_type_id=share_type['id'])
            )
        return share_types, extra_specs, shares, share_instances
    def setup_upgrade_data(self, engine):
        """Insert the generated fixtures into their respective tables."""
        (self.share_types, self.extra_specs, self.shares,
         self.share_instances) = self._get_fake_data()
        share_types_table = utils.load_table('share_types', engine)
        engine.execute(share_types_table.insert(self.share_types))
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        engine.execute(extra_specs_table.insert(self.extra_specs))
        shares_table = utils.load_table('shares', engine)
        engine.execute(shares_table.insert(self.shares))
        share_instances_table = utils.load_table('share_instances', engine)
        engine.execute(share_instances_table.insert(self.share_instances))
    def check_upgrade(self, engine, data):
        """New spec exists per type; new column mirrors snapshot_support."""
        share_type_ids = [st['id'] for st in self.share_types]
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        share_types_table = utils.load_table('share_types', engine)
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        # All fixture shares must still be present.
        shares_in_db = engine.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        # The new share column must equal the share's snapshot_support.
        for share in shares_in_db:
            self.test_case.assertTrue(hasattr(share, self.expected_attr))
            self.test_case.assertEqual(share[self.snap_support_attr],
                                       share[self.expected_attr])
        share_types_in_db = (
            engine.execute(share_types_table.select()).fetchall())
        share_type_ids_in_db = [s['id'] for s in share_types_in_db]
        for share_type_id in share_type_ids:
            self.test_case.assertIn(share_type_id, share_type_ids_in_db)
        extra_specs_in_db = (
            engine.execute(extra_specs_table.select().where(
                extra_specs_table.c.deleted == 0)).fetchall())
        self.test_case.assertGreaterEqual(len(extra_specs_in_db),
                                          len(self.extra_specs))
        # Each type gains exactly one new spec whose value matches the
        # type's existing snapshot_support spec.
        for share_type_id in share_type_ids:
            new_extra_spec = [x for x in extra_specs_in_db
                              if x['spec_key'] == self.expected_attr
                              and x['share_type_id'] == share_type_id]
            snapshot_support_spec = [
                x for x in extra_specs_in_db
                if x['spec_key'] == self.snap_support_attr
                and x['share_type_id'] == share_type_id]
            self.test_case.assertEqual(1, len(new_extra_spec))
            self.test_case.assertEqual(1, len(snapshot_support_spec))
            self.test_case.assertEqual(
                snapshot_support_spec[0]['spec_value'],
                new_extra_spec[0]['spec_value'])
    def check_downgrade(self, engine):
        """The new column and extra spec must be removed again."""
        share_type_ids = [st['id'] for st in self.share_types]
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        share_types_table = utils.load_table('share_types', engine)
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        # All fixture shares must still be present.
        shares_in_db = engine.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        for share in shares_in_db:
            self.test_case.assertFalse(hasattr(share, self.expected_attr))
        share_types_in_db = (
            engine.execute(share_types_table.select()).fetchall())
        share_type_ids_in_db = [s['id'] for s in share_types_in_db]
        for share_type_id in share_type_ids:
            self.test_case.assertIn(share_type_id, share_type_ids_in_db)
        extra_specs_in_db = (
            engine.execute(extra_specs_table.select().where(
                extra_specs_table.c.deleted == 0)).fetchall())
        self.test_case.assertGreaterEqual(len(extra_specs_in_db),
                                          len(self.extra_specs))
        # No share type may still carry the new extra spec.
        for share_type_id in share_type_ids:
            new_extra_spec = [x for x in extra_specs_in_db
                              if x['spec_key'] == self.expected_attr
                              and x['share_type_id'] == share_type_id]
            self.test_case.assertEqual(0, len(new_extra_spec))
@map_to_migration('87ce15c59bbe')
class RevertToSnapshotShareColumn(BaseMigrationChecks):
    """Check the 'revert_to_snapshot_support' column added to shares."""
    expected_attr = constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT
    def _get_fake_data(self):
        """Build two share types (with extra specs), shares and instances."""
        extra_specs = []
        shares = []
        share_instances = []
        share_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'revert-1',
                'is_public': False,
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'revert-2',
                'is_public': True,
            },
        ]
        # One type without and one with snapshot support; opposite DHSS.
        snapshot_support = (False, True)
        dhss = ('True', 'False')
        for idx, share_type in enumerate(share_types):
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'snapshot_support',
                'spec_value': snapshot_support[idx],
                'deleted': 0,
            })
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'driver_handles_share_servers',
                'spec_value': dhss[idx],
                'deleted': 0,
            })
            share = fake_share(snapshot_support=snapshot_support[idx])
            shares.append(share)
            share_instances.append(
                fake_instance(share_id=share['id'],
                              share_type_id=share_type['id'])
            )
        return share_types, extra_specs, shares, share_instances
    def setup_upgrade_data(self, engine):
        """Insert the generated fixtures into their respective tables."""
        (self.share_types, self.extra_specs, self.shares,
         self.share_instances) = self._get_fake_data()
        share_types_table = utils.load_table('share_types', engine)
        engine.execute(share_types_table.insert(self.share_types))
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             engine)
        engine.execute(extra_specs_table.insert(self.extra_specs))
        shares_table = utils.load_table('shares', engine)
        engine.execute(shares_table.insert(self.shares))
        share_instances_table = utils.load_table('share_instances', engine)
        engine.execute(share_instances_table.insert(self.share_instances))
    def check_upgrade(self, engine, data):
        """Every non-deleted share gets the new column, defaulted False."""
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        shares_in_db = engine.execute(shares_table.select().where(
            shares_table.c.deleted == 'False')).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        for share in shares_in_db:
            self.test_case.assertTrue(hasattr(share, self.expected_attr))
            self.test_case.assertEqual(False, share[self.expected_attr])
    def check_downgrade(self, engine):
        """The column is removed again on downgrade; rows remain."""
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', engine)
        shares_in_db = engine.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)
        for share in shares_in_db:
            self.test_case.assertFalse(hasattr(share, self.expected_attr))
@map_to_migration('95e3cf760840')
class RemoveNovaNetIdColumnFromShareNetworks(BaseMigrationChecks):
    """Check removal of 'nova_net_id' from the share networks table."""

    table_name = 'share_networks'
    nova_net_column_name = 'nova_net_id'

    def setup_upgrade_data(self, engine):
        """Insert a share network that still references a nova net."""
        fixture = {
            'id': 'foo_share_network_id_3',
            'user_id': 'user_id',
            'project_id': 'project_id',
            'nova_net_id': 'foo_nova_net_id',
        }
        table = utils.load_table(self.table_name, engine)
        engine.execute(table.insert(fixture))

    def check_upgrade(self, engine, data):
        """Upgrade drops the 'nova_net_id' column entirely."""
        table = utils.load_table(self.table_name, engine)
        records = engine.execute(table.select())
        self.test_case.assertGreater(records.rowcount, 0)
        for record in records:
            self.test_case.assertFalse(
                hasattr(record, self.nova_net_column_name))

    def check_downgrade(self, engine):
        """Downgrade restores the column with NULL values."""
        table = utils.load_table(self.table_name, engine)
        records = engine.execute(table.select())
        self.test_case.assertGreater(records.rowcount, 0)
        for record in records:
            self.test_case.assertTrue(
                hasattr(record, self.nova_net_column_name))
            self.test_case.assertIsNone(record[self.nova_net_column_name])
@map_to_migration('54667b9cade7')
class RestoreStateToShareInstanceAccessMap(BaseMigrationChecks):
    """Check the per-rule 'state' added to share instance access mappings.

    The migration adds a 'state' column to 'share_instance_access_map',
    derives its value from the owning instance's old 'access_rules_status',
    and collapses the transitional instance statuses into the new set.
    """
    # old instance access_rules_status -> expected new per-rule 'state'
    new_instance_mapping_state = {
        constants.STATUS_ACTIVE: constants.STATUS_ACTIVE,
        constants.SHARE_INSTANCE_RULES_SYNCING:
            constants.ACCESS_STATE_QUEUED_TO_APPLY,
        constants.STATUS_OUT_OF_SYNC: constants.ACCESS_STATE_QUEUED_TO_APPLY,
        'updating': constants.ACCESS_STATE_QUEUED_TO_APPLY,
        'updating_multiple': constants.ACCESS_STATE_QUEUED_TO_APPLY,
        constants.SHARE_INSTANCE_RULES_ERROR: constants.ACCESS_STATE_ERROR,
    }
    # old instance access_rules_status -> expected new instance status
    new_access_rules_status = {
        constants.STATUS_ACTIVE: constants.STATUS_ACTIVE,
        constants.STATUS_OUT_OF_SYNC: constants.SHARE_INSTANCE_RULES_SYNCING,
        'updating': constants.SHARE_INSTANCE_RULES_SYNCING,
        'updating_multiple': constants.SHARE_INSTANCE_RULES_SYNCING,
        constants.SHARE_INSTANCE_RULES_ERROR:
            constants.SHARE_INSTANCE_RULES_ERROR,
    }
    @staticmethod
    def generate_share_instance(sid, access_rules_status):
        """Return a share-instance fixture with the given rules status."""
        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': sid,
            'status': constants.STATUS_AVAILABLE,
            'access_rules_status': access_rules_status
        }
        return share_instance_data
    @staticmethod
    def generate_share_instance_access_map(share_access_data_id,
                                           share_instance_id):
        """Return an access-rule-to-instance mapping fixture."""
        share_instance_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': share_instance_id,
            'access_id': share_access_data_id,
            'deleted': 'False'
        }
        return share_instance_access_data
    def setup_upgrade_data(self, engine):
        """Insert one share, five instances with different old statuses,
        one access rule, and a mapping of that rule to every instance."""
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': 'fake',
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(share_data))
        share_instances = [
            self.generate_share_instance(
                share_data['id'], constants.STATUS_ACTIVE),
            self.generate_share_instance(
                share_data['id'], constants.STATUS_OUT_OF_SYNC),
            self.generate_share_instance(
                share_data['id'], constants.STATUS_ERROR),
            self.generate_share_instance(
                share_data['id'], 'updating'),
            self.generate_share_instance(
                share_data['id'], 'updating_multiple'),
        ]
        # Remembered so the checks below can single out the two
        # transitional-status instances.
        self.updating_share_instance = share_instances[3]
        self.updating_multiple_share_instance = share_instances[4]
        share_instance_table = utils.load_table('share_instances', engine)
        for share_instance_data in share_instances:
            engine.execute(share_instance_table.insert(share_instance_data))
        share_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_data['id'],
            'access_type': 'fake',
            'access_to': 'alice',
            'deleted': 'False'
        }
        share_access_table = utils.load_table('share_access_map', engine)
        engine.execute(share_access_table.insert(share_access_data))
        share_instance_access_data = []
        for share_instance in share_instances:
            sia_map = self.generate_share_instance_access_map(
                share_access_data['id'], share_instance['id'])
            share_instance_access_data.append(sia_map)
        share_instance_access_table = utils.load_table(
            'share_instance_access_map', engine)
        for sia_map in share_instance_access_data:
            engine.execute(share_instance_access_table.insert(sia_map))
    def check_upgrade(self, engine, data):
        """Mappings gain 'state' derived from the owning instance."""
        share_instance_table = utils.load_table('share_instances', engine)
        sia_table = utils.load_table('share_instance_access_map', engine)
        for rule in engine.execute(sia_table.select()):
            self.test_case.assertTrue(hasattr(rule, 'state'))
            # Look up the owning instance to derive the expected state.
            correlated_share_instances = engine.execute(
                share_instance_table.select().where(
                    share_instance_table.c.id == rule['share_instance_id']))
            access_rules_status = getattr(correlated_share_instances.first(),
                                          'access_rules_status')
            self.test_case.assertEqual(
                self.new_instance_mapping_state[access_rules_status],
                rule['state'])
        # Transitional statuses must be collapsed on the instances too.
        for instance in engine.execute(share_instance_table.select()):
            self.test_case.assertTrue(instance['access_rules_status']
                                      not in ('updating',
                                              'updating_multiple',
                                              constants.STATUS_OUT_OF_SYNC))
            if instance['id'] in (self.updating_share_instance['id'],
                                  self.updating_multiple_share_instance['id']):
                self.test_case.assertEqual(
                    constants.SHARE_INSTANCE_RULES_SYNCING,
                    instance['access_rules_status'])
    def check_downgrade(self, engine):
        """'state' is dropped and the old out-of-sync status restored."""
        share_instance_table = utils.load_table('share_instances', engine)
        sia_table = utils.load_table('share_instance_access_map', engine)
        for rule in engine.execute(sia_table.select()):
            self.test_case.assertFalse(hasattr(rule, 'state'))
        for instance in engine.execute(share_instance_table.select()):
            if instance['id'] in (self.updating_share_instance['id'],
                                  self.updating_multiple_share_instance['id']):
                self.test_case.assertEqual(
                    constants.STATUS_OUT_OF_SYNC,
                    instance['access_rules_status'])
@map_to_migration('e9f79621d83f')
class AddCastRulesToReadonlyToInstances(BaseMigrationChecks):
    """Check 'cast_rules_to_readonly' added and backfilled on instances."""

    share_type = {
        'id': uuidutils.generate_uuid(),
    }
    shares = [
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_READABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_READABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_WRITABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
        },
    ]
    share_ids = [x['id'] for x in shares]
    # The only instance expected to end up with the flag set to True: an
    # in-sync, available replica of a 'readable' share.
    correct_instance = {
        'id': uuidutils.generate_uuid(),
        'share_id': share_ids[1],
        'replica_state': constants.REPLICA_STATE_IN_SYNC,
        'status': constants.STATUS_AVAILABLE,
        'share_type_id': share_type['id'],
    }
    instances = [
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[0],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[0],
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[1],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        correct_instance,
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[2],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[2],
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[3],
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
    ]
    # NOTE: only bind instance_ids here. The previous
    # "instance_ids = share_ids = [...]" also rebound share_ids,
    # clobbering the list of share ids defined above.
    instance_ids = [x['id'] for x in instances]

    def setup_upgrade_data(self, engine):
        """Insert the share type, shares and instances defined above."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)
        share_types_table = utils.load_table('share_types', engine)

        engine.execute(share_types_table.insert(self.share_type))

        for share in self.shares:
            engine.execute(shares_table.insert(share))

        for instance in self.instances:
            engine.execute(share_instances_table.insert(instance))

    def check_upgrade(self, engine, data):
        """Only the in-sync readable replica must get the flag set True."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)

        # NOTE: use a SQL IN clause. The previous python expression
        # "column in list" always evaluated to False, so the query matched
        # no rows and these assertions were never exercised.
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):

            self.test_case.assertIn('cast_rules_to_readonly', instance)

            share = engine.execute(shares_table.select().where(
                instance['share_id'] == shares_table.c.id)).first()

            if (instance['replica_state'] != constants.REPLICA_STATE_ACTIVE and
                    share['replication_type'] ==
                    constants.REPLICATION_TYPE_READABLE and
                    instance['status'] != constants.STATUS_REPLICATION_CHANGE):

                self.test_case.assertTrue(instance['cast_rules_to_readonly'])
                self.test_case.assertEqual(instance['id'],
                                           self.correct_instance['id'])
            else:
                self.test_case.assertEqual(
                    False, instance['cast_rules_to_readonly'])

    def check_downgrade(self, engine):
        """The column is removed again on downgrade."""
        share_instances_table = utils.load_table('share_instances', engine)

        for instance in engine.execute(share_instances_table.select()):
            self.test_case.assertNotIn('cast_rules_to_readonly', instance)
@map_to_migration('03da71c0e321')
class ShareGroupMigrationChecks(BaseMigrationChecks):
    def setup_upgrade_data(self, engine):
        """Create a full consistency-group fixture graph.

        Inserts a share type, a consistency group, a CG/share-type
        mapping, a share with one instance inside the group, and a
        cgsnapshot with one member referencing that instance.
        """
        # Share type
        self.share_type_id = uuidutils.generate_uuid()
        st_fixture = {
            'deleted': "False",
            'id': self.share_type_id,
        }
        st_table = utils.load_table('share_types', engine)
        engine.execute(st_table.insert(st_fixture))
        # Consistency group
        self.cg_id = uuidutils.generate_uuid()
        cg_fixture = {
            'deleted': "False",
            'id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cg_table = utils.load_table('consistency_groups', engine)
        engine.execute(cg_table.insert(cg_fixture))
        # CG <-> share type mapping
        self.mapping_id = uuidutils.generate_uuid()
        mapping_fixture = {
            'deleted': "False",
            'id': self.mapping_id,
            'consistency_group_id': self.cg_id,
            'share_type_id': self.share_type_id,
        }
        mapping_table = utils.load_table(
            'consistency_group_share_type_mappings', engine)
        engine.execute(mapping_table.insert(mapping_fixture))
        # Share belonging to the consistency group
        self.share_id = uuidutils.generate_uuid()
        share_fixture = {
            'deleted': "False",
            'id': self.share_id,
            'consistency_group_id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        share_table = utils.load_table('shares', engine)
        engine.execute(share_table.insert(share_fixture))
        # Instance of that share
        self.share_instance_id = uuidutils.generate_uuid()
        share_instance_fixture = {
            'deleted': "False",
            'share_type_id': self.share_type_id,
            'id': self.share_instance_id,
            'share_id': self.share_id,
            'cast_rules_to_readonly': False,
        }
        share_instance_table = utils.load_table('share_instances', engine)
        engine.execute(share_instance_table.insert(share_instance_fixture))
        # Snapshot of the consistency group
        self.cgsnapshot_id = uuidutils.generate_uuid()
        cg_snap_fixture = {
            'deleted': "False",
            'id': self.cgsnapshot_id,
            'consistency_group_id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cgsnapshots_table = utils.load_table('cgsnapshots', engine)
        engine.execute(cgsnapshots_table.insert(cg_snap_fixture))
        # Member of that snapshot, tied to the share instance
        self.cgsnapshot_member_id = uuidutils.generate_uuid()
        cg_snap_member_fixture = {
            'deleted': "False",
            'id': self.cgsnapshot_member_id,
            'cgsnapshot_id': self.cgsnapshot_id,
            'share_type_id': self.share_type_id,
            'share_instance_id': self.share_instance_id,
            'share_id': self.share_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cgsnapshot_members_table = utils.load_table(
            'cgsnapshot_members', engine)
        engine.execute(cgsnapshot_members_table.insert(cg_snap_member_fixture))
    def check_upgrade(self, engine, data):
        """Verify consistency-group rows migrated into the share-group tables.

        Each fixture row inserted during setup must appear exactly once in
        the corresponding renamed table, with foreign keys carried over and
        the snapshot-source columns left NULL.
        """
        # 'consistency_groups' row is now in 'share_groups'.
        sg_table = utils.load_table("share_groups", engine)
        db_result = engine.execute(sg_table.select().where(
            sg_table.c.id == self.cg_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sg = db_result.first()
        self.test_case.assertIsNone(sg['source_share_group_snapshot_id'])
        # The share now references its group via 'share_group_id'.
        share_table = utils.load_table("shares", engine)
        share_result = engine.execute(share_table.select().where(
            share_table.c.id == self.share_id))
        self.test_case.assertEqual(1, share_result.rowcount)
        share = share_result.first()
        self.test_case.assertEqual(self.cg_id, share['share_group_id'])
        self.test_case.assertIsNone(
            share['source_share_group_snapshot_member_id'])
        # Share-type mapping moved to 'share_group_share_type_mappings'.
        mapping_table = utils.load_table(
            "share_group_share_type_mappings", engine)
        mapping_result = engine.execute(mapping_table.select().where(
            mapping_table.c.id == self.mapping_id))
        self.test_case.assertEqual(1, mapping_result.rowcount)
        mapping_record = mapping_result.first()
        self.test_case.assertEqual(
            self.cg_id, mapping_record['share_group_id'])
        self.test_case.assertEqual(
            self.share_type_id, mapping_record['share_type_id'])
        # 'cgsnapshots' row is now in 'share_group_snapshots'.
        sgs_table = utils.load_table("share_group_snapshots", engine)
        db_result = engine.execute(sgs_table.select().where(
            sgs_table.c.id == self.cgsnapshot_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sgs = db_result.first()
        self.test_case.assertEqual(self.cg_id, sgs['share_group_id'])
        # Snapshot member moved to 'share_group_snapshot_members' and its
        # 'share_type_id' column was dropped by the migration.
        sgsm_table = utils.load_table("share_group_snapshot_members", engine)
        db_result = engine.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.cgsnapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sgsm = db_result.first()
        self.test_case.assertEqual(
            self.cgsnapshot_id, sgsm['share_group_snapshot_id'])
        self.test_case.assertNotIn('share_type_id', sgsm)
    def check_downgrade(self, engine):
        """Verify share-group rows returned to the consistency-group tables.

        The mirror image of check_upgrade(): every fixture row must be found
        under the original table/column names after the downgrade.
        """
        # Back in 'consistency_groups'.
        cg_table = utils.load_table("consistency_groups", engine)
        db_result = engine.execute(cg_table.select().where(
            cg_table.c.id == self.cg_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        cg = db_result.first()
        self.test_case.assertIsNone(cg['source_cgsnapshot_id'])
        # The share references its group via 'consistency_group_id' again.
        share_table = utils.load_table("shares", engine)
        share_result = engine.execute(share_table.select().where(
            share_table.c.id == self.share_id))
        self.test_case.assertEqual(1, share_result.rowcount)
        share = share_result.first()
        self.test_case.assertEqual(self.cg_id, share['consistency_group_id'])
        self.test_case.assertIsNone(
            share['source_cgsnapshot_member_id'])
        # Mapping is back in 'consistency_group_share_type_mappings'.
        mapping_table = utils.load_table(
            "consistency_group_share_type_mappings", engine)
        mapping_result = engine.execute(mapping_table.select().where(
            mapping_table.c.id == self.mapping_id))
        self.test_case.assertEqual(1, mapping_result.rowcount)
        cg_st_mapping = mapping_result.first()
        self.test_case.assertEqual(
            self.cg_id, cg_st_mapping['consistency_group_id'])
        self.test_case.assertEqual(
            self.share_type_id, cg_st_mapping['share_type_id'])
        # Snapshot is back in 'cgsnapshots'.
        cg_snapshots_table = utils.load_table("cgsnapshots", engine)
        db_result = engine.execute(cg_snapshots_table.select().where(
            cg_snapshots_table.c.id == self.cgsnapshot_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        cgsnap = db_result.first()
        self.test_case.assertEqual(self.cg_id, cgsnap['consistency_group_id'])
        # Member is back in 'cgsnapshot_members', including the restored
        # 'share_type_id' column and its original value.
        cg_snap_member_table = utils.load_table("cgsnapshot_members", engine)
        db_result = engine.execute(cg_snap_member_table.select().where(
            cg_snap_member_table.c.id == self.cgsnapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        member = db_result.first()
        self.test_case.assertEqual(
            self.cgsnapshot_id, member['cgsnapshot_id'])
        self.test_case.assertIn('share_type_id', member)
        self.test_case.assertEqual(self.share_type_id, member['share_type_id'])
@map_to_migration('927920b37453')
class ShareGroupSnapshotMemberNewProviderLocationColumnChecks(
        BaseMigrationChecks):
    """Check the new 'provider_location' column on snapshot members."""

    table_name = 'share_group_snapshot_members'
    # Fixture IDs, generated once at class-definition time.
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert the full chain of rows a snapshot member depends on."""
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', engine)
        engine.execute(sgt_table.insert(sgt_data))
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', engine)
        engine.execute(sg_table.insert(sg_data))
        share_data = {
            'id': self.share_id,
            'share_group_id': self.share_group_id,
        }
        s_table = utils.load_table('shares', engine)
        engine.execute(s_table.insert(share_data))
        share_instance_data = {
            'id': self.share_instance_id,
            'share_id': share_data['id'],
            'cast_rules_to_readonly': False,
        }
        si_table = utils.load_table('share_instances', engine)
        engine.execute(si_table.insert(share_instance_data))
        sgs_data = {
            'id': self.share_group_snapshot_id,
            'share_group_id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgs_table = utils.load_table('share_group_snapshots', engine)
        engine.execute(sgs_table.insert(sgs_data))
        sgsm_data = {
            'id': self.share_group_snapshot_member_id,
            'share_group_snapshot_id': self.share_group_snapshot_id,
            'share_id': self.share_id,
            'share_instance_id': self.share_instance_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgsm_table = utils.load_table(self.table_name, engine)
        engine.execute(sgsm_table.insert(sgsm_data))

    def check_upgrade(self, engine, data):
        """The column exists after upgrade and accepts a 255-char value."""
        sgsm_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            self.test_case.assertTrue(hasattr(sgsm, 'provider_location'))
        # Write a maximum-length (255) value to exercise the new column.
        engine.execute(sgsm_table.update().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'provider_location': ('z' * 255),
        }))

    def check_downgrade(self, engine):
        """The column is gone after downgrade; the row itself survives."""
        sgsm_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            self.test_case.assertFalse(hasattr(sgsm, 'provider_location'))
@map_to_migration('d5db24264f5c')
class ShareGroupNewConsistentSnapshotSupportColumnChecks(BaseMigrationChecks):
    """Check the new 'consistent_snapshot_support' column on share groups."""

    table_name = 'share_groups'
    new_attr_name = 'consistent_snapshot_support'
    # Fixture IDs, generated once at class-definition time.
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert one share group and the group type it requires."""
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', engine)
        engine.execute(sgt_table.insert(sgt_data))
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', engine)
        engine.execute(sg_table.insert(sg_data))

    def check_upgrade(self, engine, data):
        """The column exists and only accepts None, 'pool', or 'host'."""
        sg_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertTrue(hasattr(sg, self.new_attr_name))
        # These values must be accepted...
        for value in (None, 'pool', 'host'):
            engine.execute(sg_table.update().where(
                sg_table.c.id == self.share_group_id,
            ).values({self.new_attr_name: value}))
        # ...while anything else must be rejected at the DB level.
        for value in ('', 'fake', 'pool1', 'host1', '1pool', '1host'):
            self.test_case.assertRaises(
                oslo_db_exc.DBError,
                engine.execute,
                sg_table.update().where(
                    sg_table.c.id == self.share_group_id
                ).values({self.new_attr_name: value})
            )

    def check_downgrade(self, engine):
        """The column is gone after downgrade; the row itself survives."""
        sg_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertFalse(hasattr(sg, self.new_attr_name))
@map_to_migration('7d142971c4ef')
class ReservationExpireIndexChecks(BaseMigrationChecks):
    """Check that the (deleted, expire) index on 'reservations' is
    present after upgrade and absent after downgrade."""

    def setup_upgrade_data(self, engine):
        # Inspecting table indexes needs no fixture rows.
        pass

    def _get_reservations_expire_delete_index(self, engine):
        """Return the index covering exactly ('deleted', 'expire'), or None."""
        table = utils.load_table('reservations', engine)
        wanted = ['deleted', 'expire']
        return next(
            (index for index in table.indexes
             if sorted(index.columns.keys()) == wanted),
            None,
        )

    def check_upgrade(self, engine, data):
        """The index must exist after the upgrade."""
        self.test_case.assertTrue(
            self._get_reservations_expire_delete_index(engine))

    def check_downgrade(self, engine):
        """The index must be gone after the downgrade."""
        self.test_case.assertFalse(
            self._get_reservations_expire_delete_index(engine))
@map_to_migration('5237b6625330')
class ShareGroupNewAvailabilityZoneIDColumnChecks(BaseMigrationChecks):
    """Check the new 'availability_zone_id' column on share groups."""

    table_name = 'share_groups'
    new_attr_name = 'availability_zone_id'
    # Fixture IDs, generated once at class-definition time.
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    availability_zone_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert an AZ, a share group type, and a share group."""
        az_data = {
            'id': self.availability_zone_id,
            'name': uuidutils.generate_uuid(),
        }
        az_table = utils.load_table('availability_zones', engine)
        engine.execute(az_table.insert(az_data))
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', engine)
        engine.execute(sgt_table.insert(sgt_data))
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', engine)
        engine.execute(sg_table.insert(sg_data))

    def check_upgrade(self, engine, data):
        """The column exists and accepts NULL or a valid AZ id."""
        sg_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertTrue(hasattr(sg, self.new_attr_name))
        for value in (None, self.availability_zone_id):
            engine.execute(sg_table.update().where(
                sg_table.c.id == self.share_group_id,
            ).values({self.new_attr_name: value}))

    def check_downgrade(self, engine):
        """The column is gone after downgrade; the row itself survives."""
        sg_table = utils.load_table(self.table_name, engine)
        db_result = engine.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertFalse(hasattr(sg, self.new_attr_name))
@map_to_migration('31252d671ae5')
class SquashSGSnapshotMembersAndSSIModelsChecks(BaseMigrationChecks):
    """Check the merge of 'share_group_snapshot_members' into
    'share_snapshot_instances' (and its reversal on downgrade)."""

    old_table_name = 'share_group_snapshot_members'
    new_table_name = 'share_snapshot_instances'
    # Fixture IDs, generated once at class-definition time.
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()
    # Columns that must be present in whichever table holds the member row.
    keys = (
        'user_id', 'project_id', 'size', 'share_proto',
        'share_group_snapshot_id',
    )

    def setup_upgrade_data(self, engine):
        """Insert the full chain of rows a snapshot member depends on."""
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', engine)
        engine.execute(sgt_table.insert(sgt_data))
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', engine)
        engine.execute(sg_table.insert(sg_data))
        share_data = {
            'id': self.share_id,
            'share_group_id': self.share_group_id,
        }
        s_table = utils.load_table('shares', engine)
        engine.execute(s_table.insert(share_data))
        share_instance_data = {
            'id': self.share_instance_id,
            'share_id': share_data['id'],
            'cast_rules_to_readonly': False,
        }
        si_table = utils.load_table('share_instances', engine)
        engine.execute(si_table.insert(share_instance_data))
        sgs_data = {
            'id': self.share_group_snapshot_id,
            'share_group_id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgs_table = utils.load_table('share_group_snapshots', engine)
        engine.execute(sgs_table.insert(sgs_data))
        sgsm_data = {
            'id': self.share_group_snapshot_member_id,
            'share_group_snapshot_id': self.share_group_snapshot_id,
            'share_id': self.share_id,
            'share_instance_id': self.share_instance_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgsm_table = utils.load_table(self.old_table_name, engine)
        engine.execute(sgsm_table.insert(sgsm_data))

    def check_upgrade(self, engine, data):
        """The member row moved into 'share_snapshot_instances' and the
        old table no longer exists."""
        ssi_table = utils.load_table(self.new_table_name, engine)
        db_result = engine.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for ssi in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(ssi, key))
        # All merged columns must be writable with max-sized values.
        engine.execute(ssi_table.update().where(
            ssi_table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'user_id': ('u' * 255),
            'project_id': ('p' * 255),
            'share_proto': ('s' * 255),
            'size': 123456789,
            'share_group_snapshot_id': self.share_group_snapshot_id,
        }))
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_group_snapshot_members', engine)

    def check_downgrade(self, engine):
        """The member row moved back into the old table and was removed
        from 'share_snapshot_instances'."""
        sgsm_table = utils.load_table(self.old_table_name, engine)
        db_result = engine.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(sgsm, key))
        ssi_table = utils.load_table(self.new_table_name, engine)
        db_result = engine.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(0, db_result.rowcount)
@map_to_migration('238720805ce1')
class MessagesTableChecks(BaseMigrationChecks):
    """Check creation (upgrade) and removal (downgrade) of the 'messages'
    table."""

    # Table introduced by this migration.
    new_table_name = 'messages'

    def setup_upgrade_data(self, engine):
        """No pre-existing data is required for this migration."""
        pass

    def check_upgrade(self, engine, data):
        """Verify a fully-populated row can be inserted into the new table."""
        message_data = {
            'id': uuidutils.generate_uuid(),
            'project_id': 'x' * 255,
            'request_id': 'x' * 255,
            'resource_type': 'x' * 255,
            'resource_id': 'y' * 36,
            'action_id': 'y' * 10,
            'detail_id': 'y' * 10,
            'message_level': 'x' * 255,
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'expires_at': datetime.datetime(2017, 7, 11, 18, 5, 58),
        }
        new_table = utils.load_table(self.new_table_name, engine)
        engine.execute(new_table.insert(message_data))

    def check_downgrade(self, engine):
        """Verify the table is gone after downgrade."""
        # Consistency fix: reference the class attribute instead of
        # repeating the 'messages' literal, so the table name lives in
        # exactly one place (matches check_upgrade above).
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, engine)
@map_to_migration('b516de97bfee')
class ProjectShareTypesQuotasChecks(BaseMigrationChecks):
    """Check the new per-share-type quota table and the 'share_type_id'
    column added to quota usages and reservations."""

    new_table_name = 'project_share_type_quotas'
    usages_table = 'quota_usages'
    reservations_table = 'reservations'
    # Share type referenced by the quota/usage/reservation rows.
    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert the share type the quota rows will reference."""
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        st_table = utils.load_table('share_types', engine)
        engine.execute(st_table.insert(self.st_data))

    def check_upgrade(self, engine, data):
        """Rows carrying a share_type_id can be written to all three
        tables after the upgrade."""
        self.quota_data = {
            'project_id': 'x' * 255,
            'resource': 'y' * 255,
            'hard_limit': 987654321,
            'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'share_type_id': self.st_record_id,
        }
        new_table = utils.load_table(self.new_table_name, engine)
        engine.execute(new_table.insert(self.quota_data))
        self.usages_data = {
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'in_use': 13,
            'reserved': 15,
        }
        usages_table = utils.load_table(self.usages_table, engine)
        engine.execute(usages_table.insert(self.usages_data))
        self.reservations_data = {
            'uuid': uuidutils.generate_uuid(),
            'usage_id': 1,
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'delta': 13,
            'expire': datetime.datetime(2399, 4, 11, 18, 5, 58),
        }
        reservations_table = utils.load_table(self.reservations_table, engine)
        engine.execute(reservations_table.insert(self.reservations_data))

    def check_downgrade(self, engine):
        """The quota table is dropped and 'share_type_id' is removed from
        usages and reservations, while their rows survive."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.new_table_name, engine)
        for table_name in (self.usages_table, self.reservations_table):
            table = utils.load_table(table_name, engine)
            db_result = engine.execute(table.select())
            self.test_case.assertGreater(db_result.rowcount, 0)
            for row in db_result:
                self.test_case.assertFalse(hasattr(row, 'share_type_id'))
@map_to_migration('829a09b0ddd4')
class FixProjectShareTypesQuotasUniqueConstraintChecks(BaseMigrationChecks):
    """Check the fixed unique constraint on 'project_share_type_quotas'."""

    # Share type referenced by both quota rows.
    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, engine):
        """Insert the share type the quota rows will reference."""
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        st_table = utils.load_table('share_types', engine)
        engine.execute(st_table.insert(self.st_data))

    def check_upgrade(self, engine, data):
        """Two quota rows that differ only in project_id ('x' * 255 vs 'x')
        must both insert successfully after the constraint fix.
        """
        # NOTE(review): presumably the pre-fix constraint rejected this
        # pair -- confirm against migration 829a09b0ddd4 itself.
        for project_id in ('x' * 255, 'x'):
            self.quota_data = {
                'project_id': project_id,
                'resource': 'y' * 255,
                'hard_limit': 987654321,
                'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
                'updated_at': None,
                'deleted_at': None,
                'deleted': 0,
                'share_type_id': self.st_record_id,
            }
            new_table = utils.load_table('project_share_type_quotas', engine)
            engine.execute(new_table.insert(self.quota_data))

    def check_downgrade(self, engine):
        """Nothing to verify on downgrade."""
        pass
@map_to_migration('27cb96d991fa')
class NewDescriptionColumnChecks(BaseMigrationChecks):
    """Check the new 'description' column on share types."""

    st_table_name = 'share_types'
    # Two share-type IDs: [0] is inserted before upgrade, [1] after.
    st_ids = ['share_type_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, engine):
        """Insert one share type without a description."""
        share_type_data = {
            'id': self.st_ids[0],
            'name': 'name_1',
        }
        st_table = utils.load_table(self.st_table_name, engine)
        engine.execute(st_table.insert(share_type_data))

    def check_upgrade(self, engine, data):
        """Existing rows gained the column, and a new row can be inserted
        with a description that reads back unchanged."""
        st_table = utils.load_table(self.st_table_name, engine)
        for na in engine.execute(st_table.select()):
            self.test_case.assertTrue(hasattr(na, 'description'))
        share_type_data_ds = {
            'id': self.st_ids[1],
            'name': 'name_1',
            'description': 'description_1',
        }
        engine.execute(st_table.insert(share_type_data_ds))
        st = engine.execute(st_table.select().where(
            share_type_data_ds['id'] == st_table.c.id)).first()
        self.test_case.assertEqual(
            share_type_data_ds['description'], st['description'])

    def check_downgrade(self, engine):
        """The column is gone from every row after downgrade."""
        table = utils.load_table(self.st_table_name, engine)
        db_result = engine.execute(table.select())
        for record in db_result:
            self.test_case.assertFalse(hasattr(record, 'description'))
@map_to_migration('4a482571410f')
class BackenInfoTableChecks(BaseMigrationChecks):
    """Check creation (upgrade) and removal (downgrade) of 'backend_info'.

    NOTE(review): the class name misspells 'Backend'; left unchanged here
    because renaming would alter the public interface.
    """

    new_table_name = 'backend_info'

    def setup_upgrade_data(self, engine):
        """No pre-existing data is required for this migration."""
        pass

    def check_upgrade(self, engine, data):
        """Verify a fully-populated row can be inserted into the new table."""
        data = {
            'host': 'test_host',
            'info_hash': 'test_hash',
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
        }
        new_table = utils.load_table(self.new_table_name, engine)
        engine.execute(new_table.insert(data))

    def check_downgrade(self, engine):
        """Verify the table is gone after downgrade."""
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, engine)
| true | true |
f72ddd7241194452b55a3968e1f8f4807cdc48eb | 1,166 | py | Python | pact/test/test_constants.py | dwang7/pact-python | da03551e812508652e062fc4ba6071f1119e5bf2 | [
"MIT"
] | null | null | null | pact/test/test_constants.py | dwang7/pact-python | da03551e812508652e062fc4ba6071f1119e5bf2 | [
"MIT"
] | null | null | null | pact/test/test_constants.py | dwang7/pact-python | da03551e812508652e062fc4ba6071f1119e5bf2 | [
"MIT"
] | null | null | null | from unittest import TestCase
from mock import patch
from .. import constants
class mock_service_exeTestCase(TestCase):
    """Tests for the platform-specific mock-service executable name."""

    def setUp(self):
        """Patch the `os` module seen by `constants` so os.name is fakeable."""
        super(mock_service_exeTestCase, self).setUp()
        self.addCleanup(patch.stopall)
        self.mock_os = patch.object(constants, 'os', autospec=True).start()

    def test_other(self):
        """Non-Windows platforms get the bare executable name."""
        self.mock_os.name = 'posix'
        self.assertEqual(constants.mock_service_exe(), 'pact-mock-service')

    def test_windows(self):
        """Windows ('nt') gets the .bat wrapper."""
        self.mock_os.name = 'nt'
        self.assertEqual(constants.mock_service_exe(), 'pact-mock-service.bat')
class provider_verifier_exeTestCase(TestCase):
    """Tests for the platform-specific provider-verifier executable name."""

    def setUp(self):
        """Patch the `os` module seen by `constants` so os.name is fakeable."""
        super(provider_verifier_exeTestCase, self).setUp()
        self.addCleanup(patch.stopall)
        self.mock_os = patch.object(constants, 'os', autospec=True).start()

    def test_other(self):
        """Non-Windows platforms get the bare executable name."""
        self.mock_os.name = 'posix'
        self.assertEqual(
            constants.provider_verifier_exe(), 'pact-provider-verifier')

    def test_windows(self):
        """Windows ('nt') gets the .bat wrapper."""
        self.mock_os.name = 'nt'
        self.assertEqual(
            constants.provider_verifier_exe(), 'pact-provider-verifier.bat')
| 30.684211 | 79 | 0.679245 | from unittest import TestCase
from mock import patch
from .. import constants
class mock_service_exeTestCase(TestCase):
    """Tests for the platform-specific mock-service executable name."""

    def setUp(self):
        """Patch the `os` module seen by `constants` so os.name is fakeable."""
        super(mock_service_exeTestCase, self).setUp()
        self.addCleanup(patch.stopall)
        self.mock_os = patch.object(constants, 'os', autospec=True).start()

    def test_other(self):
        """Non-Windows platforms get the bare executable name."""
        self.mock_os.name = 'posix'
        self.assertEqual(constants.mock_service_exe(), 'pact-mock-service')

    def test_windows(self):
        """Windows ('nt') gets the .bat wrapper."""
        self.mock_os.name = 'nt'
        self.assertEqual(constants.mock_service_exe(), 'pact-mock-service.bat')
class provider_verifier_exeTestCase(TestCase):
    """Tests for the platform-specific provider-verifier executable name."""

    def setUp(self):
        """Patch the `os` module seen by `constants` so os.name is fakeable."""
        super(provider_verifier_exeTestCase, self).setUp()
        self.addCleanup(patch.stopall)
        self.mock_os = patch.object(constants, 'os', autospec=True).start()

    def test_other(self):
        """Non-Windows platforms get the bare executable name."""
        self.mock_os.name = 'posix'
        self.assertEqual(
            constants.provider_verifier_exe(), 'pact-provider-verifier')

    def test_windows(self):
        """Windows ('nt') gets the .bat wrapper."""
        self.mock_os.name = 'nt'
        self.assertEqual(
            constants.provider_verifier_exe(), 'pact-provider-verifier.bat')
| true | true |
f72de033c98b828eeb36b1cc7ae17401f85d1ae6 | 2,806 | py | Python | pygext/taskmanager.py | chandler14362/pygext | b90b4c5727d2b6ac98fcbc096159050c266f105e | [
"MIT"
] | 1 | 2018-03-09T12:09:55.000Z | 2018-03-09T12:09:55.000Z | pygext/taskmanager.py | chandler14362/pygext | b90b4c5727d2b6ac98fcbc096159050c266f105e | [
"MIT"
] | 1 | 2015-03-14T00:50:30.000Z | 2015-03-14T00:50:30.000Z | pygext/taskmanager.py | chandler14362/pygext | b90b4c5727d2b6ac98fcbc096159050c266f105e | [
"MIT"
] | null | null | null | __author__ = 'stowellc17'
from twisted.internet.error import AlreadyCalled
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from pygext.notifier import global_notify
TASK_DONE = 0
TASK_AGAIN = 1
class Task:
    """A unit of scheduled work owned by a TaskManager.

    A looping task re-runs its method at 60 Hz via twisted's LoopingCall;
    a one-shot task runs once after `delay` seconds via reactor.callLater.
    The wrapped method receives this Task as its first argument and must
    return TASK_DONE or TASK_AGAIN.
    """

    notify = global_notify.new_category('Task')

    def __init__(self, task_mgr, name, method, args, kwargs, looping, delay):
        self.task_mgr = task_mgr  # owning TaskManager (used for removal)
        self.name = name  # key in the manager's task dict
        self.method = method  # callable(task, *args, **kwargs)
        self.args = args
        self.kwargs = kwargs
        self.looping = looping  # True: repeat at 60 Hz; False: one-shot
        self.delay = delay  # seconds before a one-shot task fires
        self.caller = None  # LoopingCall or DelayedCall handle

    def call_method(self):
        """Invoke the task's method and act on the status it returns."""
        task_status = self.method(self, *self.args, **self.kwargs)
        if task_status == TASK_DONE:
            self.__finish()
            return
        if task_status == TASK_AGAIN:
            # A one-shot task reschedules itself; a looping one keeps being
            # driven by its LoopingCall, so nothing more to do here.
            if not self.looping:
                self.start()
            return
        self.notify.warning('Task returned invalid status %s' % task_status)

    def start(self):
        """Begin executing: a 60 Hz LoopingCall, or a delayed one-shot."""
        if self.looping:
            self.caller = LoopingCall(self.call_method)
            self.caller.start(1.0 / 60.0)
            return
        self.caller = reactor.callLater(self.delay, self.call_method)

    def stop(self):
        """Cancel the pending or looping call."""
        if not self.looping:
            try:
                self.caller.cancel()
            except AlreadyCalled:
                # The delayed call already fired; nothing left to cancel.
                pass
            return
        self.caller.stop()

    def __finish(self):
        # Deregister from the manager, which also calls stop().
        self.task_mgr.remove(self.name)
class TaskManager:
    """Registry that creates, tracks, and cancels named Task objects."""

    notify = global_notify.new_category('TaskManager')

    def __init__(self):
        # Maps task name -> Task instance for every live task.
        self._tasks = {}

    def _register(self, name, method, args, kwargs, looping, delay):
        """Create, store, and start a task; shared by the public adders.

        Returns the new Task, or None if the name is already in use.
        """
        if name in self._tasks:
            self.notify.warning('Tried to add task %s when it was already in the task dict' % name)
            return
        task = Task(self, name, method, args or [], kwargs or {}, looping, delay)
        self._tasks[name] = task
        task.start()
        return task

    def add(self, name, method, args=None, kwargs=None):
        """Start a looping task that repeatedly runs `method`."""
        return self._register(name, method, args, kwargs, True, 0)

    def do_method_later(self, delay, name, method, args=None, kwargs=None):
        """Schedule `method` to run once after `delay` seconds."""
        return self._register(name, method, args, kwargs, False, delay)

    def remove(self, name):
        """Stop and forget the named task; warn if it does not exist."""
        if name not in self._tasks:
            self.notify.warning('Tried to remove non-existent task %s' % name)
            return
        self._tasks[name].stop()
        del self._tasks[name]

    def stop_all_tasks(self):
        """Stop and remove every registered task."""
        # Bug fix: iterate over a snapshot of the keys.  remove() deletes
        # entries from self._tasks, and deleting from a dict while iterating
        # its live key view raises RuntimeError in Python 3.
        for name in list(self._tasks):
            self.remove(name)


global_taskmgr = TaskManager()
| 25.053571 | 99 | 0.59052 | __author__ = 'stowellc17'
from twisted.internet.error import AlreadyCalled
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from pygext.notifier import global_notify
TASK_DONE = 0
TASK_AGAIN = 1
class Task:
    """A unit of scheduled work owned by a TaskManager.

    A looping task re-runs its method at 60 Hz via twisted's LoopingCall;
    a one-shot task runs once after `delay` seconds via reactor.callLater.
    The wrapped method receives this Task as its first argument and must
    return TASK_DONE or TASK_AGAIN.
    """

    notify = global_notify.new_category('Task')

    def __init__(self, task_mgr, name, method, args, kwargs, looping, delay):
        self.task_mgr = task_mgr  # owning TaskManager (used for removal)
        self.name = name  # key in the manager's task dict
        self.method = method  # callable(task, *args, **kwargs)
        self.args = args
        self.kwargs = kwargs
        self.looping = looping  # True: repeat at 60 Hz; False: one-shot
        self.delay = delay  # seconds before a one-shot task fires
        self.caller = None  # LoopingCall or DelayedCall handle

    def call_method(self):
        """Invoke the task's method and act on the status it returns."""
        task_status = self.method(self, *self.args, **self.kwargs)
        if task_status == TASK_DONE:
            self.__finish()
            return
        if task_status == TASK_AGAIN:
            # A one-shot task reschedules itself; a looping one keeps being
            # driven by its LoopingCall, so nothing more to do here.
            if not self.looping:
                self.start()
            return
        self.notify.warning('Task returned invalid status %s' % task_status)

    def start(self):
        """Begin executing: a 60 Hz LoopingCall, or a delayed one-shot."""
        if self.looping:
            self.caller = LoopingCall(self.call_method)
            self.caller.start(1.0 / 60.0)
            return
        self.caller = reactor.callLater(self.delay, self.call_method)

    def stop(self):
        """Cancel the pending or looping call."""
        if not self.looping:
            try:
                self.caller.cancel()
            except AlreadyCalled:
                # The delayed call already fired; nothing left to cancel.
                pass
            return
        self.caller.stop()

    def __finish(self):
        # Deregister from the manager, which also calls stop().
        self.task_mgr.remove(self.name)
class TaskManager:
    """Registry that creates, tracks, and cancels named Task objects."""

    notify = global_notify.new_category('TaskManager')

    def __init__(self):
        # Maps task name -> Task instance for every live task.
        self._tasks = {}

    def _register(self, name, method, args, kwargs, looping, delay):
        """Create, store, and start a task; shared by the public adders.

        Returns the new Task, or None if the name is already in use.
        """
        if name in self._tasks:
            self.notify.warning('Tried to add task %s when it was already in the task dict' % name)
            return
        task = Task(self, name, method, args or [], kwargs or {}, looping, delay)
        self._tasks[name] = task
        task.start()
        return task

    def add(self, name, method, args=None, kwargs=None):
        """Start a looping task that repeatedly runs `method`."""
        return self._register(name, method, args, kwargs, True, 0)

    def do_method_later(self, delay, name, method, args=None, kwargs=None):
        """Schedule `method` to run once after `delay` seconds."""
        return self._register(name, method, args, kwargs, False, delay)

    def remove(self, name):
        """Stop and forget the named task; warn if it does not exist."""
        if name not in self._tasks:
            self.notify.warning('Tried to remove non-existent task %s' % name)
            return
        self._tasks[name].stop()
        del self._tasks[name]

    def stop_all_tasks(self):
        """Stop and remove every registered task."""
        # Bug fix: iterate over a snapshot of the keys.  remove() deletes
        # entries from self._tasks, and deleting from a dict while iterating
        # its live key view raises RuntimeError in Python 3.
        for name in list(self._tasks):
            self.remove(name)


global_taskmgr = TaskManager()
| true | true |
f72de052359df817fc0ec946f2134f2ca26b6137 | 436 | py | Python | storage/database-cookbook/relational/sqlite/connection_sqlite.py | brunocampos01/job-challenges | 62cb3b63bec1659fe3dbb9c68afa9a0e19bdebcc | [
"MIT"
] | 1 | 2022-01-09T03:18:45.000Z | 2022-01-09T03:18:45.000Z | storage/database-cookbook/relational/sqlite/connection_sqlite.py | brunocampos01/job-challenges | 62cb3b63bec1659fe3dbb9c68afa9a0e19bdebcc | [
"MIT"
] | null | null | null | storage/database-cookbook/relational/sqlite/connection_sqlite.py | brunocampos01/job-challenges | 62cb3b63bec1659fe3dbb9c68afa9a0e19bdebcc | [
"MIT"
] | 1 | 2022-01-07T21:04:45.000Z | 2022-01-07T21:04:45.000Z | import sqlite3
# Minimal sqlite3 demo: create a table, insert two rows, and print them.
conn = sqlite3.connect("users.db")
# You can also supply the special name :memory: to create a temporary database in RAM
# conn = sqlite3.connect(':memory:')
try:
    c = conn.cursor()
    c.execute("CREATE TABLE user (name text, age integer)")
    # Parameterized inserts avoid manual quoting and SQL-injection issues.
    c.executemany(
        "INSERT INTO user VALUES (?, ?)",
        [("User A", 42), ("User B", 43)],
    )
    conn.commit()
    c.execute("SELECT * FROM user")
    print(c.fetchall())
finally:
    # Always release the database handle, even if a statement fails.
    conn.close()
| 22.947368 | 85 | 0.701835 | import sqlite3
# Minimal sqlite3 demo: create a table, insert two rows, and print them.
conn = sqlite3.connect("users.db")
cur = conn.cursor()
cur.execute("CREATE TABLE user (name text, age integer)")
cur.execute("INSERT INTO user VALUES ('User A', 42)")
cur.execute("INSERT INTO user VALUES ('User B', 43)")
# Persist the inserts before reading them back.
conn.commit()
cur.execute("SELECT * FROM user")
rows = cur.fetchall()
print(rows)
conn.close()
| true | true |
f72de06c4b741bccbe2f6e564e84f5ba0d6b7d31 | 845 | py | Python | scisalt/matplotlib/plot.py | joelfrederico/mytools | 7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f | [
"MIT"
] | 1 | 2021-03-31T23:27:09.000Z | 2021-03-31T23:27:09.000Z | scisalt/matplotlib/plot.py | joelfrederico/mytools | 7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f | [
"MIT"
] | null | null | null | scisalt/matplotlib/plot.py | joelfrederico/mytools | 7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f | [
"MIT"
] | null | null | null | import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import matplotlib.pyplot as _plt
import numpy as _np
from .setup_axes import setup_axes as _setup_axes
def plot(*args, ax=None, **kwargs):
    """
    Plots but automatically resizes x axis.

    .. versionadded:: 1.4

    Parameters
    ----------
    args
        Passed on to :meth:`matplotlib.axis.Axis.plot`.
    ax : :class:`matplotlib.axis.Axis`, optional
        The axis to plot to.
    kwargs
        Passed on to :meth:`matplotlib.axis.Axis.plot`.
    """
    if ax is None:
        # The created figure is not needed here; only the axis is used.
        _, ax = _setup_axes()
    pl = ax.plot(*args, **kwargs)
    # With at least (x, y) given -- and the second argument not a format
    # string such as 'r-' -- clamp the x axis to the data range.
    # Bug fix: len(args) replaces _np.shape(args)[0], which raises
    # ValueError for a ragged tuple of unequal-length arrays on modern
    # numpy versions.
    if len(args) > 1 and not isinstance(args[1], str):
        ax.set_xlim((min(args[0]), max(args[0])))
    return pl
| 22.236842 | 56 | 0.584615 | import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import matplotlib.pyplot as _plt
import numpy as _np
from .setup_axes import setup_axes as _setup_axes
def plot(*args, ax=None, **kwargs):
    """
    Plots but automatically resizes x axis.

    Parameters
    ----------
    args
        Passed on to :meth:`matplotlib.axis.Axis.plot`.
    ax : :class:`matplotlib.axis.Axis`, optional
        The axis to plot to.
    kwargs
        Passed on to :meth:`matplotlib.axis.Axis.plot`.
    """
    if ax is None:
        # The created figure is not needed here; only the axis is used.
        _, ax = _setup_axes()
    pl = ax.plot(*args, **kwargs)
    # With at least (x, y) given -- and the second argument not a format
    # string such as 'r-' -- clamp the x axis to the data range.
    # Bug fix: len(args) replaces _np.shape(args)[0], which raises
    # ValueError for a ragged tuple of unequal-length arrays on modern
    # numpy versions.
    if len(args) > 1 and not isinstance(args[1], str):
        ax.set_xlim((min(args[0]), max(args[0])))
    return pl
| true | true |
f72de0ea6a751f43e43a7090b6c6bc5b3a4e04ab | 19,297 | py | Python | da/compiler/ui.py | edgarsit/distalgo | 87a53b537c2ae9d974fb2f5f7ba95eaa92a9806e | [
"MIT"
] | 76 | 2015-05-27T01:22:38.000Z | 2022-02-28T18:38:55.000Z | da/compiler/ui.py | edgarsit/distalgo | 87a53b537c2ae9d974fb2f5f7ba95eaa92a9806e | [
"MIT"
] | 32 | 2015-09-19T01:37:36.000Z | 2021-02-17T03:04:56.000Z | da/compiler/ui.py | edgarsit/distalgo | 87a53b537c2ae9d974fb2f5f7ba95eaa92a9806e | [
"MIT"
] | 55 | 2015-08-10T01:42:16.000Z | 2021-11-15T10:37:32.000Z | # Copyright (c) 2010-2017 Bo Lin
# Copyright (c) 2010-2017 Yanhong Annie Liu
# Copyright (c) 2010-2017 Stony Brook University
# Copyright (c) 2010-2017 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import os.path
import ast
import sys
import time
import argparse
from .. import __version__
from ..importer import da_cache_from_source
from .utils import is_valid_debug_level, set_debug_level, to_source, to_file
from .parser import daast_from_file
from .parser import daast_from_str
from .pygen import PythonGenerator
from .incgen import gen_inc_module
from .pseudo import DastUnparser
# DistAlgo filename suffix
DA_SUFFIX = "da"

# Saved references to the process's standard streams; compiler error and
# status messages in this module are written to `stderr`.
stdout = sys.stdout
stderr = sys.stderr

# Benchmark stats:
WallclockStart = 0  # presumably the wall-clock start time of a compile run
InputSize = 0  # presumably cumulative size of compiled input -- confirm
OutputSize = 0  # presumably cumulative size of generated output -- confirm
def dastr_to_pyast(src, filename='<str>', args=None):
    """Translates DistAlgo source string into executable Python AST.

    'src' is the DistAlgo source string to parse. Optional argument
    'filename' is the filename that appears in error messages. Optional
    argument 'args' is a Namespace object containing the command line
    parameters for the compiler. Returns the generated Python AST, or None
    if translation failed.
    """
    daast = daast_from_str(src, filename, args)
    if daast is None:
        return None
    pyast = PythonGenerator(filename, args).visit(daast)
    if pyast is None:
        # Report and bail out; previously this fell through into the assert
        # below and crashed with AssertionError instead of returning None
        # (dafile_to_pyast already returned None here).
        print("Error: unable to generate Python AST from DistAlgo AST"
              " for file ", filename, file=stderr)
        return None
    assert isinstance(pyast, list) and len(pyast) == 1 and \
        isinstance(pyast[0], ast.Module)
    pyast = pyast[0]
    ast.fix_missing_locations(pyast)
    return pyast
def dafile_to_pyast(filename, args=None):
    """Translates a DistAlgo source file into an executable Python AST.

    'filename' is the path of the source file. Optional argument 'args' is
    a Namespace object with the compiler's command line parameters; a
    default set is used when omitted. Returns the generated Python AST, or
    None on failure.
    """
    if args is None:
        args = parse_compiler_args([])
    daast = daast_from_file(filename, args)
    if daast is None:
        return None
    generated = PythonGenerator(filename, args).visit(daast)
    if generated is None:
        print("Error: unable to generate Python AST from DistAlgo AST"
              " for file ", filename, file=stderr)
        return None
    # The generator yields a single-element list holding one Module node.
    assert isinstance(generated, list) and len(generated) == 1 and \
        isinstance(generated[0], ast.Module)
    module = generated[0]
    ast.fix_missing_locations(module)
    if args and hasattr(args, 'dump_ast') and args.dump_ast:
        print(ast.dump(module, include_attributes=True), file=stderr)
    return module
def _pyast_to_pycode(pyast, filename, _optimize=-1):
try:
return compile(pyast, filename, mode='exec',
dont_inherit=True, optimize=_optimize)
except Exception as e:
print("Unable to generate bytecode: {}".format(e), file=stderr)
return None
def dafile_to_pycode(filename, args=None, _optimize=-1, dfile=None):
    """Generates a compiled Python code object from a DistAlgo source file.

    'filename' is the source file to compile. Optional 'args' is a
    Namespace of compiler options; '_optimize' is forwarded to compile();
    'dfile', when given, is the name recorded in the code object instead of
    'filename'. Returns None in case of errors.
    """
    tree = dafile_to_pyast(filename, args)
    if tree is None:
        return None
    recorded_name = filename if dfile is None else dfile
    return _pyast_to_pycode(tree, recorded_name, _optimize)
def dastr_to_pycode(src, filename='<string>', args=None, _optimize=-1):
    """Generates a compiled Python code object from a DistAlgo source string.

    'src' is the DistAlgo source string; 'filename' appears in error
    messages and the code object; 'args' is a Namespace of compiler
    options; '_optimize' is forwarded to compile(). Returns None in case of
    errors.
    """
    tree = dastr_to_pyast(src, filename, args)
    if tree is None:
        return None
    return _pyast_to_pycode(tree, filename, _optimize)
def dafile_to_pystr(filename, args=None):
    """Generates executable Python source text from a DistAlgo source file.

    'filename' is the DistAlgo source file; 'args' is a Namespace of
    compiler options. Returns the generated Python code as a string, or
    None on failure.
    """
    tree = dafile_to_pyast(filename, args)
    return to_source(tree) if tree is not None else None
def dastr_to_pystr(src, filename='<str>', args=None):
    """Generates executable Python source text from a DistAlgo source string.

    'src' is the DistAlgo source string to parse; 'filename' appears in
    error messages; 'args' is a Namespace of compiler options. Returns the
    generated Python code as a string, or None on failure.
    """
    tree = dastr_to_pyast(src, filename, args)
    return to_source(tree) if tree is not None else None
def _sanitize_filename(dfile, no_symlink=True):
"""Check and sanitize 'dfile' for use as a target file.
"""
dirname, basename = os.path.split(dfile)
dirname = os.path.abspath(dirname)
dfile = os.path.join(dirname, basename)
if no_symlink:
if os.path.islink(dfile):
msg = ('{} is a symlink and will be changed into a regular file if '
'the compiler writes a compiled file to it')
raise FileExistsError(msg.format(dfile))
elif os.path.exists(dfile) and not os.path.isfile(dfile):
msg = ('{} is a non-regular file and will be changed into a regular '
'one if the compiler writes a compiled file to it')
raise FileExistsError(msg.format(dfile))
os.makedirs(dirname, exist_ok=True)
return dfile
def dafile_to_pseudofile(filename, outname=None, args=None):
    """Compiles a DistAlgo source file to a DistAlgo pseudo code file.

    'filename' is the input DistAlgo source file. Optional parameter
    'outname' specifies the file to write the result to. If 'outname' is
    None the filename is inferred by replacing the suffix of 'filename'
    with '.dap'. (The previous docstring wrongly described this function
    as producing a '.py' Python file.)

    Returns 0 on success, 1 on failure and 2 when the input is skipped,
    consistent with the other dafile_to_* entry points; 'main' uses this
    value as the process exit status.
    """
    purename, _, suffix = filename.rpartition(".")
    if len(purename) == 0:
        # No dot in the name: treat the whole name as the stem.
        purename = suffix
        suffix = ""
    if suffix == "py":
        stderr.write("Warning: skipping '.py' file %s\n" % filename)
        return 2
    elif suffix != DA_SUFFIX:
        stderr.write("Warning: unknown suffix '%s' in filename '%s'\n" %
                     (suffix, filename))
    if outname is None:
        outname = purename + ".dap"
    outname = _sanitize_filename(outname)
    daast = daast_from_file(filename, args)
    if daast:
        with open(outname, "w", encoding='utf-8') as outfd:
            DastUnparser(daast, outfd)
        stderr.write("Written pseudo code file %s.\n" % outname)
        return 0
    return 1
def dafile_to_pyfile(filename, outname=None, args=None):
    """Compiles a DistAlgo source file to a Python file.

    'filename' is the input DistAlgo source file. Optional parameter
    'outname' specifies the file to write the result to; if None the
    output name is inferred by replacing the suffix of 'filename' with
    '.py'. Optional 'args' is a Namespace of compiler options. (The
    previous docstring described 'args.filename'/'args.outname' properties
    that this function never read.)

    Returns 0 on success, 1 on failure and 2 when the input is skipped
    because it is already a '.py' file (previously the skip path returned
    None, unlike the sibling entry points).
    """
    purename, _, suffix = filename.rpartition(".")
    if len(purename) == 0:
        # No dot in the name: treat the whole name as the stem.
        purename = suffix
        suffix = ""
    if suffix == "py":
        stderr.write("Warning: skipping '.py' file %s\n" % filename)
        return 2
    elif suffix != DA_SUFFIX:
        stderr.write("Warning: unknown suffix '%s' in filename '%s'\n" %
                     (suffix, filename))
    if outname is None:
        outname = purename + ".py"
    outname = _sanitize_filename(outname)
    pyast = dafile_to_pyast(filename, args)
    if pyast is not None:
        with open(outname, "w", encoding='utf-8') as outfd:
            global OutputSize
            OutputSize += to_file(pyast, outfd)
        stderr.write("Written compiled file %s.\n" % outname)
        return 0
    else:
        return 1
def dafile_to_pycfile(filename, outname=None, optimize=-1, args=None,
                      dfile=None):
    """Byte-compile one DistAlgo source file to Python bytecode.

    'filename' is the source file. 'outname' is the target '.pyc' path; if
    None it is derived from the source path via the import cache layout.
    'optimize' is forwarded to compile(); 'args' is a Namespace of compiler
    options; 'dfile', when given, is the name recorded in the code object.
    Returns 0 on success and 1 on failure.
    """
    import importlib._bootstrap_external
    if outname is None:
        if optimize >= 0:
            # Encode the optimization level into the cache file name,
            # mirroring py_compile (level 0 carries no tag).
            opt = '' if optimize == 0 else optimize
            outname = da_cache_from_source(filename, optimization=opt)
        else:
            outname = da_cache_from_source(filename)
    outname = _sanitize_filename(outname)
    code = dafile_to_pycode(filename, args, _optimize=optimize, dfile=dfile)
    if code is not None:
        source_stats = os.stat(filename)
        PythonVersion = sys.version_info
        # _code_to_bytecode was renamed to _code_to_timestamp_pyc in
        # CPython 3.7; support both spellings.
        if PythonVersion < (3, 7):
            bytecode = importlib._bootstrap_external._code_to_bytecode(
                code, source_stats.st_mtime, source_stats.st_size)
        else:
            bytecode = importlib._bootstrap_external._code_to_timestamp_pyc(
                code, source_stats.st_mtime, source_stats.st_size)
        # Copy the source file's permission bits and write atomically so a
        # concurrent import never observes a partially written file.
        mode = importlib._bootstrap_external._calc_mode(filename)
        importlib._bootstrap_external._write_atomic(outname, bytecode, mode)
        stderr.write("Written bytecode file {}.\n".format(outname))
        return 0
    else:
        return 1
def check_python_version():
    """Return True iff the running interpreter meets the minimum supported
    Python version (3.5); otherwise report on stderr and return False.
    """
    if sys.version_info >= (3, 5):
        return True
    stderr.write("DistAlgo requires Python version 3.5 or newer.\n")
    return False
def dafile_to_incfiles(args):
    """Compiles a DistAlgo source file to a Python file and generates an
    interface file for incrementalization.

    'args' is the Namespace generated by argparse from the command line
    arguments and should contain the following properties: 'infile' is the
    input DistAlgo source file. Optional property 'outfile' specifies the
    file to write the result to; if None it is inferred by replacing the
    suffix of 'infile' with '.py'. Optional property 'incfile' is the file
    to write the incrementalization module to; if None it defaults to the
    base of 'infile' plus '_inc.py'. (The previous docstring referred to
    these as 'filename'/'outname'/'incname', which do not exist on 'args'.)

    Returns 0 on success, 1 on failure and 2 when the input is skipped.
    """
    filename = args.infile
    outname = args.outfile
    incname = args.incfile
    purename, _, suffix = filename.rpartition(".")
    if len(purename) == 0:
        purename = suffix
        suffix = ""
    if suffix == "py":
        stderr.write("Warning: skipping '.py' file %s\n" % filename)
        return 2
    elif suffix != DA_SUFFIX:
        stderr.write("Warning: unknown suffix '%s' in filename '%s'\n" %
                     (suffix, filename))
    daast = daast_from_file(filename, args)
    if outname is None:
        outname = purename + ".py"
    if incname is None:
        incname = purename + "_inc.py"
    if daast is not None:
        global OutputSize
        # Renamed from 'inc, ast = ...': the old local 'ast' shadowed the
        # imported 'ast' module within this function.
        inc_module, main_module = gen_inc_module(daast, args,
                                                 filename=incname)
        with open(outname, "w", encoding='utf-8') as outfd:
            OutputSize += to_file(main_module, outfd)
        stderr.write("Written compiled file %s.\n" % outname)
        with open(incname, "w", encoding='utf-8') as outfd:
            OutputSize += to_file(inc_module, outfd)
        stderr.write("Written interface file %s.\n" % incname)
        return 0
    else:
        return 1
def _add_compiler_args(parser):
    """Install the set of options affecting compilation.

    'parser' is the argparse.ArgumentParser (or compatible group) that the
    shared compiler options are added to. These options are used both by
    the command line driver ('main') and by programmatic entry points via
    'parse_compiler_args'.
    """
    ap = parser
    # -- Pattern-syntax options --
    ap.add_argument('--full-event-pattern',
                    help="If set, use the 'full' format "
                    "(TYPE, (CLK, DST, SRC), MSG) for event patterns;"
                    "otherwise, use 'short' format (MSG, SRC)",
                    action='store_true')
    ap.add_argument('--enable-object-pattern',
                    help="Enable the use of object-style tuple pattern syntax:"
                    " Object(ARGS...); which is equivalent to "
                    "('Object', ARGS...)",
                    action='store_true')
    ap.add_argument('--enable-membertest-pattern',
                    help="Overloads the Python 'in' operator to support using "
                    "tuple patterns, e.g.: '(_a, 1, _) in S', which is "
                    "equivalent to 'some((_a, 1, _) in S)'",
                    action='store_true')
    ap.add_argument('--enable-iterator-pattern',
                    help="Overloads the Python 'for ... in ...' keywords to "
                    "support using tuple patterns in the target, "
                    "e.g.: '[b for (_a, 1, b) in S]', which is equivalent to "
                    "'[b for (var1, var2, b) in S if var1 == a if var2 == b]'",
                    action='store_true')
    ap.add_argument('--use-top-semantic',
                    help="Use 'top' semantics for query variable and "
                    "parameter resolution. Under 'top' semantics, only "
                    "parameters to the top-level query are marked.",
                    action='store_true')
    # -- Incrementalization options (only meaningful with '-i') --
    ap.add_argument('--no-table1',
                    help="Disable table 1 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--no-table2',
                    help="Disable table 2 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--no-table3',
                    help="Disable table 3 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--no-table4',
                    help="Disable table 4 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--jb-style',
                    help="Generate Jon-friendly quantification transformations. "
                    "Only useful with '-i'.",
                    action='store_true')
    ap.add_argument('--no-all-tables',
                    help="Disable all quantification transformations. "
                    "Only useful with '-i'.",
                    action='store_true')
    # -- Module options --
    ap.add_argument('--module-name', type=str, default='__main__',
                    help="Name of the module to be compiled.")
def parse_compiler_args(argv):
    """Parse the shared compiler options from the list 'argv' and return
    the resulting argparse Namespace. Only options installed by
    '_add_compiler_args' are recognized.
    """
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    _add_compiler_args(parser)
    return parser.parse_args(argv)
def main(argv=None):
    """Main entry point when invoking compiler module from command line.

    'argv' is the list of command line arguments; when None, sys.argv[1:]
    is used. Returns the value used as the process exit status.
    """
    if not check_python_version():
        return 2
    if argv is None:
        argv = sys.argv[1:]
    ap = argparse.ArgumentParser(description="DistAlgo compiler.",
                                 argument_default=argparse.SUPPRESS)
    _add_compiler_args(ap)
    # Options below are specific to command line invocation:
    ap.add_argument('-o', help="Output file name.",
                    dest="outfile", default=None)
    ap.add_argument('-L', help="Logging output level.",
                    dest="debug", default=None)
    ap.add_argument('-i',
                    help="Generate interface code for plugging"
                    " into incrementalizer.",
                    action='store_true', dest="geninc", default=False)
    ap.add_argument("-m", "--inc-module-name",
                    help="name of the incrementalized interface module, "
                    "defaults to source module name + '_inc'. ",
                    dest="incfile", default=None)
    ap.add_argument('-O', '--optimize', type=int, default=-1)
    ap.add_argument('-D', '--dump-ast', default=False, action='store_true')
    ap.add_argument('-C', '--write-bytecode', default=False, action='store_true')
    ap.add_argument('-I', '--interactive',
                    help="Launch interactive shell.",
                    action='store_true', default=False)
    ap.add_argument('-B', '--benchmark',
                    help="Print the elapsed wallclock time of the compile session.",
                    action='store_true', default=False)
    ap.add_argument('-p', help="Generate DistAlgo pseudo code.",
                    action='store_true', dest="genpsd", default=False)
    ap.add_argument("-v", "--version", action="version", version=__version__)
    ap.add_argument('--psdfile', help="Name of DistAlgo pseudo code output file.",
                    dest="psdfile", default=None)
    ap.add_argument('infile', metavar='SOURCEFILE', type=str,
                    help="DistAlgo input source file.")
    args = ap.parse_args(argv)
    if args.benchmark:
        # Record the start time as early as possible for '-B'.
        global WallclockStart
        WallclockStart = time.perf_counter()
    if args.interactive:
        # Interactive mode bypasses compilation entirely.
        import code
        code.interact()
        return
    if args.debug is not None:
        try:
            level = int(args.debug)
            if is_valid_debug_level(level):
                set_debug_level(level)
            else:
                raise ValueError()
        except ValueError:
            # Invalid levels are reported but otherwise ignored.
            stderr.write("Invalid debugging level %s.\n" % str(args.debug))
    # Dispatch on the requested output flavor:
    if args.genpsd:
        res = dafile_to_pseudofile(args.infile, args.psdfile, args)
    elif args.geninc:
        res = dafile_to_incfiles(args)
    elif args.write_bytecode:
        res = dafile_to_pycfile(args.infile, args.outfile, args.optimize,
                                args=args)
    else:
        res = dafile_to_pyfile(args.infile, args.outfile, args)
    if args.benchmark:
        import json
        walltime = time.perf_counter() - WallclockStart
        jsondata = {'Wallclock_time' : walltime,
                    "Input_size" : InputSize,
                    "Output_size" : OutputSize}
        # The '###OUTPUT:' prefix lets benchmark harnesses locate the stats.
        print("###OUTPUT: " + json.dumps(jsondata))
    return res
| 40.202083 | 84 | 0.630513 |
import os
import os.path
import ast
import sys
import time
import argparse
from .. import __version__
from ..importer import da_cache_from_source
from .utils import is_valid_debug_level, set_debug_level, to_source, to_file
from .parser import daast_from_file
from .parser import daast_from_str
from .pygen import PythonGenerator
from .incgen import gen_inc_module
from .pseudo import DastUnparser
DA_SUFFIX = "da"
stdout = sys.stdout
stderr = sys.stderr
WallclockStart = 0
InputSize = 0
OutputSize = 0
def dastr_to_pyast(src, filename='<str>', args=None):
    """Translates DistAlgo source string 'src' into an executable Python
    AST; returns None on failure. 'filename' appears in error messages;
    'args' is a Namespace of compiler options.
    """
    daast = daast_from_str(src, filename, args)
    if daast is None:
        return None
    pyast = PythonGenerator(filename, args).visit(daast)
    if pyast is None:
        # Report and bail out; previously this fell through into the assert
        # below and crashed with AssertionError instead of returning None.
        print("Error: unable to generate Python AST from DistAlgo AST"
              " for file ", filename, file=stderr)
        return None
    assert isinstance(pyast, list) and len(pyast) == 1 and \
        isinstance(pyast[0], ast.Module)
    pyast = pyast[0]
    ast.fix_missing_locations(pyast)
    return pyast
def dafile_to_pyast(filename, args=None):
    """Translates DistAlgo source file 'filename' into an executable Python
    AST; returns None on failure. 'args' is a Namespace of compiler options
    (a default set is used when None).
    """
    if args is None:
        args = parse_compiler_args([])
    daast = daast_from_file(filename, args)
    if daast is not None:
        pyast = PythonGenerator(filename, args).visit(daast)
        if pyast is None:
            print("Error: unable to generate Python AST from DistAlgo AST"
                  " for file ", filename, file=stderr)
            return None
        # The generator yields a single-element list holding one Module node.
        assert isinstance(pyast, list) and len(pyast) == 1 and \
            isinstance(pyast[0], ast.Module)
        pyast = pyast[0]
        ast.fix_missing_locations(pyast)
        if args and hasattr(args, 'dump_ast') and args.dump_ast:
            # Optionally dump the generated AST for debugging ('-D').
            print(ast.dump(pyast, include_attributes=True), file=stderr)
        return pyast
    else:
        return None
def _pyast_to_pycode(pyast, filename, _optimize=-1):
    """Compile Python AST 'pyast' into a code object; 'filename' is recorded
    in the code object and '_optimize' is forwarded to compile(). Returns
    None (after reporting on stderr) when compilation fails.
    """
    try:
        return compile(pyast, filename, mode='exec',
                       dont_inherit=True, optimize=_optimize)
    except Exception as e:
        print("Unable to generate bytecode: {}".format(e), file=stderr)
        return None
def dafile_to_pycode(filename, args=None, _optimize=-1, dfile=None):
    """Generates a compiled Python code object from DistAlgo source file
    'filename'; returns None on error. 'args' is a Namespace of compiler
    options; 'dfile', when given, is the name recorded in the code object.
    """
    pyast = dafile_to_pyast(filename, args)
    if pyast is not None:
        return _pyast_to_pycode(pyast,
                                dfile if dfile is not None else filename,
                                _optimize)
    else:
        return None
def dastr_to_pycode(src, filename='<string>', args=None, _optimize=-1):
    """Generates a compiled Python code object from DistAlgo source string
    'src'; returns None on error. 'filename' appears in error messages and
    the code object; 'args' is a Namespace of compiler options.
    """
    pyast = dastr_to_pyast(src, filename, args)
    if pyast is not None:
        return _pyast_to_pycode(pyast, filename, _optimize)
    else:
        return None
def dafile_to_pystr(filename, args=None):
    """Generates executable Python source text from DistAlgo source file
    'filename'; returns None on failure. 'args' is a Namespace of compiler
    options.
    """
    pyast = dafile_to_pyast(filename, args)
    if pyast is not None:
        return to_source(pyast)
    else:
        return None
def dastr_to_pystr(src, filename='<str>', args=None):
    """Generates executable Python source text from DistAlgo source string
    'src'; returns None on failure. 'filename' appears in error messages;
    'args' is a Namespace of compiler options.
    """
    pyast = dastr_to_pyast(src, filename, args)
    if pyast is not None:
        return to_source(pyast)
    else:
        return None
def _sanitize_filename(dfile, no_symlink=True):
    """Check and sanitize 'dfile' for use as a target file.

    Returns the absolute path, creating missing parent directories. When
    'no_symlink' is True, raises FileExistsError if the target is a symlink
    or any other non-regular file, since writing a compiled file there
    would turn it into a regular file.
    """
    dirname, basename = os.path.split(dfile)
    dirname = os.path.abspath(dirname)
    dfile = os.path.join(dirname, basename)
    if no_symlink:
        if os.path.islink(dfile):
            msg = ('{} is a symlink and will be changed into a regular file if '
                   'the compiler writes a compiled file to it')
            raise FileExistsError(msg.format(dfile))
        elif os.path.exists(dfile) and not os.path.isfile(dfile):
            msg = ('{} is a non-regular file and will be changed into a regular '
                   'one if the compiler writes a compiled file to it')
            raise FileExistsError(msg.format(dfile))
    os.makedirs(dirname, exist_ok=True)
    return dfile
def dafile_to_pseudofile(filename, outname=None, args=None):
    """Compiles DistAlgo source file 'filename' to a DistAlgo pseudo code
    file. When 'outname' is None the output name is inferred by replacing
    the suffix of 'filename' with '.dap'. 'args' is a Namespace of compiler
    options.
    """
    purename, _, suffix = filename.rpartition(".")
    if len(purename) == 0:
        # No dot in the name: treat the whole name as the stem.
        purename = suffix
        suffix = ""
    if suffix == "py":
        stderr.write("Warning: skipping '.py' file %s\n" % filename)
        return
    elif suffix != DA_SUFFIX:
        stderr.write("Warning: unknown suffix '%s' in filename '%s'\n" %
                     (suffix, filename))
    if outname is None:
        outname = purename + ".dap"
    outname = _sanitize_filename(outname)
    daast = daast_from_file(filename, args)
    if daast:
        with open(outname, "w", encoding='utf-8') as outfd:
            DastUnparser(daast, outfd)
        stderr.write("Written pseudo code file %s.\n"% outname)
def dafile_to_pyfile(filename, outname=None, args=None):
    """Compiles DistAlgo source file 'filename' to a Python file.

    When 'outname' is None the output name is inferred by replacing the
    suffix of 'filename' with '.py'. Returns 0 on success, 1 on failure
    and None when the input is skipped because it is already a '.py' file.
    """
    purename, _, suffix = filename.rpartition(".")
    if len(purename) == 0:
        # No dot in the name: treat the whole name as the stem.
        purename = suffix
        suffix = ""
    if suffix == "py":
        stderr.write("Warning: skipping '.py' file %s\n" % filename)
        return
    elif suffix != DA_SUFFIX:
        stderr.write("Warning: unknown suffix '%s' in filename '%s'\n" %
                     (suffix, filename))
    if outname is None:
        outname = purename + ".py"
    outname = _sanitize_filename(outname)
    pyast = dafile_to_pyast(filename, args)
    if pyast is not None:
        with open(outname, "w", encoding='utf-8') as outfd:
            global OutputSize
            OutputSize += to_file(pyast, outfd)
        stderr.write("Written compiled file %s.\n"% outname)
        return 0
    else:
        return 1
def dafile_to_pycfile(filename, outname=None, optimize=-1, args=None,
                      dfile=None):
    """Byte-compile one DistAlgo source file to a Python bytecode file.

    When 'outname' is None a cache path derived from 'filename' is used.
    'optimize' is forwarded to compile(); 'dfile', when given, is the name
    recorded in the code object. Returns 0 on success and 1 on failure.
    """
    import importlib._bootstrap_external
    if outname is None:
        if optimize >= 0:
            # Encode the optimization level into the cache file name,
            # mirroring py_compile (level 0 carries no tag).
            opt = '' if optimize == 0 else optimize
            outname = da_cache_from_source(filename, optimization=opt)
        else:
            outname = da_cache_from_source(filename)
    outname = _sanitize_filename(outname)
    code = dafile_to_pycode(filename, args, _optimize=optimize, dfile=dfile)
    if code is not None:
        source_stats = os.stat(filename)
        PythonVersion = sys.version_info
        # _code_to_bytecode was renamed to _code_to_timestamp_pyc in 3.7.
        if PythonVersion < (3, 7):
            bytecode = importlib._bootstrap_external._code_to_bytecode(
                code, source_stats.st_mtime, source_stats.st_size)
        else:
            bytecode = importlib._bootstrap_external._code_to_timestamp_pyc(
                code, source_stats.st_mtime, source_stats.st_size)
        # Copy permission bits from the source and write atomically.
        mode = importlib._bootstrap_external._calc_mode(filename)
        importlib._bootstrap_external._write_atomic(outname, bytecode, mode)
        stderr.write("Written bytecode file {}.\n".format(outname))
        return 0
    else:
        return 1
def check_python_version():
    """Return True iff the running interpreter meets the minimum supported
    Python version (3.5); otherwise report on stderr and return False.
    """
    if sys.version_info < (3, 5):
        stderr.write("DistAlgo requires Python version 3.5 or newer.\n")
        return False
    else:
        return True
def dafile_to_incfiles(args):
    """Compiles the DistAlgo source file named by 'args.infile' to a Python
    file ('args.outfile', default: source stem + '.py') and generates an
    incrementalization interface module ('args.incfile', default: source
    stem + '_inc.py'). Returns 0 on success, 1 on failure and 2 when the
    input is skipped.
    """
    filename = args.infile
    outname = args.outfile
    incname = args.incfile
    purename, _, suffix = filename.rpartition(".")
    if len(purename) == 0:
        purename = suffix
        suffix = ""
    if suffix == "py":
        stderr.write("Warning: skipping '.py' file %s\n" % filename)
        return 2
    elif suffix != DA_SUFFIX:
        stderr.write("Warning: unknown suffix '%s' in filename '%s'\n" %
                     (suffix, filename))
    daast = daast_from_file(filename, args)
    if outname is None:
        outname = purename + ".py"
    if incname is None:
        incname = purename + "_inc.py"
    if daast is not None:
        global OutputSize
        # Renamed from 'inc, ast = ...': the old local 'ast' shadowed the
        # imported 'ast' module within this function.
        inc_module, main_module = gen_inc_module(daast, args,
                                                 filename=incname)
        with open(outname, "w", encoding='utf-8') as outfd:
            OutputSize += to_file(main_module, outfd)
        stderr.write("Written compiled file %s.\n" % outname)
        with open(incname, "w", encoding='utf-8') as outfd:
            OutputSize += to_file(inc_module, outfd)
        stderr.write("Written interface file %s.\n" % incname)
        return 0
    else:
        return 1
def _add_compiler_args(parser):
    """Install the set of options affecting compilation onto 'parser', an
    argparse.ArgumentParser (or compatible group). These options are shared
    by the command line driver and the programmatic entry points.
    """
    ap = parser
    ap.add_argument('--full-event-pattern',
                    help="If set, use the 'full' format "
                    "(TYPE, (CLK, DST, SRC), MSG) for event patterns;"
                    "otherwise, use 'short' format (MSG, SRC)",
                    action='store_true')
    ap.add_argument('--enable-object-pattern',
                    help="Enable the use of object-style tuple pattern syntax:"
                    " Object(ARGS...); which is equivalent to "
                    "('Object', ARGS...)",
                    action='store_true')
    ap.add_argument('--enable-membertest-pattern',
                    help="Overloads the Python 'in' operator to support using "
                    "tuple patterns, e.g.: '(_a, 1, _) in S', which is "
                    "equivalent to 'some((_a, 1, _) in S)'",
                    action='store_true')
    ap.add_argument('--enable-iterator-pattern',
                    help="Overloads the Python 'for ... in ...' keywords to "
                    "support using tuple patterns in the target, "
                    "e.g.: '[b for (_a, 1, b) in S]', which is equivalent to "
                    "'[b for (var1, var2, b) in S if var1 == a if var2 == b]'",
                    action='store_true')
    ap.add_argument('--use-top-semantic',
                    help="Use 'top' semantics for query variable and "
                    "parameter resolution. Under 'top' semantics, only "
                    "parameters to the top-level query are marked.",
                    action='store_true')
    ap.add_argument('--no-table1',
                    help="Disable table 1 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--no-table2',
                    help="Disable table 2 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--no-table3',
                    help="Disable table 3 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--no-table4',
                    help="Disable table 4 quantification transformations. "
                    "Only used when '-i' is enabled.",
                    action='store_true')
    ap.add_argument('--jb-style',
                    help="Generate Jon-friendly quantification transformations. "
                    "Only useful with '-i'.",
                    action='store_true')
    ap.add_argument('--no-all-tables',
                    help="Disable all quantification transformations. "
                    "Only useful with '-i'.",
                    action='store_true')
    ap.add_argument('--module-name', type=str, default='__main__',
                    help="Name of the module to be compiled.")
def parse_compiler_args(argv):
    """Parse the shared compiler options from the list 'argv' and return
    the resulting argparse Namespace.
    """
    ap = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    _add_compiler_args(ap)
    args = ap.parse_args(argv)
    return args
def main(argv=None):
    """Main entry point when invoking the compiler module from the command
    line. 'argv' is the list of command line arguments; when None,
    sys.argv[1:] is used. Returns the value used as the process exit
    status.
    """
    if not check_python_version():
        return 2
    if argv is None:
        argv = sys.argv[1:]
    ap = argparse.ArgumentParser(description="DistAlgo compiler.",
                                 argument_default=argparse.SUPPRESS)
    _add_compiler_args(ap)
    # Options below are specific to command line invocation:
    ap.add_argument('-o', help="Output file name.",
                    dest="outfile", default=None)
    ap.add_argument('-L', help="Logging output level.",
                    dest="debug", default=None)
    ap.add_argument('-i',
                    help="Generate interface code for plugging"
                    " into incrementalizer.",
                    action='store_true', dest="geninc", default=False)
    ap.add_argument("-m", "--inc-module-name",
                    help="name of the incrementalized interface module, "
                    "defaults to source module name + '_inc'. ",
                    dest="incfile", default=None)
    ap.add_argument('-O', '--optimize', type=int, default=-1)
    ap.add_argument('-D', '--dump-ast', default=False, action='store_true')
    ap.add_argument('-C', '--write-bytecode', default=False, action='store_true')
    ap.add_argument('-I', '--interactive',
                    help="Launch interactive shell.",
                    action='store_true', default=False)
    ap.add_argument('-B', '--benchmark',
                    help="Print the elapsed wallclock time of the compile session.",
                    action='store_true', default=False)
    ap.add_argument('-p', help="Generate DistAlgo pseudo code.",
                    action='store_true', dest="genpsd", default=False)
    ap.add_argument("-v", "--version", action="version", version=__version__)
    ap.add_argument('--psdfile', help="Name of DistAlgo pseudo code output file.",
                    dest="psdfile", default=None)
    ap.add_argument('infile', metavar='SOURCEFILE', type=str,
                    help="DistAlgo input source file.")
    args = ap.parse_args(argv)
    if args.benchmark:
        # Record the start time as early as possible for '-B'.
        global WallclockStart
        WallclockStart = time.perf_counter()
    if args.interactive:
        # Interactive mode bypasses compilation entirely.
        import code
        code.interact()
        return
    if args.debug is not None:
        try:
            level = int(args.debug)
            if is_valid_debug_level(level):
                set_debug_level(level)
            else:
                raise ValueError()
        except ValueError:
            # Invalid levels are reported but otherwise ignored.
            stderr.write("Invalid debugging level %s.\n" % str(args.debug))
    # Dispatch on the requested output flavor:
    if args.genpsd:
        res = dafile_to_pseudofile(args.infile, args.psdfile, args)
    elif args.geninc:
        res = dafile_to_incfiles(args)
    elif args.write_bytecode:
        res = dafile_to_pycfile(args.infile, args.outfile, args.optimize,
                                args=args)
    else:
        res = dafile_to_pyfile(args.infile, args.outfile, args)
    if args.benchmark:
        import json
        walltime = time.perf_counter() - WallclockStart
        jsondata = {'Wallclock_time' : walltime,
                    "Input_size" : InputSize,
                    "Output_size" : OutputSize}
        # The '###OUTPUT:' prefix lets benchmark harnesses locate the stats.
        print("###OUTPUT: " + json.dumps(jsondata))
    return res
| true | true |
f72de425aeaa5252f36d4ae7e4afd2f59fe3e6a5 | 17,897 | py | Python | eve/io/base.py | mdhaman/eve | 646ad5713b7baaffd0daee25a724edb722cc7a91 | [
"BSD-3-Clause"
] | 1 | 2017-10-31T17:36:58.000Z | 2017-10-31T17:36:58.000Z | eve/io/base.py | nitecoder/eve | d859e60b41a4e95f9b7b8606e173790b7b8d0bc6 | [
"BSD-3-Clause"
] | null | null | null | eve/io/base.py | nitecoder/eve | d859e60b41a4e95f9b7b8606e173790b7b8d0bc6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
eve.io.base
~~~~~~~~~~~
Standard interface implemented by Eve data layers.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import datetime
import simplejson as json
from copy import copy
from flask import request, abort
from eve.utils import date_to_str
from eve.auth import auth_field_and_value
from eve.utils import config, debug_error_message, auto_fields
class BaseJSONEncoder(json.JSONEncoder):
    """ Proprietary JSONEncoder subclass used by the json render function.
    This is needed to address the encoding of special values.
    """
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            # convert any datetime to RFC 1123 format
            return date_to_str(obj)
        elif isinstance(obj, (datetime.time, datetime.date)):
            # should not happen since the only supported date-like format
            # supported at domain schema level is 'datetime'.
            return obj.isoformat()
        # delegate anything else to the base class (which raises TypeError
        # for unserializable objects)
        return json.JSONEncoder.default(self, obj)
class ConnectionException(Exception):
    """ Raised when DataLayer subclasses cannot find/activate their
    database connection.

    :param driver_exception: the original exception raised by the source db
                             driver
    """
    def __init__(self, driver_exception=None):
        self.driver_exception = driver_exception

    def __str__(self):
        # Fixed: the two literals previously joined as 'serveris running'
        # because the first one lacked a trailing space.
        msg = ("Error initializing the driver. Make sure the database server "
               "is running. ")
        if self.driver_exception:
            msg += "Driver exception: %s" % repr(self.driver_exception)
        return msg
class DataLayer(object):
""" Base data layer class. Defines the interface that actual data-access
classes, being subclasses, must implement. Implemented as a Flask
extension.
Admittedly, this interface is a Mongo rip-off. See the io.mongo
package for an implementation example.
.. versionchanged:: 0.2
Allow subclasses to provide their own specialized json encoder.
.. versionchanged:: 0.1.1
'serializers' dictionary added.
.. versionchanged:: 0.1.0
Support for PUT method.
.. versionchanged:: 0.0.6
support for 'projections' has been added. For more information see
http://docs.mongodb.org/manual/reference/glossary/#term-projection.
While typically a MongoDB feature, other subclasses could decide to
provide support for their own projection syntax.
.. versionchanged:: 0.0.4
the _datasource helper function has been added.
"""
# if custom serialize functions are needed, add them to the 'serializers'
# dictionary, eg:
# serializers = {'objectid': ObjectId, 'datetime': serialize_date}
serializers = {}
# json.JSONEncoder subclass for serializing data to json.
# Subclasses should provide their own specialized encoder (see
# eve.io.mongo.MongoJSONEncoder).
json_encoder_class = BaseJSONEncoder
def __init__(self, app):
""" Implements the Flask extension pattern.
.. versionchanged:: 0.2
Explicit initialize self.driver to None.
"""
self.driver = None
if app is not None:
self.app = app
self.init_app(self.app)
else:
self.app = None
def init_app(self, app):
""" This is where you want to initialize the db driver so it will be
alive through the whole instance lifespan.
"""
raise NotImplementedError
def find(self, resource, req, sub_resource_lookup):
""" Retrieves a set of documents (rows), matching the current request.
Consumed when a request hits a collection/document endpoint
(`/people/`).
:param resource: resource being accessed. You should then use
the ``_datasource`` helper function to retrieve both
the db collection/table and base query (filter), if
any.
:param req: an instance of ``eve.utils.ParsedRequest``. This contains
all the constraints that must be fulfilled in order to
satisfy the original request (where and sort parts, paging,
etc). Be warned that `where` and `sort` expresions will
need proper parsing, according to the syntax that you want
to support with your driver. For example ``eve.io.Mongo``
supports both Python and Mongo-like query syntaxes.
:param sub_resource_lookup: sub-resource lookup from the endpoint url.
.. versionchanged:: 0.3
Support for sub-resources.
"""
raise NotImplementedError
def find_one(self, resource, req, **lookup):
""" Retrieves a single document/record. Consumed when a request hits an
item endpoint (`/people/id/`).
:param resource: resource being accessed. You should then use the
``_datasource`` helper function to retrieve both the
db collection/table and base query (filter), if any.
:param req: an instance of ``eve.utils.ParsedRequest``. This contains
all the constraints that must be fulfilled in order to
satisfy the original request (where and sort parts, paging,
etc). As we are going to only look for one document here,
the only req attribute that you want to process here is
``req.projection``.
:param **lookup: the lookup fields. This will most likely be a record
id or, if alternate lookup is supported by the API,
the corresponding query.
.. versionchanged:: 0.4
Added the 'req' argument.
"""
raise NotImplementedError
def find_one_raw(self, resource, _id):
""" Retrieves a single, raw document. No projections or datasource
filters are being applied here. Just looking up the document by unique
id.
:param resource: resource name.
:param id: unique id.
.. versionadded:: 0.4
"""
raise NotImplementedError
def find_list_of_ids(self, resource, ids, client_projection=None):
""" Retrieves a list of documents based on a list of primary keys
The primary key is the field defined in `ID_FIELD`.
This is a separate function to allow us to use per-database
optimizations for this type of query.
:param resource: resource name.
:param ids: a list of ids corresponding to the documents
to retrieve
:param client_projection: a specific projection to use
:return: a list of documents matching the ids in `ids` from the
collection specified in `resource`
.. versionadded:: 0.1.0
"""
raise NotImplementedError
def insert(self, resource, doc_or_docs):
    """ Inserts a document into a resource collection/table.

    Abstract method: the concrete data layer must implement it.

    :param resource: resource being accessed. You should then use
                     the ``_datasource`` helper function to retrieve
                     the actual datasource name.
    :param doc_or_docs: json document or list of json documents to be added
                        to the database.

    .. versionchanged:: 0.0.6
       'document' param renamed to 'doc_or_docs', making support for bulk
       inserts apparent.
    """
    raise NotImplementedError
def update(self, resource, id_, updates):
    """ Updates a collection/table document/row.

    Abstract method: the concrete data layer must implement it.

    :param resource: resource being accessed. You should then use
                     the ``_datasource`` helper function to retrieve
                     the actual datasource name.
    :param id_: the unique id of the document.
    :param updates: json updates to be performed on the database document
                    (or row).
    """
    raise NotImplementedError
def replace(self, resource, id_, document):
    """ Replaces a collection/table document/row.

    Abstract method: the concrete data layer must implement it.

    :param resource: resource being accessed. You should then use
                     the ``_datasource`` helper function to retrieve
                     the actual datasource name.
    :param id_: the unique id of the document.
    :param document: the new json document.

    .. versionadded:: 0.1.0
    """
    raise NotImplementedError
def remove(self, resource, lookup={}):
    """ Removes a document/row or an entire set of documents/rows from a
    database collection/table.

    Abstract method: the concrete data layer must implement it.

    NOTE(review): ``lookup={}`` is a mutable default argument; harmless as
    long as implementations never mutate it, but worth confirming.

    :param resource: resource being accessed. You should then use
                     the ``_datasource`` helper function to retrieve
                     the actual datasource name.
    :param lookup: a dict with the query that documents must match in order
                   to qualify for deletion. For single document deletes,
                   this is usually the unique id of the document to be
                   removed.

    .. versionchanged:: 0.3
       '_id' arg removed; replaced with 'lookup'.
    """
    raise NotImplementedError
def combine_queries(self, query_a, query_b):
    """ Takes two db queries and applies db-specific syntax to produce
    the intersection.

    Abstract method: the concrete data layer must implement it.

    :param query_a: first db query.
    :param query_b: second db query.
    :return: a single query matching only documents satisfying both.

    .. versionadded: 0.1.0
       Support for intelligent combination of db queries
    """
    raise NotImplementedError
def get_value_from_query(self, query, field_name):
    """ Parses the given potentially-complex query and returns the value
    being assigned to the field given in `field_name`.

    This mainly exists to deal with more complicated compound queries.

    Abstract method: the concrete data layer must implement it.

    :param query: the db query to inspect.
    :param field_name: name of the field whose value is extracted.

    .. versionadded: 0.1.0
       Support for parsing values embedded in compound db queries
    """
    raise NotImplementedError
def query_contains_field(self, query, field_name):
    """ For the specified field name, does the query contain it?
    Used to know whether we need to parse a compound query.

    Abstract method: the concrete data layer must implement it.

    :param query: the db query to inspect.
    :param field_name: name of the field being looked up.

    .. versionadded: 0.1.0
       Support for parsing values embedded in compound db queries
    """
    raise NotImplementedError
def is_empty(self, resource):
    """ Returns True if the collection is empty; False otherwise. While
    a user could rely on self.find() method to achieve the same result,
    this method can probably take advantage of specific datastore features
    to provide better perfomance.

    Don't forget, a 'resource' could have a pre-defined filter. If that is
    the case, it will have to be taken into consideration when performing
    the is_empty() check (see eve.io.mongo.mongo.py implementation).

    Abstract method: the concrete data layer must implement it.

    :param resource: resource being accessed. You should then use
                     the ``_datasource`` helper function to retrieve
                     the actual datasource name.

    .. versionadded: 0.3
    """
    raise NotImplementedError
def _datasource(self, resource):
    """ Returns a tuple with the actual name of the database
    collection/table, base query and projection for the resource being
    accessed.

    Copies are returned so callers cannot accidentally tamper with the
    registered datasource configuration.

    :param resource: resource being accessed.
    :return: (source, filter, projection, default_sort) 4-tuple. The
             projection is ``None`` when 'allow_unknown' is enabled for
             the resource, so every document field can flow back to the
             client regardless of the resource schema.
    """
    spec = config.SOURCES[resource]
    source = copy(spec['source'])
    base_filter = copy(spec['filter'])
    default_sort = copy(spec['default_sort'])
    # With allow_unknown enabled we return no projection at all, so that
    # fields outside the schema are not stripped from responses.
    if config.DOMAIN[resource]['allow_unknown']:
        projection = None
    else:
        projection = copy(spec['projection'])
    return source, base_filter, projection, default_sort
def _datasource_ex(self, resource, query=None, client_projection=None,
                   client_sort=None):
    """ Returns both db collection and exact query (base filter included)
    to which an API resource refers to.

    :param resource: resource being accessed.
    :param query: query currently being processed. If the resource defines
                  a base 'filter', the two are merged with the driver's
                  `combine_queries`.
    :param client_projection: projection requested by the client; it is
                              validated against the resource projection so
                              private fields cannot be sniffed.
    :param client_sort: sort requested by the client; takes precedence
                        over the resource 'default_sort'.

    .. versionchanged:: 0.4
       Always return required/auto fields (issue 282.)

    .. versionchanged:: 0.3
       Field exclusion support in client projections.
       Honor auth_field even when client query is missing.
       Only inject auth_field in queries when we are not creating new
       documents.
       'auth_field' and 'request_auth_value' fetching is now delegated to
       auth.auth_field_and value().

    .. versionchanged:: 0.2
       Difference between resource and item endpoints is now determined
       by the presence of a '|' in request.endpoint.
       Support for 'default_sort'.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.

    .. versionchanged:: 0.1.0
       Calls `combine_queries` to merge query and filter_
       Updated logic performing `auth_field` check

    .. versionchanged:: 0.0.9
       Storing self.app.auth.userid in auth_field when 'user-restricted
       resource access' is enabled.
       Support for Python 3.3.

    .. versionchanged:: 0.0.6
       'auth_username_field' is injected even in empty queries.
       Projection queries ('?projection={"name": 1}')

    .. versionchanged:: 0.0.5
       Support for 'user-restricted resource access'.

    .. versionadded:: 0.0.4
    """
    datasource, filter_, projection_, sort_ = self._datasource(resource)
    if client_sort:
        sort = client_sort
    else:
        # default sort is activated only if 'sorting' is enabled for the
        # resource.
        # TODO Consider raising a validation error on startup instead?
        sort = sort_ if sort_ and config.DOMAIN[resource]['sorting'] else \
            None
    if filter_:
        if query:
            # Can't just dump one set of query operators into another
            # e.g. if the dataset contains a custom datasource pattern
            #   'filter': {'username': {'$exists': True}}
            # and we try to filter on the field `username`,
            # which is correct?
            # Solution: call the db driver `combine_queries` operation
            # which will apply db-specific syntax to produce the
            # intersection of the two queries
            query = self.combine_queries(query, filter_)
        else:
            query = filter_
    fields = projection_
    if client_projection:
        # only allow fields which are included with the standard projection
        # for the resource (avoid sniffing of private fields)
        keep_fields = auto_fields(resource)
        if 0 not in client_projection.values():
            # inclusive projection - all values are 0 unless spec. or auto
            fields = dict([(field, field in keep_fields) for field in
                           fields.keys()])
        for field, value in client_projection.items():
            field_base = field.split('.')[0]
            if field_base not in keep_fields and field_base in fields:
                fields[field] = value
        # only keep the fields that ended up with a truthy projection value
        fields = dict([(field, 1) for field, value in fields.items() if
                       value])
    # If the current HTTP method is in `public_methods` or
    # `public_item_methods`, skip the `auth_field` check
    # Only inject the auth_field in the query when not creating new
    # documents.
    if request and request.method not in ('POST', 'PUT'):
        auth_field, request_auth_value = auth_field_and_value(resource)
        if auth_field and request.authorization and request_auth_value:
            if query:
                # If the auth_field *replaces* a field in the query,
                # and the values are /different/, deny the request
                # This prevents the auth_field condition from
                # overwriting the query (issue #77)
                auth_field_in_query = \
                    self.app.data.query_contains_field(query, auth_field)
                if auth_field_in_query and \
                        self.app.data.get_value_from_query(
                            query, auth_field) != request_auth_value:
                    abort(401, description=debug_error_message(
                        'Incompatible User-Restricted Resource request. '
                        'Request was for "%s"="%s" but `auth_field` '
                        'requires "%s"="%s".' % (
                            auth_field,
                            self.app.data.get_value_from_query(
                                query, auth_field),
                            auth_field,
                            request_auth_value)
                    ))
                else:
                    query = self.app.data.combine_queries(
                        query, {auth_field: request_auth_value}
                    )
            else:
                query = {auth_field: request_auth_value}
    return datasource, query, fields, sort
| 40.767654 | 79 | 0.605073 |
import datetime
import simplejson as json
from copy import copy
from flask import request, abort
from eve.utils import date_to_str
from eve.auth import auth_field_and_value
from eve.utils import config, debug_error_message, auto_fields
class BaseJSONEncoder(json.JSONEncoder):
    """ JSON encoder that also serializes datetime, date and time objects. """

    def default(self, obj):
        """ Serialize non-standard objects; everything else is delegated to
        the base class, which raises TypeError for unserializable values.
        """
        if isinstance(obj, datetime.datetime):
            # Must be checked before date/time: datetime is a subclass of
            # date and has to be rendered via eve's date_to_str helper
            # rather than plain isoformat().
            return date_to_str(obj)
        elif isinstance(obj, (datetime.time, datetime.date)):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
class ConnectionException(Exception):
    """ Raised when the data layer cannot initialize its database driver.

    :param driver_exception: the original exception raised by the db
        driver, if any; its repr is appended to the rendered message to
        ease debugging.
    """

    def __init__(self, driver_exception=None):
        self.driver_exception = driver_exception

    def __str__(self):
        # Bug fix: the two implicitly-concatenated string literals were
        # missing a separating space ("serveris running").
        msg = ("Error initializing the driver. Make sure the database "
               "server is running. ")
        if self.driver_exception:
            msg += "Driver exception: %s" % repr(self.driver_exception)
        return msg
class DataLayer(object):
    """ Base data layer class. Defines the interface that concrete data
    layers (e.g. ``eve.io.Mongo``) must implement; most methods here simply
    raise NotImplementedError.
    """
    # Mapping of field types to serialization callables, filled in by
    # concrete data layers.
    serializers = {}
    # JSON encoder class used when rendering documents.
    json_encoder_class = BaseJSONEncoder

    def __init__(self, app):
        """ Bind the data layer to a Flask/Eve `app` (may be None for
        deferred initialization) and let the subclass set up its driver.
        """
        self.driver = None
        if app is not None:
            self.app = app
            self.init_app(self.app)
        else:
            self.app = None

    def init_app(self, app):
        """ Initialize the db driver for `app`. Abstract. """
        raise NotImplementedError

    def find(self, resource, req, sub_resource_lookup):
        """ Retrieve a set of documents matching `req` constraints and the
        optional `sub_resource_lookup`. Abstract.
        """
        raise NotImplementedError

    def find_one(self, resource, req, **lookup):
        """ Retrieve a single document matching `lookup`. Abstract. """
        raise NotImplementedError

    def find_one_raw(self, resource, _id):
        """ Retrieve a single raw document by unique id, with no projection
        or datasource filter applied. Abstract.
        """
        raise NotImplementedError

    def find_list_of_ids(self, resource, ids, client_projection=None):
        """ Retrieve the documents whose primary keys are in `ids`.
        Abstract.
        """
        raise NotImplementedError

    def insert(self, resource, doc_or_docs):
        """ Insert one document or a list of documents. Abstract. """
        raise NotImplementedError

    def update(self, resource, id_, updates):
        """ Apply `updates` to the document with unique id `id_`.
        Abstract.
        """
        raise NotImplementedError

    def replace(self, resource, id_, document):
        """ Replace the document with unique id `id_` by `document`.
        Abstract.
        """
        raise NotImplementedError

    def remove(self, resource, lookup={}):
        """ Remove the document(s) matching `lookup`. Abstract.

        NOTE(review): ``lookup={}`` is a mutable default argument;
        implementations must not mutate it.
        """
        raise NotImplementedError

    def combine_queries(self, query_a, query_b):
        """ Return the db-specific intersection of two queries. Abstract. """
        raise NotImplementedError

    def get_value_from_query(self, query, field_name):
        """ Return the value assigned to `field_name` in a potentially
        compound `query`. Abstract.
        """
        raise NotImplementedError

    def query_contains_field(self, query, field_name):
        """ Return whether `query` constrains `field_name`. Abstract. """
        raise NotImplementedError

    def is_empty(self, resource):
        """ Return True if the resource collection/table is empty.
        Abstract.
        """
        raise NotImplementedError

    def _datasource(self, resource):
        """ Return a (source, filter, projection, default_sort) tuple for
        `resource`, copying each entry so the registered configuration
        cannot be tampered with. The projection is None when
        'allow_unknown' is enabled, so all document fields can be returned
        to the client regardless of the resource schema.
        """
        dsource = config.SOURCES[resource]
        source = copy(dsource['source'])
        filter_ = copy(dsource['filter'])
        sort = copy(dsource['default_sort'])
        # the default or client projection so all document fields can be
        # returned to the client (regardless of the resource schema).
        allow_unknown = config.DOMAIN[resource]['allow_unknown']
        projection = copy(dsource['projection']) if not allow_unknown else None
        return source, filter_, projection, sort,

    def _datasource_ex(self, resource, query=None, client_projection=None,
                       client_sort=None):
        """ Return the db collection and the exact query (base filter and
        user-restriction included) for `resource`.

        :param query: merged with the resource base filter, if any.
        :param client_projection: validated against the resource projection
                                  so private fields cannot be sniffed.
        :param client_sort: overrides the resource 'default_sort'.
        """
        datasource, filter_, projection_, sort_ = self._datasource(resource)
        if client_sort:
            sort = client_sort
        else:
            # default sort is activated only if 'sorting' is enabled for the
            # resource.
            # TODO Consider raising a validation error on startup instead?
            sort = sort_ if sort_ and config.DOMAIN[resource]['sorting'] else \
                None
        if filter_:
            if query:
                # Can't just dump one set of query operators into another
                query = self.combine_queries(query, filter_)
            else:
                query = filter_
        fields = projection_
        if client_projection:
            # Only fields within the standard projection (plus auto fields)
            # may be requested by the client.
            keep_fields = auto_fields(resource)
            if 0 not in client_projection.values():
                fields = dict([(field, field in keep_fields) for field in
                               fields.keys()])
            for field, value in client_projection.items():
                field_base = field.split('.')[0]
                if field_base not in keep_fields and field_base in fields:
                    fields[field] = value
            fields = dict([(field, 1) for field, value in fields.items() if
                           value])
        # Only inject the auth_field when not creating new documents.
        if request and request.method not in ('POST', 'PUT'):
            auth_field, request_auth_value = auth_field_and_value(resource)
            if auth_field and request.authorization and request_auth_value:
                if query:
                    # If the query already constrains auth_field with a
                    # different value, deny the request instead of letting
                    # the auth condition silently overwrite it.
                    auth_field_in_query = \
                        self.app.data.query_contains_field(query, auth_field)
                    if auth_field_in_query and \
                            self.app.data.get_value_from_query(
                                query, auth_field) != request_auth_value:
                        abort(401, description=debug_error_message(
                            'Incompatible User-Restricted Resource request. '
                            'Request was for "%s"="%s" but `auth_field` '
                            'requires "%s"="%s".' % (
                                auth_field,
                                self.app.data.get_value_from_query(
                                    query, auth_field),
                                auth_field,
                                request_auth_value)
                        ))
                    else:
                        query = self.app.data.combine_queries(
                            query, {auth_field: request_auth_value}
                        )
                else:
                    query = {auth_field: request_auth_value}
        return datasource, query, fields, sort
| true | true |
f72de4edec828d37bb804254c422746c7c0e35f7 | 5,502 | py | Python | allennlp/data/fields/sequence_label_field.py | purnendu91/allennlp | 7bdc142f3fba9b4b751be4de51299858613f134f | [
"Apache-2.0"
] | null | null | null | allennlp/data/fields/sequence_label_field.py | purnendu91/allennlp | 7bdc142f3fba9b4b751be4de51299858613f134f | [
"Apache-2.0"
] | null | null | null | allennlp/data/fields/sequence_label_field.py | purnendu91/allennlp | 7bdc142f3fba9b4b751be4de51299858613f134f | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List, Union, Set
import logging
from overrides import overrides
import torch
from torch.autograd import Variable
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class SequenceLabelField(Field[torch.Tensor]):
    """
    A ``SequenceLabelField`` assigns a categorical label to each element in a
    :class:`~allennlp.data.fields.sequence_field.SequenceField`.
    Because it's a labeling of some other field, we take that field as input here, and we use it to
    determine our padding and other things.

    This field will get converted into a list of integer class ids, representing the correct class
    for each element in the sequence.

    Parameters
    ----------
    labels : ``Union[List[str], List[int]]``
        A sequence of categorical labels, encoded as strings or integers. These could be POS tags like [NN,
        JJ, ...], BIO tags like [B-PERS, I-PERS, O, O, ...], or any other categorical tag sequence. If the
        labels are encoded as integers, they will not be indexed using a vocab.
    sequence_field : ``SequenceField``
        A field containing the sequence that this ``SequenceLabelField`` is labeling.  Most often, this is a
        ``TextField``, for tagging individual tokens in a sentence.
    label_namespace : ``str``, optional (default='labels')
        The namespace to use for converting tag strings into integers.  We convert tag strings to
        integers for you, and this parameter tells the ``Vocabulary`` object which mapping from
        strings to integers to use (so that "O" as a tag doesn't get the same id as "O" as a word).
    """
    # It is possible that users want to use this field with a namespace which uses OOV/PAD tokens.
    # This warning will be repeated for every instantiation of this class (i.e for every data
    # instance), spewing a lot of warnings so this class variable is used to only log a single
    # warning per namespace.
    _already_warned_namespaces: Set[str] = set()

    def __init__(self,
                 labels: Union[List[str], List[int]],
                 sequence_field: SequenceField,
                 label_namespace: str = 'labels') -> None:
        self.labels = labels
        self.sequence_field = sequence_field
        self._label_namespace = label_namespace
        # Populated by index(); pre-filled below when labels are already ints.
        self._indexed_labels = None
        self._maybe_warn_for_namespace(label_namespace)
        if len(labels) != sequence_field.sequence_length():
            raise ConfigurationError("Label length and sequence length "
                                     "don't match: %d and %d" % (len(labels), sequence_field.sequence_length()))
        if all([isinstance(x, int) for x in labels]):
            # Integer labels need no vocabulary lookup.
            self._indexed_labels = labels
        elif not all([isinstance(x, str) for x in labels]):
            raise ConfigurationError("SequenceLabelFields must be passed either all "
                                     "strings or all ints. Found labels {} with "
                                     "types: {}.".format(labels, [type(x) for x in labels]))

    def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
        # Warn (once per namespace) when the namespace name will not be
        # treated as non-padded by the Vocabulary defaults.
        if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")):
            if label_namespace not in self._already_warned_namespaces:
                logger.warning("Your label namespace was '%s'. We recommend you use a namespace "
                               "ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by "
                               "default to your vocabulary. See documentation for "
                               "`non_padded_namespaces` parameter in Vocabulary.",
                               self._label_namespace)
                self._already_warned_namespaces.add(label_namespace)

    @overrides
    def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
        # Integer labels were never strings, so there is nothing to count.
        if self._indexed_labels is None:
            for label in self.labels:
                counter[self._label_namespace][label] += 1  # type: ignore

    @overrides
    def index(self, vocab: Vocabulary):
        if self._indexed_labels is None:
            self._indexed_labels = [vocab.get_token_index(label, self._label_namespace)  # type: ignore
                                    for label in self.labels]

    @overrides
    def get_padding_lengths(self) -> Dict[str, int]:
        return {'num_tokens': self.sequence_field.sequence_length()}

    @overrides
    def as_tensor(self,
                  padding_lengths: Dict[str, int],
                  cuda_device: int = -1,
                  for_training: bool = True) -> torch.Tensor:
        desired_num_tokens = padding_lengths['num_tokens']
        padded_tags = pad_sequence_to_length(self._indexed_labels, desired_num_tokens)
        # NOTE(review): `volatile` is a pre-0.4 PyTorch API; confirm the
        # targeted torch version before upgrading this code.
        tensor = Variable(torch.LongTensor(padded_tags), volatile=not for_training)
        return tensor if cuda_device == -1 else tensor.cuda(cuda_device)

    @overrides
    def empty_field(self):  # pylint: disable=no-self-use
        # pylint: disable=protected-access
        # The empty field is already "indexed" (with an empty list), so it
        # can be batched without a vocabulary.
        sequence_label_field = SequenceLabelField([], self.sequence_field.empty_field())
        sequence_label_field._indexed_labels = []
        return sequence_label_field
| 50.018182 | 112 | 0.661032 | from typing import Dict, List, Union, Set
import logging
from overrides import overrides
import torch
from torch.autograd import Variable
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import pad_sequence_to_length
from allennlp.data.fields.field import Field
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class SequenceLabelField(Field[torch.Tensor]):
    """ Assigns one categorical label (str or int) to every element of a
    ``SequenceField``; string labels are converted to integer ids via the
    vocabulary namespace given by ``label_namespace``.
    """
    # Tracks namespaces we already warned about, so the warning below is
    # emitted only once per namespace rather than once per data instance.
    _already_warned_namespaces: Set[str] = set()

    def __init__(self,
                 labels: Union[List[str], List[int]],
                 sequence_field: SequenceField,
                 label_namespace: str = 'labels') -> None:
        self.labels = labels
        self.sequence_field = sequence_field
        self._label_namespace = label_namespace
        # Populated by index(); pre-filled below when labels are already ints.
        self._indexed_labels = None
        self._maybe_warn_for_namespace(label_namespace)
        if len(labels) != sequence_field.sequence_length():
            raise ConfigurationError("Label length and sequence length "
                                     "don't match: %d and %d" % (len(labels), sequence_field.sequence_length()))
        if all([isinstance(x, int) for x in labels]):
            # Integer labels need no vocabulary lookup.
            self._indexed_labels = labels
        elif not all([isinstance(x, str) for x in labels]):
            raise ConfigurationError("SequenceLabelFields must be passed either all "
                                     "strings or all ints. Found labels {} with "
                                     "types: {}.".format(labels, [type(x) for x in labels]))

    def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
        """ Warn once per namespace whose name will get default OOV/PAD
        tokens added by the Vocabulary. """
        if not (self._label_namespace.endswith("labels") or self._label_namespace.endswith("tags")):
            if label_namespace not in self._already_warned_namespaces:
                logger.warning("Your label namespace was '%s'. We recommend you use a namespace "
                               "ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by "
                               "default to your vocabulary. See documentation for "
                               "`non_padded_namespaces` parameter in Vocabulary.",
                               self._label_namespace)
                self._already_warned_namespaces.add(label_namespace)

    @overrides
    def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
        # Nothing to count when labels were given as ints.
        if self._indexed_labels is None:
            for label in self.labels:
                counter[self._label_namespace][label] += 1

    @overrides
    def index(self, vocab: Vocabulary):
        """ Convert string labels to integer ids (no-op for int labels). """
        if self._indexed_labels is None:
            self._indexed_labels = [vocab.get_token_index(label, self._label_namespace)
                                    for label in self.labels]

    @overrides
    def get_padding_lengths(self) -> Dict[str, int]:
        return {'num_tokens': self.sequence_field.sequence_length()}

    @overrides
    def as_tensor(self,
                  padding_lengths: Dict[str, int],
                  cuda_device: int = -1,
                  for_training: bool = True) -> torch.Tensor:
        """ Pad the indexed labels to `num_tokens` and wrap them in a
        LongTensor Variable.

        NOTE(review): `volatile` is a pre-0.4 PyTorch API; confirm the
        targeted torch version.
        """
        desired_num_tokens = padding_lengths['num_tokens']
        padded_tags = pad_sequence_to_length(self._indexed_labels, desired_num_tokens)
        tensor = Variable(torch.LongTensor(padded_tags), volatile=not for_training)
        return tensor if cuda_device == -1 else tensor.cuda(cuda_device)

    @overrides
    def empty_field(self):
        # The empty field is pre-indexed (empty list) so it can be batched
        # without a vocabulary.
        sequence_label_field = SequenceLabelField([], self.sequence_field.empty_field())
        sequence_label_field._indexed_labels = []
        return sequence_label_field
| true | true |
f72de5318f9d64110e0d3a420ec657838b3aa7e8 | 1,502 | py | Python | networks/60lines.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | 1 | 2022-01-26T15:19:15.000Z | 2022-01-26T15:19:15.000Z | networks/60lines.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | null | null | null | networks/60lines.py | eym55/power_grid_sim | 35822a2d2f09c00a38841e0bf3395bc5ea0ddbaa | [
"MIT"
] | null | null | null | import pypsa
import numpy as np
import random
import matplotlib.pyplot as plt

# Fixed seed so the randomly generated grid is reproducible.
random.seed(69)

network = pypsa.Network()

# 30 buses placed at random integer coordinates on a 100x100 plane.
for i in range(30):
    network.add("Bus", "Bus {}".format(i))
for i in range(30):
    network.buses.at[network.buses.index[i], 'x'] = random.randint(0, 100)
    network.buses.at[network.buses.index[i], 'y'] = random.randint(0, 100)

# 60 transmission lines described as (bus0, bus1) index pairs.
edges = [(0, 12), (0, 21), (0, 22), (0, 23), (0, 6), (1, 17), (1, 18), (1, 19), (10, 17),
         (10, 24), (10, 3), (11, 3), (11, 8), (12, 2), (12, 22), (12, 24), (12, 3), (12, 6), (12, 8),
         (13, 14), (13, 21), (13, 5), (14, 16), (14, 2), (14, 21), (14, 4), (14, 5), (15, 18), (15, 19),
         (15, 2), (15, 9), (16, 4), (16, 9), (17, 18), (17, 24), (18, 19), (18, 24), (19, 7), (2, 21),
         (2, 24), (2, 6), (2, 9), (20, 9), (21, 5), (21, 6), (22, 25), (22, 8), (24, 3), (25, 8), (3, 8),
         (4, 5), (7, 9), (7, 26), (2, 27), (1, 28), (15, 29), (0, 29), (28, 4), (27, 22), (27, 23)
         ]

for i, (bus0, bus1) in enumerate(edges):
    # "Line {}" fixes the "Linine {}" typo in the original component names.
    network.add("Line", "Line {}".format(i),
                bus0="Bus {}".format(bus0),
                bus1="Bus {}".format(bus1),
                x=0.0001,
                s_nom=60)

# generators on the first 12 buses
for i in range(12):
    network.add("Generator", "Gen {}".format(i),
                bus="Bus {}".format(i),
                p_nom=300,
                marginal_cost=random.randint(25, 75))

# loads on the remaining 18 buses
for i in range(12, 30):
    network.add("Load", f"Load {i}",
                bus=f"Bus {i}",
                p_set=random.randint(25, 125))
network.export_to_netcdf("60line.nc") | 35.761905 | 92 | 0.496671 | import pypsa
import numpy as np
import random
import matplotlib.pyplot as plt

# Fixed seed for a reproducible random grid.
random.seed(69)
network = pypsa.Network()

for i in range(30):
    network.add("Bus", "Bus {}".format(i))
for i in range(30):
    network.buses.at[network.buses.index[i], 'x'] = random.randint(0, 100)
    network.buses.at[network.buses.index[i], 'y'] = random.randint(0, 100)

# (bus0, bus1) index pairs describing the 60 transmission lines.
edges = [(0, 12), (0, 21), (0, 22), (0, 23), (0, 6), (1, 17), (1, 18), (1, 19), (10, 17),
         (10, 24), (10, 3), (11, 3), (11, 8), (12, 2), (12, 22), (12, 24), (12, 3), (12, 6), (12, 8),
         (13, 14), (13, 21), (13, 5), (14, 16), (14, 2), (14, 21), (14, 4), (14, 5), (15, 18), (15, 19),
         (15, 2), (15, 9), (16, 4), (16, 9), (17, 18), (17, 24), (18, 19), (18, 24), (19, 7), (2, 21),
         (2, 24), (2, 6), (2, 9), (20, 9), (21, 5), (21, 6), (22, 25), (22, 8), (24, 3), (25, 8), (3, 8),
         (4, 5), (7, 9), (7, 26), (2, 27), (1, 28), (15, 29), (0, 29), (28, 4), (27, 22), (27, 23)
         ]

for i, (bus0, bus1) in enumerate(edges):
    # "Line {}" fixes the "Linine {}" typo in the component names.
    network.add("Line", "Line {}".format(i),
                bus0="Bus {}".format(bus0),
                bus1="Bus {}".format(bus1),
                x=0.0001,
                s_nom=60)

for i in range(12):
    network.add("Generator", "Gen {}".format(i),
                bus="Bus {}".format(i),
                p_nom=300,
                marginal_cost=random.randint(25, 75))

for i in range(12, 30):
    network.add("Load", f"Load {i}",
                bus=f"Bus {i}",
                p_set=random.randint(25, 125))
network.export_to_netcdf("60line.nc") | true | true |
f72de71bd4141843c398b8f069d0199f6aefb8dd | 37 | py | Python | DaysOfCode/__main__.py | PeterMcD/100-Days-Of-Code-Cli-Tool | cea1be23cd314a5421a3c9ae4587769d6bb75e77 | [
"MIT"
] | null | null | null | DaysOfCode/__main__.py | PeterMcD/100-Days-Of-Code-Cli-Tool | cea1be23cd314a5421a3c9ae4587769d6bb75e77 | [
"MIT"
] | 4 | 2019-01-04T01:49:17.000Z | 2019-01-11T18:22:58.000Z | DaysOfCode/__main__.py | petermcd/100-Days-Of-Code-Cli-Tool | cea1be23cd314a5421a3c9ae4587769d6bb75e77 | [
"MIT"
] | null | null | null | from .bootstrap import main
main()
| 7.4 | 27 | 0.72973 | from .bootstrap import main
main()
| true | true |
f72de7d26b34833391adceb3f86898a1586662a4 | 468 | py | Python | Project_google_task_usage/task_uasge_500_preprocess/data_inverse.py | sssssch/jupyter-examples | cf9e26e22dcfa263bcd26323527911cdbcc2cd61 | [
"MIT"
] | 2 | 2020-07-29T13:07:52.000Z | 2021-01-15T09:22:07.000Z | Project_google_task_usage/task_uasge_500_preprocess/data_inverse.py | sssssch/jupyter-examples | cf9e26e22dcfa263bcd26323527911cdbcc2cd61 | [
"MIT"
] | null | null | null | Project_google_task_usage/task_uasge_500_preprocess/data_inverse.py | sssssch/jupyter-examples | cf9e26e22dcfa263bcd26323527911cdbcc2cd61 | [
"MIT"
] | null | null | null | # -*-coding:utf-8-*-
import pandas as pd
# Explicit import instead of `from numpy import *`, which shadowed the
# built-in round() and pulled the deprecated numpy.mat into scope.
import numpy as np

# Load the raw task-usage table.
# NOTE(review): assumes metrics are rows and samples are columns in
# test_data.csv -- verify against the upstream producer.
dataset = pd.read_csv(
    'test_data.csv', header=None)
dataset = dataset.round(8)

# Transpose so each metric becomes one column of the output table.
inverse = np.asarray(dataset).T
print(inverse)

# One column name per metric, matching the transposed layout.
name = [
    'cpu',
    'cmui',
    'amui',
    'upcmui',
    'tpcmui',
    'mmui',
    'mditi',
    'mldsui',
    'mcui',
    'scui'
]
test = pd.DataFrame(columns=name, data=inverse)
test.to_csv('test_data_inversed_bycode.csv', encoding='gbk', header=None)
| 18 | 73 | 0.628205 |
import pandas as pd
# Explicit import instead of `from numpy import *` (shadowed built-in
# round() and exposed the deprecated numpy.mat).
import numpy as np

dataset = pd.read_csv(
    'test_data.csv', header=None)
dataset = dataset.round(8)

# Transpose: metrics become columns.
inverse = np.asarray(dataset).T
print(inverse)

name = [
    'cpu',
    'cmui',
    'amui',
    'upcmui',
    'tpcmui',
    'mmui',
    'mditi',
    'mldsui',
    'mcui',
    'scui'
]
test = pd.DataFrame(columns=name, data=inverse)
test.to_csv('test_data_inversed_bycode.csv', encoding='gbk', header=None)
| true | true |
f72de8042318229e69f952746599a95b600e0bd4 | 464 | py | Python | python__advanced/02.lists_as_stacks_and_queues_exercise/08.balanced_parentheses.py | EmilianStoyanov/Projects-in-SoftUni | e83996670fe00424a158905d537a7bbbeee8fb59 | [
"MIT"
] | 1 | 2020-07-14T12:32:47.000Z | 2020-07-14T12:32:47.000Z | python__advanced/02.lists_as_stacks_and_queues_exercise/08.balanced_parentheses.py | EmilianStoyanov/Projects-in-SoftUni | e83996670fe00424a158905d537a7bbbeee8fb59 | [
"MIT"
] | null | null | null | python__advanced/02.lists_as_stacks_and_queues_exercise/08.balanced_parentheses.py | EmilianStoyanov/Projects-in-SoftUni | e83996670fe00424a158905d537a7bbbeee8fb59 | [
"MIT"
] | null | null | null | text = input()
save = []
pairs = {
"{": "}",
"[": "]",
"(": ")"}
valid = True
for el in text:
if el in "({[":
save.append(el)
elif el in ")}]":
if save:
current = save[-1]
if pairs[current] == el:
save.pop()
else:
valid = False
break
else:
valid = False
break
if valid:
print("YES")
else:
print("NO")
| 16 | 36 | 0.364224 | text = input()
save = []  # stack of currently-open brackets
pairs = {
    "{": "}",
    "[": "]",
    "(": ")"}
valid = True
for el in text:
    if el in "({[":
        save.append(el)
    elif el in ")}]":
        if save:
            current = save[-1]
            if pairs[current] == el:
                save.pop()
            else:
                # Mismatched pair, e.g. "([)]".
                valid = False
                break
        else:
            # Closing bracket with no opener left.
            valid = False
            break
# Bug fix: also require the stack to be empty; otherwise inputs with
# unmatched opening brackets (e.g. "(((") were wrongly reported balanced.
if valid and not save:
    print("YES")
else:
    print("NO")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.