code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>React Router - Lazy Loading Example using RouterProvider</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html> | html | github | https://github.com/remix-run/react-router | examples/lazy-loading-router-provider/index.html |
""" Test the graphical_lasso module.
"""
import sys
import pytest
import numpy as np
from scipy import linalg
from numpy.testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_less
from sklearn.covariance import (graphical_lasso, GraphicalLasso,
GraphicalLassoCV, empirical_covariance)
from sklearn.datasets import make_sparse_spd_matrix
from io import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graphical_lasso(random_state=0):
    """Smoke-test graphical_lasso/GraphicalLasso on synthetic sparse data.

    Checks that the 'cd' and 'lars' solvers agree, that costs decrease
    monotonically for alpha > 0, and that ``assume_centered`` has no effect
    on already-centered data.
    """
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)
    for alpha in (0., .1, .25):
        covs = dict()
        icovs = dict()
        for method in ('cd', 'lars'):
            cov_, icov_, costs = graphical_lasso(emp_cov, return_costs=True,
                                                 alpha=alpha, mode=method)
            covs[method] = cov_
            icovs[method] = icov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease (doesn't hold if alpha == 0)
            if not alpha == 0:
                assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
        assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
    # Smoke test the estimator; covs holds the results of the last alpha (.25)
    model = GraphicalLasso(alpha=.25).fit(X)
    model.score(X)
    assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
    assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
    # For a centered matrix, assume_centered could be chosen True or False
    # Check that this returns indeed the same result for centered data
    Z = X - X.mean(0)
    precs = list()
    for assume_centered in (False, True):
        prec_ = GraphicalLasso(
            assume_centered=assume_centered).fit(Z).precision_
        precs.append(prec_)
    assert_array_almost_equal(precs[0], precs[1])
def test_graphical_lasso_iris():
    """Both solvers must reproduce the reference R glasso solution on iris."""
    # Reference solution computed with the R glasso package at alpha=1.0
    # (penalize.diagonal must be set to FALSE).
    expected_cov = np.array([
        [0.68112222, 0.0000000, 0.265820, 0.02464314],
        [0.00000000, 0.1887129, 0.000000, 0.00000000],
        [0.26582000, 0.0000000, 3.095503, 0.28697200],
        [0.02464314, 0.0000000, 0.286972, 0.57713289],
    ])
    expected_icov = np.array([
        [1.5190747, 0.000000, -0.1304475, 0.0000000],
        [0.0000000, 5.299055, 0.0000000, 0.0000000],
        [-0.1304475, 0.000000, 0.3498624, -0.1683946],
        [0.0000000, 0.000000, -0.1683946, 1.8164353],
    ])
    emp_cov = empirical_covariance(datasets.load_iris().data)
    for solver in ('cd', 'lars'):
        cov_est, icov_est = graphical_lasso(emp_cov, alpha=1.0,
                                            return_costs=False, mode=solver)
        assert_array_almost_equal(cov_est, expected_cov)
        assert_array_almost_equal(icov_est, expected_icov)
def test_graph_lasso_2D():
    """Both solvers must match the skggm reference on a 2-feature problem."""
    # Reference solution from the Python skggm package, obtained by calling
    # `quic(emp_cov, lam=.1, tol=1e-8)`.
    expected_cov = np.array([[3.09550269, 1.186972],
                             [1.186972, 0.57713289]])
    expected_icov = np.array([[1.52836773, -3.14334831],
                              [-3.14334831, 8.19753385]])
    emp_cov = empirical_covariance(datasets.load_iris().data[:, 2:])
    for solver in ('cd', 'lars'):
        cov_est, icov_est = graphical_lasso(emp_cov, alpha=.1,
                                            return_costs=False, mode=solver)
        assert_array_almost_equal(cov_est, expected_cov)
        assert_array_almost_equal(icov_est, expected_icov)
def test_graphical_lasso_iris_singular():
    """Check graphical_lasso on a rank-deficient empirical covariance."""
    # Small subset of rows to test the rank-deficient case
    # Need to choose samples such that none of the variances are zero
    indices = np.arange(10, 13)
    # Hard-coded solution from R glasso package for alpha=0.01
    cov_R = np.array([
        [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
        [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
        [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
        [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
    ])
    icov_R = np.array([
        [24.42244057, -16.831679593, 0.0, 0.0],
        [-16.83168201, 24.351841681, -6.206896552, -12.5],
        [0.0, -6.206896171, 153.103448276, 0.0],
        [0.0, -12.499999143, 0.0, 462.5]
    ])
    X = datasets.load_iris().data[indices, :]
    emp_cov = empirical_covariance(X)
    for method in ('cd', 'lars'):
        cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False,
                                    mode=method)
        assert_array_almost_equal(cov, cov_R, decimal=5)
        assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graphical_lasso_cv(random_state=1):
    """Smoke-test GraphicalLassoCV, including its verbose mode."""
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        # Restore stdout even if fit() raised
        sys.stdout = orig_stdout
    # Smoke test with specified alphas
    GraphicalLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
# TODO: Remove in 1.1 when grid_scores_ is deprecated
def test_graphical_lasso_cv_grid_scores_and_cv_alphas_deprecated():
    """Check FutureWarnings for the deprecated grid_scores_/cv_alphas_."""
    splits = 4
    n_alphas = 5
    n_refinements = 3
    true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
                         [0.0, 0.4, 0.0, 0.0],
                         [0.2, 0.0, 0.3, 0.1],
                         [0.0, 0.0, 0.1, 0.7]])
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
    cov = GraphicalLassoCV(cv=splits, alphas=n_alphas,
                           n_refinements=n_refinements).fit(X)
    # presumably the +1 is the extra alpha evaluated on the final
    # refinement round — TODO confirm against GraphicalLassoCV docs
    total_alphas = n_refinements * n_alphas + 1
    msg = (r"The grid_scores_ attribute is deprecated in version 0\.24 in "
           r"favor of cv_results_ and will be removed in version 1\.1 "
           r"\(renaming of 0\.26\).")
    with pytest.warns(FutureWarning, match=msg):
        assert cov.grid_scores_.shape == (total_alphas, splits)
    msg = (r"The cv_alphas_ attribute is deprecated in version 0\.24 in "
           r"favor of cv_results_\['alpha'\] and will be removed in version "
           r"1\.1 \(renaming of 0\.26\)")
    with pytest.warns(FutureWarning, match=msg):
        assert len(cov.cv_alphas_) == total_alphas
def test_graphical_lasso_cv_scores():
    """Check cv_results_ keys and that mean/std match the per-split scores."""
    splits = 4
    n_alphas = 5
    n_refinements = 3
    true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
                         [0.0, 0.4, 0.0, 0.0],
                         [0.2, 0.0, 0.3, 0.1],
                         [0.0, 0.0, 0.1, 0.7]])
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
    cov = GraphicalLassoCV(cv=splits, alphas=n_alphas,
                           n_refinements=n_refinements).fit(X)
    cv_results = cov.cv_results_
    # alpha and one for each split
    total_alphas = n_refinements * n_alphas + 1
    keys = ['alphas']
    split_keys = ['split{}_score'.format(i) for i in range(splits)]
    # Every expected key must be present with one entry per evaluated alpha
    for key in keys + split_keys:
        assert key in cv_results
        assert len(cv_results[key]) == total_alphas
    # mean_score/std_score must be the aggregate of the split columns
    cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
    expected_mean = cv_scores.mean(axis=0)
    expected_std = cv_scores.std(axis=0)
    assert_allclose(cov.cv_results_["mean_score"], expected_mean)
    assert_allclose(cov.cv_results_["std_score"], expected_std)
# $Id$
import random
import config_site
import socket
import errno
DEFAULT_ECHO = True
DEFAULT_TRACE = True
DEFAULT_START_SIP_PORT = 50000
# Shared vars
ARGS = [] # arguments containing script module & config
HAS_SND_DEV = config_site.HAS_SND_DEV
# Individual pjsua instance configuration class
class InstanceParam:
    """Configuration for an individual pjsua instance used by a test."""
    # Name to identify this pjsua instance (e.g. "caller", "callee", etc.)
    name = ""
    # pjsua command line arguments, concatenated in string
    arg = ""
    # Specify whether pjsua output should be echoed to stdout
    echo_enabled = DEFAULT_ECHO
    # Enable/disable test tracing
    trace_enabled = DEFAULT_TRACE
    # SIP URI to send request to this instance
    uri = ""
    # SIP port number, zero to automatically assign
    sip_port = 0
    # Does this have registration? If yes then the test function will
    # wait until the UA is registered before doing anything else
    have_reg = False
    # Does this have PUBLISH?
    have_publish = False
    # Enable stdout buffer?
    enable_buffer = False

    @staticmethod
    def _find_free_port():
        """Pick a random SIP port that could be bound at probe time.

        Tries up to 10 random ports. Fixes two defects in the original
        logic: the probe socket was closed twice on a non-EADDRINUSE bind
        error, and such a failed port was then accepted anyway. Now any
        bind failure moves on to the next candidate, and the socket is
        closed exactly once. After 10 failed attempts the last candidate
        is returned (same best-effort fallback as before).
        """
        port = 0
        for _ in range(10):
            port = random.randint(DEFAULT_START_SIP_PORT, 65534)
            s = socket.socket(socket.AF_INET)
            try:
                s.bind(("0.0.0.0", port))
            except socket.error:
                # Port not usable (in use or otherwise); try another one.
                continue
            else:
                break
            finally:
                # Runs on both `continue` and `break`: single close.
                s.close()
        return port

    def __init__(self,
                 name,                          # Instance name
                 arg,                           # Cmd-line arguments
                 uri="",                        # URI
                 uri_param="",                  # Additional URI param
                 sip_port=0,                    # SIP port
                 have_reg=False,                # Have registration?
                 have_publish=False,            # Have publish?
                 echo_enabled=DEFAULT_ECHO,
                 trace_enabled=DEFAULT_TRACE,
                 enable_buffer=False):
        # Instance name
        self.name = name
        # Give random sip_port if it's not specified
        if sip_port == 0:
            self.sip_port = self._find_free_port()
        else:
            self.sip_port = sip_port
        # Autogenerate URI if it's empty.
        self.uri = uri
        if self.uri == "":
            self.uri = "sip:pjsip@127.0.0.1:" + str(self.sip_port)
        # Add uri_param to the URI
        self.uri = self.uri + uri_param
        # Add bracket to the URI
        if self.uri[0] != "<":
            self.uri = "<" + self.uri + ">"
        # Add SIP local port to the argument
        self.arg = arg + " --local-port=" + str(self.sip_port)
        self.have_reg = have_reg
        self.have_publish = have_publish
        # PUBLISH only makes sense with registration enabled
        if have_publish and have_reg and not ("--publish" in self.arg):
            self.arg = self.arg + " --publish"
        self.echo_enabled = echo_enabled
        self.trace_enabled = trace_enabled
        self.enable_buffer = enable_buffer
############################################
# Test parameter class
class TestParam:
    """Parameters describing a single test scenario."""
    # Test title shown in reports
    title = ""
    # List containing InstanceParam objects, one per pjsua instance
    inst_params = []
    # Flag if this test should be skipped
    skip = None
    # List of Expect instances, to be filled at run-time by the test program
    process = []
    # Callable implementing the test body
    test_func = None
    # Callable invoked after the test body
    post_func = None

    def __init__(self, title, inst_params, func=None, skip=False,
                 post_func=None, need_stdout_buffer=False):
        self.title = title
        self.inst_params = inst_params
        self.test_func = func
        self.post_func = post_func
        self.skip = skip
###################################
# TestError exception
class TestError(Exception):
    """Exception raised to signal a test failure.

    Now derives from Exception: the original plain class could not be
    raised or caught with ``except`` on Python 3, where all exceptions
    must inherit from BaseException.
    """
    # Human readable description of the failure
    desc = ""

    def __init__(self, desc):
        # Keep Exception's args populated so str(err) shows the description
        super(TestError, self).__init__(desc)
        self.desc = desc
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace,redefined-builtin
"""
nwid setup
"""
from codecs import open
from setuptools import setup, find_packages
from os import path
from nwid import __version__ as version
# Read the long description from the README (codecs.open handles the
# encoding argument on Python 2 as well).
with open('README.rst', 'r', 'utf-8') as f:
    README = f.read()

# Link to the source tarball for this exact release.
url = 'https://github.com/hbradleyiii/nwid/archive/v{}.tar.gz'\
    .format(version)

setup(
    name='nwid',
    version=version,
    description='A terminal widget framework for humans.',
    long_description=README,
    long_description_content_type='text/x-rst',
    url='https://github.com/hbradleyiii/nwid',
    download_url=url,
    author='Harold Bradley III',
    author_email='harold@prestix.studio',
    license='MIT License',
    keywords=['server development', 'terminal programming', 'terminal',
              'terminal widgets'],
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    packages=find_packages(),
    install_requires=[],
    # FIX: the original used `test_requires`, which setuptools silently
    # ignores as an unknown keyword; the supported keyword is `tests_require`.
    tests_require=['pytest>=3', 'mock'],
    package_data={'': ['LICENSE']},
    entry_points={},
)
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the CurvatureFlowImageFilter
#
from InsightToolkit import *
from sys import argv
# Wire up the pipeline: reader -> cast to float -> curvature flow
# smoothing -> rescale back to unsigned short -> writer.
reader = itkImageFileReaderUS2_New()
writer = itkImageFileWriterUS2_New()
inputCast = itkCastImageFilterUS2F2_New()
outputCast = itkRescaleIntensityImageFilterF2US2_New()
# Renamed from `filter`, which shadowed the Python builtin.
smoother = itkCurvatureFlowImageFilterF2F2_New()

inputCast.SetInput(reader.GetOutput())
smoother.SetInput(inputCast.GetOutput())
outputCast.SetInput(smoother.GetOutput())
writer.SetInput(outputCast.GetOutput())

# argv: 1 = input image, 2 = output image, 3 = iterations, 4 = time step
reader.SetFileName(argv[1])
writer.SetFileName(argv[2])

# Rescale the float result into the full unsigned-short range.
outputCast.SetOutputMinimum(0)
outputCast.SetOutputMaximum(65535)

# FIX: parse the numeric CLI arguments explicitly instead of eval(),
# which executed arbitrary code from the command line.
numberOfIterations = int(argv[3])
timeStep = float(argv[4])
smoother.SetNumberOfIterations(numberOfIterations)
smoother.SetTimeStep(timeStep)

# Triggers the whole pipeline to execute.
writer.Update()
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from copy import deepcopy
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.utils import get_tags
from sklearn.utils.metaestimators import available_if
from sklearn.utils.validation import check_is_fitted
def _estimator_has(attr):
"""Check that final_estimator has `attr`.
Used together with `available_if`.
"""
def check(self):
# raise original `AttributeError` if `attr` does not exist
getattr(self.estimator, attr)
return True
return check
class FrozenEstimator(BaseEstimator):
    """Estimator that wraps a fitted estimator to prevent re-fitting.

    This meta-estimator takes an estimator and freezes it, in the sense that calling
    `fit` on it has no effect. `fit_predict` and `fit_transform` are also disabled.
    All other methods are delegated to the original estimator and original estimator's
    attributes are accessible as well.

    This is particularly useful when you have a fitted or a pre-trained model as a
    transformer in a pipeline, and you'd like `pipeline.fit` to have no effect on this
    step.

    Parameters
    ----------
    estimator : estimator
        The estimator which is to be kept frozen.

    See Also
    --------
    None: No similar entry in the scikit-learn documentation.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.frozen import FrozenEstimator
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = make_classification(random_state=0)
    >>> clf = LogisticRegression(random_state=0).fit(X, y)
    >>> frozen_clf = FrozenEstimator(clf)
    >>> frozen_clf.fit(X, y)  # No-op
    FrozenEstimator(estimator=LogisticRegression(random_state=0))
    >>> frozen_clf.predict(X)  # Predictions from `clf.predict`
    array(...)
    """

    def __init__(self, estimator):
        self.estimator = estimator

    @available_if(_estimator_has("__getitem__"))
    def __getitem__(self, *args, **kwargs):
        """__getitem__ is defined in :class:`~sklearn.pipeline.Pipeline` and \
        :class:`~sklearn.compose.ColumnTransformer`.
        """
        return self.estimator.__getitem__(*args, **kwargs)

    def __getattr__(self, name):
        # `estimator`'s attributes are now accessible except `fit_predict` and
        # `fit_transform`
        if name in ["fit_predict", "fit_transform"]:
            raise AttributeError(f"{name} is not available for frozen estimators.")
        return getattr(self.estimator, name)

    def __sklearn_clone__(self):
        # Cloning a frozen estimator returns the same (still fitted) instance.
        return self

    def __sklearn_is_fitted__(self):
        # Delegate fittedness to the wrapped estimator.
        try:
            check_is_fitted(self.estimator)
            return True
        except NotFittedError:
            return False

    def fit(self, X, y, *args, **kwargs):
        """No-op.

        As a frozen estimator, calling `fit` has no effect.

        Parameters
        ----------
        X : object
            Ignored.

        y : object
            Ignored.

        *args : tuple
            Additional positional arguments. Ignored, but present for API compatibility
            with `self.estimator`.

        **kwargs : dict
            Additional keyword arguments. Ignored, but present for API compatibility
            with `self.estimator`.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Still validate that the wrapped estimator is actually fitted.
        check_is_fitted(self.estimator)
        return self

    def set_params(self, **kwargs):
        """Set the parameters of this estimator.

        The only valid key here is `estimator`. You cannot set the parameters of the
        inner estimator.

        Parameters
        ----------
        **kwargs : dict
            Estimator parameters.

        Returns
        -------
        self : FrozenEstimator
            This estimator.
        """
        estimator = kwargs.pop("estimator", None)
        if estimator is not None:
            self.estimator = estimator
        # Any remaining key targets the inner estimator, which is forbidden.
        if kwargs:
            raise ValueError(
                "You cannot set parameters of the inner estimator in a frozen "
                "estimator since calling `fit` has no effect. You can use "
                "`frozenestimator.estimator.set_params` to set parameters of the inner "
                "estimator."
            )

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Returns a `{"estimator": estimator}` dict. The parameters of the inner
        estimator are not included.

        Parameters
        ----------
        deep : bool, default=True
            Ignored.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        return {"estimator": self.estimator}

    def __sklearn_tags__(self):
        # Copy the wrapped estimator's tags but opt out of common checks.
        tags = deepcopy(get_tags(self.estimator))
        tags._skip_test = True
        return tags
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import timeit
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def composed_sampler(logits, num_samples):
    """Multinomial sampling via the Gumbel-max trick, composed from TF ops.

    Serves as a reference implementation to compare against the native
    multinomial op.
    """
    # [batch size, num classes, num samples]
    unif = random_ops.random_uniform(logits.get_shape().concatenate(
        tensor_shape.TensorShape([num_samples])))
    # Gumbel noise: -log(-log(U)) for U ~ Uniform(0, 1)
    noise = -math_ops.log(-math_ops.log(unif))
    # [batch size, num classes, 1]
    logits = array_ops.expand_dims(logits, -1)
    # argmax over the class axis draws one class per [batch, sample] slot.
    # [batch size, num samples]
    return math_ops.argmax(logits + noise, axis=1)
native_sampler = random_ops.multinomial
class MultinomialTest(test.TestCase):
  """Tests for the native multinomial sampling op."""

  @test_util.run_in_graph_and_eager_modes
  def testSmallEntropy(self):
    """Near-deterministic logits must always select the dominant class."""
    random_seed.set_random_seed(1618)
    for output_dtype in [np.int32, np.int64]:
      with test_util.device(use_gpu=True):
        # A logit value of -10 corresponds to a probability of ~5e-5.
        logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
        num_samples = 1000
        samples = self.evaluate(random_ops.multinomial(
            logits, num_samples, output_dtype=output_dtype))
        self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)

  @test_util.run_deprecated_v1
  def testOneOpMultipleStepsIndependent(self):
    with test_util.use_gpu():
      sample_op1, _ = self._make_ops(10)
      # Consecutive runs shouldn't yield identical output.
      sample1a = self.evaluate(sample_op1)
      sample1b = self.evaluate(sample_op1)
      self.assertFalse(np.equal(sample1a, sample1b).all())

  def testEagerOneOpMultipleStepsIndependent(self):
    with context.eager_mode(), test_util.device(use_gpu=True):
      sample1, sample2 = self._make_ops(10)
      # Consecutive runs shouldn't yield identical output.
      self.assertFalse(np.equal(sample1.numpy(), sample2.numpy()).all())

  def testTwoOpsIndependent(self):
    with test_util.use_gpu():
      sample_op1, sample_op2 = self._make_ops(32)
      sample1, sample2 = self.evaluate([sample_op1, sample_op2])
      # We expect sample1 and sample2 to be independent.
      # 1 in 2^32 chance of this assertion failing.
      self.assertFalse(np.equal(sample1, sample2).all())

  @test_util.run_deprecated_v1
  def testTwoOpsSameSeedDrawSameSequences(self):
    with test_util.use_gpu():
      sample_op1, sample_op2 = self._make_ops(1000, seed=1)
      sample1, sample2 = self.evaluate([sample_op1, sample_op2])
      self.assertAllEqual(sample1, sample2)

  def testLargeLogits(self):
    """Extreme (+/-1000) logits must still yield in-range class indices."""
    for neg in [True, False]:
      with test_util.use_gpu():
        logits = np.array([[1000.] * 5])
        if neg:
          logits *= -1
        samples = self.evaluate(random_ops.multinomial(logits, 10))
        # Sampled classes should be in-range.
        self.assertTrue((samples >= 0).all())
        self.assertTrue((samples < 5).all())

  def testSamplingCorrectness(self):
    """Chi-squared comparison of native vs. composed sampler frequencies."""
    np.random.seed(1618)  # Make it reproducible.
    num_samples = 21000
    rand_probs = self._normalize(np.random.random_sample((10,)))
    rand_probs2 = self._normalize(np.random.random_sample((3, 5)))  # batched
    for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
      probs = np.asarray(probs)
      if len(probs.shape) == 1:
        probs = probs.reshape(1, probs.size)  # singleton batch
      logits = np.log(probs).astype(np.float32)
      composed_freqs = self._do_sampling(logits, num_samples, composed_sampler)
      native_freqs = self._do_sampling(logits, num_samples, native_sampler)
      # the test here is similar to core/lib/random/distribution_sampler_test.cc
      composed_chi2 = self._chi2(probs, composed_freqs)
      native_chi2 = self._chi2(probs, native_freqs)
      composed_native_chi2 = self._chi2(composed_freqs, native_freqs)

      def check(chi2s):
        for chi2 in chi2s:
          self.assertLess(chi2, 1e-3)

      check(composed_chi2)
      check(native_chi2)
      check(composed_native_chi2)

  def _make_ops(self, num_samples, seed=None):
    """Return two independent sample ops over the same distribution."""
    prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]])
    logits = math_ops.log(prob_dist)
    # Two independent sets of samples from the same distribution
    sample_op1 = random_ops.multinomial(logits, num_samples, seed)
    sample_op2 = random_ops.multinomial(logits, num_samples, seed)
    return (sample_op1, sample_op2)

  def _normalize(self, vec):
    # Normalize rows (batched) or the whole vector to sum to 1.
    batched = (len(vec.shape) == 2)
    return vec / vec.sum(axis=1, keepdims=True) if batched else vec / vec.sum()

  def _do_sampling(self, logits, num_samples, sampler):
    """Samples using the supplied sampler and inputs.

    Args:
      logits: Numpy ndarray of shape [batch_size, num_classes].
      num_samples: Int; number of samples to draw.
      sampler: A sampler function that takes (1) a [batch_size, num_classes]
        Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.

    Returns:
      Frequencies from sampled classes; shape [batch_size, num_classes].
    """
    with test_util.use_gpu():
      random_seed.set_random_seed(1618)
      op = sampler(constant_op.constant(logits), num_samples)
      d = self.evaluate(op)

    batch_size, num_classes = logits.shape
    freqs_mat = []
    for i in range(batch_size):
      cnts = dict(collections.Counter(d[i, :]))

      # Requires drawn class labels be in range.
      self.assertLess(max(cnts.keys()), num_classes)
      self.assertGreaterEqual(min(cnts.keys()), 0)

      freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
               for k in range(num_classes)]
      freqs_mat.append(freqs)

    return freqs_mat

  def _chi2(self, expected, actual):
    """Return the per-class chi-squared statistic between distributions."""
    actual = np.asarray(actual)
    expected = np.asarray(expected)
    diff = actual - expected
    chi2 = np.sum(diff * diff / expected, axis=0)
    return chi2

  def testEmpty(self):
    """Zero-sized batch or zero samples must produce an empty result."""
    classes = 5
    with test_util.use_gpu():
      for batch in 0, 3:
        for samples in 0, 7:
          x = self.evaluate(
              random_ops.multinomial(
                  array_ops.zeros([batch, classes]), samples))
          self.assertEqual(x.shape, (batch, samples))

  @test_util.run_deprecated_v1
  def testEmptyClasses(self):
    """Sampling from zero classes must raise an op error."""
    with test_util.use_gpu():
      x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
      with self.assertRaisesOpError("num_classes should be positive"):
        self.evaluate(x)

  def testNegativeMinLogits(self):
    random_seed.set_random_seed(78844)
    with test_util.use_gpu():
      # All classes except the last carry the most negative finite logit,
      # so class 1023 must be drawn every time.
      logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
      num_samples = 1000
      samples = self.evaluate(random_ops.multinomial(logits, num_samples))
      self.assertAllEqual([[1023] * num_samples], samples)
# Benchmarking code
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
  """Time the native multinomial op against the composed Gumbel-max sampler.

  Returns a (native_seconds, composed_seconds) pair measured over
  `num_iters` session runs each.
  """
  np.random.seed(1618)  # Make it reproducible.
  shape = [batch_size, num_classes]
  logits_np = np.random.randn(*shape).astype(np.float32)

  # No CSE/CF: disable graph optimizations so both samplers do real work.
  optimizer_options = config_pb2.OptimizerOptions(
      opt_level=config_pb2.OptimizerOptions.L0)
  config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
      optimizer_options=optimizer_options))

  with session.Session(config=config) as sess:
    logits = constant_op.constant(logits_np, shape=shape)
    native_op = control_flow_ops.group(native_sampler(logits, num_samples))
    composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
    native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
    composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
  return native_dt, composed_dt
class MultinomialBenchmark(test.Benchmark):
  """Benchmark comparing the native op against the composed sampler."""

  def benchmarkNativeOpVsComposedOps(self):
    num_iters = 50
    print("Composition of existing ops vs. Native Multinomial op [%d iters]" %
          num_iters)
    print("BatchSize\tNumClasses\tNumSamples\tsec(composed)\tsec(native)\t"
          "speedup")

    # Sweep a grid of problem sizes and report both timings for each.
    for batch_size in [32, 128]:
      for num_classes in [10000, 100000]:
        for num_samples in [1, 4, 32]:
          n_dt, c_dt = native_op_vs_composed_ops(batch_size, num_classes,
                                                 num_samples, num_iters)
          print("%d\t%d\t%d\t%.3f\t%.3f\t%.2f" % (batch_size, num_classes,
                                                  num_samples, c_dt, n_dt,
                                                  c_dt / n_dt))

          self.report_benchmark(
              name="native_batch%d_classes%d_s%d" %
              (batch_size, num_classes, num_samples),
              iters=num_iters,
              wall_time=n_dt)
          self.report_benchmark(
              name="composed_batch%d_classes%d_s%d" %
              (batch_size, num_classes, num_samples),
              iters=num_iters,
              wall_time=c_dt)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
"""Component to interface with switches that can be controlled remotely."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.const import (
STATE_ON,
SERVICE_TURN_ON,
SERVICE_TURN_OFF,
SERVICE_TOGGLE,
)
from homeassistant.components import group
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "switch"
SCAN_INTERVAL = timedelta(seconds=30)
GROUP_NAME_ALL_SWITCHES = "all switches"
ENTITY_ID_ALL_SWITCHES = group.ENTITY_ID_FORMAT.format("all_switches")
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
PROP_TO_ATTR = {
"current_power_w": ATTR_CURRENT_POWER_W,
"today_energy_kwh": ATTR_TODAY_ENERGY_KWH,
}
DEVICE_CLASS_OUTLET = "outlet"
DEVICE_CLASS_SWITCH = "switch"
DEVICE_CLASSES = [DEVICE_CLASS_OUTLET, DEVICE_CLASS_SWITCH]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass, entity_id=None):
    """Return if the switch is on based on the statemachine.

    Async friendly.
    """
    # Fall back to the all-switches group when no entity is given.
    return hass.states.is_state(entity_id or ENTITY_ID_ALL_SWITCHES, STATE_ON)
async def async_setup(hass, config):
    """Track states and offer events for switches."""
    # Store the component in hass.data so config-entry setup can reach it.
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_SWITCHES
    )
    await component.async_setup(config)

    # Register the standard services, each dispatched to the matching
    # async method on the targeted entities.
    component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
    component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
    component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")

    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry."""
    # Delegate to the EntityComponent stored in hass.data by async_setup.
    return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Delegate to the EntityComponent stored in hass.data by async_setup.
    return await hass.data[DOMAIN].async_unload_entry(entry)
class SwitchDevice(ToggleEntity):
    """Representation of a switch."""

    @property
    def current_power_w(self):
        """Return the current power usage in W."""
        # Base implementation reports no reading; integrations override this.
        return None

    @property
    def today_energy_kwh(self):
        """Return the today total energy usage in kWh."""
        return None

    @property
    def is_standby(self):
        """Return true if device is in standby."""
        return None

    @property
    def state_attributes(self):
        """Return the optional state attributes."""
        data = {}
        for prop, attr in PROP_TO_ATTR.items():
            value = getattr(self, prop)
            # NOTE(review): the truthiness check also drops legitimate zero
            # readings (e.g. 0 W current power) — confirm this is intended.
            if value:
                data[attr] = value
        return data

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return None
import os
import subprocess as sp
from cosmos.api import Cosmos, Dependency, draw_stage_graph, draw_task_graph, \
pygraphviz_available, default_get_submit_args
from functools import partial
from tools import echo, cat, word_count
def recipe(workflow):
    """Build the example DAG on `workflow`: echo -> cat (one2many) ->
    word_count (one2one) -> summary cat (many2one reduce)."""
    # Create two Tasks that echo "hello" and "world" respectively (source nodes of the dag).
    echo_tasks = [workflow.add_task(func=echo,
                                    params=dict(word=word, out_txt='%s.txt' % word),
                                    uid=word)
                  for word in ['hello', 'world']]

    # Split each echo into two dependent Tasks (a one2many relationship).
    word_count_tasks = []
    for echo_task in echo_tasks:
        word = echo_task.params['word']
        for n in [1, 2]:
            cat_task = workflow.add_task(
                func=cat,
                params=dict(in_txts=[echo_task.params['out_txt']],
                            out_txt='%s/%s/cat.txt' % (word, n)),
                parents=[echo_task],
                uid='%s_%s' % (word, n))

            # Count the words in the previous stage. An example of a simple one2one relationship
            # For each task in StageA, there is a single dependent task in StageB.
            word_count_task = workflow.add_task(
                func=word_count,
                # Dependency instances allow you to specify an input and parent simultaneously
                params=dict(in_txts=[Dependency(cat_task, 'out_txt')],
                            out_txt='%s/%s/wc.txt' % (word, n),
                            chars=True),
                # parents=[cat_task], <-- not necessary!
                uid='%s_%s' % (word, n), )
            word_count_tasks.append(word_count_task)

    # Cat the contents of all word_counts into one file. Only one node is being created who's
    # parents are all of the WordCounts (a many2one relationship, aka a reduce operation).
    summarize_task = workflow.add_task(
        func=cat,
        params=dict(in_txts=[Dependency(t, 'out_txt') for t in word_count_tasks],
                    out_txt='summary.txt'),
        parents=word_count_tasks,
        stage_name='Summary_Analysis',
        uid='')  # It's the only Task in this Stage, so doesn't need a specific uid
if __name__ == '__main__':
    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('-drm', default='local', help='', choices=('local', 'drmaa:ge', 'ge'))
    p.add_argument('-q', '--queue', help='Submit to this queue of the DRM supports it')
    args = p.parse_args()

    # Store the workflow database next to this script.
    cosmos = Cosmos('sqlite:///%s/sqlite.db' % os.path.dirname(os.path.abspath(__file__)),
                    # example of how to change arguments if you're NOT using default_drm='local'
                    get_submit_args=partial(default_get_submit_args, parallel_env='smp'),
                    default_drm=args.drm,
                    default_queue=args.queue)
    cosmos.initdb()

    # FIX: invoke mkdir with an argument list instead of a shell string.
    sp.check_call(['mkdir', '-p', 'analysis_output/ex2'])
    os.chdir('analysis_output/ex2')

    workflow = cosmos.start('Example2', restart=True, skip_confirm=True)
    recipe(workflow)

    workflow.make_output_dirs()
    workflow.run(max_attempts=1, max_cores=10)

    if pygraphviz_available:
        # These images can also be seen on the fly in the web-interface
        draw_stage_graph(workflow.stage_graph(), '/tmp/ex1_task_graph.png', format='png')
        draw_task_graph(workflow.task_graph(), '/tmp/ex1_stage_graph.png', format='png')
    else:
        # FIX: the original bare `print '...'` statement is a SyntaxError on
        # Python 3; print() works on both Python 2 and 3.
        print('Pygraphviz is not available :(')
# git-mergetool--lib is a shell library for common merge tool functions

# Directory holding the per-tool definition scriptlets shipped with git.
: ${MERGE_TOOLS_DIR=$(git --exec-path)/mergetools}

# Split words on newlines only, so lists built later may contain spaces.
IFS='
'

# Succeed if the currently loaded tool supports the active $TOOL_MODE.
mode_ok () {
	if diff_mode
	then
		can_diff
	elif merge_mode
	then
		can_merge
	else
		false
	fi
}

# Succeed if tool "$1" translates to a command found on this system.
is_available () {
	merge_tool_path=$(translate_merge_tool_path "$1") &&
	type "$merge_tool_path" >/dev/null 2>&1
}

# Print the names of user-defined tools configured under config section
# "$1" (difftool/mergetool), one per line, each prefixed with "$2".
list_config_tools () {
	section=$1
	line_prefix=${2:-}

	git config --get-regexp $section'\..*\.cmd' |
	while read -r key value
	do
		toolname=${key#$section.}
		toolname=${toolname%.cmd}

		printf "%s%s\n" "$line_prefix" "$toolname"
	done
}
# List all tool names (sorted, one per line, prefixed with $per_line_prefix)
# for which "$condition" evaluates true.  "$preamble" is printed before the
# first match, "$extra_content" after the list, and "$not_found_msg" when
# nothing at all was shown.  Succeeds only if something was printed.
show_tool_names () {
	condition=${1:-true} per_line_prefix=${2:-} preamble=${3:-}
	not_found_msg=${4:-}
	extra_content=${5:-}

	shown_any=
	( cd "$MERGE_TOOLS_DIR" && ls ) | {
		while read scriptname
		do
			setup_tool "$scriptname" 2>/dev/null
			# We need an actual line feed here
			variants="$variants
$(list_tool_variants)"
		done
		variants="$(echo "$variants" | sort -u)"

		for toolname in $variants
		do
			if setup_tool "$toolname" 2>/dev/null &&
				(eval "$condition" "$toolname")
			then
				if test -n "$preamble"
				then
					printf "%s\n" "$preamble"
					preamble=
				fi
				shown_any=yes
				printf "%s%-15s %s\n" "$per_line_prefix" "$toolname" $(diff_mode && diff_cmd_help "$toolname" || merge_cmd_help "$toolname")
			fi
		done

		if test -n "$extra_content"
		then
			if test -n "$preamble"
			then
				# Note: no '\n' here since we don't want a
				# blank line if there is no initial content.
				printf "%s" "$preamble"
				preamble=
			fi
			shown_any=yes
			printf "\n%s\n" "$extra_content"
		fi

		if test -n "$preamble" && test -n "$not_found_msg"
		then
			printf "%s\n" "$not_found_msg"
		fi

		# Exit status of the pipeline: true iff anything was shown.
		test -n "$shown_any"
	}
}
# True when running as "git difftool".
diff_mode () {
	test "$TOOL_MODE" = diff
}

# True when running as "git mergetool".
merge_mode () {
	test "$TOOL_MODE" = merge
}

# Print "true"/"false": whether the GUI variant should be the default, per
# difftool.guiDefault / mergetool.guiDefault.  The value "auto" keys off
# $DISPLAY; a malformed boolean makes this exit with git-config's status.
get_gui_default () {
	if diff_mode
	then
		GUI_DEFAULT_KEY="difftool.guiDefault"
	else
		GUI_DEFAULT_KEY="mergetool.guiDefault"
	fi
	GUI_DEFAULT_CONFIG_LCASE=$(git config --default false --get "$GUI_DEFAULT_KEY" | tr 'A-Z' 'a-z')
	if test "$GUI_DEFAULT_CONFIG_LCASE" = "auto"
	then
		if test -n "$DISPLAY"
		then
			GUI_DEFAULT=true
		else
			GUI_DEFAULT=false
		fi
	else
		GUI_DEFAULT=$(git config --default false --bool --get "$GUI_DEFAULT_KEY")
		subshell_exit_status=$?
		if test $subshell_exit_status -ne 0
		then
			exit $subshell_exit_status
		fi
	fi
	echo $GUI_DEFAULT
}

# Succeed if a GUI tool should be used: $GIT_MERGETOOL_GUI if set, else the
# configured default.  Exits with status 2 on a config error.
gui_mode () {
	if test -z "$GIT_MERGETOOL_GUI"
	then
		GIT_MERGETOOL_GUI=$(get_gui_default)
		if test $? -ne 0
		then
			exit 2
		fi
	fi
	test "$GIT_MERGETOOL_GUI" = true
}

# Default (identity) mapping from tool name to executable path; the
# per-tool scriptlets may override this.
translate_merge_tool_path () {
	echo "$1"
}
# Heuristic for tools whose exit codes cannot be trusted: succeed if
# $MERGED was modified after $BACKUP was taken, otherwise ask the user
# whether the merge succeeded.
check_unchanged () {
	if test "$MERGED" -nt "$BACKUP"
	then
		return 0
	else
		while true
		do
			echo "$MERGED seems unchanged."
			printf "Was the merge successful [y/n]? "
			read answer || return 1
			case "$answer" in
			y*|Y*) return 0 ;;
			n*|N*) return 1 ;;
			esac
		done
	fi
}

# Succeed if "$1" is a built-in tool or has a user-configured command.
valid_tool () {
	setup_tool "$1" 2>/dev/null && return 0
	cmd=$(get_merge_tool_cmd "$1")
	test -n "$cmd"
}

# Load a user-configured ({merge,diff}tool.<tool>.cmd) definition for
# $tool, overriding diff_cmd/merge_cmd.  Fails if no command is configured.
setup_user_tool () {
	merge_tool_cmd=$(get_merge_tool_cmd "$tool")
	test -n "$merge_tool_cmd" || return 1

	diff_cmd () {
		( eval $merge_tool_cmd )
	}

	merge_cmd () {
		( eval $merge_tool_cmd )
	}

	list_tool_variants () {
		echo "$tool"
	}
}
# Load the definition of tool "$1": install fallback callbacks, source the
# built-in scriptlet if one exists (also matching "name<digit>" variants),
# then let user configuration override it.  Validates the variant name and
# that the tool supports the current $TOOL_MODE.
setup_tool () {
	tool="$1"

	# Fallback definitions, to be overridden by tools.
	can_merge () {
		return 0
	}

	can_diff () {
		return 0
	}

	diff_cmd () {
		return 1
	}

	diff_cmd_help () {
		return 0
	}

	merge_cmd () {
		return 1
	}

	merge_cmd_help () {
		return 0
	}

	hide_resolved_enabled () {
		return 0
	}

	translate_merge_tool_path () {
		echo "$1"
	}

	list_tool_variants () {
		echo "$tool"
	}

	# Most tools' exit codes cannot be trusted, so by default we ignore
	# their exit code and check the merged file's modification time in
	# check_unchanged() to determine whether or not the merge was
	# successful.  The return value from run_merge_cmd, by default, is
	# determined by check_unchanged().
	#
	# When a tool's exit code can be trusted then the return value from
	# run_merge_cmd is simply the tool's exit code, and check_unchanged()
	# is not called.
	#
	# The return value of exit_code_trustable() tells us whether or not we
	# can trust the tool's exit code.
	#
	# User-defined and built-in tools default to false.
	# Built-in tools advertise that their exit code is trustable by
	# redefining exit_code_trustable() to true.
	exit_code_trustable () {
		false
	}

	if test -f "$MERGE_TOOLS_DIR/$tool"
	then
		. "$MERGE_TOOLS_DIR/$tool"
	elif test -f "$MERGE_TOOLS_DIR/${tool%[0-9]}"
	then
		# e.g. "meld3" falls back to the "meld" scriptlet.
		. "$MERGE_TOOLS_DIR/${tool%[0-9]}"
	else
		setup_user_tool
		rc=$?
		if test $rc -ne 0
		then
			echo >&2 "error: ${TOOL_MODE}tool.$tool.cmd not set for tool '$tool'"
		fi
		return $rc
	fi

	# Now let the user override the default command for the tool.  If
	# they have not done so then this will return 1 which we ignore.
	setup_user_tool

	if ! list_tool_variants | grep -q "^$tool$"
	then
		echo "error: unknown tool variant '$tool'" >&2
		return 1
	fi

	if merge_mode && ! can_merge
	then
		echo "error: '$tool' can not be used to resolve merges" >&2
		return 1
	elif diff_mode && ! can_diff
	then
		echo "error: '$tool' can only be used to resolve merges" >&2
		return 1
	fi
	return 0
}
# Print the user-configured command line for tool "$1", preferring
# difftool.<tool>.cmd over mergetool.<tool>.cmd in diff mode.
get_merge_tool_cmd () {
	merge_tool="$1"
	if diff_mode
	then
		git config "difftool.$merge_tool.cmd" ||
		git config "mergetool.$merge_tool.cmd"
	else
		git config "mergetool.$merge_tool.cmd"
	fi
}

# Print "true" if the exit code of tool "$1" can be trusted: the
# mergetool.<tool>.trustExitCode config wins, then the tool's own
# exit_code_trustable(), otherwise "false".
trust_exit_code () {
	if git config --bool "mergetool.$1.trustExitCode"
	then
		:; # OK
	elif exit_code_trustable
	then
		echo true
	else
		echo false
	fi
}

initialize_merge_tool () {
	# Bring tool-specific functions into scope
	setup_tool "$1" || return 1
}

# Entry point for running tools
run_merge_tool () {
	# If GIT_PREFIX is empty then we cannot use it in tools
	# that expect to be able to chdir() to its value.
	GIT_PREFIX=${GIT_PREFIX:-.}
	export GIT_PREFIX

	merge_tool_path=$(get_merge_tool_path "$1") || exit
	base_present="$2"

	# Dispatch on $TOOL_MODE to the diff or merge variant.
	if merge_mode
	then
		run_merge_cmd "$1"
	else
		run_diff_cmd "$1"
	fi
}
# Run either a configured or a built-in diff tool
run_diff_cmd () {
	diff_cmd "$1"
}

# Run either a configured or a built-in merge tool
run_merge_cmd () {
	mergetool_trust_exit_code=$(trust_exit_code "$1")
	if test "$mergetool_trust_exit_code" = "true"
	then
		merge_cmd "$1"
	else
		# Unreliable exit code: snapshot a timestamp and fall back to
		# the check_unchanged() modification-time heuristic.
		touch "$BACKUP"
		merge_cmd "$1"
		check_unchanged
	fi
}
# Build $tools: an ordered, space-separated list of tool names to try when
# nothing is configured.  Ordering depends on mode, desktop environment
# ($DISPLAY / $GNOME_DESKTOP_SESSION_ID) and the user's $VISUAL/$EDITOR.
list_merge_tool_candidates () {
	if merge_mode
	then
		tools="tortoisemerge"
	else
		tools="kompare"
	fi
	if test -n "$DISPLAY"
	then
		if test -n "$GNOME_DESKTOP_SESSION_ID"
		then
			tools="meld opendiff kdiff3 tkdiff xxdiff $tools"
		else
			tools="opendiff kdiff3 tkdiff xxdiff meld $tools"
		fi
		tools="$tools gvimdiff diffuse diffmerge ecmerge"
		tools="$tools p4merge araxis bc codecompare"
		tools="$tools smerge"
	fi
	case "${VISUAL:-$EDITOR}" in
	*nvim*)
		tools="$tools nvimdiff vimdiff emerge"
		;;
	*vim*)
		tools="$tools vimdiff nvimdiff emerge"
		;;
	*)
		tools="$tools emerge vimdiff nvimdiff"
		;;
	esac
}

# Implement "--tool-help": print available, unavailable and user-defined
# tools for the current mode, then exit 0.
show_tool_help () {
	tool_opt="'git ${TOOL_MODE}tool --tool=<tool>'"

	tab='	'
	LF='
'
	any_shown=no

	cmd_name=${TOOL_MODE}tool
	config_tools=$({
		diff_mode && list_config_tools difftool "$tab$tab"
		list_config_tools mergetool "$tab$tab"
	} | sort)
	extra_content=
	if test -n "$config_tools"
	then
		extra_content="${tab}user-defined:${LF}$config_tools"
	fi

	show_tool_names 'mode_ok && is_available' "$tab$tab" \
		"$tool_opt may be set to one of the following:" \
		"No suitable tool for 'git $cmd_name --tool=<tool>' found." \
		"$extra_content" &&
	any_shown=yes

	show_tool_names 'mode_ok && ! is_available' "$tab$tab" \
		"${LF}The following tools are valid, but not currently available:" &&
	any_shown=yes

	if test "$any_shown" = yes
	then
		echo
		echo "Some of the tools listed above only work in a windowed"
		echo "environment. If run in a terminal-only session, they will fail."
	fi
	exit 0
}
# No tool configured: warn, then print the first candidate that is
# installed on this system.  Fails if none of the candidates is available.
guess_merge_tool () {
	list_merge_tool_candidates
	cat >&2 <<-EOF
This message is displayed because '$TOOL_MODE.tool' is not configured.
See 'git ${TOOL_MODE}tool --tool-help' or 'git help config' for more details.
'git ${TOOL_MODE}tool' will now attempt to use one of the following tools:
$tools
EOF

	# Loop over each candidate and stop when a valid merge tool is found.
	IFS=' '
	for tool in $tools
	do
		is_available "$tool" && echo "$tool" && return 0
	done

	echo >&2 "No known ${TOOL_MODE} tool is available."
	return 1
}

# Print the configured tool name for the current mode (and GUI setting),
# consulting diff/merge (gui)tool keys in priority order.  Returns 1 with a
# warning if the configured name does not resolve to a known tool.
get_configured_merge_tool () {
	keys=
	if diff_mode
	then
		if gui_mode
		then
			keys="diff.guitool merge.guitool diff.tool merge.tool"
		else
			keys="diff.tool merge.tool"
		fi
	else
		if gui_mode
		then
			keys="merge.guitool merge.tool"
		else
			keys="merge.tool"
		fi
	fi

	merge_tool=$(
		IFS=' '
		for key in $keys
		do
			selected=$(git config $key)
			if test -n "$selected"
			then
				echo "$selected"
				# NOTE: this return only exits the $( ) subshell.
				return
			fi
		done)

	if test -n "$merge_tool" && ! valid_tool "$merge_tool"
	then
		# NOTE(review): $gui_prefix is expected to be set by the caller
		# (it is not defined in this library); verify at call sites.
		echo >&2 "git config option $TOOL_MODE.${gui_prefix}tool set to unknown tool: $merge_tool"
		echo >&2 "Resetting to default..."
		return 1
	fi

	echo "$merge_tool"
}
# Print the executable path for tool "$1": the configured
# {diff,merge}tool.<tool>.path wins, else the tool's own translation.
# Exits 1 if the tool is unknown or the command cannot be found.
get_merge_tool_path () {
	# A merge tool has been set, so verify that it's valid.
	merge_tool="$1"
	if ! valid_tool "$merge_tool"
	then
		echo >&2 "Unknown $TOOL_MODE tool $merge_tool"
		exit 1
	fi
	if diff_mode
	then
		merge_tool_path=$(git config difftool."$merge_tool".path ||
				  git config mergetool."$merge_tool".path)
	else
		merge_tool_path=$(git config mergetool."$merge_tool".path)
	fi
	if test -z "$merge_tool_path"
	then
		merge_tool_path=$(translate_merge_tool_path "$merge_tool")
	fi
	# A user-configured .cmd is trusted as-is; otherwise the resolved
	# path must exist as a command.
	if test -z "$(get_merge_tool_cmd "$merge_tool")" &&
		! type "$merge_tool_path" >/dev/null 2>&1
	then
		echo >&2 "The $TOOL_MODE tool $merge_tool is not available as"\
			 "'$merge_tool_path'"
		exit 1
	fi
	echo "$merge_tool_path"
}

# Print the tool to use: configured tool if any, otherwise a guess.
# Exit status: 0 if configured, non-zero if the name had to be guessed
# (or propagated from a config error).
get_merge_tool () {
	is_guessed=false
	# Check if a merge tool has been configured
	merge_tool=$(get_configured_merge_tool)
	subshell_exit_status=$?
	if test $subshell_exit_status -gt "1"
	then
		exit $subshell_exit_status
	fi
	# Try to guess an appropriate merge tool if no tool has been set.
	if test -z "$merge_tool"
	then
		merge_tool=$(guess_merge_tool) || exit
		is_guessed=true
	fi
	echo "$merge_tool"
	test "$is_guessed" = false
}
# Locate a Windows executable "$1": prefer $PATH, then search
# "<ProgramFiles*>/$2/$1" for every Program Files variant in the
# environment.  Always prints something; falls back to the bare name.
mergetool_find_win32_cmd () {
	executable=$1
	sub_directory=$2

	# Use $executable if it exists in $PATH
	if type -p "$executable" >/dev/null 2>&1
	then
		printf '%s' "$executable"
		return
	fi

	# Look for executable in the typical locations
	# (PROGRAMFILES, PROGRAMFILES(X86), PROGRAMW6432)
	for directory in $(env | grep -Ei '^PROGRAM(FILES(\(X86\))?|W6432)=' |
		cut -d '=' -f 2- | sort -u)
	do
		if test -n "$directory" && test -x "$directory/$sub_directory/$executable"
		then
			printf '%s' "$directory/$sub_directory/$executable"
			return
		fi
	done

	printf '%s' "$executable"
}
from PIL import Image
from PIL import ImageFile
from io import BytesIO
from PIL import _webp
_VALID_WEBP_MODES = {
"RGB": True,
"RGBA": True,
}
_VP8_MODES_BY_IDENTIFIER = {
b"VP8 ": "RGB",
b"VP8X": "RGBA",
b"VP8L": "RGBA", # lossless
}
def _accept(prefix):
is_riff_file_format = prefix[:4] == b"RIFF"
is_webp_file = prefix[8:12] == b"WEBP"
is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER
return is_riff_file_format and is_webp_file and is_valid_vp8_mode
class WebPImageFile(ImageFile.ImageFile):
    # Plugin identification used by PIL's Image.open() dispatch.
    format = "WEBP"
    format_description = "WebP image"

    def _open(self):
        # _webp.WebPDecode returns raw pixel data plus metadata in one call;
        # it also determines the image mode for us.
        data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode(self.fp.read())

        if icc_profile:
            self.info["icc_profile"] = icc_profile
        if exif:
            self.info["exif"] = exif

        self.size = width, height
        # Replace the file pointer with the already-decoded pixels so the
        # generic "raw" tile loader below can serve them.
        self.fp = BytesIO(data)
        self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]

    def _getexif(self):
        # Reuse the JPEG plugin's EXIF parser on the bytes stored above.
        from PIL.JpegImagePlugin import _getexif
        return _getexif(self)
def _save(im, fp, filename):
    """Encode *im* as WebP and write the result to *fp*.

    Raises IOError when the image mode is unsupported or the encoder
    returns no data.
    """
    if im.mode not in _VALID_WEBP_MODES:
        raise IOError("cannot write mode %s as WEBP" % im.mode)

    # Encoder options come from Image.save(**kwargs) via encoderinfo.
    options = im.encoderinfo
    lossless = options.get("lossless", False)
    quality = options.get("quality", 80)
    icc_profile = options.get("icc_profile", "")
    exif = options.get("exif", "")

    data = _webp.WebPEncode(
        im.tobytes(),
        im.size[0],
        im.size[1],
        lossless,
        float(quality),
        im.mode,
        icc_profile,
        exif
    )
    if data is None:
        raise IOError("cannot write file as WEBP (encoder returned None)")

    fp.write(data)
# Hook the plugin into PIL's registry: header sniffing via _accept(), the
# save handler, the ".webp" extension and the MIME type.
Image.register_open("WEBP", WebPImageFile, _accept)
Image.register_save("WEBP", _save)

Image.register_extension("WEBP", ".webp")
Image.register_mime("WEBP", "image/webp")
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.testing;
import com.google.common.annotations.GwtCompatible;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.jspecify.annotations.NullMarked;
/**
* Simple utility for when you want to create a {@link TearDown} that may throw an exception but
* should not fail a test when it does. (The behavior of a {@code TearDown} that throws an exception
* varies; see its documentation for details.) Use it just like a {@code TearDown}, except override
* {@link #sloppyTearDown()} instead.
*
* @author Luiz-Otavio Zorzella
* @since 10.0
*/
@GwtCompatible
@NullMarked
public abstract class SloppyTearDown implements TearDown {
private static final Logger logger = Logger.getLogger(SloppyTearDown.class.getName());
@Override
public final void tearDown() {
try {
sloppyTearDown();
} catch (Throwable t) {
logger.log(Level.INFO, "exception thrown during tearDown: " + t.getMessage(), t);
}
}
public abstract void sloppyTearDown() throws Exception;
} | java | github | https://github.com/google/guava | android/guava-testlib/src/com/google/common/testing/SloppyTearDown.java |
---
- hosts: localhost
gather_facts: no
tasks:
- include_role:
name: import_template_handler_names
tags:
- lazy_evaluation
- evaluation_time | unknown | github | https://github.com/ansible/ansible | test/integration/targets/handlers/58841.yml |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Payment Sale module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models | unknown | codeparrot/codeparrot-clean | ||
import unittest
from mako.lexer import Lexer
from mako import exceptions, util
from util import flatten_result, result_lines
from mako.template import Template
import re
from test import TemplateTest, template_base, skip_if, eq_, assert_raises_message
# create fake parsetree classes which are constructed
# exactly as the repr() of a real parsetree object.
# this allows us to use a Python construct as the source
# of a comparable repr(), which is also hit by the 2to3 tool.
def repr_arg(x):
    """repr() helper used by the fake node classes: dicts are rendered
    with deterministically sorted keys so generated reprs are stable."""
    if not isinstance(x, dict):
        return repr(x)
    return util.sorted_dict_repr(x)
from mako import parsetree

# For every real parsetree Node subclass, generate (via Python 2 exec) a
# fake stand-in class of the same name whose repr() mirrors its constructor
# call.  Tests can then build expected trees as plain Python expressions and
# compare them to real parse results through repr() (see _compare below the
# class header).  The fakes survive 2to3 because they are ordinary code.
for cls in parsetree.__dict__.values():
    if isinstance(cls, type) and \
            issubclass(cls, parsetree.Node):
        clsname = cls.__name__
        # Python 2 exec-statement form; defines <clsname> in this module.
        exec ("""
class %s(object):
    def __init__(self, *args):
        self.args = args
    def __repr__(self):
        return "%%s(%%s)" %% (
            self.__class__.__name__,
            ", ".join(repr_arg(x) for x in self.args)
        )
""" % clsname) in locals()
# NOTE: most assertion expressions were generated, then formatted
# by PyTidy, hence the dense formatting.
class LexerTest(TemplateTest):
    def _compare(self, node, expected):
        # Structural equality via repr(): the generated fake node classes
        # above print exactly like real parsetree nodes.
        eq_(repr(node), repr(expected))
def test_text_and_tag(self):
template = """
<b>Hello world</b>
<%def name="foo()">
this is a def.
</%def>
and some more text.
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({},
[Text(u'''\n<b>Hello world</b>\n ''', (1,
1)), DefTag(u'def', {u'name': u'foo()'}, (3, 9),
[Text(u'''\n this is a def.\n ''',
(3, 28))]),
Text(u'''\n \n and some more text.\n''',
(5, 16))]))
def test_unclosed_tag(self):
template = """
<%def name="foo()">
other text
"""
try:
nodes = Lexer(template).parse()
assert False
except exceptions.SyntaxException, e:
assert str(e) == "Unclosed tag: <%def> at line: 5 char: 9"
def test_onlyclosed_tag(self):
template = \
"""
<%def name="foo()">
foo
</%def>
</%namespace>
hi.
"""
self.assertRaises(exceptions.SyntaxException,
Lexer(template).parse)
def test_noexpr_allowed(self):
template = \
"""
<%namespace name="${foo}"/>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_unmatched_tag(self):
template = \
"""
<%namespace name="bar">
<%def name="foo()">
foo
</%namespace>
</%def>
hi.
"""
self.assertRaises(exceptions.SyntaxException,
Lexer(template).parse)
def test_nonexistent_tag(self):
template = """
<%lala x="5"/>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_wrongcase_tag(self):
template = \
"""
<%DEF name="foo()">
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_percent_escape(self):
template = \
"""
%% some whatever.
%% more some whatever
% if foo:
% endif
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text(u'''\n \n''',
(1, 1)), Text(u'''% some whatever.\n\n''', (3, 2)),
Text(u' %% more some whatever\n', (5, 2)),
ControlLine(u'if', u'if foo:', False, (6, 1)),
ControlLine(u'if', u'endif', True, (7, 1)),
Text(u' ', (8, 1))]))
def test_text_tag(self):
template = \
"""
## comment
% if foo:
hi
% endif
<%text>
# more code
% more code
<%illegal compionent>/></>
<%def name="laal()">def</%def>
</%text>
<%def name="foo()">this is foo</%def>
% if bar:
code
% endif
"""
node = Lexer(template).parse()
self._compare(node,
TemplateNode({}, [Text(u'\n', (1, 1)),
Comment(u'comment', (2, 1)),
ControlLine(u'if', u'if foo:', False, (3, 1)),
Text(u' hi\n', (4, 1)),
ControlLine(u'if', u'endif', True, (5, 1)),
Text(u' ', (6, 1)), TextTag(u'text', {},
(6, 9),
[Text(u'''\n # more code\n '''
'''\n % more code\n '''
'''<%illegal compionent>/></>\n '''
'''<%def name="laal()">def</%def>\n '''
''' \n \n ''',
(6, 16))]), Text(u'''
''', (14, 17)),
DefTag(u'def', {u'name': u'foo()'}, (16, 9),
[Text(u'this is foo', (16, 28))]),
Text(u'''\n \n''', (16, 46)),
ControlLine(u'if', u'if bar:', False, (18, 1)),
Text(u' code\n', (19, 1)),
ControlLine(u'if', u'endif', True, (20, 1)),
Text(u' ', (21, 1))]))
def test_def_syntax(self):
template = \
"""
<%def lala>
hi
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_def_syntax_2(self):
template = \
"""
<%def name="lala">
hi
</%def>
"""
self.assertRaises(exceptions.CompileException,
Lexer(template).parse)
def test_whitespace_equals(self):
template = \
"""
<%def name = "adef()" >
adef
</%def>
"""
node = Lexer(template).parse()
self._compare(node, TemplateNode({}, [Text(u'\n ',
(1, 1)), DefTag(u'def', {u'name': u'adef()'}, (2,
13),
[Text(u'''\n adef\n ''',
(2, 36))]), Text(u'\n ', (4, 20))]))
def test_ns_tag_closed(self):
template = \
"""
<%self:go x="1" y="2" z="${'hi' + ' ' + 'there'}"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
CallNamespaceTag(u'self:go', {u'x': u'1', u'y'
: u'2', u'z': u"${'hi' + ' ' + 'there'}"}, (3,
13), []), Text(u'\n ', (3, 64))]))
def test_ns_tag_empty(self):
template = \
"""
<%form:option value=""></%form:option>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), CallNamespaceTag(u'form:option',
{u'value': u''}, (2, 13), []), Text(u'\n '
, (2, 51))]))
def test_ns_tag_open(self):
template = \
"""
<%self:go x="1" y="${process()}">
this is the body
</%self:go>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
CallNamespaceTag(u'self:go', {u'x': u'1', u'y'
: u'${process()}'}, (3, 13),
[Text(u'''
this is the body
''',
(3, 46))]), Text(u'\n ', (5, 24))]))
def test_expr_in_attribute(self):
"""test some slightly trickier expressions.
you can still trip up the expression parsing, though, unless we
integrated really deeply somehow with AST."""
template = \
"""
<%call expr="foo>bar and 'lala' or 'hoho'"/>
<%call expr='foo<bar and hoho>lala and "x" + "y"'/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), CallTag(u'call', {u'expr'
: u"foo>bar and 'lala' or 'hoho'"}, (2, 13), []),
Text(u'\n ', (2, 57)), CallTag(u'call'
, {u'expr': u'foo<bar and hoho>lala and "x" + "y"'
}, (3, 13), []), Text(u'\n ', (3, 64))]))
def test_pagetag(self):
template = \
"""
<%page cached="True", args="a, b"/>
some template
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n ',
(1, 1)), PageTag(u'page', {u'args': u'a, b',
u'cached': u'True'}, (2, 13), []),
Text(u'''
some template
''',
(2, 48))]))
def test_nesting(self):
template = \
"""
<%namespace name="ns">
<%def name="lala(hi, there)">
<%call expr="something()"/>
</%def>
</%namespace>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''
''', (1, 1)),
NamespaceTag(u'namespace', {u'name': u'ns'}, (3,
9), [Text(u'\n ', (3, 31)),
DefTag(u'def', {u'name': u'lala(hi, there)'}, (4,
13), [Text(u'\n ', (4, 42)),
CallTag(u'call', {u'expr': u'something()'}, (5,
17), []), Text(u'\n ', (5, 44))]),
Text(u'\n ', (6, 20))]),
Text(u'''
''', (7, 22))]))
if util.py3k:
def test_code(self):
template = \
"""text
<%
print("hi")
for x in range(1,5):
print(x)
%>
more text
<%!
import foo
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [
Text(u'text\n ', (1, 1)),
Code(u'\nprint("hi")\nfor x in range(1,5):\n '
'print(x)\n \n', False, (2, 5)),
Text(u'\nmore text\n ', (6, 7)),
Code(u'\nimport foo\n \n', True, (8, 5)),
Text(u'\n', (10, 7))])
)
else:
def test_code(self):
template = \
"""text
<%
print "hi"
for x in range(1,5):
print x
%>
more text
<%!
import foo
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes,
TemplateNode({}, [
Text(u'text\n ', (1, 1)),
Code(u'\nprint "hi"\nfor x in range(1,5):\n '
'print x\n \n', False, (2, 5)),
Text(u'\nmore text\n ', (6, 7)),
Code(u'\nimport foo\n \n', True, (8, 5)),
Text(u'\n', (10, 7))])
)
def test_code_and_tags(self):
template = \
"""
<%namespace name="foo">
<%def name="x()">
this is x
</%def>
<%def name="y()">
this is y
</%def>
</%namespace>
<%
result = []
data = get_data()
for x in data:
result.append(x+7)
%>
result: <%call expr="foo.x(result)"/>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
NamespaceTag(u'namespace', {u'name': u'foo'}, (2,
1), [Text(u'\n ', (2, 24)), DefTag(u'def',
{u'name': u'x()'}, (3, 5),
[Text(u'''\n this is x\n ''', (3, 22))]),
Text(u'\n ', (5, 12)), DefTag(u'def', {u'name'
: u'y()'}, (6, 5),
[Text(u'''\n this is y\n ''', (6, 22))]),
Text(u'\n', (8, 12))]), Text(u'''\n\n''', (9, 14)),
Code(u'''\nresult = []\ndata = get_data()\n'''
'''for x in data:\n result.append(x+7)\n\n''',
False, (11, 1)), Text(u'''\n\n result: ''', (16,
3)), CallTag(u'call', {u'expr': u'foo.x(result)'
}, (18, 13), []), Text(u'\n', (18, 42))]))
def test_expression(self):
template = \
"""
this is some ${text} and this is ${textwith | escapes, moreescapes}
<%def name="hi()">
give me ${foo()} and ${bar()}
</%def>
${hi()}
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'\n this is some ', (1, 1)),
Expression(u'text', [], (2, 22)),
Text(u' and this is ', (2, 29)),
Expression(u'textwith ', ['escapes', 'moreescapes'
], (2, 42)), Text(u'\n ', (2, 76)),
DefTag(u'def', {u'name': u'hi()'}, (3, 9),
[Text(u'\n give me ', (3, 27)),
Expression(u'foo()', [], (4, 21)), Text(u' and ',
(4, 29)), Expression(u'bar()', [], (4, 34)),
Text(u'\n ', (4, 42))]), Text(u'\n '
, (5, 16)), Expression(u'hi()', [], (6, 9)),
Text(u'\n', (6, 16))]))
def test_tricky_expression(self):
template = """
${x and "|" or "hi"}
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'\n \n ', (1, 1)),
Expression(u'x and "|" or "hi"', [], (3, 13)),
Text(u'\n ', (3, 33))
])
)
template = """
${hello + '''heres '{|}' text | | }''' | escape1}
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'\n \n ', (1, 1)),
Expression(u"hello + '''heres '{|}' text | | }''' ",
['escape1'], (3, 13)),
Text(u'\n ', (3, 62))
])
)
def test_tricky_code(self):
if util.py3k:
template = """<% print('hi %>') %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"print('hi %>') \n", False, (1, 1))]))
else:
template = """<% print 'hi %>' %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"print 'hi %>' \n", False, (1, 1))]))
def test_tricky_code_2(self):
template = \
"""<%
# someone's comment
%>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""
# someone's comment
""",
False, (1, 1)), Text(u'\n ', (3, 11))]))
if util.py3k:
def test_tricky_code_3(self):
template = \
"""<%
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
there
''')
# someone else's comment
%> '''and now some text '''"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""
print('hi')
# this is a comment
# another comment
x = 7 # someone's '''comment
print('''
there
''')
# someone else's comment
""",
False, (1, 1)),
Text(u" '''and now some text '''", (10,
11))]))
else:
def test_tricky_code_3(self):
template = \
"""<%
print 'hi'
# this is a comment
# another comment
x = 7 # someone's '''comment
print '''
there
'''
# someone else's comment
%> '''and now some text '''"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""\nprint 'hi'\n# this is a comment\n"""
"""# another comment\nx = 7 """
"""# someone's '''comment\nprint '''\n """
"""there\n '''\n# someone else's """
"""comment\n \n""",
False, (1, 1)),
Text(u" '''and now some text '''", (10,11))]))
def test_tricky_code_4(self):
template = \
"""<% foo = "\\"\\\\" %>"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Code(u"""foo = "\\"\\\\" \n""",
False, (1, 1))]))
def test_tricky_code_5(self):
template = \
"""before ${ {'key': 'value'} } after"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'before ', (1, 1)),
Expression(u" {'key': 'value'} ", [], (1, 8)),
Text(u' after', (1, 29))]))
def test_control_lines(self):
template = \
"""
text text la la
% if foo():
mroe text la la blah blah
% endif
and osme more stuff
% for l in range(1,5):
tex tesl asdl l is ${l} kfmas d
% endfor
tetx text
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({},
[Text(u'''\ntext text la la\n''', (1, 1)),
ControlLine(u'if', u'if foo():', False, (3, 1)),
Text(u' mroe text la la blah blah\n', (4, 1)),
ControlLine(u'if', u'endif', True, (5, 1)),
Text(u'''\n and osme more stuff\n''', (6,
1)), ControlLine(u'for', u'for l in range(1,5):',
False, (8, 1)), Text(u' tex tesl asdl l is ',
(9, 1)), Expression(u'l', [], (9, 24)),
Text(u' kfmas d\n', (9, 28)), ControlLine(u'for',
u'endfor', True, (10, 1)),
Text(u''' tetx text\n \n''', (11, 1))]))
def test_control_lines_2(self):
template = \
"""% for file in requestattr['toc'].filenames:
x
% endfor
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [ControlLine(u'for',
u"for file in requestattr['toc'].filenames:",
False, (1, 1)), Text(u' x\n', (2, 1)),
ControlLine(u'for', u'endfor', True, (3, 1))]))
def test_long_control_lines(self):
template = \
"""
% for file in \\
requestattr['toc'].filenames:
x
% endfor
"""
nodes = Lexer(template).parse()
self._compare(
nodes,
TemplateNode({}, [
Text(u'\n', (1, 1)),
ControlLine(u'for', u"for file in \\\n "
"requestattr['toc'].filenames:",
False, (2, 1)),
Text(u' x\n', (4, 1)),
ControlLine(u'for', u'endfor', True, (5, 1)),
Text(u' ', (6, 1))
])
)
def test_unmatched_control(self):
template = """
% if foo:
% for x in range(1,5):
% endif
"""
assert_raises_message(
exceptions.SyntaxException,
"Keyword 'endif' doesn't match keyword 'for' at line: 5 char: 1",
Lexer(template).parse
)
def test_unmatched_control_2(self):
template = """
% if foo:
% for x in range(1,5):
% endfor
"""
assert_raises_message(
exceptions.SyntaxException,
"Unterminated control keyword: 'if' at line: 3 char: 1",
Lexer(template).parse
)
def test_unmatched_control_3(self):
template = """
% if foo:
% for x in range(1,5):
% endlala
% endif
"""
assert_raises_message(
exceptions.SyntaxException,
"Keyword 'endlala' doesn't match keyword 'for' at line: 5 char: 1",
Lexer(template).parse
)
def test_ternary_control(self):
template = \
"""
% if x:
hi
% elif y+7==10:
there
% elif lala:
lala
% else:
hi
% endif
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
ControlLine(u'if', u'if x:', False, (2, 1)),
Text(u' hi\n', (3, 1)),
ControlLine(u'elif', u'elif y+7==10:', False, (4,
1)), Text(u' there\n', (5, 1)),
ControlLine(u'elif', u'elif lala:', False, (6,
1)), Text(u' lala\n', (7, 1)),
ControlLine(u'else', u'else:', False, (8, 1)),
Text(u' hi\n', (9, 1)),
ControlLine(u'if', u'endif', True, (10, 1))]))
def test_integration(self):
template = \
"""<%namespace name="foo" file="somefile.html"/>
## inherit from foobar.html
<%inherit file="foobar.html"/>
<%def name="header()">
<div>header</div>
</%def>
<%def name="footer()">
<div> footer</div>
</%def>
<table>
% for j in data():
<tr>
% for x in j:
<td>Hello ${x| h}</td>
% endfor
</tr>
% endfor
</table>
"""
nodes = Lexer(template).parse()
self._compare(nodes, TemplateNode({}, [NamespaceTag(u'namespace'
, {u'file': u'somefile.html', u'name': u'foo'},
(1, 1), []), Text(u'\n', (1, 46)),
Comment(u'inherit from foobar.html', (2, 1)),
InheritTag(u'inherit', {u'file': u'foobar.html'},
(3, 1), []), Text(u'''\n\n''', (3, 31)),
DefTag(u'def', {u'name': u'header()'}, (5, 1),
[Text(u'''\n <div>header</div>\n''', (5,
23))]), Text(u'\n', (7, 8)), DefTag(u'def',
{u'name': u'footer()'}, (8, 1),
[Text(u'''\n <div> footer</div>\n''', (8,
23))]), Text(u'''\n\n<table>\n''', (10, 8)),
ControlLine(u'for', u'for j in data():', False,
(13, 1)), Text(u' <tr>\n', (14, 1)),
ControlLine(u'for', u'for x in j:', False, (15,
1)), Text(u' <td>Hello ', (16, 1)),
Expression(u'x', ['h'], (16, 23)), Text(u'</td>\n'
, (16, 30)), ControlLine(u'for', u'endfor', True,
(17, 1)), Text(u' </tr>\n', (18, 1)),
ControlLine(u'for', u'endfor', True, (19, 1)),
Text(u'</table>\n', (20, 1))]))
    def test_comment_after_statement(self):
        """Trailing Python comments on %-control lines stay part of the
        ControlLine text and do not confuse keyword matching."""
        template = \
"""
% if x: #comment
        hi
% else: #next
        hi
% endif #end
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'\n', (1, 1)),
                      ControlLine(u'if', u'if x: #comment', False, (2,
                      1)), Text(u'        hi\n', (3, 1)),
                      ControlLine(u'else', u'else: #next', False, (4,
                      1)), Text(u'        hi\n', (5, 1)),
                      ControlLine(u'if', u'endif #end', True, (6, 1))]))
    def test_crlf(self):
        """Windows CRLF line endings survive lexing unchanged.

        The fixture is read in binary mode so the \\r\\n pairs reach the
        lexer verbatim; rendering still flattens to the expected output.
        """
        template = open(self._file_path("crlf.html"), 'rb').read()
        nodes = Lexer(template).parse()
        self._compare(
            nodes,
            TemplateNode({}, [
                Text(u'<html>\r\n\r\n', (1, 1)),
                PageTag(u'page', {
                    u'args': u"a=['foo',\n            'bar']"
                }, (3, 1), []),
                Text(u'\r\n\r\nlike the name says.\r\n\r\n', (4, 26)),
                ControlLine(u'for', u'for x in [1,2,3]:', False, (8, 1)),
                Text(u'    ', (9, 1)),
                Expression(u'x', [], (9, 9)),
                ControlLine(u'for', u'endfor', True, (10, 1)),
                Text(u'\r\n', (11, 1)),
                Expression(u"trumpeter == 'Miles' and "
                           "trumpeter or \\\n'Dizzy'",
                           [], (12, 1)),
                Text(u'\r\n\r\n', (13, 15)),
                DefTag(u'def', {u'name': u'hi()'}, (15, 1), [
                    Text(u'\r\n    hi!\r\n', (15, 19))]),
                Text(u'\r\n\r\n</html>\r\n', (17, 8))
            ])
        )
        assert flatten_result(Template(template).render()) \
            == """<html> like the name says. 1 2 3 Dizzy </html>"""
    def test_comments(self):
        """Only ``##`` at line start is a Mako comment; a single ``#``
        (CSS selectors, inline hashes) is plain text, and <%doc> blocks
        become multi-line Comment nodes."""
        template = \
"""
<style>
 #someselector
 # other non comment stuff
</style>
## a comment
# also not a comment
## this is a comment
this is ## not a comment
<%doc> multiline
comment
</%doc>
hi
"""
        nodes = Lexer(template).parse()
        self._compare(nodes, TemplateNode({},
                      [Text(u'''\n<style>\n #someselector\n # '''
                      '''other non comment stuff\n</style>\n''',
                      (1, 1)), Comment(u'a comment', (6, 1)),
                      Text(u'''\n# also not a comment\n\n''', (7, 1)),
                      Comment(u'this is a comment', (10, 1)),
                      Text(u'''    \nthis is ## not a comment\n\n''', (11,
                      1)), Comment(u''' multiline\ncomment\n''', (14,
                      1)), Text(u'''
hi
''', (16, 8))]))
    def test_docs(self):
        """<%doc> blocks are captured as Comment nodes both at the top
        level and nested inside a <%def>."""
        template = \
"""
        <%doc>
            this is a comment
        </%doc>
        <%def name="foo()">
            <%doc>
                this is the foo func
            </%doc>
        </%def>
"""
        nodes = Lexer(template).parse()
        self._compare(nodes,
                      TemplateNode({}, [Text(u'\n        ', (1,
                      1)),
                      Comment(u'''\n            this is a comment\n        ''',
                      (2, 9)), Text(u'\n        ', (4, 16)),
                      DefTag(u'def', {u'name': u'foo()'}, (5, 9),
                      [Text(u'\n            ', (5, 28)),
                      Comment(u'''\n                this is the foo func\n'''
                      '''            ''',
                      (6, 13)), Text(u'\n        ', (8, 20))]),
                      Text(u'\n    ', (9, 16))]))
    def test_preprocess(self):
        """A user-supplied preprocessor runs on the raw template text
        before lexing — here rewriting old-style '#' comments to '##'."""
        def preproc(text):
            # Turn a line-leading single '#' into the Mako '##' comment marker.
            return re.sub(r'(?<=\n)\s*#[^#]', '##', text)
        template = \
"""
        hi
        # old style comment
    # another comment
"""
        nodes = Lexer(template, preprocessor=preproc).parse()
        self._compare(nodes, TemplateNode({}, [Text(u'''\n        hi\n''',
                      (1, 1)), Comment(u'old style comment', (3, 1)),
                      Comment(u'another comment', (4, 1))]))
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""SQL Lexer"""
# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
import re
from debug_toolbar.utils.sqlparse import tokens
from debug_toolbar.utils.sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
class include(str):
    """Marker string used inside a token table: names another state
    whose rules should be spliced into the current state."""
class combined(tuple):
    """Indicates a state combined from multiple states.

    Behaves like a tuple of state names; the constructor takes the
    names as positional arguments rather than a single iterable.
    """
    def __new__(cls, *args):
        # Pack the varargs into the underlying tuple.
        return super(combined, cls).__new__(cls, args)
    def __init__(self, *args):
        # tuples are immutable; there is nothing to initialise here,
        # but the signature must accept the same varargs as __new__.
        pass
def is_keyword(value):
    """Classify *value* as a SQL keyword or a plain name.

    Lookup is case-insensitive: the common keyword table is consulted
    first, then the full one, falling back to ``tokens.Name``.
    Returns a ``(tokentype, value)`` pair with the original spelling.
    """
    upper = value.upper()
    ttype = KEYWORDS_COMMON.get(upper, KEYWORDS.get(upper, tokens.Name))
    return ttype, value
def apply_filters(stream, filters, lexer=None):
    """
    Use this method to apply an iterable of filters to
    a stream. If lexer is given it's forwarded to the
    filter, otherwise the filter receives `None`.
    """
    def _wrap(flt, inner):
        # Lazily feed *inner* through one filter.
        for tok in flt.filter(lexer, inner):
            yield tok
    result = stream
    for flt in filters:
        result = _wrap(flt, result)
    return result
class LexerMeta(type):
    """
    Metaclass for Lexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """
    def _process_state(cls, unprocessed, processed, state):
        # Compile one state's rule list from the declarative `tokens`
        # table into (compiled-regex-match, action, new_state) triples.
        # Results are memoised in `processed` so `include` references
        # and combined states reuse already-built lists.
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokenlist = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokenlist.extend(cls._process_state(
                    unprocessed, processed, str(tdef)))
                continue
            assert type(tdef) is tuple, "wrong rule def %r" % tdef
            try:
                rex = re.compile(tdef[0], rflags).match
            except Exception, err:
                raise ValueError(("uncompilable regex %r in state"
                                  " %r of %r: %s"
                                  % (tdef[0], state, cls, err)))
            assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
                ('token type must be simple type or callable, not %r'
                 % (tdef[1],))
            if len(tdef) == 2:
                new_state = None
            else:
                # Third rule element describes a state transition.
                tdef2 = tdef[2]
                if isinstance(tdef2, str):
                    # an existing state
                    if tdef2 == '#pop':
                        new_state = -1
                    elif tdef2 in unprocessed:
                        new_state = (tdef2,)
                    elif tdef2 == '#push':
                        new_state = tdef2
                    elif tdef2[:5] == '#pop:':
                        # '#pop:N' pops N states at once (negative count).
                        new_state = -int(tdef2[5:])
                    else:
                        assert False, 'unknown new state %r' % tdef2
                elif isinstance(tdef2, combined):
                    # combine a new state from existing ones
                    new_state = '_tmp_%d' % cls._tmpname
                    cls._tmpname += 1
                    itokens = []
                    for istate in tdef2:
                        assert istate != state, \
                            'circular state ref %r' % istate
                        itokens.extend(cls._process_state(unprocessed,
                                                          processed, istate))
                    processed[new_state] = itokens
                    new_state = (new_state,)
                elif isinstance(tdef2, tuple):
                    # push more than one state
                    for state in tdef2:
                        assert (state in unprocessed or
                                state in ('#pop', '#push')), \
                            'unknown new state ' + state
                    new_state = tdef2
                else:
                    assert False, 'unknown new state def %r' % tdef2
            tokenlist.append((rex, tdef[1], new_state))
        return tokenlist
    def process_tokendef(cls):
        # Build the compiled rule table for every declared state.
        cls._all_tokens = {}
        cls._tmpname = 0
        processed = cls._all_tokens[cls.__name__] = {}
        #tokendefs = tokendefs or cls.tokens[name]
        for state in cls.tokens.keys():
            cls._process_state(cls.tokens, processed, state)
        return processed
    def __call__(cls, *args, **kwds):
        # Compile the token table lazily, on first instantiation only.
        if not hasattr(cls, '_tokens'):
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef()
        return type.__call__(cls, *args, **kwds)
class Lexer(object):
    """Regex-driven SQL lexer.

    The declarative `tokens` table is compiled once by LexerMeta into
    matcher functions; `get_tokens` then yields (tokentype, value)
    pairs, optionally run through registered filters.
    """
    __metaclass__ = LexerMeta
    # Input decoding: a codec name, or 'guess' / 'chardet' for detection.
    encoding = 'utf-8'
    stripall = False
    stripnl = False
    tabsize = 0
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'--.*?(\r\n|\r|\n)', tokens.Comment.Single),
            # $ matches *before* newline, therefore we have two patterns
            # to match Comment.Single
            (r'--.*?$', tokens.Comment.Single),
            (r'(\r|\n|\r\n)', tokens.Newline),
            (r'\s+', tokens.Whitespace),
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r':=', tokens.Assignment),
            (r'::', tokens.Punctuation),
            (r'[*]', tokens.Wildcard),
            (r'CASE\b', tokens.Keyword),  # extended CASE(foo)
            (r"`(``|[^`])*`", tokens.Name),
            (r"´(´´|[^´])*´", tokens.Name),
            (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', tokens.Name.Builtin),
            (r'\?{1}', tokens.Name.Placeholder),
            (r'[$:?%][a-zA-Z0-9_]+[^$:?%]?', tokens.Name.Placeholder),
            (r'@[a-zA-Z_][a-zA-Z0-9_]+', tokens.Name),
            (r'[a-zA-Z_][a-zA-Z0-9_]*(?=[.(])', tokens.Name),  # see issue39
            (r'[<>=~!]+', tokens.Operator.Comparison),
            (r'[+/@#%^&|`?^-]+', tokens.Operator),
            (r'0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
            (r'[0-9]*\.[0-9]+', tokens.Number.Float),
            (r'[0-9]+', tokens.Number.Integer),
            # TODO: Backslash escapes?
            (r"(''|'.*?[^\\]')", tokens.String.Single),
            # not a real string literal in ANSI SQL:
            (r'(""|".*?[^\\]")', tokens.String.Symbol),
            (r'(\[.*[^\]]\])', tokens.Name),
            (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword),
            (r'END( IF| LOOP)?\b', tokens.Keyword),
            (r'NOT NULL\b', tokens.Keyword),
            (r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', is_keyword),
            (r'[;:()\[\],\.]', tokens.Punctuation),
        ],
        'multiline-comments': [
            # Nested /* ... */ comments push/pop this state.
            (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
            (r'\*/', tokens.Comment.Multiline, '#pop'),
            (r'[^/\*]+', tokens.Comment.Multiline),
            (r'[/*]', tokens.Comment.Multiline)
        ]}
    def __init__(self):
        self.filters = []
    def add_filter(self, filter_, **options):
        # Accept either a Filter instance or a Filter class (which is
        # instantiated with **options).
        from debug_toolbar.utils.sqlparse.filters import Filter
        if not isinstance(filter_, Filter):
            filter_ = filter_(**options)
        self.filters.append(filter_)
    def get_tokens(self, text, unfiltered=False):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.
        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.
        """
        if not isinstance(text, unicode):
            if self.encoding == 'guess':
                # Try UTF-8 (dropping a BOM), fall back to latin1.
                try:
                    text = text.decode('utf-8')
                    if text.startswith(u'\ufeff'):
                        text = text[len(u'\ufeff'):]
                except UnicodeDecodeError:
                    text = text.decode('latin1')
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/')
                enc = chardet.detect(text)
                text = text.decode(enc['encoding'])
            else:
                text = text.decode(self.encoding)
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        # if not text.endswith('\n'):
        #     text += '\n'
        def streamer():
            # Drop the position element from the unprocessed triples.
            for i, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream
    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the inital stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        # Cache of value -> tokentype produced by callable actions
        # (e.g. is_keyword), so repeated names skip the callable.
        known_names = {}
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    # print rex.pattern
                    value = m.group()
                    if value in known_names:
                        yield pos, known_names[value], value
                    elif type(action) is tokens._TokenType:
                        yield pos, action, value
                    elif hasattr(action, '__call__'):
                        ttype, value = action(value)
                        known_names[value] = ttype
                        yield pos, ttype, value
                    else:
                        for item in action(self, m):
                            yield item
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # No rule matched at `pos`.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        pos += 1
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, tokens.Text, u'\n'
                        continue
                    yield pos, tokens.Error, text[pos]
                    pos += 1
                except IndexError:
                    break
def tokenize(sql):
    """Tokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    return Lexer().get_tokens(sql)
#!/usr/bin/env python
# hex2mem.py
# 2012, rok.krajnc@gmail.com
"""Converts ordinary (non-intel format) hex files into Verilog ROMs."""
from __future__ import with_statement, print_function
import sys
import os
import math
from optparse import OptionParser
# main
def main():
    """main function

    Reads a plain (non-Intel) hex file — one hex word per line — and
    writes a synthesizable Verilog ROM module whose output register is
    driven by a clocked case statement over the address.  Options can
    force the address width, depth and row width, and control whether
    unused rows are padded (with all-ones by default, zeroes with -p).
    """
    # handle command-line options
    usage = "Usage: %prog [options] in.hex out.v"
    parser = OptionParser(usage=usage)
    parser.add_option("-a", "--address-bits", dest="aw", action="store", default=0, help="Force use of this many address bits")
    parser.add_option("-s", "--memory-size", dest="ms", action="store", default=0, help="Force length of memory (number of rows)")
    parser.add_option("-w", "--memory-width", dest="mw", action="store", default=0, help="Force width of memory (width of a row)")
    parser.add_option("-n", "--no-pad", dest="nopad", action="store_true", default=False, help="Do not pad output")
    parser.add_option("-p", "--pad-with-zeroes", dest="padval", action="store_true", default=False, help="Pad with zeroes instead of ones")
    (options, args) = parser.parse_args()
    # parse args
    if (len(args) != 2) : parser.error("Invalid number of arguments.\n")
    fin = args[0]
    fon = args[1]
    # The Verilog module is named after the output file's basename.
    modulename = os.path.splitext(os.path.basename(fon))[0]
    # check that files exist
    if (not os.path.isfile(fin)):
        sys.stderr.write("ERROR: could not open source file %s. Cannot continue.\n" % fin)
        sys.exit(-1)
    # test if output file is writeable
    if (not os.access(os.path.dirname(fon), os.W_OK | os.X_OK)):
        sys.stderr.write("ERROR: output directory %s is not writeable, or no such path exists.\n" % os.path.dirname(fon))
        sys.exit(-1)
    # open & read input file
    with open(fin, 'r') as fi:
        dat = fi.readlines()
    dat = [line.strip() for line in dat]
    # calculate needed address width
    # NOTE(review): an empty input file makes math.log(0) raise — confirm
    # inputs are guaranteed non-empty.
    aw = int(math.ceil(math.log(len(dat), 2)))
    if options.aw != 0:
        if int(options.aw) < aw:
            sys.stderr.write("ERROR: requested number of address bits is less than required (requested: %d, required: %d).\n" % (int(options.aw), aw))
            sys.exit(-1)
        else:
            aw = int(options.aw)
    # check memory size
    if options.nopad:
        ms = len(dat)
    else:
        ms = 2**aw
    if options.ms != 0:
        if int(options.ms) < ms:
            sys.stderr.write("ERROR: requested memory size is less than required (requested: %d, required: %d).\n" % (int(options.ms), ms))
            sys.exit(-1)
        else:
            ms = int(options.ms)
    # check memory width (in hex digits: longest line in the file)
    mw = len(max(dat, key=len))
    if options.mw != 0:
        if int(options.mw) < mw:
            sys.stderr.write("ERROR: requested memory width is less than required (requested: %d, required: %d).\n" % (int(options.mw), mw))
            sys.exit(-1)
        else:
            mw = int(options.mw)
    # write Verilog memory file
    # the Verilog code follows Altera guidelines for inferring ROM functions from HDL code (Altera Recommended HDL Coding Styles)
    # Case-line template: aw-bit hex address => (mw*4)-bit hex data.
    fmt = "      %d'h%%0%dx : q <= #1 %d'h%%0%dx;\n" % (aw, int(math.ceil((aw+3)/4)), (mw*4), mw)
    with open(fon, 'w') as fo:
        # header
        fo.write( "/* %s */\n" % os.path.basename(fon))
        fo.write( "/* AUTO-GENERATED FILE, DO NOT EDIT! */\n")
        fo.write( "/* generated from %s assembler file */\n\n\n" % fin)
        fo.write( "module %s (\n" % modulename)
        fo.write( "  input  wire [ %02d-1:0] address,\n" % (aw))
        fo.write( "  input  wire clock,\n")
        fo.write( "  output reg  [ %02d-1:0] q\n" % (mw*4))
        fo.write( ");\n\n\n")
        # data
        fo.write( "always @ (posedge clock) begin\n")
        fo.write( "  case(address)\n")
        for idx, data in enumerate(dat):
            fo.write( fmt % (idx, int(data, 16)))
        # padding
        for i in range(ms-idx-1):
            idx = idx+1
            if options.padval:
                fo.write( fmt % (idx, 0))
            else:
                # Default pad value: all ones across the row width.
                fo.write( fmt % (idx, (1<<(mw*4))-1))
        # footer
        fo.write( "  endcase\n")
        fo.write( "end\n\n\n")
        fo.write( "endmodule\n\n")
    # done
    print ("File ", fon, " written successfully, using ", idx+1, "x", mw*4, " memory (", (idx+1)*mw*4," bits), will be probably inferred into ", int(math.ceil((idx+1)*mw*4/4096)), " Altera M4Ks.", sep="")
# END main
# start
# Script entry point: only run the converter when executed directly.
if __name__ == "__main__":
    main()
# END start
{
"private": true,
"scripts": {
"dev": "next dev --turbopack",
"build": "biome check && next build",
"start": "next start",
"check": "biome check"
},
"dependencies": {
"next": "latest",
"react": "^19.0.0",
"react-dom": "^19.0.0"
},
"devDependencies": {
"@biomejs/biome": "^2",
"@types/node": "^20",
"@types/react": "^19",
"@types/react-dom": "^19",
"typescript": "^5"
}
} | json | github | https://github.com/vercel/next.js | examples/with-biome/package.json |
import unittest
from test._test_multiprocessing import install_tests_in_module_dict
# Populate this module's namespace with the shared multiprocessing test
# cases, specialised for the 'spawn' start method (process tests only).
install_tests_in_module_dict(globals(), 'spawn', only_type="processes")
if __name__ == '__main__':
    unittest.main()
#!/bin/sh
test_description='Test cloning a repository larger than 2 gigabyte'
. ./test-lib.sh
# This test is expensive (generates >2GB of data); it is opt-in only.
if ! test_bool_env GIT_TEST_CLONE_2GB false
then
	skip_all='expensive 2GB clone test; enable with GIT_TEST_CLONE_2GB=true'
	test_done
fi
# Build a repository just over 2GB by fast-importing blobcount blobs of
# 100MB each; compression and delta depth are disabled so the pack
# actually exceeds 2GB on disk.
test_expect_success 'setup' '
	git config pack.compression 0 &&
	git config pack.depth 0 &&
	blobsize=$((100*1024*1024)) &&
	blobcount=$((2*1024*1024*1024/$blobsize+1)) &&
	i=1 &&
	(while test $i -le $blobcount
	 do
		printf "Generating blob $i/$blobcount\r" >&2 &&
		printf "blob\nmark :$i\ndata $blobsize\n" &&
		#test-tool genrandom $i $blobsize &&
		printf "%-${blobsize}s" $i &&
		echo "M 100644 :$i $i" >> commit &&
		i=$(($i+1)) ||
		echo $? > exit-status
	 done &&
	 echo "commit refs/heads/main" &&
	 echo "author A U Thor <author@email.com> 123456789 +0000" &&
	 echo "committer C O Mitter <committer@email.com> 123456789 +0000" &&
	 echo "data 5" &&
	 echo ">2gb" &&
	 cat commit) |
	git fast-import --big-file-threshold=2 &&
	test ! -f exit-status
'
test_expect_success 'clone - bare' '
	git clone --bare --no-hardlinks . clone-bare
'
test_expect_success 'clone - with worktree, file:// protocol' '
	git clone "file://$(pwd)" clone-wt
'
test_done
import unittest, string
from test import support
class ModuleTest(unittest.TestCase):
    """Tests for the top-level `string` module: constants, capwords()
    and the string.Formatter customisation hooks."""
    def test_attrs(self):
        # Merely touch each documented constant to ensure it exists.
        string.whitespace
        string.ascii_lowercase
        string.ascii_uppercase
        string.ascii_letters
        string.digits
        string.hexdigits
        string.octdigits
        string.punctuation
        string.printable
    def test_capwords(self):
        """capwords() title-cases words and normalises separators."""
        self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('abc\t   def  \nghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
        self.assertEqual(string.capwords('ABC-def DEF-ghi GHI'), 'Abc-def Def-ghi Ghi')
        self.assertEqual(string.capwords('   aBc  DeF   '), 'Abc Def')
        self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
        self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')
    def test_formatter(self):
        """string.Formatter default behaviour plus each override hook."""
        fmt = string.Formatter()
        self.assertEqual(fmt.format("foo"), "foo")
        self.assertEqual(fmt.format("foo{0}", "bar"), "foobar")
        self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")
        self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
        # override get_value ############################################
        class NamespaceFormatter(string.Formatter):
            def __init__(self, namespace={}):
                string.Formatter.__init__(self)
                self.namespace = namespace
            def get_value(self, key, args, kwds):
                if isinstance(key, str):
                    try:
                        # Check explicitly passed arguments first
                        return kwds[key]
                    except KeyError:
                        return self.namespace[key]
                else:
                    string.Formatter.get_value(key, args, kwds)
        fmt = NamespaceFormatter({'greeting':'hello'})
        self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')
        # override format_field #########################################
        class CallFormatter(string.Formatter):
            def format_field(self, value, format_spec):
                # Call the value before formatting it.
                return format(value(), format_spec)
        fmt = CallFormatter()
        self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')
        # override convert_field ########################################
        class XFormatter(string.Formatter):
            def convert_field(self, value, conversion):
                # Custom '!x' conversion maps any value to None.
                if conversion == 'x':
                    return None
                return super(XFormatter, self).convert_field(value, conversion)
        fmt = XFormatter()
        self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")
        # override parse ################################################
        class BarFormatter(string.Formatter):
            # returns an iterable that contains tuples of the form:
            # (literal_text, field_name, format_spec, conversion)
            def parse(self, format_string):
                for field in format_string.split('|'):
                    if field[0] == '+':
                        # it's markup
                        field_name, _, format_spec = field[1:].partition(':')
                        yield '', field_name, format_spec, None
                    else:
                        yield field, None, None, None
        fmt = BarFormatter()
        self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '*   foo    *')
        # test all parameters used
        class CheckAllUsedFormatter(string.Formatter):
            def check_unused_args(self, used_args, args, kwargs):
                # Track which arguments actually got used
                unused_args = set(kwargs.keys())
                unused_args.update(range(0, len(args)))
                for arg in used_args:
                    unused_args.remove(arg)
                if unused_args:
                    raise ValueError("unused arguments")
        fmt = CheckAllUsedFormatter()
        self.assertEqual(fmt.format("{0}", 10), "10")
        self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
        self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
        self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
        self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
        self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
        self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)
def test_main():
    """Entry point used by the regression-test driver."""
    support.run_unittest(ModuleTest)
if __name__ == "__main__":
    test_main()
#!/usr/bin/python
# Copyright(c)2008-2010 Internet Archive. Software license AGPL version 3.
#
# This file is part of BookReader.
#
# BookReader is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BookReader is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BookReader. If not, see <http://www.gnu.org/licenses/>.
#
# The BookReader source is hosted at http://github.com/openlibrary/bookreader/
from collections import deque
import itertools
class windowed_iterator:
    """ Wrap an iterator s.t. we can see [window] neighbors
    in either direction from the current item.
    Items are stored in a deque of size 2*window + 1, where the latest
    item is always in the middle position.
    The supplied clear_callback() is called for items more than
    [window] steps in the past.
    """
    # Todo? remove use of None as sentinel, to be able to represent
    # iterators returning None.
    def __init__(self, iterator, window, clear_callback=None):
        self.iterator = iterator
        # initialize deque with sentinel values
        self.items = deque((None for i in range(window + 1)),
                           window * 2 + 1)
        self.window = window
        self.clear_callback = clear_callback
    def __iter__(self):
        return self
    def __repr__(self):
        return str(self.items) + ' window: ' + str(self.window)
    def clear(self):
        # Flush every buffered (truthy) item through the clear callback.
        for item in self.items:
            if item and self.clear_callback is not None:
                self.clear_callback(item)
        self.items.clear()
    def neighbor(self, delta):
        """Return the item *delta* steps from the current one.

        Positive deltas pull further items from the wrapped iterator on
        demand; returns None past either end of the window/stream.
        Raises IndexError when abs(delta) exceeds the window.
        """
        if abs(delta) > self.window:
            raise IndexError('Requested delta outside window')
        while self.window + delta + 1 > len(self.items):
            try:
                # Fix: use the next() builtin (Python 2.6+/3.x) instead of
                # the Python-2-only .next() method, so any iterator works.
                self.items.append(next(self.iterator))
            except StopIteration:
                return None
        return self.items[self.window + delta]
    def neighbors(self, window=None, modtwo=False):
        """Yield the existing neighbors within *window* steps of the
        current item (the current item itself is excluded); with
        modtwo=True only even offsets are yielded."""
        if window is None:
            window = self.window
        if window > self.window:
            raise IndexError('Requested delta outside window')
        for i in itertools.chain(range(-window, 0),
                                 range(1, window + 1)):
            if modtwo and i % 2 == 1:
                continue
            n = self.neighbor(i)
            if n is not None:
                yield n
    def next(self):
        """Advance to and return the next item (iterator protocol)."""
        nextitem = None
        if len(self.items) == self.window + 1:
            # elicit potential StopIteration before clearing/popping
            nextitem = next(self.iterator)
        if self.items[0] is not None and self.clear_callback is not None:
            self.clear_callback(self.items[0])
        self.items.popleft()
        if nextitem is not None:
            self.items.append(nextitem)
        return self.items[self.window]
    # Fix: expose the Python 3 iterator protocol while keeping .next()
    # for existing Python 2 callers.
    __next__ = next
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print-statement syntax): iterate a
    # generator of dicts and show the 2-neighborhood at a few positions.
    def sample_gen():
        for i in range(0, 10):
            yield { 'num': i*i }
    g = sample_gen()
    c = windowed_iterator(g, 3)
    for i, item in enumerate(c):
        print 'item %s: %s' % (i, item)
        # print c
        if i in (1, 4, 6, 9):
            print 'neighbors of item %s: %s' % (i, [n for n in c.neighbors(2)])
def largest_subset(a, n=None):
    """Return the size of the largest subset of *a* in which every pair
    of elements is divisible (one divides the other).

    Classic O(n^2) DP: dp[i] is the size of the best divisibility chain
    that starts at a[i] and uses only elements to its right.

    a -- list of positive integers
    n -- number of elements to consider (defaults to len(a)); kept as a
         positional parameter for backward compatibility
    Returns 0 for empty input (the original raised IndexError).
    """
    if n is None:
        n = len(a)
    if n == 0:
        return 0
    # Every element alone forms a chain of length 1.
    dp = [1] * n
    for i in range(n - 2, -1, -1):
        best = 0
        for j in range(i + 1, n):
            # a[i] and a[j] may coexist only if one divides the other.
            if a[j] % a[i] == 0 or a[i] % a[j] == 0:
                best = max(best, dp[j])
        dp[i] = 1 + best
    return max(dp)
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from time import sleep
from ansible.module_utils.cloud import CloudRetry
# Feature-detection imports: each optional dependency sets a HAS_* flag
# instead of failing at import time, so modules can report a clear error
# only when the library is actually needed.
# Fix: catch ImportError specifically — a bare `except:` also swallows
# SystemExit/KeyboardInterrupt and unrelated errors.
try:
    import boto
    import boto.ec2  # boto does weird import stuff
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

try:
    import boto3
    import botocore
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False

try:
    from distutils.version import LooseVersion
    HAS_LOOSE_VERSION = True
except ImportError:
    HAS_LOOSE_VERSION = False
from ansible.module_utils.six import string_types, binary_type, text_type
class AnsibleAWSError(Exception):
    """Base exception raised by these AWS utility helpers."""
def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    if not HAS_BOTO3:
        # NoneType can never be raised/caught, making retries a no-op.
        return type(None)
    return botocore.exceptions.ClientError
class AWSRetry(CloudRetry):
    """CloudRetry specialisation that understands botocore error codes."""
    base_class = _botocore_exception_maybe()

    @staticmethod
    def status_code_from_exception(error):
        # botocore packs the AWS error code inside the response payload.
        return error.response['Error']['Code']

    @staticmethod
    def found(response_code):
        # This list of failures is based on this API Reference
        # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
        retryable = (
            'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
            'InternalFailure', 'InternalError',
        )
        if response_code in retryable:
            return True
        # Any "<Something>.NotFound" code is also retried: the resource
        # may simply not be visible yet due to eventual consistency.
        return re.match(r'\w+.NotFound', response_code) is not None
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
    """Module-facing wrapper around _boto3_conn.

    Converts a programming error (bad conn_type) into module.fail_json
    instead of letting the ValueError escape.
    """
    try:
        return _boto3_conn(conn_type=conn_type, resource=resource,
                           region=region, endpoint=endpoint, **params)
    except ValueError:
        module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
if conn_type not in ['both', 'resource', 'client']:
raise ValueError('There is an issue in the calling code. You '
'must specify either both, resource, or client to '
'the conn_type parameter in the boto3_conn function '
'call')
if conn_type == 'resource':
resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
return resource
elif conn_type == 'client':
client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
return client
else:
client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
boto3_inventory_conn = _boto3_conn
def aws_common_argument_spec():
    """Argument-spec entries shared by every AWS module.

    Secret-bearing parameters are marked no_log so their values never
    reach module output or logs.
    """
    return {
        'ec2_url': {},
        'aws_secret_key': {'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True},
        'aws_access_key': {'aliases': ['ec2_access_key', 'access_key']},
        'validate_certs': {'default': True, 'type': 'bool'},
        'security_token': {'aliases': ['access_token'], 'no_log': True},
        'profile': {},
    }
def ec2_argument_spec():
    """The common AWS argument spec extended with the region parameter."""
    spec = aws_common_argument_spec()
    spec['region'] = dict(aliases=['aws_region', 'ec2_region'])
    return spec
def get_aws_connection_info(module, boto3=False):
    """Resolve AWS connection settings for a module.

    Precedence for each setting: explicit module parameter, then
    environment variables (AWS_*, then EC2_*), then — for region only —
    the boto/botocore configuration.  Returns a
    ``(region, ec2_url, boto_params)`` tuple where boto_params is shaped
    for boto3 or boto depending on the *boto3* flag.
    """
    # Check module args for credentials, then check environment vars
    # access_key
    ec2_url = module.params.get('ec2_url')
    access_key = module.params.get('aws_access_key')
    secret_key = module.params.get('aws_secret_key')
    security_token = module.params.get('security_token')
    region = module.params.get('region')
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')
    if not ec2_url:
        if 'AWS_URL' in os.environ:
            ec2_url = os.environ['AWS_URL']
        elif 'EC2_URL' in os.environ:
            ec2_url = os.environ['EC2_URL']
    if not access_key:
        if 'AWS_ACCESS_KEY_ID' in os.environ:
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif 'AWS_ACCESS_KEY' in os.environ:
            access_key = os.environ['AWS_ACCESS_KEY']
        elif 'EC2_ACCESS_KEY' in os.environ:
            access_key = os.environ['EC2_ACCESS_KEY']
        else:
            # in case access_key came in as empty string
            access_key = None
    if not secret_key:
        if 'AWS_SECRET_ACCESS_KEY' in os.environ:
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif 'AWS_SECRET_KEY' in os.environ:
            secret_key = os.environ['AWS_SECRET_KEY']
        elif 'EC2_SECRET_KEY' in os.environ:
            secret_key = os.environ['EC2_SECRET_KEY']
        else:
            # in case secret_key came in as empty string
            secret_key = None
    if not region:
        if 'AWS_REGION' in os.environ:
            region = os.environ['AWS_REGION']
        elif 'AWS_DEFAULT_REGION' in os.environ:
            region = os.environ['AWS_DEFAULT_REGION']
        elif 'EC2_REGION' in os.environ:
            region = os.environ['EC2_REGION']
        else:
            if not boto3:
                # boto.config.get returns None if config not found
                region = boto.config.get('Boto', 'aws_region')
                if not region:
                    region = boto.config.get('Boto', 'ec2_region')
            elif HAS_BOTO3:
                # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
                region = botocore.session.get_session().get_config_variable('region')
            else:
                module.fail_json(msg="Boto3 is required for this module. Please install boto3 and try again")
    if not security_token:
        if 'AWS_SECURITY_TOKEN' in os.environ:
            security_token = os.environ['AWS_SECURITY_TOKEN']
        elif 'AWS_SESSION_TOKEN' in os.environ:
            security_token = os.environ['AWS_SESSION_TOKEN']
        elif 'EC2_SECURITY_TOKEN' in os.environ:
            security_token = os.environ['EC2_SECURITY_TOKEN']
        else:
            # in case security_token came in as empty string
            security_token = None
    # Shape the credential dict for the requested library; the key names
    # differ between boto3 and boto.
    if HAS_BOTO3 and boto3:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           aws_session_token=security_token)
        boto_params['verify'] = validate_certs
        if profile_name:
            boto_params['profile_name'] = profile_name
    else:
        boto_params = dict(aws_access_key_id=access_key,
                           aws_secret_access_key=secret_key,
                           security_token=security_token)
        # only set profile_name if passed as an argument
        if profile_name:
            boto_params['profile_name'] = profile_name
        boto_params['validate_certs'] = validate_certs
    # Normalise byte strings (possible under Python 2) to text.
    for param, value in boto_params.items():
        if isinstance(value, binary_type):
            boto_params[param] = text_type(value, 'utf-8', 'strict')
    return region, ec2_url, boto_params
def get_ec2_creds(module):
    """Return (ec2_url, access_key, secret_key, region) for legacy callers.

    Compatibility shim for older modules that cannot (yet) use the
    ec2_connect() helper directly.
    """
    region, ec2_url, params = get_aws_connection_info(module)
    access_key = params['aws_access_key_id']
    secret_key = params['aws_secret_access_key']
    return ec2_url, access_key, secret_key, region
def boto_fix_security_token_in_profile(conn, profile_name):
    """Monkey patch for boto issue boto/boto#2100.

    If the named boto config profile carries an aws_security_token, push it
    onto the connection's provider so authenticated calls work.
    """
    section = 'profile ' + profile_name
    if boto.config.has_option(section, 'aws_security_token'):
        token = boto.config.get(section, 'aws_security_token')
        conn.provider.set_security_token(token)
    return conn
def connect_to_aws(aws_module, region, **params):
    """Connect the given boto service module (e.g. boto.ec2) to *region*.

    :raises AnsibleAWSError: when the region is unknown to the module, or
        the connection could not be established for any other reason.
    """
    conn = aws_module.connect_to_region(region, **params)
    if not conn:
        known_regions = [candidate.name for candidate in aws_module.regions()]
        if region not in known_regions:
            raise AnsibleAWSError(
                "Region %s does not seem to be available for aws module %s. "
                "If the region definitely exists, you may need to upgrade "
                "boto or extend with endpoints_path" % (region, aws_module.__name__))
        raise AnsibleAWSError(
            "Unknown problem connecting to region %s for aws module %s."
            % (region, aws_module.__name__))
    # Apply the boto#2100 security-token workaround for profile users.
    if params.get('profile_name'):
        conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
    return conn
def ec2_connect(module):
    """Return an EC2 connection, preferring a region over an explicit URL.

    Fails the module when neither region nor ec2_url is configured, or when
    authentication fails.
    """
    region, ec2_url, boto_params = get_aws_connection_info(module)
    if region:
        # A configured region wins; connect to its endpoint.
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    elif ec2_url:
        # Fall back to the legacy explicit-endpoint connection.
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="Either region or ec2_url must be specified")
    return ec2
def paging(pause=0, marker_property='marker'):
    """Decorator factory adding marker-based paging to boto list calls.

    The wrapped function is invoked repeatedly with ``marker=`` set to the
    marker attribute (named by *marker_property*) of the previous page, until
    no marker is returned.  Sleeps *pause* seconds between pages when set.
    Older boto functions that reject a ``marker`` keyword raise TypeError, in
    which case a single plain call is made instead.
    """
    def wrapper(f):
        def page(*args, **kwargs):
            collected = []
            token = None
            while True:
                try:
                    batch = f(*args, marker=token, **kwargs)
                    token = getattr(batch, marker_property)
                    collected.extend(batch)
                    if not token:
                        break
                    elif pause:
                        sleep(pause)
                except TypeError:
                    # Older versions of boto do not accept a marker param;
                    # discard partial results and call once, unpaged.
                    collected = f(*args, **kwargs)
                    break
            return collected
        return page
    return wrapper
def camel_dict_to_snake_dict(camel_dict):
    """Recursively convert a dict with CamelCase keys to snake_case keys.

    Nested dicts and lists (including lists of dicts, and lists of lists)
    are converted as well; non-container values are left untouched.

    :param camel_dict: dict whose keys are CamelCase strings.
    :returns: a new dict with snake_case keys.
    """
    import re
    # Compile once per call: the original helper recompiled both patterns
    # for every single key it converted.
    first_cap_re = re.compile('(.)([A-Z][a-z]+)')
    all_cap_re = re.compile('([a-z0-9])([A-Z])')

    def camel_to_snake(name):
        # FooBarBaz -> foo_bar_baz; the second pattern splits trailing
        # capital runs such as HTTPServer -> http_server.
        s1 = first_cap_re.sub(r'\1_\2', name)
        return all_cap_re.sub(r'\1_\2', s1).lower()

    def value_is_list(camel_list):
        # Convert dict elements recursively and descend into nested lists.
        checked_list = []
        for item in camel_list:
            if isinstance(item, dict):
                checked_list.append(camel_dict_to_snake_dict(item))
            elif isinstance(item, list):
                checked_list.append(value_is_list(item))
            else:
                checked_list.append(item)
        return checked_list

    snake_dict = {}
    for k, v in camel_dict.items():
        if isinstance(v, dict):
            snake_dict[camel_to_snake(k)] = camel_dict_to_snake_dict(v)
        elif isinstance(v, list):
            snake_dict[camel_to_snake(k)] = value_is_list(v)
        else:
            snake_dict[camel_to_snake(k)] = v
    return snake_dict
def snake_dict_to_camel_dict(snake_dict):
    """Recursively convert a dict with snake_case keys to camelCase keys.

    Nested dicts and lists are converted too; None is returned unchanged and
    non-container values pass through untouched.
    """
    def camel(words):
        # foo_bar_baz -> fooBarBaz; an empty segment (double underscore)
        # deliberately yields a literal underscore, as before.
        parts = words.split('_')
        tail = [segment.capitalize() or '_' for segment in parts[1:]]
        return parts[0] + ''.join(tail)

    def camelize(complex_type):
        if complex_type is None:
            return
        new_type = type(complex_type)()
        if isinstance(complex_type, dict):
            for key, value in complex_type.items():
                new_type[camel(key)] = camelize(value)
        elif isinstance(complex_type, list):
            for element in complex_type:
                new_type.append(camelize(element))
        else:
            return complex_type
        return new_type

    return camelize(snake_dict)
def ansible_dict_to_boto3_filter_list(filters_dict):
    """Convert an Ansible dict of filters to the list form boto3 expects.

    Each ``{name: value}`` entry becomes ``{'Name': name, 'Values': [...]}``.
    A bare string value is wrapped in a single-element list; any other value
    is passed through unchanged as the ``Values`` entry.

    Basic Usage:
        >>> ansible_dict_to_boto3_filter_list({'some-aws-id': 'i-01234567'})
        [{'Name': 'some-aws-id', 'Values': ['i-01234567']}]

    :param filters_dict: dict of AWS filters.
    :returns: list of boto3-style filter dicts.
    """
    filters_list = []
    for name, value in filters_dict.items():
        values = [value] if isinstance(value, string_types) else value
        filters_list.append({'Name': name, 'Values': values})
    return filters_list
def boto3_tag_list_to_ansible_dict(tags_list):
    """Flatten a boto3-style tag list into a plain key:value dict.

    Entries keyed either as ``{'key': ..., 'value': ...}`` or
    ``{'Key': ..., 'Value': ...}`` are accepted; the lower-case form wins
    when both are present, and entries with neither are ignored.

    Basic Usage:
        >>> boto3_tag_list_to_ansible_dict([{'Key': 'MyTagKey', 'Value': 'MyTagValue'}])
        {'MyTagKey': 'MyTagValue'}

    :param tags_list: list of dicts representing AWS tags.
    :returns: dict of key:value pairs representing AWS tags.
    """
    tags_dict = {}
    for entry in tags_list:
        for key_field, value_field in (('key', 'value'), ('Key', 'Value')):
            if key_field in entry:
                tags_dict[entry[key_field]] = entry[value_field]
                break
    return tags_dict
def ansible_dict_to_boto3_tag_list(tags_dict):
    """Convert a flat key:value tag dict into boto3's list-of-dicts form.

    Basic Usage:
        >>> ansible_dict_to_boto3_tag_list({'MyTagKey': 'MyTagValue'})
        [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]

    :param tags_dict: dict representing AWS resource tags.
    :returns: list of ``{'Key': ..., 'Value': ...}`` dicts.
    """
    return [{'Key': key, 'Value': value} for key, value in tags_dict.items()]
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
    """Resolve security group names to security group IDs.

    Security group names are not unique across VPCs: if a name exists in
    several VPCs and no vpc_id is supplied, every matching ID is returned
    (assigning them all to one resource will then fail in boto, so wrap the
    call in a try block).  Entries that already look like IDs (``sg-...``)
    are passed through verbatim; any other unmatched entry raises ValueError.
    """
    import re

    # NOTE: the ``boto3`` parameter shadows the boto3 module name but is part
    # of the public signature, so it is kept.
    def sg_name(group):
        return group['GroupName'] if boto3 else group.name

    def sg_id(group):
        return group['GroupId'] if boto3 else group.id

    if isinstance(sec_group_list, string_types):
        sec_group_list = [sec_group_list]

    # Fetch every candidate group, scoped to the VPC when one was given.
    if boto3:
        if vpc_id:
            vpc_filter = [{'Name': 'vpc-id', 'Values': [vpc_id]}]
            all_sec_groups = ec2_connection.describe_security_groups(Filters=vpc_filter)['SecurityGroups']
        else:
            all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
    elif vpc_id:
        all_sec_groups = ec2_connection.get_all_security_groups(filters={'vpc-id': vpc_id})
    else:
        all_sec_groups = ec2_connection.get_all_security_groups()

    known_names = set(str(sg_name(group)) for group in all_sec_groups)
    unmatched = set(sec_group_list) - known_names
    wanted_names = set(sec_group_list) - unmatched

    sec_group_id_list = []
    if unmatched:
        # Unmatched entries that already look like IDs are used as-is.
        id_pattern = re.compile('sg-[a-fA-F0-9]+$')
        sec_group_id_list = [entry for entry in unmatched if id_pattern.match(entry)]
        invalid = [entry for entry in unmatched if not id_pattern.match(entry)]
        if invalid:
            raise ValueError("The following group names are not valid: %s" % ', '.join(invalid))

    sec_group_id_list += [str(sg_id(group)) for group in all_sec_groups
                          if str(sg_name(group)) in wanted_names]
    return sec_group_id_list
def sort_json_policy_dict(policy_dict):
    """Return a copy of an IAM JSON policy with every list sorted.

    Two policies that differ only in list ordering then compare equal.
    Sorting is the elements' natural order -- strings sort
    lexicographically (so '101' comes before '14') -- and is applied
    recursively to nested dicts and lists.

    Basic Usage:
        >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
        >>> sort_json_policy_dict(my_iam_policy)
        {'Principle': {'AWS': ['101', '14', '31', '7']}}

    :param policy_dict: dict representing an IAM JSON policy.
    :returns: a sorted copy of the policy.
    """
    def sorted_list(items):
        normalized = []
        for element in items:
            if isinstance(element, dict):
                normalized.append(sort_json_policy_dict(element))
            elif isinstance(element, list):
                normalized.append(sorted_list(element))
            else:
                normalized.append(element)
        normalized.sort()
        return normalized

    ordered = {}
    for key, value in policy_dict.items():
        if isinstance(value, dict):
            ordered[key] = sort_json_policy_dict(value)
        elif isinstance(value, list):
            ordered[key] = sorted_list(value)
        else:
            ordered[key] = value
    return ordered
def map_complex_type(complex_type, type_map):
    """Recursively cast elements of a dict/list to the types named in type_map.

    Example of usage:
        DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
            'maximum_percent': 'int',
            'minimum_healthy_percent': 'int'
        }
        deployment_configuration = map_complex_type(
            module.params['deployment_configuration'],
            DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    This ensures all keys within the root element are casted and valid
    integers.

    :param complex_type: dict, list or scalar value to convert.
    :param type_map: for a dict, maps keys to builtin type names (a
        one-element list value names the element type of a list-valued key);
        for a scalar, a single builtin type name.
    :returns: converted copy; ``None`` input returns ``None``.  NOTE: when a
        dict key is missing from type_map, the ORIGINAL dict is returned
        unconverted (historical behaviour, deliberately preserved).
    """
    if complex_type is None:
        return
    new_type = type(complex_type)()
    if isinstance(complex_type, dict):
        for key in complex_type:
            if key in type_map:
                if isinstance(type_map[key], list):
                    # A list-valued map entry names the element type.
                    new_type[key] = map_complex_type(complex_type[key],
                                                     type_map[key][0])
                else:
                    new_type[key] = map_complex_type(complex_type[key],
                                                     type_map[key])
            else:
                # Bail out on the first unmapped key (see docstring note).
                return complex_type
    elif isinstance(complex_type, list):
        for i in range(len(complex_type)):
            new_type.append(map_complex_type(complex_type[i], type_map))
    elif type_map:
        # Resolve the builtin by name.  ``__builtins__`` is a dict in
        # imported modules but a module object in __main__; the original
        # ``globals()['__builtins__'][type_map]`` lookup failed in the
        # latter case.
        builtins_ns = __builtins__ if isinstance(__builtins__, dict) else vars(__builtins__)
        return builtins_ns[type_map](complex_type)
    return new_type
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Manila"""
import functools
from oslo_config import cfg
from oslo_policy import policy
from manila import exception
CONF = cfg.CONF
_ENFORCER = None
def reset():
    """Discard the cached policy enforcer so it is rebuilt on next use."""
    global _ENFORCER
    enforcer = _ENFORCER
    if enforcer:
        enforcer.clear()
        _ENFORCER = None
def init(policy_path=None):
    """Create the module-level policy enforcer if it does not exist yet.

    :param policy_path: optional path to a policy file; when given it
        overrides the enforcer's default policy location before rules are
        loaded.  No-op if an enforcer is already cached.
    """
    global _ENFORCER
    if _ENFORCER:
        return
    _ENFORCER = policy.Enforcer(CONF)
    if policy_path:
        _ENFORCER.policy_path = policy_path
    _ENFORCER.load_rules()
def enforce(context, action, target, do_raise=True):
    """Verify that *action* is allowed on *target* in this context.

    :param context: manila context (or an already-converted credentials dict)
    :param action: string representing the action to be checked; colon
        separated for clarity, e.g. ``compute:create_instance``,
        ``compute:attach_volume``, ``volume:attach_volume``
    :param target: dictionary representing the object of the action; for
        object creation this is typically the location of the object, e.g.
        ``{'project_id': context.project_id}``
    :param do_raise: when True (the default) a failed check raises instead
        of returning False
    :raises manila.exception.PolicyNotAuthorized: if verification fails.
    """
    init()
    credentials = context if isinstance(context, dict) else context.to_dict()
    # When the caller wants a raise, tell oslo.policy which exception to
    # use and what action to report in it.
    extra = {}
    if do_raise:
        extra = {'exc': exception.PolicyNotAuthorized,
                 'action': action,
                 'do_raise': do_raise}
    return _ENFORCER.enforce(action, target, credentials, **extra)
def check_is_admin(roles):
    """Return True if *roles* grants admin per the context_is_admin rule."""
    init()
    # An empty project_id is deliberately supplied: if the context_is_admin
    # definition is missing and the default admin_or_owner rule applies,
    # our credentials carry no project_id, so this target can never match
    # as a generic rule (avoids a KeyError as well).
    target = {'project_id': ''}
    credentials = {'roles': roles}
    return _ENFORCER.enforce("context_is_admin", target, credentials)
def wrap_check_policy(resource):
    """Decorator factory: enforce the policy for *resource* before each call.

    The decorated method must take ``(self, context, target_obj, ...)``; the
    policy action checked is ``<resource>:<method name>``.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(self, context, target_obj, *args, **kwargs):
            check_policy(context, resource, func.__name__, target_obj)
            return func(self, context, target_obj, *args, **kwargs)
        return wrapped
    return decorator
def check_policy(context, resource, action, target_obj=None):
    """Enforce policy ``<resource>:<action>`` for the caller's project/user.

    Values from *target_obj* override the context-derived project_id and
    user_id in the policy target.
    """
    target = {
        'user_id': context.user_id,
        'project_id': context.project_id,
    }
    target.update(target_obj or {})
    enforce(context, '%s:%s' % (resource, action), target)
# -*- coding: utf-8 -*-
'''
A parser for CSS.
'''
import re
from ply import yacc as ply_yacc
from csslex import csslexer
import css
__all__ = ('cssparser', 'yacc', 'parsetab')
def normalize(x):
    '''Normalize escaped characters to their literal value, lower-cased.

    A CSS escape is a backslash, optional zero padding, then hex digits;
    each recognized two-digit escape is replaced by the character it names.
    The original pattern accepted only decimal digits even though the
    captured value was parsed as hex (base 16), so escapes such as
    ``\\4a`` were silently left alone; the character class now matches
    hex digits, consistent with the ``int(..., 16)`` conversion.
    '''
    p = r'\\0{0,4}([0-9a-fA-F]{2})'
    r = lambda m: chr(int(m.groups()[0], 16))
    return re.sub(p, r, x).lower()
def URI_value(x):
    '''Extract the target of a ``url(...)`` token as a css.Uri.

    Escapes are normalized first; a quoted argument is unquoted via
    STRING_value.
    '''
    inner = normalize(x)[4:-1].strip()  # drop the 'url(' prefix and ')' suffix
    if inner[0] in '"\'':
        inner = STRING_value(inner)
    return css.Uri(inner)
def STRING_value(x):
    '''Unquote a STRING token, unescaping embedded quotes of the same kind.'''
    quote = x[0]
    body = x[1:-1].replace(u'\\' + quote, quote)
    return css.String(body)
class cssparser(object):
    '''Grammar actions for a PLY yacc-based CSS parser.

    NOTE: the docstring of every ``p_*`` method below is a PLY grammar
    rule, not prose documentation -- PLY reads these strings to build the
    parser tables, so their text is load-bearing and must not be reworded.
    Each action stores its result in ``p[0]``: either a node from the
    ``css`` module or a plain string fragment.  This module targets
    Python 2 (see the ``print`` statement in ``p_error``).
    '''

    # Token names are shared with the lexer so grammar and lexer agree.
    tokens = csslexer.tokens

    def p_stylesheet(self, p):
        '''
        stylesheet : charset spaces_or_sgml_comments imports statements
                   | spaces_or_sgml_comments imports statements
        '''
        # With a leading @charset the symbols shift right by one,
        # hence the two cases.
        if isinstance(p[1], css.Charset):
            p[0] = css.Stylesheet(p[4], p[3], p[1])
        else:
            p[0] = css.Stylesheet(p[3], p[2])
        #print p.slice

    def p_charset(self, p):
        '''
        charset : CHARSET_SYM STRING ';'
        '''
        p[0] = css.Charset(STRING_value(p[2]))

    def p_media(self, p):
        '''
        media : MEDIA_SYM spaces media_types LBRACE spaces rulesets '}' spaces
        '''
        p[0] = css.Media(p[3], p[6])

    def p_medium(self, p):
        '''
        medium : IDENT spaces
        '''
        p[0] = p[1]

    def p_page(self, p):
        '''
        page : PAGE_SYM spaces pseudo_page spaces LBRACE block_declarations '}' spaces
             | PAGE_SYM spaces LBRACE block_declarations '}' spaces
        '''
        # An Ident at p[3] means the pseudo_page branch matched.
        if isinstance(p[3], css.Ident):
            p[0] = css.Page(p[6], p[3])
        else:
            p[0] = css.Page(p[4])

    def p_pseudo_page(self, p):
        '''
        pseudo_page : ':' IDENT
        '''
        p[0] = css.Ident(p[2])

    def p_import(self, p):
        '''
        import : IMPORT_SYM spaces import_source media_types spaces ';' spaces
               | IMPORT_SYM spaces import_source ';' spaces
        '''
        # A list at p[4] means explicit media types were given.
        if isinstance(p[4], list):
            p[0] = css.Import(p[3], p[4])
        else:
            p[0] = css.Import(p[3])

    def p_operator(self, p):
        '''
        operator : '/' spaces
                 | COMMA spaces
                 | empty
        '''
        p[0] = p[1]

    def p_combinator(self, p):
        '''
        combinator : PLUS spaces
                   | GREATER spaces
                   | spaces
        '''
        p[0] = p[1]

    def p_unary_operator(self, p):
        '''
        unary_operator : '-'
                       | PLUS
        '''
        p[0] = p[1]

    def p_property(self, p):
        '''
        property : IDENT spaces
        '''
        p[0] = css.Ident(p[1])

    def p_ruleset(self, p):
        '''
        ruleset : ruleset_selector_group LBRACE spaces block_declarations '}' spaces
        '''
        p[0] = css.Ruleset(p[1], p[4])

    def p_selector(self, p):
        '''
        selector : simple_selector simple_selectors
        '''
        # Selectors are kept as concatenated text, not structured nodes.
        p[0] = u''.join(p[1:])

    def p_simple_selector(self, p):
        '''
        simple_selector : element_name simple_selector_components
                        | simple_selector_component simple_selector_components
        '''
        p[0] = u''.join(p[1:])

    def p_simple_selectors(self, p):
        '''
        simple_selectors : combinator simple_selector simple_selectors
                         | empty
        '''
        p[0] = u''.join(p[1:])

    def p_simple_selector_component(self, p):
        '''
        simple_selector_component : HASH
                                  | class
                                  | attrib
                                  | pseudo
        '''
        p[0] = p[1]

    def p_simple_selector_components(self, p):
        '''
        simple_selector_components : simple_selector_component simple_selector_components
                                   | empty
        '''
        p[0] = u''.join(p[1:])

    def p_class(self, p):
        '''
        class : '.' IDENT
        '''
        p[0] = u''.join(p[1:])

    def p_element_name(self, p):
        '''
        element_name : IDENT
                     | '*'
        '''
        p[0] = p[1]

    def p_attrib(self, p):
        '''
        attrib : '[' spaces IDENT spaces attrib_match ']'
        '''
        p[0] = u''.join(p[1:])

    def p_pseudo(self, p):
        '''
        pseudo : ':' IDENT
               | ':' FUNCTION spaces IDENT spaces ')'
               | ':' FUNCTION spaces ')'
        '''
        p[0] = u''.join(p[1:])

    def p_declaration(self, p):
        '''
        declaration : property ':' spaces expr prio
                    | property ':' spaces expr
                    | empty
        '''
        # The empty branch yields None so block_declarations can skip it.
        if len(p) == 2:
            p[0] = None
        else:
            # Six slices (p[0]..p[5]) means the prio branch matched.
            important = len(p) == 6
            p[0] = css.Declaration(p[1], p[4], important)

    def p_prio(self, p):
        '''
        prio : IMPORTANT_SYM spaces
        '''
        p[0] = p[1]

    def p_expr(self, p):
        '''
        expr : expr operator term
             | expr term
             | term
        '''
        # Expressions are flattened back into text, inserting a space when
        # two terms are adjacent without an explicit operator.
        if len(p) == 4:
            p[0] = u''.join([unicode(x) for x in p[1:]])
        elif len(p) == 3:
            p[0] = unicode(p[1]) + u' ' + unicode(p[2])
        else:
            p[0] = p[1]

    def p_term(self, p):
        '''
        term : unary_operator term_quant spaces
             | term_quant spaces
             | STRING spaces
             | IDENT spaces
             | URI spaces
             | hexcolor
             | function
        '''
        # Dispatch on what matched: already-built nodes pass through,
        # token types get wrapped in the matching css node, and a leading
        # sign produces a signed Term.
        if isinstance(p[1], css.Function) or isinstance(p[1], css.Hexcolor):
            p[0] = p[1]
        elif p.slice[1].type == 'URI':
            p[0] = URI_value(p[1])
        elif p.slice[1].type == 'STRING':
            p[0] = STRING_value(p[1])
        elif p.slice[1].type == 'IDENT':
            p[0] = css.Ident(p[1])
        elif -1 != '-+'.find(p[1]):
            p[0] = css.Term(p[2], p[1])
        else:
            p[0] = css.Term(p[1])

    def p_term_quant(self, p):
        '''
        term_quant : NUMBER
                   | PERCENTAGE
                   | LENGTH
                   | EMS
                   | EXS
                   | ANGLE
                   | TIME
                   | FREQ
        '''
        p[0] = normalize(p[1])

    def p_function(self, p):
        '''
        function : FUNCTION spaces expr ')' spaces
        '''
        name = p[1][:-1] # strip the open paren
        p[0] = css.Function(name, p[3])

    def p_hexcolor(self, p):
        '''
        hexcolor : HASH spaces
        '''
        p[0] = css.Hexcolor(p[1])

    def p_spaces(self, p):
        '''
        spaces : spaces S
               | S
               | empty
        '''
        # Collapse any run of whitespace to a single space (empty stays '').
        p[0] = p[1] and u' '

    def p_imports(self, p):
        '''
        imports : imports import spaces_or_sgml_comments
                | import spaces_or_sgml_comments
                | empty
        '''
        # Accumulate @import nodes into a list, left-recursively.
        if not p[1]:
            p[0] = []
        elif isinstance(p[1], list):
            p[0] = p[1]
            p[0].append(p[2])
        else:
            p[0] = [p[1]]

    def p_statements(self, p):
        '''
        statements : statements ruleset spaces_or_sgml_comments
                   | statements media spaces_or_sgml_comments
                   | statements page spaces_or_sgml_comments
                   | ruleset spaces_or_sgml_comments
                   | media spaces_or_sgml_comments
                   | page spaces_or_sgml_comments
                   | empty
        '''
        # Same accumulate-into-list pattern as p_imports.
        if not p[1]:
            p[0] = []
        elif isinstance(p[1], list):
            p[0] = p[1]
            p[0].append(p[2])
        else:
            p[0] = [p[1]]

    def p_import_source(self, p):
        '''
        import_source : STRING spaces
                      | URI spaces
        '''
        if p.slice[1].type == 'URI':
            p[0] = URI_value(p[1])
        else:
            p[0] = STRING_value(p[1])

    def p_media_types(self, p):
        '''
        media_types : media_types COMMA spaces medium
                    | medium
        '''
        if len(p) == 2:
            p[0] = [p[1]]
        else:
            p[0] = p[1]
            p[0].append(p[4])

    def p_rulesets(self, p):
        '''
        rulesets : rulesets ruleset
                 | ruleset
                 | empty
        '''
        if not p[1]:
            p[0] = []
        elif isinstance(p[1], list):
            p[0] = p[1]
            p[0].append(p[2])
        else:
            p[0] = [p[1]]

    def p_ruleset_selector_group(self, p):
        '''
        ruleset_selector_group : ruleset_selector_group COMMA spaces selector
                               | selector
        '''
        # Note: p[1:] / p[4:] are list slices of the parse values.
        if len(p) == 2:
            p[0] = p[1:]
        else:
            p[0] = p[1] + p[4:]

    def p_block_declarations(self, p):
        '''
        block_declarations : block_declarations ';' spaces declaration
                           | declaration
        '''
        # Empty declarations come through as None and are dropped.
        if len(p) == 2:
            p[0] = []
            if p[1]:
                p[0].append(p[1])
        else:
            p[0] = p[1]
            if p[4]:
                p[0].append(p[4])

    def p_attrib_match(self, p):
        '''
        attrib_match : '=' spaces attrib_val spaces
                     | INCLUDES spaces attrib_val spaces
                     | DASHMATCH spaces attrib_val spaces
                     | empty
        '''
        p[0] = u''.join(p[1:])

    def p_attrib_val(self, p):
        '''
        attrib_val : IDENT
                   | STRING
        '''
        p[0] = p[1]

    def p_spaces_or_sgml_comments(self, p):
        '''
        spaces_or_sgml_comments : spaces_or_sgml_comments S
                                | spaces_or_sgml_comments CDO
                                | spaces_or_sgml_comments CDC
                                | S
                                | CDO
                                | CDC
                                | empty
        '''
        # Whitespace and SGML comment delimiters collapse to one space.
        p[0] = p[1] and u' '

    def p_empty(self, p):
        '''
        empty :
        '''
        p[0] = u''

    def p_error(self, p):
        # Python 2 print statement; message goes to stdout.
        print "Syntax error at '%r'" % (p,)
def yacc(**kw):
    '''Build and return a PLY parser over the cssparser grammar.

    Keyword arguments are forwarded to ply.yacc.yacc(); the start symbol
    defaults to 'stylesheet' unless the caller supplies one.
    '''
    kw['module'] = cssparser()
    kw.setdefault('start', 'stylesheet')
    return ply_yacc.yacc(**kw)
// RUN: %check_clang_tidy %s bugprone-unique-ptr-array-mismatch %t
// Minimal stand-in for the real <memory> header: just enough of
// std::unique_ptr (primary template plus the T[] partial specialization,
// each with a deleter-taking constructor) for the check under test to
// recognize the type. Constructors are declared only; this test is never
// linked.
namespace std {
template<class T> struct default_delete {};
template<class T> struct default_delete<T[]> {};

template<class T, class Deleter = std::default_delete<T>>
class unique_ptr {
public:
  explicit unique_ptr(T* p) noexcept;
  unique_ptr(T* p, Deleter d1 ) noexcept;
};

template <class T, class Deleter>
class unique_ptr<T[], Deleter> {
public:
  template<class U>
  explicit unique_ptr(U p) noexcept;
  template<class U>
  unique_ptr(U p, Deleter d1) noexcept;
};
} // namespace std
// Helper element type plus aliases: the aliases exercise detection of the
// mismatch through a typedef'd unique_ptr.
struct A {};

using PtrT = std::unique_ptr<A>;
using PtrTArr = std::unique_ptr<A[]>;
// Direct new-expressions in local initializers: a scalar unique_ptr
// initialized with new[] must be diagnosed (and fixed where the declaration
// is editable); deleter-taking constructors and already-correct array forms
// are negative controls.
void f1() {
  std::unique_ptr<int> P1{new int};
  std::unique_ptr<int> P2{new int[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:27: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-FIXES: std::unique_ptr<int[]> P2{new int[10]};
  // clang-format off
    std::unique_ptr< int > P3{new int[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:31: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-FIXES: std::unique_ptr< int[] > P3{new int[10]};
  // clang-format on
  std::unique_ptr<int> P4(new int[10]);
  // CHECK-MESSAGES: :[[@LINE-1]]:27: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-FIXES: std::unique_ptr<int[]> P4(new int[10]);
  new std::unique_ptr<int>(new int[10]);
  // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  std::unique_ptr<int[]> P5(new int[10]);
  A deleter;
  std::unique_ptr<int, A> P6(new int[10], deleter);
  std::unique_ptr<int, A> P7(new int[10]);
  std::default_delete<int[]> def_del;
  std::unique_ptr<int, std::default_delete<int[]>> P8(new int[10], def_del);
  new PtrT(new A[10]);
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  new PtrTArr(new A[10]);
}
// Same checks with a user-defined element type instead of int.
void f2() {
  std::unique_ptr<A> P1(new A);
  std::unique_ptr<A> P2(new A[10]);
  // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-FIXES: std::unique_ptr<A[]> P2(new A[10]);
  std::unique_ptr<A[]> P3(new A[10]);
}
// Multiple declarators in one statement: each mismatching initializer is
// flagged, but no fix is offered (the shared type cannot be rewritten).
void f3() {
  std::unique_ptr<int> P1{new int}, P2{new int[10]}, P3{new int[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:40: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-MESSAGES: :[[@LINE-2]]:57: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
}
// Member default initializers and constructor member-init lists are
// covered too; no fixes are expected here.
struct S {
  std::unique_ptr<int> P1;
  std::unique_ptr<int> P2{new int[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:27: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  std::unique_ptr<int> P3{new int}, P4{new int[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:40: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  S() : P1{new int[10]} {}
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
};
// Temporary passed as a function argument: flagged, no fix (the parameter
// type lives elsewhere).
void f_parm(std::unique_ptr<int>);

void f4() {
  f_parm(std::unique_ptr<int>{new int[10]});
  // CHECK-MESSAGES: :[[@LINE-1]]:31: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
}
// Temporary in a return statement: flagged, no fix (the return type lives
// in the signature).
std::unique_ptr<int> f_ret() {
  return std::unique_ptr<int>(new int[10]);
  // CHECK-MESSAGES: :[[@LINE-1]]:31: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
}
// Dependent element type in an instantiated template: still diagnosed and
// fixed (f5 forces the instantiation).
template <class T>
void f_tmpl() {
  std::unique_ptr<T> P1{new T[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-FIXES: std::unique_ptr<T[]> P1{new T[10]};
}

void f5() {
  f_tmpl<char>();
}
// Never-instantiated template: currently not diagnosed (known limitation,
// hence the FIXME_CHECK prefixes keeping these directives inert).
template <class T>
void f_tmpl_1() {
  std::unique_ptr<T> P1{new T[10]};
  // FIXME_CHECK-MESSAGES: :[[@LINE-1]]:25: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // FIXME_CHECK-FIXES: std::unique_ptr<T[]> P1{new T[10]};
}
// Macro involvement: the diagnostic still fires, and a fix is offered only
// when the rewritable type is spelled outside the macro (CHAR_PTR_VAR).
#define CHAR_PTR_TYPE std::unique_ptr<char>

#define CHAR_PTR_VAR(X) \
  X { new char[10] }

#define CHAR_PTR_INIT(X, Y) \
  std::unique_ptr<char> X { Y }

void f6() {
  CHAR_PTR_TYPE P1{new char[10]};
  // CHECK-MESSAGES: :[[@LINE-1]]:20: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  std::unique_ptr<char> CHAR_PTR_VAR(P2);
  // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
  // CHECK-FIXES: std::unique_ptr<char[]> CHAR_PTR_VAR(P2);
  CHAR_PTR_INIT(P3, new char[10]);
  // CHECK-MESSAGES: :[[@LINE-1]]:21: warning: unique pointer to non-array is initialized with array [bugprone-unique-ptr-array-mismatch]
}
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initializers for TF 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops.init_ops import _compute_fans
from tensorflow.python.util.tf_export import tf_export
_PARTITION_SHAPE = "partition_shape"
_PARTITION_OFFSET = "partition_offset"
class Initializer(object):
  """Initializer base class: all initializers inherit from this class.

  Initializers should implement a `__call__` method with the following
  signature:

  ```python
  def __call__(self, shape, dtype=None, **kwargs):
    # returns a tensor of shape `shape` and dtype `dtype`
    # containing values drawn from a distribution of your choice.
  ```
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. If not provided will return tensor
        of `tf.float32`.
      **kwargs: Additional keyword arguments. Accepted values:
        `partition_shape` and `partition_offset`. Used when creating a single
        partition in a partitioned variable. `partition_shape` is the shape of
        the partition (i.e. the shape of the returned tensor) and
        `partition_offset` is a tuple of `int` specifying the offset of this
        partition w.r.t each axis. For example, a tensor of shape `(30, 100)`
        can be partitioned into two partitions: `p0` of shape `(10, 100)` and
        `p1` of shape `(20, 100)`; if the initializer is called with
        `partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should
        return the value for `p1`.
    """
    # Abstract: concrete initializers must override this.
    raise NotImplementedError

  def get_config(self):
    """Returns the configuration of the initializer as a JSON-serializable dict.

    Returns:
      A JSON-serializable Python dict.
    """
    # Base initializers are configuration-free; subclasses with constructor
    # arguments override this to make from_config() round-trip.
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.

    Example:

    ```python
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```

    Args:
      config: A Python dictionary.
        It will typically be the output of `get_config`.

    Returns:
      An Initializer instance.
    """
    # Drop a stray `dtype` entry (not a constructor argument here) before
    # instantiating.
    config.pop("dtype", None)
    return cls(**config)

  def _validate_kwargs(self, kwargs, support_partition=True):
    # Reject any kwarg other than the partition-related ones, and reject
    # even those when the concrete initializer cannot honor them.
    for kwarg in kwargs:
      if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:
        raise TypeError("Unknown keyword arguments: %s" % kwarg)
      elif not support_partition:
        raise ValueError("%s initializer doesn't support partition-related"
                         " arguments" % self.__class__.__name__)
@tf_export("zeros_initializer", v1=[])
class Zeros(Initializer):
  """Initializer that generates tensors initialized to 0.

  Initializers allow you to pre-specify an initialization strategy, encoded
  in the Initializer object, without knowing the shape and dtype of the
  variable being initialized.

  Examples:

  >>> def make_variables(k, initializer):
  ...   return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
  ...           tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
  >>> v1, v2 = make_variables(3, tf.zeros_initializer())
  >>> v1
  <tf.Variable ... shape=(3,) ... numpy=array([0., 0., 0.], dtype=float32)>
  >>> v2
  <tf.Variable ... shape=(3, 3) ... numpy=
  array([[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]], dtype=float32)>
  """

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor of zeros with the requested shape and dtype.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
        are supported.
      **kwargs: Additional keyword arguments; `partition_shape` replaces
        `shape` when a single partition of a partitioned variable is being
        initialized.

    Raises:
      ValueError: If the dtype is not numeric or boolean.
    """
    self._validate_kwargs(kwargs)
    resolved_dtype = dtypes.as_dtype(dtype)
    numeric_or_bool = (resolved_dtype.is_numpy_compatible and
                       resolved_dtype != dtypes.string)
    if not numeric_or_bool:
      raise ValueError("Expected numeric or boolean dtype, got %s." %
                       resolved_dtype)
    target_shape = kwargs.get(_PARTITION_SHAPE, shape)
    return array_ops.zeros(target_shape, resolved_dtype)
@tf_export("ones_initializer", v1=[])
class Ones(Initializer):
  """Initializer that generates tensors initialized to 1.

  Initializers allow you to pre-specify an initialization strategy, encoded
  in the Initializer object, without knowing the shape and dtype of the
  variable being initialized.

  Examples:

  >>> def make_variables(k, initializer):
  ...   return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
  ...           tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
  >>> v1, v2 = make_variables(3, tf.ones_initializer())
  >>> v1
  <tf.Variable ... shape=(3,) ... numpy=array([1., 1., 1.], dtype=float32)>
  >>> v2
  <tf.Variable ... shape=(3, 3) ... numpy=
  array([[1., 1., 1.],
         [1., 1., 1.],
         [1., 1., 1.]], dtype=float32)>
  """

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor of ones with the requested shape and dtype.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
        are supported.
      **kwargs: Additional keyword arguments; `partition_shape` replaces
        `shape` when a single partition of a partitioned variable is being
        initialized.

    Raises:
      ValueError: If the dtype is not numeric or boolean.
    """
    self._validate_kwargs(kwargs)
    resolved_dtype = dtypes.as_dtype(dtype)
    numeric_or_bool = (resolved_dtype.is_numpy_compatible and
                       resolved_dtype != dtypes.string)
    if not numeric_or_bool:
      raise ValueError("Expected numeric or boolean dtype, got %s." %
                       resolved_dtype)
    target_shape = kwargs.get(_PARTITION_SHAPE, shape)
    return array_ops.ones(target_shape, resolved_dtype)
@tf_export("constant_initializer", v1=[])
class Constant(Initializer):
  """Initializer that generates tensors with constant values.

  When called, the returned object produces a tensor populated with the
  `value` given to the constructor. The `value` must be convertible to the
  requested `dtype`.

  `value` may be a scalar, which broadcasts to any requested shape, or a
  list/tuple/numpy array, in which case the total number of elements in
  `value` must equal the number of elements implied by the requested shape;
  otherwise the underlying constant op raises a `TypeError`.

  Args:
    value: A Python scalar, list or tuple of values, or an N-dimensional
      numpy array. All elements of the initialized variable are set to the
      corresponding value in the `value` argument.

  Raises:
    TypeError: If the input `value` is not one of the expected types.
  """

  def __init__(self, value=0):
    accepted = np.isscalar(value) or isinstance(value,
                                                (list, tuple, np.ndarray))
    if not accepted:
      raise TypeError(
          "Invalid type for initial value: %s (expected Python scalar, list or "
          "tuple of values, or numpy.ndarray)." % type(value))
    self.value = value

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor filled with the stored constant value.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. If not provided, the dtype of
        the created tensor is the type of the initial value.
      **kwargs: Additional keyword arguments.

    Raises:
      TypeError: If the initializer cannot create a tensor of the requested
        dtype.
    """
    # Partitioned initialization is not supported for constants.
    self._validate_kwargs(kwargs, support_partition=False)
    if dtype is not None:
      dtype = dtypes.as_dtype(dtype)
    return constant_op.constant(self.value, dtype=dtype, shape=shape)

  def get_config(self):
    return {"value": self.value}
@tf_export("random_uniform_initializer", v1=[])
class RandomUniform(Initializer):
  """Initializer that draws values from a uniform distribution.

  The distribution covers the half-open interval `[minval, maxval)`; shape
  and dtype are supplied when the initializer object is called.

  Args:
    minval: A Python scalar or a scalar tensor. Lower bound of the range of
      random values to generate (inclusive).
    maxval: A Python scalar or a scalar tensor. Upper bound of the range of
      random values to generate (exclusive).
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
  """

  def __init__(self, minval=-0.05, maxval=0.05, seed=None):
    self.minval = minval
    self.maxval = maxval
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor of uniformly distributed random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point and integer
        types are supported.
      **kwargs: Additional keyword arguments.

    Raises:
      ValueError: If the dtype is not numeric.
    """
    self._validate_kwargs(kwargs)
    dtype = dtypes.as_dtype(dtype)
    if not (dtype.is_floating or dtype.is_integer):
      raise ValueError("Expected float or integer dtype, got %s." % dtype)
    # A partitioned variable passes the shard shape via kwargs.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.random_uniform(shape, self.minval,
                                                 self.maxval, dtype)

  def get_config(self):
    return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed}
@tf_export("random_normal_initializer", v1=[])
class RandomNormal(Initializer):
  """Initializer that draws values from a normal (Gaussian) distribution.

  The distribution is parameterized by `mean` and `stddev`; shape and dtype
  are supplied when the initializer object is called.

  Args:
    mean: A Python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: A Python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
  """

  def __init__(self, mean=0.0, stddev=0.05, seed=None):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor of normally distributed random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported.
      **kwargs: Additional keyword arguments.

    Raises:
      ValueError: If the dtype is not floating point.
    """
    self._validate_kwargs(kwargs)
    dtype = _assert_float_dtype(dtype)
    # A partitioned variable passes the shard shape via kwargs.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.random_normal(shape, self.mean,
                                                self.stddev, dtype)

  def get_config(self):
    return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
class TruncatedNormal(Initializer):
  """Initializer that generates a truncated normal distribution.

  Values are drawn as in `RandomNormal`, except that samples more than two
  standard deviations from the mean are discarded and re-drawn. This is the
  recommended initializer for neural network weights and filters.

  Args:
    mean: A Python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: A Python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.
  """

  def __init__(self, mean=0.0, stddev=0.05, seed=None):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor of truncated-normal random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported.
      **kwargs: Additional keyword arguments.

    Raises:
      ValueError: If the dtype is not floating point.
    """
    self._validate_kwargs(kwargs)
    dtype = _assert_float_dtype(dtype)
    # A partitioned variable passes the shard shape via kwargs.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return self._random_generator.truncated_normal(shape, self.mean,
                                                   self.stddev, dtype)

  def get_config(self):
    return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
class VarianceScaling(Initializer):
  """Initializer capable of adapting its scale to the shape of weights tensors.

  With `distribution="truncated_normal"` or `"untruncated_normal"`, samples
  are drawn from a truncated/untruncated normal distribution with zero mean
  and standard deviation (after truncation, if used)
  `stddev = sqrt(scale / n)`, where `n` is:

    - the number of input units in the weight tensor, if `mode="fan_in"`;
    - the number of output units, if `mode="fan_out"`;
    - the average of the two, if `mode="fan_avg"`.

  With `distribution="uniform"`, samples are drawn uniformly from
  `[-limit, limit]` with `limit = sqrt(3 * scale / n)`.

  Args:
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "truncated_normal",
      "untruncated_normal" and "uniform".
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.

  Raises:
    ValueError: In case of an invalid value for the "scale", "mode" or
      "distribution" arguments.
  """

  def __init__(self,
               scale=1.0,
               mode="fan_in",
               distribution="truncated_normal",
               seed=None):
    if scale <= 0.:
      raise ValueError("`scale` must be positive float.")
    if mode not in {"fan_in", "fan_out", "fan_avg"}:
      raise ValueError("Invalid `mode` argument:", mode)
    distribution = distribution.lower()
    # "normal" is accepted for compatibility with keras-team/keras and is
    # interpreted as "truncated_normal".
    if distribution == "normal":
      distribution = "truncated_normal"
    if distribution not in {"uniform", "truncated_normal",
                            "untruncated_normal"}:
      raise ValueError("Invalid `distribution` argument:", distribution)
    self.scale = scale
    self.mode = mode
    self.distribution = distribution
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor of random values scaled according to the config.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported.
      **kwargs: Additional keyword arguments.

    Raises:
      ValueError: If the dtype is not floating point.
    """
    self._validate_kwargs(kwargs)
    dtype = _assert_float_dtype(dtype)
    # Fans are computed from the full shape, before any partition shape is
    # applied below.
    fan_in, fan_out = _compute_fans(shape)
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    if self.mode == "fan_in":
      denominator = fan_in
    elif self.mode == "fan_out":
      denominator = fan_out
    else:
      denominator = (fan_in + fan_out) / 2.
    scale = self.scale / max(1., denominator)
    if self.distribution == "truncated_normal":
      # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
      # corrects the stddev for the truncation.
      stddev = math.sqrt(scale) / .87962566103423978
      return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
    if self.distribution == "untruncated_normal":
      return self._random_generator.random_normal(shape, 0.0,
                                                  math.sqrt(scale), dtype)
    limit = math.sqrt(3.0 * scale)
    return self._random_generator.random_uniform(shape, -limit, limit, dtype)

  def get_config(self):
    return {
        "scale": self.scale,
        "mode": self.mode,
        "distribution": self.distribution,
        "seed": self.seed
    }
class Orthogonal(Initializer):
  """Initializer that generates an orthogonal matrix.

  For a two-dimensional shape, the returned tensor is an orthogonal matrix
  obtained from the QR decomposition of a matrix of random numbers drawn
  from a normal distribution. If the matrix has fewer rows than columns the
  output has orthogonal rows; otherwise it has orthogonal columns.

  For shapes with more than two dimensions, a matrix of shape
  `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n`
  is the length of the shape vector, and subsequently reshaped to the
  desired shape.

  Args:
    gain: Multiplicative factor to apply to the orthogonal matrix.
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.

  References:
    [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
    ([pdf](https://arxiv.org/pdf/1312.6120.pdf))
  """

  def __init__(self, gain=1.0, seed=None):
    self.gain = gain
    self.seed = seed
    self._random_generator = _RandomGenerator(seed)

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns an orthogonally initialized tensor of the given shape.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported.
      **kwargs: Additional keyword arguments.

    Raises:
      ValueError: If the dtype is not floating point or the input shape is
        not valid.
    """
    self._validate_kwargs(kwargs, support_partition=False)
    dtype = _assert_float_dtype(dtype)
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Collapse all leading dimensions into rows and keep the trailing
    # dimension as columns, so the same code works e.g. for conv2d kernels.
    num_cols = shape[-1]
    num_rows = 1
    for size in shape[:-1]:
      num_rows *= size
    # Orthogonalize in the taller orientation; transpose back afterwards if
    # needed.
    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
    matrix = self._random_generator.random_normal(flat_shape, dtype=dtype)
    q, r = gen_linalg_ops.qr(matrix, full_matrices=False)
    # Multiply each column of Q by the sign of the corresponding diagonal
    # entry of R to make Q uniform.
    q *= math_ops.sign(array_ops.diag_part(r))
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)

  def get_config(self):
    return {"gain": self.gain, "seed": self.seed}
class Identity(Initializer):
  """Initializer that generates the identity matrix, scaled by `gain`.

  Only usable for generating 2D matrices.

  Args:
    gain: Multiplicative factor to apply to the identity matrix.
  """

  def __init__(self, gain=1.0):
    self.gain = gain

  def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a scaled identity matrix of the given shape.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported.
      **kwargs: Additional keyword arguments.

    Raises:
      ValueError: If the dtype is not floating point.
      ValueError: If the requested shape does not have exactly two axes.
    """
    self._validate_kwargs(kwargs, support_partition=False)
    dtype = _assert_float_dtype(dtype)
    if len(shape) != 2:
      raise ValueError(
          "Identity matrix initializer can only be used for 2D matrices.")
    eye = linalg_ops_impl.eye(*shape, dtype=dtype)
    return self.gain * eye

  def get_config(self):
    return {"gain": self.gain}
class GlorotUniform(VarianceScaling):
  """The Glorot uniform initializer, also called Xavier uniform initializer.

  Draws samples from a uniform distribution within `[-limit, limit]` where
  `limit = sqrt(6 / (fan_in + fan_out))`, with `fan_in` the number of input
  units in the weight tensor and `fan_out` the number of output units.

  Args:
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    # Equivalent to VarianceScaling with fan-averaged uniform sampling.
    super(GlorotUniform, self).__init__(
        mode="fan_avg", distribution="uniform", scale=1.0, seed=seed)

  def get_config(self):
    return {"seed": self.seed}
class GlorotNormal(VarianceScaling):
  """The Glorot normal initializer, also called Xavier normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))`, with `fan_in` the number of input
  units in the weight tensor and `fan_out` the number of output units.

  Args:
    seed: A Python integer. Used to create random seeds. See
      `tf.random.set_seed` for behavior.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    # Equivalent to VarianceScaling with fan-averaged truncated-normal
    # sampling.
    super(GlorotNormal, self).__init__(
        mode="fan_avg", distribution="truncated_normal", scale=1.0, seed=seed)

  def get_config(self):
    return {"seed": self.seed}
# Aliases.
# snake_case aliases for the initializer classes defined above.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
variance_scaling_initializer = VarianceScaling
glorot_uniform_initializer = GlorotUniform
glorot_normal_initializer = GlorotNormal
orthogonal_initializer = Orthogonal
identity_initializer = Identity
# pylint: enable=invalid-name
def lecun_normal(seed=None):
  """LeCun normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(1 / fan_in)`, where `fan_in` is the number of input units
  in the weight tensor.

  Args:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    A callable Initializer with `shape` and `dtype` arguments which
    generates a tensor.

  References:
    - Self-Normalizing Neural Networks,
      [Klambauer et al., 2017]
      (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
      ([pdf]
      (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
    - Efficient Backprop,
      [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """
  # Thin wrapper around VarianceScaling with the LeCun-normal parameters.
  return VarianceScaling(
      mode="fan_in", distribution="truncated_normal", scale=1., seed=seed)
def lecun_uniform(seed=None):
  """LeCun uniform initializer.

  Draws samples from a uniform distribution within `[-limit, limit]` where
  `limit = sqrt(3 / fan_in)` and `fan_in` is the number of input units in
  the weight tensor.

  Args:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    A callable Initializer with `shape` and `dtype` arguments which
    generates a tensor.

  References:
    - Self-Normalizing Neural Networks,
      [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)  # pylint: disable=line-too-long
      ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
    - Efficient Backprop,
      [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """
  # Thin wrapper around VarianceScaling with the LeCun-uniform parameters.
  return VarianceScaling(
      mode="fan_in", distribution="uniform", scale=1., seed=seed)
def he_normal(seed=None):
  """He normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / fan_in)`, where `fan_in` is the number of input units
  in the weight tensor.

  Args:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    A callable Initializer with `shape` and `dtype` arguments which
    generates a tensor.

  References:
    [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)  # pylint: disable=line-too-long
    ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """
  # Thin wrapper around VarianceScaling with the He-normal parameters.
  return VarianceScaling(
      mode="fan_in", distribution="truncated_normal", scale=2., seed=seed)
def he_uniform(seed=None):
  """He uniform variance scaling initializer.

  Draws samples from a uniform distribution within `[-limit, limit]` where
  `limit = sqrt(6 / fan_in)` and `fan_in` is the number of input units in
  the weight tensor.

  Args:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    A callable Initializer with `shape` and `dtype` arguments which
    generates a tensor.

  References:
    [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)  # pylint: disable=line-too-long
    ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """
  # Thin wrapper around VarianceScaling with the He-uniform parameters.
  return VarianceScaling(
      mode="fan_in", distribution="uniform", scale=2., seed=seed)
# Utility functions.
def _assert_float_dtype(dtype):
  """Validates that `dtype` is floating point and returns it.

  Args:
    dtype: The data type to validate.

  Returns:
    The validated dtype, normalized via `dtypes.as_dtype`.

  Raises:
    ValueError: If `dtype` is not a floating point type.
  """
  dtype = dtypes.as_dtype(dtype)
  if dtype.is_floating:
    return dtype
  raise ValueError("Expected floating point type, got %s." % dtype)
class _RandomGenerator(object):
  """Random generator that selects appropriate random ops.

  When a seed is supplied, the stateless random ops are used so that
  repeated calls are deterministic; otherwise the stateful ops are used.
  """

  def __init__(self, seed=None):
    super(_RandomGenerator, self).__init__()
    # Stateless random ops require a 2-element integer seed.
    self.seed = None if seed is None else [seed, 0]

  def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
    """A deterministic random normal if seed is passed."""
    op = (stateless_random_ops.stateless_random_normal
          if self.seed else random_ops.random_normal)
    return op(
        shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)

  def random_uniform(self, shape, minval, maxval, dtype):
    """A deterministic random uniform if seed is passed."""
    op = (stateless_random_ops.stateless_random_uniform
          if self.seed else random_ops.random_uniform)
    return op(
        shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)

  def truncated_normal(self, shape, mean, stddev, dtype):
    """A deterministic truncated normal if seed is passed."""
    op = (stateless_random_ops.stateless_truncated_normal
          if self.seed else random_ops.truncated_normal)
    return op(
        shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
# Compatibility aliases
# Short Keras-style names kept for backward compatibility.
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
// Copyright 2024 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package storage
import (
"context"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/pebble"
)
// pebbleLogger forwards Pebble's log output to the STORAGE log channel,
// carrying a context and a logging call depth used by the log.*Depth calls.
type pebbleLogger struct {
	ctx context.Context
	depth int
}

// Compile-time check that pebbleLogger implements pebble.LoggerAndTracer.
var _ pebble.LoggerAndTracer = pebbleLogger{}
// Infof implements pebble.LoggerAndTracer, logging at INFO severity to the
// STORAGE channel.
func (lg pebbleLogger) Infof(format string, args ...interface{}) {
	log.Storage.InfofDepth(lg.ctx, lg.depth, format, args...)
}
// Fatalf implements pebble.LoggerAndTracer, logging at FATAL severity to the
// STORAGE channel.
func (lg pebbleLogger) Fatalf(format string, args ...interface{}) {
	log.Storage.FatalfDepth(lg.ctx, lg.depth, format, args...)
}
// Errorf implements pebble.LoggerAndTracer, logging at ERROR severity to the
// STORAGE channel.
func (lg pebbleLogger) Errorf(format string, args ...interface{}) {
	log.Storage.ErrorfDepth(lg.ctx, lg.depth, format, args...)
}
// pebble.LoggerAndTracer does not expose verbosity levels in its logging
// interface, and Pebble logs go to a separate STORAGE channel.
//
// The tracing part of the interface is meant for user-facing activities, so
// in addition to outputting the event when tracing is enabled, we also log.
// The eventAlsoLogVerbosityLevel of 2 is chosen semi-arbitrarily since this
// is the only verbosity level in this file. It is used by Eventf and
// IsTracingEnabled below.
const eventAlsoLogVerbosityLevel = 2
// Eventf implements pebble.LoggerAndTracer. The event is both recorded on any
// active trace span and, at sufficient verbosity, logged.
func (lg pebbleLogger) Eventf(ctx context.Context, format string, args ...interface{}) {
	log.VEventfDepth(ctx, lg.depth, eventAlsoLogVerbosityLevel, format, args...)
}
func (l pebbleLogger) IsTracingEnabled(ctx context.Context) bool {
return log.HasSpan(ctx) || log.ExpensiveLogEnabledVDepth(ctx, l.depth, eventAlsoLogVerbosityLevel)
} | go | github | https://github.com/cockroachdb/cockroach | pkg/storage/pebble_logger_and_tracer.go |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_REDUX_FUNCTOR_H_
#define TENSORFLOW_CORE_KERNELS_REDUX_FUNCTOR_H_
#define EIGEN_USE_THREADS
#include "Eigen/Core" // from @eigen_archive
#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
namespace functor {
// Compute reduction over outer dimensions.
// Example:
// input: [D1, D2, ... , DN]
// ->
// output: [Di, ... , DN] where i belongs to set [1,N]
// Functor that reduces the leading (outer) dimensions of `input` into
// `output`, combining elements pairwise with `BinaryFunctor`.
//   InputT:  element type of the input tensor.
//   AccumT:  type used for the partial accumulators (may be wider than
//            InputT for numerical accuracy).
//   OutputT: element type of the output tensor.
template <typename InputT, typename AccumT, typename OutputT,
          typename BinaryFunctor>
struct ReduceOuterDimensions {
  ReduceOuterDimensions() {}

  template <int num_dims>
  void operator()(const CPUDevice& device,
                  const Eigen::DSizes<Eigen::Index, num_dims>& input_dims,
                  const Tensor& input, Tensor* output) const {
    // Compute inner and outer dim after reshaping into 2d tensor: the
    // reduced leading dimensions collapse into outer_dim and the kept
    // trailing dimensions collapse into inner_dim.
    const int num_output_dims = output->dims();
    auto output_dims = output->template flat<OutputT>().dimensions();
    Eigen::Index inner_dim = 1, outer_dim = 1;
    for (int i = 0; i < num_dims - num_output_dims; ++i)
      outer_dim *= input_dims[i];
    for (int i = num_dims - num_output_dims; i < num_dims; ++i)
      inner_dim *= input_dims[i];
    if (1 == outer_dim) {
      // Nothing to reduce; just cast/copy input to output.
      output->template flat<OutputT>() =
          input.template flat<InputT>().template cast<OutputT>().reshape(
              output_dims);
      return;
    }
    // Get device thread num.
    const Eigen::Index num_threads = device.numThreads();
    // If the inner dim parallelism is large enough
    // TODO(ezhulenev): There seems to be no benefits in going this route. Check
    // if this can be improved, or use better heuristic?
    if (inner_dim > num_threads * 32) {
      // Parallelize across disjoint slices of the inner dimension: each block
      // owns a contiguous range of accumulator columns, so no synchronization
      // is needed and no cross-block merge pass is required afterwards.
      // Do not create more blocks than there are threads in a pool.
      const Eigen::Index num_blocks = num_threads;
      // Block size along the *inner* dimension (each block reduces all
      // outer_dim rows for its own slice of columns).
      const Eigen::Index inner_block_size = Eigen::divup(inner_dim, num_blocks);
      const InputT* input_data = input.template flat<InputT>().data();
      // Allocate temporary buffer for partial reductions.
      Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index> buffer(
          {inner_dim});
      buffer.setZero();
      AccumT* buffer_data = buffer.data();
      using Buffer = Eigen::TensorMap<
          Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index>,
          Eigen::Unaligned>;
      using Input = Eigen::TensorMap<
          Eigen::Tensor<const InputT, 1, Eigen::RowMajor, Eigen::Index>,
          Eigen::Unaligned>;
      const auto compute = [inner_dim, outer_dim, num_blocks, inner_block_size,
                            input_data, buffer_data](
                               Eigen::Index start, Eigen::Index limit) -> void {
        DCHECK(start >= 0 && limit <= num_blocks);
        Eigen::Index inner_dim_start = start * inner_block_size;
        Eigen::Index inner_dim_limit = limit * inner_block_size;
        inner_dim_limit = std::min(inner_dim, inner_dim_limit);
        Eigen::Index my_job_len = inner_dim_limit - inner_dim_start;
        const InputT* my_job_start = input_data + inner_dim_start;
        Buffer buf(buffer_data + inner_dim_start, my_job_len);
        // Fold every outer row's slice of columns into this block's
        // accumulator slice.
        for (Eigen::Index i = 0; i < outer_dim; ++i) {
          auto in = Input(my_job_start + i * inner_dim, my_job_len);
          auto cast = in.template cast<AccumT>();
          buf = Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf),
                                           const decltype(cast)>(buf, cast);
        }
      };
      // Compute cost of reducing a single block.
      const Eigen::Index compute_size = outer_dim * inner_block_size;
      const Eigen::Index compute_input_bytes = compute_size * sizeof(InputT);
      const Eigen::TensorOpCost cost(
          compute_input_bytes,
          0,  // We'll be mostly writing to L1, assume store cost is 0
          compute_size * Eigen::internal::functor_traits<BinaryFunctor>::Cost);
      device.parallelFor(num_blocks, cost, compute);
      // Write final result to the output.
      output->template flat<OutputT>() =
          buffer.template cast<OutputT>().reshape(output_dims);
    } else {
      // Parallelize across blocks of outer rows; each block accumulates into
      // its own private inner_dim-sized row of `buffer`, and the rows are
      // merged sequentially after the parallel phase.
      // Compute block size along the outer dimension for efficiency.
      const Eigen::Index parallel_cell_size = inner_dim;
      const Eigen::Index total_workload = outer_dim * inner_dim;
      const Eigen::Index max_parallelism = total_workload / parallel_cell_size;
      // Require at least min_block_workload elements per block so tiny
      // blocks don't drown the work in scheduling overhead.
      const Eigen::Index min_block_workload = 2000;
      const Eigen::Index min_block_size =
          Eigen::divup(min_block_workload, parallel_cell_size);
      const Eigen::Index max_num_blocks = std::min(
          max_parallelism, Eigen::divup(total_workload, min_block_size));
      // Do not create more blocks than there are threads in a pool.
      const Eigen::Index num_blocks = std::min(max_num_blocks, num_threads);
      // Block size along the outer dimension.
      const Eigen::Index outer_block_size = Eigen::divup(outer_dim, num_blocks);
      const InputT* input_data = input.template flat<InputT>().data();
      // Allocate temporary buffer for partial reductions.
      Tensor buffer(DataTypeToEnum<AccumT>::v(), {num_blocks, inner_dim});
      buffer.template flat<AccumT>().setZero();
      AccumT* buffer_data = buffer.template flat<AccumT>().data();
      using Buffer = Eigen::TensorMap<
          Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index>,
          Eigen::Unaligned>;
      using Input = Eigen::TensorMap<
          Eigen::Tensor<const InputT, 1, Eigen::RowMajor, Eigen::Index>,
          Eigen::Unaligned>;
      const auto compute = [inner_dim, num_blocks, outer_block_size,
                            buffer_data, input_data, outer_dim](
                               Eigen::Index start, Eigen::Index limit) -> void {
        DCHECK(start >= 0 && limit <= num_blocks);
        Eigen::Index outer_dim_start = start * outer_block_size;
        Eigen::Index outer_dim_limit = limit * outer_block_size;
        outer_dim_limit = std::min(outer_dim, outer_dim_limit);
        // Fold this block's range of outer rows into its private accumulator
        // row; rows are disjoint per block, so no synchronization is needed.
        Buffer buf(buffer_data + start * inner_dim, inner_dim);
        for (Eigen::Index i = outer_dim_start; i < outer_dim_limit; ++i) {
          auto in = Input(input_data + i * inner_dim, inner_dim);
          auto cast = in.template cast<AccumT>();
          buf = Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf),
                                           const decltype(cast)>(buf, cast);
        }
      };
      // Compute cost of reducing a single block.
      const Eigen::Index compute_size = outer_block_size * inner_dim;
      const Eigen::Index compute_input_bytes = compute_size * sizeof(InputT);
      const Eigen::TensorOpCost cost(
          compute_input_bytes,
          0,  // We'll be mostly writing to L1, assume store cost is 0
          compute_size * Eigen::internal::functor_traits<BinaryFunctor>::Cost);
      device.parallelFor(num_blocks, cost, compute);
      // Aggregate partial results from temporary buffer into first block.
      auto buf0 = Buffer(buffer_data, inner_dim);
      // Just sum the buffer up, as inner dimensions is not large in this case.
      for (int i = 1; i < num_blocks; ++i) {
        auto buf = Buffer(buffer_data + i * inner_dim, inner_dim);
        buf0 = Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf0),
                                          const decltype(buf)>(buf0, buf);
      }
      // Write final result to the output.
      output->template flat<OutputT>() =
          buf0.template cast<OutputT>().reshape(output_dims);
    }
  }
};
// Compute reduction to some serial middle dimensions (like a axis).
// Example:
// input: [D1, D2, ... , DN]
// ->
// output: [Di, ... , Dj] where i & j belongs to set [1,N].
// Functor that reduces over contiguous middle dimensions ("axis" reduction):
// the input is logically reshaped to [outer_dim, middle_dim, inner_dim] and
// reduced over outer_dim and inner_dim, keeping middle_dim.
//   InputT:  element type of the input tensor.
//   AccumT:  type used for partial accumulators.
//   OutputT: element type of the output tensor.
//   BinaryFunctor: pairwise combiner used to merge partial results.
//   Reducer: Eigen reducer applied to contiguous inner_dim runs.
template <typename InputT, typename AccumT, typename OutputT,
          typename BinaryFunctor, typename Reducer>
struct ReduceMiddleDimensions {
  ReduceMiddleDimensions() {}

  template <int num_dims>
  void operator()(const CPUDevice& device,
                  const Eigen::DSizes<Eigen::Index, num_dims>& input_dims,
                  const Tensor& input, Tensor* output,
                  const int axis_begin_dim) const {
    // Compute dims after reshaping into 3d tensor.
    const int num_output_dims = output->dims();
    auto output_dims = output->template flat<OutputT>().dimensions();
    Eigen::Index inner_dim = 1, middle_dim = 1, outer_dim = 1;
    for (int i = 0; i < axis_begin_dim; ++i) outer_dim *= input_dims[i];
    for (int i = axis_begin_dim; i < axis_begin_dim + num_output_dims; ++i)
      middle_dim *= input_dims[i];
    for (int i = axis_begin_dim + num_output_dims; i < num_dims; ++i)
      inner_dim *= input_dims[i];
    if ((1 == inner_dim * outer_dim)) {
      // Nothing to reduce; just cast/copy input to output.
      output->template flat<OutputT>() =
          input.template flat<InputT>().template cast<OutputT>().reshape(
              output_dims);
      return;
    }
    // Compute block size along the outer dimension for efficiency.
    const Eigen::Index parallel_cell_size = inner_dim;
    const Eigen::Index max_parallelism = outer_dim * middle_dim;
    const Eigen::Index total_workload = max_parallelism * inner_dim;
    // Require at least min_block_workload elements per block so scheduling
    // overhead does not dominate the reduction itself.
    const Eigen::Index min_block_workload = 2000;
    const Eigen::Index min_block_size =
        Eigen::divup(min_block_workload, parallel_cell_size);
    const Eigen::Index max_num_blocks =
        std::min(max_parallelism, Eigen::divup(total_workload, min_block_size));
    // Do not create more blocks than there are threads in a pool.
    const Eigen::Index num_threads = device.numThreads();
    const Eigen::Index num_blocks = std::min(max_num_blocks, num_threads);
    // Block size along the outer dimension.
    const Eigen::Index outer_block_size =
        Eigen::divup(total_workload, num_blocks);
    const InputT* input_data = input.template flat<InputT>().data();
    // Allocate temporary buffer for partial reductions: one private
    // middle_dim-sized row per block, merged sequentially below.
    Eigen::Tensor<AccumT, 2> buffer(num_blocks, middle_dim);
    buffer.setZero();
    AccumT* buffer_data = buffer.data();
    using Buffer = Eigen::TensorMap<Eigen::Tensor<AccumT, 1>>;
    using Input = Eigen::TensorMap<Eigen::Tensor<const InputT, 1>>;
    Eigen::array<Eigen::Index, 1> reduction_axis = {0};
    Reducer reducer;
    const BinaryFunctor binary_op;
    // Each block reduces a contiguous range [block_start, block_limit) of the
    // flattened input into its own buffer row. The range may start and end
    // mid-way through an inner_dim run, so the unaligned head and tail pieces
    // are reduced separately from the aligned middle section.
    const auto compute = [inner_dim, middle_dim, input_data, buffer_data,
                          total_workload, num_blocks, outer_block_size,
                          reduction_axis, reducer, binary_op](
                             Eigen::Index start, Eigen::Index limit) -> void {
      DCHECK(start >= 0 && limit <= num_blocks);
      Eigen::Index block_start = start * outer_block_size;
      Eigen::Index block_limit = limit * outer_block_size;
      block_limit = std::min(total_workload, block_limit);
      Buffer buf(buffer_data + start * middle_dim, middle_dim);
      // NOTE(review): these are declared `int`; for workloads with more than
      // INT_MAX elements the flattened offsets would overflow — confirm
      // whether Eigen::Index was intended here.
      const int align_start =
          ((block_start + inner_dim - 1) / inner_dim) * inner_dim;
      const int align_end = (block_limit / inner_dim) * inner_dim;
      // Unaligned head: partial inner_dim run at the start of the range.
      Eigen::Index coordinate = block_start / inner_dim % middle_dim;
      Eigen::Tensor<AccumT, 0> reduced =
          Input(&input_data[block_start], align_start - block_start)
              .reduce(reduction_axis, reducer)
              .template cast<AccumT>();
      buf(coordinate) = binary_op(buf(coordinate), reduced(0));
      // Aligned middle: whole inner_dim runs, one per middle coordinate,
      // with the coordinate wrapping around every middle_dim runs.
      coordinate = align_start / inner_dim % middle_dim;
      for (int i = align_start; i < align_end; i += inner_dim) {
        reduced = Input(&input_data[i], inner_dim)
                      .reduce(reduction_axis, reducer)
                      .template cast<AccumT>();
        buf(coordinate) = binary_op(buf(coordinate), reduced(0));
        ++coordinate;
        if (middle_dim == coordinate) coordinate = 0;
      }
      // Unaligned tail: partial inner_dim run at the end of the range.
      reduced = Input(&input_data[align_end], block_limit - align_end)
                    .reduce(reduction_axis, reducer)
                    .template cast<AccumT>();
      buf(coordinate) = binary_op(buf(coordinate), reduced(0));
    };
    // Compute cost of reducing a single block.
    const Eigen::Index compute_size = outer_block_size * inner_dim;
    const Eigen::Index compute_input_bytes = compute_size * sizeof(InputT);
    const Eigen::TensorOpCost cost(
        compute_input_bytes,
        0,  // We'll be mostly writing to L1, assume store cost is 0
        compute_size * Eigen::internal::functor_traits<BinaryFunctor>::Cost);
    device.parallelFor(num_blocks, cost, compute);
    using Output = Eigen::TensorMap<
        Eigen::Tensor<AccumT, 1, Eigen::RowMajor, Eigen::Index>,
        Eigen::Unaligned>;
    // Aggregate partial results from temporary buffer into first block.
    auto buf0 = Output(buffer_data, middle_dim);
    // TODO(ezhulenev): Parallelize this loop for large inner dimensions?
    for (int i = 1; i < num_blocks; ++i) {
      auto buf = Output(buffer_data + i * middle_dim, middle_dim);
      buf0 = Eigen::TensorCwiseBinaryOp<BinaryFunctor, const decltype(buf0),
                                        const decltype(buf)>(buf0, buf);
    }
    // Write final result to the output.
    output->template flat<OutputT>() =
        buf0.template cast<OutputT>().reshape(output_dims);
  }
};
} // namespace functor
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_REDUX_FUNCTOR_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/redux_functor.h |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Polygonize.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Piotr Pociask
Email : ppociask at o2 dot pl
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Piotr Pociask'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Piotr Pociask'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsFields, QgsField, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class Polygonize(GeoAlgorithm):
    """Create polygons from the noded network of an input line layer.

    The actual polygonization is delegated to the ``shapely`` module, which
    must be importable at run time.
    """

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    FIELDS = 'FIELDS'
    GEOMETRY = 'GEOMETRY'

    def processAlgorithm(self, progress):
        """Run the algorithm.

        :param progress: processing feedback object used to report
            percentage and status messages.
        :raises GeoAlgorithmExecutionException: if shapely is missing or no
            polygons could be built from the input lines.
        """
        try:
            from shapely.ops import polygonize
            from shapely.geometry import Point, MultiLineString
        except ImportError:
            raise GeoAlgorithmExecutionException(
                self.tr('Polygonize algorithm requires shapely module!'))
        vlayer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
        output = self.getOutputFromName(self.OUTPUT)
        vprovider = vlayer.dataProvider()
        if self.getParameterValue(self.FIELDS):
            # Copy the attribute table structure of the input layer.
            fields = vprovider.fields()
        else:
            fields = QgsFields()
        if self.getParameterValue(self.GEOMETRY):
            # Remember how many attribute fields precede the two geometry
            # columns so output attributes can be padded correctly below.
            fieldsCount = fields.count()
            fields.append(QgsField('area', QVariant.Double, 'double', 16, 2))
            fields.append(QgsField('perimeter', QVariant.Double,
                                   'double', 16, 2))
        allLinesList = []
        features = vector.features(vlayer)
        current = 0
        progress.setInfo(self.tr('Processing lines...'))
        # Guard against an empty layer: dividing by a zero feature count
        # would otherwise raise ZeroDivisionError while scaling the
        # progress bar over the 0..40% range.
        featureCount = len(features)
        total = 40.0 / featureCount if featureCount else 0
        for inFeat in features:
            inGeom = inFeat.geometry()
            if inGeom is None:
                # Features without geometry contribute no lines; skip them
                # instead of crashing on a null geometry.
                pass
            elif inGeom.isMultipart():
                allLinesList.extend(inGeom.asMultiPolyline())
            else:
                allLinesList.append(inGeom.asPolyline())
            current += 1
            progress.setPercentage(int(current * total))
        progress.setPercentage(40)
        allLines = MultiLineString(allLinesList)
        progress.setInfo(self.tr('Noding lines...'))
        try:
            # unary_union is only available in newer shapely releases and is
            # the fast path for noding the line network.
            from shapely.ops import unary_union
            allLines = unary_union(allLines)
        except ImportError:
            # Fallback for older shapely: a union with a dummy point forces
            # the lines to be noded.
            allLines = allLines.union(Point(0, 0))
        progress.setPercentage(45)
        progress.setInfo(self.tr('Polygonizing...'))
        polygons = list(polygonize([allLines]))
        if not polygons:
            raise GeoAlgorithmExecutionException(self.tr('No polygons were created!'))
        progress.setPercentage(50)
        # Wrapped in self.tr() for consistency with every other status
        # message in this method.
        progress.setInfo(self.tr('Saving polygons...'))
        writer = output.getVectorWriter(fields, QGis.WKBPolygon, vlayer.crs())
        outFeat = QgsFeature()
        current = 0
        # `polygons` is guaranteed non-empty here, so this division is safe
        # (progress range 50..100%).
        total = 50.0 / len(polygons)
        for polygon in polygons:
            outFeat.setGeometry(QgsGeometry.fromWkt(polygon.wkt))
            if self.getParameterValue(self.GEOMETRY):
                # Pad the copied attributes with nulls, then append the
                # computed area and perimeter values.
                outFeat.setAttributes([None] * fieldsCount + [polygon.area,
                                      polygon.length])
            writer.addFeature(outFeat)
            current += 1
            progress.setPercentage(50 + int(current * total))
        progress.setInfo(self.tr('Finished'))
        # Deleting the writer flushes and closes the output data set.
        del writer

    def defineCharacteristics(self):
        """Declare name, group, parameters and outputs of the algorithm."""
        self.name, self.i18n_name = self.trAlgorithm('Polygonize')
        self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_LINE]))
        self.addParameter(ParameterBoolean(self.FIELDS,
                                           self.tr('Keep table structure of line layer'), False))
        self.addParameter(ParameterBoolean(self.GEOMETRY,
                                           self.tr('Create geometry columns'), True))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Polygons from lines')))
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
from ansible import context
class FakeOptions:
    """Bare attribute container standing in for a parsed-CLI options object."""
    pass
def test_set_global_context():
    """_init_global_context should expose CLI options as immutable CLIARGS.

    Lists must come back as tuples, and the non-ASCII value must survive
    the round trip unchanged.
    """
    cli_values = {
        'tags': [u'production', u'webservers'],
        'check_mode': True,
        'start_at_task': u'Start with くらとみ',
    }
    options = FakeOptions()
    for attr_name, attr_value in cli_values.items():
        setattr(options, attr_name, attr_value)

    context._init_global_context(options)

    expected = frozenset({
        ('tags', (u'production', u'webservers')),
        ('check_mode', True),
        ('start_at_task', u'Start with くらとみ'),
    })
    assert frozenset(context.CLIARGS.items()) == expected
from rest_framework import status
from rest_framework.test import APITestCase
from django.urls import reverse
from faker import Factory
from django.contrib.auth.models import User
from farms.models import Farm,Zone
from farms.serializers import FarmSerializer
from collections import OrderedDict
###### Module configs and inits
# Shared Faker instance (Spanish locale) used to build randomized fixtures.
fake = Factory.create('es_ES')
######
class FarmApiTest(APITestCase):
    """CRUD and action tests for the Farm REST endpoints.

    setUp authenticates a fresh user and creates one farm with ten
    randomly sized zones; every test operates on that fixture.
    """
    def setUp(self):
        # Authenticated owner of the fixture farm.
        self.user = User.objects.create_user(username=fake.user_name(),password=fake.password(),email=fake.email())
        self.client.force_authenticate(user=self.user)
        self.farm = Farm.objects.create(owner=self.user,name=fake.md5())
        # Ten zones with random grid sizes/positions attached to the farm.
        self.zones = [Zone.objects.create(farm=self.farm,cols=fake.random_int(1,20),rows=fake.random_int(1,20),x=fake.random_int(1,50),y=fake.random_int(1,50)) for i in range(10)]
    #####################################################
    # CRUD FARM
    #####################################################
    def test_get_farm(self):
        # GET /farms/<id>/ returns 200 for the farm's owner.
        url = reverse('farm-detail',kwargs={'pk': self.farm.id})
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_post_farm(self):
        # POST /farms/ with an empty zone set creates a new farm.
        url = reverse('farm-list')
        data = {'owner':self.user.id,'name':fake.md5(),'zone_set':[]}
        response = self.client.post(url, data,format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_delete_farm(self):
        # DELETE /farms/<id>/ removes the farm.
        url = reverse('farm-detail',kwargs={'pk': self.farm.id})
        response = self.client.delete(url,format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    def test_put_farm_same_zones(self):
        # PUT echoing the serializer's own zone_set is accepted unchanged.
        url = reverse('farm-detail',kwargs={'pk': self.farm.id})
        serializer = FarmSerializer(self.farm)
        data = {'owner':self.user.id,'name':fake.md5(),'zone_set': serializer.data['zone_set']}
        response = self.client.put(url, data,format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_put_farm_add_zone(self):
        # PUT with one extra (id-less) zone should create that zone.
        url = reverse('farm-detail',kwargs={'pk': self.farm.id})
        serializer = FarmSerializer(self.farm)
        serializer.data['zone_set'].append(OrderedDict([('especies', None), ('cols', 8), ('rows', 17), ('x', 42), ('y', 5), ('farm', self.farm.id)]))
        data = {'owner':self.user.id,'name':fake.md5(),'zone_set': serializer.data['zone_set']}
        response = self.client.put(url, data,format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        farm = Farm.objects.get(id=self.farm.id)
        self.assertEqual(farm.zone_set.count() , 10 + 1)
    def test_put_farm_modify_zone(self):
        # PUT carrying an existing zone id with new values updates in place,
        # so the zone count must stay the same.
        url = reverse('farm-detail',kwargs={'pk': self.farm.id})
        serializer = FarmSerializer(self.farm)
        zone = serializer.data['zone_set'][0]
        serializer.data['zone_set'][0] = OrderedDict([('id',zone['id']),('especies', None), ('cols', 2), ('rows', 2), ('x', 2), ('y', 2), ('farm', self.farm.id)])
        data = {'owner':self.user.id,'name':fake.md5(),'zone_set': serializer.data['zone_set']}
        response = self.client.put(url, data,format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        farm = Farm.objects.get(id=self.farm.id)
        self.assertEqual(farm.zone_set.count() , 10)
    def test_put_farm_delete_zone(self):
        # PUT omitting one zone should delete it server-side.
        url = reverse('farm-detail',kwargs={'pk': self.farm.id})
        serializer = FarmSerializer(self.farm)
        data= serializer.data['zone_set'][1:]
        data = {'owner':self.user.id,'name':fake.md5(),'zone_set': data}
        response = self.client.put(url, data,format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        farm = Farm.objects.get(id=self.farm.id)
        self.assertEqual(farm.zone_set.count() , 10 - 1)
    #####################################################
    # FARM ACTIONS
    #####################################################
    def test_regar_farm(self):
        # POST /farms/<id>/regar/ ('regar' = Spanish for watering) records
        # a 'regar' activity entry alongside the initial 'created' one.
        url = reverse('farm-regar',kwargs={'pk': self.farm.id})
        response = self.client.post(url, {},format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertQuerysetEqual(self.farm.action_object_actions.all(),['regar','created'],transform=lambda x:x.verb)
    def test_filter_my_huertos(self):
        # Listing with ?owner__username=<me> must only return my farms,
        # even when other users own farms too.
        other_user = User.objects.create_user(username=fake.user_name(),password=fake.password(),email=fake.email())
        Farm.objects.create(owner=other_user,name=fake.md5())
        from django.http import QueryDict
        qdict = QueryDict('',mutable=True)
        qdict.update({'owner__username':self.user.username})
        url = reverse('farm-list') + '?' + qdict.urlencode()
        response = self.client.get(url,format='json')
        self.assertEqual(len(response.data),Farm.objects.filter(owner=self.user).count())
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
},
{
"datasource": {
"type": "grafana",
"uid": "grafana"
},
"enable": true,
"name": "Annotations \u0026 Alerts"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"autoMigrateFrom": "graph",
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"id": 1,
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "CPU Usage",
"type": "timeseries",
"yAxes": [
{
"show": true
}
]
},
{
"autoMigrateFrom": "singlestat",
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"id": 2,
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Memory Usage",
"type": "stat",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
]
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [],
"templating": {
"list": [
{
"datasource": {
"type": "prometheus"
},
"name": "server",
"options": [],
"query": "label_values(server)",
"refresh": 1,
"type": "query"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "V15 No-Op Migration Test Dashboard",
"weekStart": ""
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/testdata/output/latest_version/v15.no-op-migration.v42.json |
//
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: str_format.h
// -----------------------------------------------------------------------------
//
// The `str_format` library is a typesafe replacement for the family of
// `printf()` string formatting routines within the `<cstdio>` standard library
// header. Like the `printf` family, `str_format` uses a "format string" to
// perform argument substitutions based on types. See the `FormatSpec` section
// below for format string documentation.
//
// Example:
//
// std::string s = absl::StrFormat(
// "%s %s You have $%d!", "Hello", name, dollars);
//
// The library consists of the following basic utilities:
//
// * `absl::StrFormat()`, a type-safe replacement for `std::sprintf()`, to
// write a format string to a `string` value.
// * `absl::StrAppendFormat()` to append a format string to a `string`
// * `absl::StreamFormat()` to more efficiently write a format string to a
// stream, such as`std::cout`.
// * `absl::PrintF()`, `absl::FPrintF()` and `absl::SNPrintF()` as
// drop-in replacements for `std::printf()`, `std::fprintf()` and
// `std::snprintf()`.
//
// Note: An `absl::SPrintF()` drop-in replacement is not supported as it
// is generally unsafe due to buffer overflows. Use `absl::StrFormat` which
// returns the string as output instead of expecting a pre-allocated buffer.
//
// Additionally, you can provide a format string (and its associated arguments)
// using one of the following abstractions:
//
// * A `FormatSpec` class template fully encapsulates a format string and its
// type arguments and is usually provided to `str_format` functions as a
// variadic argument of type `FormatSpec<Arg...>`. The `FormatSpec<Args...>`
// template is evaluated at compile-time, providing type safety.
// * A `ParsedFormat` instance, which encapsulates a specific, pre-compiled
// format string for a specific set of type(s), and which can be passed
// between API boundaries. (The `FormatSpec` type should not be used
// directly except as an argument type for wrapper functions.)
//
// The `str_format` library provides the ability to output its format strings to
// arbitrary sink types:
//
// * A generic `Format()` function to write outputs to arbitrary sink types,
// which must implement a `FormatRawSink` interface.
//
// * A `FormatUntyped()` function that is similar to `Format()` except it is
// loosely typed. `FormatUntyped()` is not a template and does not perform
// any compile-time checking of the format string; instead, it returns a
// boolean from a runtime check.
//
// In addition, the `str_format` library provides extension points for
// augmenting formatting to new types. See "StrFormat Extensions" below.
#ifndef ABSL_STRINGS_STR_FORMAT_H_
#define ABSL_STRINGS_STR_FORMAT_H_
#include <cstdio>
#include <string>
#include "absl/strings/internal/str_format/arg.h" // IWYU pragma: export
#include "absl/strings/internal/str_format/bind.h" // IWYU pragma: export
#include "absl/strings/internal/str_format/checker.h" // IWYU pragma: export
#include "absl/strings/internal/str_format/extension.h" // IWYU pragma: export
#include "absl/strings/internal/str_format/parser.h" // IWYU pragma: export
namespace absl {
ABSL_NAMESPACE_BEGIN
// UntypedFormatSpec
//
// A type-erased class that can be used directly within untyped API entry
// points. An `UntypedFormatSpec` is specifically used as an argument to
// `FormatUntyped()`.
//
// Example:
//
// absl::UntypedFormatSpec format("%d");
// std::string out;
// CHECK(absl::FormatUntyped(&out, format, {absl::FormatArg(1)}));
class UntypedFormatSpec {
 public:
  UntypedFormatSpec() = delete;
  UntypedFormatSpec(const UntypedFormatSpec&) = delete;
  UntypedFormatSpec& operator=(const UntypedFormatSpec&) = delete;

  // Wraps an unparsed format string; no validity checking happens here.
  explicit UntypedFormatSpec(string_view s) : spec_(s) {}

 protected:
  // Used by derived types to wrap an already-parsed format representation.
  explicit UntypedFormatSpec(const str_format_internal::ParsedFormatBase* pc)
      : spec_(pc) {}

 private:
  // The internal impl is granted friend access so it can unwrap spec_.
  friend str_format_internal::UntypedFormatSpecImpl;
  str_format_internal::UntypedFormatSpecImpl spec_;
};
// FormatStreamed()
//
// Takes a streamable argument and returns an object that can print it
// with '%s'. Allows printing of types that have an `operator<<` but no
// intrinsic type support within `StrFormat()` itself.
//
// Example:
//
// absl::StrFormat("%s", absl::FormatStreamed(obj));
template <typename T>
str_format_internal::StreamedWrapper<T> FormatStreamed(const T& v) {
  // NOTE(review): the wrapper is constructed from a const reference, so the
  // argument presumably must outlive the wrapper — confirm before storing
  // the result beyond the formatting call.
  return str_format_internal::StreamedWrapper<T>(v);
}
// FormatCountCapture
//
// This class provides a way to safely wrap `StrFormat()` captures of `%n`
// conversions, which denote the number of characters written by a formatting
// operation to this point, into an integer value.
//
// This wrapper is designed to allow safe usage of `%n` within `StrFormat()`; in
// the `printf()` family of functions, `%n` is not safe to use, as the `int *`
// buffer can be used to capture arbitrary data.
//
// Example:
//
// int n = 0;
// std::string s = absl::StrFormat("%s%d%n", "hello", 123,
// absl::FormatCountCapture(&n));
// EXPECT_EQ(8, n);
class FormatCountCapture {
 public:
  // Wraps `p`, the destination that receives the character count when this
  // capture is bound to a `%n` conversion.
  explicit FormatCountCapture(int* p) : p_(p) {}

 private:
  // FormatCountCaptureHelper is used to define FormatConvertImpl() for this
  // class.
  friend struct str_format_internal::FormatCountCaptureHelper;
  // Unused() is here because of the false positive from -Wunused-private-field
  // p_ is used in the templated function of the friend FormatCountCaptureHelper
  // class.
  int* Unused() { return p_; }
  int* p_;
};
// FormatSpec
//
// The `FormatSpec` type defines the makeup of a format string within the
// `str_format` library. It is a variadic class template that is evaluated at
// compile-time, according to the format string and arguments that are passed to
// it.
//
// You should not need to manipulate this type directly. You should only name it
// if you are writing wrapper functions which accept format arguments that will
// be provided unmodified to functions in this library. Such a wrapper function
// might be a class method that provides format arguments and/or internally uses
// the result of formatting.
//
// For a `FormatSpec` to be valid at compile-time, it must be provided as
// either:
//
// * A `constexpr` literal or `absl::string_view`, which is how it most often
// used.
// * A `ParsedFormat` instantiation, which ensures the format string is
// valid before use. (See below.)
//
// Example:
//
// // Provided as a string literal.
// absl::StrFormat("Welcome to %s, Number %d!", "The Village", 6);
//
// // Provided as a constexpr absl::string_view.
// constexpr absl::string_view formatString = "Welcome to %s, Number %d!";
// absl::StrFormat(formatString, "The Village", 6);
//
// // Provided as a pre-compiled ParsedFormat object.
// // Note that this example is useful only for illustration purposes.
// absl::ParsedFormat<'s', 'd'> formatString("Welcome to %s, Number %d!");
// absl::StrFormat(formatString, "TheVillage", 6);
//
// A format string generally follows the POSIX syntax as used within the POSIX
// `printf` specification. (Exceptions are noted below.)
//
// (See http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html)
//
// In specific, the `FormatSpec` supports the following type specifiers:
// * `c` for characters
// * `s` for strings
// * `d` or `i` for integers
// * `o` for unsigned integer conversions into octal
// * `x` or `X` for unsigned integer conversions into hex
// * `u` for unsigned integers
// * `f` or `F` for floating point values into decimal notation
// * `e` or `E` for floating point values into exponential notation
// * `a` or `A` for floating point values into hex exponential notation
// * `g` or `G` for floating point values into decimal or exponential
// notation based on their precision
// * `p` for pointer address values
// * `n` for the special case of writing out the number of characters
// written to this point. The resulting value must be captured within an
// `absl::FormatCountCapture` type.
// * `v` for values using the default format for a deduced type. These deduced
// types include many of the primitive types denoted here as well as
// user-defined types containing the proper extensions. (See below for more
// information.)
//
// Implementation-defined behavior:
// * A null pointer provided to "%s" or "%p" is output as "(nil)".
// * A non-null pointer provided to "%p" is output in hex as if by %#x or
// %#lx.
//
// NOTE: `o`, `x`/`X` and `u` will convert signed values to their unsigned
// counterpart before formatting.
//
// Examples:
// "%c", 'a' -> "a"
// "%c", 32 -> " "
// "%s", "C" -> "C"
// "%s", std::string("C++") -> "C++"
// "%d", -10 -> "-10"
// "%o", 10 -> "12"
// "%x", 16 -> "10"
// "%f", 123456789 -> "123456789.000000"
// "%e", .01 -> "1.00000e-2"
// "%a", -3.0 -> "-0x1.8p+1"
// "%g", .01 -> "1e-2"
// "%p", (void*)&value -> "0x7ffdeb6ad2a4"
//
// int n = 0;
// std::string s = absl::StrFormat(
// "%s%d%n", "hello", 123, absl::FormatCountCapture(&n));
// EXPECT_EQ(8, n);
//
// NOTE: the `v` specifier (for "value") is a type specifier not present in the
// POSIX specification. %v will format values according to their deduced type.
// `v` uses `d` for signed integer values, `u` for unsigned integer values, `g`
// for floating point values, and formats boolean values as "true"/"false"
// (instead of 1 or 0 for booleans formatted using d). `const char*` is not
// supported; please use `std::string` and `string_view`. `char` is also not
// supported due to ambiguity of the type. This specifier does not support
// modifiers.
//
// The `FormatSpec` intrinsically supports all of these fundamental C++ types:
//
// * Characters: `char`, `signed char`, `unsigned char`
// * Integers: `int`, `short`, `unsigned short`, `unsigned`, `long`,
// `unsigned long`, `long long`, `unsigned long long`
// * Enums: printed as their underlying integral value
// * Floating-point: `float`, `double`, `long double`
//
// However, in the `str_format` library, a format conversion specifies a broader
// C++ conceptual category instead of an exact type. For example, `%s` binds to
// any string-like argument, so `std::string`, `absl::string_view`, and
// `const char*` are all accepted. Likewise, `%d` accepts any integer-like
// argument, etc.
// Maps each argument type to its set of permitted conversion characters at
// compile time (via `ArgumentToConv`), so that a mismatched format string
// fails to compile rather than misbehaving at runtime.
template <typename... Args>
using FormatSpec = str_format_internal::FormatSpecTemplate<
    str_format_internal::ArgumentToConv<Args>()...>;
// ParsedFormat
//
// A `ParsedFormat` is a class template representing a preparsed `FormatSpec`,
// with template arguments specifying the conversion characters used within the
// format string. Such characters must be valid format type specifiers, and
// these type specifiers are checked at compile-time.
//
// Instances of `ParsedFormat` can be created, copied, and reused to speed up
// formatting loops. A `ParsedFormat` may either be constructed statically, or
// dynamically through its `New()` factory function, which only constructs a
// runtime object if the format is valid at that time.
//
// Example:
//
// // Verified at compile time.
// absl::ParsedFormat<'s', 'd'> formatString("Welcome to %s, Number %d!");
// absl::StrFormat(formatString, "TheVillage", 6);
//
// // Verified at runtime.
// auto format_runtime = absl::ParsedFormat<'d'>::New(format_string);
// if (format_runtime) {
// value = absl::StrFormat(*format_runtime, i);
// } else {
// ... error case ...
// }
#if defined(__cpp_nontype_template_parameter_auto)
// If C++17 is available, an 'extended' format is also allowed that can specify
// multiple conversion characters per format argument, using a combination of
// `absl::FormatConversionCharSet` enum values (logically a set union)
// via the `|` operator. (Single character-based arguments are still accepted,
// but cannot be combined). Some common conversions also have predefined enum
// values, such as `absl::FormatConversionCharSet::kIntegral`.
//
// Example:
//   // Extended format supports multiple conversion characters per argument,
//   // specified via a combination of `FormatConversionCharSet` enums.
//   using MyFormat = absl::ParsedFormat<absl::FormatConversionCharSet::d |
//                                       absl::FormatConversionCharSet::x>;
//   MyFormat GetFormat(bool use_hex) {
//     if (use_hex) return MyFormat("foo %x bar");
//     return MyFormat("foo %d bar");
//   }
//   // `format` can be used with any value that supports 'd' and 'x',
//   // like `int`.
//   auto format = GetFormat(use_hex);
//   value = absl::StrFormat(format, i);
//
// With C++17 `auto` non-type template parameters, each template argument may
// be either a single conversion char or a `FormatConversionCharSet` union.
template <auto... Conv>
using ParsedFormat = absl::str_format_internal::ExtendedParsedFormat<
    absl::str_format_internal::ToFormatConversionCharSet(Conv)...>;
#else
// Pre-C++17 fallback: only a single conversion character per argument.
template <char... Conv>
using ParsedFormat = str_format_internal::ExtendedParsedFormat<
    absl::str_format_internal::ToFormatConversionCharSet(Conv)...>;
#endif  // defined(__cpp_nontype_template_parameter_auto)
// StrFormat()
//
// Returns a `string` given a `printf()`-style format string and zero or more
// additional arguments. Use it as you would `sprintf()`. `StrFormat()` is the
// primary formatting function within the `str_format` library, and should be
// used in most cases where you need type-safe conversion of types into
// formatted strings.
//
// The format string generally consists of ordinary character data along with
// one or more format conversion specifiers (denoted by the `%` character).
// Ordinary character data is returned unchanged into the result string, while
// each conversion specification performs a type substitution from
// `StrFormat()`'s other arguments. See the comments for `FormatSpec` for full
// information on the makeup of this format string.
//
// Example:
//
// std::string s = absl::StrFormat(
// "Welcome to %s, Number %d!", "The Village", 6);
// EXPECT_EQ("Welcome to The Village, Number 6!", s);
//
// Returns an empty string in case of error.
// Formats `args` according to `format` and returns the result; empty on error.
// All work is delegated to the type-erased internal formatter.
template <typename... Args>
ABSL_MUST_USE_RESULT std::string StrFormat(const FormatSpec<Args...>& format,
                                           const Args&... args) {
  // Erase the compile-time-checked spec once, then pack-format the arguments.
  const auto& spec = str_format_internal::UntypedFormatSpecImpl::Extract(format);
  return str_format_internal::FormatPack(
      spec, {str_format_internal::FormatArgImpl(args)...});
}
// StrAppendFormat()
//
// Appends to a `dst` string given a format string, and zero or more additional
// arguments, returning `*dst` as a convenience for chaining purposes. Appends
// nothing in case of error (but possibly alters its capacity).
//
// Example:
//
// std::string orig("For example PI is approximately ");
// std::cout << StrAppendFormat(&orig, "%12.6f", 3.14);
// Appends the formatted output to `*dst`; returns `*dst` so calls can chain.
// On error nothing is appended (capacity may still change).
template <typename... Args>
std::string& StrAppendFormat(std::string* dst,
                             const FormatSpec<Args...>& format,
                             const Args&... args) {
  const auto& spec = str_format_internal::UntypedFormatSpecImpl::Extract(format);
  return str_format_internal::AppendPack(
      dst, spec, {str_format_internal::FormatArgImpl(args)...});
}
// StreamFormat()
//
// Writes to an output stream given a format string and zero or more arguments,
// generally in a manner that is more efficient than streaming the result of
// `absl::StrFormat()`. The returned object must be streamed before the full
// expression ends.
//
// Example:
//
// std::cout << StreamFormat("%12.6f", 3.14);
// Returns a streamable proxy that formats directly into an ostream.
// NOTE: the proxy may reference the call's temporaries, so it must be
// consumed before the end of the full expression that created it.
template <typename... Args>
ABSL_MUST_USE_RESULT str_format_internal::Streamable StreamFormat(
    const FormatSpec<Args...>& format, const Args&... args) {
  const auto& spec = str_format_internal::UntypedFormatSpecImpl::Extract(format);
  return str_format_internal::Streamable(
      spec, {str_format_internal::FormatArgImpl(args)...});
}
// PrintF()
//
// Writes to stdout given a format string and zero or more arguments. This
// function is functionally equivalent to `std::printf()` (and type-safe);
// prefer `absl::PrintF()` over `std::printf()`.
//
// Example:
//
// std::string_view s = "Ulaanbaatar";
// absl::PrintF("The capital of Mongolia is %s", s);
//
// Outputs: "The capital of Mongolia is Ulaanbaatar"
//
// Type-safe printf replacement writing to stdout; equivalent to FPrintF()
// with the destination fixed to stdout.
template <typename... Args>
int PrintF(const FormatSpec<Args...>& format, const Args&... args) {
  const auto& spec = str_format_internal::UntypedFormatSpecImpl::Extract(format);
  return str_format_internal::FprintF(
      stdout, spec, {str_format_internal::FormatArgImpl(args)...});
}
// FPrintF()
//
// Writes to a file given a format string and zero or more arguments. This
// function is functionally equivalent to `std::fprintf()` (and type-safe);
// prefer `absl::FPrintF()` over `std::fprintf()`.
//
// Example:
//
// std::string_view s = "Ulaanbaatar";
// absl::FPrintF(stdout, "The capital of Mongolia is %s", s);
//
// Outputs: "The capital of Mongolia is Ulaanbaatar"
//
// Type-safe fprintf replacement. Returns the number of characters written,
// or a negative value on error (mirroring std::fprintf).
template <typename... Args>
int FPrintF(std::FILE* output, const FormatSpec<Args...>& format,
            const Args&... args) {
  const auto& spec = str_format_internal::UntypedFormatSpecImpl::Extract(format);
  return str_format_internal::FprintF(
      output, spec, {str_format_internal::FormatArgImpl(args)...});
}
// SNPrintF()
//
// Writes to a sized buffer given a format string and zero or more arguments.
// This function is functionally equivalent to `std::snprintf()` (and
// type-safe); prefer `absl::SNPrintF()` over `std::snprintf()`.
//
// In particular, a successful call to `absl::SNPrintF()` writes at most `size`
// bytes of the formatted output to `output`, including a NUL-terminator, and
// returns the number of bytes that would have been written if truncation did
// not occur. In the event of an error, a negative value is returned and `errno`
// is set.
//
// Example:
//
// std::string_view s = "Ulaanbaatar";
// char output[128];
// absl::SNPrintF(output, sizeof(output),
// "The capital of Mongolia is %s", s);
//
// Post-condition: output == "The capital of Mongolia is Ulaanbaatar"
//
// Type-safe snprintf replacement: writes at most `size` bytes (including the
// NUL terminator) and returns the untruncated length, or a negative value on
// error with `errno` set (mirroring std::snprintf).
template <typename... Args>
int SNPrintF(char* output, std::size_t size, const FormatSpec<Args...>& format,
             const Args&... args) {
  const auto& spec = str_format_internal::UntypedFormatSpecImpl::Extract(format);
  return str_format_internal::SnprintF(
      output, size, spec, {str_format_internal::FormatArgImpl(args)...});
}
// -----------------------------------------------------------------------------
// Custom Output Formatting Functions
// -----------------------------------------------------------------------------
// FormatRawSink
//
// FormatRawSink is a type erased wrapper around arbitrary sink objects
// specifically used as an argument to `Format()`.
//
// All the object has to do is define an overload of `AbslFormatFlush()` for the
// sink, usually by adding a ADL-based free function in the same namespace as
// the sink:
//
// void AbslFormatFlush(MySink* dest, absl::string_view part);
//
// where `dest` is the pointer passed to `absl::Format()`. The function should
// append `part` to `dest`.
//
// FormatRawSink does not own the passed sink object. The passed object must
// outlive the FormatRawSink.
class FormatRawSink {
 public:
  // Implicitly convert from any type that provides the hook function as
  // described above.
  // SFINAE: this constructor only participates in overload resolution when
  // the internal FormatRawSinkImpl can be constructed from a T*, i.e. when a
  // suitable AbslFormatFlush(T*, string_view) overload is visible.
  template <typename T,
            typename = typename std::enable_if<std::is_constructible<
                str_format_internal::FormatRawSinkImpl, T*>::value>::type>
  FormatRawSink(T* raw)  // NOLINT
      : sink_(raw) {}

 private:
  friend str_format_internal::FormatRawSinkImpl;
  // Non-owning: `raw` must outlive this wrapper (see class comment above).
  str_format_internal::FormatRawSinkImpl sink_;
};
// Format()
//
// Writes a formatted string to an arbitrary sink object (implementing the
// `absl::FormatRawSink` interface), using a format string and zero or more
// additional arguments.
//
// By default, `std::string`, `std::ostream`, and `absl::Cord` are supported as
// destination objects. If a `std::string` is used the formatted string is
// appended to it.
//
// `absl::Format()` is a generic version of `absl::StrAppendFormat()`, for
// custom sinks. The format string, like format strings for `StrFormat()`, is
// checked at compile-time.
//
// On failure, this function returns `false` and the state of the sink is
// unspecified.
// Formats into an arbitrary sink (anything convertible to FormatRawSink).
// Returns false on failure, in which case the sink's state is unspecified.
template <typename... Args>
bool Format(FormatRawSink raw_sink, const FormatSpec<Args...>& format,
            const Args&... args) {
  // Type-erase the sink and the checked spec, then run the untyped formatter.
  auto sink = str_format_internal::FormatRawSinkImpl::Extract(raw_sink);
  return str_format_internal::FormatUntyped(
      sink, str_format_internal::UntypedFormatSpecImpl::Extract(format),
      {str_format_internal::FormatArgImpl(args)...});
}
// FormatArg
//
// A type-erased handle to a format argument specifically used as an argument
// to `FormatUntyped()`. You may construct a `FormatArg` by passing a
// reference-to-const of any printable type. `FormatArg` is both copyable and
// assignable. It stores only a reference, so the source data must outlive the
// `FormatArg` instance. See the example below.
//
using FormatArg = str_format_internal::FormatArgImpl;
// FormatUntyped()
//
// Writes a formatted string to an arbitrary sink object (implementing the
// `absl::FormatRawSink` interface), using an `UntypedFormatSpec` and zero or
// more additional arguments.
//
// This function acts as the most generic formatting function in the
// `str_format` library. The caller provides a raw sink, an unchecked format
// string, and (usually) a runtime specified list of arguments; no compile-time
// checking of formatting is performed within this function. As a result, a
// caller should check the return value to verify that no error occurred.
// On failure, this function returns `false` and the state of the sink is
// unspecified.
//
// The arguments are provided in an `absl::Span<const absl::FormatArg>`.
// Each `absl::FormatArg` object binds to a single argument and keeps a
// reference to it. The values used to create the `FormatArg` objects must
// outlive this function call.
//
// Example:
//
// std::optional<std::string> FormatDynamic(
// const std::string& in_format,
// const vector<std::string>& in_args) {
// std::string out;
// std::vector<absl::FormatArg> args;
// for (const auto& v : in_args) {
// // It is important that 'v' is a reference to the objects in in_args.
// // The values we pass to FormatArg must outlive the call to
// // FormatUntyped.
// args.emplace_back(v);
// }
// absl::UntypedFormatSpec format(in_format);
// if (!absl::FormatUntyped(&out, format, args)) {
// return std::nullopt;
// }
// return std::move(out);
// }
//
// Runtime (unchecked) formatting entry point: no compile-time validation of
// `format` is performed, so callers must inspect the returned bool. On
// failure the sink's state is unspecified.
ABSL_MUST_USE_RESULT inline bool FormatUntyped(
    FormatRawSink raw_sink, const UntypedFormatSpec& format,
    absl::Span<const FormatArg> args) {
  auto sink = str_format_internal::FormatRawSinkImpl::Extract(raw_sink);
  return str_format_internal::FormatUntyped(
      sink, str_format_internal::UntypedFormatSpecImpl::Extract(format), args);
}
//------------------------------------------------------------------------------
// StrFormat Extensions
//------------------------------------------------------------------------------
//
// AbslStringify()
//
// A simpler customization API for formatting user-defined types using
// absl::StrFormat(). The API relies on detecting an overload in the
// user-defined type's namespace of a free (non-member) `AbslStringify()`
// function as a friend definition with the following signature:
//
// template <typename Sink>
// void AbslStringify(Sink& sink, const X& value);
//
// An `AbslStringify()` overload for a type should only be declared in the same
// file and namespace as said type.
//
// Note that unlike with AbslFormatConvert(), AbslStringify() does not allow
// customization of allowed conversion characters. AbslStringify() uses `%v` as
// the underlying conversion specifier. Additionally, AbslStringify() supports
// use with absl::StrCat while AbslFormatConvert() does not.
//
// Example:
//
// struct Point {
// // To add formatting support to `Point`, we simply need to add a free
// // (non-member) function `AbslStringify()`. This method prints in the
// // request format using the underlying `%v` specifier. You can add such a
// // free function using a friend declaration within the body of the class.
// // The sink parameter is a templated type to avoid requiring dependencies.
// template <typename Sink>
// friend void AbslStringify(Sink& sink, const Point& p) {
// absl::Format(&sink, "(%v, %v)", p.x, p.y);
// }
//
// int x;
// int y;
// };
//
// AbslFormatConvert()
//
// The StrFormat library provides a customization API for formatting
// user-defined types using absl::StrFormat(). The API relies on detecting an
// overload in the user-defined type's namespace of a free (non-member)
// `AbslFormatConvert()` function, usually as a friend definition with the
// following signature:
//
// absl::FormatConvertResult<...> AbslFormatConvert(
// const X& value,
// const absl::FormatConversionSpec& spec,
// absl::FormatSink *sink);
//
// An `AbslFormatConvert()` overload for a type should only be declared in the
// same file and namespace as said type.
//
// The abstractions within this definition include:
//
// * An `absl::FormatConversionSpec` to specify the fields to pull from a
// user-defined type's format string
// * An `absl::FormatSink` to hold the converted string data during the
// conversion process.
// * An `absl::FormatConvertResult` to hold the status of the returned
// formatting operation
//
// The return type encodes all the conversion characters that your
// AbslFormatConvert() routine accepts. The return value should be {true}.
// A return value of {false} will result in `StrFormat()` returning
// an empty string. This result will be propagated to the result of
// `FormatUntyped`.
//
// Example:
//
// struct Point {
// // To add formatting support to `Point`, we simply need to add a free
// // (non-member) function `AbslFormatConvert()`. This method interprets
// // `spec` to print in the request format. The allowed conversion characters
// // can be restricted via the type of the result, in this example
// // string and integral formatting are allowed (but not, for instance
// // floating point characters like "%f"). You can add such a free function
// // using a friend declaration within the body of the class:
// friend absl::FormatConvertResult<absl::FormatConversionCharSet::kString |
// absl::FormatConversionCharSet::kIntegral>
// AbslFormatConvert(const Point& p, const absl::FormatConversionSpec& spec,
// absl::FormatSink* s) {
// if (spec.conversion_char() == absl::FormatConversionChar::s) {
// absl::Format(s, "x=%vy=%v", p.x, p.y);
// } else {
// absl::Format(s, "%v,%v", p.x, p.y);
// }
// return {true};
// }
//
// int x;
// int y;
// };
// clang-format off
// FormatConversionChar
//
// Specifies the formatting character provided in the format string
// passed to `StrFormat()`. Values correspond one-to-one to the conversion
// characters accepted after '%'.
enum class FormatConversionChar : uint8_t {
    c, s,                    // text
    d, i, o, u, x, X,        // int
    f, F, e, E, g, G, a, A,  // float
    n, p, v                  // misc
};
// clang-format on
// FormatConversionSpec
//
// Specifies modifications to the conversion of the format string, through use
// of one or more format flags in the source format string.
class FormatConversionSpec {
 public:
  // FormatConversionSpec::is_basic()
  //
  // Indicates that width and precision are not specified, and no additional
  // flags are set for this conversion character in the format string.
  bool is_basic() const { return impl_.is_basic(); }

  // FormatConversionSpec::has_left_flag()
  //
  // Indicates whether the result should be left justified for this conversion
  // character in the format string. This flag is set through use of a '-'
  // character in the format string. E.g. "%-s"
  bool has_left_flag() const { return impl_.has_left_flag(); }

  // FormatConversionSpec::has_show_pos_flag()
  //
  // Indicates whether a sign column is prepended to the result for this
  // conversion character in the format string, even if the result is positive.
  // This flag is set through use of a '+' character in the format string.
  // E.g. "%+d"
  bool has_show_pos_flag() const { return impl_.has_show_pos_flag(); }

  // FormatConversionSpec::has_sign_col_flag()
  //
  // Indicates whether a mandatory sign column is added to the result for this
  // conversion character. This flag is set through use of a space character
  // (' ') in the format string. E.g. "% i"
  bool has_sign_col_flag() const { return impl_.has_sign_col_flag(); }

  // FormatConversionSpec::has_alt_flag()
  //
  // Indicates whether an "alternate" format is applied to the result for this
  // conversion character. Alternative forms depend on the type of conversion
  // character, and unallowed alternatives are undefined. This flag is set
  // through use of a '#' character in the format string. E.g. "%#o"
  bool has_alt_flag() const { return impl_.has_alt_flag(); }

  // FormatConversionSpec::has_zero_flag()
  //
  // Indicates whether zeroes should be prepended to the result for this
  // conversion character instead of spaces. This flag is set through use of
  // the '0' character in the format string. E.g. "%0f"
  bool has_zero_flag() const { return impl_.has_zero_flag(); }

  // FormatConversionSpec::conversion_char()
  //
  // Returns the underlying conversion character.
  FormatConversionChar conversion_char() const {
    return impl_.conversion_char();
  }

  // FormatConversionSpec::width()
  //
  // Returns the specified width (indicated through use of a non-zero integer
  // value or '*' character) of the conversion character. If width is
  // unspecified, it returns a negative value.
  int width() const { return impl_.width(); }

  // FormatConversionSpec::precision()
  //
  // Returns the specified precision (through use of the '.' character followed
  // by a non-zero integer value or '*' character) of the conversion character.
  // If precision is unspecified, it returns a negative value.
  int precision() const { return impl_.precision(); }

 private:
  // Only the internal framework constructs these; user code receives
  // instances through AbslFormatConvert() callbacks.
  explicit FormatConversionSpec(
      str_format_internal::FormatConversionSpecImpl impl)
      : impl_(impl) {}

  friend str_format_internal::FormatConversionSpecImpl;

  absl::str_format_internal::FormatConversionSpecImpl impl_;
};
// Type-safe OR operator for FormatConversionCharSet to allow accepting
// multiple conversion chars in custom format converters.
constexpr FormatConversionCharSet operator|(FormatConversionCharSet a,
                                            FormatConversionCharSet b) {
  // Each enumerator is a bit in the underlying uint64_t, so set union is a
  // plain bitwise OR.
  return static_cast<FormatConversionCharSet>(static_cast<uint64_t>(a) |
                                              static_cast<uint64_t>(b));
}
// FormatConversionCharSet
//
// Specifies the _accepted_ conversion types as a template parameter to
// FormatConvertResult for custom implementations of `AbslFormatConvert`.
// Each single-character enumerator occupies a distinct bit, so values may be
// combined with the `operator|` overload above.
// Note the helper predefined alias definitions (kIntegral, etc.) below.
enum class FormatConversionCharSet : uint64_t {
  // text
  c = str_format_internal::FormatConversionCharToConvInt('c'),
  s = str_format_internal::FormatConversionCharToConvInt('s'),
  // integer
  d = str_format_internal::FormatConversionCharToConvInt('d'),
  i = str_format_internal::FormatConversionCharToConvInt('i'),
  o = str_format_internal::FormatConversionCharToConvInt('o'),
  u = str_format_internal::FormatConversionCharToConvInt('u'),
  x = str_format_internal::FormatConversionCharToConvInt('x'),
  X = str_format_internal::FormatConversionCharToConvInt('X'),
  // Float
  f = str_format_internal::FormatConversionCharToConvInt('f'),
  F = str_format_internal::FormatConversionCharToConvInt('F'),
  e = str_format_internal::FormatConversionCharToConvInt('e'),
  E = str_format_internal::FormatConversionCharToConvInt('E'),
  g = str_format_internal::FormatConversionCharToConvInt('g'),
  G = str_format_internal::FormatConversionCharToConvInt('G'),
  a = str_format_internal::FormatConversionCharToConvInt('a'),
  A = str_format_internal::FormatConversionCharToConvInt('A'),
  // misc
  n = str_format_internal::FormatConversionCharToConvInt('n'),
  p = str_format_internal::FormatConversionCharToConvInt('p'),
  v = str_format_internal::FormatConversionCharToConvInt('v'),

  // Used for width/precision '*' specification.
  kStar = static_cast<uint64_t>(
      absl::str_format_internal::FormatConversionCharSetInternal::kStar),
  // Some predefined values (unions of the single-character bits above):
  kIntegral = d | i | u | o | x | X,
  kFloating = a | e | f | g | A | E | F | G,
  kNumeric = kIntegral | kFloating,
  kString = s,
  kPointer = p,
};
// FormatSink
//
// A format sink is a generic abstraction to which conversions may write their
// formatted string data. `absl::FormatConvert()` uses this sink to write its
// formatted string.
//
class FormatSink {
 public:
  // FormatSink::Append()
  //
  // Appends `count` copies of `ch` to the format sink.
  void Append(size_t count, char ch) { sink_->Append(count, ch); }

  // Overload of FormatSink::Append() for appending the characters of a string
  // view to a format sink.
  void Append(string_view v) { sink_->Append(v); }

  // FormatSink::PutPaddedString()
  //
  // Appends `precision` number of bytes of `v` to the format sink. If this is
  // less than `width`, spaces will be appended first (if `left` is false), or
  // after (if `left` is true) to ensure the total amount appended is
  // at least `width`.
  bool PutPaddedString(string_view v, int width, int precision, bool left) {
    return sink_->PutPaddedString(v, width, precision, left);
  }

  // Support `absl::Format(&sink, format, args...)`.
  friend void AbslFormatFlush(FormatSink* sink, absl::string_view v) {
    sink->Append(v);
  }

 private:
  friend str_format_internal::FormatSinkImpl;
  // Only the internal framework creates a FormatSink; it wraps (but does not
  // own) the internal sink implementation.
  explicit FormatSink(str_format_internal::FormatSinkImpl* s) : sink_(s) {}
  str_format_internal::FormatSinkImpl* sink_;
};
// FormatConvertResult
//
// Indicates whether a call to AbslFormatConvert() was successful.
// This return type informs the StrFormat extension framework (through
// ADL but using the return type) of what conversion characters are supported.
// It is strongly discouraged to return {false}, as this will result in an
// empty string in StrFormat.
template <FormatConversionCharSet C>
struct FormatConvertResult {
  // True when the conversion succeeded; the set `C` of accepted conversion
  // characters is carried in the type, not the value.
  bool value;
};
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_STRINGS_STR_FORMAT_H_ | c | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/absl/strings/str_format.h |
// Copyright 2016-2019 Cargo-Bundle developers <https://github.com/burtonageo/cargo-bundle>
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use crate::utils::CommandExt;
use std::process::Command;
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
/// The subset of `rustc --print cfg` output we care about.
#[derive(Debug, PartialEq, Eq)]
struct RustCfg {
  /// Value of the last `target_arch="..."` line, if any.
  target_arch: Option<String>,
}

/// Extracts `target_arch` from `rustc --print cfg` output.
///
/// Fixes over the previous implementation:
/// * a line that is exactly `target_arch="` no longer causes an integer
///   underflow (panic in debug builds) when computing the value length;
/// * the closing quote is required via `strip_suffix` instead of blindly
///   dropping the last character of the line;
/// * `lines()` tolerates CRLF line endings, unlike `split('\n')`.
fn parse_rust_cfg(cfg: String) -> RustCfg {
  let prefix = "target_arch=\"";
  let target_arch = cfg
    .lines()
    .filter_map(|line| line.strip_prefix(prefix))
    .filter_map(|rest| rest.strip_suffix('"'))
    // Keep the last occurrence, matching the old loop's overwrite behavior.
    .last()
    .map(str::to_owned);
  RustCfg { target_arch }
}
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn target_triple() -> Result<String, crate::Error> {
  // Prefer asking the installed `rustc` for the host architecture; if that
  // fails, fall back to the architecture this crate was compiled for.
  let arch_res = Command::new("rustc").args(["--print", "cfg"]).output_ok();
  let arch = match arch_res {
    Ok(output) => parse_rust_cfg(String::from_utf8_lossy(&output.stdout).into())
      .target_arch
      .expect("could not find `target_arch` when running `rustc --print cfg`."),
    Err(err) => {
      log::warn!(
        "failed to determine target arch using rustc, error: `{}`. The fallback is the architecture of the machine that compiled this crate.",
        err,
      );
      // NOTE: these `cfg!` checks are resolved at compile time, so the
      // fallback reflects the build machine, not the runtime host.
      if cfg!(target_arch = "x86") {
        "i686".into()
      } else if cfg!(target_arch = "x86_64") {
        "x86_64".into()
      } else if cfg!(target_arch = "arm") {
        "armv7".into()
      } else if cfg!(target_arch = "aarch64") {
        "aarch64".into()
      } else if cfg!(target_arch = "riscv64") {
        "riscv64".into()
      } else {
        return Err(crate::Error::ArchError(String::from(
          "Unable to determine target-architecture",
        )));
      }
    }
  };
  // Vendor + OS component of the triple.
  let os = if cfg!(target_os = "linux") {
    "unknown-linux"
  } else if cfg!(target_os = "macos") {
    "apple-darwin"
  } else if cfg!(target_os = "windows") {
    "pc-windows"
  } else if cfg!(target_os = "freebsd") {
    "unknown-freebsd"
  } else {
    return Err(crate::Error::ArchError(String::from(
      "Unable to determine target-os",
    )));
  };
  // macOS and FreeBSD triples have no environment suffix; others append the
  // ABI/environment (gnu/musl/msvc).
  let os = if cfg!(target_os = "macos") || cfg!(target_os = "freebsd") {
    String::from(os)
  } else {
    let env = if cfg!(target_env = "gnu") {
      "gnu"
    } else if cfg!(target_env = "musl") {
      "musl"
    } else if cfg!(target_env = "msvc") {
      "msvc"
    } else {
      return Err(crate::Error::ArchError(String::from(
        "Unable to determine target-environment",
      )));
    };
    format!("{os}-{env}")
  };
  Ok(format!("{arch}-{os}"))
}
#[cfg(test)]
mod tests {
  use super::RustCfg;

  #[test]
  fn parse_rust_cfg() {
    // No `target_arch="..."` line at all -> None.
    assert_eq!(
      super::parse_rust_cfg("target_arch".into()),
      RustCfg { target_arch: None }
    );
    // Representative `rustc --print cfg` output. The lines inside the raw
    // string are intentionally at column zero: the parser matches the
    // `target_arch="` prefix at the very start of each line.
    assert_eq!(
      super::parse_rust_cfg(
        r#"debug_assertions
target_arch="aarch64"
target_endian="little"
target_env=""
target_family="unix"
target_os="macos"
target_pointer_width="64"
target_vendor="apple"
unix"#
          .into()
      ),
      RustCfg {
        target_arch: Some("aarch64".into())
      }
    );
  }
}
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from itertools import chain
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import except_orm
import openerp.addons.decimal_precision as dp
class price_type(osv.osv):
    """
    The price type is used to point out which field in the product form
    is a price and in which currency this price is expressed.
    When a field is a price, you can use it in pricelists to base
    sale and purchase prices on some fields of the product.
    """
    def _price_field_get(self, cr, uid, context=None):
        """Selection helper for the `field` column: every float field of
        product.product / product.template, as unique
        (technical name, description) pairs."""
        mf = self.pool.get('ir.model.fields')
        ids = mf.search(cr, uid, [('model','in', (('product.product'),('product.template'))), ('ttype','=','float')], context=context)
        res = []
        for field in mf.browse(cr, uid, ids, context=context):
            # De-duplicate: the same field can exist on both models.
            if not (field.name, field.field_description) in res:
                res.append((field.name, field.field_description))
        return res

    def _get_field_currency(self, cr, uid, fname, ctx):
        """Return the currency record of the first price type bound to the
        product field `fname`.
        NOTE(review): raises IndexError when no price type matches — callers
        presumably guarantee one exists; confirm."""
        ids = self.search(cr, uid, [('field','=',fname)], context=ctx)
        return self.browse(cr, uid, ids, context=ctx)[0].currency_id

    def _get_currency(self, cr, uid, ctx):
        """Default for `currency_id`: the current user's company currency,
        falling back to the first company in the database when the user has
        no company set."""
        comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
        if not comp:
            comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
            comp = self.pool.get('res.company').browse(cr, uid, comp_id)
        return comp.currency_id.id

    _name = "product.price.type"
    _description = "Price Type"
    _columns = {
        "name" : fields.char("Price Name", required=True, translate=True, help="Name of this kind of price."),
        "active" : fields.boolean("Active"),
        "field" : fields.selection(_price_field_get, "Product Field", size=32, required=True, help="Associated field in the product form."),
        "currency_id" : fields.many2one('res.currency', "Currency", required=True, help="The currency the field is expressed in."),
    }
    _defaults = {
        "active": lambda *args: True,
        "currency_id": _get_currency
    }
#----------------------------------------------------------
# Price lists
#----------------------------------------------------------
class product_pricelist_type(osv.osv):
    """Kinds of pricelists. `key` is a stable technical identifier used from
    code to select specific prices based on the context (see the field's help
    text); `name` is the user-visible, translatable label."""
    _name = "product.pricelist.type"
    _description = "Pricelist Type"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'key': fields.char('Key', required=True, help="Used in the code to select specific prices based on the context. Keep unchanged."),
    }
class product_pricelist(osv.osv):
    """A pricelist: a named, currency-bound container of dated rule versions.

    Public entry points:
      * ``price_get`` / ``price_rule_get``           -- one product
      * ``price_get_multi`` / ``price_rule_get_multi`` -- many products

    The ``*_rule_*`` variants also return the id of the pricelist item
    (rule) that produced each price.
    """

    def _pricelist_type_get(self, cr, uid, context=None):
        # Selection provider for the ``type`` field: (key, name) pairs built
        # from all product.pricelist.type records, ordered by name.
        pricelist_type_obj = self.pool.get('product.pricelist.type')
        pricelist_type_ids = pricelist_type_obj.search(cr, uid, [], order='name')
        pricelist_types = pricelist_type_obj.read(cr, uid, pricelist_type_ids, ['key','name'], context=context)
        res = []
        for type in pricelist_types:
            res.append((type['key'],type['name']))
        return res

    _name = "product.pricelist"
    _description = "Pricelist"
    _order = 'name'
    _columns = {
        'name': fields.char('Pricelist Name', required=True, translate=True),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the pricelist without removing it."),
        'type': fields.selection(_pricelist_type_get, 'Pricelist Type', required=True),
        # Dated versions; copied along when the pricelist is duplicated.
        'version_id': fields.one2many('product.pricelist.version', 'pricelist_id', 'Pricelist Versions', copy=True),
        'currency_id': fields.many2one('res.currency', 'Currency', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display each pricelist as "<name> (<currency>)".

        Returns an empty list when any of the requested ids is falsy.
        """
        result= []
        if not all(ids):
            return result
        for pl in self.browse(cr, uid, ids, context=context):
            name = pl.name + ' ('+ pl.currency_id.name + ')'
            result.append((pl.id,name))
        return result

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Resolve the exact "<name> (<currency>)" form produced by name_get.

        Only the ``=`` operator with no extra domain takes the SQL fast path
        (which also matches translated names); every other case falls back to
        the standard name_search.
        """
        if name and operator == '=' and not args:
            # search on the name of the pricelist and its currency, opposite of name_get(),
            # Used by the magic context filter in the product search view.
            query_args = {'name': name, 'limit': limit, 'lang': (context or {}).get('lang') or 'en_US'}
            query = """SELECT p.id
                       FROM ((
                                SELECT pr.id, pr.name
                                FROM product_pricelist pr JOIN
                                     res_currency cur ON
                                         (pr.currency_id = cur.id)
                                WHERE pr.name || ' (' || cur.name || ')' = %(name)s
                            )
                            UNION (
                                SELECT tr.res_id as id, tr.value as name
                                FROM ir_translation tr JOIN
                                     product_pricelist pr ON (
                                        pr.id = tr.res_id AND
                                        tr.type = 'model' AND
                                        tr.name = 'product.pricelist,name' AND
                                        tr.lang = %(lang)s
                                     ) JOIN
                                     res_currency cur ON
                                         (pr.currency_id = cur.id)
                                WHERE tr.value || ' (' || cur.name || ')' = %(name)s
                            )
                        ) p
                       ORDER BY p.name"""
            if limit:
                query += " LIMIT %(limit)s"
            cr.execute(query, query_args)
            ids = [r[0] for r in cr.fetchall()]
            # regular search() to apply ACLs - may limit results below limit in some cases
            ids = self.search(cr, uid, [('id', 'in', ids)], limit=limit, context=context)
            if ids:
                return self.name_get(cr, uid, ids, context)
        return super(product_pricelist, self).name_search(
            cr, uid, name, args, operator=operator, context=context, limit=limit)

    def _get_currency(self, cr, uid, ctx):
        # Default currency: the current user's company currency, or the first
        # company found when the user has none.
        comp = self.pool.get('res.users').browse(cr, uid, uid).company_id
        if not comp:
            comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
            comp = self.pool.get('res.company').browse(cr, uid, comp_id)
        return comp.currency_id.id

    _defaults = {
        'active': lambda *a: 1,
        "currency_id": _get_currency
    }

    def price_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
        """Like price_rule_get_multi but drops the rule id, keeping prices only."""
        return dict((key, dict((key, price[0]) for key, price in value.items())) for key, value in self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner, context=context).items())

    def price_rule_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
        """multi products 'price_get'.

        @param ids: pricelist ids (all pricelists when empty)
        @param products_by_qty_by_partner: list of (product, qty, partner)
        @param context: {
            'date': Date of the pricelist (%Y-%m-%d),}
        @return: a dict of dict with product_id as key and a dict 'price by pricelist' as value
        """
        if not ids:
            ids = self.pool.get('product.pricelist').search(cr, uid, [], context=context)
        results = {}
        for pricelist in self.browse(cr, uid, ids, context=context):
            subres = self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context)
            for product_id,price in subres.items():
                results.setdefault(product_id, {})
                results[product_id][pricelist.id] = price
        return results

    def _price_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
        """Single-pricelist variant of price_get_multi (price only, no rule id)."""
        return dict((key, price[0]) for key, price in self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context).items())

    def _price_rule_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
        """Core computation: {product_id: (price, rule_id)} for one pricelist.

        Uses the pricelist version active at context['date'] (today by
        default), fetches the candidate rules in one SQL query, then applies
        the first matching rule per product.
        """
        context = context or {}
        date = context.get('date') or time.strftime('%Y-%m-%d')
        # Keep only the date part in case a full datetime was passed.
        date = date[0:10]
        products = map(lambda x: x[0], products_by_qty_by_partner)
        currency_obj = self.pool.get('res.currency')
        product_obj = self.pool.get('product.template')
        product_uom_obj = self.pool.get('product.uom')
        price_type_obj = self.pool.get('product.price.type')
        if not products:
            return {}

        # Pick the first version whose [date_start, date_end] covers `date`.
        version = False
        for v in pricelist.version_id:
            if ((v.date_start is False) or (v.date_start <= date)) and ((v.date_end is False) or (v.date_end >= date)):
                version = v
                break
        if not version:
            raise osv.except_osv(_('Warning!'), _("At least one pricelist has no active version !\nPlease create or activate one."))

        # Collect every category (including ancestors) of the products so
        # category-based rules can be pre-filtered in SQL.
        categ_ids = {}
        for p in products:
            categ = p.categ_id
            while categ:
                categ_ids[categ.id] = True
                categ = categ.parent_id
        categ_ids = categ_ids.keys()

        # The same code serves product.product and product.template records.
        is_product_template = products[0]._name == "product.template"
        if is_product_template:
            prod_tmpl_ids = [tmpl.id for tmpl in products]
            # all variants of all products
            prod_ids = [p.id for p in
                        list(chain.from_iterable([t.product_variant_ids for t in products]))]
        else:
            prod_ids = [product.id for product in products]
            prod_tmpl_ids = [product.product_tmpl_id.id for product in products]

        # Load all rules
        cr.execute(
            'SELECT i.id '
            'FROM product_pricelist_item AS i '
            'WHERE (product_tmpl_id IS NULL OR product_tmpl_id = any(%s)) '
            'AND (product_id IS NULL OR (product_id = any(%s))) '
            'AND ((categ_id IS NULL) OR (categ_id = any(%s))) '
            'AND (price_version_id = %s) '
            'ORDER BY sequence, min_quantity desc',
            (prod_tmpl_ids, prod_ids, categ_ids, version.id))

        item_ids = [x[0] for x in cr.fetchall()]
        items = self.pool.get('product.pricelist.item').browse(cr, uid, item_ids, context=context)

        # Cache of product.price.type records, keyed by their id (rule.base).
        price_types = {}

        results = {}
        for product, qty, partner in products_by_qty_by_partner:
            results[product.id] = 0.0
            rule_id = False
            price = False

            # Final unit price is computed according to `qty` in the `qty_uom_id` UoM.
            # An intermediary unit price may be computed according to a different UoM, in
            # which case the price_uom_id contains that UoM.
            # The final price will be converted to match `qty_uom_id`.
            qty_uom_id = context.get('uom') or product.uom_id.id
            price_uom_id = product.uom_id.id
            qty_in_product_uom = qty
            if qty_uom_id != product.uom_id.id:
                try:
                    qty_in_product_uom = product_uom_obj._compute_qty(
                        cr, uid, context['uom'], qty, product.uom_id.id or product.uos_id.id)
                except except_orm:
                    # Ignored - incompatible UoM in context, use default product UoM
                    pass

            # Rules were pre-sorted by (sequence, min_quantity desc):
            # the first rule that matches this product wins.
            for rule in items:
                if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
                    continue
                if is_product_template:
                    if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
                        continue
                    if rule.product_id and \
                            (product.product_variant_count > 1 or product.product_variant_ids[0].id != rule.product_id.id):
                        # product rule acceptable on template if has only one variant
                        continue
                else:
                    if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
                        continue
                    if rule.product_id and product.id != rule.product_id.id:
                        continue

                if rule.categ_id:
                    # Rule category matches the product's category or any ancestor.
                    cat = product.categ_id
                    while cat:
                        if cat.id == rule.categ_id.id:
                            break
                        cat = cat.parent_id
                    if not cat:
                        continue

                if rule.base == -1:
                    # Base price taken from another pricelist (recursive lookup),
                    # converted to this pricelist's currency.
                    if rule.base_pricelist_id:
                        price_tmp = self._price_get_multi(cr, uid,
                                rule.base_pricelist_id, [(product,
                                qty, partner)], context=context)[product.id]
                        ptype_src = rule.base_pricelist_id.currency_id.id
                        price_uom_id = qty_uom_id
                        price = currency_obj.compute(cr, uid,
                                ptype_src, pricelist.currency_id.id,
                                price_tmp, round=False,
                                context=context)
                elif rule.base == -2:
                    # Base price taken from the supplier info on the product:
                    # prefer the entry for `partner`, else the first supplier.
                    seller = False
                    for seller_id in product.seller_ids:
                        if (not partner) or (seller_id.name.id != partner):
                            continue
                        seller = seller_id
                    if not seller and product.seller_ids:
                        seller = product.seller_ids[0]
                    if seller:
                        qty_in_seller_uom = qty
                        seller_uom = seller.product_uom.id
                        if qty_uom_id != seller_uom:
                            qty_in_seller_uom = product_uom_obj._compute_qty(cr, uid, qty_uom_id, qty, to_uom_id=seller_uom)
                        price_uom_id = seller_uom
                        for line in seller.pricelist_ids:
                            if line.min_quantity <= qty_in_seller_uom:
                                price = line.price
                else:
                    # Base price taken from a product field via product.price.type.
                    if rule.base not in price_types:
                        price_types[rule.base] = price_type_obj.browse(cr, uid, int(rule.base))
                    price_type = price_types[rule.base]
                    # price_get returns the price in the context UoM, i.e. qty_uom_id
                    price_uom_id = qty_uom_id
                    price = currency_obj.compute(
                            cr, uid,
                            price_type.currency_id.id, pricelist.currency_id.id,
                            product_obj._price_get(cr, uid, [product], price_type.field, context=context)[product.id],
                            round=False, context=context)

                if price is not False:
                    # Apply discount, rounding, surcharge and margin limits.
                    price_limit = price
                    price = price * (1.0+(rule.price_discount or 0.0))
                    if rule.price_round:
                        price = tools.float_round(price, precision_rounding=rule.price_round)
                    # Fixed amounts are expressed in the product UoM and must be
                    # converted to the UoM the price is currently expressed in.
                    convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
                                                cr, uid, product.uom_id.id,
                                                price, price_uom_id))
                    if rule.price_surcharge:
                        price_surcharge = convert_to_price_uom(rule.price_surcharge)
                        price += price_surcharge
                    if rule.price_min_margin:
                        price_min_margin = convert_to_price_uom(rule.price_min_margin)
                        price = max(price, price_limit + price_min_margin)
                    if rule.price_max_margin:
                        price_max_margin = convert_to_price_uom(rule.price_max_margin)
                        price = min(price, price_limit + price_max_margin)
                rule_id = rule.id
                break

            # Final price conversion to target UoM
            price = product_uom_obj._compute_price(cr, uid, price_uom_id, price, qty_uom_id)
            results[product.id] = (price, rule_id)
        return results

    def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
        """Return {pricelist_id: price} for one product/qty/partner."""
        return dict((key, price[0]) for key, price in self.price_rule_get(cr, uid, ids, prod_id, qty, partner=partner, context=context).items())

    def price_rule_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
        """Return {pricelist_id: (price, rule_id)} for one product/qty/partner."""
        product = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
        res_multi = self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner=[(product, qty, partner)], context=context)
        res = res_multi[prod_id]
        return res
class product_pricelist_version(osv.osv):
    """A dated revision of a pricelist, holding the actual pricing rules.

    Active versions of the same pricelist must not overlap in time
    (enforced by the ``_check_date`` constraint below).
    """
    _name = "product.pricelist.version"
    _description = "Pricelist Version"
    _columns = {
        'pricelist_id': fields.many2one('product.pricelist', 'Price List',
            required=True, select=True, ondelete='cascade'),
        'name': fields.char('Name', required=True, translate=True),
        'active': fields.boolean('Active',
            help="When a version is duplicated it is set to non active, so that the " \
                    "dates do not overlaps with original version. You should change the dates " \
                    "and reactivate the pricelist"),
        'items_id': fields.one2many('product.pricelist.item',
            'price_version_id', 'Price List Items', required=True, copy=True),
        'date_start': fields.date('Start Date', help="First valid date for the version."),
        'date_end': fields.date('End Date', help="Last valid date for the version."),
        'company_id': fields.related('pricelist_id','company_id',type='many2one',
            readonly=True, relation='res.company', string='Company', store=True)
    }
    _defaults = {
        'active': lambda *a: 1,
    }

    def _check_date(self, cursor, user, ids, context=None):
        """Constraint helper: forbid two overlapping active versions.

        Returns False as soon as another active version of the same
        pricelist overlaps this version's [date_start, date_end] range
        (open-ended dates overlap everything on that side).
        """
        for pricelist_version in self.browse(cursor, user, ids, context=context):
            if not pricelist_version.active:
                continue
            # Build the overlap conditions with query parameters instead of
            # interpolating the date values into the SQL string, so values
            # are always escaped by the database driver.
            where = []
            params = []
            if pricelist_version.date_start:
                where.append("((date_end >= %s) or (date_end is null))")
                params.append(pricelist_version.date_start)
            if pricelist_version.date_end:
                where.append("((date_start <= %s) or (date_start is null))")
                params.append(pricelist_version.date_end)
            cursor.execute('SELECT id '
                           'FROM product_pricelist_version '
                           'WHERE ' + ' and '.join(where) + (' and ' if where else '') +
                           'pricelist_id = %s '
                           'AND active '
                           'AND id <> %s',
                           params + [pricelist_version.pricelist_id.id,
                                     pricelist_version.id])
            if cursor.fetchall():
                return False
        return True

    _constraints = [
        (_check_date, 'You cannot have 2 pricelist versions that overlap!',
            ['date_start', 'date_end'])
    ]

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a version, forcing it inactive to prevent overlapping
        active pricelist versions. Works on a copy of ``default`` so the
        caller's dict is never mutated."""
        default = dict(default or {})
        default['active'] = False
        return super(product_pricelist_version, self).copy(cr, uid, id, default, context=context)
class product_pricelist_item(osv.osv):
    """A single pricing rule inside a pricelist version.

    Rules are evaluated in ``sequence, min_quantity desc`` order by
    ``product.pricelist._price_rule_get_multi``; the first rule matching a
    product wins.
    """

    def _price_field_get(self, cr, uid, context=None):
        # Selection provider for ``base``: every product.price.type id plus
        # two special negative values handled in the price computation
        # (-1: price from another pricelist, -2: supplier prices).
        pt = self.pool.get('product.price.type')
        ids = pt.search(cr, uid, [], context=context)
        result = []
        for line in pt.browse(cr, uid, ids, context=context):
            result.append((line.id, line.name))
        result.append((-1, _('Other Pricelist')))
        result.append((-2, _('Supplier Prices on the product form')))
        return result

    # Added default function to fetch the Price type Based on Pricelist type.
    def _get_default_base(self, cr, uid, fields, context=None):
        # NOTE(review): the ``fields`` argument shadows the imported fields
        # module; it appears to carry default values including the pricelist
        # 'type' (sale/purchase) -- confirm against the default_get caller.
        product_price_type_obj = self.pool.get('product.price.type')
        if fields.get('type') == 'purchase':
            product_price_type_ids = product_price_type_obj.search(cr, uid, [('field', '=', 'standard_price')], context=context)
        elif fields.get('type') == 'sale':
            product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
        else:
            # Unknown pricelist type: fall back to the 'Other Pricelist' base.
            return -1
        if not product_price_type_ids:
            return False
        else:
            pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
            return pricetype.id

    _name = "product.pricelist.item"
    _description = "Pricelist item"
    _order = "sequence, min_quantity desc"
    _defaults = {
        'base': _get_default_base,
        'min_quantity': lambda *a: 0,
        'sequence': lambda *a: 5,
        'price_discount': lambda *a: 0,
    }

    def _check_recursion(self, cr, uid, ids, context=None):
        # Constraint: a rule based on another pricelist (base == -1) must not
        # point back at its own pricelist.
        for obj_list in self.browse(cr, uid, ids, context=context):
            if obj_list.base == -1:
                main_pricelist = obj_list.price_version_id.pricelist_id.id
                other_pricelist = obj_list.base_pricelist_id.id
                if main_pricelist == other_pricelist:
                    return False
        return True

    def _check_margin(self, cr, uid, ids, context=None):
        # Constraint: when both margins are set, min must not exceed max.
        for item in self.browse(cr, uid, ids, context=context):
            if item.price_max_margin and item.price_min_margin and (item.price_min_margin > item.price_max_margin):
                return False
        return True

    _columns = {
        'name': fields.char('Rule Name', help="Explicit rule name for this pricelist line."),
        'price_version_id': fields.many2one('product.pricelist.version', 'Price List Version', required=True, select=True, ondelete='cascade'),
        'product_tmpl_id': fields.many2one('product.template', 'Product Template', ondelete='cascade', help="Specify a template if this rule only applies to one product template. Keep empty otherwise."),
        'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Specify a product if this rule only applies to one product. Keep empty otherwise."),
        'categ_id': fields.many2one('product.category', 'Product Category', ondelete='cascade', help="Specify a product category if this rule only applies to products belonging to this category or its children categories. Keep empty otherwise."),
        'min_quantity': fields.integer('Min. Quantity', required=True,
            help="For the rule to apply, bought/sold quantity must be greater "
                 "than or equal to the minimum quantity specified in this field.\n"
                 "Expressed in the default UoM of the product."
            ),
        'sequence': fields.integer('Sequence', required=True, help="Gives the order in which the pricelist items will be checked. The evaluation gives highest priority to lowest sequence and stops as soon as a matching item is found."),
        # Values come from _price_field_get (price type id, -1 or -2).
        'base': fields.selection(_price_field_get, 'Based on', required=True, size=-1, help="Base price for computation."),
        'base_pricelist_id': fields.many2one('product.pricelist', 'Other Pricelist'),
        'price_surcharge': fields.float('Price Surcharge',
            digits_compute= dp.get_precision('Product Price'), help='Specify the fixed amount to add or substract(if negative) to the amount calculated with the discount.'),
        'price_discount': fields.float('Price Discount', digits=(16,4)),
        'price_round': fields.float('Price Rounding',
            digits_compute= dp.get_precision('Product Price'),
            help="Sets the price so that it is a multiple of this value.\n" \
                 "Rounding is applied after the discount and before the surcharge.\n" \
                 "To have prices that end in 9.99, set rounding 10, surcharge -0.01" \
            ),
        'price_min_margin': fields.float('Min. Price Margin',
            digits_compute= dp.get_precision('Product Price'), help='Specify the minimum amount of margin over the base price.'),
        'price_max_margin': fields.float('Max. Price Margin',
            digits_compute= dp.get_precision('Product Price'), help='Specify the maximum amount of margin over the base price.'),
        'company_id': fields.related('price_version_id','company_id',type='many2one',
            readonly=True, relation='res.company', string='Company', store=True)
    }

    _constraints = [
        (_check_recursion, 'Error! You cannot assign the Main Pricelist as Other Pricelist in PriceList Item!', ['base_pricelist_id']),
        (_check_margin, 'Error! The minimum margin should be lower than the maximum margin.', ['price_min_margin', 'price_max_margin'])
    ]

    def product_id_change(self, cr, uid, ids, product_id, context=None):
        # On-change helper: default the rule name to the product's code.
        if not product_id:
            return {}
        prod = self.pool.get('product.product').read(cr, uid, [product_id], ['code','name'])
        if prod[0]['code']:
            return {'value': {'name': prod[0]['code']}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""'with'-compliant StringIO implementation."""
import StringIO
class StringIO(StringIO.StringIO):
    """A StringIO subclass usable in a ``with`` statement.

    The Python 2 standard-library StringIO class predates the context
    manager protocol; this wrapper adds the required hooks so callers can
    write ``with StringIO() as buf: ...``.
    """

    def __enter__(self):
        # Hand the buffer itself to the ``with`` body.
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # No cleanup is performed -- presumably so getvalue() remains usable
        # after the with-block (note for review). Returning None lets any
        # exception from the body propagate.
        pass
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import six
__all__ = [
'merge_dicts'
]
def merge_dicts(d1, d2):
    """
    Merge values from d2 into d1 ignoring None values for existing keys.

    Nested dictionaries are merged recursively. Neither input is mutated;
    the result is based on a deep copy of d1.

    Fixes: the original raised ``KeyError`` when d2 contained a nested dict
    under a key absent from d1 (it recursed into ``result[key]`` without
    checking the key existed or held a dict).

    :type d1: ``dict``
    :type d2: ``dict``

    :rtype: ``dict``
    """
    result = copy.deepcopy(d1)

    for key, value in d2.items():
        # Recurse only when both sides hold a dict for this key; otherwise
        # treat the value as an ordinary replacement.
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = merge_dicts(result[key], value)
        elif key not in result or value is not None:
            result[key] = value

    return result
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a linear classifier on the activations of BERT."""
import json
import os
import time
from absl import app
from absl import flags
import numpy as np
import torch
import torch.utils.data
import sys
sys.path.insert(1, 'helpers')
import folder_helper
FLAGS = flags.FLAGS

# Command-line configuration for the linear-probe training run.
flags.DEFINE_string('output_dir', None,
                    'the output directory where the results will be written')
flags.DEFINE_string('train_dir', None, 'where to get the training data')
flags.DEFINE_integer('num_epochs', 1, 'number of optimization steps')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('batch_size', 32, 'batch size used for training')
flags.DEFINE_integer('random_seed', 42, 'random dataset shuffle seed')
flags.DEFINE_string('concept1', 'he', 'first concept to classify between')
flags.DEFINE_string('concept2', 'she', 'second concept to classify between')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
flags.DEFINE_float('val_split', 0.1, 'train/validation split')
flags.DEFINE_bool('verbose', True, 'print the training progress')
# Fixed: the two help strings below previously rendered as
# "...uses meansquared error" (missing space) and the sigmoid flag carried a
# copy-pasted description of the adam flag.
flags.DEFINE_bool('mae', True, 'use mean absolute error, otherwise uses mean '
                  'squared error')
flags.DEFINE_bool('adam', True, 'use adam instead of sgd')
flags.DEFINE_bool('sigmoid', True,
                  'apply a sigmoid to the classifier output before the loss')
def write_params(parent_dir):
  """Write the parameters of this run to the output directory.

  The flag values are serialized as JSON into ``<parent_dir>/params.json``.

  Args:
    parent_dir: The directory to save the new file to.
  """
  params = {
      'num_epochs': FLAGS.num_epochs,
      'layer_id': FLAGS.layer_id,
      'concept1': FLAGS.concept1,
      'concept2': FLAGS.concept2,
      'learning_rate': FLAGS.learning_rate,
      'train_dir': FLAGS.train_dir,
      'mae': FLAGS.mae,
      'val_split': FLAGS.val_split,
      'random_seed': FLAGS.random_seed,
      'adam': FLAGS.adam
  }
  # The context manager guarantees the file is closed even if json.dump fails.
  with open(os.path.join(parent_dir, 'params.json'), 'w') as params_file:
    json.dump(params, params_file)
def write_iteration(parent_dir, y, y_truth, loss):
  """Append the results of the current iteration to a file.

  Appends to ``<parent_dir>/training.txt``. The original version never
  closed the file handle, leaking one handle per iteration; the context
  manager below fixes that.

  Args:
    parent_dir: The directory to save the new file to.
    y: The classification result (tensor).
    y_truth: Ground truth for the classification (tensor).
    loss: The loss of the current run (scalar tensor).
  """
  with open(os.path.join(parent_dir, 'training.txt'), 'a') as iteration_file:
    iteration_file.write('Y: {}'.format(y.data.cpu().numpy()))
    iteration_file.write('\n')
    iteration_file.write('Y_Truth: {}'.format(y_truth.data.cpu().numpy()))
    iteration_file.write('\n')
    iteration_file.write('Loss: {}'.format(loss.item()))
    iteration_file.write('\n')
    iteration_file.write('\n')
def write_epoch(parent_dir, accuracy, epoch):
  """Append the validation result of the current epoch to a file.

  Appends one line to ``<parent_dir>/epochs.txt`` and optionally echoes it
  to stdout. The original version never closed the file handle; the
  context manager below fixes that leak.

  Args:
    parent_dir: The directory to save the new file to.
    accuracy: The accuracy for this epoch on the test data.
    epoch: The current epoch number.
  """
  with open(os.path.join(parent_dir, 'epochs.txt'), 'a') as iteration_file:
    epoch_result = 'Epoch {}, Accuracy {}'.format(epoch, accuracy)
    iteration_file.write(epoch_result)
    if FLAGS.verbose:
      print(epoch_result)
    iteration_file.write('\n')
class Data(torch.utils.data.Dataset):
  """Represents the training dataset for the concept embedding classifier.

  Each item is a saved numpy activation vector for one of the two concepts;
  the label is 0.0 for FLAGS.concept1 and 1.0 for FLAGS.concept2.
  """

  def __init__(self):
    start_setup = time.time()
    self.init_with_files()
    print('Setup Time: {}'.format(time.time() - start_setup))

  def __len__(self):
    return len(self.concept_classes)

  def __getitem__(self, index):
    get_start = time.time()
    # Get the file from the path that this dataset refers to for a concept.
    # np.load requires a binary-mode handle; the original opened the file in
    # text mode ('r') and never closed it -- both fixed here.
    with open(self.items[index], 'rb') as embeddings_file:
      np_item = np.load(embeddings_file)
    # Convert the training elements to tensors
    torch_item = torch.tensor(np_item, dtype=torch.float)
    torch_class = torch.tensor(self.concept_classes[index])
    get_time = time.time() - get_start
    return torch_item, torch_class, get_time

  def init_with_files(self):
    """List the activation files of both concepts for the selected layer."""
    # Get all files belonging to concept 1
    paths_concept1 = os.listdir(
        os.path.join(FLAGS.train_dir, FLAGS.concept1, str(FLAGS.layer_id)))
    paths_concept1 = [os.path.join(
        FLAGS.train_dir, FLAGS.concept1, str(FLAGS.layer_id),
        x) for x in paths_concept1]
    # Get all files belonging to concept 2
    paths_concept2 = os.listdir(
        os.path.join(FLAGS.train_dir, FLAGS.concept2, str(FLAGS.layer_id)))
    paths_concept2 = [os.path.join(
        FLAGS.train_dir, FLAGS.concept2, str(FLAGS.layer_id),
        x) for x in paths_concept2]
    self.setup_classes_and_items(paths_concept1, paths_concept2)

  def setup_classes_and_items(self, concept_1_items, concept_2_items):
    """Store item paths and their 0/1 class labels on this dataset."""
    # Set up the classes belonging to the concepts
    concept1_classes = np.zeros(len(concept_1_items))
    concept2_classes = np.ones(len(concept_2_items))
    print('Found {} examples for concept "{}".'.format(len(concept_1_items),
                                                       FLAGS.concept1))
    print('Found {} examples for concept "{}".'.format(len(concept_2_items),
                                                       FLAGS.concept2))
    # Store the paths to the concepts files and the classes in this data object
    self.items = concept_1_items + concept_2_items
    self.concept_classes = np.concatenate((concept1_classes, concept2_classes),
                                          axis=0)
def train_classifier(train_data, device, parent_dir):
  """Train a linear classifier to distinguish activations of two concepts.

  The classifier is a single weight vector (optionally followed by a
  sigmoid) trained with MAE or MSE loss; after every epoch the accuracy on
  a held-out split is logged, and the final weights are saved to
  ``<parent_dir>/final_weights.np``.

  Args:
    train_data: The Data object for obtaining the training data.
    device: On which device to train the classifier on.
    parent_dir: The directory to write training results to.
  """
  # Creating data indices for training and validation splits:
  dataset_size = len(train_data)
  indices = list(range(dataset_size))
  split = int(np.floor(FLAGS.val_split * dataset_size))
  np.random.seed(FLAGS.random_seed)
  np.random.shuffle(indices)
  train_indices, val_indices = indices[split:], indices[:split]

  # Creating PT data samplers and loaders:
  train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
  valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
  train_loader = torch.utils.data.DataLoader(train_data,
                                             batch_size=FLAGS.batch_size,
                                             sampler=train_sampler)
  validation_loader = torch.utils.data.DataLoader(train_data,
                                                  batch_size=FLAGS.batch_size,
                                                  sampler=valid_sampler)

  # Model setup for this linear classifier: one weight per activation
  # dimension, inferred from the first training item.
  train_item, _, _ = train_data[0]
  weights = torch.zeros((train_item.shape[0], 1), device=device,
                        requires_grad=True)
  if FLAGS.adam:
    optimizer = torch.optim.Adam([weights], lr=FLAGS.learning_rate)
  else:
    optimizer = torch.optim.SGD([weights], lr=FLAGS.learning_rate)

  # Timing statistics collected for reporting at the end.
  it_times = []
  ep_times = []
  get_times = []

  # Training loop (num_epochs * num_training_elements)
  for epoch in range(FLAGS.num_epochs):
    ep_start = time.time()
    # Training
    for x, y_truth, get_time in train_loader:
      get_times.append(np.average(get_time.data.cpu().numpy()))
      it_start = time.time()
      x, y_truth = x.to(device), y_truth.to(device)
      optimizer.zero_grad()
      y = x.matmul(weights)
      if FLAGS.sigmoid:
        y = torch.sigmoid(y)
      y = y.reshape(-1)
      if FLAGS.mae:
        loss = torch.mean(torch.abs(y - y_truth))
      else:
        loss = torch.mean(torch.pow(y - y_truth, 2))
      # Write iteration stats to file if preferred
      if FLAGS.verbose:
        write_iteration(parent_dir, y, y_truth, loss)
      loss.backward()
      optimizer.step()
      it_times.append(time.time() - it_start)
    ep_times.append(time.time() - ep_start)

    # Validation
    with torch.no_grad():
      acc_value = 0
      num_acc = 0
      for x, y_truth, get_time in validation_loader:
        x, y_truth = x.to(device), y_truth.to(device)
        y = x.matmul(weights)
        if FLAGS.sigmoid:
          y = torch.sigmoid(y)
        y = y.reshape(-1)
        # Threshold at 0.5 to obtain hard 0/1 predictions.
        y = torch.where(y > 0.5, torch.ones_like(y), torch.zeros_like(y))
        acc = y - y_truth
        num_acc = num_acc + acc.shape[0]
        acc = torch.sum(torch.abs(acc))
        acc_value = acc_value + acc.item()
      accuracy = 1.0 - (acc_value / num_acc)
      write_epoch(parent_dir, accuracy, epoch)

  # Write the final classification vector. np.save emits binary data, so the
  # file must be opened in 'wb' mode (the original used text mode and never
  # closed the handle).
  with open(os.path.join(parent_dir, 'final_weights.np'), 'wb') as weights_file:
    np.save(weights_file, weights.data.cpu().numpy())
  print('Iteration Time: {}'.format(np.average(it_times)))
  print('Item Time: {}'.format(np.average(get_times)))
  print('Epoch Time: {}'.format(np.average(ep_times)))
def main(_):
  """Entry point: build the output directory tree, then run training."""
  loss_string = 'MAE' if FLAGS.mae else 'MSE'
  # Create each level of <output_dir>/<loss>/<layer_id> in turn.
  parent_dir = FLAGS.output_dir
  folder_helper.make_folder_if_not_exists(parent_dir)
  for segment in (loss_string, str(FLAGS.layer_id)):
    parent_dir = os.path.join(parent_dir, segment)
    folder_helper.make_folder_if_not_exists(parent_dir)
  write_params(parent_dir)
  # Get the device to work with
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
  # Initialize the training data and start the training process
  dataset = Data()
  train_classifier(dataset, device, parent_dir)
if __name__ == '__main__':
  # These flags have no usable defaults, so abort early when they are missing.
  flags.mark_flag_as_required('output_dir')
  flags.mark_flag_as_required('train_dir')
  app.run(main)
# -*- coding: utf-8 -*-
"""
github3.orgs
============
This module contains all of the classes related to organizations.
"""
from __future__ import unicode_literals
import warnings
from json import dumps
from .events import Event
from .models import BaseAccount, GitHubCore
from .repos import Repository
from .users import User
from .decorators import requires_auth
from uritemplate import URITemplate
class Team(GitHubCore):
"""The :class:`Team <Team>` object.
Two team instances can be checked like so::
t1 == t2
t1 != t2
And is equivalent to::
t1.id == t2.id
t1.id != t2.id
See also: http://developer.github.com/v3/orgs/teams/
"""
def __init__(self, team, session=None):
super(Team, self).__init__(team, session)
self._api = team.get('url', '')
#: This team's name.
self.name = team.get('name')
#: Unique ID of the team.
self.id = team.get('id')
#: Permission leve of the group
self.permission = team.get('permission')
#: Number of members in this team.
self.members_count = team.get('members_count')
members = team.get('members_url')
#: Members URL Template. Expands with ``member``
self.members_urlt = URITemplate(members) if members else None
#: Number of repos owned by this team.
self.repos_count = team.get('repos_count')
#: Repositories url (not a template)
self.repositories_url = team.get('repositories_url')
def _repr(self):
return '<Team [{0}]>'.format(self.name)
def _update_(self, team):
self.__init__(team, self._session)
@requires_auth
def add_member(self, login):
"""Add ``login`` to this team.
:returns: bool
"""
warnings.warn(
'This is no longer supported by the GitHub API, see '
'https://developer.github.com/changes/2014-09-23-one-more-week'
'-before-the-add-team-member-api-breaking-change/',
DeprecationWarning)
url = self._build_url('members', login, base_url=self._api)
return self._boolean(self._put(url), 204, 404)
@requires_auth
def add_repo(self, repo):
"""Add ``repo`` to this team.
:param str repo: (required), form: 'user/repo'
:returns: bool
"""
url = self._build_url('repos', repo, base_url=self._api)
return self._boolean(self._put(url), 204, 404)
@requires_auth
def delete(self):
"""Delete this team.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def edit(self, name, permission=''):
"""Edit this team.
:param str name: (required)
:param str permission: (optional), ('pull', 'push', 'admin')
:returns: bool
"""
if name:
data = {'name': name, 'permission': permission}
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_(json)
return True
return False
def has_repo(self, repo):
"""Checks if this team has access to ``repo``
:param str repo: (required), form: 'user/repo'
:returns: bool
"""
url = self._build_url('repos', repo, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
@requires_auth
def invite(self, username):
"""Invite the user to join this team.
This returns a dictionary like so::
{'state': 'pending', 'url': 'https://api.github.com/teams/...'}
:param str username: (required), user to invite to join this team.
:returns: dictionary
"""
url = self._build_url('memberships', username, base_url=self._api)
return self._json(self._put(url), 200)
def is_member(self, login):
"""Check if ``login`` is a member of this team.
:param str login: (required), login name of the user
:returns: bool
"""
url = self._build_url('members', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
def iter_members(self, number=-1, etag=None):
    """Iterate over the members of this team.

    :param int number: (optional), number of users to iterate over.
        Default: -1 iterates over all values
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`\\ s
    """
    members_url = self._build_url('members', base_url=self._api)
    return self._iter(int(number), members_url, User, etag=etag)
def iter_repos(self, number=-1, etag=None):
    """Iterate over the repositories this team has access to.

    :param int number: (optional), number of repos to iterate over.
        Default: -1 iterates over all values
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
        objects
    """
    repos_url = self._build_url('repos', base_url=self._api)
    return self._iter(int(number), repos_url, Repository, etag=etag)
@requires_auth
def membership_for(self, username):
    """Retrieve the membership information for the user.

    :param str username: (required), name of the user
    :returns: dictionary (empty when the request does not return JSON)
    """
    membership_url = self._build_url('memberships', username,
                                     base_url=self._api)
    return self._json(self._get(membership_url), 200) or {}
@requires_auth
def remove_member(self, login):
    """Remove ``login`` from this team.

    .. deprecated:: no longer supported by the GitHub API; use
        :meth:`revoke_membership` instead. A :class:`DeprecationWarning`
        is emitted and the legacy DELETE is still attempted.

    :param str login: (required), login of the member to remove
    :returns: bool -- True on 204, False on 404
    """
    warnings.warn(
        'This is no longer supported by the GitHub API, see '
        'https://developer.github.com/changes/2014-09-23-one-more-week'
        '-before-the-add-team-member-api-breaking-change/',
        DeprecationWarning)
    url = self._build_url('members', login, base_url=self._api)
    return self._boolean(self._delete(url), 204, 404)
@requires_auth
def revoke_membership(self, username):
    """Revoke this user's team membership.

    :param str username: (required), name of the team member
    :returns: bool
    """
    membership_url = self._build_url('memberships', username,
                                     base_url=self._api)
    response = self._delete(membership_url)
    return self._boolean(response, 204, 404)
@requires_auth
def remove_repo(self, repo):
    """Remove ``repo`` from this team.

    :param str repo: (required), form: 'user/repo'
    :returns: bool
    """
    repo_url = self._build_url('repos', repo, base_url=self._api)
    response = self._delete(repo_url)
    return self._boolean(response, 204, 404)
class Organization(BaseAccount):
    """The :class:`Organization <Organization>` object.

    Two organization instances can be checked like so::

        o1 == o2
        o1 != o2

    And is equivalent to::

        o1.id == o2.id
        o1.id != o2.id

    See also: http://developer.github.com/v3/orgs/
    """

    def __init__(self, org, session=None):
        super(Organization, self).__init__(org, session)
        if not self.type:
            self.type = 'Organization'

        #: Events url (not a template)
        self.events_url = org.get('events_url')
        #: Number of private repositories.
        self.private_repos = org.get('private_repos', 0)

        members = org.get('members_url')
        #: Members URL Template. Expands with ``member``
        self.members_urlt = URITemplate(members) if members else None

        members = org.get('public_members_url')
        #: Public Members URL Template. Expands with ``member``
        self.public_members_urlt = URITemplate(members) if members else None

        #: Repositories url (not a template)
        self.repos_url = org.get('repos_url')

    @requires_auth
    def add_member(self, login, team):
        """Add ``login`` to ``team`` and thereby to this organization.

        .. warning::
            This method is no longer valid. To add a member to a team, you
            must now retrieve the team directly, and use the ``invite``
            method.

            Any user that is to be added to an organization, must be added
            to a team as per the GitHub api.

        .. note::
            This method is of complexity O(n). This iterates over all teams in
            your organization and only adds the user when the team name
            matches the team parameter above. If you want constant time, you
            should retrieve the team and call ``add_member`` on that team
            directly.

        :param str login: (required), login name of the user to be added
        :param str team: (required), team name
        :returns: bool
        """
        warnings.warn(
            'This is no longer supported by the GitHub API, see '
            'https://developer.github.com/changes/2014-09-23-one-more-week'
            '-before-the-add-team-member-api-breaking-change/',
            DeprecationWarning)
        for t in self.iter_teams():
            if team == t.name:
                return t.add_member(login)
        return False

    @requires_auth
    def add_repo(self, repo, team):
        """Add ``repo`` to ``team``.

        .. note::
            This method is of complexity O(n). This iterates over all teams in
            your organization and only adds the repo when the team name
            matches the team parameter above. If you want constant time, you
            should retrieve the team and call ``add_repo`` on that team
            directly.

        :param str repo: (required), form: 'user/repo'
        :param str team: (required), team name
        """
        for t in self.iter_teams():
            if team == t.name:
                return t.add_repo(repo)
        return False

    @requires_auth
    def create_repo(self,
                    name,
                    description='',
                    homepage='',
                    private=False,
                    has_issues=True,
                    has_wiki=True,
                    has_downloads=True,
                    team_id=0,
                    auto_init=False,
                    gitignore_template=''):
        """Create a repository for this organization if the authenticated user
        is a member.

        :param str name: (required), name of the repository
        :param str description: (optional)
        :param str homepage: (optional)
        :param bool private: (optional), If ``True``, create a private
            repository. API default: ``False``
        :param bool has_issues: (optional), If ``True``, enable issues for
            this repository. API default: ``True``
        :param bool has_wiki: (optional), If ``True``, enable the wiki for
            this repository. API default: ``True``
        :param bool has_downloads: (optional), If ``True``, enable downloads
            for this repository. API default: ``True``
        :param int team_id: (optional), id of the team that will be granted
            access to this repository
        :param bool auto_init: (optional), auto initialize the repository.
        :param str gitignore_template: (optional), name of the template; this
            is ignored if auto_int = False.
        :returns: :class:`Repository <github3.repos.Repository>`

        .. warning: ``name`` should be no longer than 100 characters
        """
        url = self._build_url('repos', base_url=self._api)
        data = {'name': name, 'description': description,
                'homepage': homepage, 'private': private,
                'has_issues': has_issues, 'has_wiki': has_wiki,
                'has_downloads': has_downloads, 'auto_init': auto_init,
                'gitignore_template': gitignore_template}
        # Only send team_id when the caller actually provided one.
        if team_id > 0:
            data.update({'team_id': team_id})
        json = self._json(self._post(url, data), 201)
        return Repository(json, self) if json else None

    @requires_auth
    def conceal_member(self, login):
        """Conceal ``login``'s membership in this organization.

        :returns: bool
        """
        url = self._build_url('public_members', login, base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    @requires_auth
    def create_team(self, name, repo_names=None, permission=''):
        """Assuming the authenticated user owns this organization,
        create and return a new team.

        :param str name: (required), name to be given to the team
        :param list repo_names: (optional) repositories, e.g.
            ['github/dotfiles']
        :param str permission: (optional), options:

            - ``pull`` -- (default) members can not push or administer
                repositories accessible by this team
            - ``push`` -- members can push and pull but not administer
                repositories accessible by this team
            - ``admin`` -- members can push, pull and administer
                repositories accessible by this team

        :returns: :class:`Team <Team>`
        """
        # FIX: the default used to be a shared mutable list (``repo_names=[]``),
        # the classic mutable-default-argument pitfall. ``None`` is now the
        # sentinel; the API payload still receives a list either way.
        data = {'name': name, 'repo_names': repo_names or [],
                'permission': permission}
        url = self._build_url('teams', base_url=self._api)
        json = self._json(self._post(url, data), 201)
        return Team(json, self._session) if json else None

    @requires_auth
    def edit(self,
             billing_email=None,
             company=None,
             email=None,
             location=None,
             name=None):
        """Edit this organization.

        :param str billing_email: (optional) Billing email address (private)
        :param str company: (optional)
        :param str email: (optional) Public email address
        :param str location: (optional)
        :param str name: (optional)
        :returns: bool
        """
        json = None
        data = {'billing_email': billing_email, 'company': company,
                'email': email, 'location': location, 'name': name}
        # Drop unset fields so the PATCH only touches what was provided.
        self._remove_none(data)
        if data:
            json = self._json(self._patch(self._api, data=dumps(data)), 200)
        if json:
            self._update_(json)
            return True
        return False

    def is_member(self, login):
        """Check if the user with login ``login`` is a member.

        :returns: bool
        """
        url = self._build_url('members', login, base_url=self._api)
        return self._boolean(self._get(url), 204, 404)

    def is_public_member(self, login):
        """Check if the user with login ``login`` is a public member.

        :returns: bool
        """
        url = self._build_url('public_members', login, base_url=self._api)
        return self._boolean(self._get(url), 204, 404)

    def iter_events(self, number=-1, etag=None):
        """Iterate over events for this org.

        :param int number: (optional), number of events to return. Default: -1
            iterates over all events available.
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`Event <github3.events.Event>`\\ s
        """
        url = self._build_url('events', base_url=self._api)
        return self._iter(int(number), url, Event, etag=etag)

    def iter_members(self, number=-1, etag=None):
        """Iterate over members of this organization.

        :param int number: (optional), number of members to return. Default:
            -1 will return all available.
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`User <github3.users.User>`\\ s
        """
        url = self._build_url('members', base_url=self._api)
        return self._iter(int(number), url, User, etag=etag)

    def iter_public_members(self, number=-1, etag=None):
        """Iterate over public members of this organization.

        :param int number: (optional), number of members to return. Default:
            -1 will return all available.
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`User <github3.users.User>`\\ s
        """
        url = self._build_url('public_members', base_url=self._api)
        return self._iter(int(number), url, User, etag=etag)

    def iter_repos(self, type='', number=-1, etag=None):
        """Iterate over repos for this organization.

        :param str type: (optional), accepted values:
            ('all', 'public', 'member', 'private', 'forks', 'sources'), API
            default: 'all'
        :param int number: (optional), number of members to return. Default:
            -1 will return all available.
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`Repository <github3.repos.Repository>`
        """
        url = self._build_url('repos', base_url=self._api)
        params = {}
        # Silently ignore unrecognized type filters, matching API defaults.
        if type in ('all', 'public', 'member', 'private', 'forks', 'sources'):
            params['type'] = type
        return self._iter(int(number), url, Repository, params, etag)

    @requires_auth
    def iter_teams(self, number=-1, etag=None):
        """Iterate over teams that are part of this organization.

        :param int number: (optional), number of teams to return. Default: -1
            returns all available teams.
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`Team <Team>`\\ s
        """
        url = self._build_url('teams', base_url=self._api)
        return self._iter(int(number), url, Team, etag=etag)

    @requires_auth
    def publicize_member(self, login):
        """Make ``login``'s membership in this organization public.

        :returns: bool
        """
        url = self._build_url('public_members', login, base_url=self._api)
        return self._boolean(self._put(url), 204, 404)

    @requires_auth
    def remove_member(self, login):
        """Remove the user with login ``login`` from this
        organization.

        :returns: bool
        """
        url = self._build_url('members', login, base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)

    @requires_auth
    def remove_repo(self, repo, team):
        """Remove ``repo`` from ``team``.

        .. note::
            This iterates over all teams (O(n)); retrieve the team and call
            ``remove_repo`` on it directly for constant time.

        :param str repo: (required), form: 'user/repo'
        :param str team: (required)
        :returns: bool
        """
        for t in self.iter_teams():
            if team == t.name:
                return t.remove_repo(repo)
        return False

    @requires_auth
    def team(self, team_id):
        """Returns Team object with information about team specified by
        ``team_id``.

        :param int team_id: (required), unique id for the team
        :returns: :class:`Team <Team>`
        """
        json = None
        if int(team_id) > 0:
            url = self._build_url('teams', str(team_id))
            json = self._json(self._get(url), 200)
        return Team(json, self._session) if json else None
class Membership(GitHubCore):
    """The wrapper for information about Team and Organization memberships."""

    def __init__(self, membership, session=None):
        super(Membership, self).__init__(membership, session)
        self._update_attributes(membership)

    def _repr(self):
        return '<Membership [{0}]>'.format(self.organization)

    def _update_attributes(self, membership):
        """Refresh cached attributes from a membership payload (dict)."""
        self._api = membership.get('url')
        #: Organization this membership belongs to.
        self.organization = Organization(membership.get('organization', {}),
                                         self)
        #: Membership state, e.g. 'active' or 'pending' (empty if absent).
        self.state = membership.get('state', '')
        self.organization_url = membership.get('organization_url')
        # Convenience booleans derived from ``state``.
        self.active = self.state.lower() == 'active'
        self.pending = self.state.lower() == 'pending'

    @requires_auth
    def edit(self, state):
        """Edit the user's membership.

        :param str state: (required), the state the membership should be in.
            Only accepts ``"active"``.
        :returns: itself
        """
        if state and state.lower() == 'active':
            data = dumps({'state': state.lower()})
            # FIX: _json() was called without the expected status code
            # (every other call site in this module passes one). Also guard
            # against a failed request (json is None) before refreshing
            # attributes, which would otherwise raise AttributeError.
            json = self._json(self._patch(self._api, data=data), 200)
            if json:
                self._update_attributes(json)
        return self
import numpy as np
# R-generated reference values for Cox proportional-hazards regression.
# Naming scheme: <quantity>_<nobs>_<ncovariates>_<flags>, where
#   coef / se     -- fitted coefficients and their standard errors
#   time / hazard -- cumulative-hazard support points and values
#   bre / efr     -- Breslow vs. Efron tie handling (presumably; TODO confirm
#                    against the script that generated these numbers)
#   et            -- with entry times (left truncation); st -- stratified
# Arrays holding a single 0 mark cases where the baseline hazard was not
# exported by the generator.
coef_20_1_bre = np.array([-0.9185611])
se_20_1_bre = np.array([0.4706831])
time_20_1_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,1.1,1.2,1.3,1.4,1.5])
hazard_20_1_bre = np.array([0,0,0.04139181,0.1755379,0.3121216,0.3121216,0.4263121,0.6196358,0.6196358,0.6196358,0.909556,1.31083,1.31083])
coef_20_1_et_bre = np.array([-0.8907007])
se_20_1_et_bre = np.array([0.4683384])
time_20_1_et_bre = np.array([0])
hazard_20_1_et_bre = np.array([0])
coef_20_1_st_bre = np.array([-0.5766809])
se_20_1_st_bre = np.array([0.4418918])
time_20_1_st_bre = np.array([0])
hazard_20_1_st_bre = np.array([0])
coef_20_1_et_st_bre = np.array([-0.5785683])
se_20_1_et_st_bre = np.array([0.4388437])
time_20_1_et_st_bre = np.array([0])
hazard_20_1_et_st_bre = np.array([0])
coef_20_1_efr = np.array([-0.9975319])
se_20_1_efr = np.array([0.4792421])
time_20_1_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,1.1,1.2,1.3,1.4,1.5])
hazard_20_1_efr = np.array([0,0,0.03934634,0.1663316,0.2986427,0.2986427,0.4119189,0.6077373,0.6077373,0.6077373,0.8933041,1.285732,1.285732])
coef_20_1_et_efr = np.array([-0.9679541])
se_20_1_et_efr = np.array([0.4766406])
time_20_1_et_efr = np.array([0])
hazard_20_1_et_efr = np.array([0])
coef_20_1_st_efr = np.array([-0.6345294])
se_20_1_st_efr = np.array([0.4455952])
time_20_1_st_efr = np.array([0])
hazard_20_1_st_efr = np.array([0])
coef_20_1_et_st_efr = np.array([-0.6355622])
se_20_1_et_st_efr = np.array([0.4423104])
time_20_1_et_st_efr = np.array([0])
hazard_20_1_et_st_efr = np.array([0])
# n=50 reference results: one covariate (50_1) and two covariates (50_2).
# See the naming-scheme legend at the 20_1 block above (if present).
coef_50_1_bre = np.array([-0.6761247])
se_50_1_bre = np.array([0.25133])
time_50_1_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.5,1.6,1.7,1.8,1.9,2.4,2.8])
hazard_50_1_bre = np.array([0,0.04895521,0.08457461,0.2073863,0.2382473,0.2793018,0.3271622,0.3842953,0.3842953,0.5310807,0.6360276,0.7648251,0.7648251,0.9294298,0.9294298,0.9294298,1.206438,1.555569,1.555569])
coef_50_1_et_bre = np.array([-0.6492871])
se_50_1_et_bre = np.array([0.2542493])
time_50_1_et_bre = np.array([0])
hazard_50_1_et_bre = np.array([0])
coef_50_1_st_bre = np.array([-0.7051135])
se_50_1_st_bre = np.array([0.2852093])
time_50_1_st_bre = np.array([0])
hazard_50_1_st_bre = np.array([0])
coef_50_1_et_st_bre = np.array([-0.8672546])
se_50_1_et_st_bre = np.array([0.3443235])
time_50_1_et_st_bre = np.array([0])
hazard_50_1_et_st_bre = np.array([0])
coef_50_1_efr = np.array([-0.7119322])
se_50_1_efr = np.array([0.2533563])
time_50_1_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.5,1.6,1.7,1.8,1.9,2.4,2.8])
hazard_50_1_efr = np.array([0,0.04773902,0.08238731,0.2022993,0.2327053,0.2736316,0.3215519,0.3787123,0.3787123,0.526184,0.6323073,0.7627338,0.7627338,0.9288858,0.9288858,0.9288858,1.206835,1.556054,1.556054])
coef_50_1_et_efr = np.array([-0.7103063])
se_50_1_et_efr = np.array([0.2598129])
time_50_1_et_efr = np.array([0])
hazard_50_1_et_efr = np.array([0])
coef_50_1_st_efr = np.array([-0.7417904])
se_50_1_st_efr = np.array([0.2846437])
time_50_1_st_efr = np.array([0])
hazard_50_1_st_efr = np.array([0])
coef_50_1_et_st_efr = np.array([-0.9276112])
se_50_1_et_st_efr = np.array([0.3462638])
time_50_1_et_st_efr = np.array([0])
hazard_50_1_et_st_efr = np.array([0])
coef_50_2_bre = np.array([-0.5935189,0.5035724])
se_50_2_bre = np.array([0.2172841,0.2399933])
time_50_2_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.9,2.7,2.9])
hazard_50_2_bre = np.array([0.02695812,0.09162381,0.1309537,0.1768423,0.2033353,0.2033353,0.3083449,0.3547287,0.4076453,0.4761318,0.5579718,0.7610905,0.918962,0.918962,1.136173,1.605757,2.457676,2.457676])
coef_50_2_et_bre = np.array([-0.4001465,0.4415933])
se_50_2_et_bre = np.array([0.1992302,0.2525949])
time_50_2_et_bre = np.array([0])
hazard_50_2_et_bre = np.array([0])
coef_50_2_st_bre = np.array([-0.6574891,0.4416079])
se_50_2_st_bre = np.array([0.2753398,0.269458])
time_50_2_st_bre = np.array([0])
hazard_50_2_st_bre = np.array([0])
coef_50_2_et_st_bre = np.array([-0.3607069,0.2731982])
se_50_2_et_st_bre = np.array([0.255415,0.306942])
time_50_2_et_st_bre = np.array([0])
hazard_50_2_et_st_bre = np.array([0])
coef_50_2_efr = np.array([-0.6107485,0.5309737])
se_50_2_efr = np.array([0.2177713,0.2440535])
time_50_2_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.9,2.7,2.9])
hazard_50_2_efr = np.array([0.02610571,0.08933637,0.1279094,0.1731699,0.19933,0.19933,0.303598,0.3497025,0.4023939,0.4706978,0.5519237,0.7545023,0.9129989,0.9129989,1.13186,1.60574,2.472615,2.472615])
coef_50_2_et_efr = np.array([-0.4092002,0.4871344])
se_50_2_et_efr = np.array([0.1968905,0.2608527])
time_50_2_et_efr = np.array([0])
hazard_50_2_et_efr = np.array([0])
coef_50_2_st_efr = np.array([-0.6631286,0.4663285])
se_50_2_st_efr = np.array([0.2748224,0.273603])
time_50_2_st_efr = np.array([0])
hazard_50_2_st_efr = np.array([0])
coef_50_2_et_st_efr = np.array([-0.3656059,0.2943912])
se_50_2_et_st_efr = np.array([0.2540752,0.3124632])
time_50_2_et_st_efr = np.array([0])
hazard_50_2_et_st_efr = np.array([0])
# n=100, five covariates. Same naming scheme as the blocks above.
coef_100_5_bre = np.array([-0.529776,-0.2916374,-0.1205425,0.3493476,0.6034305])
se_100_5_bre = np.array([0.1789305,0.1482505,0.1347422,0.1528205,0.1647927])
time_100_5_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.5,2.8,3.2,3.3])
hazard_100_5_bre = np.array([0.02558588,0.05608812,0.1087773,0.1451098,0.1896703,0.2235791,0.3127521,0.3355107,0.439452,0.504983,0.5431706,0.5841462,0.5841462,0.5841462,0.6916466,0.7540191,0.8298704,1.027876,1.170335,1.379306,1.648758,1.943177,1.943177,1.943177,4.727101])
coef_100_5_et_bre = np.array([-0.4000784,-0.1790941,-0.1378969,0.3288529,0.533246])
se_100_5_et_bre = np.array([0.1745655,0.1513545,0.1393968,0.1487803,0.1686992])
time_100_5_et_bre = np.array([0])
hazard_100_5_et_bre = np.array([0])
coef_100_5_st_bre = np.array([-0.53019,-0.3225739,-0.1241568,0.3246598,0.6196859])
se_100_5_st_bre = np.array([0.1954581,0.1602811,0.1470644,0.17121,0.1784115])
time_100_5_st_bre = np.array([0])
hazard_100_5_st_bre = np.array([0])
coef_100_5_et_st_bre = np.array([-0.3977171,-0.2166136,-0.1387623,0.3251726,0.5664705])
se_100_5_et_st_bre = np.array([0.1951054,0.1707925,0.1501968,0.1699932,0.1843428])
time_100_5_et_st_bre = np.array([0])
hazard_100_5_et_st_bre = np.array([0])
coef_100_5_efr = np.array([-0.5641909,-0.3233021,-0.1234858,0.3712328,0.6421963])
se_100_5_efr = np.array([0.1804027,0.1496253,0.1338531,0.1529832,0.1670848])
time_100_5_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.5,2.8,3.2,3.3])
hazard_100_5_efr = np.array([0.02393412,0.05276399,0.1028432,0.1383859,0.1823461,0.2158107,0.3037825,0.3264864,0.4306648,0.4964367,0.5348595,0.5760305,0.5760305,0.5760305,0.6842238,0.7468135,0.8228841,1.023195,1.166635,1.379361,1.652898,1.950119,1.950119,1.950119,4.910635])
coef_100_5_et_efr = np.array([-0.4338666,-0.2140139,-0.1397387,0.3535993,0.5768645])
se_100_5_et_efr = np.array([0.1756485,0.1527244,0.138298,0.1488427,0.1716654])
time_100_5_et_efr = np.array([0])
hazard_100_5_et_efr = np.array([0])
coef_100_5_st_efr = np.array([-0.5530876,-0.3331652,-0.128381,0.3503472,0.6397813])
se_100_5_st_efr = np.array([0.1969338,0.1614976,0.1464088,0.171299,0.1800787])
time_100_5_st_efr = np.array([0])
hazard_100_5_st_efr = np.array([0])
coef_100_5_et_st_efr = np.array([-0.421153,-0.2350069,-0.1433638,0.3538863,0.5934568])
se_100_5_et_st_efr = np.array([0.1961729,0.1724719,0.1492979,0.170464,0.1861849])
time_100_5_et_st_efr = np.array([0])
hazard_100_5_et_st_efr = np.array([0])
# n=1000, ten covariates. Same naming scheme as the blocks above.
coef_1000_10_bre = np.array([-0.4699279,-0.464557,-0.308411,-0.2158298,-0.09048563,0.09359662,0.112588,0.3343705,0.3480601,0.5634985])
se_1000_10_bre = np.array([0.04722914,0.04785291,0.04503528,0.04586872,0.04429793,0.0446141,0.04139944,0.04464292,0.04559903,0.04864393])
time_1000_10_bre = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.2,2.3,2.4,2.5,2.6,2.7,2.8,2.9,3,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9,4,4.1,4.2,4.3,4.4,4.6,4.8,4.9,5,5.1,5.2,5.7,5.8,5.9,6.9])
hazard_1000_10_bre = np.array([0.01610374,0.04853538,0.08984849,0.1311329,0.168397,0.2230488,0.2755388,0.3312606,0.3668702,0.4146558,0.477935,0.5290705,0.5831775,0.6503129,0.7113068,0.7830385,0.8361717,0.8910061,0.9615944,1.024011,1.113399,1.165349,1.239827,1.352902,1.409548,1.53197,1.601843,1.682158,1.714907,1.751564,1.790898,1.790898,1.83393,1.83393,1.936055,1.992303,2.050778,2.118776,2.263056,2.504999,2.739343,2.895514,3.090349,3.090349,3.391772,3.728142,4.152769,4.152769,4.152769,4.725957,4.725957,5.69653,5.69653,5.69653])
coef_1000_10_et_bre = np.array([-0.410889,-0.3929442,-0.2975845,-0.1851533,-0.0918359,0.1011997,0.106735,0.2899179,0.3220672,0.5069589])
se_1000_10_et_bre = np.array([0.04696754,0.04732169,0.04537707,0.04605371,0.04365232,0.04450021,0.04252475,0.04482007,0.04562374,0.04859727])
time_1000_10_et_bre = np.array([0])
hazard_1000_10_et_bre = np.array([0])
coef_1000_10_st_bre = np.array([-0.471015,-0.4766859,-0.3070839,-0.2091938,-0.09190845,0.0964942,0.1138269,0.3307131,0.3543551,0.562492])
se_1000_10_st_bre = np.array([0.04814778,0.04841938,0.04572291,0.04641227,0.04502525,0.04517603,0.04203737,0.04524356,0.04635037,0.04920866])
time_1000_10_st_bre = np.array([0])
hazard_1000_10_st_bre = np.array([0])
coef_1000_10_et_st_bre = np.array([-0.4165849,-0.4073504,-0.2980959,-0.1765194,-0.09152798,0.1013213,0.1009838,0.2859668,0.3247608,0.5044448])
se_1000_10_et_st_bre = np.array([0.04809818,0.04809499,0.0460829,0.04679922,0.0445294,0.04514045,0.04339298,0.04580591,0.04652447,0.04920744])
time_1000_10_et_st_bre = np.array([0])
hazard_1000_10_et_st_bre = np.array([0])
coef_1000_10_efr = np.array([-0.4894399,-0.4839746,-0.3227769,-0.2261293,-0.09318482,0.09767154,0.1173205,0.3493732,0.3640146,0.5879749])
se_1000_10_efr = np.array([0.0474181,0.04811855,0.04507655,0.04603044,0.04440409,0.04478202,0.04136728,0.04473343,0.045768,0.04891375])
time_1000_10_efr = np.array([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2,2.1,2.2,2.3,2.4,2.5,2.6,2.7,2.8,2.9,3,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9,4,4.1,4.2,4.3,4.4,4.6,4.8,4.9,5,5.1,5.2,5.7,5.8,5.9,6.9])
hazard_1000_10_efr = np.array([0.01549698,0.04680035,0.08682564,0.1269429,0.1632388,0.2167291,0.2682311,0.3231316,0.3582936,0.4054892,0.4681098,0.5188697,0.5727059,0.639571,0.7003012,0.7718979,0.825053,0.880063,0.950935,1.013828,1.103903,1.156314,1.231707,1.346235,1.40359,1.527475,1.598231,1.6795,1.712779,1.750227,1.790455,1.790455,1.834455,1.834455,1.938997,1.996804,2.056859,2.126816,2.275217,2.524027,2.76669,2.929268,3.13247,3.13247,3.448515,3.80143,4.249649,4.249649,4.249649,4.851365,4.851365,5.877307,5.877307,5.877307])
coef_1000_10_et_efr = np.array([-0.4373066,-0.4131901,-0.3177637,-0.1978493,-0.09679451,0.1092037,0.1136069,0.3088907,0.3442007,0.5394121])
se_1000_10_et_efr = np.array([0.04716041,0.04755342,0.04546713,0.04627802,0.04376583,0.04474868,0.04259991,0.04491564,0.04589027,0.04890847])
time_1000_10_et_efr = np.array([0])
hazard_1000_10_et_efr = np.array([0])
coef_1000_10_st_efr = np.array([-0.4911117,-0.4960756,-0.3226152,-0.220949,-0.09478141,0.1015735,0.1195524,0.3446977,0.3695904,0.5878576])
se_1000_10_st_efr = np.array([0.04833676,0.04868554,0.04578407,0.04661755,0.04518267,0.04537135,0.04202183,0.04531266,0.0464931,0.04949831])
time_1000_10_st_efr = np.array([0])
hazard_1000_10_st_efr = np.array([0])
coef_1000_10_et_st_efr = np.array([-0.444355,-0.4283278,-0.3198815,-0.1901781,-0.09727039,0.1106191,0.1092104,0.3034778,0.3451699,0.5382381])
se_1000_10_et_st_efr = np.array([0.04830664,0.04833619,0.04617371,0.04706401,0.04472699,0.0454208,0.04350539,0.04588588,0.04675675,0.04950987])
time_1000_10_et_st_efr = np.array([0])
hazard_1000_10_et_st_efr = np.array([0])
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.interceptor;
import org.junit.jupiter.api.Test;
import org.springframework.aop.framework.ProxyFactory;
import org.springframework.beans.factory.NamedBean;
import org.springframework.beans.testfixture.beans.ITestBean;
import org.springframework.beans.testfixture.beans.TestBean;
import static org.assertj.core.api.Assertions.assertThat;
/**
* @author Rod Johnson
* @author Chris Beams
*/
class ExposeBeanNameAdvisorsTests {
private static class RequiresBeanNameBoundTestBean extends TestBean {
private final String beanName;
public RequiresBeanNameBoundTestBean(String beanName) {
this.beanName = beanName;
}
@Override
public int getAge() {
assertThat(ExposeBeanNameAdvisors.getBeanName()).isEqualTo(beanName);
return super.getAge();
}
}
@Test
void testNoIntroduction() {
String beanName = "foo";
TestBean target = new RequiresBeanNameBoundTestBean(beanName);
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvisor(ExposeInvocationInterceptor.ADVISOR);
pf.addAdvisor(ExposeBeanNameAdvisors.createAdvisorWithoutIntroduction(beanName));
ITestBean proxy = (ITestBean) pf.getProxy();
boolean condition = proxy instanceof NamedBean;
assertThat(condition).as("No introduction").isFalse();
// Requires binding
proxy.getAge();
}
@Test
void testWithIntroduction() {
String beanName = "foo";
TestBean target = new RequiresBeanNameBoundTestBean(beanName);
ProxyFactory pf = new ProxyFactory(target);
pf.addAdvisor(ExposeInvocationInterceptor.ADVISOR);
pf.addAdvisor(ExposeBeanNameAdvisors.createAdvisorIntroducingNamedBean(beanName));
ITestBean proxy = (ITestBean) pf.getProxy();
boolean condition = proxy instanceof NamedBean;
assertThat(condition).as("Introduction was made").isTrue();
// Requires binding
proxy.getAge();
NamedBean nb = (NamedBean) proxy;
assertThat(nb.getBeanName()).as("Name returned correctly").isEqualTo(beanName);
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-aop/src/test/java/org/springframework/aop/interceptor/ExposeBeanNameAdvisorsTests.java |
/other-index.html | html | github | https://github.com/rails/rails | actionpack/test/fixtures/public/other-index.html |
"""
Base class for account settings page.
"""
from common.test.acceptance.pages.lms import BASE_URL
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from common.test.acceptance.pages.lms.fields import FieldsMixin
class AccountSettingsPage(FieldsMixin, PageObject):
    """
    Page object for the LMS account settings page.
    """
    url = BASE_URL + '/account/settings'

    def is_browser_on_page(self):
        return self.q(css='.account-settings-container').present

    def sections_structure(self):
        """
        Return list of section titles and field titles for each section.

        Example: [
            {
                'title': 'Section Title'
                'fields': ['Field 1 title', 'Field 2 title',...]
            },
            ...
        ]
        """
        def describe_section(section):
            # One dict per section: its header text plus all field titles.
            header = section.find_element_by_class_name('section-header')
            field_titles = section.find_elements_by_class_name('u-field-title')
            return {
                'title': header.text,
                'fields': [title.text for title in field_titles],
            }

        sections = self.q(css='#aboutTabSections-tabpanel .section')
        return [describe_section(section) for section in sections]

    def _is_loading_in_progress(self):
        """
        Check if loading indicator is visible.
        """
        indicator = self.q(css='.ui-loading-indicator')
        if not indicator.present:
            return False
        return 'is-hidden' not in indicator.attrs('class')[0].split()

    def wait_for_loading_indicator(self):
        """
        Wait for loading indicator to become visible.
        """
        EmptyPromise(self._is_loading_in_progress, "Loading is in progress.").fulfill()

    def switch_account_settings_tabs(self, tab_id):
        """
        Switch between the different account settings tabs.
        """
        self.q(css='#' + tab_id).click()

    @property
    def is_order_history_tab_visible(self):
        """ Check if tab with the name "Order History" is visible."""
        return self.q(css='.u-field-orderHistory').visible

    def get_value_of_order_history_row_item(self, field_id, field_name):
        """ Return the text value of the provided order field name."""
        selector = '.u-field-{} .u-field-order-{}'.format(field_id, field_name)
        row_item = self.q(css=selector)
        if row_item.present:
            return row_item.text[0]
        return None

    def order_button_is_visible(self, field_id):
        """ Check that if hovering over the order history row shows the
        order detail link or not.
        """
        return self.q(css='.u-field-{} .u-field-link'.format(field_id)).visible
# Copyright 2011, VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import datetime
import functools
import hashlib
import logging as std_logging
import multiprocessing
import os
import random
import signal
import socket
import uuid
from eventlet.green import subprocess
from oslo.config import cfg
from oslo.utils import excutils
from neutron.common import constants as q_const
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
# strftime/strptime pattern for ISO-8601-style UTC timestamps.
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
# Prefix applied to every lock name taken via ``synchronized`` below.
SYNCHRONIZED_PREFIX = 'neutron-'
# Decorator factory: ``@synchronized('name')`` serializes callers on a
# neutron-prefixed lock (see oslo lockutils).
synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX)
class cache_method_results(object):
    """This decorator is intended for object methods only.

    The decorated method memoizes its results in the owning instance's
    ``_cache`` attribute (which must expose ``get(key, default)`` and
    ``set(key, value, ttl)``). Instances without a usable ``_cache`` fall
    back to calling the wrapped method directly.
    """
    def __init__(self, func):
        self.func = func
        # Make the wrapper look like the wrapped method (name, docstring).
        functools.update_wrapper(self, func)
        # Used to log the "no _cache attribute" warning only once.
        self._first_call = True
        # Unique sentinel distinguishing "not cached" from a cached None.
        self._not_cached = object()

    def _get_from_cache(self, target_self, *args, **kwargs):
        # Cache key: fully qualified method name plus the call arguments.
        func_name = "%(module)s.%(class)s.%(func_name)s" % {
            'module': target_self.__module__,
            'class': target_self.__class__.__name__,
            'func_name': self.func.__name__,
        }
        key = (func_name,) + args
        if kwargs:
            # NOTE: dict2tuple is presumably a module-level helper that
            # produces a hashable, order-stable view of kwargs -- defined
            # elsewhere in this module (not visible here).
            key += dict2tuple(kwargs)
        try:
            item = target_self._cache.get(key, self._not_cached)
        except TypeError:
            # Unhashable argument(s): skip caching for this call entirely.
            LOG.debug("Method %(func_name)s cannot be cached due to "
                      "unhashable parameters: args: %(args)s, kwargs: "
                      "%(kwargs)s",
                      {'func_name': func_name,
                       'args': args,
                       'kwargs': kwargs})
            return self.func(target_self, *args, **kwargs)

        if item is self._not_cached:
            # Cache miss: compute and store with no expiry (ttl=None).
            item = self.func(target_self, *args, **kwargs)
            target_self._cache.set(key, item, None)

        return item

    def __call__(self, target_self, *args, **kwargs):
        # Missing the attribute entirely is a programming error ...
        if not hasattr(target_self, '_cache'):
            raise NotImplementedError(
                "Instance of class %(module)s.%(class)s must contain _cache "
                "attribute" % {
                    'module': target_self.__module__,
                    'class': target_self.__class__.__name__})
        # ... whereas a falsy _cache just disables caching (warn once).
        if not target_self._cache:
            if self._first_call:
                LOG.debug("Instance of class %(module)s.%(class)s doesn't "
                          "contain attribute _cache therefore results "
                          "cannot be cached for %(func_name)s.",
                          {'module': target_self.__module__,
                           'class': target_self.__class__.__name__,
                           'func_name': self.func.__name__})
                self._first_call = False
            return self.func(target_self, *args, **kwargs)
        return self._get_from_cache(target_self, *args, **kwargs)

    def __get__(self, obj, objtype):
        # Descriptor protocol: bind the decorated method to its instance so
        # ``instance.method(...)`` supplies ``target_self`` automatically.
        return functools.partial(self.__call__, obj)
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.
    :returns: data from file
    """
    current_mtime = os.path.getmtime(filename)
    stale = not cache_info or cache_info.get('mtime') != current_mtime
    if stale:
        LOG.debug("Reloading cached file %s", filename)
        with open(filename) as handle:
            cache_info['data'] = handle.read()
        cache_info['mtime'] = current_mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
def find_config_file(options, config_file):
    """Return the first config file found.

    We search for the paste config file in the following order:
    * If --config-file option is used, use that
    * Search for the configuration files via common cfg directories
    :retval Full path to config file, or None if no config file found
    """
    def expand(path):
        return os.path.abspath(os.path.expanduser(path))

    requested = options.get('config_file')
    if requested and os.path.exists(requested):
        return expand(requested)

    dir_to_common = os.path.dirname(os.path.abspath(__file__))
    root = os.path.join(dir_to_common, '..', '..', '..', '..')

    # Standard directory search order for the config file.
    search_dirs = [
        expand(os.path.join(os.getcwd(), 'etc')),
        expand(os.path.join('~', '.neutron-venv', 'etc', 'neutron')),
        expand('~'),
        os.path.join(cfg.CONF.state_path, 'etc'),
        os.path.join(cfg.CONF.state_path, 'etc', 'neutron'),
        expand(os.path.join('~', '.local', 'etc', 'neutron')),
        '/usr/etc/neutron',
        '/usr/local/etc/neutron',
        '/etc/neutron/',
        '/etc',
    ]

    if 'plugin' in options:
        search_dirs = [os.path.join(d, 'neutron', 'plugins', options['plugin'])
                       for d in search_dirs]

    plugins_root = os.path.join(root, 'plugins')
    if os.path.exists(plugins_root):
        plugin_etc_dirs = [expand(os.path.join(plugins_root, name, 'etc'))
                           for name in os.listdir(plugins_root)]
        search_dirs.extend(d for d in plugin_etc_dirs if os.path.isdir(d))

    for candidate_dir in search_dirs:
        candidate = os.path.join(candidate_dir, config_file)
        if os.path.exists(candidate):
            return candidate
def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect, so restore the OS default in the child.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None):
    """Spawn a subprocess with SIGPIPE restored and inherited fds closed."""
    return subprocess.Popen(args,
                            shell=shell,
                            stdin=stdin,
                            stdout=stdout,
                            stderr=stderr,
                            preexec_fn=_subprocess_setup,
                            close_fds=True,
                            env=env)
def parse_mappings(mapping_list, unique_values=True):
    """Parse a list of mapping strings into a dictionary.

    :param mapping_list: a list of strings of the form '<key>:<value>'
    :param unique_values: values must be unique if True
    :returns: a dict mapping keys to values
    :raises ValueError: on a malformed, empty-sided, or duplicate mapping
    """
    mappings = {}
    for mapping in mapping_list:
        mapping = mapping.strip()
        if not mapping:
            continue
        split_result = mapping.split(':')
        if len(split_result) != 2:
            raise ValueError(_("Invalid mapping: '%s'") % mapping)
        key = split_result[0].strip()
        if not key:
            raise ValueError(_("Missing key in mapping: '%s'") % mapping)
        value = split_result[1].strip()
        if not value:
            raise ValueError(_("Missing value in mapping: '%s'") % mapping)
        if key in mappings:
            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
                               "unique") % {'key': key, 'mapping': mapping})
        # Fix: dict.itervalues() is Python 2 only; .values() works on both.
        if unique_values and value in mappings.values():
            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
                               "not unique") % {'value': value,
                                                'mapping': mapping})
        mappings[key] = value
    return mappings
def get_hostname():
    """Return the hostname of the local machine."""
    return socket.gethostname()
def compare_elements(a, b):
    """Return True when a and b contain the same elements.

    Ordering and duplicate counts are ignored; None is treated as empty.
    """
    left = set(a) if a is not None else set()
    right = set(b) if b is not None else set()
    return left == right
def dict2str(dic):
    """Serialize a flat dict to 'k1=v1,k2=v2' form with keys sorted."""
    # Fix: dict.iteritems() is Python 2 only; sorted(dic.items()) yields
    # the same key-sorted pairs on both Python 2 and 3.
    return ','.join("%s=%s" % (key, val)
                    for key, val in sorted(dic.items()))


def str2dict(string):
    """Inverse of dict2str: parse 'k1=v1,k2=v2' back into a dict."""
    res_dict = {}
    for keyvalue in string.split(','):
        (key, value) = keyvalue.split('=', 1)
        res_dict[key] = value
    return res_dict


def dict2tuple(d):
    """Return the dict's items as a key-sorted tuple (a hashable form)."""
    # Fix: calling .sort() on d.items() breaks on Python 3 where items()
    # is a view; sorted() works on both.
    return tuple(sorted(d.items()))


def diff_list_of_dict(old_list, new_list):
    """Diff two lists of dicts.

    :returns: (added, removed) where each is a list of dicts present in
        only one of the inputs.
    """
    new_set = set(dict2str(l) for l in new_list)
    old_set = set(dict2str(l) for l in old_list)
    added = new_set - old_set
    removed = old_set - new_set
    return [str2dict(a) for a in added], [str2dict(r) for r in removed]
def is_extension_supported(plugin, ext_alias):
    """Return True if the plugin advertises the given extension alias."""
    supported = getattr(plugin, "supported_extension_aliases", [])
    return ext_alias in supported
def log_opt_values(log):
    """Dump all registered oslo.config option values to *log* at DEBUG level."""
    cfg.CONF.log_opt_values(log, std_logging.DEBUG)
def is_valid_vlan_tag(vlan):
    """Return True if *vlan* falls within the configured VLAN ID range."""
    return q_const.MIN_VLAN_TAG <= vlan <= q_const.MAX_VLAN_TAG


def is_valid_gre_id(gre_id):
    """Return True if *gre_id* falls within the configured GRE key range."""
    return q_const.MIN_GRE_ID <= gre_id <= q_const.MAX_GRE_ID


def is_valid_vxlan_vni(vni):
    """Return True if *vni* falls within the configured VXLAN VNI range."""
    return q_const.MIN_VXLAN_VNI <= vni <= q_const.MAX_VXLAN_VNI
def get_random_mac(base_mac):
    """Build a MAC address string from *base_mac*, randomizing free octets.

    The first three octets always come from base_mac; the fourth is kept
    only when it is not '00'; remaining octets are random.
    """
    octets = [int(base_mac[0], 16),
              int(base_mac[1], 16),
              int(base_mac[2], 16)]
    octets.extend(random.randint(0x00, 0xff) for _ in range(3))
    if base_mac[3] != '00':
        octets[3] = int(base_mac[3], 16)
    return ':'.join("%02x" % octet for octet in octets)
def get_random_string(length):
    """Get a random hex string of the specified length.

    based on Cinder library
    cinder/transfer/api.py

    NOTE: random (seeded from the clock) is not cryptographically secure;
    do not use this for security-sensitive tokens.
    """
    rndstr = ""
    random.seed(datetime.datetime.now().microsecond)
    while len(rndstr) < length:
        # Fix: hashlib requires bytes on Python 3, so encode the str first.
        rndstr += hashlib.sha224(
            str(random.random()).encode('utf-8')).hexdigest()
    return rndstr[0:length]
def get_dhcp_agent_device_id(network_id, host):
    """Derive a stable DHCP device id for (network, host).

    Only the short hostname is used so that a bare hostname and an FQDN
    for the same machine yield the same id.
    """
    short_name = host.partition('.')[0]
    host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(short_name))
    return 'dhcp%s-%s' % (host_uuid, network_id)
def cpu_count():
    """Return the number of CPUs; fall back to 1 when it cannot be found."""
    try:
        count = multiprocessing.cpu_count()
    except NotImplementedError:
        count = 1
    return count
class exception_logger(object):
    """Decorator that logs any exception raised by the wrapped function.

    :param logger: callable used to log the exception; defaults to the
        ``exception`` method of a logger named after the function's module.
    :returns: the wrapped function's value when no exception is raised;
        otherwise the exception is logged and re-raised.
    """
    def __init__(self, logger=None):
        self.logger = logger

    def __call__(self, func):
        if self.logger is None:
            module_logger = logging.getLogger(func.__module__)
            self.logger = module_logger.exception

        def wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as exc:
                # Log, then let save_and_reraise_exception re-raise the
                # original exception with its traceback intact.
                with excutils.save_and_reraise_exception():
                    self.logger(exc)
        return wrapped
def is_dvr_serviced(device_owner):
    """Check if the port needs to be serviced by DVR.

    Helper function to check the device owners of the
    ports in the compute and service node to make sure
    if they are required for DVR or any service directly or
    indirectly associated with DVR.
    """
    # Fix: the original final line had dataset-extraction garbage
    # ("| unknown | ...") fused onto its end, making it a syntax error.
    dvr_serviced_device_owners = (q_const.DEVICE_OWNER_LOADBALANCER,
                                  q_const.DEVICE_OWNER_DHCP)
    return (device_owner.startswith('compute:') or
            device_owner in dvr_serviced_device_owners)
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type

# Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_hotspot20_h2qp_operator_name
short_description: Configure operator friendly name in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller_hotspot20 feature and h2qp_operator_name category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_hotspot20_h2qp_operator_name:
description:
- Configure operator friendly name.
default: null
type: dict
suboptions:
name:
description:
- Friendly name ID.
required: true
type: str
value_list:
description:
- Name list.
type: list
suboptions:
index:
description:
- Value index.
required: true
type: int
lang:
description:
- Language code.
type: str
value:
description:
- Friendly name value.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure operator friendly name.
fortios_wireless_controller_hotspot20_h2qp_operator_name:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_hotspot20_h2qp_operator_name:
name: "default_name_3"
value_list:
-
index: "5"
lang: "<your_own_value>"
value: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate via the legacy fortiosapi path."""
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        # Default to HTTPS when the flag is absent or truthy.
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_hotspot20_h2qp_operator_name_data(json):
    """Keep only the module's known option keys, dropping unset (None) ones."""
    option_list = ['name', 'value_list']
    return dict((attribute, json[attribute]) for attribute in option_list
                if attribute in json and json[attribute] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, replacing '_' with '-' (FortiOS style).

    Lists are converted in place (matching the original behavior); dicts are
    rebuilt; scalars pass through unchanged.
    """
    if isinstance(data, list):
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
        return data
    if isinstance(data, dict):
        converted = {}
        for key, value in data.items():
            converted[key.replace('_', '-')] = underscore_to_hyphen(value)
        return converted
    return data
def wireless_controller_hotspot20_h2qp_operator_name(data, fos):
    """Create/update or delete the h2qp-operator-name object on the device."""
    vdom = data['vdom']
    state = data['state']
    raw_config = data['wireless_controller_hotspot20_h2qp_operator_name']
    filtered_data = underscore_to_hyphen(
        filter_wireless_controller_hotspot20_h2qp_operator_name_data(raw_config))

    if state == "present":
        return fos.set('wireless-controller.hotspot20',
                       'h2qp-operator-name',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('wireless-controller.hotspot20',
                          'h2qp-operator-name',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Treat DELETE-with-404 (already gone) as success, alongside 'success'."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller_hotspot20(data, fos):
    """Dispatch to the category handler and normalize its response tuple."""
    # NOTE(review): resp stays unbound when the config key is falsy — this
    # mirrors the original behavior; main() always supplies the key.
    if data['wireless_controller_hotspot20_h2qp_operator_name']:
        resp = wireless_controller_hotspot20_h2qp_operator_name(data, fos)

    return (not is_successful_status(resp),
            resp['status'] == "success",
            resp)
def main():
    """Ansible entry point: build the argument spec, connect, and apply."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "wireless_controller_hotspot20_h2qp_operator_name": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "name": {"required": True, "type": "str"},
                "value_list": {"required": False, "type": "list",
                               "options": {
                                   "index": {"required": True, "type": "int"},
                                   "lang": {"required": False, "type": "str"},
                                   "value": {"required": False, "type": "str"}
                               }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse Ansible's persistent connection.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: direct fortiosapi login/logout around the operation.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Fix: the original final line had dataset-extraction garbage ("| unknown |
# ...") fused onto its end, making it a syntax error.
if __name__ == '__main__':
    main()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
# pylint: disable=g-classes-have-attributes
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
import numpy as np
import six
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import categorical_hinge
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.saving.saved_model import metric_serialization
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_variable
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.metrics.Metric')
@six.add_metaclass(abc.ABCMeta)
class Metric(base_layer.Layer):
  """Encapsulates metric logic and state.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    **kwargs: Additional layer keywords arguments.

  Standalone usage:

  ```python
  m = SomeMetric(...)
  for input in ...:
    m.update_state(input)
  print('Final result: ', m.result().numpy())
  ```

  Usage with `compile()` API:

  ```python
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dense(64, activation='relu'))
  model.add(tf.keras.layers.Dense(10, activation='softmax'))

  model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
                loss=tf.keras.losses.CategoricalCrossentropy(),
                metrics=[tf.keras.metrics.CategoricalAccuracy()])

  data = np.random.random((1000, 32))
  labels = np.random.random((1000, 10))

  dataset = tf.data.Dataset.from_tensor_slices((data, labels))
  dataset = dataset.batch(32)

  model.fit(dataset, epochs=10)
  ```

  To be implemented by subclasses:
  * `__init__()`: All state variables should be created in this method by
    calling `self.add_weight()` like: `self.var = self.add_weight(...)`
  * `update_state()`: Has all updates to the state variables like:
    self.var.assign_add(...).
  * `result()`: Computes and returns a value for the metric
    from the state variables.

  Example subclass implementation:

  ```python
  class BinaryTruePositives(tf.keras.metrics.Metric):

    def __init__(self, name='binary_true_positives', **kwargs):
      super(BinaryTruePositives, self).__init__(name=name, **kwargs)
      self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
      y_true = tf.cast(y_true, tf.bool)
      y_pred = tf.cast(y_pred, tf.bool)

      values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
      values = tf.cast(values, self.dtype)
      if sample_weight is not None:
        sample_weight = tf.cast(sample_weight, self.dtype)
        sample_weight = tf.broadcast_to(sample_weight, values.shape)
        values = tf.multiply(values, sample_weight)
      self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
      return self.true_positives
  ```
  """

  def __init__(self, name=None, dtype=None, **kwargs):
    super(Metric, self).__init__(name=name, dtype=dtype, **kwargs)
    self.stateful = True  # All metric layers are stateful.
    self.built = True
    if not base_layer_utils.v2_dtype_behavior_enabled():
      # We only do this when the V2 behavior is not enabled, as when it is
      # enabled, the dtype already defaults to floatx.
      self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name

  def __new__(cls, *args, **kwargs):
    obj = super(Metric, cls).__new__(cls)

    # If `update_state` is not in eager/tf.function and it is not from a
    # built-in metric, wrap it in `tf.function`. This is so that users writing
    # custom metrics in v1 need not worry about control dependencies and
    # return ops.
    if (base_layer_utils.is_in_eager_or_tf_function() or
        is_built_in(cls)):
      obj_update_state = obj.update_state

      def update_state_fn(*args, **kwargs):
        control_status = ag_ctx.control_status_ctx()
        ag_update_state = autograph.tf_convert(obj_update_state, control_status)
        return ag_update_state(*args, **kwargs)
    else:
      if isinstance(obj.update_state, def_function.Function):
        update_state_fn = obj.update_state
      else:
        update_state_fn = def_function.function(obj.update_state)

    # Bind the wrapped update_state/result back onto the instance so user
    # subclass implementations are transparently decorated.
    obj.update_state = types.MethodType(
        metrics_utils.update_state_wrapper(update_state_fn), obj)

    obj_result = obj.result

    def result_fn(*args, **kwargs):
      control_status = ag_ctx.control_status_ctx()
      ag_result = autograph.tf_convert(obj_result, control_status)
      return ag_result(*args, **kwargs)

    obj.result = types.MethodType(metrics_utils.result_wrapper(result_fn), obj)

    return obj

  def __call__(self, *args, **kwargs):
    """Accumulates statistics and then computes metric result value.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric,
        passed on to `update_state()`.

    Returns:
      The metric value tensor.
    """

    def replica_local_fn(*args, **kwargs):
      """Updates the state of the metric in a replica-local context."""
      if any(
          isinstance(arg, keras_tensor.KerasTensor)
          for arg in nest.flatten((args, kwargs))):
        # Symbolic KerasTensor inputs: skip the state update.
        update_op = None
      else:
        update_op = self.update_state(*args, **kwargs)  # pylint: disable=not-callable
      update_ops = []
      if update_op is not None:
        update_ops.append(update_op)
      with ops.control_dependencies(update_ops):
        result_t = self.result()  # pylint: disable=not-callable

        # We are adding the metric object as metadata on the result tensor.
        # This is required when we want to use a metric with `add_metric` API on
        # a Model/Layer in graph mode. This metric instance will later be used
        # to reset variable state after each epoch of training.
        # Example:
        #   model = Model()
        #   mean = Mean()
        #   model.add_metric(mean(values), name='mean')
        result_t._metric_obj = self  # pylint: disable=protected-access
        return result_t

    from tensorflow.python.keras.distribute import distributed_training_utils  # pylint:disable=g-import-not-at-top
    return distributed_training_utils.call_replica_local_fn(
        replica_local_fn, *args, **kwargs)

  @property
  def dtype(self):
    return self._dtype

  def get_config(self):
    """Returns the serializable config of the metric."""
    return {'name': self.name, 'dtype': self.dtype}

  def reset_states(self):
    """Resets all of the metric state variables.

    This function is called between epochs/steps,
    when a metric is evaluated during training.
    """
    K.batch_set_value([(v, 0) for v in self.variables])

  @abc.abstractmethod
  def update_state(self, *args, **kwargs):
    """Accumulates statistics for the metric.

    Note: This function is executed as a graph function in graph mode.
    This means:
      a) Operations on the same resource are executed in textual order.
         This should make it easier to do things like add the updated
         value of a variable to another, for example.
      b) You don't need to worry about collecting the update ops to execute.
         All update ops added to the graph by this function will be executed.
      As a result, code should generally work the same way with graph or
      eager execution.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  @abc.abstractmethod
  def result(self):
    """Computes and returns the metric value tensor.

    Result computation is an idempotent operation that simply calculates the
    metric value using the state variables.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  ### For use by subclasses ###
  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name,
                 shape=(),
                 aggregation=tf_variables.VariableAggregation.SUM,
                 synchronization=tf_variables.VariableSynchronization.ON_READ,
                 initializer=None,
                 dtype=None):
    """Adds state variable. Only for use by subclasses."""
    from tensorflow.python.keras.distribute import distributed_training_utils  # pylint:disable=g-import-not-at-top

    if distribute_ctx.has_strategy():
      strategy = distribute_ctx.get_strategy()
    else:
      strategy = None

    # TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU.
    if distributed_training_utils.is_tpu_strategy(strategy):
      synchronization = tf_variables.VariableSynchronization.ON_WRITE

    with ops.init_scope():
      return super(Metric, self).add_weight(
          name=name,
          shape=shape,
          dtype=self._dtype if dtype is None else dtype,
          trainable=False,
          initializer=initializer,
          collections=[],
          synchronization=synchronization,
          aggregation=aggregation)

  ### End: For use by subclasses ###

  @property
  def _trackable_saved_model_saver(self):
    return metric_serialization.MetricSavedModelSaver(self)
class Reduce(Metric):
  """Encapsulates metrics that perform a reduce operation on the values.

  Args:
    reduction: a `tf.keras.metrics.Reduction` enum value.
    name: string name of the metric instance.
    dtype: (Optional) data type of the metric result.
  """

  def __init__(self, reduction, name, dtype=None):
    super(Reduce, self).__init__(name=name, dtype=dtype)
    self.reduction = reduction
    self.total = self.add_weight(
        'total', initializer=init_ops.zeros_initializer)
    # `count` (the denominator) is only needed for mean-style reductions.
    if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
                     metrics_utils.Reduction.WEIGHTED_MEAN]:
      self.count = self.add_weight(
          'count', initializer=init_ops.zeros_initializer)

  def update_state(self, values, sample_weight=None):
    """Accumulates statistics for computing the metric.

    Args:
      values: Per-example value.
      sample_weight: Optional weighting of each example. Defaults to 1.

    Returns:
      Update op.
    """
    [values], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [values], sample_weight)
    values = math_ops.cast(values, self._dtype)
    if sample_weight is not None:
      sample_weight = math_ops.cast(sample_weight, self._dtype)
      # Update dimensions of weights to match with values if possible.
      values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
          values, sample_weight=sample_weight)
      try:
        # Broadcast weights if possible.
        sample_weight = weights_broadcast_ops.broadcast_weights(
            sample_weight, values)
      except ValueError:
        # Reduce values to same ndim as weight array
        ndim = K.ndim(values)
        weight_ndim = K.ndim(sample_weight)
        if self.reduction == metrics_utils.Reduction.SUM:
          values = math_ops.reduce_sum(
              values, axis=list(range(weight_ndim, ndim)))
        else:
          values = math_ops.reduce_mean(
              values, axis=list(range(weight_ndim, ndim)))
      values = math_ops.multiply(values, sample_weight)

    value_sum = math_ops.reduce_sum(values)
    with ops.control_dependencies([value_sum]):
      update_total_op = self.total.assign_add(value_sum)

    # Exit early if the reduction doesn't have a denominator.
    if self.reduction == metrics_utils.Reduction.SUM:
      return update_total_op

    # Update `count` for reductions that require a denominator.
    if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
      num_values = math_ops.cast(array_ops.size(values), self._dtype)
    elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
      if sample_weight is None:
        num_values = math_ops.cast(array_ops.size(values), self._dtype)
      else:
        num_values = math_ops.reduce_sum(sample_weight)
    else:
      raise NotImplementedError(
          'reduction [%s] not implemented' % self.reduction)

    with ops.control_dependencies([update_total_op]):
      return self.count.assign_add(num_values)

  def result(self):
    if self.reduction == metrics_utils.Reduction.SUM:
      return array_ops.identity(self.total)
    elif self.reduction in [
        metrics_utils.Reduction.WEIGHTED_MEAN,
        metrics_utils.Reduction.SUM_OVER_BATCH_SIZE
    ]:
      # div_no_nan yields 0 (not NaN/inf) when no values have been seen yet.
      return math_ops.div_no_nan(self.total, self.count)
    else:
      raise NotImplementedError(
          'reduction [%s] not implemented' % self.reduction)
@keras_export('keras.metrics.Sum')
class Sum(Reduce):
  """Computes the (weighted) sum of the given values.

  For example, if values is [1, 3, 5, 7] then the sum is 16.
  If the weights were specified as [1, 1, 0, 0] then the sum would be 4.

  This metric creates one variable, `total`, that is used to compute the sum of
  `values`. This is ultimately returned as `sum`.

  If `sample_weight` is `None`, weights default to 1.  Use `sample_weight` of 0
  to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Sum()
  >>> m.update_state([1, 3, 5, 7])
  >>> m.result().numpy()
  16.0

  Usage with `compile()` API:

  ```python
  model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
  model.compile(optimizer='sgd', loss='mse')
  ```
  """

  def __init__(self, name='sum', dtype=None):
    # Delegates all accumulation to Reduce with the SUM reduction.
    super(Sum, self).__init__(reduction=metrics_utils.Reduction.SUM,
                              name=name, dtype=dtype)
@keras_export('keras.metrics.Mean')
class Mean(Reduce):
  """Accumulates a (possibly weighted) running mean of the given values.

  For example, if values is [1, 3, 5, 7] then the mean is 4.
  If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

  Two variables are maintained: `total` accumulates the (weighted) sum of
  the values and `count` accumulates the sum of the weights. The reported
  `mean` is simply `total` divided by `count`, so `result()` is idempotent.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Mean()
  >>> m.update_state([1, 3, 5, 7])
  >>> m.result().numpy()
  4.0
  >>> m.reset_states()
  >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
  >>> m.result().numpy()
  2.0

  Usage with `compile()` API:

  ```python
  model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
  model.compile(optimizer='sgd', loss='mse')
  ```
  """

  def __init__(self, name='mean', dtype=None):
    """Creates a `Mean` instance that accumulates a weighted mean."""
    super(Mean, self).__init__(
        reduction=metrics_utils.Reduction.WEIGHTED_MEAN,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.MeanRelativeError')
class MeanRelativeError(Mean):
  """Computes the mean relative error by normalizing with the given values.

  This metric creates two local variables, `total` and `count` that are used to
  compute the mean relative error. This is weighted by `sample_weight`, and
  it is ultimately returned as `mean_relative_error`:
  an idempotent operation that simply divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    normalizer: The normalizer values with same shape as predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
  >>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])

  >>> # metric = mean(|y_pred - y_true| / normalizer)
  >>> #        = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
  >>> #        = 5/4 = 1.25
  >>> m.result().numpy()
  1.25

  Usage with `compile()` API:

  ```python
  model.compile(
    optimizer='sgd',
    loss='mse',
    metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
  ```
  """

  def __init__(self, normalizer, name=None, dtype=None):
    """Creates a `MeanRelativeError` instance.

    Args:
      normalizer: The normalizer values with same shape as predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(MeanRelativeError, self).__init__(name=name, dtype=dtype)
    # Cast once up front so update_state can divide without re-casting.
    normalizer = math_ops.cast(normalizer, self._dtype)
    self.normalizer = normalizer

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    # Flatten possibly-ragged inputs into compatible flat values.
    [y_pred, y_true], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_pred, y_true], sample_weight)
    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)
    # NOTE(review): this overwrites `self.normalizer` with a possibly
    # squeezed version on every call — confirm the in-place state update
    # is intentional.
    y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(
        y_pred, self.normalizer)
    y_pred.shape.assert_is_compatible_with(y_true.shape)
    # div_no_nan yields 0 for entries where the normalizer is 0.
    relative_errors = math_ops.div_no_nan(
        math_ops.abs(y_true - y_pred), self.normalizer)
    # The per-example relative errors are averaged by the Mean base class.
    return super(MeanRelativeError, self).update_state(
        relative_errors, sample_weight=sample_weight)

  def get_config(self):
    n = self.normalizer
    # Evaluate tensor/variable normalizers so the config is serializable.
    config = {'normalizer': K.eval(n) if is_tensor_or_variable(n) else n}
    base_config = super(MeanRelativeError, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class MeanMetricWrapper(Mean):
  """Wraps a stateless metric function with the Mean metric.

  Args:
    fn: The metric function to wrap, with signature `fn(y_true, y_pred,
      **kwargs)`.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    **kwargs: The keyword arguments that are passed on to `fn`.
  """

  def __init__(self, fn, name=None, dtype=None, **kwargs):
    super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
    self._fn = fn
    # Extra kwargs are forwarded to `fn` on every update_state call.
    self._fn_kwargs = kwargs

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.

    `y_true` and `y_pred` should have the same shape.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
      sample_weight: Optional `sample_weight` acts as a
        coefficient for the metric. If a scalar is provided, then the metric is
        simply scaled by the given value. If `sample_weight` is a tensor of size
        `[batch_size]`, then the metric for each sample of the batch is rescaled
        by the corresponding element in the `sample_weight` vector. If the shape
        of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
        to this shape), then each metric element of `y_pred` is scaled by the
        corresponding value of `sample_weight`. (Note on `dN-1`: all metric
        functions reduce by 1 dimension, usually the last axis (-1)).

    Returns:
      Update op.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    # Flatten possibly-ragged inputs into compatible flat values.
    [y_true, y_pred], sample_weight = \
        metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_true, y_pred], sample_weight)
    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true)
    # Convert `fn` through autograph so it can run inside a tf.function,
    # respecting the current autograph control status.
    ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())
    matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
    # The per-example results of `fn` are averaged by the Mean base class.
    return super(MeanMetricWrapper, self).update_state(
        matches, sample_weight=sample_weight)

  def get_config(self):
    config = {}

    if type(self) is MeanMetricWrapper:  # pylint: disable=unidiomatic-typecheck
      # Only include function argument when the object is a MeanMetricWrapper
      # and not a subclass.
      config['fn'] = self._fn

    for k, v in six.iteritems(self._fn_kwargs):
      # Evaluate tensor-valued kwargs so the config stays serializable.
      config[k] = K.eval(v) if is_tensor_or_variable(v) else v
    base_config = super(MeanMetricWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config):
    # Note that while MeanMetricWrapper itself isn't public, objects of this
    # class may be created and added to the model by calling model.compile.
    fn = config.pop('fn', None)
    if cls is MeanMetricWrapper:
      # Resolve the serialized function name back through the metric registry.
      return cls(get(fn), **config)
    return super(MeanMetricWrapper, cls).from_config(config)
@keras_export('keras.metrics.Accuracy')
class Accuracy(MeanMetricWrapper):
  """Calculates how often predictions equal labels.

  Two variables, `total` and `count`, accumulate how frequently `y_pred`
  matches `y_true`; the reported `binary accuracy` is the idempotent ratio
  `total / count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Accuracy()
  >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
  >>> m.result().numpy()
  0.75

  >>> m.reset_states()
  >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
  ...                sample_weight=[1, 1, 0, 0])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Accuracy()])
  ```
  """

  def __init__(self, name='accuracy', dtype=None):
    """Creates an `Accuracy` instance wrapping the `accuracy` function."""
    super(Accuracy, self).__init__(accuracy, name, dtype=dtype)
@keras_export('keras.metrics.BinaryAccuracy')
class BinaryAccuracy(MeanMetricWrapper):
  """Calculates how often predictions match binary labels.

  Two variables, `total` and `count`, accumulate how frequently `y_pred`
  matches `y_true`; the reported `binary accuracy` is the idempotent ratio
  `total / count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    threshold: (Optional) Float representing the threshold for deciding
      whether prediction values are 1 or 0.

  Standalone usage:

  >>> m = tf.keras.metrics.BinaryAccuracy()
  >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
  >>> m.result().numpy()
  0.75

  >>> m.reset_states()
  >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
  ...                sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.BinaryAccuracy()])
  ```
  """

  def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):
    """Creates a `BinaryAccuracy` instance around `binary_accuracy`."""
    super(BinaryAccuracy, self).__init__(
        binary_accuracy,
        name,
        dtype=dtype,
        threshold=threshold)
@keras_export('keras.metrics.CategoricalAccuracy')
class CategoricalAccuracy(MeanMetricWrapper):
  """Calculates how often predictions matches one-hot labels.

  You can provide logits of classes as `y_pred`, since argmax of
  logits and probabilities are same.

  Two variables, `total` and `count`, accumulate how frequently `y_pred`
  matches `y_true`; the reported `categorical accuracy` is the idempotent
  ratio `total / count`.

  `y_pred` and `y_true` should be passed in as vectors of probabilities, rather
  than as labels. If necessary, use `tf.one_hot` to expand `y_true` as a vector.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.CategoricalAccuracy()
  >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
  ...                 [0.05, 0.95, 0]])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
  ...                 [0.05, 0.95, 0]],
  ...                sample_weight=[0.7, 0.3])
  >>> m.result().numpy()
  0.3

  Usage with `compile()` API:

  ```python
  model.compile(
    optimizer='sgd',
    loss='mse',
    metrics=[tf.keras.metrics.CategoricalAccuracy()])
  ```
  """

  def __init__(self, name='categorical_accuracy', dtype=None):
    """Creates a `CategoricalAccuracy` instance around `categorical_accuracy`."""
    super(CategoricalAccuracy, self).__init__(
        categorical_accuracy,
        name,
        dtype=dtype)
@keras_export('keras.metrics.SparseCategoricalAccuracy')
class SparseCategoricalAccuracy(MeanMetricWrapper):
  """Calculates how often predictions matches integer labels.

  ```python
  acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))
  ```

  You can provide logits of classes as `y_pred`, since argmax of
  logits and probabilities are same.

  Two variables, `total` and `count`, accumulate how frequently `y_pred`
  matches `y_true`; the reported `sparse categorical accuracy` is the
  idempotent ratio `total / count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
  >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
  ...                sample_weight=[0.7, 0.3])
  >>> m.result().numpy()
  0.3

  Usage with `compile()` API:

  ```python
  model.compile(
    optimizer='sgd',
    loss='mse',
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
  ```
  """

  def __init__(self, name='sparse_categorical_accuracy', dtype=None):
    """Creates an instance around `sparse_categorical_accuracy`."""
    super(SparseCategoricalAccuracy, self).__init__(
        sparse_categorical_accuracy,
        name,
        dtype=dtype)
@keras_export('keras.metrics.TopKCategoricalAccuracy')
class TopKCategoricalAccuracy(MeanMetricWrapper):
  """Computes how often targets are in the top `K` predictions.

  Args:
    k: (Optional) Number of top elements to look at for computing accuracy.
      Defaults to 5.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
  >>> m.update_state([[0, 0, 1], [0, 1, 0]],
  ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([[0, 0, 1], [0, 1, 0]],
  ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
  ...                sample_weight=[0.7, 0.3])
  >>> m.result().numpy()
  0.3

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
  ```
  """

  def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):
    """Creates an instance around `top_k_categorical_accuracy` with `k`."""
    super(TopKCategoricalAccuracy, self).__init__(
        top_k_categorical_accuracy,
        name,
        dtype=dtype,
        k=k)
@keras_export('keras.metrics.SparseTopKCategoricalAccuracy')
class SparseTopKCategoricalAccuracy(MeanMetricWrapper):
  """Computes how often integer targets are in the top `K` predictions.

  Args:
    k: (Optional) Number of top elements to look at for computing accuracy.
      Defaults to 5.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
  >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
  ...                sample_weight=[0.7, 0.3])
  >>> m.result().numpy()
  0.3

  Usage with `compile()` API:

  ```python
  model.compile(
    optimizer='sgd',
    loss='mse',
    metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
  ```
  """

  def __init__(self, k=5, name='sparse_top_k_categorical_accuracy', dtype=None):
    """Creates an instance around `sparse_top_k_categorical_accuracy`."""
    super(SparseTopKCategoricalAccuracy, self).__init__(
        sparse_top_k_categorical_accuracy,
        name,
        dtype=dtype,
        k=k)
class _ConfusionMatrixConditionCount(Metric):
  """Accumulates the count of one confusion-matrix condition.

  Args:
    confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions.
    thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple
      of float threshold values in [0, 1]. A threshold is compared with
      prediction values to determine the truth value of predictions (i.e., above
      the threshold is `true`, below is `false`). One metric value is generated
      for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
  """

  def __init__(self,
               confusion_matrix_cond,
               thresholds=None,
               name=None,
               dtype=None):
    super(_ConfusionMatrixConditionCount, self).__init__(name=name, dtype=dtype)
    self._confusion_matrix_cond = confusion_matrix_cond
    # Keep the raw constructor argument so get_config() can round-trip it.
    self.init_thresholds = thresholds
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=0.5)
    # One accumulator slot per threshold.
    self.accumulator = self.add_weight(
        'accumulator',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates the metric statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {self._confusion_matrix_cond: self.accumulator},
        y_true,
        y_pred,
        thresholds=self.thresholds,
        sample_weight=sample_weight)

  def result(self):
    # A single threshold yields a scalar; multiple thresholds yield a vector.
    single = len(self.thresholds) == 1
    value = self.accumulator[0] if single else self.accumulator
    return ops.convert_to_tensor_v2_with_dispatch(value)

  def reset_states(self):
    # Zero the accumulator for every threshold in one batched assignment.
    zeros = np.zeros((len(to_list(self.thresholds)),))
    K.batch_set_value([(v, zeros) for v in self.variables])

  def get_config(self):
    merged = dict(super(_ConfusionMatrixConditionCount, self).get_config())
    merged['thresholds'] = self.init_thresholds
    return merged
@keras_export('keras.metrics.FalsePositives')
class FalsePositives(_ConfusionMatrixConditionCount):
  """Calculates the number of false positives.

  If `sample_weight` is given, calculates the sum of the weights of
  false positives. A single local variable, `accumulator`, keeps track of
  the number of false positives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.FalsePositives()
  >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
  >>> m.result().numpy()
  2.0

  >>> m.reset_states()
  >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.FalsePositives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a counter for the FALSE_POSITIVES condition."""
    cond = metrics_utils.ConfusionMatrix.FALSE_POSITIVES
    super(FalsePositives, self).__init__(
        confusion_matrix_cond=cond,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.FalseNegatives')
class FalseNegatives(_ConfusionMatrixConditionCount):
  """Calculates the number of false negatives.

  If `sample_weight` is given, calculates the sum of the weights of
  false negatives. A single local variable, `accumulator`, keeps track of
  the number of false negatives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.FalseNegatives()
  >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
  >>> m.result().numpy()
  2.0

  >>> m.reset_states()
  >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.FalseNegatives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a counter for the FALSE_NEGATIVES condition."""
    cond = metrics_utils.ConfusionMatrix.FALSE_NEGATIVES
    super(FalseNegatives, self).__init__(
        confusion_matrix_cond=cond,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.TrueNegatives')
class TrueNegatives(_ConfusionMatrixConditionCount):
  """Calculates the number of true negatives.

  If `sample_weight` is given, calculates the sum of the weights of
  true negatives. A single local variable, `accumulator`, keeps track of
  the number of true negatives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.TrueNegatives()
  >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
  >>> m.result().numpy()
  2.0

  >>> m.reset_states()
  >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.TrueNegatives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a counter for the TRUE_NEGATIVES condition."""
    cond = metrics_utils.ConfusionMatrix.TRUE_NEGATIVES
    super(TrueNegatives, self).__init__(
        confusion_matrix_cond=cond,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.TruePositives')
class TruePositives(_ConfusionMatrixConditionCount):
  """Calculates the number of true positives.

  If `sample_weight` is given, calculates the sum of the weights of
  true positives. A single local variable, `accumulator`, keeps track of
  the number of true positives.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    thresholds: (Optional) Defaults to 0.5. A float value or a python
      list/tuple of float threshold values in [0, 1]. A threshold is compared
      with prediction values to determine the truth value of predictions
      (i.e., above the threshold is `true`, below is `false`). One metric
      value is generated for each threshold value.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.TruePositives()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  >>> m.result().numpy()
  2.0

  >>> m.reset_states()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.TruePositives()])
  ```
  """

  def __init__(self, thresholds=None, name=None, dtype=None):
    """Creates a counter for the TRUE_POSITIVES condition."""
    cond = metrics_utils.ConfusionMatrix.TRUE_POSITIVES
    super(TruePositives, self).__init__(
        confusion_matrix_cond=cond,
        thresholds=thresholds,
        name=name,
        dtype=dtype)
@keras_export('keras.metrics.Precision')
class Precision(Metric):
  """Computes the precision of the predictions with respect to the labels.

  The metric creates two local variables, `true_positives` and `false_positives`
  that are used to compute the precision. This value is ultimately returned as
  `precision`, an idempotent operation that simply divides `true_positives`
  by the sum of `true_positives` and `false_positives`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `top_k` is set, we'll calculate precision as how often on average a class
  among the top-k classes with the highest predicted values of a batch entry is
  correct and can be found in the label for that entry.

  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is above the threshold and/or in the
  top-k highest predictions, and computing the fraction of them for which
  `class_id` is indeed a correct label.

  Args:
    thresholds: (Optional) A float value or a python list/tuple of float
      threshold values in [0, 1]. A threshold is compared with prediction
      values to determine the truth value of predictions (i.e., above the
      threshold is `true`, below is `false`). One metric value is generated
      for each threshold value. If neither thresholds nor top_k are set, the
      default is to calculate precision with `thresholds=0.5`.
    top_k: (Optional) Unset by default. An int value specifying the top-k
      predictions to consider when calculating precision.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Precision()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  >>> m.result().numpy()
  0.6666667

  >>> m.reset_states()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  >>> # With top_k=2, it will calculate precision over y_true[:2] and y_pred[:2]
  >>> m = tf.keras.metrics.Precision(top_k=2)
  >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
  >>> m.result().numpy()
  0.0

  >>> # With top_k=4, it will calculate precision over y_true[:4] and y_pred[:4]
  >>> m = tf.keras.metrics.Precision(top_k=4)
  >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Precision()])
  ```
  """

  def __init__(self,
               thresholds=None,
               top_k=None,
               class_id=None,
               name=None,
               dtype=None):
    """Creates a `Precision` instance.

    Args:
      thresholds: (Optional) Threshold value(s) in [0, 1]; one accumulator
        slot is created per threshold.
      top_k: (Optional) Number of top predictions counted as positive.
      class_id: (Optional) Integer class ID to restrict the metric to.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Precision, self).__init__(name=name, dtype=dtype)
    # Keep the raw constructor argument so get_config() can round-trip it.
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    # With `top_k` set, membership in the top-k (not a probability cutoff)
    # decides positives, so the effective threshold is -inf.
    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    # One accumulator slot per threshold.
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false positive statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def result(self):
    # div_no_nan returns 0 when there are no positive predictions at all.
    result = math_ops.div_no_nan(self.true_positives,
                                 self.true_positives + self.false_positives)
    # Scalar for a single threshold, vector otherwise.
    return result[0] if len(self.thresholds) == 1 else result

  def reset_states(self):
    # Zero both confusion-matrix accumulators in one batched assignment.
    num_thresholds = len(to_list(self.thresholds))
    K.batch_set_value(
        [(v, np.zeros((num_thresholds,))) for v in self.variables])

  def get_config(self):
    config = {
        'thresholds': self.init_thresholds,
        'top_k': self.top_k,
        'class_id': self.class_id
    }
    base_config = super(Precision, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.Recall')
class Recall(Metric):
  """Computes the recall of the predictions with respect to the labels.

  This metric creates two local variables, `true_positives` and
  `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply divides
  `true_positives` by the sum of `true_positives` and `false_negatives`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  If `top_k` is set, recall will be computed as how often on average a class
  among the labels of a batch entry is in the top-k predictions.

  If `class_id` is specified, we calculate recall by considering only the
  entries in the batch for which `class_id` is in the label, and computing the
  fraction of them for which `class_id` is above the threshold and/or in the
  top-k predictions.

  Args:
    thresholds: (Optional) A float value or a python list/tuple of float
      threshold values in [0, 1]. A threshold is compared with prediction
      values to determine the truth value of predictions (i.e., above the
      threshold is `true`, below is `false`). One metric value is generated
      for each threshold value. If neither thresholds nor top_k are set, the
      default is to calculate recall with `thresholds=0.5`.
    top_k: (Optional) Unset by default. An int value specifying the top-k
      predictions to consider when calculating recall.
    class_id: (Optional) Integer class ID for which we want binary metrics.
      This must be in the half-open interval `[0, num_classes)`, where
      `num_classes` is the last dimension of predictions.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Recall()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
  >>> m.result().numpy()
  0.6666667

  >>> m.reset_states()
  >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss='mse',
                metrics=[tf.keras.metrics.Recall()])
  ```
  """

  def __init__(self,
               thresholds=None,
               top_k=None,
               class_id=None,
               name=None,
               dtype=None):
    """Creates a `Recall` instance.

    Args:
      thresholds: (Optional) Threshold value(s) in [0, 1]; one accumulator
        slot is created per threshold.
      top_k: (Optional) Number of top predictions counted as positive.
      class_id: (Optional) Integer class ID to restrict the metric to.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Recall, self).__init__(name=name, dtype=dtype)
    # Keep the raw constructor argument so get_config() can round-trip it.
    self.init_thresholds = thresholds
    self.top_k = top_k
    self.class_id = class_id

    # With `top_k` set, membership in the top-k (not a probability cutoff)
    # decides positives, so the effective threshold is -inf.
    default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
    self.thresholds = metrics_utils.parse_init_thresholds(
        thresholds, default_threshold=default_threshold)
    # One accumulator slot per threshold.
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(len(self.thresholds),),
        initializer=init_ops.zeros_initializer)

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates true positive and false negative statistics.

    Args:
      y_true: The ground truth values, with the same dimensions as `y_pred`.
        Will be cast to `bool`.
      y_pred: The predicted values. Each element must be in the range `[0, 1]`.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        top_k=self.top_k,
        class_id=self.class_id,
        sample_weight=sample_weight)

  def result(self):
    # div_no_nan returns 0 when there are no actual positives at all.
    result = math_ops.div_no_nan(self.true_positives,
                                 self.true_positives + self.false_negatives)
    # Scalar for a single threshold, vector otherwise.
    return result[0] if len(self.thresholds) == 1 else result

  def reset_states(self):
    # Zero both confusion-matrix accumulators in one batched assignment.
    num_thresholds = len(to_list(self.thresholds))
    K.batch_set_value(
        [(v, np.zeros((num_thresholds,))) for v in self.variables])

  def get_config(self):
    config = {
        'thresholds': self.init_thresholds,
        'top_k': self.top_k,
        'class_id': self.class_id
    }
    base_config = super(Recall, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@six.add_metaclass(abc.ABCMeta)
class SensitivitySpecificityBase(Metric):
  """Abstract base class for computing sensitivity and specificity.

  For additional information about specificity and sensitivity, see
  [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
  """

  def __init__(self, value, num_thresholds=200, name=None, dtype=None):
    """Creates the shared confusion-matrix state for subclasses.

    Args:
      value: The constraint value subclasses compare statistics against in
        `_find_max_under_constraint`.
      num_thresholds: (Optional) Number of thresholds in [0, 1] over which the
        confusion matrix is evaluated. Defaults to 200.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Raises:
      ValueError: If `num_thresholds` is not > 0.
    """
    super(SensitivitySpecificityBase, self).__init__(name=name, dtype=dtype)
    if num_thresholds <= 0:
      raise ValueError('`num_thresholds` must be > 0.')
    self.value = value
    # One accumulator slot per threshold for each confusion-matrix entry.
    self.true_positives = self.add_weight(
        'true_positives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=(num_thresholds,),
        initializer=init_ops.zeros_initializer)

    # Compute `num_thresholds` thresholds in [0, 1]
    if num_thresholds == 1:
      self.thresholds = [0.5]
    else:
      # Evenly spaced interior points plus explicit 0.0 and 1.0 endpoints.
      thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                    for i in range(num_thresholds - 2)]
      self.thresholds = [0.0] + thresholds + [1.0]

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    return metrics_utils.update_confusion_matrix_variables(
        {
            metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
            metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
            metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
            metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
        },
        y_true,
        y_pred,
        thresholds=self.thresholds,
        sample_weight=sample_weight)

  def reset_states(self):
    # Zero all four confusion-matrix accumulators in one batched assignment.
    num_thresholds = len(self.thresholds)
    K.batch_set_value(
        [(v, np.zeros((num_thresholds,))) for v in self.variables])

  def _find_max_under_constraint(self, constrained, dependent, predicate):
    """Returns the maximum of dependent_statistic that satisfies the constraint.

    Args:
      constrained: Over these values the constraint
        is specified. A rank-1 tensor.
      dependent: From these values the maximum that satisfies the
        constraint is selected. Values in this tensor and in
        `constrained` are linked by having the same threshold at each
        position, hence this tensor must have the same shape.
      predicate: A binary boolean functor to be applied to arguments
        `constrained` and `self.value`, e.g. `tf.greater`.

    Returns:
      Maximal dependent value; 0.0 if no value satisfies the constraint.
    """
    # Indices of threshold positions where the constraint holds.
    feasible = array_ops.where(predicate(constrained, self.value))
    feasible_exists = math_ops.greater(array_ops.size(feasible), 0)

    def get_max():
      return math_ops.reduce_max(array_ops.gather(dependent, feasible))

    # Fall back to 0.0 when no threshold satisfies the constraint.
    return control_flow_ops.cond(feasible_exists, get_max, lambda: 0.0)
@keras_export('keras.metrics.SensitivityAtSpecificity')
class SensitivityAtSpecificity(SensitivitySpecificityBase):
  """Computes best sensitivity where specificity is >= specified value.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).

  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the
  sensitivity at the given specificity. The threshold for the given specificity
  value is computed and used to evaluate the corresponding sensitivity.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  For additional information about specificity and sensitivity, see
  [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

  Args:
    specificity: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given specificity.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SensitivityAtSpecificity(0.5)
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
  ...                sample_weight=[1, 1, 2, 2, 1])
  >>> m.result().numpy()
  0.333333

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SensitivityAtSpecificity()])
  ```
  """

  def __init__(self, specificity, num_thresholds=200, name=None, dtype=None):
    # Reject constraint values outside the valid probability range.
    if specificity < 0 or specificity > 1:
      raise ValueError('`specificity` must be in the range [0, 1].')
    self.specificity = specificity
    self.num_thresholds = num_thresholds
    super(SensitivityAtSpecificity, self).__init__(
        specificity, num_thresholds=num_thresholds, name=name, dtype=dtype)

  def result(self):
    """Returns the best sensitivity whose specificity meets the target."""
    tn, fp = self.true_negatives, self.false_positives
    tp, fn = self.true_positives, self.false_negatives
    specificities = math_ops.div_no_nan(tn, tn + fp)
    sensitivities = math_ops.div_no_nan(tp, tp + fn)
    return self._find_max_under_constraint(
        specificities, sensitivities, math_ops.greater_equal)

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(SensitivityAtSpecificity, self).get_config())
    config.update({
        'num_thresholds': self.num_thresholds,
        'specificity': self.specificity
    })
    return config
@keras_export('keras.metrics.SpecificityAtSensitivity')
class SpecificityAtSensitivity(SensitivitySpecificityBase):
  """Computes best specificity where sensitivity is >= specified value.

  `Sensitivity` measures the proportion of actual positives that are correctly
  identified as such (tp / (tp + fn)).
  `Specificity` measures the proportion of actual negatives that are correctly
  identified as such (tn / (tn + fp)).

  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the
  specificity at the given sensitivity. The threshold for the given sensitivity
  value is computed and used to evaluate the corresponding specificity.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  For additional information about specificity and sensitivity, see
  [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

  Args:
    sensitivity: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given sensitivity.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.SpecificityAtSensitivity(0.5)
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
  >>> m.result().numpy()
  0.66666667

  >>> m.reset_states()
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
  ...                sample_weight=[1, 1, 2, 2, 2])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SpecificityAtSensitivity()])
  ```
  """

  def __init__(self, sensitivity, num_thresholds=200, name=None, dtype=None):
    # Reject constraint values outside the valid probability range.
    if sensitivity < 0 or sensitivity > 1:
      raise ValueError('`sensitivity` must be in the range [0, 1].')
    self.sensitivity = sensitivity
    self.num_thresholds = num_thresholds
    super(SpecificityAtSensitivity, self).__init__(
        sensitivity, num_thresholds=num_thresholds, name=name, dtype=dtype)

  def result(self):
    """Returns the best specificity whose sensitivity meets the target."""
    tp, fn = self.true_positives, self.false_negatives
    tn, fp = self.true_negatives, self.false_positives
    sensitivities = math_ops.div_no_nan(tp, tp + fn)
    specificities = math_ops.div_no_nan(tn, tn + fp)
    return self._find_max_under_constraint(
        sensitivities, specificities, math_ops.greater_equal)

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(SpecificityAtSensitivity, self).get_config())
    config.update({
        'num_thresholds': self.num_thresholds,
        'sensitivity': self.sensitivity
    })
    return config
@keras_export('keras.metrics.PrecisionAtRecall')
class PrecisionAtRecall(SensitivitySpecificityBase):
  """Computes best precision where recall is >= specified value.

  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the
  precision at the given recall. The threshold for the given recall
  value is computed and used to evaluate the corresponding precision.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    recall: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given recall.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.PrecisionAtRecall(0.5)
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
  ...                sample_weight=[2, 2, 2, 1, 1])
  >>> m.result().numpy()
  0.33333333

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
  ```
  """

  def __init__(self, recall, num_thresholds=200, name=None, dtype=None):
    # Reject constraint values outside the valid probability range.
    if recall < 0 or recall > 1:
      raise ValueError('`recall` must be in the range [0, 1].')
    self.recall = recall
    self.num_thresholds = num_thresholds
    super(PrecisionAtRecall, self).__init__(
        value=recall, num_thresholds=num_thresholds, name=name, dtype=dtype)

  def result(self):
    """Returns the best precision whose recall meets the target."""
    tp = self.true_positives
    recalls = math_ops.div_no_nan(tp, tp + self.false_negatives)
    precisions = math_ops.div_no_nan(tp, tp + self.false_positives)
    return self._find_max_under_constraint(
        recalls, precisions, math_ops.greater_equal)

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(PrecisionAtRecall, self).get_config())
    config.update({'num_thresholds': self.num_thresholds,
                   'recall': self.recall})
    return config
@keras_export('keras.metrics.RecallAtPrecision')
class RecallAtPrecision(SensitivitySpecificityBase):
  """Computes best recall where precision is >= specified value.

  For a given score-label-distribution the required precision might not
  be achievable, in this case 0.0 is returned as recall.

  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the
  recall at the given precision. The threshold for the given precision
  value is computed and used to evaluate the corresponding recall.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    precision: A scalar value in range `[0, 1]`.
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use for matching the given precision.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.RecallAtPrecision(0.8)
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> m.result().numpy()
  0.5

  >>> m.reset_states()
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)])
  ```
  """

  def __init__(self, precision, num_thresholds=200, name=None, dtype=None):
    # Reject constraint values outside the valid probability range.
    if precision < 0 or precision > 1:
      raise ValueError('`precision` must be in the range [0, 1].')
    self.precision = precision
    self.num_thresholds = num_thresholds
    super(RecallAtPrecision, self).__init__(
        value=precision, num_thresholds=num_thresholds, name=name,
        dtype=dtype)

  def result(self):
    """Returns the best recall whose precision meets the target."""
    tp = self.true_positives
    precisions = math_ops.div_no_nan(tp, tp + self.false_positives)
    recalls = math_ops.div_no_nan(tp, tp + self.false_negatives)
    return self._find_max_under_constraint(
        precisions, recalls, math_ops.greater_equal)

  def get_config(self):
    """Returns the serializable config of the metric."""
    config = dict(super(RecallAtPrecision, self).get_config())
    config.update({'num_thresholds': self.num_thresholds,
                   'precision': self.precision})
    return config
@keras_export('keras.metrics.AUC')
class AUC(Metric):
  """Computes the approximate AUC (Area under the curve) via a Riemann sum.

  This metric creates four local variables, `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` that are used to compute the AUC.
  To discretize the AUC curve, a linearly spaced set of thresholds is used to
  compute pairs of recall and precision values. The area under the ROC-curve is
  therefore computed using the height of the recall values by the false positive
  rate, while the area under the PR-curve is the computed using the height of
  the precision values by the recall.

  This value is ultimately returned as `auc`, an idempotent operation that
  computes the area under a discretized curve of precision versus recall values
  (computed using the aforementioned variables). The `num_thresholds` variable
  controls the degree of discretization with larger numbers of thresholds more
  closely approximating the true AUC. The quality of the approximation may vary
  dramatically depending on `num_thresholds`. The `thresholds` parameter can be
  used to manually specify thresholds which split the predictions more evenly.

  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
  approximation may be poor if this is not the case. Setting `summation_method`
  to 'minoring' or 'majoring' can help quantify the error in the approximation
  by providing lower or upper bound estimate of the AUC.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.

  Args:
    num_thresholds: (Optional) Defaults to 200. The number of thresholds to
      use when discretizing the roc curve. Values must be > 1.
    curve: (Optional) Specifies the name of the curve to be computed, 'ROC'
      [default] or 'PR' for the Precision-Recall-curve.
    summation_method: (Optional) Specifies the [Riemann summation method](
      https://en.wikipedia.org/wiki/Riemann_sum) used.
      'interpolation' (default) applies mid-point summation scheme for `ROC`.
      For PR-AUC, interpolates (true/false) positives but not the ratio that
      is precision (see Davis & Goadrich 2006 for details);
      'minoring' applies left summation
      for increasing intervals and right summation for decreasing intervals;
      'majoring' does the opposite.
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    thresholds: (Optional) A list of floating point values to use as the
      thresholds for discretizing the curve. If set, the `num_thresholds`
      parameter is ignored. Values should be in [0, 1]. Endpoint thresholds
      equal to {-epsilon, 1+epsilon} for a small positive epsilon value will
      be automatically included with these to correctly handle predictions
      equal to exactly 0 or 1.
    multi_label: boolean indicating whether multilabel data should be
      treated as such, wherein AUC is computed separately for each label and
      then averaged across labels, or (when False) if the data should be
      flattened into a single label before AUC computation. In the latter
      case, when multilabel data is passed to AUC, each label-prediction pair
      is treated as an individual data point. Should be set to False for
      multi-class data.
    label_weights: (optional) list, array, or tensor of non-negative weights
      used to compute AUCs for multilabel data. When `multi_label` is True,
      the weights are applied to the individual label AUCs when they are
      averaged to produce the multi-label AUC. When it's False, they are used
      to weight the individual label predictions in computing the confusion
      matrix on the flattened data. Note that this is unlike class_weights in
      that class_weights weights the example depending on the value of its
      label, whereas label_weights depends only on the index of that label
      before flattening; therefore `label_weights` should not be used for
      multi-class data.

  Standalone usage:

  >>> m = tf.keras.metrics.AUC(num_thresholds=3)
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
  >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
  >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
  >>> # recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
  >>> # auc = ((((1+0.5)/2)*(1-0))+ (((0.5+0)/2)*(0-0))) = 0.75
  >>> m.result().numpy()
  0.75

  >>> m.reset_states()
  >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
  ...                sample_weight=[1, 0, 0, 1])
  >>> m.result().numpy()
  1.0

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.AUC()])
  ```
  """

  def __init__(self,
               num_thresholds=200,
               curve='ROC',
               summation_method='interpolation',
               name=None,
               dtype=None,
               thresholds=None,
               multi_label=False,
               label_weights=None):
    """Creates the AUC metric and (unless multi-label) its state variables."""
    # Validate configurations.
    # NOTE(review): these two isinstance-guarded checks can never raise — an
    # enum member is always contained in list(<its enum>). String inputs are
    # instead validated by the `from_str` conversions further below.
    if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
        metrics_utils.AUCCurve):
      raise ValueError('Invalid curve: "{}". Valid options are: "{}"'.format(
          curve, list(metrics_utils.AUCCurve)))
    if isinstance(
        summation_method,
        metrics_utils.AUCSummationMethod) and summation_method not in list(
            metrics_utils.AUCSummationMethod):
      raise ValueError(
          'Invalid summation method: "{}". Valid options are: "{}"'.format(
              summation_method, list(metrics_utils.AUCSummationMethod)))
    # Update properties.
    if thresholds is not None:
      # If specified, use the supplied thresholds.
      self.num_thresholds = len(thresholds) + 2
      thresholds = sorted(thresholds)
    else:
      if num_thresholds <= 1:
        raise ValueError('`num_thresholds` must be > 1.')
      # Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
      # (0, 1).
      self.num_thresholds = num_thresholds
      thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                    for i in range(num_thresholds - 2)]
    # Add an endpoint "threshold" below zero and above one for either
    # threshold method to account for floating point imprecisions.
    self._thresholds = np.array([0.0 - K.epsilon()] + thresholds +
                                [1.0 + K.epsilon()])
    # Accept either enum members or their string names for curve and
    # summation method.
    if isinstance(curve, metrics_utils.AUCCurve):
      self.curve = curve
    else:
      self.curve = metrics_utils.AUCCurve.from_str(curve)
    if isinstance(summation_method, metrics_utils.AUCSummationMethod):
      self.summation_method = summation_method
    else:
      self.summation_method = metrics_utils.AUCSummationMethod.from_str(
          summation_method)
    super(AUC, self).__init__(name=name, dtype=dtype)
    # Handle multilabel arguments.
    self.multi_label = multi_label
    if label_weights is not None:
      # Attach a non-negativity assertion so invalid weights fail at use time.
      label_weights = constant_op.constant(label_weights, dtype=self.dtype)
      checks = [
          check_ops.assert_non_negative(
              label_weights,
              message='All values of `label_weights` must be non-negative.')
      ]
      self.label_weights = control_flow_ops.with_dependencies(
          checks, label_weights)
    else:
      self.label_weights = None
    self._built = False
    if self.multi_label:
      # Variable shapes depend on the number of labels, which is only known
      # once the first batch arrives; defer building to update_state.
      self._num_labels = None
    else:
      self._build(None)

  @property
  def thresholds(self):
    """The thresholds used for evaluating AUC."""
    return list(self._thresholds)

  def _build(self, shape):
    """Initialize TP, FP, TN, and FN tensors, given the shape of the data."""
    if self.multi_label:
      if shape.ndims != 2:
        raise ValueError('`y_true` must have rank=2 when `multi_label` is '
                         'True. Found rank %s.' % shape.ndims)
      self._num_labels = shape[1]
      variable_shape = tensor_shape.TensorShape(
          [tensor_shape.Dimension(self.num_thresholds), self._num_labels])
    else:
      variable_shape = tensor_shape.TensorShape(
          [tensor_shape.Dimension(self.num_thresholds)])
    self._build_input_shape = shape
    # Create metric variables
    self.true_positives = self.add_weight(
        'true_positives',
        shape=variable_shape,
        initializer=init_ops.zeros_initializer)
    self.true_negatives = self.add_weight(
        'true_negatives',
        shape=variable_shape,
        initializer=init_ops.zeros_initializer)
    self.false_positives = self.add_weight(
        'false_positives',
        shape=variable_shape,
        initializer=init_ops.zeros_initializer)
    self.false_negatives = self.add_weight(
        'false_negatives',
        shape=variable_shape,
        initializer=init_ops.zeros_initializer)
    if self.multi_label:
      with ops.init_scope():
        # This should only be necessary for handling v1 behavior. In v2, AUC
        # should be initialized outside of any tf.functions, and therefore in
        # eager mode.
        if not context.executing_eagerly():
          K._initialize_variables(K._get_session())  # pylint: disable=protected-access
    self._built = True

  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates confusion matrix statistics.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be a
        `Tensor` whose rank is either 0, or the same rank as `y_true`, and must
        be broadcastable to `y_true`.

    Returns:
      Update op.
    """
    deps = []
    if not self._built:
      # Lazily build state on the first batch (needed for multi-label mode).
      self._build(tensor_shape.TensorShape(y_pred.shape))
    if self.multi_label or (self.label_weights is not None):
      # y_true should have shape (number of examples, number of labels).
      shapes = [
          (y_true, ('N', 'L'))
      ]
      if self.multi_label:
        # TP, TN, FP, and FN should all have shape
        # (number of thresholds, number of labels).
        shapes.extend([(self.true_positives, ('T', 'L')),
                       (self.true_negatives, ('T', 'L')),
                       (self.false_positives, ('T', 'L')),
                       (self.false_negatives, ('T', 'L'))])
      if self.label_weights is not None:
        # label_weights should be of length equal to the number of labels.
        shapes.append((self.label_weights, ('L',)))
      deps = [
          check_ops.assert_shapes(
              shapes, message='Number of labels is not consistent.')
      ]
    # Only forward label_weights to update_confusion_matrix_variables when
    # multi_label is False. Otherwise the averaging of individual label AUCs is
    # handled in AUC.result
    label_weights = None if self.multi_label else self.label_weights
    with ops.control_dependencies(deps):
      return metrics_utils.update_confusion_matrix_variables(
          {
              metrics_utils.ConfusionMatrix.TRUE_POSITIVES:
                  self.true_positives,
              metrics_utils.ConfusionMatrix.TRUE_NEGATIVES:
                  self.true_negatives,
              metrics_utils.ConfusionMatrix.FALSE_POSITIVES:
                  self.false_positives,
              metrics_utils.ConfusionMatrix.FALSE_NEGATIVES:
                  self.false_negatives,
          },
          y_true,
          y_pred,
          self._thresholds,
          sample_weight=sample_weight,
          multi_label=self.multi_label,
          label_weights=label_weights)

  def interpolate_pr_auc(self):
    """Interpolation formula inspired by section 4 of Davis & Goadrich 2006.

    https://www.biostat.wisc.edu/~page/rocpr.pdf

    Note here we derive & use a closed formula not present in the paper
    as follows:

      Precision = TP / (TP + FP) = TP / P

    Modeling all of TP (true positive), FP (false positive) and their sum
    P = TP + FP (predicted positive) as varying linearly within each interval
    [A, B] between successive thresholds, we get

      Precision slope = dTP / dP
                      = (TP_B - TP_A) / (P_B - P_A)
                      = (TP - TP_A) / (P - P_A)
      Precision = (TP_A + slope * (P - P_A)) / P

    The area within the interval is (slope / total_pos_weight) times

      int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
      int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}

    where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in

      int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)

    Bringing back the factor (slope / total_pos_weight) we'd put aside, we get

      slope * [dTP + intercept *  log(P_B / P_A)] / total_pos_weight

    where dTP == TP_B - TP_A.

    Note that when P_A == 0 the above calculation simplifies into

      int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)

    which is really equivalent to imputing constant precision throughout the
    first bucket having >0 true positives.

    Returns:
      pr_auc: an approximation of the area under the P-R curve.
    """
    # Per-interval deltas between consecutive thresholds (thresholds are
    # ordered, so index i and i+1 bound one interval).
    dtp = self.true_positives[:self.num_thresholds -
                              1] - self.true_positives[1:]
    p = self.true_positives + self.false_positives
    dp = p[:self.num_thresholds - 1] - p[1:]
    prec_slope = math_ops.div_no_nan(
        dtp, math_ops.maximum(dp, 0), name='prec_slope')
    intercept = self.true_positives[1:] - math_ops.multiply(prec_slope, p[1:])
    # Guard the log ratio: only use P_A / P_B where both are positive,
    # otherwise substitute 1 so the log term vanishes.
    safe_p_ratio = array_ops.where(
        math_ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
        math_ops.div_no_nan(
            p[:self.num_thresholds - 1],
            math_ops.maximum(p[1:], 0),
            name='recall_relative_ratio'),
        array_ops.ones_like(p[1:]))
    pr_auc_increment = math_ops.div_no_nan(
        prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
        math_ops.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),
        name='pr_auc_increment')
    if self.multi_label:
      by_label_auc = math_ops.reduce_sum(
          pr_auc_increment, name=self.name + '_by_label', axis=0)
      if self.label_weights is None:
        # Evenly weighted average of the label AUCs.
        return math_ops.reduce_mean(by_label_auc, name=self.name)
      else:
        # Weighted average of the label AUCs.
        return math_ops.div_no_nan(
            math_ops.reduce_sum(
                math_ops.multiply(by_label_auc, self.label_weights)),
            math_ops.reduce_sum(self.label_weights),
            name=self.name)
    else:
      return math_ops.reduce_sum(pr_auc_increment, name='interpolate_pr_auc')

  def result(self):
    """Computes the AUC as a Riemann sum over the discretized curve."""
    if (self.curve == metrics_utils.AUCCurve.PR and
        self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION
       ):
      # This use case is different and is handled separately.
      return self.interpolate_pr_auc()
    # Set `x` and `y` values for the curves based on `curve` config.
    recall = math_ops.div_no_nan(self.true_positives,
                                 self.true_positives + self.false_negatives)
    if self.curve == metrics_utils.AUCCurve.ROC:
      fp_rate = math_ops.div_no_nan(self.false_positives,
                                    self.false_positives + self.true_negatives)
      x = fp_rate
      y = recall
    else:  # curve == 'PR'.
      precision = math_ops.div_no_nan(
          self.true_positives, self.true_positives + self.false_positives)
      x = recall
      y = precision
    # Find the rectangle heights based on `summation_method`.
    if self.summation_method == metrics_utils.AUCSummationMethod.INTERPOLATION:
      # Note: the case ('PR', 'interpolation') has been handled above.
      heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
    elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
      heights = math_ops.minimum(y[:self.num_thresholds - 1], y[1:])
    else:  # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
      heights = math_ops.maximum(y[:self.num_thresholds - 1], y[1:])
    # Sum up the areas of all the rectangles.
    if self.multi_label:
      riemann_terms = math_ops.multiply(x[:self.num_thresholds - 1] - x[1:],
                                        heights)
      by_label_auc = math_ops.reduce_sum(
          riemann_terms, name=self.name + '_by_label', axis=0)
      if self.label_weights is None:
        # Unweighted average of the label AUCs.
        return math_ops.reduce_mean(by_label_auc, name=self.name)
      else:
        # Weighted average of the label AUCs.
        return math_ops.div_no_nan(
            math_ops.reduce_sum(
                math_ops.multiply(by_label_auc, self.label_weights)),
            math_ops.reduce_sum(self.label_weights),
            name=self.name)
    else:
      return math_ops.reduce_sum(
          math_ops.multiply(x[:self.num_thresholds - 1] - x[1:], heights),
          name=self.name)

  def reset_states(self):
    """Resets the confusion-matrix variables to zero.

    NOTE(review): in multi-label mode this reads `self._num_labels`, which is
    only set once `_build` has run (i.e. after the first `update_state`).
    """
    if self.multi_label:
      K.batch_set_value([(v, np.zeros((self.num_thresholds, self._num_labels)))
                         for v in self.variables])
    else:
      K.batch_set_value([
          (v, np.zeros((self.num_thresholds,))) for v in self.variables
      ])

  def get_config(self):
    """Returns the serializable config of the metric."""
    if is_tensor_or_variable(self.label_weights):
      # Serialize tensor weights as a concrete value.
      label_weights = K.eval(self.label_weights)
    else:
      label_weights = self.label_weights
    config = {
        'num_thresholds': self.num_thresholds,
        'curve': self.curve.value,
        'summation_method': self.summation_method.value,
        # We remove the endpoint thresholds as an inverse of how the thresholds
        # were initialized. This ensures that a metric initialized from this
        # config has the same thresholds.
        'thresholds': self.thresholds[1:-1],
        'multi_label': self.multi_label,
        'label_weights': label_weights
    }
    base_config = super(AUC, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.CosineSimilarity')
class CosineSimilarity(MeanMetricWrapper):
  """Computes the cosine similarity between the labels and predictions.

  `cosine similarity = (a . b) / ||a|| ||b||`

  See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).

  This metric keeps the average cosine similarity between `predictions` and
  `labels` over a stream of data.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.

  Standalone usage:

  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  >>> #        = ((0. + 0.) +  (0.5 + 0.5)) / 2
  >>> m = tf.keras.metrics.CosineSimilarity(axis=1)
  >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  >>> m.result().numpy()
  0.49999997

  >>> m.reset_states()
  >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
  ...                sample_weight=[0.3, 0.7])
  >>> m.result().numpy()
  0.6999999

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
  ```
  """

  def __init__(self, name='cosine_similarity', dtype=None, axis=-1):
    # Delegate the per-batch computation to the `cosine_similarity` function;
    # MeanMetricWrapper averages its outputs over the stream.
    super(CosineSimilarity, self).__init__(
        cosine_similarity, name=name, dtype=dtype, axis=axis)
@keras_export('keras.metrics.MeanAbsoluteError')
class MeanAbsoluteError(MeanMetricWrapper):
  """Computes the mean absolute error between the labels and predictions.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanAbsoluteError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.25

  >>> m.reset_states()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanAbsoluteError()])
  ```
  """

  def __init__(self, name='mean_absolute_error', dtype=None):
    # Delegate the per-batch computation to the `mean_absolute_error` loss fn;
    # MeanMetricWrapper averages its outputs over the stream.
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, name=name, dtype=dtype)
@keras_export('keras.metrics.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(MeanMetricWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanAbsolutePercentageError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  250000000.0

  >>> m.reset_states()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  500000000.0

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
  ```
  """

  def __init__(self, name='mean_absolute_percentage_error', dtype=None):
    # Delegate the per-batch computation to `mean_absolute_percentage_error`;
    # MeanMetricWrapper averages its outputs over the stream.
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, name=name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredError')
class MeanSquaredError(MeanMetricWrapper):
  """Computes the mean squared error between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanSquaredError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.25

  >>> m.reset_states()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.5

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanSquaredError()])
  ```
  """

  def __init__(self, name='mean_squared_error', dtype=None):
    # Delegate the per-batch computation to the `mean_squared_error` loss fn;
    # MeanMetricWrapper averages its outputs over the stream.
    super(MeanSquaredError, self).__init__(
        mean_squared_error, name=name, dtype=dtype)
@keras_export('keras.metrics.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(MeanMetricWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
  >>> m.result().numpy()
  0.12011322

  >>> m.reset_states()
  >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.24022643

  Usage with `compile()` API:

  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
  ```
  """

  def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
    # Delegate the per-batch computation to `mean_squared_logarithmic_error`;
    # MeanMetricWrapper averages its outputs over the stream.
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, name=name, dtype=dtype)
@keras_export('keras.metrics.Hinge')
class Hinge(MeanMetricWrapper):
  """Computes the hinge metric between `y_true` and `y_pred`.

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.

  Standalone usage:

  >>> m = tf.keras.metrics.Hinge()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  1.3

  >>> m.reset_states()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  1.1

  Usage with `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
  ```
  """

  def __init__(self, name='hinge', dtype=None):
    # Delegate the per-batch computation to the `hinge` loss function;
    # MeanMetricWrapper averages its outputs over the stream.
    super(Hinge, self).__init__(hinge, name=name, dtype=dtype)
@keras_export('keras.metrics.SquaredHinge')
class SquaredHinge(MeanMetricWrapper):
"""Computes the squared hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.86
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.46
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SquaredHinge()])
```
"""
def __init__(self, name='squared_hinge', dtype=None):
super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)
@keras_export('keras.metrics.CategoricalHinge')
class CategoricalHinge(MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.2
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalHinge()])
```
"""
def __init__(self, name='categorical_hinge', dtype=None):
super(CategoricalHinge, self).__init__(categorical_hinge, name, dtype=dtype)
@keras_export('keras.metrics.RootMeanSquaredError')
class RootMeanSquaredError(Mean):
"""Computes root mean squared error metric between `y_true` and `y_pred`.
Standalone usage:
>>> m = tf.keras.metrics.RootMeanSquaredError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.70710677
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.RootMeanSquaredError()])
```
"""
def __init__(self, name='root_mean_squared_error', dtype=None):
super(RootMeanSquaredError, self).__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates root mean squared error statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
error_sq = math_ops.squared_difference(y_pred, y_true)
return super(RootMeanSquaredError, self).update_state(
error_sq, sample_weight=sample_weight)
def result(self):
return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))
@keras_export('keras.metrics.LogCoshError')
class LogCoshError(MeanMetricWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true)
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.LogCoshError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.10844523
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.21689045
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.LogCoshError()])
```
"""
def __init__(self, name='logcosh', dtype=None):
super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)
@keras_export('keras.metrics.Poisson')
class Poisson(MeanMetricWrapper):
"""Computes the Poisson metric between `y_true` and `y_pred`.
`metric = y_pred - y_true * log(y_pred)`
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Poisson()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.49999997
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.99999994
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Poisson()])
```
"""
def __init__(self, name='poisson', dtype=None):
super(Poisson, self).__init__(poisson, name, dtype=dtype)
@keras_export('keras.metrics.KLDivergence')
class KLDivergence(MeanMetricWrapper):
"""Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.
`metric = y_true * log(y_true / y_pred)`
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.KLDivergence()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
0.45814306
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.9162892
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.KLDivergence()])
```
"""
def __init__(self, name='kullback_leibler_divergence', dtype=None):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name, dtype=dtype)
@keras_export('keras.metrics.MeanIoU')
class MeanIoU(Metric):
"""Computes the mean Intersection-Over-Union metric.
Mean Intersection-Over-Union is a common evaluation metric for semantic image
segmentation, which first computes the IOU for each semantic class and then
computes the average over classes. IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives))
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = tf.keras.metrics.MeanIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result().numpy()
0.33333334
>>> m.reset_states()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(self, num_classes, name=None, dtype=None):
super(MeanIoU, self).__init__(name=name, dtype=dtype)
self.num_classes = num_classes
# Variable to accumulate the predictions in the confusion matrix. Setting
# the type to be `float64` as required by confusion_matrix_ops.
self.total_cm = self.add_weight(
'total_confusion_matrix',
shape=(num_classes, num_classes),
initializer=init_ops.zeros_initializer,
dtype=dtypes.float64)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
# Flatten the input if its rank > 1.
if y_pred.shape.ndims > 1:
y_pred = array_ops.reshape(y_pred, [-1])
if y_true.shape.ndims > 1:
y_true = array_ops.reshape(y_true, [-1])
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
if sample_weight.shape.ndims > 1:
sample_weight = array_ops.reshape(sample_weight, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
y_true,
y_pred,
self.num_classes,
weights=sample_weight,
dtype=dtypes.float64)
return self.total_cm.assign_add(current_cm)
def result(self):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)
sum_over_col = math_ops.cast(
math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)
true_positives = math_ops.cast(
array_ops.tensor_diag_part(self.total_cm), dtype=self._dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))
iou = math_ops.div_no_nan(true_positives, denominator)
return math_ops.div_no_nan(
math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)
def reset_states(self):
K.set_value(self.total_cm, np.zeros((self.num_classes, self.num_classes)))
def get_config(self):
config = {'num_classes': self.num_classes}
base_config = super(MeanIoU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.metrics.MeanTensor')
class MeanTensor(Metric):
"""Computes the element-wise (weighted) mean of the given tensors.
`MeanTensor` returns a tensor with the same shape of the input tensors. The
mean value is updated by keeping local variables `total` and `count`. The
`total` tracks the sum of the weighted values, and `count` stores the sum of
the weighted counts.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanTensor()
>>> m.update_state([0, 1, 2, 3])
>>> m.update_state([4, 5, 6, 7])
>>> m.result().numpy()
array([2., 3., 4., 5.], dtype=float32)
>>> m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1])
>>> m.result().numpy()
array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)
"""
def __init__(self, name='mean_tensor', dtype=None):
super(MeanTensor, self).__init__(name=name, dtype=dtype)
self._shape = None
self._total = None
self._count = None
self._built = False
def _build(self, shape):
self._shape = tensor_shape.TensorShape(shape)
self._build_input_shape = self._shape
# Create new state variables
self._total = self.add_weight(
'total', shape=shape, initializer=init_ops.zeros_initializer)
self._count = self.add_weight(
'count', shape=shape, initializer=init_ops.zeros_initializer)
with ops.init_scope():
if not context.executing_eagerly():
K._initialize_variables(K._get_session()) # pylint: disable=protected-access
self._built = True
@property
def total(self):
return self._total if self._built else None
@property
def count(self):
return self._count if self._built else None
def update_state(self, values, sample_weight=None):
"""Accumulates statistics for computing the element-wise mean.
Args:
values: Per-example value.
sample_weight: Optional weighting of each example. Defaults to 1.
Returns:
Update op.
"""
values = math_ops.cast(values, self._dtype)
if not self._built:
self._build(values.shape)
elif values.shape != self._shape:
raise ValueError('MeanTensor input values must always have the same '
'shape. Expected shape (set during the first call): {}. '
'Got: {}'.format(self._shape, values.shape))
num_values = array_ops.ones_like(values)
if sample_weight is not None:
sample_weight = math_ops.cast(sample_weight, self._dtype)
# Update dimensions of weights to match with values if possible.
values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(
values, sample_weight=sample_weight)
try:
# Broadcast weights if possible.
sample_weight = weights_broadcast_ops.broadcast_weights(
sample_weight, values)
except ValueError:
# Reduce values to same ndim as weight array
ndim = K.ndim(values)
weight_ndim = K.ndim(sample_weight)
values = math_ops.reduce_mean(
values, axis=list(range(weight_ndim, ndim)))
num_values = math_ops.multiply(num_values, sample_weight)
values = math_ops.multiply(values, sample_weight)
update_total_op = self._total.assign_add(values)
with ops.control_dependencies([update_total_op]):
return self._count.assign_add(num_values)
def result(self):
if not self._built:
raise ValueError(
'MeanTensor does not have any result yet. Please call the MeanTensor '
'instance or use `.update_state(value)` before retrieving the result.'
)
return math_ops.div_no_nan(self.total, self.count)
def reset_states(self):
if self._built:
K.batch_set_value(
[(v, np.zeros(self._shape.as_list())) for v in self.variables])
@keras_export('keras.metrics.BinaryCrossentropy')
class BinaryCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are only two
label classes (0 and 1).
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional )Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
smoothed, meaning the confidence on label values are relaxed.
e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
label `0` and `0.9` for label `1`".
Standalone usage:
>>> m = tf.keras.metrics.BinaryCrossentropy()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
0.81492424
>>> m.reset_states()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.9162905
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.BinaryCrossentropy()])
```
"""
def __init__(self,
name='binary_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.CategoricalCrossentropy')
class CategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
This is the crossentropy metric class to be used when there are multiple
label classes (2 or more). Here we assume that labels are given as a `one_hot`
representation. eg., When labels values are [2, 0, 1],
`y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
smoothed, meaning the confidence on label values are relaxed. e.g.
`label_smoothing=0.2` means that we will use a value of `0.1` for label
`0` and `0.9` for label `1`"
Standalone usage:
>>> # EPSILON = 1e-7, y = y_true, y` = y_pred
>>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
>>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(y'), axis = -1)
>>> # = -((log 0.95), (log 0.1))
>>> # = [0.051, 2.302]
>>> # Reduced xent = (0.051 + 2.302) / 2
>>> m = tf.keras.metrics.CategoricalCrossentropy()
>>> m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_states()
>>> m.update_state([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalCrossentropy()])
```
"""
def __init__(self,
name='categorical_crossentropy',
dtype=None,
from_logits=False,
label_smoothing=0):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.metrics.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` metric.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating pointing values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the metric is
computed.
Standalone usage:
>>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
>>> # logits = log(y_pred)
>>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
>>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(softmax), 1)
>>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
>>> # [-2.3026, -0.2231, -2.3026]]
>>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
>>> # xent = [0.0513, 2.3026]
>>> # Reduced xent = (0.0513 + 2.3026) / 2
>>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_states()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
```
"""
def __init__(self,
name='sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
class SumOverBatchSize(Reduce):
"""Computes the weighted sum over batch size of the given values.
For example, if values is [1, 3, 5, 7] then the metric value is 4.
If the weights were specified as [1, 1, 0, 0] then the value would be 1.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as sum
over batch size which is an idempotent operation that simply divides `total`
by `count`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
"""
def __init__(self, name='sum_over_batch_size', dtype=None):
super(SumOverBatchSize, self).__init__(
reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
name=name,
dtype=dtype)
class SumOverBatchSizeMetricWrapper(SumOverBatchSize):
"""Wraps a function with the `SumOverBatchSizeMetricWrapper` metric."""
def __init__(self, fn, name=None, dtype=None, **kwargs):
"""Creates a `SumOverBatchSizeMetricWrapper` instance.
Args:
fn: The metric function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)
self._fn = fn
self._fn_kwargs = kwargs
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = math_ops.cast(y_true, self._dtype)
y_pred = math_ops.cast(y_pred, self._dtype)
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx())
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
return super(SumOverBatchSizeMetricWrapper, self).update_state(
matches, sample_weight=sample_weight)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if is_tensor_or_variable(v) else v
base_config = super(SumOverBatchSizeMetricWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def accuracy(y_true, y_pred):
[y_pred, y_true], _ = \
metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_pred, y_true])
y_pred.shape.assert_is_compatible_with(y_true.shape)
if y_true.dtype != y_pred.dtype:
y_pred = math_ops.cast(y_pred, y_true.dtype)
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.binary_accuracy')
@dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
"""Calculates how often predictions matches binary labels.
Standalone usage:
>>> y_true = [[1], [1], [0], [0]]
>>> y_pred = [[1], [1], [0], [0]]
>>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
>>> assert m.shape == (4,)
>>> m.numpy()
array([1., 1., 1., 1.], dtype=float32)
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
threshold: (Optional) Float representing the threshold for deciding whether
prediction values are 1 or 0.
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
threshold = math_ops.cast(threshold, y_pred.dtype)
y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
@keras_export('keras.metrics.categorical_accuracy')
@dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions matches one-hot labels.
Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
Args:
y_true: One-hot ground truth values.
y_pred: The prediction values.
Returns:
Categorical accuracy values.
"""
return math_ops.cast(
math_ops.equal(
math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)),
K.floatx())
@keras_export('keras.metrics.sparse_categorical_accuracy')
@dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
"""Calculates how often predictions matches integer labels.
Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
>>> assert m.shape == (2,)
>>> m.numpy()
array([0., 1.], dtype=float32)
You can provide logits of classes as `y_pred`, since argmax of
logits and probabilities are same.
Args:
y_true: Integer ground truth values.
y_pred: The prediction values.
Returns:
Sparse categorical accuracy values.
"""
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = ops.convert_to_tensor_v2_with_dispatch(y_true)
y_pred_rank = y_pred.shape.ndims
y_true_rank = y_true.shape.ndims
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None) and (len(
K.int_shape(y_true)) == len(K.int_shape(y_pred))):
y_true = array_ops.squeeze(y_true, [-1])
y_pred = math_ops.argmax(y_pred, axis=-1)
# If the predicted output and actual output types don't match, force cast them
# to match.
if K.dtype(y_pred) != K.dtype(y_true):
y_pred = math_ops.cast(y_pred, K.dtype(y_true))
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
@keras_export('keras.metrics.top_k_categorical_accuracy')
@dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often targets are in the top `K` predictions.
Standalone usage:
>>> y_true = [[0, 0, 1], [0, 1, 0]]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)
Args:
y_true: The ground truth values.
y_pred: The prediction values.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
Returns:
Top K categorical accuracy value.
"""
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), K.floatx())
@keras_export('keras.metrics.sparse_top_k_categorical_accuracy')
@dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
"""Computes how often integer targets are in the top `K` predictions.
Standalone usage:
>>> y_true = [2, 1]
>>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
>>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
... y_true, y_pred, k=3)
>>> assert m.shape == (2,)
>>> m.numpy()
array([1., 1.], dtype=float32)
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
Returns:
Sparse top K categorical accuracy value.
"""
y_pred_rank = ops.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = array_ops.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
y_true = array_ops.reshape(y_true, [-1])
return math_ops.cast(
nn.in_top_k(y_pred, math_ops.cast(y_true, 'int32'), k), K.floatx())
def cosine_proximity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Returns:
Cosine similarity value.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return math_ops.reduce_sum(y_true * y_pred, axis=axis)
# Aliases
acc = ACC = accuracy
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine_similarity = cosine_proximity
log_cosh = logcosh
def clone_metric(metric):
"""Returns a clone of the metric if stateful, otherwise returns it as is."""
if isinstance(metric, Metric):
with ops.init_scope():
return metric.__class__.from_config(metric.get_config())
return metric
def clone_metrics(metrics):
"""Clones the given metric list/dict."""
return nest.map_structure(clone_metric, metrics)
@keras_export('keras.metrics.serialize')
def serialize(metric):
"""Serializes metric function or `Metric` instance.
Arguments:
metric: A Keras `Metric` instance or a metric function.
Returns:
Metric configuration dictionary.
"""
return serialize_keras_object(metric)
@keras_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
"""Deserializes a serialized metric class/function instance.
Arguments:
config: Metric configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras `Metric` instance or a metric function.
"""
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
@keras_export('keras.metrics.get')
def get(identifier):
"""Retrieves a Keras metric as a `function`/`Metric` class instance.
The `identifier` may be the string name of a metric function or class.
>>> metric = tf.keras.metrics.get("categorical_crossentropy")
>>> type(metric)
<class 'function'>
>>> metric = tf.keras.metrics.get("CategoricalCrossentropy")
>>> type(metric)
<class '...tensorflow.python.keras.metrics.CategoricalCrossentropy'>
You can also specify `config` of the metric to this function by passing dict
containing `class_name` and `config` as an identifier. Also note that the
`class_name` must map to a `Metric` class
>>> identifier = {"class_name": "CategoricalCrossentropy",
... "config": {"from_logits": True}}
>>> metric = tf.keras.metrics.get(identifier)
>>> type(metric)
<class '...tensorflow.python.keras.metrics.CategoricalCrossentropy'>
Arguments:
identifier: A metric identifier. One of None or string name of a metric
function/class or metric configuration dictionary or a metric function or
a metric class instance
Returns:
A Keras metric as a `function`/ `Metric` class instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError(
'Could not interpret metric function identifier: {}'.format(identifier))
def is_built_in(cls):
return cls.__module__ == Metric.__module__ | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
import numpy
# create dense matrix A
A=numpy.array([[1,2,3],[4,0,0],[0,0,0],[0,5,0],[0,0,6],[9,9,9]], dtype=numpy.float64)
parameter_list=[[A]]
def features_sparse_modular (A):
    """Exercise shogun SparseRealFeatures round-trips against the dense
    matrix `A`: dense -> features, scipy-sparse -> features (two ways),
    and features -> scipy-sparse, asserting each reproduces `A`."""
    from scipy.sparse import csc_matrix
    from modshogun import SparseRealFeatures
    from numpy import array, float64, all

    # sparse representation of the dense input
    # note, will work with types other than float64 too,
    # but requires recent scipy.sparse
    sparse_input = csc_matrix(A)

    # dense -> shogun features -> dense must round-trip
    feats = SparseRealFeatures(A)
    assert(all(feats.get_full_feature_matrix() == A))

    # loading the scipy sparse matrix into the same object must round-trip
    feats.set_sparse_feature_matrix(sparse_input)
    assert(all(feats.get_full_feature_matrix() == A))

    # constructing directly from the sparse matrix must round-trip as well
    feats = SparseRealFeatures(sparse_input)
    assert(all(feats.get_full_feature_matrix() == A))

    # exporting back to a scipy csc matrix must reproduce A
    exported = csc_matrix(feats.get_sparse_feature_matrix())
    assert(all(exported.todense() == A))
if __name__=='__main__':
    # run the example with the first (and only) parameter set defined above
    print('Sparse Features')
    features_sparse_modular(*parameter_list[0])
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution
# resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.5"

import re
import io

from PIL import Image, ImageFile, _binary

#
# --------------------------------------------------------------------

# little-endian 32-bit integer read/write helpers (used for the DOS-EPS
# binary header)
i32 = _binary.i32le
o32 = _binary.o32le

# DSC comment of the form "%%Key: value"
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
# bare DSC field of the form "%%Key" (no value)
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")

# On Windows, locate a Ghostscript executable on PATH once at import time.
#   None  -> probe not attempted (not on Windows)
#   False -> on Windows, but no Ghostscript found
#   str   -> name of the Ghostscript binary to invoke
gs_windows_binary = None
import sys
if sys.platform.startswith('win'):
    import shutil
    if hasattr(shutil, 'which'):
        which = shutil.which
    else:
        # Python < 3.3
        import distutils.spawn
        which = distutils.spawn.find_executable
    for binary in ('gswin32c', 'gswin64c', 'gs'):
        if which(binary) is not None:
            gs_windows_binary = binary
            break
    else:
        # for/else: no candidate was found on PATH
        gs_windows_binary = False
def has_ghostscript():
    """Return True if a usable Ghostscript interpreter is available.

    On Windows this relies on the module-level ``gs_windows_binary`` probe
    done at import time; elsewhere it tries to spawn ``gs --version``.
    """
    if gs_windows_binary:
        return True
    if not sys.platform.startswith('win'):
        import subprocess
        try:
            gs = subprocess.Popen(['gs', '--version'], stdout=subprocess.PIPE)
            gs.stdout.read()
            # Close the pipe and reap the child so we don't leak a file
            # descriptor and leave a zombie process behind (the original
            # never waited on the Popen object).
            gs.stdout.close()
            gs.wait()
            return True
        except OSError:
            # no ghostscript
            pass
    return False
def Ghostscript(tile, size, fp, scale=1):
    """Render an EPS image to a bitmap by invoking the external Ghostscript
    interpreter with the ppmraw device, then load the resulting PPM.

    tile  : single-entry tile list produced by EpsImageFile._open; its data
            field carries (length, bbox) for the EPS body.
    size  : target (width, height) in pixels before scaling.
    fp    : file object holding the EPS data; if it is not a real on-disk
            file, its contents are copied to a temporary file first.
    scale : integer supersampling factor for hi-res rendering.
    """
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    # orig_size = size
    # orig_bbox = bbox
    size = (size[0] * scale, size[1] * scale)
    # resolution is dependent on bbox and size
    res = (float((72.0 * size[0]) / (bbox[2]-bbox[0])),
           float((72.0 * size[1]) / (bbox[3]-bbox[1])))
    # print("Ghostscript", scale, size, orig_size, bbox, orig_bbox, res)

    import os
    import subprocess
    import tempfile

    # output file for Ghostscript's rendered PPM
    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, 'name') and os.path.exists(fp.name):
        # fp is backed by a real file; let Ghostscript read it directly
        infile = fp.name
    else:
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # ignore length and offset!
        # ghostscript can read it
        # copy whole file to read in ghostscript
        with open(infile_temp, 'wb') as f:
            # fetch length of fp
            fp.seek(0, 2)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100*1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    # Build ghostscript command
    command = ["gs",
               "-q",                         # quiet mode
               "-g%dx%d" % size,             # set output geometry (pixels)
               "-r%fx%f" % res,              # set input DPI (dots per inch)
               "-dNOPAUSE -dSAFER",          # don't pause between pages,
                                             # safe mode
               "-sDEVICE=ppmraw",            # ppm driver
               "-sOutputFile=%s" % outfile,  # output file
               "-c", "%d %d translate" % (-bbox[0], -bbox[1]),
                                             # adjust for image origin
               "-f", infile,                 # input file
               ]

    if gs_windows_binary is not None:
        # module-level probe ran (we are on Windows)
        if not gs_windows_binary:
            raise WindowsError('Unable to locate Ghostscript on paths')
        command[0] = gs_windows_binary

    # push data through ghostscript
    try:
        gs = subprocess.Popen(command, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE)
        gs.stdin.close()
        status = gs.wait()
        if status:
            raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(outfile)
    finally:
        # best-effort cleanup of the temp files
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except:
            pass

    return im
class PSFile:
    """
    Wrapper for a binary file object that treats CR, LF, or any CR/LF pair
    (in either order) as a line ending and decodes lines as latin-1.
    """

    def __init__(self, fp):
        self.fp = fp
        # one byte of read-ahead left over from the previous readline()
        self.char = None

    def seek(self, offset, whence=0):
        # seeking invalidates the read-ahead byte
        self.char = None
        self.fp.seek(offset, whence)

    def readline(self):
        # start with the read-ahead byte, if any
        pieces = [self.char] if self.char else []
        self.char = None
        while True:
            byte = self.fp.read(1)
            # note: at EOF read(1) returns b"", which tests as "in" b"\r\n"
            # and therefore also terminates the loop
            if byte in b"\r\n":
                break
            pieces.append(byte)
        # peek one byte: a CR/LF pair (in either order) counts as a single
        # line ending; anything else is kept for the next call
        lookahead = self.fp.read(1)
        self.char = None if lookahead in b"\r\n" else lookahead
        return b"".join(pieces).decode('latin-1')
def _accept(prefix):
return prefix[:4] == b"%!PS" or i32(prefix) == 0xC6D3D0C5
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""

    format = "EPS"
    format_description = "Encapsulated Postscript"

    # DSC %ImageData colour-component count -> PIL mode
    mode_map = {1: "L", 2: "LAB", 3: "RGB"}

    def _open(self):
        # Locate the PostScript payload: plain files start at offset 0,
        # DOS-EPS binary files carry an (offset, length) header.
        (length, offset) = self._find_offset(self.fp)

        # Rewrap the open file pointer in something that will
        # convert line endings and decode to latin-1.
        try:
            if bytes is str:
                # Python2, no encoding conversion necessary
                fp = open(self.fp.name, "Ur")
            else:
                # Python3, can use bare open command.
                fp = open(self.fp.name, "Ur", encoding='latin-1')
        except:
            # Expect this for bytesio/stringio
            # NOTE(review): bare except also swallows unrelated errors
            # (e.g. permission problems); presumably only the missing
            # .name attribute / open failure is intended here.
            fp = PSFile(self.fp)

        # go to offset - start of "%!PS"
        fp.seek(offset)

        box = None

        # defaults until the header tells us otherwise
        self.mode = "RGB"
        self.size = 1, 1  # FIXME: huh?

        #
        # Load EPS header: parse DSC comment lines until %%EndComments
        # (or the first non-comment line) is reached.

        s = fp.readline().strip('\r\n')

        while s:
            if len(s) > 255:
                raise SyntaxError("not an EPS file")

            try:
                m = split.match(s)
            except re.error as v:
                raise SyntaxError("not an EPS file")

            if m:
                # "%%Key: value" comment
                k, v = m.group(1, 2)
                self.info[k] = v
                if k == "BoundingBox":
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        box = [int(float(s)) for s in v.split()]
                        self.size = box[2] - box[0], box[3] - box[1]
                        self.tile = [("eps", (0, 0) + self.size, offset,
                                      (length, box))]
                    except:
                        # malformed BoundingBox values: keep scanning
                        pass

            else:
                m = field.match(s)
                if m:
                    # bare "%%Key" comment with no value
                    k = m.group(1)

                    if k == "EndComments":
                        break
                    if k[:8] == "PS-Adobe":
                        self.info[k[:8]] = k[9:]
                    else:
                        self.info[k] = ""
                elif s[0] == '%':
                    # handle non-DSC Postscript comments that some
                    # tools mistakenly put in the Comments section
                    pass
                else:
                    raise IOError("bad EPS header")

            s = fp.readline().strip('\r\n')

            if s[0] != "%":
                break

        #
        # Scan for an "ImageData" descriptor, which (if present and
        # supported) overrides the mode/size derived from the bounding box.

        while s[0] == "%":

            if len(s) > 255:
                raise SyntaxError("not an EPS file")

            if s[:11] == "%ImageData:":
                # Encoded bitmapped image.
                [x, y, bi, mo, z3, z4, en, id] = s[11:].split(None, 7)

                if int(bi) != 8:
                    # only 8 bits per component is supported
                    break
                try:
                    self.mode = self.mode_map[int(mo)]
                except:
                    # unknown component count: fall back to bounding box
                    break

                self.size = int(x), int(y)
                return

            s = fp.readline().strip('\r\n')
            if not s:
                break

        if not box:
            raise IOError("cannot determine EPS bounding box")

    def _find_offset(self, fp):
        """Return (length, offset) of the PostScript section of *fp*."""

        s = fp.read(160)

        if s[:4] == b"%!PS":
            # for HEAD without binary preview
            fp.seek(0, 2)
            length = fp.tell()
            offset = 0
        elif i32(s[0:4]) == 0xC6D3D0C5:
            # FIX for: Some EPS file not handled correctly / issue #302
            # EPS can contain binary data
            # or start directly with latin coding
            # more info see:
            # http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
            offset = i32(s[4:8])
            length = i32(s[8:12])
        else:
            raise SyntaxError("not an EPS file")

        return (length, offset)

    def load(self, scale=1):
        # Load EPS via Ghostscript
        if not self.tile:
            return
        self.im = Ghostscript(self.tile, self.size, self.fp, scale)
        self.mode = self.im.mode
        self.size = self.im.size
        self.tile = []

    def load_seek(self, *args, **kwargs):
        # we can't incrementally load, so force ImageFile.parser to
        # use our custom load method by defining this method.
        pass
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
    """EPS Writer for the Python Imaging Library.

    Writes *im* to *fp* as (Encapsulated) PostScript, pushing the pixel
    data through PIL's "eps" encoder. *eps* selects whether the EPSF DSC
    header is emitted before the image operators.
    """

    #
    # make sure image data is available
    im.load()

    #
    # determine postscript image mode: (bits, components, operator)
    if im.mode == "L":
        operator = (8, 1, "image")
    elif im.mode == "RGB":
        operator = (8, 3, "false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, "false 4 colorimage")
    else:
        raise ValueError("image mode is not supported")

    class NoCloseStream:
        # Proxy that ignores close() so the TextIOWrapper created below
        # cannot close the caller's underlying file when it is finalized.
        def __init__(self, fp):
            self.fp = fp

        def __getattr__(self, name):
            return getattr(self.fp, name)

        def close(self):
            pass

    base_fp = fp
    fp = NoCloseStream(fp)
    if sys.version_info[0] > 2:
        # the header is text (latin-1); the binary pixel data is written
        # separately through base_fp below
        fp = io.TextIOWrapper(fp, encoding='latin-1')

    if eps:
        #
        # write EPS header
        fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write("%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write("%%Pages: 1\n")
        fp.write("%%EndComments\n")
        fp.write("%%Page: 1 1\n")
        fp.write("%%ImageData: %d %d " % im.size)
        fp.write("%d %d 0 1 1 \"%s\"\n" % operator)

    #
    # image header
    fp.write("gsave\n")
    fp.write("10 dict begin\n")
    fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write("%d %d scale\n" % im.size)
    fp.write("%d %d 8\n" % im.size)  # <= bits
    fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write("{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + "\n")
    fp.flush()

    # hex-encoded pixel data goes straight to the underlying binary stream
    ImageFile._save(im, base_fp, [("eps", (0, 0)+im.size, 0, None)])

    fp.write("\n%%%%EndBinary\n")
    fp.write("grestore end\n")
    fp.flush()
#
# --------------------------------------------------------------------
# Tell PIL about this plugin: how to sniff EPS files, how to save them,
# which file extensions map to the format, and its MIME type.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)
Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript")
"""
Convergence acceleration / extrapolation methods for series and
sequences.
References:
Carl M. Bender & Steven A. Orszag, "Advanced Mathematical Methods for
Scientists and Engineers: Asymptotic Methods and Perturbation Theory",
Springer 1999. (Shanks transformation: pp. 368-375, Richardson
extrapolation: pp. 375-377.)
"""
from __future__ import print_function, division
from sympy import factorial, Integer, S
from sympy.core.compatibility import range
def richardson(A, k, n, N):
    """
    Calculate an approximation for lim k->oo A(k) using Richardson
    extrapolation with the terms A(n), A(n+1), ..., A(n+N+1).
    Choosing N ~= 2*n often gives good results.

    A simple example is to calculate exp(1) using the limit definition.
    This limit converges slowly; n = 100 only produces two accurate
    digits:

    >>> from sympy.abc import n
    >>> e = (1 + 1/n)**n
    >>> print(round(e.subs(n, 100).evalf(), 10))
    2.7048138294

    Richardson extrapolation with 11 appropriately chosen terms gives
    a value that is accurate to the indicated precision:

    >>> from sympy import E
    >>> from sympy.series.acceleration import richardson
    >>> print(round(richardson(e, n, 10, 20).evalf(), 10))
    2.7182818285
    >>> print(round(E.evalf(), 10))
    2.7182818285

    Another useful application is to speed up convergence of series.
    Computing 100 terms of the zeta(2) series 1/k**2 yields only
    two accurate digits:

    >>> from sympy.abc import k, n
    >>> from sympy import Sum
    >>> A = Sum(k**-2, (k, 1, n))
    >>> print(round(A.subs(n, 100).evalf(), 10))
    1.6349839002

    Richardson extrapolation performs much better:

    >>> from sympy import pi
    >>> print(round(richardson(A, n, 10, 20).evalf(), 10))
    1.6449340668
    >>> print(round(((pi**2)/6).evalf(), 10)) # Exact value
    1.6449340668
    """
    total = S.Zero
    for j in range(N + 1):
        # j-th extrapolation weight: (n+j)^N * (-1)^(j+N) / (j! * (N-j)!)
        term = A.subs(k, Integer(n + j)).doit()
        weight = (n + j)**N * (-1)**(j + N) / (factorial(j) * factorial(N - j))
        total += term * weight
    return total
def shanks(A, k, n, m=1):
    """
    Calculate an approximation for lim k->oo A(k) using the n-term Shanks
    transformation S(A)(n). With m > 1, calculate the m-fold recursive
    Shanks transformation S(S(...S(A)...))(n).

    The Shanks transformation is useful for summing Taylor series that
    converge slowly near a pole or singularity, e.g. for log(2):

    >>> from sympy.abc import k, n
    >>> from sympy import Sum, Integer
    >>> from sympy.series.acceleration import shanks
    >>> A = Sum(Integer(-1)**(k+1) / k, (k, 1, n))
    >>> print(round(A.subs(n, 100).doit().evalf(), 10))
    0.6881721793
    >>> print(round(shanks(A, n, 25).evalf(), 10))
    0.6931396564
    >>> print(round(shanks(A, n, 25, 5).evalf(), 10))
    0.6931471806

    The correct value is 0.6931471805599453094172321215.
    """
    # seed table: the raw partial "sums" A(0) .. A(n+m+1)
    seq = [A.subs(k, Integer(idx)).doit() for idx in range(n + m + 2)]
    work = seq[:]
    for level in range(1, m + 1):
        # one Shanks pass: each interior entry is replaced by the
        # aitken-style combination of its neighbours
        for idx in range(level, n + m + 1):
            left, mid, right = seq[idx - 1], seq[idx], seq[idx + 1]
            work[idx] = (right*left - mid**2) / (right + left - 2*mid)
        seq = work[:]
    return seq[n]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-instance-attributes,too-many-locals
# pylint: disable=too-many-branches,too-many-statements,too-many-arguments
"""Executor group is a convenient tool for managing a group of executors."""
import logging
from collections import OrderedDict
from .. import context as ctx
from .. import ndarray as nd
from ..io import DataDesc
from ..executor_manager import _split_input_slice
def _load_general(data, targets, major_axis):
    """Load a list of arrays into a list of arrays specified by slices.

    For each source array, the matching target is either a single NDArray,
    a parallel list of NDArrays (copied one-to-one), or a list of
    (slice, NDArray) pairs describing per-device batch slices.
    """
    for d_src, d_targets, axis in zip(data, targets, major_axis):  # pylint: disable=too-many-nested-blocks
        if isinstance(d_targets, nd.NDArray):
            # single destination: plain copy
            d_src.copyto(d_targets)
        elif isinstance(d_src, (list, tuple)):
            # one source per destination: copy pairwise
            for src, dst in zip(d_src, d_targets):
                src.copyto(dst)
        else:
            # per-device (slice, array) targets
            for slice_idx, d_dst in d_targets:
                if axis >= 0:
                    # copy slice
                    shape = d_src.shape
                    # skip the crop when the slice already covers the
                    # whole batch axis
                    do_crop = (slice_idx.start != 0 or shape[axis] != slice_idx.stop)
                    # pylint: disable=no-member,protected-access
                    if do_crop:
                        if axis == 0:
                            # axis-0 slicing avoids an extra kernel
                            d_src[slice_idx.start:slice_idx.stop].copyto(d_dst)
                        else:
                            if d_src.context == d_dst.context:
                                nd.slice_axis(d_src, axis=axis, begin=slice_idx.start,
                                              end=slice_idx.stop, out=d_dst)
                            else:
                                # on different device, crop and then do cross device copy
                                d_dst_copy = nd.slice_axis(d_src, axis=axis, begin=slice_idx.start,
                                                           end=slice_idx.stop)
                                d_dst_copy.copyto(d_dst)
                    else:
                        d_src.copyto(d_dst)
                    # pylint: enable=no-member,protected-access
                else:
                    # no batch axis: every device gets the full array
                    d_src.copyto(d_dst)
def _load_data(batch, targets, major_axis):
    """Load the `data` field of a DataBatch into sliced per-device arrays
    via :func:`_load_general`."""
    _load_general(batch.data, targets, major_axis)
def _load_label(batch, targets, major_axis):
    """Load the `label` field of a DataBatch into sliced per-device arrays
    via :func:`_load_general`."""
    _load_general(batch.label, targets, major_axis)
def _merge_multi_context(outputs, major_axis):
    """Merge outputs that live on multiple contexts into single arrays, so
    the result looks as if it was computed on one device."""
    merged = []
    for per_dev, axis in zip(outputs, major_axis):
        if axis < 0:
            # negative axis means there is no batch_size axis, and all the
            # results should be the same on each device. We simply take the
            # first one, without checking they are actually the same
            merged.append(per_dev[0])
        elif len(per_dev) == 1:
            # single device: nothing to concatenate
            merged.append(per_dev[0])
        else:
            # pylint: disable=no-member,protected-access
            # gather everything onto the first device and concatenate
            # along the batch axis
            first_ctx = per_dev[0].context
            merged.append(nd.concat(*[tensor.as_in_context(first_ctx)
                                      for tensor in per_dev],
                                    dim=axis))
            # pylint: enable=no-member,protected-access
    return merged
def _prepare_group2ctxs(group2ctxs, ctx_len):
"""Prepare the group2contexts, will duplicate the context
if some ctx_group map to only one context.
"""
if group2ctxs is None:
return [None] * ctx_len
elif isinstance(group2ctxs, list):
assert(len(group2ctxs) == ctx_len), "length of group2ctxs\
should be %d" % ctx_len
return group2ctxs
elif isinstance(group2ctxs, dict):
ret = [{} for i in range(ctx_len)]
for k, v in group2ctxs.items():
ctxs = None
if isinstance(v, ctx.Context):
ctxs = [v] * ctx_len
else:
if len(v) == 1:
ctxs = v * ctx_len
else:
assert(len(v) == ctx_len), "length of group2ctxs[%s]\
should be %d or 1" % (k, ctx_len)
ctxs = v
for i in range(ctx_len):
ret[i][k] = ctxs[i]
return ret
else:
assert(False), "group2ctxs should be list of dict of str to context,\
or dict of str to context or list of context"
class DataParallelExecutorGroup(object):
    """A group of executors that lives on a group of devices.
    This is a helper class used to implement data parallelization. Each mini-batch will
    be split and run on the devices.

    Parameters
    ----------
    symbol : Symbol
        The common symbolic computation graph for all executors.
    contexts : list
        A list of contexts.
    workload : list
        If not ``None``, could be a list of numbers that specify the workload to be assigned
        to different context. Larger number indicate heavier workload.
    data_shapes : list
        Should be a list of (name, shape) tuples, for the shapes of data. Note the order is
        important and should be the same as the order that the `DataIter` provide the data.
    label_shapes : list
        Should be a list of (name, shape) tuples, for the shapes of label. Note the order is
        important and should be the same as the order that the `DataIter` provide the label.
    param_names : list
        A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)
        in the computation graph.
    for_training : bool
        Indicate whether the executors should be bound for training. When not doing training,
        the memory for gradients will not be allocated.
    inputs_need_grad : bool
        Indicate whether the gradients for the input data should be computed. This is currently
        not used. It will be useful for implementing composition of modules.
    shared_group : DataParallelExecutorGroup
        Defaults to ``None``. This is used in bucketing. When not ``None``, it should be an
        executor group corresponding to a different bucket. In other words, it will correspond
        to a different symbol with the same set of parameters (e.g. unrolled RNNs with
        different lengths). In this case the memory regions of the parameters will be shared.
    logger : Logger
        Default is `logging`.
    fixed_param_names: list of str
        Parameters to be fixed during training. For these parameters, no gradients
        will be calculated and thus no space will be allocated for the gradient.
    grad_req : str, list of str, dict of str to str
        Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
        (default to 'write').
        Can be specified globally (str) or for each argument (list, dict).
    group2ctxs : dict of str to context or list of context,
                 or list of dict of str to context
        Default is `None`. Mapping the `ctx_group` attribute to the context assignment.
    """
    def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names,
                 for_training, inputs_need_grad, shared_group=None, logger=logging,
                 fixed_param_names=None, grad_req='write', state_names=None, group2ctxs=None):
        """Set up bookkeeping, normalize grad_req, and bind the executors.
        See the class docstring for parameter descriptions."""
        self.param_names = param_names
        self.arg_names = symbol.list_arguments()
        self.aux_names = symbol.list_auxiliary_states()

        self.symbol = symbol
        self.contexts = contexts
        self.workload = workload
        # normalize ctx-group mapping to one dict per executor
        self.group2ctxs = _prepare_group2ctxs(group2ctxs, len(contexts))

        self.for_training = for_training
        self.inputs_need_grad = inputs_need_grad

        self.logger = logger
        #In the future we should have a better way to profile memory per device (haibin)
        self._total_exec_bytes = 0
        self.fixed_param_names = fixed_param_names
        if self.fixed_param_names is None:
            self.fixed_param_names = []

        self.state_names = state_names
        if self.state_names is None:
            self.state_names = []

        # inference never needs gradient buffers
        if not for_training:
            grad_req = 'null'

        # accept plain (name, shape) tuples as well as DataDesc objects
        data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
        if label_shapes is not None:
            label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]

        data_names = [x.name for x in data_shapes]

        # expand grad_req into a per-argument dict:
        # params get the request (unless fixed), data inputs only when
        # inputs_need_grad, labels/others get 'null'
        if isinstance(grad_req, str):
            self.grad_req = {}
            for k in self.arg_names:
                if k in self.param_names:
                    self.grad_req[k] = 'null' if k in self.fixed_param_names else grad_req
                elif k in data_names:
                    self.grad_req[k] = grad_req if self.inputs_need_grad else 'null'
                else:
                    self.grad_req[k] = 'null'
        elif isinstance(grad_req, (list, tuple)):
            assert len(grad_req) == len(self.arg_names)
            self.grad_req = dict(zip(self.arg_names, grad_req))
        elif isinstance(grad_req, dict):
            # start from defaults, then overlay the user-provided entries
            self.grad_req = {}
            for k in self.arg_names:
                if k in self.param_names:
                    self.grad_req[k] = 'null' if k in self.fixed_param_names else 'write'
                elif k in data_names:
                    self.grad_req[k] = 'write' if self.inputs_need_grad else 'null'
                else:
                    self.grad_req[k] = 'null'
            self.grad_req.update(grad_req)
        else:
            raise ValueError("grad_req must be one of str, list, tuple, or dict.")

        # share input buffers with the other bucket's group when bucketing
        if shared_group is not None:
            self.shared_data_arrays = shared_group.shared_data_arrays
        else:
            self.shared_data_arrays = [{} for _ in contexts]

        # initialize some instance variables
        self.batch_size = None
        self.slices = None
        self.execs = []
        self._default_execs = None
        self.data_arrays = None
        self.label_arrays = None
        self.param_arrays = None
        self.state_arrays = None
        self.grad_arrays = None
        self.aux_arrays = None
        self.input_grad_arrays = None

        self.data_shapes = None
        self.label_shapes = None
        self.data_names = None
        self.label_names = None
        self.data_layouts = None
        self.label_layouts = None
        self.output_names = self.symbol.list_outputs()
        # batch axis of each output, derived from its __layout__ attribute
        self.output_layouts = [DataDesc.get_batch_axis(self.symbol[name].attr('__layout__'))
                               for name in self.output_names]
        self.num_outputs = len(self.symbol.list_outputs())

        self.bind_exec(data_shapes, label_shapes, shared_group)
def decide_slices(self, data_shapes):
"""Decide the slices for each context according to the workload.
Parameters
----------
data_shapes : list
list of (name, shape) specifying the shapes for the input data or label.
"""
assert len(data_shapes) > 0
major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes]
for (name, shape), axis in zip(data_shapes, major_axis):
if axis == -1:
continue
batch_size = shape[axis]
if self.batch_size is not None:
assert batch_size == self.batch_size, ("all data must have the same batch size: "
+ ("batch_size = %d, but " % self.batch_size)
+ ("%s has shape %s" % (name, shape)))
else:
self.batch_size = batch_size
self.slices = _split_input_slice(self.batch_size, self.workload)
return major_axis
    def _collect_arrays(self):
        """Collect internal arrays from executors.

        Builds the per-name, per-device views (data/label/state/param/
        grad/input-grad/aux arrays) that the rest of the class operates on.
        """
        # convenient data structures
        # data/label: list over inputs of [(slice, array) per executor]
        self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)]
                            for name, _ in self.data_shapes]

        self.state_arrays = [[e.arg_dict[name] for e in self.execs]
                             for name in self.state_names]

        if self.label_shapes is not None:
            self.label_arrays = [[(self.slices[i], e.arg_dict[name])
                                  for i, e in enumerate(self.execs)]
                                 for name, _ in self.label_shapes]
        else:
            self.label_arrays = None

        # params/grads: list over param names of [array per executor]
        self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
                             for i, name in enumerate(self.arg_names)
                             if name in self.param_names]
        if self.for_training:
            self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
                                for i, name in enumerate(self.arg_names)
                                if name in self.param_names]
        else:
            self.grad_arrays = None

        # gradients w.r.t. the data inputs, only when requested at bind time
        data_names = [x[0] for x in self.data_shapes]
        if self.inputs_need_grad:
            self.input_grad_arrays = [[exec_.grad_arrays[self.arg_names.index(name)]
                                       for exec_ in self.execs]
                                      for name in data_names if name in self.arg_names]
        else:
            self.input_grad_arrays = None

        self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
                           for i in range(len(self.aux_names))]
    def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
        """Bind executors on their respective devices.

        Parameters
        ----------
        data_shapes : list
            shapes for the data inputs (full batch; sliced per device here).
        label_shapes : list
            shapes for the labels, or None when running without labels.
        shared_group : DataParallelExecutorGroup
            group to share parameter memory with (bucketing), or None.
        reshape : bool
            when True, reshape the previously bound executors in place
            instead of binding new ones.
        """
        assert reshape or not self.execs
        # recomputed from the new shapes in decide_slices below
        self.batch_size = None

        # calculate workload and bind executors
        self.data_layouts = self.decide_slices(data_shapes)
        if label_shapes is not None:
            # call it to make sure labels has the same batch size as data
            self.label_layouts = self.decide_slices(label_shapes)

        for i in range(len(self.contexts)):
            data_shapes_i = self._sliced_shape(data_shapes, i, self.data_layouts)
            if label_shapes is not None:
                label_shapes_i = self._sliced_shape(label_shapes, i, self.label_layouts)
            else:
                label_shapes_i = []

            if reshape:
                # reuse the memory of the original executors where possible
                self.execs[i] = self._default_execs[i].reshape(
                    allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i))
            else:
                self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
                                                      shared_group))

        self.data_shapes = data_shapes
        self.label_shapes = label_shapes
        self.data_names = [i.name for i in self.data_shapes]
        if label_shapes is not None:
            self.label_names = [i.name for i in self.label_shapes]
        self._collect_arrays()
def reshape(self, data_shapes, label_shapes):
"""Reshape executors.
Parameters
----------
data_shapes : list
label_shapes : list
"""
if data_shapes == self.data_shapes and label_shapes == self.label_shapes:
return
if self._default_execs is None:
self._default_execs = [i for i in self.execs]
self.bind_exec(data_shapes, label_shapes, reshape=True)
def set_params(self, arg_params, aux_params, allow_extra=False):
"""Assign, i.e. copy parameters to all the executors.
Parameters
----------
arg_params : dict
A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
A dictionary of name to `NDArray` auxiliary variable mapping.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
"""
for exec_ in self.execs:
exec_.copy_params_from(arg_params, aux_params, allow_extra_params=allow_extra)
def get_params(self, arg_params, aux_params):
""" Copy data from each executor to `arg_params` and `aux_params`.
Parameters
----------
arg_params : list of NDArray
Target parameter arrays.
aux_params : list of NDArray
Target aux arrays.
Notes
-----
- This function will inplace update the NDArrays in arg_params and aux_params.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
    def forward(self, data_batch, is_train=None):
        """Split `data_batch` according to workload and run forward on each device.

        Parameters
        ----------
        data_batch : DataBatch
            Or could be any object implementing similar interface.
        is_train : bool
            The hint for the backend, indicating whether we are during training phase.
            Default is `None`, then the value `self.for_training` will be used.
        """
        # scatter data (and labels, when present) into the per-device buffers
        _load_data(data_batch, self.data_arrays, self.data_layouts)
        if is_train is None:
            is_train = self.for_training

        if self.label_arrays is not None and data_batch.label:
            _load_label(data_batch, self.label_arrays, self.label_layouts)

        for exec_ in self.execs:
            exec_.forward(is_train=is_train)
def get_output_shapes(self):
"""Get the shapes of the outputs."""
outputs = self.execs[0].outputs
shapes = [out.shape for out in outputs]
concat_shapes = []
for key, the_shape, axis in zip(self.symbol.list_outputs(), shapes, self.output_layouts):
the_shape = list(the_shape)
if axis >= 0:
the_shape[axis] = self.batch_size
concat_shapes.append((key, tuple(the_shape)))
return concat_shapes
def get_outputs(self, merge_multi_context=True, begin=0, end=None):
"""Get outputs of the previous forward computation.
If begin or end is specified, return [begin, end)-th outputs,
otherwise return all outputs.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicate that we
should merge the collected results so that they look like from a single
executor.
begin : int
starting index of returned outputs in all outputs
end : int or None
ending index (excluded) of returned outputs.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
if end is None:
end = self.num_outputs
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(begin, end)]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
def get_states(self, merge_multi_context=True):
"""Get states from all devices.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
assert not merge_multi_context, \
"merge_multi_context=True is not supported for get_states yet."
return self.state_arrays
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays
    def backward(self, out_grads=None):
        """Run backward on all devices. A backward should be called after
        a call to the forward function. Backward cannot be called unless
        ``self.for_training`` is ``True``.

        Parameters
        ----------
        out_grads : NDArray or list of NDArray, optional
            Gradient on the outputs to be propagated back.
            This parameter is only needed when bind is called
            on outputs that are not a loss function.
        """
        assert self.for_training, 're-bind with for_training=True to run backward'
        if out_grads is None:
            out_grads = []

        # give each executor the slice of the output gradient that matches
        # its batch slice (or the full gradient when there is no batch axis)
        for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
            out_grads_slice = []
            for grad, axis in zip(out_grads, self.output_layouts):
                if axis >= 0:
                    # pylint: disable=no-member
                    og_my_slice = nd.slice_axis(grad, axis=axis, begin=islice.start,
                                                end=islice.stop)
                    # pylint: enable=no-member
                    out_grads_slice.append(og_my_slice.as_in_context(self.contexts[i]))
                else:
                    # no batch axis: each device receives the whole gradient
                    out_grads_slice.append(grad.copyto(self.contexts[i]))

            exec_.backward(out_grads=out_grads_slice)
    def update_metric(self, eval_metric, labels):
        """Accumulate the performance according to `eval_metric` on all devices
        by comparing each executor's outputs with its slice of the labels.
        Parameters
        ----------
        eval_metric : EvalMetric
            The metric used for evaluation.
        labels : list of NDArray
            Typically comes from `label` of a `DataBatch`.
        """
        for texec, islice in zip(self.execs, self.slices):
            labels_slice = []
            for label, axis in zip(labels, self.label_layouts):
                if axis == 0:
                    # slicing NDArray along axis 0 can avoid copying
                    labels_slice.append(label[islice])
                elif axis > 0:
                    # Batch axis is not the leading one: slice along it
                    # (this path copies).
                    # pylint: disable=no-member
                    label_my_slice = nd.slice_axis(label, axis=axis, begin=islice.start,
                                                   end=islice.stop).as_in_context(label.context)
                    # pylint: enable=no-member
                    labels_slice.append(label_my_slice)
                else:
                    # Negative axis: label is not split across devices.
                    labels_slice.append(label)
            labels_ = OrderedDict(zip(self.label_names, labels_slice))
            preds = OrderedDict(zip(self.output_names, texec.outputs))
            eval_metric.update_dict(labels_, preds)
    def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
        """Internal utility function to bind the i-th executor.
        This function utilizes simple_bind python interface.
        """
        # Reuse memory from the corresponding executor of ``shared_group``
        # when one is provided.
        shared_exec = None if shared_group is None else shared_group.execs[i]
        context = self.contexts[i]
        shared_data_arrays = self.shared_data_arrays[i]
        input_shapes = dict(data_shapes)
        if label_shapes is not None:
            input_shapes.update(dict(label_shapes))
        # Map every input name to its dtype so simple_bind can infer types.
        input_types = {x.name: x.dtype for x in data_shapes}
        if label_shapes is not None:
            input_types.update({x.name: x.dtype for x in label_shapes})
        group2ctx = self.group2ctxs[i]
        executor = self.symbol.simple_bind(ctx=context, grad_req=self.grad_req,
                                           type_dict=input_types, shared_arg_names=self.param_names,
                                           shared_exec=shared_exec, group2ctx=group2ctx,
                                           shared_buffer=shared_data_arrays, **input_shapes)
        # Track total allocated bytes, parsed out of the executor debug string
        # (third line from the end holds the byte count).
        self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
        return executor
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes
def install_monitor(self, mon):
"""Install monitor on all executors"""
for exe in self.execs:
mon.install(exe) | unknown | codeparrot/codeparrot-clean | ||
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""Contains routines for printing protocol messages in text format."""
__author__ = 'kenton@google.com (Kenton Varda)'
import cStringIO
import re
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
from google.protobuf import text_encoding
# Public API of this module.
__all__ = ['MessageToString', 'PrintMessage', 'PrintField',
           'PrintFieldValue', 'Merge']
# Range checkers, indexed as 2 * int(is_long) + int(is_signed) in
# ParseInteger below.
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
                     type_checkers.Int32ValueChecker(),
                     type_checkers.Uint64ValueChecker(),
                     type_checkers.Int64ValueChecker())
# Textual spellings of special float values accepted by ParseFloat.
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)
# C++ types whose values honor the float_format printing option.
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
                          descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
class Error(Exception):
  """Base class for all errors raised by the text_format module."""
class ParseError(Error):
  """Raised when the ASCII text being parsed is malformed."""
def MessageToString(message, as_utf8=False, as_one_line=False,
                    pointy_brackets=False, use_index_order=False,
                    float_format=None):
  """Convert protobuf message to text format.
  Floating point values can be formatted compactly with 15 digits of
  precision (which is the most that IEEE 754 "double" can guarantee)
  using float_format='.15g'.
  Args:
    message: The protocol buffers message.
    as_utf8: Produce text output in UTF8 format.
    as_one_line: Don't introduce newlines between fields.
    pointy_brackets: If True, use angle brackets instead of curly braces for
      nesting.
    use_index_order: If True, print fields of a proto message using the order
      defined in source code instead of the field number. By default, use the
      field number order.
    float_format: If set, use this to specify floating point number formatting
      (per the "Format Specification Mini-Language"); otherwise, str() is used.
  Returns:
    A string of the text formatted protocol buffer message.
  """
  out = cStringIO.StringIO()
  PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line,
               pointy_brackets=pointy_brackets,
               use_index_order=use_index_order,
               float_format=float_format)
  result = out.getvalue()
  out.close()
  if as_one_line:
    # Single-line output ends with a trailing field separator space; drop it.
    return result.rstrip()
  return result
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False,
                 pointy_brackets=False, use_index_order=False,
                 float_format=None):
  """Print the text representation of all set fields of message to out.
  See MessageToString() for the meaning of the formatting options.
  """
  fields = message.ListFields()
  if use_index_order:
    # Sort by declaration order in the .proto source rather than by number.
    fields.sort(key=lambda x: x[0].index)
  for field, value in fields:
    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      # Repeated fields print one name/value pair per element.
      for element in value:
        PrintField(field, element, out, indent, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   float_format=float_format)
    else:
      PrintField(field, value, out, indent, as_utf8, as_one_line,
                 pointy_brackets=pointy_brackets,
                 float_format=float_format)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False,
               pointy_brackets=False, float_format=None):
  """Print a single field name/value pair. For repeated fields, the value
  should be a single element."""
  out.write(' ' * indent)
  if field.is_extension:
    # Extensions are printed with their full name in square brackets.
    out.write('[')
    if (field.containing_type.GetOptions().message_set_wire_format and
        field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
        field.message_type == field.extension_scope and
        field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
      # MessageSet extensions are identified by their message type name.
      out.write(field.message_type.full_name)
    else:
      out.write(field.full_name)
    out.write(']')
  elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
    # For groups, use the capitalized name.
    out.write(field.message_type.name)
  else:
    out.write(field.name)
  if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # The colon is optional in this case, but our cross-language golden files
    # don't include it.
    out.write(': ')
  PrintFieldValue(field, value, out, indent, as_utf8, as_one_line,
                  pointy_brackets=pointy_brackets,
                  float_format=float_format)
  if as_one_line:
    out.write(' ')
  else:
    out.write('\n')
def PrintFieldValue(field, value, out, indent=0, as_utf8=False,
                    as_one_line=False, pointy_brackets=False,
                    float_format=None):
  """Print a single field value (not including name). For repeated fields,
  the value should be a single element."""
  if pointy_brackets:
    openb = '<'
    closeb = '>'
  else:
    openb = '{'
    closeb = '}'
  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # Nested messages recurse into PrintMessage with increased indent.
    if as_one_line:
      out.write(' %s ' % openb)
      PrintMessage(value, out, indent, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   float_format=float_format)
      out.write(closeb)
    else:
      out.write(' %s\n' % openb)
      PrintMessage(value, out, indent + 2, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   float_format=float_format)
      out.write(' ' * indent + closeb)
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    # Print the symbolic enum name when known, the raw number otherwise.
    enum_value = field.enum_type.values_by_number.get(value, None)
    if enum_value is not None:
      out.write(enum_value.name)
    else:
      out.write(str(value))
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    out.write('\"')
    if isinstance(value, unicode):
      # Python 2: encode text to UTF-8 bytes before escaping.
      out_value = value.encode('utf-8')
    else:
      out_value = value
    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
      # We need to escape non-UTF8 chars in TYPE_BYTES field.
      out_as_utf8 = False
    else:
      out_as_utf8 = as_utf8
    out.write(text_encoding.CEscape(out_value, out_as_utf8))
    out.write('\"')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    if value:
      out.write('true')
    else:
      out.write('false')
  elif field.cpp_type in _FLOAT_TYPES and float_format is not None:
    # Apply the caller-provided format spec to float/double values.
    out.write('{1:{0}}'.format(float_format, value))
  else:
    out.write(str(value))
def _ParseOrMerge(lines, message, allow_multiple_scalars):
  """Converts an ASCII representation of a protocol message into a message.
  Args:
    lines: Lines of a message's ASCII representation.
    message: A protocol buffer message to merge into.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".
  Raises:
    ParseError: On ASCII parsing problems.
  """
  tok = _Tokenizer(lines)
  while not tok.AtEnd():
    _MergeField(tok, message, allow_multiple_scalars)
def Parse(text, message):
  """Parses an ASCII representation of a protocol message into a message.
  Args:
    text: Message ASCII representation.
    message: A protocol buffer message to merge into.
  Returns:
    The same message passed as argument.
  Raises:
    ParseError: On ASCII parsing problems.
  """
  if not isinstance(text, str):
    text = text.decode('utf-8')
  return ParseLines(text.split('\n'), message)
def Merge(text, message):
  """Parses an ASCII representation of a protocol message into a message.
  Like Parse(), but allows repeated values for a non-repeated field, and uses
  the last one.
  Args:
    text: Message ASCII representation.
    message: A protocol buffer message to merge into.
  Returns:
    The same message passed as argument.
  Raises:
    ParseError: On ASCII parsing problems.
  """
  lines = text.split('\n')
  return MergeLines(lines, message)
def ParseLines(lines, message):
  """Parses an ASCII representation of a protocol message into a message.
  Args:
    lines: An iterable of lines of a message's ASCII representation.
    message: A protocol buffer message to merge into.
  Returns:
    The same message passed as argument.
  Raises:
    ParseError: On ASCII parsing problems.
  """
  allow_multiple_scalars = False
  _ParseOrMerge(lines, message, allow_multiple_scalars)
  return message
def MergeLines(lines, message):
  """Parses an ASCII representation of a protocol message into a message.
  Args:
    lines: An iterable of lines of a message's ASCII representation.
    message: A protocol buffer message to merge into.
  Returns:
    The same message passed as argument.
  Raises:
    ParseError: On ASCII parsing problems.
  """
  allow_multiple_scalars = True
  _ParseOrMerge(lines, message, allow_multiple_scalars)
  return message
def _MergeField(tokenizer, message, allow_multiple_scalars):
  """Merges a single protocol message field into a message.
  Args:
    tokenizer: A tokenizer to parse the field name and values.
    message: A protocol message to record the data.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".
  Raises:
    ParseError: In case of ASCII parsing problems.
  """
  message_descriptor = message.DESCRIPTOR
  if tokenizer.TryConsume('['):
    # Extension field: "[package.ExtensionName]".
    name = [tokenizer.ConsumeIdentifier()]
    while tokenizer.TryConsume('.'):
      name.append(tokenizer.ConsumeIdentifier())
    name = '.'.join(name)
    if not message_descriptor.is_extendable:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" does not have extensions.' %
          message_descriptor.full_name)
    # pylint: disable=protected-access
    field = message.Extensions._FindExtensionByName(name)
    # pylint: enable=protected-access
    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" not registered.' % name)
    elif message_descriptor != field.containing_type:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" does not extend message type "%s".' % (
              name, message_descriptor.full_name))
    tokenizer.Consume(']')
  else:
    name = tokenizer.ConsumeIdentifier()
    field = message_descriptor.fields_by_name.get(name, None)
    # Group names are expected to be capitalized as they appear in the
    # .proto file, which actually matches their type names, not their field
    # names.
    if not field:
      field = message_descriptor.fields_by_name.get(name.lower(), None)
      if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
        field = None
    if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
        field.message_type.name != name):
      field = None
    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" has no field named "%s".' % (
              message_descriptor.full_name, name))
  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # Composite field: the value is a nested block delimited by "{...}"
    # or "<...>"; the colon before it is optional.
    tokenizer.TryConsume(':')
    if tokenizer.TryConsume('<'):
      end_token = '>'
    else:
      tokenizer.Consume('{')
      end_token = '}'
    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      if field.is_extension:
        sub_message = message.Extensions[field].add()
      else:
        sub_message = getattr(message, field.name).add()
    else:
      if field.is_extension:
        sub_message = message.Extensions[field]
      else:
        sub_message = getattr(message, field.name)
      sub_message.SetInParent()
    while not tokenizer.TryConsume(end_token):
      if tokenizer.AtEnd():
        raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
      # Recurse for each field of the nested message.
      _MergeField(tokenizer, sub_message, allow_multiple_scalars)
  else:
    _MergeScalarField(tokenizer, message, field, allow_multiple_scalars)
  # For historical reasons, fields may optionally be separated by commas or
  # semicolons.
  if not tokenizer.TryConsume(','):
    tokenizer.TryConsume(';')
def _MergeScalarField(tokenizer, message, field, allow_multiple_scalars):
  """Merges a single protocol message scalar field into a message.
  Args:
    tokenizer: A tokenizer to parse the field value.
    message: A protocol message to record the data.
    field: The descriptor of the field to be merged.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".
  Raises:
    ParseError: In case of ASCII parsing problems.
    RuntimeError: On runtime errors.
  """
  tokenizer.Consume(':')
  value = None
  # Dispatch on the declared field type to pick the token-consuming routine.
  if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
                    descriptor.FieldDescriptor.TYPE_SINT32,
                    descriptor.FieldDescriptor.TYPE_SFIXED32):
    value = tokenizer.ConsumeInt32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
                      descriptor.FieldDescriptor.TYPE_SINT64,
                      descriptor.FieldDescriptor.TYPE_SFIXED64):
    value = tokenizer.ConsumeInt64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
                      descriptor.FieldDescriptor.TYPE_FIXED32):
    value = tokenizer.ConsumeUint32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
                      descriptor.FieldDescriptor.TYPE_FIXED64):
    value = tokenizer.ConsumeUint64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
                      descriptor.FieldDescriptor.TYPE_DOUBLE):
    value = tokenizer.ConsumeFloat()
  elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
    value = tokenizer.ConsumeBool()
  elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
    value = tokenizer.ConsumeString()
  elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
    value = tokenizer.ConsumeByteString()
  elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
    value = tokenizer.ConsumeEnum(field)
  else:
    raise RuntimeError('Unknown field type %d' % field.type)
  if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
    # Repeated scalars are always appended.
    if field.is_extension:
      message.Extensions[field].append(value)
    else:
      getattr(message, field.name).append(value)
  else:
    # Singular scalars: duplicate assignments are an error unless the caller
    # explicitly allows them (Merge semantics: last value wins).
    if field.is_extension:
      if not allow_multiple_scalars and message.HasExtension(field):
        raise tokenizer.ParseErrorPreviousToken(
            'Message type "%s" should not have multiple "%s" extensions.' %
            (message.DESCRIPTOR.full_name, field.full_name))
      else:
        message.Extensions[field] = value
    else:
      if not allow_multiple_scalars and message.HasField(field.name):
        raise tokenizer.ParseErrorPreviousToken(
            'Message type "%s" should not have multiple "%s" fields.' %
            (message.DESCRIPTOR.full_name, field.name))
      else:
        setattr(message, field.name, value)
class _Tokenizer(object):
  """Protocol buffer ASCII representation tokenizer.
  This class handles the lower level string parsing by splitting it into
  meaningful tokens.
  It was directly ported from the Java protocol buffer API.
  """
  # Whitespace and '#'-to-end-of-line comments are skipped between tokens.
  _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
  _TOKEN = re.compile(
      '[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
      '[0-9+-][0-9a-zA-Z_.+-]*|' # a number
      '\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
      '\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
  _IDENTIFIER = re.compile(r'\w+')
  def __init__(self, lines):
    # Line/column are tracked 0-based internally and reported 1-based in
    # error messages.
    self._position = 0
    self._line = -1
    self._column = 0
    self._token_start = None
    self.token = ''
    self._lines = iter(lines)
    self._current_line = ''
    self._previous_line = 0
    self._previous_column = 0
    self._more_lines = True
    self._SkipWhitespace()
    self.NextToken()
  def AtEnd(self):
    """Checks the end of the text was reached.
    Returns:
      True iff the end was reached.
    """
    return not self.token
  def _PopLine(self):
    # Advance to the next input line once the current one is exhausted.
    while len(self._current_line) <= self._column:
      try:
        self._current_line = self._lines.next()
      except StopIteration:
        self._current_line = ''
        self._more_lines = False
        return
      else:
        self._line += 1
        self._column = 0
  def _SkipWhitespace(self):
    while True:
      self._PopLine()
      match = self._WHITESPACE.match(self._current_line, self._column)
      if not match:
        break
      length = len(match.group(0))
      self._column += length
  def TryConsume(self, token):
    """Tries to consume a given piece of text.
    Args:
      token: Text to consume.
    Returns:
      True iff the text was consumed.
    """
    if self.token == token:
      self.NextToken()
      return True
    return False
  def Consume(self, token):
    """Consumes a piece of text.
    Args:
      token: Text to consume.
    Raises:
      ParseError: If the text couldn't be consumed.
    """
    if not self.TryConsume(token):
      raise self._ParseError('Expected "%s".' % token)
  def ConsumeIdentifier(self):
    """Consumes protocol message field identifier.
    Returns:
      Identifier string.
    Raises:
      ParseError: If an identifier couldn't be consumed.
    """
    result = self.token
    if not self._IDENTIFIER.match(result):
      raise self._ParseError('Expected identifier.')
    self.NextToken()
    return result
  def ConsumeInt32(self):
    """Consumes a signed 32bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If a signed 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=False)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeUint32(self):
    """Consumes an unsigned 32bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If an unsigned 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=False)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeInt64(self):
    """Consumes a signed 64bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If a signed 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=True)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeUint64(self):
    """Consumes an unsigned 64bit integer number.
    Returns:
      The integer parsed.
    Raises:
      ParseError: If an unsigned 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=True)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeFloat(self):
    """Consumes an floating point number.
    Returns:
      The number parsed.
    Raises:
      ParseError: If a floating point number couldn't be consumed.
    """
    try:
      result = ParseFloat(self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeBool(self):
    """Consumes a boolean value.
    Returns:
      The bool parsed.
    Raises:
      ParseError: If a boolean value couldn't be consumed.
    """
    try:
      result = ParseBool(self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeString(self):
    """Consumes a string value.
    Returns:
      The string parsed.
    Raises:
      ParseError: If a string value couldn't be consumed.
    """
    the_bytes = self.ConsumeByteString()
    try:
      return unicode(the_bytes, 'utf-8')
    except UnicodeDecodeError, e:
      raise self._StringParseError(e)
  def ConsumeByteString(self):
    """Consumes a byte array value.
    Returns:
      The array parsed (as a string).
    Raises:
      ParseError: If a byte array value couldn't be consumed.
    """
    # Adjacent string literals are concatenated, as in C or Python.
    the_list = [self._ConsumeSingleByteString()]
    while self.token and self.token[0] in ('\'', '"'):
      the_list.append(self._ConsumeSingleByteString())
    return ''.encode('latin1').join(the_list) ##PY25
    ##!PY25 return b''.join(the_list)
  def _ConsumeSingleByteString(self):
    """Consume one token of a string literal.
    String literals (whether bytes or text) can come in multiple adjacent
    tokens which are automatically concatenated, like in C or Python. This
    method only consumes one token.
    """
    text = self.token
    if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string.')
    if len(text) < 2 or text[-1] != text[0]:
      raise self._ParseError('String missing ending quote.')
    try:
      result = text_encoding.CUnescape(text[1:-1])
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ConsumeEnum(self, field):
    # Consumes an enum value token: either a symbolic name or a number.
    try:
      result = ParseEnum(field, self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result
  def ParseErrorPreviousToken(self, message):
    """Creates and *returns* a ParseError for the previously read token.
    Args:
      message: A message to set for the exception.
    Returns:
      A ParseError instance.
    """
    return ParseError('%d:%d : %s' % (
        self._previous_line + 1, self._previous_column + 1, message))
  def _ParseError(self, message):
    """Creates and *returns* a ParseError for the current token."""
    return ParseError('%d:%d : %s' % (
        self._line + 1, self._column + 1, message))
  def _StringParseError(self, e):
    return self._ParseError('Couldn\'t parse string: ' + str(e))
  def NextToken(self):
    """Reads the next meaningful token."""
    self._previous_line = self._line
    self._previous_column = self._column
    self._column += len(self.token)
    self._SkipWhitespace()
    if not self._more_lines:
      self.token = ''
      return
    match = self._TOKEN.match(self._current_line, self._column)
    if match:
      token = match.group(0)
      self.token = token
    else:
      # Not an identifier/number/string: emit a single punctuation character
      # such as '{', '<' or ':'.
      self.token = self._current_line[self._column]
def ParseInteger(text, is_signed=False, is_long=False):
  """Parses an integer.
  Args:
    text: The text to parse.
    is_signed: True if a signed integer must be parsed.
    is_long: True if a long integer must be parsed.
  Returns:
    The integer value.
  Raises:
    ValueError: Thrown Iff the text is not a valid integer.
  """
  # Do the actual parsing. Exception handling is propagated to caller.
  try:
    # We force 32-bit values to int and 64-bit values to long to make
    # alternate implementations where the distinction is more significant
    # (e.g. the C++ implementation) simpler.
    # Base 0 lets int()/long() accept decimal, hex (0x...) and octal input.
    if is_long:
      result = long(text, 0)
    else:
      result = int(text, 0)
  except ValueError:
    raise ValueError('Couldn\'t parse integer: %s' % text)
  # Check if the integer is sane. Exceptions handled by callers.
  # Index layout matches _INTEGER_CHECKERS: 2 * is_long + is_signed.
  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
  checker.CheckValue(result)
  return result
def ParseFloat(text):
  """Parse a floating point number.
  Args:
    text: Text to parse.
  Returns:
    The number parsed.
  Raises:
    ValueError: If a floating point number couldn't be parsed.
  """
  # Fast path: Python-compatible float syntax.
  try:
    return float(text)
  except ValueError:
    pass
  # Alternative spellings of infinity and NaN.
  if _FLOAT_INFINITY.match(text):
    if text[0] == '-':
      return float('-inf')
    return float('inf')
  if _FLOAT_NAN.match(text):
    return float('nan')
  # Last resort: C-style '1.0f' suffix.
  try:
    return float(text.rstrip('f'))
  except ValueError:
    raise ValueError('Couldn\'t parse float: %s' % text)
def ParseBool(text):
  """Parse a boolean value.
  Args:
    text: Text to parse.
  Returns:
    Boolean values parsed
  Raises:
    ValueError: If text is not a valid boolean.
  """
  if text in ('true', 't', '1'):
    return True
  if text in ('false', 'f', '0'):
    return False
  raise ValueError('Expected "true" or "false".')
def ParseEnum(field, value):
  """Parse an enum value.
  The value can be specified by a number (the enum value), or by
  a string literal (the enum name).
  Args:
    field: Enum field descriptor.
    value: String value.
  Returns:
    Enum value number.
  Raises:
    ValueError: If the enum value could not be parsed.
  """
  enum_descriptor = field.enum_type
  try:
    number = int(value, 0)
  except ValueError:
    # Not numeric: treat the token as a symbolic enum name.
    enum_value = enum_descriptor.values_by_name.get(value, None)
    if enum_value is None:
      raise ValueError(
          'Enum type "%s" has no value named %s.' % (
              enum_descriptor.full_name, value))
    return enum_value.number
  # Numeric value.
  enum_value = enum_descriptor.values_by_number.get(number, None)
  if enum_value is None:
    raise ValueError(
        'Enum type "%s" has no value with number %d.' % (
            enum_descriptor.full_name, number))
  return enum_value.number
""" Misc utilities for the library
Authors: Bertrand Thirion, Matthew Brett, 2015
"""
import sys
import scipy.linalg as spl
import numpy as np
from scipy.stats import norm
from warnings import warn
py3 = sys.version_info[0] >= 3
def z_score(pvalue):
    """ Return the z-score corresponding to a given p-value.
    """
    # Clip to the open interval (0, 1) so the inverse survival function
    # never returns infinities.
    clipped = np.clip(pvalue, 1.e-300, 1. - 1.e-16)
    return norm.isf(clipped)
def multiple_fast_inv(a):
    """Compute the inverse of a set of arrays in place.

    Parameters
    ----------
    a: array_like of shape (n_samples, n_dim, n_dim)
        Set of square matrices to be inverted. `a` is changed in place.

    Returns
    -------
    a: ndarray
        yielding the inverse of the inputs

    Raises
    ------
    LinAlgError :
        If `a` is singular.
    ValueError :
        If `a` is not square, or not 2-dimensional.

    Notes
    -----
    This used to hand-roll LAPACK getrf/getri calls via private SciPy
    helpers (`scipy.linalg.calc_lwork`, the `module_name` attribute of
    LAPACK wrappers, the `rowmajor` keyword), all of which were removed
    from SciPy years ago, so the old code crashes on any modern install.
    `np.linalg.inv` broadcasts over the leading axis and performs the
    same batched LU-based inversion in native code.
    """
    if a.shape[1] != a.shape[2]:
        raise ValueError('a must have shape (n_samples, n_dim, n_dim)')
    # Batched inversion over the leading axis; write back in place to keep
    # the original "changed in place" contract.
    a[...] = np.linalg.inv(a)
    return a
def multiple_mahalanobis(effect, covariance):
    """Returns the squared Mahalanobis distance for a given set of samples
    Parameters
    ----------
    effect: array of shape (n_features, n_samples),
        Each column represents a vector to be evaluated
    covariance: array of shape (n_features, n_features, n_samples),
        Corresponding covariance models stacked along the last axis
    Returns
    -------
    sqd: array of shape (n_samples,)
         the squared distances (one per sample)
    """
    # check size
    # Promote 1D effect / 2D covariance to the stacked (..., n_samples) form.
    if effect.ndim == 1:
        effect = effect[:, np.newaxis]
    if covariance.ndim == 2:
        covariance = covariance[:, :, np.newaxis]
    if effect.shape[0] != covariance.shape[0]:
        raise ValueError('Inconsistant shape for effect and covariance')
    if covariance.shape[0] != covariance.shape[1]:
        raise ValueError('Inconsistant shape for covariance')
    # transpose and make contiguous for the sake of speed
    Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T)
    # compute the inverse of the covariances
    Kt = multiple_fast_inv(Kt)
    # derive the squared Mahalanobis distances
    # x^T K^{-1} x computed via broadcasting, one value per sample.
    sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1)
    return sqd
def full_rank(X, cmax=1e15):
    """ Computes the condition number of X and if it is larger than cmax,
    returns a matrix with a condition number smaller than cmax.
    Parameters
    ----------
    X : array of shape (nrows, ncols)
        input array
    cmax : float, optional (default:1.e15),
        tolerance for condition number
    Returns
    -------
    X : array of shape (nrows, ncols)
        output array
    cond : float,
        actual condition number
    """
    U, s, V = spl.svd(X, full_matrices=False)
    smax = s.max()
    smin = s.min()
    cond = smax / smin
    if cond < cmax:
        # Already well conditioned: hand back the input unchanged.
        return X, cond
    warn('Matrix is singular at working precision, regularizing...')
    # Shift all singular values by lda so the new ratio equals cmax.
    lda = (smax - cmax * smin) / (cmax - 1)
    regularized = np.dot(U, np.dot(np.diag(s + lda), V))
    return regularized, cmax
def pos_recipr(X):
    """ Return element-wise reciprocal of array, setting `X`>=0 to 0
    Return the reciprocal of an array, setting all entries less than or
    equal to 0 to 0. Therefore, it presumes that X should be positive in
    general.
    Parameters
    ----------
    X : array-like
    Returns
    -------
    rX : array
        array of same shape as `X`, dtype np.float, with values set to
        1/X where X > 0, 0 otherwise
    """
    arr = np.asarray(X)
    reciprocal = 1. / arr
    return np.where(arr <= 0, 0, reciprocal)
_basestring = str if py3 else basestring | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
    # Static type checkers see the real submodule symbols eagerly.
    from .configuration_gemma import *
    from .modeling_gemma import *
    from .tokenization_gemma import *
    from .tokenization_gemma_fast import *
else:
    import sys
    _file = globals()["__file__"]
    # At runtime, replace this package module with a lazy proxy that imports
    # submodules only when one of their attributes is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
from os import access
from os import getenv
from os import X_OK
# Name of the bundled jar, expected to live next to this wrapper script.
jar_file = 'SnpSift.jar'
# JVM heap defaults applied only when the caller passes no -Xm* switch
# and _JAVA_OPTIONS is unset (see jvm_opts below).
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the directory portion of *path* after resolving symlinks."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the Java interpreter to invoke.

    Prefers $JAVA_HOME/bin/java when it exists and is executable;
    otherwise falls back to plain 'java' from the PATH.
    """
    home = getenv('JAVA_HOME')
    if home:
        candidate = os.path.join(home, os.path.join('bin', 'java'))
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Split *argv* into (memory_options, prop_options, passthrough_options).

    The argument list passed in argv must not include the script name.
    JVM property switches (-D*, -XX*) and memory switches (-Xm*) are routed
    to the JVM; everything else is passed through to the application.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    for token in argv:
        if token.startswith(('-D', '-XX')):
            prop_opts.append(token)
        elif token.startswith('-Xm'):
            mem_opts.append(token)
        else:
            pass_args.append(token)
    # Mirrors the original shell wrapper: apply the module defaults only when
    # the user gave no -Xm* switch AND _JAVA_OPTIONS is not set at all
    # (a null/empty envar value counts as "set", hence the `is None` test).
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args)
def main():
    """Locate the bundled jar next to this script and run it with the JVM."""
    java = java_executable()
    jar_dir = real_dirname(sys.argv[0])
    mem_opts, prop_opts, pass_args = jvm_opts(sys.argv[1:])
    # Sub-commands given as a fully-qualified class name (eu.*) are launched
    # via the classpath; everything else goes through the jar's manifest.
    jar_arg = '-cp' if pass_args and pass_args[0].startswith('eu') else '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    command = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args
    sys.exit(subprocess.call(command))
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
import unittest
import logging
import tkp.db.model
from tkp.testutil.alchemy import gen_band, gen_dataset, gen_skyregion,\
gen_lightcurve
from tkp.testutil.decorators import database_disabled
import tkp.db
from tkp.steps.varmetric import execute_store_varmetric
logging.basicConfig(level=logging.INFO)
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
class TestApi(unittest.TestCase):
    """Exercise the varmetric pipeline step against a live test database."""

    @classmethod
    def setUpClass(cls):
        # Can't use a regular skip here, due to a Nose bug:
        # https://github.com/nose-devs/nose/issues/946
        if database_disabled():
            raise unittest.SkipTest("Database functionality disabled "
                                    "in configuration.")
        cls.db = tkp.db.Database()
        cls.db.connect()
    def setUp(self):
        # Fresh dataset with one band/skyregion and a generated lightcurve
        # for every test, committed so other sessions can see it.
        self.session = self.db.Session()
        self.dataset = gen_dataset('test varmetric step')
        # NOTE(review): central=150**6 (~1.1e13) looks like it was meant to
        # be 150e6 (150 MHz) -- confirm against gen_band's expected units.
        band = gen_band(dataset=self.dataset, central=150**6)
        skyregion = gen_skyregion(self.dataset)
        lightcurve = gen_lightcurve(band, self.dataset, skyregion)
        self.session.add_all(lightcurve)
        self.session.flush()
        self.session.commit()
    def test_execute_store_varmetric(self):
        # Storing varmetrics for the dataset should complete without error.
        session = self.db.Session()
        execute_store_varmetric(session=session, dataset_id=self.dataset.id)
        self.session.flush()
    def test_execute_store_varmetric_twice(self):
        # Running the step twice must be idempotent (no constraint errors).
        session = self.db.Session()
        execute_store_varmetric(session=session, dataset_id=self.dataset.id)
        self.session.flush()
        execute_store_varmetric(session=session, dataset_id=self.dataset.id)
        self.session.flush()
{
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "eslint ."
},
"dependencies": {
"@popperjs/core": "^2.10.2",
"acorn": "^8.0.0",
"bootstrap": "^5.1.3",
"buttercms": "^1.2.8",
"camelcase-keys": "^7.0.1",
"date-fns": "^2.25.0",
"next": "^12.1.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"sharp": "^0.32.5",
"tiny-slider": "^2.9.4",
"typescript": "^3.3.1"
},
"devDependencies": {
"eslint": "^9",
"eslint-config-next": "^12.1.4"
}
} | json | github | https://github.com/vercel/next.js | examples/cms-buttercms/package.json |
#!/usr/bin/env python
import argparse
import h5py
from lfd.rapprentice.knot_classifier import isKnot
import sys
import os.path as osp
from string import lower
# Regularisation strengths swept in the experiments.
# NOTE(review): C_vals has 9 entries but C_strs omits '0.1' (the string form
# of 1e-1) -- confirm whether that run was dropped on purpose; only C_strs is
# actually used below to build result filenames.
C_vals = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]
C_strs = ['1e-05', '0.0001', '0.001', '0.01', '1.0', '10.0', '100.0', '1000.0']
# Feature sets evaluated, and the model whose result files are read.
feature_types = ['base', 'mul', 'mul_s', 'mul_quad', 'landmark']
MODEL_TYPE='bellman'
def estimate_performance(results_file):
    """Count how many tasks in an evaluation HDF5 file ended in a knot.

    ``results_file`` may be a path or an already-open ``h5py.File``.  Every
    task group (except the top-level 'args' entry) is inspected: the rope
    point cloud after the task's final step is classified with ``isKnot``.

    Returns ``(num_knots, knot_inds, not_inds)``, where the two index lists
    hold the integer task ids that did / did not end in a knot.
    """
    if type(results_file) is str:
        results_file = h5py.File(results_file, 'r')
    num_knots = 0
    knot_inds = []
    not_inds = []
    ctr = 0
    # 'args' is metadata, not a task, hence the -1 in the progress total.
    n_checks = len(results_file) - 1
    for (i_task, task_info) in results_file.iteritems():
        # \r-overwriting progress line (Python-2 style module overall).
        sys.stdout.write("\rchecking task {} / {} ".format(ctr, n_checks))
        sys.stdout.flush()
        ctr += 1
        if str(i_task) == 'args':
            continue
        # if int(i_task) > 3:
        #     break
        N_steps = len(task_info)
        # Rope point cloud recorded after the task's last step.
        final_cld = task_info[str(N_steps-1)]['next_state']['rope_nodes'][:]
        if isKnot(final_cld):
            knot_inds.append(int(i_task))
            num_knots += 1
        else:
            not_inds.append(int(i_task))
    print  # py2 print statement: terminate the \r progress line
    return num_knots, knot_inds, not_inds
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("outfile")
    parser.add_argument("--baseline", type=str, default='../data/evals/7_3_0.1_baseline.h5')
    args = parser.parse_args()
    # Only recompute if the output file doesn't exist, or the user agrees
    # to overwrite it (py2 raw_input).
    recompute_results = True
    if osp.exists(args.outfile):
        user_resp = raw_input("Overwrite results file {}[y/N]".format(args.outfile))
        recompute_results = lower(user_resp) == 'y'
    if recompute_results:
        outf = h5py.File(args.outfile, 'w')
        # Baseline success rate, stored once for the summary table footer.
        base_successes, a, b = estimate_performance(args.baseline)
        base_rate = base_successes / float(len(a) + len(b))
        outf['base_rate'] = base_rate
        # Sweep every (feature set, C) combination and cache its success rate.
        for f in feature_types:
            for c in C_strs:
                results_fname = '../data/evals/jul_6_{}_0.1_c={}_{}.h5'.format(f, c, MODEL_TYPE)
                print "checking {}".format(results_fname)
                num_successes, knot_inds, not_inds = estimate_performance(results_fname)
                print "Success rate:", num_successes / float(len(knot_inds) + len(not_inds))
                key = str((f, float(c)))
                outf[key] = num_successes / float(len(knot_inds) + len(not_inds))
    else:
        outf = h5py.File(args.outfile, 'r')
    # Print a tab-separated table: one row per feature type, one column per C.
    for c in C_strs:
        sys.stdout.write('\t\t{}'.format(c))
    print
    for f in feature_types:
        # Longer names get one tab so the columns still line up.
        if f =='mul_quad' or f == 'landmark':
            sys.stdout.write('{}\t'.format(f))
        else:
            sys.stdout.write('{}\t\t'.format(f))
        for c in C_strs:
            sys.stdout.write('{:.2f}'.format(outf[str((f, float(c)))][()]))
            sys.stdout.write('\t\t')
        print
    # Footer: the baseline rate repeated under every C column.
    sys.stdout.write('baseline\t')
    for c in C_strs:
        sys.stdout.write('{:.2f}'.format(outf['base_rate'][()]))
        sys.stdout.write('\t\t')
    print
    outf.close()
#!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Utilities for working with URLs.
TODO(epoger): move this into tools/utils for broader use?
"""
# System-level imports
import contextlib
import os
import shutil
import urllib
import urlparse
def create_filepath_url(filepath):
    """ Build a file:/// URL for a file on local disk.

    Args:
      filepath: string; path to a file on local disk.  It may be relative or
          absolute and does not need to exist.

    Returns:
      A file:/// URL that always embeds the absolute path to the file,
      regardless of how filepath was specified.

    Raises:
      An Exception, if filepath is already a URL.
    """
    if urlparse.urlparse(filepath).scheme:
        raise Exception('"%s" is already a URL' % filepath)
    absolute_path = os.path.abspath(filepath)
    return urlparse.urljoin('file:', urllib.pathname2url(absolute_path))
def copy_contents(source_url, dest_path, create_subdirs_if_needed=False):
    """ Copy everything readable at source_url into the file dest_path.

    Args:
      source_url: string; complete URL to read from.
      dest_path: string; complete filepath to write to (absolute or relative).
      create_subdirs_if_needed: boolean; create missing parent directories
          of dest_path before writing.

    Raises:
      Some subclass of Exception if the URL cannot be read or the file
      cannot be written.
    """
    if create_subdirs_if_needed:
        parent_dir = os.path.dirname(dest_path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
    source = contextlib.closing(urllib.urlopen(source_url))
    with source as source_handle, open(dest_path, 'wb') as dest_handle:
        shutil.copyfileobj(fsrc=source_handle, fdst=dest_handle)
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMail(common.SavepointCase):
    """Shared fixture for mail tests.

    Patches ``ir.mail_server`` so no real e-mail leaves the test run
    (outgoing mails are captured in class-level lists instead), and creates
    a standard set of users, partners and mail.group records reused by the
    individual test cases.
    """

    @classmethod
    def _init_mock_build_email(cls):
        # Reset the capture buffers filled in by the patched build_email.
        cls._build_email_args_list = []
        cls._build_email_kwargs_list = []
    def setUp(self):
        super(TestMail, self).setUp()
        # Empty the buffers in place so references captured by the patched
        # method's closure keep pointing at the same (now empty) lists.
        self._build_email_args_list[:] = []
        self._build_email_kwargs_list[:] = []
    @classmethod
    def setUpClass(cls):
        super(TestMail, cls).setUpClass()
        cr, uid = cls.cr, cls.uid
        def build_email(self, *args, **kwargs):
            # Record every outgoing mail, then defer to the real method.
            cls._build_email_args_list.append(args)
            cls._build_email_kwargs_list.append(kwargs)
            return build_email.origin(self, *args, **kwargs)
        def send_email(self, cr, uid, message, *args, **kwargs):
            # Pretend the mail was sent; just echo its Message-Id.
            return message['Message-Id']
        cls._init_mock_build_email()
        cls.registry('ir.mail_server')._patch_method('build_email', build_email)
        cls.registry('ir.mail_server')._patch_method('send_email', send_email)
        # Useful models
        cls.ir_model = cls.registry('ir.model')
        cls.ir_model_data = cls.registry('ir.model.data')
        cls.ir_attachment = cls.registry('ir.attachment')
        cls.mail_alias = cls.registry('mail.alias')
        cls.mail_thread = cls.registry('mail.thread')
        cls.mail_group = cls.registry('mail.group')
        cls.mail_mail = cls.registry('mail.mail')
        cls.mail_message = cls.registry('mail.message')
        cls.mail_notification = cls.registry('mail.notification')
        cls.mail_followers = cls.registry('mail.followers')
        cls.mail_message_subtype = cls.registry('mail.message.subtype')
        cls.res_users = cls.registry('res.users')
        cls.res_partner = cls.registry('res.partner')
        # Find Employee group
        cls.group_employee_id = cls.env.ref('base.group_user').id or False
        # Partner Data
        # User Data: employee, noone
        cls.user_employee_id = cls.res_users.create(cr, uid, {
            'name': 'Ernest Employee',
            'login': 'ernest',
            'alias_name': 'ernest',
            'email': 'e.e@example.com',
            'signature': '--\nErnest',
            'notify_email': 'always',
            'groups_id': [(6, 0, [cls.group_employee_id])]
        }, {'no_reset_password': True})
        cls.user_noone_id = cls.res_users.create(cr, uid, {
            'name': 'Noemie NoOne',
            'login': 'noemie',
            'alias_name': 'noemie',
            'email': 'n.n@example.com',
            'signature': '--\nNoemie',
            'notify_email': 'always',
            'groups_id': [(6, 0, [])]
        }, {'no_reset_password': True})
        # Test users to use through the various tests
        cls.res_users.write(cr, uid, uid, {'name': 'Administrator'})
        cls.user_raoul_id = cls.res_users.create(cr, uid, {
            'name': 'Raoul Grosbedon',
            'signature': 'SignRaoul',
            'email': 'raoul@raoul.fr',
            'login': 'raoul',
            'alias_name': 'raoul',
            'groups_id': [(6, 0, [cls.group_employee_id])]
        }, {'no_reset_password': True})
        cls.user_bert_id = cls.res_users.create(cr, uid, {
            'name': 'Bert Tartignole',
            'signature': 'SignBert',
            'email': 'bert@bert.fr',
            'login': 'bert',
            'alias_name': 'bert',
            'groups_id': [(6, 0, [])]
        }, {'no_reset_password': True})
        cls.user_raoul = cls.res_users.browse(cr, uid, cls.user_raoul_id)
        cls.user_bert = cls.res_users.browse(cr, uid, cls.user_bert_id)
        cls.user_admin = cls.res_users.browse(cr, uid, uid)
        cls.partner_admin_id = cls.user_admin.partner_id.id
        cls.partner_raoul_id = cls.user_raoul.partner_id.id
        cls.partner_bert_id = cls.user_bert.partner_id.id
        # Test 'pigs' group to use through the various tests
        cls.group_pigs_id = cls.mail_group.create(
            cr, uid,
            {'name': 'Pigs', 'description': 'Fans of Pigs, unite !', 'alias_name': 'group+pigs'},
            {'mail_create_nolog': True}
        )
        cls.group_pigs = cls.mail_group.browse(cr, uid, cls.group_pigs_id)
        # Test mail.group: public to provide access to everyone
        cls.group_jobs_id = cls.mail_group.create(cr, uid, {'name': 'Jobs', 'public': 'public'})
        # Test mail.group: private to restrict access
        cls.group_priv_id = cls.mail_group.create(cr, uid, {'name': 'Private', 'public': 'private'})
    @classmethod
    def tearDownClass(cls):
        # Remove mocks
        cls.registry('ir.mail_server')._revert_method('build_email')
        cls.registry('ir.mail_server')._revert_method('send_email')
        super(TestMail, cls).tearDownClass()
# -*- coding: utf-8 -*-
#
# Mockery Docs documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 3 14:04:26 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mockery Docs'
copyright = u'2014, Pádraic Brady, Dave Marshall, Wouter, Graham Campbell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MockeryDocsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index2', 'MockeryDocs.tex', u'Mockery Docs Documentation',
u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index2', 'mockerydocs', u'Mockery Docs Documentation',
[u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index2', 'MockeryDocs', u'Mockery Docs Documentation',
u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell', 'MockeryDocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# on_rtd is whether we are on readthedocs.org; there the RTD theme is applied
# automatically by the build service, so we only set it up for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    # print() (function form) so this conf.py also works under Python 3 Sphinx;
    # the original Python-2 print statement is a SyntaxError there.
    print(sphinx_rtd_theme.get_html_theme_path())
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import javax.net.ssl.KeyManager;
import javax.net.ssl.TrustManager;
import java.io.IOException;
import java.security.GeneralSecurityException;
/**
* Interface that gives access to {@link KeyManager} and {@link TrustManager}
* implementations.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface KeyStoresFactory extends Configurable {
/**
* Initializes the keystores of the factory.
*
* @param mode if the keystores are to be used in client or server mode.
* @throws IOException thrown if the keystores could not be initialized due
* to an IO error.
* @throws GeneralSecurityException thrown if the keystores could not be
* initialized due to an security error.
*/
public void init(SSLFactory.Mode mode) throws IOException, GeneralSecurityException;
/**
* Releases any resources being used.
*/
public void destroy();
/**
* Returns the keymanagers for owned certificates.
*
* @return the keymanagers for owned certificates.
*/
public KeyManager[] getKeyManagers();
/**
* Returns the trustmanagers for trusted certificates.
*
* @return the trustmanagers for trusted certificates.
*/
public TrustManager[] getTrustManagers();
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/KeyStoresFactory.java |
from collections import defaultdict, OrderedDict
from .base import DateRangeReportView
from djanalytics.reports import utils
class AudienceOverviewReport(DateRangeReportView):
    """Daily audience summary (visits, pageviews, bounces, ...) plus totals."""
    # Title displayed for this report.
    report_name = "Audience Overview"
    def _build_report_data(self):
        """Aggregate per-day visit counters in Python and return one row per
        day (in date order) followed by a final 'Totals' row."""
        data = []
        tmp_data = OrderedDict()  # visit_date -> counter dict, in date order
        totals = defaultdict(int)
        visits = self.visit_queryset().order_by('visit_date')
        for visit in visits:
            # ideally this would be done in the database
            # but grouping really isn't a thing Django does well
            date = visit.visit_date
            if date not in tmp_data:
                tmp_data[date] = defaultdict(int)
            tmp_data[date]['visits'] += 1
            page_visits = visit.pagevisit_set.count()
            tmp_data[date]['pageviews'] += page_visits
            tmp_data[date]['duration'] += visit.duration or 0
            # A single-page session counts as a bounce.
            if page_visits == 1:
                tmp_data[date]['bounces'] += 1
            # A visitor with more than one recorded visit is "returning".
            if visit.visitor.visit_set.count() > 1:
                tmp_data[date]['returning_visits'] += 1
            else:
                tmp_data[date]['new_visits'] += 1
        # NOTE: .iteritems() makes this module Python-2-only.
        for visit_date, visit_data in tmp_data.iteritems():
            for key, value in visit_data.items():
                totals[key] += value
            date_label = utils.format_date(visit_date)
            data.append(self._build_row(visit_data, date_label))
        if data:
            data.append(self._build_row(totals, "Totals"))
        return data
    def _build_report_headers(self):
        """Column headers, in the same order as _build_row's tuple."""
        return (
            "Date",
            "Visits",
            "New Visits",
            "Returning Visits",
            "Pageviews",
            "Bounce Rate",
            "Pages/Session",
            "Avg. Session Duration",
        )
    def _build_row(self, datadict, first_column):
        """Turn one day's (or the totals') counters into a display row."""
        bounce_rate = utils.percentage(
            datadict['bounces'], datadict['visits']
        )
        pages_per_session = utils.average(
            datadict['pageviews'], datadict['visits']
        )
        avg_duration = utils.average_duration(
            datadict['duration'], datadict['visits']
        )
        return (
            first_column,
            datadict['visits'],
            datadict['new_visits'],
            datadict['returning_visits'],
            datadict['pageviews'],
            bounce_rate,
            pages_per_session,
            avg_duration,
        )
audience_overview = AudienceOverviewReport.as_view() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_recordset
short_description: Manage OpenStack DNS recordsets
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
- Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
updated. Only the I(records), I(description), and I(ttl) values
can be updated.
options:
zone:
description:
- Zone managing the recordset
required: true
name:
description:
- Name of the recordset
required: true
recordset_type:
description:
- Recordset type
required: true
records:
description:
- List of recordset definitions
required: true
description:
description:
- Description of the recordset
ttl:
description:
- TTL (Time To Live) value in seconds
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
# Usage examples rendered by ansible-doc ("recorset" typo fixed below).
EXAMPLES = '''
# Create a recordset named "www.example.net."
- os_recordset:
    cloud: mycloud
    state: present
    zone: example.net.
    name: www
    recordset_type: primary
    records: ['10.1.1.1']
    description: test recordset
    ttl: 3600
# Update the TTL on existing "www.example.net." recordset
- os_recordset:
    cloud: mycloud
    state: present
    zone: example.net.
    name: www
    ttl: 7200
# Delete recordset named "www.example.net."
- os_recordset:
    cloud: mycloud
    state: absent
    zone: example.net.
    name: www
'''
RETURN = '''
recordset:
description: Dictionary describing the recordset.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique recordset ID
type: string
sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
name:
description: Recordset name
type: string
sample: "www.example.net."
zone_id:
description: Zone id
type: string
sample: 9508e177-41d8-434e-962c-6fe6ca880af7
type:
description: Recordset type
type: string
sample: "A"
description:
description: Recordset description
type: string
sample: "Test description"
ttl:
description: Zone TTL value
type: int
sample: 3600
records:
description: Recordset records
type: list
sample: ['10.0.0.1']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, records, description, ttl, zone, recordset):
if state == 'present':
if recordset is None:
return True
if records is not None and recordset.records != records:
return True
if description is not None and recordset.description != description:
return True
if ttl is not None and recordset.ttl != ttl:
return True
if state == 'absent' and recordset:
return True
return False
def main():
    """Ansible entry point: reconcile an OpenStack DNS recordset.

    Looks up the recordset (zone + name, filtered by type), then creates,
    updates or deletes it to match the requested state, honouring check mode.
    Exits through module.exit_json / module.fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        zone=dict(required=True),
        name=dict(required=True),
        recordset_type=dict(required=False),
        records=dict(required=False, type='list'),
        description=dict(required=False, default=None),
        ttl=dict(required=False, default=None, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           required_if=[
                               ('state', 'present',
                                ['recordset_type', 'records'])],
                           supports_check_mode=True,
                           **module_kwargs)
    zone = module.params.get('zone')
    name = module.params.get('name')
    state = module.params.get('state')
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        recordset_type = module.params.get('recordset_type')
        recordset_filter = {'type': recordset_type}
        recordsets = cloud.search_recordsets(zone, name_or_id=name + '.' + zone, filters=recordset_filter)
        if len(recordsets) == 1:
            recordset = recordsets[0]
            try:
                recordset_id = recordset['id']
            except KeyError as e:
                module.fail_json(msg=str(e))
        else:
            # recordsets is filtered by type and should never be more than 1 return
            recordset = None
        if state == 'present':
            records = module.params.get('records')
            description = module.params.get('description')
            ttl = module.params.get('ttl')
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              records, description,
                                                              ttl, zone,
                                                              recordset))
            if recordset is None:
                recordset = cloud.create_recordset(
                    zone=zone, name=name, recordset_type=recordset_type,
                    records=records, description=description, ttl=ttl)
                changed = True
            else:
                if records is None:
                    records = []
                pre_update_recordset = recordset
                changed = _system_state_change(state, records,
                                               description, ttl,
                                               zone, pre_update_recordset)
                if changed:
                    # NOTE(review): the update result is assigned to `zone`
                    # (clobbering the zone name) and exit_json below reports
                    # the pre-update recordset -- confirm whether returning
                    # the refreshed recordset was intended.
                    zone = cloud.update_recordset(
                        zone, recordset_id,
                        records=records,
                        description=description,
                        ttl=ttl)
            module.exit_json(changed=changed, recordset=recordset)
        elif state == 'absent':
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              None, None,
                                                              None,
                                                              None, recordset))
            if recordset is None:
                changed = False
            else:
                cloud.delete_recordset(zone, recordset_id)
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_vote
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#ifndef __riscv_vector
#error RVV not supported
#endif
#include <riscv_vector.h>
/* Build-time probe: compiles only when the compiler provides the RISC-V
 * Vector (RVV) intrinsics (guarded by the #ifndef __riscv_vector above). */
int main(void)
{
    /* Maximum vector length for 32-bit elements at LMUL=1. */
    size_t vlmax = __riscv_vsetvlmax_e32m1();
    /* Splat zero into a vector register, then add it to itself. */
    vuint32m1_t a = __riscv_vmv_v_x_u32m1(0, vlmax);
    vuint32m1_t b = __riscv_vadd_vv_u32m1(a, a, vlmax);
    /* Extract element 0 so the result is observable and nothing is
     * optimized away. */
    return __riscv_vmv_x_s_u32m1_u32(b);
} | c | github | https://github.com/numpy/numpy | numpy/_core/src/_simd/checks/cpu_rvv.c |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.websocket.stomp.websocketstompmessageflow
import org.springframework.context.annotation.Configuration
import org.springframework.messaging.simp.config.MessageBrokerRegistry
import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker
import org.springframework.web.socket.config.annotation.StompEndpointRegistry
import org.springframework.web.socket.config.annotation.WebSocketMessageBrokerConfigurer
// tag::snippet[]
// Enables the STOMP-over-WebSocket message broker for this application.
@Configuration
@EnableWebSocketMessageBroker
class WebSocketConfiguration : WebSocketMessageBrokerConfigurer {
	// "/portfolio" is the HTTP URL clients connect to for the WebSocket handshake.
	override fun registerStompEndpoints(registry: StompEndpointRegistry) {
		registry.addEndpoint("/portfolio")
	}
	// Client-to-server messages go to "/app"-prefixed destinations; the
	// in-memory simple broker relays "/topic" destinations to subscribers.
	override fun configureMessageBroker(registry: MessageBrokerRegistry) {
		registry.setApplicationDestinationPrefixes("/app")
		registry.enableSimpleBroker("/topic")
	}
}
// end::snippet[] | kotlin | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/kotlin/org/springframework/docs/web/websocket/stomp/websocketstompmessageflow/WebSocketConfiguration.kt |
## Input
```javascript
function Foo(props) {
let x;
(x = []), null;
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: Foo,
params: [{}],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Foo(props) {
const $ = _c(1);
let x;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
(x = []), null;
$[0] = x;
} else {
x = $[0];
}
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: Foo,
params: [{}],
};
```
### Eval output
(kind: ok) [] | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/memoize-value-block-value-sequence.expect.md |
# Copyright 2011 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# EAPOL Header Format (see IEEE 802.1X-2004):
#
# Octet 0: Protocol version (1 or 2).
# Octet 1: Packet type:
# 0 = EAP packet
# 1 = EAPOL-Start
# 2 = EAPOL-Logoff
# 3 = EAPOL-Key
# 4 = EAPOL-Encapsulated-ASF-Alert
# Octets 2-3: Length of packet body field (0 if packet body is absent)
# Octets 4-end: Packet body (present only for packet types 0, 3, 4)
#
#======================================================================
import struct
from packet_utils import *
from packet_base import packet_base
from eap import *
class eapol(packet_base):
    """EAP over LAN (EAPOL, IEEE 802.1X) packet.

    Header layout (see the banner comment above): one byte protocol
    version, one byte packet type, and a two-byte big-endian body
    length, optionally followed by a packet body.
    """
    MIN_LEN = 4  # fixed header size: version + type + 2-byte length
    # Protocol version field values.
    V1_PROTO = 1
    V2_PROTO = 2
    # Packet type field values.
    EAP_TYPE = 0
    EAPOL_START_TYPE = 1
    EAPOL_LOGOFF_TYPE = 2
    EAPOL_KEY_TYPE = 3
    EAPOL_ENCAPSULATED_ASF_ALERT = 4
    # Type code -> human-readable name (used by __str__ and warnings).
    type_names = {EAP_TYPE: "EAP",
                  EAPOL_START_TYPE: "EAPOL-Start",
                  EAPOL_LOGOFF_TYPE: "EAPOL-Logoff",
                  EAPOL_KEY_TYPE: "EAPOL-Key",
                  EAPOL_ENCAPSULATED_ASF_ALERT: "EAPOL-Encapsulated-ASF-Alert"}
    @staticmethod
    def type_name(type):
        # Unknown codes fall back to "typeN" instead of raising.
        return eapol.type_names.get(type, "type%d" % type)
    def __init__(self, raw=None, prev=None, **kw):
        # prev is the enclosing packet layer; kw holds field overrides that
        # packet_base._init applies after parsing.
        packet_base.__init__(self)
        self.prev = prev
        self.version = self.V1_PROTO
        self.type = self.EAP_TYPE
        self.bodylen = 0
        if raw is not None:
            self.parse(raw)
        self._init(kw)
    def __str__(self):
        # e.g. "[EAPOL v1 EAP]"
        s = '[EAPOL v%d %s]' % (self.version, self.type_name(self.type))
        return s
    def parse(self, raw):
        # Decode the 4-byte header; on truncated input, log a warning and
        # leave the packet unparsed instead of raising.
        assert isinstance(raw, bytes)
        self.raw = raw
        dlen = len(raw)
        if dlen < self.MIN_LEN:
            self.msg('(eapol parse) warning EAPOL packet data too short to parse header: data len %u' % (dlen,))
            return
        (self.version, self.type, self.bodylen) \
            = struct.unpack('!BBH', raw[:self.MIN_LEN])
        self.parsed = True
        if self.type == self.EAP_TYPE:
            # An EAP payload follows the header; chain to the eap parser.
            self.next = eap(raw=raw[self.MIN_LEN:], prev=self)
        elif (self.type == self.EAPOL_START_TYPE
              or self.type == self.EAPOL_LOGOFF_TYPE):
            pass # These types have no payloads.
        else:
            self.msg('warning unsupported EAPOL type: %s' % (self.type_name(self.type),))
def hdr(self, payload):
return struct.pack('!BBH', self.version, self.type, self.bodylen) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import click
from platformio import app, exception
from platformio.libmanager import LibraryManager
from platformio.util import get_api_result
# Row template shared by the library listing output; the placeholders are
# filled by echo_liblist_header() / echo_liblist_item().
LIBLIST_TPL = ("[{id:^14}] {name:<25} {compatibility:<30} "
               "\"{authornames}\": {description}")
def echo_liblist_header():
    """Print the column header row for library listings, then a horizontal
    rule spanning the full terminal width."""
    header = LIBLIST_TPL.format(
        id=click.style("ID", fg="green"),
        name=click.style("Name", fg="cyan"),
        compatibility=click.style("Compatibility", fg="yellow"),
        authornames="Authors",
        description="Description")
    click.echo(header)
    width, _height = click.get_terminal_size()
    click.echo("-" * width)
def echo_liblist_item(item):
    """Print one library row: id, name, framework/platform compatibility,
    author names, and description."""
    compat = ", ".join(item['frameworks'] + item['platforms'])
    row = LIBLIST_TPL.format(
        id=click.style(str(item['id']), fg="green"),
        name=click.style(item['name'], fg="cyan"),
        compatibility=click.style(compat, fg="yellow"),
        authornames=", ".join(item['authornames']),
        description=item['description'])
    click.echo(row)
# Root command group for the library-manager subcommands. No docstring on
# purpose: click would surface it as --help text.
@click.group(short_help="Library Manager")
def cli():
    pass
@cli.command("search", short_help="Search for library")
@click.option("--json-output", is_flag=True)
@click.option("--page", type=click.INT, default=1)
@click.option("-a", "--author", multiple=True)
@click.option("-k", "--keyword", multiple=True)
@click.option("-f", "--framework", multiple=True)
@click.option("-p", "--platform", multiple=True)
@click.argument("query", required=False, nargs=-1)
def lib_search(query, json_output, page, **filters):
    # Build a search query from the free-form terms plus the repeatable
    # filter options, then page through API results interactively.
    # (Comment instead of docstring: click shows docstrings as --help text.)
    if not query:
        query = []
    if not isinstance(query, list):
        query = list(query)
    # Fold each filter option into the query string as key:"value" terms.
    for key, values in filters.iteritems():
        for value in values:
            query.append('%s:"%s"' % (key, value))
    result = get_api_result("/lib/search",
                            dict(query=" ".join(query), page=page))
    if json_output:
        click.echo(json.dumps(result))
        return
    if result['total'] == 0:
        # Nothing matched: print search-syntax hints and bail out.
        click.secho(
            "Nothing has been found by your request\n"
            "Try a less-specific search or use truncation (or wildcard) "
            "operator", fg="yellow", nl=False)
        click.secho(" *", fg="green")
        click.secho("For example: DS*, PCA*, DHT* and etc.\n", fg="yellow")
        click.echo("For more examples and advanced search syntax, "
                   "please use documentation:")
        click.secho("http://docs.platformio.org"
                    "/en/latest/userguide/lib/cmd_search.html\n", fg="cyan")
        return
    click.secho("Found %d libraries:\n" % result['total'],
                fg="green" if result['total'] else "yellow")
    if result['total']:
        echo_liblist_header()
    while True:
        for item in result['items']:
            echo_liblist_item(item)
        # Stop once the last page has been printed.
        if (int(result['page']) * int(result['perpage']) >=
                int(result['total'])):
            break
        # Only advance to the next page when prompting is enabled and the
        # user confirms; otherwise stop after the current page.
        if (app.get_setting("enable_prompts") and
                click.confirm("Show next libraries?")):
            result = get_api_result(
                "/lib/search",
                dict(query=" ".join(query), page=int(result['page']) + 1)
            )
        else:
            break
@cli.command("install", short_help="Install library")
@click.argument("libid", type=click.INT, nargs=-1, metavar="[LIBRARY_ID]")
@click.option("-v", "--version")
@click.pass_context
def lib_install(ctx, libid, version):
    # Install each requested library by numeric ID, then recursively install
    # any dependencies declared in its metadata. (Comment instead of
    # docstring: click shows docstrings as --help text.)
    lm = LibraryManager()
    for id_ in libid:
        click.echo(
            "Installing library [ %s ]:" % click.style(str(id_), fg="green"))
        try:
            if not lm.install(id_, version):
                continue
            info = lm.get_info(id_)
            click.secho(
                "The library #%s '%s' has been successfully installed!"
                % (str(id_), info['name']), fg="green")
            if "dependencies" in info:
                click.secho("Installing dependencies:", fg="yellow")
                # Metadata may declare a single dependency dict or a list.
                _dependencies = info['dependencies']
                if not isinstance(_dependencies, list):
                    _dependencies = [_dependencies]
                for item in _dependencies:
                    try:
                        lib_install_dependency(ctx, item)
                    except AssertionError:
                        # The dependency search returned no candidates.
                        raise exception.LibInstallDependencyError(str(item))
        except exception.LibAlreadyInstalled:
            click.secho("Already installed", fg="yellow")
def lib_install_dependency(ctx, data):
    """Resolve a dependency declaration to a library ID and install it.

    ``data`` is a dict of search criteria from a library manifest. Raises
    AssertionError when the search finds no candidates (the caller,
    lib_install, converts that into LibInstallDependencyError); prompts the
    user when more than one library matches and prompts are enabled.
    """
    assert isinstance(data, dict)
    query = []
    for key in data.keys():
        if key in ("authors", "frameworks", "platforms", "keywords"):
            # Plural criteria become repeated singular key:"value" terms;
            # a comma-separated string is split into individual values.
            values = data[key]
            if not isinstance(values, list):
                values = [v.strip() for v in values.split(",") if v]
            for value in values:
                query.append('%s:"%s"' % (key[:-1], value))
        elif isinstance(data[key], basestring):
            # Any other scalar criterion becomes a required phrase term.
            query.append('+"%s"' % data[key])
    result = get_api_result("/lib/search", dict(query=" ".join(query)))
    assert result['total'] > 0
    if result['total'] == 1 or not app.get_setting("enable_prompts"):
        # Unambiguous (or non-interactive): install the first match.
        ctx.invoke(lib_install, libid=[result['items'][0]['id']])
    else:
        click.secho(
            "Conflict: More than one dependent libraries have been found "
            "by request %s:" % json.dumps(data), fg="red")
        echo_liblist_header()
        for item in result['items']:
            echo_liblist_item(item)
        deplib_id = click.prompt(
            "Please choose one dependent library ID",
            type=click.Choice([str(i['id']) for i in result['items']]))
        ctx.invoke(lib_install, libid=[int(deplib_id)])
@cli.command("uninstall", short_help="Uninstall libraries")
@click.argument("libid", type=click.INT, nargs=-1)
def lib_uninstall(libid):
    # Remove each library by ID, reporting every successful removal.
    # (Comment instead of docstring so click's --help output is unchanged.)
    manager = LibraryManager()
    for library_id in libid:
        info = manager.get_info(library_id)
        if not manager.uninstall(library_id):
            continue
        message = ("The library #%s '%s' has been successfully "
                   "uninstalled!" % (str(library_id), info['name']))
        click.secho(message, fg="green")
@cli.command("list", short_help="List installed libraries")
@click.option("--json-output", is_flag=True)
def lib_list(json_output):
    # Print installed libraries either as raw JSON or as a formatted table.
    lm = LibraryManager()
    items = lm.get_installed().values()
    if json_output:
        click.echo(json.dumps(items))
        return
    if not items:
        return
    echo_liblist_header()
    for item in sorted(items, key=lambda i: i['id']):
        # The table shows author names only, not the full author dicts.
        item['authornames'] = [i['name'] for i in item['authors']]
        echo_liblist_item(item)
@cli.command("show", short_help="Show details about installed library")
@click.argument("libid", type=click.INT)
def lib_show(libid):
    # Render a human-readable summary of one installed library's metadata.
    lm = LibraryManager()
    info = lm.get_info(libid)
    click.secho(info['name'], fg="cyan")
    click.echo("-" * len(info['name']))
    # Format each author as "name <email> url (maintainer)", skipping
    # empty fields.
    _authors = []
    for author in info['authors']:
        _data = []
        for key in ("name", "email", "url", "maintainer"):
            if not author[key]:
                continue
            if key == "email":
                _data.append("<%s>" % author[key])
            elif key == "maintainer":
                _data.append("(maintainer)")
            else:
                _data.append(author[key])
        _authors.append(" ".join(_data))
    click.echo("Authors: %s" % ", ".join(_authors))
    click.echo("Keywords: %s" % ", ".join(info['keywords']))
    # Frameworks/platforms are optional manifest fields.
    if "frameworks" in info:
        click.echo("Frameworks: %s" % ", ".join(info['frameworks']))
    if "platforms" in info:
        click.echo("Platforms: %s" % ", ".join(info['platforms']))
    click.echo("Version: %s" % info['version'])
    click.echo()
    click.echo(info['description'])
    click.echo()
@cli.command("update", short_help="Update installed libraries")
@click.argument("libid", type=click.INT, nargs=-1, required=False,
                metavar="[LIBRARY_ID]")
@click.pass_context
def lib_update(ctx, libid):
    # Compare each installed library against the latest published version
    # and reinstall any that are out of date. With explicit IDs, only those
    # libraries are considered.
    lm = LibraryManager()
    for id_, latest_version in (lm.get_latest_versions() or {}).items():
        if libid and int(id_) not in libid:
            continue
        info = lm.get_info(int(id_))
        click.echo("Updating [ %s ] %s library:" % (
            click.style(id_, fg="yellow"),
            click.style(info['name'], fg="cyan")))
        current_version = info['version']
        if latest_version is None:
            # The registry did not report a latest version for this library.
            click.secho("Unknown library", fg="red")
            continue
        click.echo("Versions: Current=%s, Latest=%s \t " % (
            current_version, latest_version), nl=False)
        if current_version == latest_version:
            click.echo("[%s]" % (click.style("Up-to-date", fg="green")))
            continue
        else:
            click.echo("[%s]" % (click.style("Out-of-date", fg="red")))
        # An update is implemented as uninstall + fresh install.
        ctx.invoke(lib_uninstall, libid=[int(id_)])
        ctx.invoke(lib_install, libid=[int(id_)])
@cli.command("register", short_help="Register new library")
@click.argument("config_url")
def lib_register(config_url):
if (not config_url.startswith("http://") and not
config_url.startswith("https://")):
raise exception.InvalidLibConfURL(config_url)
result = get_api_result("/lib/register", data=dict(config_url=config_url))
if "message" in result and result['message']:
click.secho(result['message'], fg="green" if "successed" in result and
result['successed'] else "red") | unknown | codeparrot/codeparrot-clean | ||
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2alpha1CronJob(object):
    """Swagger-generated model for the batch/v2alpha1 CronJob resource.

    NOTE: originally produced by the swagger code generator; the model
    mirrors the CronJob schema: apiVersion, kind, metadata, spec, status.
    """

    # Python attribute name -> swagger type name.
    swagger_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V2alpha1CronJobSpec',
        'status': 'V2alpha1CronJobStatus'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
        """Build a CronJob model; every field is optional and defaults to None."""
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None
        # Route provided values through the property setters, leaving the
        # backing fields of unset arguments at None.
        provided = (('api_version', api_version), ('kind', kind),
                    ('metadata', metadata), ('spec', spec), ('status', status))
        for attr_name, attr_value in provided:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def api_version(self):
        """str: versioned schema identifier (JSON key ``apiVersion``)."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the versioned schema identifier of this object."""
        self._api_version = api_version

    @property
    def kind(self):
        """str: REST resource kind represented by this object."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the REST resource kind."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ObjectMeta: standard object metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the standard object metadata."""
        self._metadata = metadata

    @property
    def spec(self):
        """V2alpha1CronJobSpec: desired behavior, including the schedule."""
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Set the desired behavior of the cron job."""
        self._spec = spec

    @property
    def status(self):
        """V2alpha1CronJobStatus: current status of the cron job."""
        return self._status

    @status.setter
    def status(self, status):
        """Set the current status of the cron job."""
        self._status = status

    def _convert(self, value):
        """Recursively turn nested swagger models into plain values."""
        if isinstance(value, list):
            return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return dict((k, v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items())
        return value

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        return dict((attr, self._convert(getattr(self, attr)))
                    for attr in self.swagger_types)

    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Used by print/pprint; identical to to_str()."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are V2alpha1CronJob with equal state."""
        return isinstance(other, V2alpha1CronJob) and self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | unknown | codeparrot/codeparrot-clean | ||
import sys
import pygame
from pygame import Rect
def draw_background(screen, tile_img_file, field_rect):
    """Tile the entire screen with the background image, then draw the
    playing field: a black border rectangle under a brown field rectangle."""
    tile = pygame.image.load(tile_img_file).convert_alpha()
    tile_rect = tile.get_rect()
    row_count = int(screen.get_height() / tile_rect.height) + 1
    col_count = int(screen.get_width() / tile_rect.width) + 1
    for row in range(row_count):
        for col in range(col_count):
            tile_rect.topleft = (col * tile_rect.width, row * tile_rect.height)
            screen.blit(tile, tile_rect)
    # inflate(8, 8) grows the rect by 4px on every side, which is exactly
    # the hand-built boundary rectangle of the original.
    border_rect = field_rect.inflate(8, 8)
    pygame.draw.rect(screen, (0, 0, 0), border_rect)
    pygame.draw.rect(screen, (109, 41, 1), field_rect)
def run_game():
    """Open a 400x400 window and run the main loop: process quit events and
    redraw the tiled background and playing field, capped at 30 FPS."""
    # Game parameters
    SCREEN_WIDTH, SCREEN_HEIGHT = 400, 400
    FIELD_RECT = Rect(50, 50, 300, 300)
    BG_TILE_IMG = '../images/brick_tile.png'
    pygame.init()
    screen = pygame.display.set_mode(
        (SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
    clock = pygame.time.Clock()
    while True:
        # tick(30) caps the frame rate; its return value (elapsed ms) was
        # previously bound to an unused local, so the binding is dropped.
        clock.tick(30)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit_game()
        draw_background(screen, BG_TILE_IMG, FIELD_RECT)
        pygame.display.flip()
def exit_game():
    """Terminate the program immediately by raising SystemExit."""
    sys.exit()
run_game() | unknown | codeparrot/codeparrot-clean | ||
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cStringIO import StringIO
from .. import parser, serializer
class TokenizerTest(unittest.TestCase):
    """Round-trip tests for the manifest parser/serializer pair: each case
    parses an input manifest and checks the serialized output, which is
    either the input itself or an explicitly supplied normalized form."""
    def setUp(self):
        # Fresh parser and serializer per test so no state leaks across cases.
        self.serializer = serializer.ManifestSerializer()
        self.parser = parser.Parser()
    def serialize(self, input_str):
        # Parse the manifest text and serialize the resulting tree.
        return self.serializer.serialize(self.parser.parse(input_str))
    def compare(self, input_str, expected=None):
        # Serialization must reproduce `expected` (utf-8 encoded); when
        # omitted, the serializer must round-trip the input unchanged.
        if expected is None:
            expected = input_str
        expected = expected.encode("utf8")
        actual = self.serialize(input_str)
        self.assertEquals(actual, expected)
    # --- Key/section structure and conditional-value round-trips. ---
    def test_0(self):
        self.compare("""key: value
[Heading 1]
  other_key: other_value
""")
    def test_1(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a or b: other_value
""")
    def test_2(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a or b: other_value
    fallback_value
""")
    def test_3(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == 1: other_value
    fallback_value
""")
    def test_4(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == "1": other_value
    fallback_value
""")
    def test_5(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == "abc"[1]: other_value
    fallback_value
""")
    def test_6(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == "abc"[c]: other_value
    fallback_value
""")
    # Redundant parentheses are dropped when precedence already matches.
    def test_7(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if (a or b) and c: other_value
    fallback_value
""",
"""key: value
[Heading 1]
  other_key:
    if a or b and c: other_value
    fallback_value
""")
    def test_8(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a or (b and c): other_value
    fallback_value
""")
    def test_9(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if not (a and b): other_value
    fallback_value
""")
    def test_10(self):
        self.compare("""key: value
[Heading 1]
  some_key: some_value
[Heading 2]
  other_key: other_value
""")
    def test_11(self):
        self.compare("""key:
  if not a and b and c and d: true
""")
    # --- List values and quoting of special characters. ---
    def test_12(self):
        self.compare("""[Heading 1]
  key: [a:1, b:2]
""")
    def test_13(self):
        self.compare("""key: [a:1, "b:#"]
""")
    def test_14(self):
        self.compare("""key: [","]
""")
    def test_15(self):
        self.compare("""key: ,
""")
    def test_16(self):
        self.compare("""key: ["]", b]
""")
    def test_17(self):
        self.compare("""key: ]
""")
    def test_18(self):
        self.compare("""key: \]
""", """key: ]
""")
    # --- Escape-sequence handling (\x.., \u...., \U........, specials). ---
    def test_escape_0(self):
        self.compare(r"""k\t\:y: \a\b\f\n\r\t\v""",
                     r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
""")
    def test_escape_1(self):
        self.compare(r"""k\x00: \x12A\x45""",
                     r"""k\x00: \x12AE
""")
    def test_escape_2(self):
        self.compare(r"""k\u0045y: \u1234A\uABc6""",
                     u"""kEy: \u1234A\uabc6
""")
    def test_escape_3(self):
        self.compare(r"""k\u0045y: \u1234A\uABc6""",
                     u"""kEy: \u1234A\uabc6
""")
    def test_escape_4(self):
        self.compare(r"""key: '\u1234A\uABc6'""",
                     u"""key: \u1234A\uabc6
""")
    def test_escape_5(self):
        self.compare(r"""key: [\u1234A\uABc6]""",
                     u"""key: [\u1234A\uabc6]
""")
    def test_escape_6(self):
        self.compare(r"""key: [\u1234A\uABc6\,]""",
                     u"""key: ["\u1234A\uabc6,"]
""")
    def test_escape_7(self):
        self.compare(r"""key: [\,\]\#]""",
                     r"""key: [",]#"]
""")
    def test_escape_8(self):
        self.compare(r"""key: \#""",
                     r"""key: "#"
""")
    def test_escape_9(self):
        self.compare(r"""key: \U10FFFFabc""",
                     u"""key: \U0010FFFFabc
""")
    def test_escape_10(self):
        self.compare(r"""key: \u10FFab""",
                     u"""key: \u10FFab
""")
    def test_escape_11(self):
        self.compare(r"""key: \\ab
""")
    # --- Atom values (@True/@False/@Reset) are preserved verbatim. ---
    def test_atom_1(self):
        self.compare(r"""key: @True
""")
    def test_atom_2(self):
        self.compare(r"""key: @False
""")
    def test_atom_3(self):
        self.compare(r"""key: @Reset
""")
def test_atom_4(self):
self.compare(r"""key: [a, @Reset, b]
""") | unknown | codeparrot/codeparrot-clean | ||
import functools
from django.http import HttpRequest
def sensitive_variables(*variables):
    """
    Mark which local variables of the decorated function are sensitive so
    they can later receive special treatment — for example being scrubbed
    when unhandled exceptions are logged.

    Two forms are accepted:

    * with explicit variable names::

        @sensitive_variables('user', 'password', 'credit_card')
        def my_function(user):
            password = user.pass_word
            credit_card = user.credit_card_number
            ...

    * bare, which marks every local variable as sensitive::

        @sensitive_variables()
        def my_function()
            ...
    """
    def decorator(func):
        @functools.wraps(func)
        def sensitive_variables_wrapper(*func_args, **func_kwargs):
            # (Re)attach the marker on every call; introspection code reads
            # it off the wrapper found in the traceback. NOTE: the wrapper's
            # name is significant — keep it stable.
            sensitive_variables_wrapper.sensitive_variables = (
                variables if variables else '__ALL__')
            return func(*func_args, **func_kwargs)
        return sensitive_variables_wrapper
    return decorator
def sensitive_post_parameters(*parameters):
"""
Indicate which POST parameters used in the decorated view are sensitive,
so that those parameters can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Accept two forms:
* with specified parameters:
@sensitive_post_parameters('password', 'credit_card')
def my_view(request):
pw = request.POST['password']
cc = request.POST['credit_card']
...
* without any specified parameters, in which case consider all
variables are sensitive:
@sensitive_post_parameters()
def my_view(request)
...
"""
def decorator(view):
@functools.wraps(view)
def sensitive_post_parameters_wrapper(request, *args, **kwargs):
assert isinstance(request, HttpRequest), (
"sensitive_post_parameters didn't receive an HttpRequest. "
"If you are decorating a classmethod, be sure to use "
"@method_decorator."
)
if parameters:
request.sensitive_post_parameters = parameters
else:
request.sensitive_post_parameters = '__ALL__'
return view(request, *args, **kwargs)
return sensitive_post_parameters_wrapper
return decorator | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
require_relative '../json/ruby_events'
module Psych
module Visitors
class JSONTree < YAMLTree
include Psych::JSON::RubyEvents
def self.create options = {}
emitter = Psych::JSON::TreeBuilder.new
class_loader = ClassLoader.new
ss = ScalarScanner.new class_loader
new(emitter, ss, options)
end
def accept target
if target.respond_to?(:encode_with)
dump_coder target
else
send(@dispatch_cache[target.class], target)
end
end
end
end
end | ruby | github | https://github.com/ruby/ruby | ext/psych/lib/psych/visitors/json_tree.rb |
#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The TestRunner2 package is an alternate implementation of the TestRunner
class that uses the manager_worker_broker module to send sets of tests to
workers and receive their completion messages accordingly.
"""
import logging
import time
from webkitpy.tool import grammar
from webkitpy.layout_tests.layout_package import manager_worker_broker
from webkitpy.layout_tests.layout_package import test_runner
from webkitpy.layout_tests.layout_package import worker
_log = logging.getLogger(__name__)
class _WorkerState(object):
"""A class for the TestRunner/manager to use to track the current state
of the workers."""
def __init__(self, number, worker_connection):
self.worker_connection = worker_connection
self.number = number
self.done = False
self.current_test_name = None
self.next_timeout = None
self.wedged = False
self.stats = {}
self.stats['name'] = worker_connection.name
self.stats['num_tests'] = 0
self.stats['total_time'] = 0
def __repr__(self):
return "_WorkerState(" + str(self.__dict__) + ")"
class TestRunner2(test_runner.TestRunner):
    def __init__(self, port, options, printer):
        # Delegate base setup, then reset the per-run aggregation state.
        test_runner.TestRunner.__init__(self, port, options, printer)
        self._all_results = []
        self._group_stats = {}
        self._current_result_summary = None
        # This maps worker names to the state we are tracking for each of them.
        self._worker_states = {}
    def is_done(self):
        # Done only when we have workers at all AND every one has finished
        # (or been declared wedged). An empty mapping is falsy, so a run
        # that has not started yet is never "done".
        worker_states = self._worker_states.values()
        return worker_states and all(self._worker_is_done(worker_state) for worker_state in worker_states)
    def _worker_is_done(self, worker_state):
        # A worker counts as done when it reported completion or was marked
        # wedged. Otherwise, if its current test overran its timeout by more
        # than WEDGE_PADDING seconds, declare it wedged so the run can end
        # instead of hanging forever.
        t = time.time()
        if worker_state.done or worker_state.wedged:
            return True
        next_timeout = worker_state.next_timeout
        WEDGE_PADDING = 40.0
        if next_timeout and t > next_timeout + WEDGE_PADDING:
            _log.error('')
            worker_state.worker_connection.log_wedged_worker(worker_state.current_test_name)
            _log.error('')
            worker_state.wedged = True
            return True
        return False
    def name(self):
        # Human-readable identifier used in logs and diagnostics.
        return 'TestRunner2'
def _run_tests(self, file_list, result_summary):
"""Runs the tests in the file_list.
Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
test_timings, individual_test_timings)
interrupted is whether the run was interrupted
keyboard_interrupted is whether someone typed Ctrl^C
thread_timings is a list of dicts with the total runtime
of each thread with 'name', 'num_tests', 'total_time' properties
test_timings is a list of timings for each sharded subdirectory
of the form [time, directory_name, num_tests]
individual_test_timings is a list of run times for each test
in the form {filename:filename, test_run_time:test_run_time}
result_summary: summary object to populate with the results
"""
self._current_result_summary = result_summary
self._all_results = []
self._group_stats = {}
self._worker_states = {}
keyboard_interrupted = False
interrupted = False
thread_timings = []
self._printer.print_update('Sharding tests ...')
test_lists = self._shard_tests(file_list,
(int(self._options.child_processes) > 1) and not self._options.experimental_fully_parallel)
num_workers = self._num_workers(len(test_lists))
manager_connection = manager_worker_broker.get(self._port, self._options,
self, worker.Worker)
if self._options.dry_run:
return (keyboard_interrupted, interrupted, thread_timings,
self._group_stats, self._all_results)
self._printer.print_update('Starting %s ...' %
grammar.pluralize('worker', num_workers))
for worker_number in xrange(num_workers):
worker_connection = manager_connection.start_worker(worker_number)
worker_state = _WorkerState(worker_number, worker_connection)
self._worker_states[worker_connection.name] = worker_state
# FIXME: If we start workers up too quickly, DumpRenderTree appears
# to thrash on something and time out its first few tests. Until
# we can figure out what's going on, sleep a bit in between
# workers.
time.sleep(0.1)
self._printer.print_update("Starting testing ...")
for test_list in test_lists:
manager_connection.post_message('test_list', test_list[0], test_list[1])
# We post one 'stop' message for each worker. Because the stop message
# are sent after all of the tests, and because each worker will stop
# reading messsages after receiving a stop, we can be sure each
# worker will get a stop message and hence they will all shut down.
for i in xrange(num_workers):
manager_connection.post_message('stop')
try:
while not self.is_done():
# We loop with a timeout in order to be able to detect wedged threads.
manager_connection.run_message_loop(delay_secs=1.0)
if any(worker_state.wedged for worker_state in self._worker_states.values()):
_log.error('')
_log.error('Remaining workers are wedged, bailing out.')
_log.error('')
else:
_log.debug('No wedged threads')
# Make sure all of the workers have shut down (if possible).
for worker_state in self._worker_states.values():
if not worker_state.wedged and worker_state.worker_connection.is_alive():
worker_state.worker_connection.join(0.5)
assert not worker_state.worker_connection.is_alive()
except KeyboardInterrupt:
_log.info("Interrupted, exiting")
self.cancel_workers()
keyboard_interrupted = True
except test_runner.TestRunInterruptedException, e:
_log.info(e.reason)
self.cancel_workers()
interrupted = True
except:
# Unexpected exception; don't try to clean up workers.
_log.info("Exception raised, exiting")
raise
thread_timings = [worker_state.stats for worker_state in self._worker_states.values()]
# FIXME: should this be a class instead of a tuple?
return (interrupted, keyboard_interrupted, thread_timings,
self._group_stats, self._all_results)
def cancel_workers(self):
for worker_state in self._worker_states.values():
worker_state.worker_connection.cancel()
def handle_started_test(self, source, test_info, hang_timeout):
worker_state = self._worker_states[source]
worker_state.current_test_name = self._port.relative_test_filename(test_info.filename)
worker_state.next_timeout = time.time() + hang_timeout
def handle_done(self, source):
worker_state = self._worker_states[source]
worker_state.done = True
def handle_exception(self, source, exception_info):
exception_type, exception_value, exception_traceback = exception_info
raise exception_type, exception_value, exception_traceback
def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
self._group_stats[list_name] = (num_tests, elapsed_time)
def handle_finished_test(self, source, result, elapsed_time):
worker_state = self._worker_states[source]
worker_state.next_timeout = None
worker_state.current_test_name = None
worker_state.stats['total_time'] += elapsed_time
worker_state.stats['num_tests'] += 1
if worker_state.wedged:
# This shouldn't happen if we have our timeouts tuned properly.
_log.error("%s unwedged", source)
self._all_results.append(result)
self._update_summary_with_result(self._current_result_summary, result) | unknown | codeparrot/codeparrot-clean | ||
from .vartypes import typefilter, continuous_types
from theano import theano, tensor as t
from theano.gof.graph import inputs
from .memoize import memoize
__all__ = ['gradient', 'hessian', 'hessian_diag', 'inputvars', 'cont_inputs']
def inputvars(a):
    """
    Get the inputs into a theano variables

    Parameters
    ----------
    a : theano variable

    Returns
    -------
    r : list of tensor variables that are inputs
    """
    found = []
    for var in inputs(makeiter(a)):
        if isinstance(var, t.TensorVariable):
            found.append(var)
    return found
def cont_inputs(f):
    """
    Get the continuous inputs into a theano variables

    Parameters
    ----------
    a : theano variable

    Returns
    -------
    r : list of tensor variables that are continuous inputs
    """
    tensor_inputs = inputvars(f)
    return typefilter(tensor_inputs, continuous_types)
"""
Theano derivative functions
"""
def gradient1(f, v):
"""flat gradient of f wrt v"""
return t.flatten(t.grad(f, v, disconnected_inputs='warn'))
@memoize
def gradient(f, vars=None):
    """Concatenated flat gradient of f w.r.t. vars (defaults to all
    continuous inputs of f)."""
    selected = cont_inputs(f) if vars is None else vars
    return t.concatenate([gradient1(f, var) for var in selected], axis=0)
def jacobian1(f, v):
    """Jacobian of f with respect to v, one gradient row per element of f."""
    flat_f = t.flatten(f)
    idx = t.arange(flat_f.shape[0])
    # theano.map returns (outputs, updates); only the outputs are needed.
    return theano.map(lambda i: gradient1(flat_f[i], v), idx)[0]
@memoize
def jacobian(f, vars=None):
    """Jacobian of f w.r.t. vars, columns concatenated across variables
    (defaults to all continuous inputs of f)."""
    selected = cont_inputs(f) if vars is None else vars
    return t.concatenate([jacobian1(f, var) for var in selected], axis=1)
@memoize
def hessian(f, vars=None):
    # NOTE: deliberately returns the *negative* Jacobian of the gradient,
    # i.e. -H(f) -- presumably the observed-information sign convention;
    # confirm with callers before "fixing" the sign.
    return -jacobian(gradient(f, vars), vars)
def hessian_diag1(f, v):
    """Diagonal of the Hessian of f with respect to v, computed elementwise."""
    grad_v = gradient1(f, v)
    idx = t.arange(grad_v.shape[0])
    # d²f/dv_i² is element i of the gradient of grad_v[i].
    return theano.map(lambda i: gradient1(grad_v[i], v)[i], idx)[0]
@memoize
def hessian_diag(f, vars=None):
    """Negative Hessian diagonal of f, concatenated across vars
    (defaults to all continuous inputs of f)."""
    if vars is None:
        vars = cont_inputs(f)
    pieces = [hessian_diag1(f, var) for var in vars]
    return -t.concatenate(pieces, axis=0)
def makeiter(a):
    """Return `a` unchanged if it is already a list or tuple; otherwise wrap
    it in a single-element list."""
    if not isinstance(a, (tuple, list)):
        return [a]
    return a
#!/usr/bin/python -u
#
# this tests the entities substitutions with the XmlTextReader interface
#
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
result = ""
def processNode(reader):
global result
result = result + "%d %d %s %d\n" % (reader.Depth(), reader.NodeType(),
reader.Name(), reader.IsEmptyElement())
#
# Parse a document testing the readerForxxx API
#
docstr = """<foo>
<label>some text</label>
<item>100</item>
</foo>"""
expect = """0 1 foo 0
1 14 #text 0
1 1 label 0
2 3 #text 0
1 15 label 0
1 14 #text 0
1 1 item 0
2 3 #text 0
1 15 item 0
1 14 #text 0
0 15 foo 0
"""
result = ""

reader = libxml2.readerForDoc(docstr, "test1", None, 0)
status = reader.Read()
while status == 1:
    processNode(reader)
    status = reader.Read()
if status != 0:
    print("Error parsing the document test1")
    sys.exit(1)
if result != expect:
    print("Unexpected result for test1")
    print(result)
    sys.exit(1)
#
# Reuse the reader for another document testing the ReaderNewxxx API
#
docstr = """<foo>
<label>some text</label>
<item>1000</item>
</foo>"""
expect = """0 1 foo 0
1 14 #text 0
1 1 label 0
2 3 #text 0
1 15 label 0
1 14 #text 0
1 1 item 0
2 3 #text 0
1 15 item 0
1 14 #text 0
0 15 foo 0
"""
result = ""

reader.NewDoc(docstr, "test2", None, 0)
status = reader.Read()
while status == 1:
    processNode(reader)
    status = reader.Read()
if status != 0:
    print("Error parsing the document test2")
    sys.exit(1)
if result != expect:
    print("Unexpected result for test2")
    print(result)
    sys.exit(1)
#
# cleanup
#
# Drop the reader first so its underlying C object is released before the
# leak check below.
del reader

# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
## Input
```javascript
// @enableNameAnonymousFunctions
import {useCallback, useEffect} from 'react';
import {identity, Stringify, useIdentity} from 'shared-runtime';
import * as SharedRuntime from 'shared-runtime';
function Component(props) {
function named() {
const inner = () => props.named;
const innerIdentity = identity(() => props.named);
return inner(innerIdentity());
}
const callback = useCallback(() => {
return 'ok';
}, []);
const namedVariable = function () {
return props.namedVariable;
};
const methodCall = SharedRuntime.identity(() => props.methodCall);
const call = identity(() => props.call);
const builtinElementAttr = <div onClick={() => props.builtinElementAttr} />;
const namedElementAttr = <Stringify onClick={() => props.namedElementAttr} />;
const hookArgument = useIdentity(() => props.hookArgument);
useEffect(() => {
console.log(props.useEffect);
JSON.stringify(null, null, () => props.useEffect);
const g = () => props.useEffect;
console.log(g());
}, [props.useEffect]);
return (
<>
{named()}
{callback()}
{namedVariable()}
{methodCall()}
{call()}
{builtinElementAttr}
{namedElementAttr}
{hookArgument()}
</>
);
}
export const TODO_FIXTURE_ENTRYPOINT = {
fn: Component,
params: [
{
named: '<named>',
namedVariable: '<namedVariable>',
methodCall: '<methodCall>',
call: '<call>',
builtinElementAttr: '<builtinElementAttr>',
namedElementAttr: '<namedElementAttr>',
hookArgument: '<hookArgument>',
useEffect: '<useEffect>',
},
],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime"; // @enableNameAnonymousFunctions
import { useCallback, useEffect } from "react";
import { identity, Stringify, useIdentity } from "shared-runtime";
import * as SharedRuntime from "shared-runtime";
function Component(props) {
const $ = _c(31);
let t0;
if ($[0] !== props.named) {
t0 = function named() {
const inner = { "Component[named > inner]": () => props.named }[
"Component[named > inner]"
];
const innerIdentity = identity(
{ "Component[named > identity()]": () => props.named }[
"Component[named > identity()]"
],
);
return inner(innerIdentity());
};
$[0] = props.named;
$[1] = t0;
} else {
t0 = $[1];
}
const named = t0;
const callback = _ComponentCallback;
let t1;
if ($[2] !== props.namedVariable) {
t1 = {
"Component[namedVariable]": function () {
return props.namedVariable;
},
}["Component[namedVariable]"];
$[2] = props.namedVariable;
$[3] = t1;
} else {
t1 = $[3];
}
const namedVariable = t1;
let t2;
if ($[4] !== props.methodCall) {
t2 = { "Component[SharedRuntime.identity()]": () => props.methodCall }[
"Component[SharedRuntime.identity()]"
];
$[4] = props.methodCall;
$[5] = t2;
} else {
t2 = $[5];
}
const methodCall = SharedRuntime.identity(t2);
let t3;
if ($[6] !== props.call) {
t3 = { "Component[identity()]": () => props.call }["Component[identity()]"];
$[6] = props.call;
$[7] = t3;
} else {
t3 = $[7];
}
const call = identity(t3);
let t4;
if ($[8] !== props.builtinElementAttr) {
t4 = (
<div
onClick={
{ "Component[<div>.onClick]": () => props.builtinElementAttr }[
"Component[<div>.onClick]"
]
}
/>
);
$[8] = props.builtinElementAttr;
$[9] = t4;
} else {
t4 = $[9];
}
const builtinElementAttr = t4;
let t5;
if ($[10] !== props.namedElementAttr) {
t5 = (
<Stringify
onClick={
{ "Component[<Stringify>.onClick]": () => props.namedElementAttr }[
"Component[<Stringify>.onClick]"
]
}
/>
);
$[10] = props.namedElementAttr;
$[11] = t5;
} else {
t5 = $[11];
}
const namedElementAttr = t5;
let t6;
if ($[12] !== props.hookArgument) {
t6 = { "Component[useIdentity()]": () => props.hookArgument }[
"Component[useIdentity()]"
];
$[12] = props.hookArgument;
$[13] = t6;
} else {
t6 = $[13];
}
const hookArgument = useIdentity(t6);
let t7;
let t8;
if ($[14] !== props.useEffect) {
t7 = {
"Component[useEffect()]": () => {
console.log(props.useEffect);
JSON.stringify(
null,
null,
{
"Component[useEffect() > JSON.stringify()]": () => props.useEffect,
}["Component[useEffect() > JSON.stringify()]"],
);
const g = { "Component[useEffect() > g]": () => props.useEffect }[
"Component[useEffect() > g]"
];
console.log(g());
},
}["Component[useEffect()]"];
t8 = [props.useEffect];
$[14] = props.useEffect;
$[15] = t7;
$[16] = t8;
} else {
t7 = $[15];
t8 = $[16];
}
useEffect(t7, t8);
let t9;
if ($[17] !== named) {
t9 = named();
$[17] = named;
$[18] = t9;
} else {
t9 = $[18];
}
const t10 = callback();
let t11;
if ($[19] !== namedVariable) {
t11 = namedVariable();
$[19] = namedVariable;
$[20] = t11;
} else {
t11 = $[20];
}
const t12 = methodCall();
const t13 = call();
let t14;
if ($[21] !== hookArgument) {
t14 = hookArgument();
$[21] = hookArgument;
$[22] = t14;
} else {
t14 = $[22];
}
let t15;
if (
$[23] !== builtinElementAttr ||
$[24] !== namedElementAttr ||
$[25] !== t11 ||
$[26] !== t12 ||
$[27] !== t13 ||
$[28] !== t14 ||
$[29] !== t9
) {
t15 = (
<>
{t9}
{t10}
{t11}
{t12}
{t13}
{builtinElementAttr}
{namedElementAttr}
{t14}
</>
);
$[23] = builtinElementAttr;
$[24] = namedElementAttr;
$[25] = t11;
$[26] = t12;
$[27] = t13;
$[28] = t14;
$[29] = t9;
$[30] = t15;
} else {
t15 = $[30];
}
return t15;
}
function _ComponentCallback() {
return "ok";
}
export const TODO_FIXTURE_ENTRYPOINT = {
fn: Component,
params: [
{
named: "<named>",
namedVariable: "<namedVariable>",
methodCall: "<methodCall>",
call: "<call>",
builtinElementAttr: "<builtinElementAttr>",
namedElementAttr: "<namedElementAttr>",
hookArgument: "<hookArgument>",
useEffect: "<useEffect>",
},
],
};
```
### Eval output
(kind: exception) Fixture not implemented | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/name-anonymous-functions.expect.md |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package ssh
import (
"context"
"crypto/dsa"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"errors"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/vault/builtin/logical/ssh/managed_key"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/hashicorp/vault/sdk/helper/strutil"
"github.com/hashicorp/vault/sdk/logical"
"golang.org/x/crypto/ssh"
)
// containsTemplateRegex matches identity-template markers of the form {{...}}.
var containsTemplateRegex = regexp.MustCompile(`{{.+?}}`)

// ecCurveBitsToAlgoName maps an ECDSA curve bit size to the corresponding
// named SSH key algorithm.
var ecCurveBitsToAlgoName = map[int]string{
	256: ssh.KeyAlgoECDSA256,
	384: ssh.KeyAlgoECDSA384,
	521: ssh.KeyAlgoECDSA521,
}

// If the algorithm is not found, it could be that we have a curve
// that we haven't added a constant for yet. But they could allow it
// (assuming x/crypto/ssh can parse it) via setting a ec: <keyBits>
// mapping rather than using a named SSH key type, so erring out here
// isn't advisable.
// creationBundle carries everything needed to build and sign a single SSH
// certificate.
type creationBundle struct {
	KeyID           string            // value for the certificate's KeyId field
	ValidPrincipals []string          // principals the certificate is valid for
	PublicKey       ssh.PublicKey     // key being certified
	CertificateType uint32            // ssh.UserCert or ssh.HostCert
	TTL             time.Duration     // validity duration, measured from now
	Signer          ssh.Signer        // CA signer used to sign the certificate
	Role            *sshRole          // role driving signing policy
	CriticalOptions map[string]string // critical options to embed
	Extensions      map[string]string // extensions to embed
}
// pathSignIssueCertificateHelper performs the shared work of the sign/issue
// endpoints: it validates the request against the role, assembles a
// creationBundle, signs the public key, and returns the marshaled certificate
// along with audit metadata.
func (b *backend) pathSignIssueCertificateHelper(ctx context.Context, req *logical.Request, data *framework.FieldData, role *sshRole, publicKey ssh.PublicKey) (*logical.Response, map[string]interface{}, error) {
	// Note that these various functions always return "user errors" so we pass
	// them as 4xx values
	keyID, err := b.calculateKeyID(data, req, role, publicKey)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil, nil
	}

	certificateType, err := b.calculateCertificateType(data, role)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil, nil
	}

	var parsedPrincipals []string
	if certificateType == ssh.HostCert {
		// Host certificates validate principals against the role's allowed
		// domains (with bare-domain/subdomain rules).
		parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, "", role.AllowedDomains, role.AllowedDomainsTemplate, validateValidPrincipalForHosts(role))
		if err != nil {
			return logical.ErrorResponse(err.Error()), nil, nil
		}
	} else {
		// User certificates fall back to the role's default user, which may
		// itself be an identity template.
		defaultPrincipal := role.DefaultUser
		if role.DefaultUserTemplate {
			defaultPrincipal, err = b.renderPrincipal(role.DefaultUser, req)
			if err != nil {
				return nil, nil, err
			}
		}
		parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, defaultPrincipal, role.AllowedUsers, role.AllowedUsersTemplate, strutil.StrListContains)
		if err != nil {
			return logical.ErrorResponse(err.Error()), nil, nil
		}
	}

	ttl, err := b.calculateTTL(data, role)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil, nil
	}

	criticalOptions, err := b.calculateCriticalOptions(data, role)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil, nil
	}

	extensions, addExtTemplatingWarning, err := b.calculateExtensions(data, req, role)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil, nil
	}

	signer, err := b.getCASigner(ctx, req.Storage)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating signer: %w", err)
	}

	cBundle := creationBundle{
		KeyID:           keyID,
		PublicKey:       publicKey,
		Signer:          signer,
		ValidPrincipals: parsedPrincipals,
		TTL:             ttl,
		CertificateType: certificateType,
		Role:            role,
		CriticalOptions: criticalOptions,
		Extensions:      extensions,
	}

	certificate, err := cBundle.sign()
	if err != nil {
		return nil, nil, err
	}

	signedSSHCertificate := ssh.MarshalAuthorizedKey(certificate)
	if len(signedSSHCertificate) == 0 {
		return nil, nil, errors.New("error marshaling signed certificate")
	}

	response := &logical.Response{
		Data: map[string]interface{}{
			"serial_number": strconv.FormatUint(certificate.Serial, 16),
			"signed_key":    string(signedSSHCertificate),
		},
	}

	if addExtTemplatingWarning {
		response.AddWarning("default_extension templating enabled with at least one extension requiring identity templating. However, this request lacked identity entity information, causing one or more extensions to be skipped from the generated certificate.")
	}

	// Audit metadata describing what was signed.
	metadata := map[string]interface{}{
		"certificate_type": certificateType,
		"ttl":              ttl.String(),
		"serial_number":    strconv.FormatUint(certificate.Serial, 16),
		"key_id":           keyID,
	}

	return response, metadata, nil
}
// renderPrincipal expands identity-template markers ({{...}}) in principal
// using the request's entity ID. Principals without markers, or requests
// without an entity, are returned unchanged.
func (b *backend) renderPrincipal(principal string, req *logical.Request) (string, error) {
	if containsTemplateRegex.MatchString(principal) && req.EntityID != "" {
		// Retrieve principal based on template + entityID from request.
		rendered, err := framework.PopulateIdentityTemplate(principal, req.EntityID, b.System())
		if err != nil {
			return "", fmt.Errorf("template '%s' could not be rendered -> %s", principal, err)
		}
		return rendered, nil
	}
	// Static principal (or no entity to template against).
	return principal, nil
}
// calculateValidPrincipals resolves the principals requested via
// valid_principals (falling back to defaultPrincipal) and validates each one
// against the role's allowed list (principalsAllowedByRole), optionally
// rendering that list as an identity template first. The validatePrincipal
// callback encodes the per-cert-type matching rules.
func (b *backend) calculateValidPrincipals(data *framework.FieldData, req *logical.Request, role *sshRole, defaultPrincipal, principalsAllowedByRole string, enableTemplating bool, validatePrincipal func([]string, string) bool) ([]string, error) {
	validPrincipals := ""
	validPrincipalsRaw, ok := data.GetOk("valid_principals")
	if ok {
		validPrincipals = validPrincipalsRaw.(string)
	} else {
		validPrincipals = defaultPrincipal
	}

	parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false)

	// Build list of allowed Principals from template and static principalsAllowedByRole
	var allowedPrincipals []string
	if enableTemplating {
		rendered, err := b.renderPrincipal(principalsAllowedByRole, req)
		if err != nil {
			return nil, err
		}
		allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(rendered, ","), false)
	} else {
		allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false)
	}

	if len(parsedPrincipals) == 0 && defaultPrincipal != "" {
		// defaultPrincipal will either be the defaultUser or a rendered defaultUserTemplate
		parsedPrincipals = []string{defaultPrincipal}
	}

	switch {
	case len(parsedPrincipals) == 0:
		if role.AllowEmptyPrincipals {
			// There is nothing to process
			return nil, nil
		} else {
			return nil, fmt.Errorf("empty valid principals not allowed by role")
		}
	case len(allowedPrincipals) == 0:
		// User has requested principals to be set, but role is not configured
		// with any principals
		return nil, fmt.Errorf("role is not configured to allow any principals")
	default:
		// Role was explicitly configured to allow any principal.
		if principalsAllowedByRole == "*" {
			return parsedPrincipals, nil
		}

		// Every requested principal must pass the role's matching rules.
		for _, principal := range parsedPrincipals {
			if !validatePrincipal(strutil.RemoveDuplicates(allowedPrincipals, false), principal) {
				return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal)
			}
		}
		return parsedPrincipals, nil
	}
}
// validateValidPrincipalForHosts returns a validator for host-certificate
// principals: a principal is accepted if it exactly matches an allowed
// domain (when bare domains are allowed) or is a subdomain of one (when
// subdomains are allowed).
func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool {
	return func(allowedPrincipals []string, validPrincipal string) bool {
		for _, allowed := range allowedPrincipals {
			switch {
			case role.AllowBareDomains && validPrincipal == allowed:
				return true
			case role.AllowSubdomains && strings.HasSuffix(validPrincipal, "."+allowed):
				return true
			}
		}
		return false
	}
}
// calculateCertificateType maps the request's cert_type field to the SSH
// certificate type constant, rejecting types the role does not allow.
func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshRole) (uint32, error) {
	switch data.Get("cert_type").(string) {
	case "user":
		if !role.AllowUserCertificates {
			return 0, errors.New("cert_type 'user' is not allowed by role")
		}
		return ssh.UserCert, nil
	case "host":
		if !role.AllowHostCertificates {
			return 0, errors.New("cert_type 'host' is not allowed by role")
		}
		return ssh.HostCert, nil
	default:
		return 0, errors.New("cert_type must be either 'user' or 'host'")
	}
}
// calculateKeyID determines the certificate's key ID: an explicit key_id from
// the request (only if the role allows it), otherwise the role's
// key_id_format template, otherwise a built-in
// vault-<display-name>-<pubkey-sha256> default.
func (b *backend) calculateKeyID(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) {
	reqID := data.Get("key_id").(string)

	if reqID != "" {
		if !role.AllowUserKeyIDs {
			return "", fmt.Errorf("setting key_id is not allowed by role")
		}
		return reqID, nil
	}

	// Default format; drop the token display name segment when the request
	// carries none.
	keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}"
	if req.DisplayName == "" {
		keyIDFormat = "vault-{{public_key_hash}}"
	}
	// A role-configured format always wins.
	if role.KeyIDFormat != "" {
		keyIDFormat = role.KeyIDFormat
	}

	keyID := substQuery(keyIDFormat, map[string]string{
		"token_display_name": req.DisplayName,
		"role_name":          data.Get("role").(string),
		"public_key_hash":    fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())),
	})

	return keyID, nil
}
// calculateCriticalOptions returns the request's critical options (or the
// role defaults when none are supplied), rejecting any option not on the
// role's allowed list.
func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
	raw := data.Get("critical_options").(map[string]interface{})
	if len(raw) == 0 {
		return role.DefaultCriticalOptions, nil
	}

	criticalOptions := convertMapToStringValue(raw)

	// An empty allowed list means no filtering is configured.
	if role.AllowedCriticalOptions == "" {
		return criticalOptions, nil
	}

	allowed := strings.Split(role.AllowedCriticalOptions, ",")
	rejected := []string{}
	for option := range criticalOptions {
		if !strutil.StrListContains(allowed, option) {
			rejected = append(rejected, option)
		}
	}
	if len(rejected) != 0 {
		return nil, fmt.Errorf("critical options not on allowed list: %v", rejected)
	}

	return criticalOptions, nil
}
// calculateExtensions resolves the certificate extensions. Request-supplied
// extensions are checked against the role's allowed list; otherwise the
// role's defaults are used, optionally rendering identity templates. The
// returned bool indicates that at least one templated default extension was
// skipped because the request carried no entity information (callers attach
// a warning in that case).
func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, bool, error) {
	unparsedExtensions := data.Get("extensions").(map[string]interface{})
	extensions := make(map[string]string)

	if len(unparsedExtensions) > 0 {
		// NOTE: this := deliberately shadows the outer `extensions`; every
		// path in this branch returns, so the outer map is never used here.
		extensions := convertMapToStringValue(unparsedExtensions)
		if role.AllowedExtensions == "*" {
			// Allowed extensions was configured to allow all
			return extensions, false, nil
		}

		notAllowed := []string{}
		allowedExtensions := strings.Split(role.AllowedExtensions, ",")
		for extensionKey := range extensions {
			if !strutil.StrListContains(allowedExtensions, extensionKey) {
				notAllowed = append(notAllowed, extensionKey)
			}
		}

		if len(notAllowed) != 0 {
			return nil, false, fmt.Errorf("extensions %v are not on allowed list", notAllowed)
		}
		return extensions, false, nil
	}

	haveMissingEntityInfoWithTemplatedExt := false

	if role.DefaultExtensionsTemplate {
		for extensionKey, extensionValue := range role.DefaultExtensions {
			// Look for templating markers {{ .* }}
			matched := containsTemplateRegex.MatchString(extensionValue)
			if matched {
				if req.EntityID != "" {
					// Retrieve extension value based on template + entityID from request.
					templateExtensionValue, err := framework.PopulateIdentityTemplate(extensionValue, req.EntityID, b.System())
					if err == nil {
						// Template returned an extension value that we can use
						extensions[extensionKey] = templateExtensionValue
					} else {
						return nil, false, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err)
					}
				} else {
					// Templated value but no entity to render it with: skip
					// the extension and remember to warn the caller.
					haveMissingEntityInfoWithTemplatedExt = true
				}
			} else {
				// Static (non-templated) extension value.
				extensions[extensionKey] = extensionValue
			}
		}
	} else {
		extensions = role.DefaultExtensions
	}

	return extensions, haveMissingEntityInfoWithTemplatedExt, nil
}
// calculateTTL resolves the certificate TTL: the request's ttl, else the
// role's ttl, else the system default; clamped to the role/system max. An
// explicitly requested TTL above the max is an error, while an implicit one
// is silently capped.
func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) {
	var ttl, maxTTL time.Duration
	var err error

	ttlRaw, specifiedTTL := data.GetOk("ttl")
	if specifiedTTL {
		ttl = time.Duration(ttlRaw.(int)) * time.Second
	} else {
		ttl, err = parseutil.ParseDurationSecond(role.TTL)
		if err != nil {
			return 0, err
		}
	}
	if ttl == 0 {
		ttl = b.System().DefaultLeaseTTL()
	}

	maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
	if err != nil {
		return 0, err
	}
	if maxTTL == 0 {
		maxTTL = b.System().MaxLeaseTTL()
	}

	if ttl > maxTTL {
		// Don't error if they were using system defaults, only error if
		// they specifically chose a bad TTL
		if !specifiedTTL {
			ttl = maxTTL
		} else {
			// Error message reports the max in whole seconds.
			return 0, fmt.Errorf("ttl is larger than maximum allowed %d", maxTTL/time.Second)
		}
	}

	return ttl, nil
}
// validateSignedKeyRequirements checks the submitted public key's type and
// size against the role's allowed_user_key_types_lengths configuration. When
// that map is empty, all keys are accepted.
func (b *backend) validateSignedKeyRequirements(publickey ssh.PublicKey, role *sshRole) error {
	if len(role.AllowedUserKeyTypesLengths) != 0 {
		var keyType string
		var keyBits int

		switch k := publickey.(type) {
		case ssh.CryptoPublicKey:
			ff := k.CryptoPublicKey()
			switch k := ff.(type) {
			case *rsa.PublicKey:
				keyType = "rsa"
				keyBits = k.N.BitLen()
			case *dsa.PublicKey:
				keyType = "dsa"
				keyBits = k.Parameters.P.BitLen()
			case *ecdsa.PublicKey:
				keyType = "ecdsa"
				keyBits = k.Curve.Params().BitSize
			case ed25519.PublicKey:
				// ed25519 has a fixed key size, so keyBits stays 0.
				keyType = "ed25519"
			default:
				// NOTE(review): keyType is still "" here, so this message
				// prints an empty type name; consider reporting %T of the
				// crypto key instead.
				return fmt.Errorf("public key type of %s is not allowed", keyType)
			}
		default:
			return fmt.Errorf("pubkey not suitable for crypto (expected ssh.CryptoPublicKey but found %T)", k)
		}

		// Map the detected type to all the config-key spellings it may be
		// listed under (e.g. "rsa" and "ssh-rsa").
		keyTypeToMapKey := createKeyTypeToMapKey(keyType, keyBits)

		var present bool
		var pass bool
		for _, kstr := range keyTypeToMapKey[keyType] {
			allowed_values, ok := role.AllowedUserKeyTypesLengths[kstr]
			if !ok {
				continue
			}

			present = true

			for _, value := range allowed_values {
				if keyType == "rsa" || keyType == "dsa" {
					// Regardless of map naming, we always need to validate the
					// bit length of RSA and DSA keys. Use the keyType flag to
					if keyBits == value {
						pass = true
					}
				} else if kstr == "ec" || kstr == "ecdsa" {
					// If the map string is "ecdsa", we have to validate the keyBits
					// are a match for an allowed value, meaning that our curve
					// is allowed. This isn't necessary when a named curve (e.g.
					// ssh.KeyAlgoECDSA256) is allowed (and hence kstr is that),
					// because keyBits is already specified in the kstr. Thus,
					// we have conditioned around kstr and not keyType (like with
					// rsa or dsa).
					if keyBits == value {
						pass = true
					}
				} else {
					// We get here in two cases: we have a algo-named EC key
					// matching a format specifier in the key map (e.g., a P-256
					// key with a KeyAlgoECDSA256 entry in the map) or we have a
					// ed25519 key (which is always allowed).
					pass = true
				}
			}
		}

		if !present {
			return fmt.Errorf("key of type %s is not allowed", keyType)
		}

		if !pass {
			return fmt.Errorf("key is of an invalid size: %v", keyBits)
		}
	}
	return nil
}
// sign builds an ssh.Certificate from the bundle and signs it with the CA
// signer, choosing an explicit signature algorithm for RSA CA keys.
func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) {
	// NOTE(review): this recover only converts *string* panics into errors;
	// panics with other payloads are re-raised implicitly. Confirm this is
	// intentional.
	defer func() {
		if r := recover(); r != nil {
			errMsg, ok := r.(string)
			if ok {
				retCert = nil
				retErr = errors.New(errMsg)
			}
		}
	}()

	serialNumber, err := certutil.GenerateSerialNumber()
	if err != nil {
		return nil, err
	}

	now := time.Now()

	sshAlgorithmSigner, ok := b.Signer.(ssh.AlgorithmSigner)
	if !ok {
		return nil, fmt.Errorf("failed to generate signed SSH key: signer is not an AlgorithmSigner")
	}

	// prepare certificate for signing
	nonce := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce: %w", err)
	}
	certificate := &ssh.Certificate{
		Serial:          serialNumber.Uint64(),
		Key:             b.PublicKey,
		KeyId:           b.KeyID,
		ValidPrincipals: b.ValidPrincipals,
		// Backdate the start time by the role's not_before_duration.
		ValidAfter:  uint64(now.Add(-b.Role.NotBeforeDuration).In(time.UTC).Unix()),
		ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()),
		CertType:    b.CertificateType,
		Permissions: ssh.Permissions{
			CriticalOptions: b.CriticalOptions,
			Extensions:      b.Extensions,
		},
		Nonce:        nonce,
		SignatureKey: sshAlgorithmSigner.PublicKey(),
	}

	// get bytes to sign; this is based on Certificate.bytesForSigning() from the go ssh lib
	out := certificate.Marshal()
	// Drop trailing signature length.
	certificateBytes := out[:len(out)-4]

	algo := b.Role.AlgorithmSigner

	// Handle the new default algorithm selection process correctly.
	if algo == DefaultAlgorithmSigner && sshAlgorithmSigner.PublicKey().Type() == ssh.KeyAlgoRSA {
		// Default RSA signing to SHA-256 rather than the legacy SHA-1.
		algo = ssh.SigAlgoRSASHA2256
	} else if algo == DefaultAlgorithmSigner {
		// Empty string lets the signer pick its native algorithm.
		algo = ""
	}

	sig, err := sshAlgorithmSigner.SignWithAlgorithm(rand.Reader, certificateBytes, algo)
	if err != nil {
		return nil, fmt.Errorf("failed to generate signed SSH key: sign error: %w", err)
	}

	certificate.Signature = sig

	return certificate, nil
}
// createKeyTypeToMapKey returns, for each short key type, the list of
// spellings under which it may appear in the role's allowed-key-types map.
// ECDSA keys additionally accept the curve-specific SSH algorithm name when
// the bit size is one we know.
func createKeyTypeToMapKey(keyType string, keyBits int) map[string][]string {
	mapping := map[string][]string{
		"rsa":     {"rsa", ssh.KeyAlgoRSA},
		"dsa":     {"dsa", ssh.KeyAlgoDSA},
		"ecdsa":   {"ecdsa", "ec"},
		"ed25519": {"ed25519", ssh.KeyAlgoED25519},
	}

	if keyType == "ecdsa" {
		if algoName, found := ecCurveBitsToAlgoName[keyBits]; found {
			mapping[keyType] = append(mapping[keyType], algoName)
		}
	}

	return mapping
}
// getCASigner returns the CA's ssh.Signer, preferring a stored private key
// (migrating from deprecated storage paths if needed) and falling back to a
// configured managed key.
func (b *backend) getCASigner(ctx context.Context, s logical.Storage) (ssh.Signer, error) {
	var signer ssh.Signer
	const allowMigration = true // migration from deprecated paths is allowed when signing
	storedKey, err := readStoredKey(ctx, s, caPrivateKey, allowMigration)
	if err != nil {
		return nil, fmt.Errorf("error reading stored key: %w", err)
	}

	if storedKey != nil {
		if storedKey.Key == "" {
			return nil, errors.New("stored private key was empty")
		}

		signer, err = ssh.ParsePrivateKey([]byte(storedKey.Key))
		if err != nil {
			return nil, fmt.Errorf("failed to parse stored CA private key: %w", err)
		}
	} else {
		// No stored key: fall back to a managed key, if one is configured.
		managedKey, err := readManagedKey(ctx, s)
		if err != nil {
			return nil, fmt.Errorf("error reading managed key: %w", err)
		}
		if managedKey == nil {
			return nil, errors.New("no keys configured")
		}

		signer, err = managed_key.GetManagedKeyInfo(ctx, b, managedKey.KeyId)
		if err != nil {
			return nil, fmt.Errorf("error getting managed key info: %w", err)
		}
	}
	return signer, nil
}
/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_QUANTIZE_OP_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_QUANTIZE_OP_UTILS_H_
#include "mlir/IR/BuiltinTypes.h" // from @llvm-project
#include "mlir/IR/Types.h" // from @llvm-project
namespace mlir {
namespace quant {
UnrankedTensorType CreateUnknownShapeFromElementType(Type tensor_type);
} // namespace quant
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_UTILS_TF_QUANTIZE_OP_UTILS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_quantize_op_utils.h |
from handler.base import Handler
import tornado.web
import re
class Set(Handler):
@tornado.web.authenticated
def get(self, action):
if action == 'new':
self.render('set_new.html')
else:
self.render('set_list.html', avail = self.db.query('SELECT id, name, words FROM sets'))
@tornado.web.authenticated
def post(self, action):
self.check_xsrf_cookie()
data = self.bulk_arguments(('name', 'words',))
data['owner'] = self.session['user']['email']
self.db.insert('sets', data)
self.redirect('/sets/list')
class SetMixin():
def set_record(self, set_id):
the_set = self.db.query_one('SELECT * FROM sets WHERE id = %s', (set_id,))
return dict(
the_set = the_set,
words_list = re.split('\s+', the_set['words'])
)
class Words(Handler, SetMixin):
def get(self, set_id = None):
if not set_id:
self.redirect('/sets/list')
return
self.render('dict.html', **(self.set_record(set_id)))
class Example(Handler, SetMixin):
def get(self, set_id = None):
if not set_id:
self.redirect('/sets/list')
return
self.render('example.html', **(self.set_record(set_id))) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.mavenplugin;
import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
import org.gradle.api.DefaultTask;
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.ConfigurationContainer;
import org.gradle.api.artifacts.dsl.DependencyHandler;
import org.gradle.api.file.ArchiveOperations;
import org.gradle.api.file.DirectoryProperty;
import org.gradle.api.file.FileSystemOperations;
import org.gradle.api.file.FileTree;
import org.gradle.api.provider.Provider;
import org.gradle.api.provider.SetProperty;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.OutputDirectory;
import org.gradle.api.tasks.TaskAction;
/**
* {@link Task} to make Maven binaries available for integration testing.
*
* @author Andy Wilkinson
*/
public abstract class PrepareMavenBinaries extends DefaultTask {
private final FileSystemOperations fileSystemOperations;
private final Provider<Set<FileTree>> binaries;
@Inject
public PrepareMavenBinaries(FileSystemOperations fileSystemOperations, ArchiveOperations archiveOperations) {
this.fileSystemOperations = fileSystemOperations;
ConfigurationContainer configurations = getProject().getConfigurations();
DependencyHandler dependencies = getProject().getDependencies();
this.binaries = getVersions().map((versions) -> versions.stream()
.map((version) -> configurations
.detachedConfiguration(dependencies.create("org.apache.maven:apache-maven:" + version + ":bin@zip")))
.map(Configuration::getSingleFile)
.map(archiveOperations::zipTree)
.collect(Collectors.toSet()));
}
@OutputDirectory
public abstract DirectoryProperty getOutputDir();
@Input
public abstract SetProperty<String> getVersions();
@TaskAction
public void prepareBinaries() {
this.fileSystemOperations.sync((sync) -> {
sync.into(getOutputDir());
this.binaries.get().forEach(sync::from);
});
}
} | java | github | https://github.com/spring-projects/spring-boot | buildSrc/src/main/java/org/springframework/boot/build/mavenplugin/PrepareMavenBinaries.java |
from django.db import models
from contacts.models import Contact
from phone_numbers.models import PhoneNumber
class SmsMessage(models.Model):
date_created = models.DateTimeField(auto_now_add=True, db_index=True)
date_modified = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(default=False)
sid = models.CharField(max_length=255, db_index=True)
from_number = models.CharField(max_length=255, db_index=True)
to_number = models.CharField(max_length=255, db_index=True)
body = models.TextField(db_index=True)
related_contact = models.ForeignKey(Contact,
null=True,
related_name="sms_messages",
on_delete=models.CASCADE)
related_phone_number = models.ForeignKey(PhoneNumber,
null=True,
related_name="sms_messages",
on_delete=models.CASCADE)
def __str__(self):
return "{0}: from {1} to {2}".format(self.date_created,
self.from_number,
self.to_number) | unknown | codeparrot/codeparrot-clean | ||
//@flow
import {useRef} from 'react';
component C() {
const r = useRef(null);
const r2 = useRef(null);
if (r.current == null) {
r2.current = 1;
}
}
export const FIXTURE_ENTRYPOINT = {
fn: C,
params: [{}],
}; | javascript | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.ref-initialization-other.js |
---
navigation_title: "Exists"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-exists-query.html
---
# Exists query [query-dsl-exists-query]
Returns documents that contain an indexed value for a field.
An indexed value may not exist for a document’s field due to a variety of reasons:
* The field in the source JSON is `null` or `[]`
* The field has `"index" : false` and `"doc_values" : false` set in the mapping
* The length of the field value exceeded an `ignore_above` setting in the mapping
* The field value was malformed and `ignore_malformed` was defined in the mapping
## Example request [exists-query-ex-request]
```console
GET /_search
{
"query": {
"exists": {
"field": "user"
}
}
}
```
## Top-level parameters for `exists` [exists-query-top-level-params]
`field`
: (Required, string) Name of the field you wish to search.
While a field is deemed non-existent if the JSON value is `null` or `[]`, these values will indicate the field does exist:
* Empty strings, such as `""` or `"-"`
* Arrays containing `null` and another value, such as `[null, "foo"]`
* A custom [`null-value`](/reference/elasticsearch/mapping-reference/null-value.md), defined in field mapping
## Notes [exists-query-notes]
### Find documents missing indexed values [find-docs-null-values]
To find documents that are missing an indexed value for a field, use the `must_not` [boolean query](/reference/query-languages/query-dsl/query-dsl-bool-query.md) with the `exists` query.
The following search returns documents that are missing an indexed value for the `user.id` field.
```console
GET /_search
{
"query": {
"bool": {
"must_not": {
"exists": {
"field": "user.id"
}
}
}
}
}
``` | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/query-dsl/query-dsl-exists-query.md |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import shutil
import unittest
from htmresearch.encoders import EncoderTypes
from htmresearch.frameworks.nlp.htm_runner import HTMRunner
from htmresearch.frameworks.nlp.runner import Runner
from htmresearch.support.csv_helper import readCSV
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_data")
class ClassificationModelsTest(unittest.TestCase):
"""Test class for ClassificationModelKeywords."""
def tearDown(self):
# remove the generated HTM network data file
networkDataFilePath = os.path.join(DATA_DIR, "responses_network_0.csv")
if os.path.exists(networkDataFilePath):
os.remove(networkDataFilePath)
@staticmethod
def runExperiment(runner):
try:
runner.setupData()
runner.encodeSamples()
runner.runExperiment()
except Exception as e:
print "Runner could not execute the experiment."
raise e
finally:
# cleanup
shutil.rmtree(runner.model.modelDir.split("/")[0])
@staticmethod
def getExpectedClassifications(runner, expectationFilePath):
"""
Return a list of the labels predicted by runner and a list of expected
labels from the expected classifications file path.
"""
dataDict = readCSV(expectationFilePath, numLabels=3)
expectedClasses = []
resultClasses = []
for trial, trialResults in enumerate(runner.results):
for i, predictionList in enumerate(trialResults[0]):
predictions = [runner.labelRefs[p] for p in predictionList]
if predictions == []:
predictions = ["(none)"]
resultClasses.append(predictions)
expectedClasses.append(dataDict.items()[i+runner.trainSizes[trial]][1][1])
return expectedClasses, resultClasses
def testClassifyKeywordsAsExpected(self):
"""
Tests ClassificationModelKeywords.
Training on the first five samples of the dataset, and testing on the rest,
the model's classifications should match those in the expected classes
data file.
"""
modelName = "Keywords"
runner = Runner(dataPath=os.path.join(DATA_DIR, "responses.csv"),
resultsDir="",
experimentName="keywords_test",
experimentType="incremental",
loadPath=None,
modelName=modelName,
numClasses=3,
plots=0,
orderedSplit=True,
trainSizes=[5],
verbosity=0)
runner.initModel(modelName)
self.runExperiment(runner)
expectedClasses, resultClasses = self.getExpectedClassifications(
runner, os.path.join(DATA_DIR, "responses_expected_classes_keywords.csv"))
for i, (e, r) in enumerate(zip(expectedClasses, resultClasses)):
if i in (7, 9, 12):
# Ties amongst winning labels are handled randomly, which affects the
# third classification in these test samples.
e = e[:2]
r = r[:2]
self.assertEqual(sorted(e), sorted(r),
"Keywords model predicted classes other than what we expect.")
def testClassifyDocumentFingerprintsAsExpected(self):
"""
Tests ClassificationModelFingerprint (for encoder type 'document').
Training on the first five samples of the dataset, and testing on the rest,
the model's classifications should match those in the expected classes
data file.
"""
modelName = "CioDocumentFingerprint"
runner = Runner(dataPath=os.path.join(DATA_DIR, "responses.csv"),
resultsDir="",
experimentName="fingerprints_test",
experimentType="incremental",
loadPath=None,
modelName=modelName,
numClasses=3,
plots=0,
orderedSplit=True,
trainSizes=[5],
verbosity=0)
runner.initModel(modelName)
runner.model.encoder.fingerprintType = EncoderTypes.document
self.runExperiment(runner)
expectedClasses, resultClasses = self.getExpectedClassifications(runner,
os.path.join(DATA_DIR,
"responses_expected_classes_fingerprint_document.csv"))
[self.assertEqual(sorted(e), sorted(r),
"Fingerprint model predicted classes other than what we expect.")
for e, r in zip(expectedClasses, resultClasses)]
def testClassifyWordFingerprintsAsExpected(self):
"""
Tests ClassificationModelFingerprint (for encoder type 'word').
Training on the first five samples of the dataset, and testing on the rest,
the model's classifications should match those in the expected classes
data file.
"""
modelName = "CioWordFingerprint"
runner = Runner(dataPath=os.path.join(DATA_DIR, "responses.csv"),
resultsDir="",
experimentName="fingerprints_test",
experimentType="incremental",
loadPath=None,
modelName=modelName,
numClasses=3,
plots=0,
orderedSplit=True,
trainSizes=[5],
verbosity=0)
runner.initModel(modelName)
runner.model.encoder.fingerprintType = EncoderTypes.word
self.runExperiment(runner)
expectedClasses, resultClasses = self.getExpectedClassifications(runner,
os.path.join(DATA_DIR, "responses_expected_classes_fingerprint_word.csv"))
[self.assertEqual(sorted(e), sorted(r),
"Fingerprint model predicted classes other than what we expect.")
for e, r in zip(expectedClasses, resultClasses)]
def testClassifyEndpointAsExpected(self):
"""
Tests ClassificationModelEndpoint.
Training on the first five samples of the dataset, and testing on the rest,
the model's classifications should match those in the expected classes
data file.
"""
modelName = "CioEndpoint"
runner = Runner(dataPath=os.path.join(DATA_DIR, "responses.csv"),
resultsDir="",
experimentName="endpoint_test",
experimentType="incremental",
loadPath=None,
modelName=modelName,
numClasses=3,
plots=0,
orderedSplit=True,
trainSizes=[5],
verbosity=0)
runner.initModel(modelName)
self.runExperiment(runner)
expectedClasses, resultClasses = self.getExpectedClassifications(runner,
os.path.join(DATA_DIR, "responses_expected_classes_endpoint.csv"))
[self.assertEqual(sorted(e), sorted(r),
"Endpoint model predicted classes other than what we expect.")
for e, r in zip(expectedClasses, resultClasses)]
def testClassifyHTMAsExpectedWithKNN(self):
"""
Tests ClassificationModelHTM, where the network is
LanguageSensor->KNNClassifier.
Training on the first five samples of the dataset, and testing on the rest,
the model's classifications should match those in the expected classes
data file.
"""
modelName = "HTMNetwork"
runner = HTMRunner(dataPath=os.path.join(DATA_DIR, "responses.csv"),
networkConfigPath=os.path.join(
DATA_DIR, "network_config_ls_knn.json"),
resultsDir="",
experimentName="htm_test",
experimentType="incremental",
loadPath=None,
modelName=modelName,
numClasses=3,
plots=0,
orderedSplit=True,
trainSizes=[5],
verbosity=0,
generateData=True,
votingMethod="most")
runner.initModel(0)
runner.runExperiment()
expectedClasses, resultClasses = self.getExpectedClassifications(runner,
os.path.join(DATA_DIR, "responses_expected_classes_htm.csv"))
[self.assertEqual(sorted(e), sorted(r),
"HTM model predicted classes other than what we expect.")
for e, r in zip(expectedClasses, resultClasses)]
@unittest.skip(
"We do not yet know what the expected (correct) classifications are.")
def testClassifyHTMUsingTPAsExpectedWithKNN(self):
"""
Tests ClassificationModelHTM using TP region.
Training on the first five samples of the dataset, and testing on the rest,
the model's classifications should match those in the expected classes
data file.
"""
modelName = "HTMNetwork"
runner = HTMRunner(dataPath=os.path.join(DATA_DIR, "responses.csv"),
networkConfigPath=os.path.join(
DATA_DIR, "network_config_tp_knn.json"),
resultsDir="",
experimentName="htm_test",
experimentType="incremental",
loadPath=None,
modelName=modelName,
numClasses=3,
plots=0,
orderedSplit=True,
trainSizes=[5],
verbosity=0,
generateData=True,
votingMethod="most")
runner.initModel(0)
runner.runExperiment()
expectedClasses, resultClasses = self.getExpectedClassifications(runner,
os.path.join(DATA_DIR, "responses_expected_classes_htm_tp.csv"))
[self.assertEqual(sorted(e), sorted(r),
"HTM model predicted classes other than what we expect.")
for e, r in zip(expectedClasses, resultClasses)]
# TODO: add the following tests...
# def testTrainOnAllTestOnAll(self):
# """Train on all samples, save model, load model, and test on all samples."""
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for reminders for models with reminderable mixin."""
from datetime import datetime
from freezegun import freeze_time
from sqlalchemy import and_
from ggrc import db
from ggrc import models
from ggrc.notifications import common
from integration import ggrc
from integration.ggrc import api_helper
from integration.ggrc.models import factories
class TestReminderable(ggrc.TestCase):
"""Test sending reminder."""
def setUp(self):
ggrc.TestCase.setUp(self)
self.client.get("/login")
self._fix_notification_init()
self.api_helper = api_helper.Api()
def _fix_notification_init(self):
"""Fix Notification object init function.
This is a fix needed for correct created_at field when using freezgun. By
default the created_at field is left empty and filed by database, which
uses system time and not the fake date set by freezugun plugin. This fix
makes sure that object created in freeze_time block has all dates set with
the correct date and time.
"""
def init_decorator(init):
"""Wrapper for Notification init function."""
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
models.Notification.__init__ = init_decorator(models.Notification.__init__)
@classmethod
def _get_notifications(cls, sent=False, notif_type=None):
"""Get a notification query.
Args:
sent (boolean): flag to filter out only notifications that have been
sent.
notif_type (string): name of the notification type.
Returns:
sqlalchemy query for selected notifications.
"""
if sent:
notif_filter = models.Notification.sent_at.isnot(None)
else:
notif_filter = models.Notification.sent_at.is_(None)
if notif_type:
notif_filter = and_(notif_filter,
models.NotificationType.name == notif_type)
return db.session.query(models.Notification).join(
models.NotificationType).filter(notif_filter)
@classmethod
def create_assignees(cls, obj, persons):
"""Create assignees for object.
Args:
obj: Assignable object.
persons: [("(string) email", "Assignee roles"), ...] A list of people
and their roles
Returns:
[(person, object-person relationship,
object-person relationship attributes), ...] A list of persons with
their relationships and relationship attributes.
"""
assignees = []
for person, roles in persons:
person = factories.PersonFactory(email=person)
object_person_rel = factories.RelationshipFactory(
source=obj,
destination=person
)
object_person_rel_attrs = factories.RelationshipAttrFactory(
relationship_id=object_person_rel.id,
attr_name="AssigneeType",
attr_value=roles
)
assignees += [(person, object_person_rel, object_person_rel_attrs)]
return assignees
def create_assessment(self, people=None):
"""Create default assessment with some default assignees in all roles.
Args:
people: List of tuples with email address and their assignee roles for
Assessments.
Returns:
Assessment object.
"""
assessment = factories.AssessmentFactory()
if not people:
people = [
("creator@example.com", "Creator"),
("assessor_1@example.com", "Assessor"),
("assessor_2@example.com", "Assessor"),
("verifier_1@example.com", "Verifier"),
("verifier_2@example.com", "Verifier"),
]
self.create_assignees(assessment, people)
creators = [assignee for assignee, roles in assessment.assignees
if "Creator" in roles]
assignees = [assignee for assignee, roles in assessment.assignees
if "Assessor" in roles]
verifiers = [assignee for assignee, roles in assessment.assignees
if "Verifier" in roles]
self.assertEqual(len(creators), 1)
self.assertEqual(len(assignees), 2)
self.assertEqual(len(verifiers), 2)
return assessment
@classmethod
def refresh_object(cls, obj):
"""Returns a new instance of a model, fresh and warm from the database."""
return obj.query.filter_by(id=obj.id).first()
def change_status(self, obj, status):
"""Change status of an object."""
self.api_helper.modify_object(obj, {
"status": status
})
obj = self.refresh_object(obj)
self.assertEqual(obj.status, status)
return obj
def send_reminder(self, obj, reminder_type=None):
"""Sends reminder to object."""
if not reminder_type:
reminder_type = "statusToPerson"
return self.api_helper.modify_object(obj, {
"reminderType": reminder_type
})
def test_assessment_open_reminder(self):
"""Tests that notifications get generated when in `Open` state."""
with freeze_time("2015-04-01 17:13:15"):
assessment = self.create_assessment()
self.api_helper.modify_object(assessment, {
"reminderType": "statusToPerson"
})
self.assertEqual(
self._get_notifications(
False, "assessment_assessor_reminder").count(),
1)
with freeze_time("2015-04-02 01:01:01"):
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
def test_assessment_inprogress_reminder(self):
"""Tests that notifications get generated when in `In Progress` state."""
# pylint: disable=invalid-name
with freeze_time("2015-04-01 17:13:15"):
assessment = self.create_assessment()
assessment = self.change_status(assessment, "In Progress")
self.send_reminder(assessment)
self.assertEqual(
self._get_notifications(
False, "assessment_assessor_reminder").count(),
1)
with freeze_time("2015-04-02 01:01:01"):
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
def test_assessment_finished_reminder(self):
"""Tests that there are no notifications when in `Finished` state"""
# pylint: disable=invalid-name
with freeze_time("2015-04-01 17:13:15"):
assessment = self.create_assessment()
assessment = self.change_status(assessment,
models.Assessment.PROGRESS_STATE)
assessment = self.change_status(assessment, models.Assessment.DONE_STATE)
self.send_reminder(assessment)
self.assertEqual(
self._get_notifications(
False, "assessment_assessor_reminder").count(),
0)
with freeze_time("2015-04-02 01:01:01"):
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
def test_assessment_inprogress_reminder_finish_afterwards(self):
"""Tests that notifications don't get sent if Finished already.
Tests that notifications don't get sent out if assessment has been moved to
`Finished` state since reminder was activated.
"""
# pylint: disable=invalid-name
with freeze_time("2015-04-01 17:13:15"):
assessment = self.create_assessment()
assessment = self.change_status(assessment,
models.Assessment.PROGRESS_STATE)
self.send_reminder(assessment)
self.assertEqual(
self._get_notifications(
False, "assessment_assessor_reminder").count(),
1)
self.change_status(assessment, models.Assessment.DONE_STATE)
_, notif_data = common.get_daily_notifications()
self.assertEqual(notif_data, {}) | unknown | codeparrot/codeparrot-clean | ||
{% spaceless %}{% for widget in widget.subwidgets %}{% include widget.template_name %}{% endfor %}{% endspaceless %} | html | github | https://github.com/django/django | django/forms/templates/django/forms/widgets/multiwidget.html |
'''
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'stub for local testing'
def handle(self, *args, **options):
if not settings.DEBUG:
print("cannot be run without settings.DEBUG")
return
from kudos.models import Token
from avatar.utils import svg_to_png_inkscape
token = Token.objects.get(pk=182)
file_path = f"/code/app/assets/{token.image}"
with open(file_path, 'rb') as f:
print(svg_to_png_inkscape(f.read())) | unknown | codeparrot/codeparrot-clean | ||
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
import re
import bz2
import itertools
from conary_test import rephelp
from conary import conaryclient, errors
from conary.deps import deps
from conary import conarycfg
from conary import versions
from conary.cmds.queryrep import VERSION_FILTER_ALL, VERSION_FILTER_LATEST
from conary.cmds.queryrep import VERSION_FILTER_LEAVES
from conary.cmds.queryrep import FLAVOR_FILTER_ALL, FLAVOR_FILTER_AVAIL
from conary.cmds.queryrep import FLAVOR_FILTER_BEST, FLAVOR_FILTER_EXACT
from conary.cmds import queryrep
from conary.conaryclient import cmdline
from conary.repository import trovesource
from conary.repository.trovesource import TROVE_QUERY_NORMAL
from conary.repository.trovesource import TROVE_QUERY_PRESENT
from conary.repository.trovesource import TROVE_QUERY_ALL
from conary.versions import VersionFromString as VFS
class RepQueryTest(rephelp.RepositoryHelper):
def _rdiff(self, troveSpec, **kwargs):
client = conaryclient.ConaryClient(self.cfg)
return self.captureOutput(queryrep.rdiffCommand, self.cfg,
client, client.getDatabase(), troveSpec,
**kwargs)
def testBadQuery(self):
try:
queryrep.getTrovesToDisplay(None, ['=conary.rpath.com@'], [], [],
VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
self.cfg.installLabelPath, self.cfg.flavor,
None)
except errors.ParseError, msg:
assert(str(msg) == 'Error with spec "=conary.rpath.com@": Trove name is required')
else:
assert(0)
def _checkTupVers(self, tups, vers):
for tup, item in itertools.izip(tups, vers):
if isinstance(item, (list, tuple)):
verStr, flStr = item
else:
verStr = item
flStr = None
if '-' in verStr:
assert(str(tup[1].trailingRevision()) == verStr)
else:
assert(str(tup[1].trailingRevision().version) == verStr)
if flStr:
assert(str(tup[2]) == flStr)
def testGetTrovesVersions(self):
self.addComponent('foo:foo', '1.0', 'ssl')
self.addComponent('foo:foo', '1.0', '~ssl')
self.addComponent('foo:foo', '1.0', '!ssl')
self.addComponent('foo:foo', '1.0', 'readline')
self.addComponent('foo:foo', '2.0-1-1', 'readline')
self.addComponent('foo:foo', '2.0-2-1', 'readline')
self.addComponent('foo:foo', '3.0', '!readline')
repos = self.openRepository()
targetFlavor = [ deps.parseFlavor('~readline,ssl is:x86') ]
def _check(troveSpecs, versionFilter, flavorFilter, expected):
tups = queryrep.getTrovesToDisplay(repos, troveSpecs, [], [],
versionFilter, flavorFilter,
self.cfg.installLabelPath,
targetFlavor,
None)
self._checkTupVers(tups, expected)
# test ALL
_check(['foo:foo'], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
['1.0', '2.0-1-1', '2.0-2-1'])
_check(['foo:foo=2.0'], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
['2.0-1-1', '2.0-2-1'])
_check(['foo:foo[!readline]'], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
['1.0', '3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_ALL, FLAVOR_FILTER_AVAIL,
['1.0', '1.0', '3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
['1.0', '1.0', '1.0', '3.0'])
_check(['foo:foo'], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
['1.0', '1.0', '1.0', '1.0', '2.0', '2.0', '3.0'])
# test ALL w/ no spec
_check([], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
['1.0', '2.0-1-1', '2.0-2-1'])
_check([], VERSION_FILTER_ALL, FLAVOR_FILTER_AVAIL,
# ssl, ~ssl, readline,
['1.0', '1.0', '1.0', '2.0-1-1', '2.0-2-1'])
_check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
['1.0', '1.0', '1.0', '1.0', '2.0', '2.0', '3.0'])
# test LEAVES
_check(['foo:foo'], VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST,
['2.0-2-1'])
_check(['foo:foo=2.0'], VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST,
['2.0-2-1'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LEAVES,
FLAVOR_FILTER_BEST, ['3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LEAVES, # does not have
# !ssl
FLAVOR_FILTER_AVAIL, ['1.0', '1.0', '3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LEAVES, # include !ssl
FLAVOR_FILTER_ALL, ['1.0', '1.0', '1.0', '3.0'])
_check(['foo:foo'], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
['1.0', '1.0', '1.0', '2.0-2-1', '3.0'])
# check LEAVES with no spec
_check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST,
# best 1.0 flavor, best 2.0 flavor, no other version nodes
# have compatible flavor leaves.
['1.0', '2.0-2-1'])
_check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_AVAIL,
# add in ~ssl because it was also at 1.0 node
['1.0', '1.0', '2.0-2-1'])
_check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
['1.0', '1.0', '1.0', '2.0', '3.0'])
# test LATEST
_check(['foo:foo'], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
['2.0-2-1'])
_check(['foo:foo=2.0'], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
['2.0-2-1'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
FLAVOR_FILTER_BEST, ['3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
FLAVOR_FILTER_AVAIL, ['3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LATEST, # include !ssl
FLAVOR_FILTER_ALL, ['3.0'])
_check(['foo:foo'], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL,
['3.0'])
# to really test latest, we need to make the latest node more
# interesting..
# we've already got !readline there.
self.addComponent('foo:foo', '3.0', 'readline')
self.addComponent('foo:foo', '3.0', '!ssl')
self.addComponent('foo:foo', '3.0', 'ssl')
self.addComponent('foo:foo', '3.0', '~ssl')
_check(['foo:foo'], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
['3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
FLAVOR_FILTER_BEST, ['3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
# !readline, ssl, ~ssl
FLAVOR_FILTER_AVAIL, ['3.0', '3.0', '3.0'])
_check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
# !readline, ssl, ~ssl, !ssl
FLAVOR_FILTER_ALL, ['3.0', '3.0', '3.0', '3.0'])
# test LATEST w/ no spec
_check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
['3.0'])
_check([], VERSION_FILTER_LATEST,
# readline, ssl, ~ssl
FLAVOR_FILTER_AVAIL, ['3.0', '3.0', '3.0'])
_check([], VERSION_FILTER_LATEST,
# readline, ssl, ~ssl, !ssl
FLAVOR_FILTER_ALL, ['3.0', '3.0', '3.0', '3.0'])
def testQueryByTroveType(self):
repos = self.openRepository()
def _check(troveSpecs, versionFilter, flavorFilter, expected, all=False,
present=False):
if all:
troveTypes = TROVE_QUERY_ALL
elif present:
troveTypes = TROVE_QUERY_PRESENT
else:
troveTypes = TROVE_QUERY_NORMAL
tups = queryrep.getTrovesToDisplay(repos, troveSpecs, [], [],
versionFilter, flavorFilter,
self.cfg.installLabelPath,
self.cfg.flavor,
None,
troveTypes=troveTypes)
self._checkTupVers(tups, expected)
# foo is replaced by a redirect, bar is replaced by a redirect and a removed trove
self.addComponent('foo:run', '1.0')
self.addComponent('foo:run', '2.0', redirect=['bar:run'])
self.addComponent('bar:run', '1.0')
self.addComponent('bar:run', '2.0', redirect=['foo:run'])
self.addComponent('bar:run', '3.0')
self.markRemoved('bar:run=3.0')
# test ALL
_check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
['1.0', '1.0'])
_check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
['1.0', '2.0', '1.0', '2.0'], present=True)
_check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
['1.0', '2.0', '3.0', '1.0', '2.0'], all=True)
# test LEAVES
_check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL, [])
_check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
['2.0', '2.0'], present=True)
_check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
['3.0', '2.0'], all=True)
# test LATEST
_check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, [])
_check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, ['2.0', '2.0'], present=True)
_check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, ['3.0', '2.0'], all=True)
def testGetTrovesLeavesMultiRepos(self):
    """LEAVES and LATEST queries over a two-label install path must
    return the newest trove on each label, with or without trove specs.
    """
    trunkVersion = self.addComponent('foo:foo', '1.0').getVersion()
    branchVersion = self.addComponent('foo:foo', ':branch/1.0').getVersion()
    installLabelPath = conarycfg.CfgLabelList(
        [versions.Label('localhost@rpl:branch'),
         self.cfg.buildLabel])
    repos = self.openRepository()
    expected = set((trunkVersion, branchVersion))
    # Both version filters, each with and without an explicit trove
    # spec, must find the trove on both labels of the path.
    for versionFilter in (VERSION_FILTER_LEAVES, VERSION_FILTER_LATEST):
        for specs in ([], ['foo:foo']):
            tups = queryrep.getTrovesToDisplay(repos, specs, [], [],
                                               versionFilter,
                                               FLAVOR_FILTER_ALL,
                                               installLabelPath,
                                               self.cfg.flavor,
                                               affinityDb=None)
            assert(set(x[1] for x in tups) == expected)
def testGetTrovesLatestByLabel(self):
    """Exercise the no-trove-spec query path that uses
    getTrovesLatestByLabel.

    Three foo:run troves are created, two on a shadow onto rpl:linux.
    LATEST/BEST must return only the newest (trv3); LEAVES/BEST must
    return trv2 and trv3.
    """
    trv1 = self.addComponent('foo:run', '/localhost@rpl:branch//rpl:linux/1.0-1-1')
    trv2 = self.addComponent('foo:run', '1.0-1-2')
    trv3 = self.addComponent('foo:run', '/localhost@rpl:branch//rpl:linux/1.0-1-3', 'ssl')
    repos = self.openRepository()
    tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                       VERSION_FILTER_LATEST,
                                       FLAVOR_FILTER_BEST,
                                       self.cfg.installLabelPath,
                                       self.cfg.flavor, affinityDb=None)
    assert(len(tups) == 1)
    assert(tups[0] == trv3.getNameVersionFlavor())
    tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                       VERSION_FILTER_LEAVES,
                                       FLAVOR_FILTER_BEST,
                                       self.cfg.installLabelPath,
                                       self.cfg.flavor, affinityDb=None)
    # trv1 is not a leaf; only trv2 and trv3 are expected.
    assert(len(tups) == 2)
    assert(set(tups) == set([trv3.getNameVersionFlavor(),
                             trv2.getNameVersionFlavor()]))
def testLatestIsOfWrongFlavor(self):
# CNY-784 - if the latest version was of an incompatible flavor,
# conary rq <no args> would display nothing for that trove
v1 = self.addComponent('foo:foo', '1.0', 'is:x86').getVersion()
v2 = self.addComponent('foo:foo', '1.1', 'is:x86_64').getVersion()
targetFlavor = [ deps.parseFlavor('is:x86') ]
repos = self.openRepository()
tups = queryrep.getTrovesToDisplay(repos, [], [], [],
VERSION_FILTER_LATEST,
FLAVOR_FILTER_BEST,
self.cfg.installLabelPath,
targetFlavor, affinityDb=None)
assert(set([x[1] for x in tups]) == set((v1,)))
tups = queryrep.getTrovesToDisplay(repos, [], [], [],
VERSION_FILTER_LATEST,
FLAVOR_FILTER_AVAIL,
self.cfg.installLabelPath,
targetFlavor, affinityDb=None)
assert(set([x[1] for x in tups]) == set((v1,)))
def testExactFlavor(self):
    """FLAVOR_FILTER_EXACT matches only the literally identical flavor:
    '~ssl' does not match 'ssl' or the empty flavor.
    """
    self.addComponent('foo:run[~ssl]')
    repos = self.openRepository()
    def _get(troveSpec):
        # Convert the not-found exception into an empty result so the
        # asserts below read as simple truth tests.
        try:
            return queryrep.getTrovesToDisplay(repos, [troveSpec], [], [],
                                               VERSION_FILTER_LATEST,
                                               FLAVOR_FILTER_EXACT,
                                               self.cfg.installLabelPath,
                                               self.cfg.flavor, None)
        except errors.TroveNotFound:
            return []
    assert(not _get('foo:run[ssl]'))
    assert(not _get('foo:run'))
    assert(_get('foo:run[~ssl]'))
def testTroveNames(self):
    """troveNamesOnServer must hide a name only once every version of
    it has been removed; names referenced by a still-present group stay
    visible.
    """
    for x in "12":
        for ver in "12":
            self.addComponent("trv%s:lib" % x, ver)
            self.addComponent("trv%s:runtime" % x, ver)
            self.addCollection("trv%s" % x, ver, [":lib", ":runtime"])
    repos = self.openRepository()
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret),
                     set(['trv1:lib', 'trv1:runtime', 'trv1',
                          'trv2:lib', 'trv2:runtime', 'trv2']))
    # Removing one of two versions must not hide the name.
    self.markRemoved("trv1=1")
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret),
                     set(['trv1:lib', 'trv1:runtime', 'trv1',
                          'trv2:lib', 'trv2:runtime', 'trv2']))
    # Removing the last remaining version hides trv1 entirely.
    self.markRemoved("trv1=2")
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret), set(['trv2:lib', 'trv2:runtime', 'trv2']))
    self.addCollection("group-trv", [("trv2:runtime", "2"), ("trv3:runtime", "0")])
    ret = repos.troveNamesOnServer("localhost")
    # trv3:runtime is not present thus it shouldn't appear in the list
    self.assertEqual(set(ret), set(['trv2:lib', 'trv2:runtime', 'trv2', "group-trv"]))
    self.markRemoved("trv2=1")
    ret = repos.troveNamesOnServer("localhost")
    # trv2=2 is still there
    self.assertEqual(set(ret), set(['trv2:lib', 'trv2:runtime', 'trv2', "group-trv"]))
    self.markRemoved("trv2=2")
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret), set(["group-trv"]))
    self.markRemoved("group-trv")
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret), set())
    # A group referencing a present component keeps both names visible,
    # even though foo:runtime itself was never added.
    self.addCollection("group-other", ["foo:runtime", "foo:lib"])
    self.addComponent("foo:lib", "999")
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret), set(["foo:lib", "group-other"]))
    # Once the component is removed only the group's name survives.
    self.markRemoved("foo:lib=999")
    ret = repos.troveNamesOnServer("localhost")
    self.assertEqual(set(ret), set(["group-other"]))
def testAffinity(self):
    """With an affinityDb, queries follow the installed trove's branch
    and flavor preferences; with affinityDb=None they use only the
    configured label path and flavor.
    """
    self.addComponent('foo:r', '/localhost@rpl:branch/1.0-1-1', '!readline',
                      ['/usr/bin/foo'])
    self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                      'readline,~!ssl', ['/usr/bin/foo'])
    self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                      'readline,~ssl', ['/usr/bin/foo'])
    self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                      '!readline,~ssl', ['/usr/bin/foo'])
    self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                      '!readline,~!ssl', ['/usr/bin/foo'])
    # orig branch - found by very few queries
    self.addComponent('foo:r',
                      '/localhost@rpl:linux/1.0-1-1', 'readline')
    # Install the :branch [!readline] trove to establish affinity.
    self.updatePkg('foo:r=:branch/1.0[!readline]')
    repos = self.openRepository()
    def _get(affinityDb, versionFilter, flavorFilter, troveSpec):
        return queryrep.getTrovesToDisplay(repos, troveSpec, [], [],
                                           versionFilter,
                                           flavorFilter,
                                           self.cfg.installLabelPath,
                                           self.cfg.flavor, affinityDb)
    db = self.openDatabase()
    # BEST with affinity: the installed branch and !readline win.
    troveTups = _get(db, VERSION_FILTER_LATEST,
                     FLAVOR_FILTER_BEST, ['foo:r'])
    assert(len(troveTups) == 1)
    assert(troveTups[0][1].branch() == VFS('/localhost@rpl:branch'))
    assert(str(troveTups[0][2]) == '!readline,~ssl')
    # AVAIL with affinity: both system-compatible flavors on :branch.
    troveTups = _get(db, VERSION_FILTER_LATEST,
                     FLAVOR_FILTER_AVAIL, ['foo:r'])
    assert(len(troveTups) == 2)
    flavors = set(str(x[2]) for x in troveTups)
    assert('readline,~ssl' in flavors)
    assert('!readline,~ssl' in flavors)
    # system compatible, should ignore db
    troveTups = _get(None, VERSION_FILTER_LATEST,
                     FLAVOR_FILTER_AVAIL, ['foo:r'])
    assert(len(troveTups) == 1)
    assert(troveTups[0][1].branch() == VFS('/localhost@rpl:linux'))
    flavors = set(str(x[2]) for x in troveTups)
    assert('readline' in flavors)
def testQueryByPath(self):
    """getTrovesByPath must apply the version and flavor filters to
    troves found by path lookup, including multi-path queries.
    """
    for troveName in 'foo:run', 'bar:run':
        self.addComponent(troveName, '1.0', 'ssl', ['/usr/bin/foo'])
        self.addComponent(troveName, '1.0', '~ssl', ['/usr/bin/foo'])
        self.addComponent(troveName, '1.0', '!ssl', ['/usr/bin/foo'])
        self.addComponent(troveName, '1.0', 'readline', ['/usr/bin/foo'])
        self.addComponent(troveName, '2.0-1-1', 'readline', ['/usr/bin/foo'])
        self.addComponent(troveName, '2.0-2-1', 'readline', ['/usr/bin/foo'])
        self.addComponent(troveName, '3.0', '!readline', ['/usr/bin/foo'])
    repos = self.openRepository()
    targetFlavor = [ deps.parseFlavor('~readline,ssl is:x86') ]
    def _getByPath(versionFilter, flavorFilter, pathList=['/usr/bin/foo']):
        return queryrep.getTrovesByPath(repos, pathList,
                                        versionFilter, flavorFilter,
                                        self.cfg.installLabelPath,
                                        targetFlavor)
    def _check(tups, troveSpecs):
        # Verify that every returned tuple is matched by the expected
        # trove specs (exact set comparison via a database-style source).
        source = trovesource.SimpleTroveSource(tups)
        source.searchAsDatabase()
        troveSpecs = [ cmdline.parseTroveSpec(x) for x in troveSpecs ]
        results = source.findTroves(None, troveSpecs)
        receivedTups = itertools.chain(*results.itervalues())
        assert(set(receivedTups) == set(tups))
    assert(len(_getByPath(VERSION_FILTER_ALL, FLAVOR_FILTER_ALL)) == 14)
    tups = _getByPath(VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL)
    _check(tups, ['bar:run=3.0', 'foo:run=3.0'])
    # check leaves, should leave out 2.0-1-1 readline and 1.0 readline.
    tups = _getByPath(VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL)
    assert(len(tups) == 10)
    # we don't really need to check both foo and bar here...
    tups = [ x for x in tups if x[0] == 'bar:run' ]
    _check(tups, ['bar:run=1.0[ssl]', 'bar:run=2.0-2-1[readline]',
                  'bar:run=1.0[!ssl]', 'bar:run=1.0[~ssl]',
                  'bar:run=3.0[!readline]'])
    # get all compatible flavors, should leave out !readline and !ssl
    tups = _getByPath(VERSION_FILTER_ALL, FLAVOR_FILTER_AVAIL)
    assert(len(tups) == 10)
    tups = [ x for x in tups if x[0] == 'bar:run' ]
    _check(tups, ['bar:run=1.0[ssl]', 'bar:run=1.0[readline]',
                  'bar:run=2.0-1-1[readline]', 'bar:run=2.0-2-1[readline]',
                  'bar:run=1.0[~ssl]'])
    # get best flavors for each version
    tups = _getByPath(VERSION_FILTER_ALL, FLAVOR_FILTER_BEST)
    tups = [ x for x in tups if x[0] == 'bar:run' ]
    _check(tups, ['bar:run=1.0[ssl]', 'bar:run=2.0-1-1[readline]',
                  'bar:run=2.0-2-1[readline]'])
    tups = _getByPath(VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST)
    assert(len(tups) == 2)
    tups = [ x for x in tups if x[0] == 'bar:run' ]
    _check(tups, [ 'bar:run=2.0-2-1[readline]'])
    tups = _getByPath(VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST)
    assert(len(tups) == 2)
    _check(tups, ['bar:run=2.0-2-1[readline]', 'foo:run=2.0-2-1[readline]'])
    # add another path for testing querying two paths at once,
    # with different latest versions to test leaves handling.
    self.addComponent('foo:lib', '1.0', 'ssl', ['/usr/lib/foo'])
    self.addComponent('bar:lib', '1.0', 'ssl', ['/usr/lib/foo'])
    self.addComponent('bar:lib', '2.0', 'ssl', ['/usr/lib/foo'])
    tups = _getByPath(VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
                      ['/usr/lib/foo', '/usr/bin/foo'])
    _check(tups, ['bar:run=2.0-2-1[readline]', 'foo:run=2.0-2-1[readline]',
                  'foo:lib=1.0', 'bar:lib=2.0'])
def testWhatProvides(self):
    """What-provides (dependency) queries must honor the version and
    flavor filters; VERSION_FILTER_ALL is not fully supported here.
    """
    targetFlavor = [ deps.parseFlavor('is:x86') ]
    def _get(versionFilter, flavorFilter, whatProvidesList):
        # Query by parsed dependency instead of by trove spec.
        return queryrep.getTrovesToDisplay(repos, [], [],
                                   [deps.parseDep(x) for x in whatProvidesList],
                                   versionFilter,
                                   flavorFilter,
                                   self.cfg.installLabelPath,
                                   targetFlavor, None)
    self.addComponent('foo:run', '1', 'is:x86')
    self.addComponent('foo:run', '2', 'is:x86')
    self.addComponent('foo:run', '2', 'is:x86_64')
    repos = self.openRepository()
    troveTups = _get(VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST, ['trove:foo:run'])
    assert(len(troveTups) == 1)
    troveTups = _get(VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, ['trove:foo:run'])
    assert(len(troveTups) == 2)
    troveTups = _get(VERSION_FILTER_ALL, FLAVOR_FILTER_ALL, ['trove:foo:run'])
    # --all-versions doesn't really work with what-provides
    assert(len(troveTups) == 2)
def testBuildLogDisplay(self):
    """displayTroves with showBuildLog must transparently decompress and
    print a bz2-compressed, 'buildlog'-tagged file from :debuginfo.
    """
    buildlog = 'This is test buildlog'
    # BZ2Compressor.compress() itself returns (possibly empty) chunks of
    # compressed data; the previous code discarded that return value and
    # kept only flush(), which only works because the input is tiny.
    # Concatenating both is the correct general form.
    bz2compressor = bz2.BZ2Compressor()
    contents = bz2compressor.compress(buildlog) + bz2compressor.flush()
    self.addComponent('foo:runtime')
    self.addComponent('foo:debuginfo', [('/usr/bin/foo', rephelp.RegularFile(contents=contents, tags=['buildlog']))])
    self.addCollection('foo', [(':runtime', True), (':debuginfo', False)])
    repos = self.openRepository()
    output = self.captureOutput(queryrep.displayTroves, self.cfg, ['foo'], [], [],
        queryrep.VERSION_FILTER_LATEST, queryrep.FLAVOR_FILTER_BEST, showBuildLog = True)
    self.assertEqual(output[1], buildlog)
def testShowFile(self):
    """displayTroves with filesToShow prints only the requested file's
    contents, not those of the trove's other files.
    """
    fooContents = 'This is test content'
    barContents = 'This is another test content'
    self.addComponent('foo:runtime',
                      [('/usr/bin/foofile', fooContents),
                       ('/usr/bin/barfile', barContents)])
    self.addCollection('foo', [':runtime'])
    repos = self.openRepository()
    captured = self.captureOutput(
        queryrep.displayTroves, self.cfg, ['foo'], [], [],
        queryrep.VERSION_FILTER_LATEST, queryrep.FLAVOR_FILTER_BEST,
        filesToShow = ['/usr/bin/barfile'])
    self.assertEqual(captured[1], barContents)
def testRdiff1(self):
    """rdiff of foo=1[is:x86]--2[is:x86_64] in every display mode
    (default, --full-flavors, --full-versions, --ls, file versions,
    --lsl, --tags) against the expected-output constants below, plus a
    trove diffed against itself.
    """
    req1 = 'soname: ELF32/libfoo1(blah)'
    req2 = 'soname: ELF32/lib/foo2(blah)'
    req3 = 'soname: ELF32/lib/foo3(blah) trove:bar(1)'
    prov1 = "trove:bar(1) trove:baz(1)"
    prov2 = "trove:baz(1) trove:bloop(1)"
    prov3 = "trove:bloop(2) trove:bar(1)"
    buildReqs1 = [ ('py', '1', 'is: x'), ('by', '1', 'is: y'),
                   ('ty', '1', 'is: z')]
    buildReqs2 = [ ('py', '1', 'is: x'), ('my', '1', 'is: y'),
                   ('by', '2', 'is: z')]
    rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
        perms = 0644, provides = prov1, requires = req1,
        mtime = 1136921017,)
    rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
        perms = 0755, provides = prov2, requires = req2,
        mtime = 1136921317, tags=['tag2', 'tag1', 'tag3'])
    rf3 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n10\n',
        perms = 0400, provides = prov3, requires = req3,
        mtime = 1136921017)
    # rf5 differs from rf1 just by tags
    rf5 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
        perms = 0644, provides = prov1, requires = req1,
        mtime = 1136921017, tags=['tag2', 'tag1', 'tag3'])
    self.addComponent('foo:run', '1', 'is:x86',
                      [('/usr/bin/foo', rf1),
                       ('/usr/bin/bar', rf2),
                       ('/usr/bin/file1', rf1),
                      ])
    self.addComponent('foo:supdoc', '1', 'is:x86',
                      [('/usr/share/doc/foo1', rf1)])
    self.addCollection('foo', '1',
                       [(x, '1', 'is:x86') for x in [':run', ':supdoc']],
                       buildReqs=buildReqs1)
    self.addComponent('foo:run', '2', 'is:x86_64',
                      [('/usr/bin/foo', rf2),
                       ('/usr/bin/file1', rf5),
                       ('/usr/bin/baz', rf3),])
    self.addComponent('foo:doc', '2', 'is:x86_64',
                      [('/usr/share/doc/foo2', rf2)])
    self.addCollection('foo', '2',
                       [(x, '2', 'is:x86_64') for x in [':run', ':doc']],
                       buildReqs=buildReqs2)
    # Force search flavor to x86_64 to get consistent output on x86
    self.cfg.flavor = [deps.parseFlavor('is: x86 x86_64')]
    repos = self.openRepository()
    troveSpec = 'foo=1[is:x86]--2[is:x86_64]'
    ret, outs = self._rdiff(troveSpec)
    self.assertEqual(outs, expOutput1noargs)
    self.cfg.fullFlavors = True
    ret, outs = self._rdiff(troveSpec)
    self.assertEqual(outs, expOutput1fullFlavors)
    self.cfg.fullFlavors = False
    self.cfg.fullVersions = True
    ret, outs = self._rdiff(troveSpec)
    self.assertEqual(outs, expOutput1fullVersions)
    self.cfg.fullVersions = False
    ret, outs = self._rdiff(troveSpec, ls = True)
    self.assertEqual(outs, expOutput1withFiles)
    ret, outs = self._rdiff(troveSpec, fileVersions = True)
    self.assertEqual(outs, expOutput1withFileVersions)
    ret, outs = self._rdiff(troveSpec, lsl = True)
    self.assertEqual(outs, expOutput1withFilesStat)
    ret, outs = self._rdiff(troveSpec, tags = True)
    self.assertEqual(outs, expOutput1withFileTags)
    # Diffing against ourselves
    troveSpec = 'foo=1[is:x86]--1[is:x86]'
    ret, outs = self._rdiff(troveSpec, tags = True)
    self.assertEqual(outs, 'Identical troves\n')
def testRdiff2(self):
    """Test showing of troves with no changes: rdiff across group
    versions must not report the unchanged group-bar subtree.
    """
    req1 = 'soname: ELF32/lib/foo3(blah) trove:bar(1)'
    req2 = 'soname: ELF32/lib/foo2(blah)'
    prov1 = "trove:bar(1) trove:baz(1)"
    prov2 = "trove:bar(1) trove:baz(1) soname: ELF32/lib/foo2(blah)"
    rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
        perms = 0644, provides = prov1, requires = req1,
        mtime = 1176921017,)
    rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
        perms = 0755, provides = prov2, requires = req2,
        mtime = 1176921317, tags=['tag2', 'tag1', 'tag3'])
    self.addComponent('foo:run', '1', 'is:x86', [('/usr/bin/foo', rf1)])
    self.addComponent('foo:supdoc', '1', 'is:x86', [('/usr/doc/foo1', rf2)])
    self.addCollection('foo', '1',
                       [(x, '1', 'is:x86') for x in [':run', ':supdoc']])
    self.addComponent('bar:run', '1', [ ('/usr/bin/bar', rf1) ])
    self.addCollection('bar', '1', [':run'])
    self.addCollection('group-bar', '1', ['bar'])
    self.addCollection('group-foo', '1',
                       [('foo', '1', 'is:x86'), 'group-bar'])
    # Version 2 updates foo but keeps group-bar at version 1.
    self.addComponent('foo:run', '2', 'is:x86', [('/usr/bin/foo', rf1)])
    self.addComponent('foo:doc', '2', 'is:x86', [('/usr/doc/foo1', rf2)])
    self.addCollection('foo', '2',
                       [(x, '2', 'is:x86') for x in [':run', ':doc']])
    self.addCollection('group-foo', '2', [('foo', '2', 'is:x86'),
                                          ('group-bar', '1', '')])
    troveSpec = 'group-foo=1--2'
    ret, outs = self._rdiff(troveSpec)
    self.assertEqual(outs, expOutput2)
def testRdiff3(self):
    """rdiff --lsl output when a path changes from a regular file to a
    symbolic link.
    """
    # Have a file change from regular file to symbolic link
    rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
        perms = 0644, mtime = 1136921017)
    lf1 = rephelp.Symlink("/etc/passwd")
    self.addComponent('foo:run', '1', [('/usr/bin/foo', rf1)])
    self.addCollection('foo', '1', [':run'])
    self.addComponent('foo:run', '2',
                      [('/etc/passwd', rf1), ('/usr/bin/foo', lf1)])
    self.addCollection('foo', '2', [':run'])
    ret, outs = self._rdiff('foo=1--2', lsl = True)
    # Mask the timezone-dependent timestamps before comparing against
    # the expected output.
    outs = re.sub(" [0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]* ", " <TIMESTRING TIMESTAMP> ", outs)
    self.assertEqual(outs, expOutput3)
def testRdiff4(self):
    """test trove dependencies: rdiff --deps must list the changed
    component's provides and requires.
    """
    req1 = 'soname: ELF32/lib/foo3(blah) trove:bar(1)'
    req2 = 'soname: ELF32/lib/foo2(blah)'
    prov1 = "trove:bar(1) trove:baz(1)"
    prov2 = "trove:bar(1) trove:baz(1) soname: ELF32/lib/foo2(blah)"
    rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
        perms = 0644, provides = prov1, requires = req1,
        mtime = 1176921017,)
    rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
        perms = 0755, provides = prov2, requires = req2,
        mtime = 1176921317, tags=['tag2', 'tag1', 'tag3'])
    self.addComponent('foo:run', '1', [('/usr/bin/foo', rf1)])
    self.addCollection('foo', '1', [':run'])
    self.addComponent('foo:run', '2', [('/usr/bin/foo', rf2)])
    self.addCollection('foo', '2', [':run'])
    ret, outs = self._rdiff('foo=1--2')
    self.assertEqual(outs, expOutput4)
    ret, outs = self._rdiff('foo:run=1--2', deps = True)
    self.assertEqual(outs, expOutput4withTroveDeps)
def testRdiff5(self):
    """CNY-1605: rdiff of a group containing two flavors of the same
    trove must pair each flavor with its own counterpart.
    """
    # Create two flavors of the same trove and add them to the same group
    flv1 = '~ssl'
    flv2 = '~!ssl'
    rf11 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
                               flavor=flv1)
    rf12 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
                               flavor=flv2)
    rf21 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
                               flavor=flv1)
    rf22 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
                               flavor=flv2)
    files = [ (rf11, rf12), (rf21, rf22) ]
    # Build version 1 and version 2 of the group, each with both flavors.
    for v, fileobjs in zip([ '1', '2' ], files):
        file1 = fileobjs[0]
        file2 = fileobjs[1]
        t1 = self.addComponent('foo:run', v, fileContents=[('/bin/foo', file1)])
        p1 = self.addCollection('foo', v, [(':run', v, t1.getFlavor())])
        t2 = self.addComponent('foo:run', v, fileContents=[('/bin/foo', file2)])
        p2 = self.addCollection('foo', v, [(':run', v, t2.getFlavor())])
        self.addCollection('group-foo', v,
                           [('foo', v, flv1), ('foo', v, flv2)])
    troveSpec = 'group-foo=1--2'
    self.cfg.fullFlavors = True
    ret, outs = self._rdiff(troveSpec)
    self.cfg.fullFlavors = False
    self.assertEqual(outs, expOutput5)
def testRdiff6(self):
    """rdiff between two group versions must report installed ('added')
    and erased ('erased') members in addition to updates.
    """
    # One package that stays, one that is dropped, one that is added.
    for packageName in ('foo', 'erased', 'added'):
        self.addComponent(packageName + ':run', '1', filePrimer=1)
        self.addCollection(packageName, '1', [':run'])
    self.addCollection('group-foo', '1',
                       [ ('foo', '1'), ('erased', '1') ])
    self.addCollection('group-foo', '2',
                       [ ('foo', '1'), ('added', '1') ])
    ret, outs = self._rdiff('group-foo=1--2')
    self.assertEqual(outs, expOutput6)
def testRdiff8(self):
    """CNY-1753: rdiff between files living on different branches and
    repositories.  Skipped -- the bug could not be reproduced; the body
    below the raise is kept for reference only and never runs.
    """
    raise testhelp.SkipTestException("Unable to reproduce CNY-1753 in a test case")
    # Manifested in running conary rdiff
    # mkinitrd=conary.rpath.com@rpl:1--usplash.rb.rpath.com@rpl:1
    rf1 = rephelp.RegularFile(contents='\000\001\002\003',
        perms = 0644, mtime = 1176921017,)
    rf2 = rephelp.RegularFile(contents='\000\001\003\005',
        perms = 0644, mtime = 1176921317,)
    v1 = versions.ThawVersion('/localhost@rpl:1/1:1-1-1')
    v2 = versions.ThawVersion('/localhost1@rpl:2/2:2-2-2')
    self.openRepository()
    self.openRepository(1)
    self.addComponent('foo:run', v1, [('/bin/foo', rf1)])
    self.addCollection('foo', v1, [':run'])
    self.addComponent('foo:run', v2, [('/bin/foo', rf2)])
    self.addCollection('foo', v2, [':run'])
    troveSpec = cmdline.parseChangeList('foo=%s--%s' % (v1, v2))[0]
    ret, outs = self.captureOutput(queryrep.diffTroves,
                                   self.cfg, troveSpec)
    self.assertEqual(outs, '')
def testRdiff9(self):
"""Binary changes to config; using --diff"""
rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
perms = 0644, mtime = 1136921017, config=False)
rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
perms = 0644, mtime = 1136921317, config=True)
self.addComponent('foo:config', '1', [('/etc/foo', rf1)])
self.addComponent('foo:config', '2', [('/etc/foo', rf2)])
ret, outs = self._rdiff('foo:config=1--2', asDiff=True)
self.assertEqual(outs, expOutput9)
# --- Expected rdiff outputs referenced by the testRdiff* methods above. ---

# testRdiff1: default display.
expOutput1noargs = """\
Update foo(:run) (1-1-1[is: x86] -> 2-1-1[is: x86_64])
Install foo:doc=2-1-1
Erase foo:supdoc=1-1-1
"""
# testRdiff1: with cfg.fullFlavors enabled.
expOutput1fullFlavors = """\
Update foo(:run) (1-1-1[is: x86] -> 2-1-1[is: x86_64])
Install foo:doc=2-1-1[is: x86_64]
Erase foo:supdoc=1-1-1[is: x86]
"""
# testRdiff1: with cfg.fullVersions enabled.
expOutput1fullVersions = """\
Update foo(:run) (/localhost@rpl:linux/1-1-1[is: x86] -> /localhost@rpl:linux/2-1-1[is: x86_64])
Install foo:doc=/localhost@rpl:linux/2-1-1
Erase foo:supdoc=/localhost@rpl:linux/1-1-1
"""
# testRdiff1: --ls (changed paths only).
expOutput1withFiles = """\
/usr/share/doc/foo2
/usr/bin/bar
/usr/bin/baz
/usr/bin/file1
/usr/bin/foo
/usr/share/doc/foo1
"""
# testRdiff1: with file versions.
expOutput1withFileVersions = """\
/usr/share/doc/foo2 2-1-1
/usr/bin/bar 1-1-1
/usr/bin/baz 2-1-1
/usr/bin/file1 2-1-1
/usr/bin/foo 2-1-1
/usr/share/doc/foo1 1-1-1
"""
# testRdiff1: --lsl (long stat listing).
expOutput1withFilesStat = """\
New -rwxr-xr-x 1 root root 16 2006-01-10 19:28:37 UTC /usr/share/doc/foo2
Del -rwxr-xr-x 1 root root 16 2006-01-10 19:28:37 UTC /usr/bin/bar
New -r-------- 1 root root 17 2006-01-10 19:23:37 UTC /usr/bin/baz
Mod -rw-r--r-- 1 root root 16 2006-01-10 19:23:37 UTC /usr/bin/file1
Mod -rwxr-xr-x 1 root root 16 2006-01-10 19:28:37 UTC /usr/bin/foo
Del -rw-r--r-- 1 root root 16 2006-01-10 19:23:37 UTC /usr/share/doc/foo1
"""
# testRdiff1: --tags.
expOutput1withFileTags = """\
/usr/share/doc/foo2 {tag1 tag2 tag3}
/usr/bin/bar {tag1 tag2 tag3}
/usr/bin/baz
/usr/bin/file1 {tag1 tag2 tag3}
/usr/bin/foo {tag1 tag2 tag3}
/usr/share/doc/foo1
"""
# testRdiff2: unchanged group-bar subtree is omitted.
expOutput2 = """\
Update foo(:run) (1-1-1 -> 2-1-1)
Install foo:doc=2-1-1
Erase foo:supdoc=1-1-1
Update group-foo (1-1-1 -> 2-1-1)
"""
# testRdiff3: timestamps are normalized before comparison.
expOutput3 = """\
New -rw-r--r-- 1 root root 16 <TIMESTRING TIMESTAMP> UTC /etc/passwd
Mod lrwxrwxrwx 1 root root 11 <TIMESTRING TIMESTAMP> UTC /usr/bin/foo -> /etc/passwd
"""
expOutput4 = """\
Update foo(:run) (1-1-1 -> 2-1-1)
"""
# testRdiff4: --deps output for the changed component.
expOutput4withTroveDeps = """\
Update foo:run (1-1-1 -> 2-1-1)\nProvides:\n trove: bar(1)\n trove: baz(1)\n trove: foo:run\n soname: ELF32/lib/foo2(blah)\n\nRequires:\n soname: ELF32/lib/foo2(blah)\n
"""
# testRdiff5: each flavor updates against its own counterpart.
expOutput5 = """\
Update foo(:run) (1-1-1[~!ssl] -> 2-1-1[~!ssl])
Update foo(:run) (1-1-1[~ssl] -> 2-1-1[~ssl])
Update group-foo (1-1-1 -> 2-1-1)
"""
# testRdiff6: install/erase reporting.
expOutput6 = """\
Install added(:run)=1-1-1
Erase erased=1-1-1
Update group-foo (1-1-1 -> 2-1-1)
"""
# testRdiff9: --diff (git-style) output for the config file change.
expOutput9 = """\
diff --git a/etc/foo b/etc/foo
--- a/etc/foo
+++ b/etc/foo
@@ -1,8 +1,8 @@
1
2
-3
4
5
6
7
8
+9
"""
class MultiRepQueryTest(rephelp.RepositoryHelper):
    """Query tests that span troves stored in two separate repositories."""

    def setUp(self):
        rephelp.RepositoryHelper.setUp(self)

    def tearDown(self):
        # Stop both repositories started by tests in this class.
        # NOTE(review): does not chain to RepositoryHelper.tearDown --
        # confirm that is intentional.
        self.servers.stopServer(1)
        self.servers.stopServer(0)

    def _rdiff(self, troveSpec, **kwargs):
        """Run the rdiff command, returning (result, captured output)."""
        client = conaryclient.ConaryClient(self.cfg)
        return self.captureOutput(queryrep.rdiffCommand, self.cfg,
                                  client, client.getDatabase(), troveSpec,
                                  **kwargs)

    def _openRepository(self, idx, serverName="localhost"):
        """(Re)start repository ``idx`` fresh, with a user/pass account
        that has write and remove permission on <serverName>@foo:bar.
        """
        # this could be left open from a previous testsuite running
        label = versions.Label("%s@foo:bar" % serverName)
        self.servers.stopServer(idx)
        repo = self.openRepository(idx, serverName=[serverName])
        self.resetRepository(idx)
        self.addUserAndRole(repo, label, "user", "pass")
        repo.addAcl(label, "user", None, None, write=True, remove=True)
        return repo

    def testRdiffMulti(self):
        # CNY-2544 - groups including troves from foreign repos
        r0 = self._openRepository(0, "localhost")
        r1 = self._openRepository(1, "otherhost")
        c = self.getRepositoryClient("user", "pass")
        self.addComponent("other:runtime", "/otherhost@foo:bar/9", repos = c)
        self.addComponent("other:lib", "/otherhost@foo:bar/9", repos = c)
        trv = self.addCollection("other", "/otherhost@foo:bar/9", [ ":runtime", ":lib"], repos = c)
        grpfuu = self.addCollection("group-fuu", "/localhost@foo:bar/1", [ trv.getNameVersionFlavor() ], repos = c)
        grpfoo1 = self.addCollection("group-foo", "/localhost@foo:bar/1", [ grpfuu.getNameVersionFlavor() ], repos = c)
        grpfoo2 = self.addCollection("group-foo", "/localhost@foo:bar/2", [ trv.getNameVersionFlavor() ], repos = c)
        ret, outs = self._rdiff(
            'group-foo=localhost@foo:bar/1--localhost@foo:bar/2')
        self.assertEqual(outs, expOutput7)
# Expected output for MultiRepQueryTest.testRdiffMulti.
expOutput7 = """\
Update group-foo (1-1-1 -> 2-1-1)
Erase group-fuu=1-1-1
Install other(:lib :runtime)=9-1-1
"""
"""
Acceptance tests for Studio related to the container page.
"""
from nose.plugins.attrib import attr
from ..pages.studio.overview import CourseOutlinePage
from ..fixtures.course import XBlockFixtureDesc
from ..pages.studio.component_editor import ComponentEditorView
from ..pages.studio.utils import add_discussion
from unittest import skip
from acceptance.tests.base_studio_test import StudioCourseTest
@attr('shard_1')
class ContainerBase(StudioCourseTest):
    """
    Base class for tests that do operations on the container page.
    """
    __test__ = False

    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(ContainerBase, self).setUp()

        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

    def go_to_container_page(self, make_draft=False):
        """
        Go to the test container page.
        If make_draft is true, the unit page (accessed on way to container page) will be put into draft mode.
        """
        unit = self.go_to_unit_page(make_draft)
        container = unit.components[0].go_to_container()
        return container

    def go_to_unit_page(self, make_draft=False):
        """
        Go to the test unit page.
        If make_draft is true, the unit page will be put into draft mode.
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.toggle_expand().unit('Test Unit').go_to()
        if make_draft:
            unit.edit_draft()
        return unit

    def verify_ordering(self, container, expected_orderings):
        """
        Verifies the expected ordering of xblocks on the page.

        ``expected_orderings`` is a list of single-entry dicts, each
        mapping a parent block name to the ordered list of its
        children's names.
        """
        xblocks = container.xblocks
        blocks_checked = set()
        for expected_ordering in expected_orderings:
            # The parent name and expected children are loop-invariant
            # for the xblock scan below; compute them once instead of on
            # every iteration.
            parent = expected_ordering.keys()[0]
            expected_children = expected_ordering.get(parent)
            for xblock in xblocks:
                if xblock.name == parent:
                    blocks_checked.add(parent)
                    children = xblock.children
                    expected_length = len(expected_children)
                    self.assertEqual(
                        expected_length, len(children),
                        "Number of children incorrect for group {0}. Expected {1} but got {2}.".format(parent, expected_length, len(children)))
                    for idx, expected in enumerate(expected_children):
                        self.assertEqual(expected, children[idx].name)
                        blocks_checked.add(expected)
                    break
        # Every xblock on the page must have been accounted for.
        self.assertEqual(len(blocks_checked), len(xblocks))

    def do_action_and_verify(self, action, expected_ordering):
        """
        Perform the supplied action and then verify the resulting ordering.
        """
        container = self.go_to_container_page(make_draft=True)
        action(container)
        self.verify_ordering(container, expected_ordering)

        # Reload the page to see that the change was persisted.
        container = self.go_to_container_page()
        self.verify_ordering(container, expected_ordering)
class NestedVerticalTest(ContainerBase):
    """Fixture base: builds a course whose test unit contains a container
    of nested verticals (Group A, Group Empty, Group B) and records the
    names/indices the drag, add, and duplicate tests rely on.
    """
    __test__ = False

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with nested verticals.
        """
        self.container_title = ""
        # Displayed group names include their "Expand or Collapse" toggle text.
        self.group_a = "Expand or Collapse\nGroup A"
        self.group_b = "Expand or Collapse\nGroup B"
        self.group_empty = "Expand or Collapse\nGroup Empty"
        self.group_a_item_1 = "Group A Item 1"
        self.group_a_item_2 = "Group A Item 2"
        self.group_b_item_1 = "Group B Item 1"
        self.group_b_item_2 = "Group B Item 2"

        # Drag-handle indices -- presumably in document order of the
        # xblocks on the container page; verify against the page object.
        self.group_a_handle = 0
        self.group_a_item_1_handle = 1
        self.group_a_item_2_handle = 2
        self.group_empty_handle = 3
        self.group_b_handle = 4
        self.group_b_item_1_handle = 5
        self.group_b_item_2_handle = 6

        # Action-menu indices used by duplicate operations.
        self.group_a_item_1_action_index = 0
        self.group_a_item_2_action_index = 1

        self.duplicate_label = "Duplicate of '{0}'"
        self.discussion_label = "Discussion"

        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('vertical', 'Test Container').add_children(
                            XBlockFixtureDesc('vertical', 'Group A').add_children(
                                XBlockFixtureDesc('html', self.group_a_item_1),
                                XBlockFixtureDesc('html', self.group_a_item_2)
                            ),
                            XBlockFixtureDesc('vertical', 'Group Empty'),
                            XBlockFixtureDesc('vertical', 'Group B').add_children(
                                XBlockFixtureDesc('html', self.group_b_item_1),
                                XBlockFixtureDesc('html', self.group_b_item_2)
                            )
                        )
                    )
                )
            )
        )
@attr('shard_1')
class DragAndDropTest(NestedVerticalTest):
    """
    Tests of reordering within the container page.
    """
    __test__ = True

    def drag_and_verify(self, source, target, expected_ordering):
        """Drag handle ``source`` onto ``target`` and verify the ordering
        (including after a page reload, via do_action_and_verify)."""
        self.do_action_and_verify(
            lambda (container): container.drag(source, target),
            expected_ordering
        )

    @skip("Sporadically drags outside of the Group.")
    def test_reorder_in_group(self):
        """
        Drag Group A Item 2 before Group A Item 1.
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2, self.group_a_item_1]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_a_item_2_handle, self.group_a_item_1_handle, expected_ordering)

    def test_drag_to_top(self):
        """
        Drag Group A Item 1 to top level (outside of Group A).
        """
        expected_ordering = [{self.container_title: [self.group_a_item_1, self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_a_item_1_handle, self.group_a_handle, expected_ordering)

    def test_drag_into_different_group(self):
        """
        Drag Group B Item 1 into Group A (first element).
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)

    def test_drag_group_into_group(self):
        """
        Drag Group B into Group A (first element).
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty]},
                             {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)

    def test_drag_after_addition(self):
        """
        Add some components and then verify that drag and drop still works.
        """
        group_a_menu = 0

        def add_new_components_and_rearrange(container):
            # Add a video component to Group 1
            add_discussion(container, group_a_menu)
            # Duplicate the first item in Group A
            container.duplicate(self.group_a_item_1_action_index)

            first_handle = self.group_a_item_1_handle
            # Drag newly added video component to top.
            container.drag(first_handle + 3, first_handle)
            # Drag duplicated component to top.
            container.drag(first_handle + 2, first_handle)

        duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering)
@attr('shard_1')
class AddComponentTest(NestedVerticalTest):
    """
    Tests of adding a component to the container page.
    """
    __test__ = True

    def add_and_verify(self, menu_index, expected_ordering):
        """Add a discussion through the menu at ``menu_index`` and check
        the resulting ordering (also after a page reload)."""
        self.do_action_and_verify(
            lambda container: add_discussion(container, menu_index),
            expected_ordering
        )

    def test_add_component_in_group(self):
        """A component added in Group B appears at the end of Group B."""
        expected = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]},
            {self.group_empty: []},
        ]
        # Menu index 2 is Group B's add-component menu.
        self.add_and_verify(2, expected)

    def test_add_component_in_empty_group(self):
        """A component added to the empty group becomes its only child."""
        expected = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: [self.discussion_label]},
        ]
        # Menu index 1 is Group Empty's add-component menu.
        self.add_and_verify(1, expected)

    def test_add_component_in_container(self):
        """A component added at container level follows the groups."""
        expected = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]},
            {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: []},
        ]
        # Menu index 3 is the container's own add-component menu.
        self.add_and_verify(3, expected)
@attr('shard_1')
class DuplicateComponentTest(NestedVerticalTest):
    """
    Tests of duplicating a component on the container page.
    """
    __test__ = True

    def duplicate_and_verify(self, source_index, expected_ordering):
        """
        Duplicate the component at ``source_index`` and verify that the
        container children afterwards match ``expected_ordering``.
        """
        # FIX: ``lambda (container):`` is Python-2-only tuple-parameter
        # syntax (removed by PEP 3113); a plain parameter is equivalent
        # and valid on both Python 2 and 3.
        self.do_action_and_verify(
            lambda container: container.duplicate(source_index),
            expected_ordering
        )

    def test_duplicate_first_in_group(self):
        duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering)

    def test_duplicate_second_in_group(self):
        duplicate_label = self.duplicate_label.format(self.group_a_item_2)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering)

    def test_duplicate_the_duplicate(self):
        """
        Duplicate a component, then duplicate the duplicate; each copy should
        appear directly after its source.
        """
        first_duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        second_duplicate_label = self.duplicate_label.format(first_duplicate_label)

        expected_ordering = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: []}
        ]

        def duplicate_twice(container):
            container.duplicate(self.group_a_item_1_action_index)
            # The first duplicate shifted everything below item 1 down by one.
            container.duplicate(self.group_a_item_1_action_index + 1)

        self.do_action_and_verify(duplicate_twice, expected_ordering)
@attr('shard_1')
class DeleteComponentTest(NestedVerticalTest):
    """
    Tests of deleting a component from the container page.
    """
    __test__ = True

    def delete_and_verify(self, source_index, expected_ordering):
        """
        Delete the component at ``source_index`` and verify that the
        container children afterwards match ``expected_ordering``.
        """
        # FIX: ``lambda (container):`` is Python-2-only tuple-parameter
        # syntax (removed by PEP 3113); a plain parameter is equivalent
        # and valid on both Python 2 and 3.
        self.do_action_and_verify(
            lambda container: container.delete(source_index),
            expected_ordering
        )

    def test_delete_first_in_group(self):
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        # Group A itself has a delete icon now, so item_1 is index 1 instead of 0.
        group_a_item_1_delete_index = 1
        self.delete_and_verify(group_a_item_1_delete_index, expected_ordering)
@attr('shard_1')
class EditContainerTest(NestedVerticalTest):
"""
Tests of editing a container.
"""
__test__ = True
def modify_display_name_and_verify(self, component):
    """
    Helper method for changing a display name.
    """
    modified_name = 'modified'
    # Sanity check: the test is meaningless if the name already matches.
    self.assertNotEqual(component.name, modified_name)
    component.edit()
    component_editor = ComponentEditorView(self.browser, component.locator)
    component_editor.set_field_value_and_save('Display Name', modified_name)
    self.assertEqual(component.name, modified_name)
def test_edit_container_on_unit_page(self):
    """
    Test the "edit" button on a container appearing on the unit page.
    """
    unit = self.go_to_unit_page(make_draft=True)
    # The container under test is the first component on the unit page.
    component = unit.components[0]
    self.modify_display_name_and_verify(component)
def test_edit_container_on_container_page(self):
"""
Test the "edit" button on a container appearing on the container page.
"""
container = self.go_to_container_page(make_draft=True)
self.modify_display_name_and_verify(container) | unknown | codeparrot/codeparrot-clean | ||
import { test } from '../../test';
export default test({
mode: ['client', 'hydrate'], // there's no class instance to retrieve in SSR mode
html: `
<div>foo</div>
<div>has foo: true</div>
`
}); | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/tests/runtime-legacy/samples/binding-this-component-computed-key/_config.js |
""" monkeypatching and mocking functionality. """
from __future__ import absolute_import, division, print_function
import os
import sys
import re
import six
from _pytest.fixtures import fixture
RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
@fixture
def monkeypatch():
    """The returned ``monkeypatch`` fixture provides these
    helper methods to modify objects, dictionaries or os.environ::

        monkeypatch.setattr(obj, name, value, raising=True)
        monkeypatch.delattr(obj, name, raising=True)
        monkeypatch.setitem(mapping, name, value)
        monkeypatch.delitem(obj, name, raising=True)
        monkeypatch.setenv(name, value, prepend=False)
        monkeypatch.delenv(name, value, raising=True)
        monkeypatch.syspath_prepend(path)
        monkeypatch.chdir(path)

    All modifications will be undone after the requesting
    test function or fixture has finished. The ``raising``
    parameter determines if a KeyError or AttributeError
    will be raised if the set/deletion operation has no target.
    """
    mpatch = MonkeyPatch()
    yield mpatch
    # Teardown: pytest resumes the generator after the test has finished,
    # so everything the test patched is restored here.
    mpatch.undo()
def resolve(name):
    """
    Resolve a dotted import path to the object it names.

    Walks ``name`` part by part: first as attribute access on the object
    imported so far, and — only if that fails — by importing ``used`` as a
    sub-module. Raises ImportError/AttributeError naming the failing part.
    """
    # simplified from zope.dottedname
    parts = name.split('.')

    used = parts.pop(0)
    found = __import__(used)
    for part in parts:
        used += '.' + part
        try:
            found = getattr(found, part)
        except AttributeError:
            pass
        else:
            continue
        # we use explicit un-nesting of the handling block in order
        # to avoid nested exceptions on python 3
        try:
            __import__(used)
        except ImportError as ex:
            # str is used for py2 vs py3
            expected = str(ex).split()[-1]
            if expected == used:
                # The missing module really is ``used`` itself: re-raise.
                raise
            else:
                # Some *other* import inside ``used`` failed: annotate it.
                raise ImportError(
                    'import error in %s: %s' % (used, ex)
                )
        found = annotated_getattr(found, part, used)
    return found
def annotated_getattr(obj, name, ann):
    """
    Like :func:`getattr`, but on failure raise an AttributeError whose
    message includes *ann* (the dotted path resolved so far), which makes
    patching errors much easier to locate.
    """
    try:
        value = getattr(obj, name)
    except AttributeError:
        message = '%r object at %s has no attribute %r' % (
            type(obj).__name__, ann, name)
        raise AttributeError(message)
    return value
def derive_importpath(import_path, raising):
    """
    Split a dotted ``import_path`` into ``(attribute_name, target_object)``.

    The last dotted component is the attribute; everything before it is
    imported via :func:`resolve`. With ``raising`` true, an AttributeError
    is raised immediately if the attribute does not currently exist.
    """
    if not isinstance(import_path, six.string_types) or "." not in import_path:
        raise TypeError("must be absolute import path string, not %r" %
                        (import_path,))
    module, attr = import_path.rsplit('.', 1)
    target = resolve(module)
    if raising:
        # Probe the attribute now so errors surface at patch time, not later.
        annotated_getattr(target, attr, ann=module)
    return attr, target
class Notset:
    # Sentinel type used to distinguish "argument not supplied" from
    # legitimate values such as ``None``.
    def __repr__(self):
        return "<notset>"


# Module-level singleton sentinel shared by all MonkeyPatch helpers.
notset = Notset()
class MonkeyPatch:
    """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
    """

    def __init__(self):
        # Undo stacks: every mutation pushes enough information onto one of
        # these for undo() to restore the previous state (``notset`` marks
        # "did not exist before").
        self._setattr = []
        self._setitem = []
        self._cwd = None
        self._savesyspath = None

    def setattr(self, target, name, value=notset, raising=True):
        """ Set attribute value on target, memorizing the old value.
        By default raise AttributeError if the attribute did not exist.

        For convenience you can specify a string as ``target`` which
        will be interpreted as a dotted import path, with the last part
        being the attribute name. Example:
        ``monkeypatch.setattr("os.getcwd", lambda x: "/")``
        would set the ``getcwd`` function of the ``os`` module.

        The ``raising`` value determines if the setattr should fail
        if the attribute is not already present (defaults to True
        which means it will raise).
        """
        __tracebackhide__ = True
        import inspect

        if value is notset:
            # Two-argument form: setattr("module.attr", value).
            if not isinstance(target, six.string_types):
                raise TypeError("use setattr(target, name, value) or "
                                "setattr(target, value) with target being a dotted "
                                "import string")
            value = name
            name, target = derive_importpath(target, raising)

        oldval = getattr(target, name, notset)
        if raising and oldval is notset:
            raise AttributeError("%r has no attribute %r" % (target, name))

        # avoid class descriptors like staticmethod/classmethod
        if inspect.isclass(target):
            oldval = target.__dict__.get(name, notset)
        self._setattr.append((target, name, oldval))
        setattr(target, name, value)

    def delattr(self, target, name=notset, raising=True):
        """ Delete attribute ``name`` from ``target``, by default raise
        AttributeError it the attribute did not previously exist.

        If no ``name`` is specified and ``target`` is a string
        it will be interpreted as a dotted import path with the
        last part being the attribute name.

        If ``raising`` is set to False, no exception will be raised if the
        attribute is missing.
        """
        __tracebackhide__ = True
        if name is notset:
            # Single-argument form: delattr("module.attr").
            if not isinstance(target, six.string_types):
                raise TypeError("use delattr(target, name) or "
                                "delattr(target) with target being a dotted "
                                "import string")
            name, target = derive_importpath(target, raising)

        if not hasattr(target, name):
            if raising:
                raise AttributeError(name)
        else:
            # Record the current value so undo() can restore it.
            self._setattr.append((target, name, getattr(target, name, notset)))
            delattr(target, name)

    def setitem(self, dic, name, value):
        """ Set dictionary entry ``name`` to value. """
        self._setitem.append((dic, name, dic.get(name, notset)))
        dic[name] = value

    def delitem(self, dic, name, raising=True):
        """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.

        If ``raising`` is set to False, no exception will be raised if the
        key is missing.
        """
        if name not in dic:
            if raising:
                raise KeyError(name)
        else:
            self._setitem.append((dic, name, dic.get(name, notset)))
            del dic[name]

    def setenv(self, name, value, prepend=None):
        """ Set environment variable ``name`` to ``value``. If ``prepend``
        is a character, read the current environment variable value
        and prepend the ``value`` adjoined with the ``prepend`` character."""
        # os.environ only accepts strings, so coerce the value here.
        value = str(value)
        if prepend and name in os.environ:
            value = value + prepend + os.environ[name]
        self.setitem(os.environ, name, value)

    def delenv(self, name, raising=True):
        """ Delete ``name`` from the environment. Raise KeyError it does not
        exist.

        If ``raising`` is set to False, no exception will be raised if the
        environment variable is missing.
        """
        self.delitem(os.environ, name, raising=raising)

    def syspath_prepend(self, path):
        """ Prepend ``path`` to ``sys.path`` list of import locations. """
        # Save a copy of sys.path only once; undo() restores that snapshot.
        if self._savesyspath is None:
            self._savesyspath = sys.path[:]
        sys.path.insert(0, str(path))

    def chdir(self, path):
        """ Change the current working directory to the specified path.
        Path can be a string or a py.path.local object.
        """
        if self._cwd is None:
            self._cwd = os.getcwd()
        if hasattr(path, "chdir"):
            # py.path.local objects provide their own chdir().
            path.chdir()
        else:
            os.chdir(path)

    def undo(self):
        """ Undo previous changes. This call consumes the
        undo stack. Calling it a second time has no effect unless
        you do more monkeypatching after the undo call.

        There is generally no need to call `undo()`, since it is
        called automatically during tear-down.

        Note that the same `monkeypatch` fixture is used across a
        single test function invocation. If `monkeypatch` is used both by
        the test function itself and one of the test fixtures,
        calling `undo()` will undo all of the changes made in
        both functions.
        """
        # Restore in reverse order so repeated patches of the same target
        # unwind back to the original value.
        for obj, name, value in reversed(self._setattr):
            if value is not notset:
                setattr(obj, name, value)
            else:
                delattr(obj, name)
        self._setattr[:] = []
        for dictionary, name, value in reversed(self._setitem):
            if value is notset:
                try:
                    del dictionary[name]
                except KeyError:
                    pass  # was already deleted, so we have the desired state
            else:
                dictionary[name] = value
        self._setitem[:] = []
        if self._savesyspath is not None:
            sys.path[:] = self._savesyspath
            self._savesyspath = None

        if self._cwd is not None:
            os.chdir(self._cwd)
            self._cwd = None
self._cwd = None | unknown | codeparrot/codeparrot-clean | ||
"""
:mod:`jedi.evaluate.imports` is here to resolve import statements and return
the modules/classes/functions/whatever, which they stand for. However there's
not any actual importing done. This module is about finding modules in the
filesystem. This can be quite tricky sometimes, because Python imports are not
always that simple.
This module uses imp for python up to 3.2 and importlib for python 3.3 on; the
correct implementation is delegated to _compatibility.
This module also supports import autocompletion, which means to complete
statements like ``from datetim`` (curser at the end would return ``datetime``).
"""
import imp
import os
import pkgutil
import sys
from itertools import chain
from jedi._compatibility import find_module, unicode
from jedi import common
from jedi import debug
from jedi import cache
from jedi.parser import fast
from jedi.parser import tree
from jedi.evaluate import sys_path
from jedi.evaluate import helpers
from jedi import settings
from jedi.common import source_to_unicode
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate.cache import memoize_default, NO_DEFAULT
def completion_names(evaluator, imp, pos):
    """
    Return the names that could complete the import statement *imp* at
    cursor position *pos*.
    """
    name = imp.name_for_position(pos)
    module = evaluator.wrap(imp.get_parent_until())
    if name is None:
        # Cursor is not on a name yet: count leading dots before the cursor
        # to determine the relative-import level ('...' contributes 3).
        level = 0
        for node in imp.children:
            if node.end_pos <= pos:
                if node in ('.', '...'):
                    level += len(node.value)
        import_path = []
    else:
        # Completion on an existing name.
        # The import path needs to be reduced by one, because we're completing.
        import_path = imp.path_for_name(name)[:-1]
        level = imp.level

    importer = Importer(evaluator, tuple(import_path), module, level)
    if isinstance(imp, tree.ImportFrom):
        c = imp.children
        # Only suggest modules while the cursor is still before the
        # 'import' keyword of a ``from x import y`` statement.
        only_modules = c[c.index('import')].start_pos >= pos
    else:
        only_modules = True
    return importer.completion_names(evaluator, only_modules)
class ImportWrapper(tree.Base):
    """
    Wraps a name inside an import statement and resolves it to the
    modules / definitions it refers to.
    """
    def __init__(self, evaluator, name):
        self._evaluator = evaluator
        self._name = name

        self._import = name.get_parent_until(tree.Import)
        self.import_path = self._import.path_for_name(name)

    @memoize_default()
    def follow(self, is_goto=False):
        """
        Resolve the import and return the matching types (or, with
        ``is_goto``, their names). Returns [] on import recursion.
        """
        if self._evaluator.recursion_detector.push_stmt(self._import):
            # check recursion
            return []

        try:
            module = self._evaluator.wrap(self._import.get_parent_until())
            import_path = self._import.path_for_name(self._name)
            from_import_name = None
            try:
                from_names = self._import.get_from_names()
            except AttributeError:
                # Is an import_name
                pass
            else:
                if len(from_names) + 1 == len(import_path):
                    # We have to fetch the from_names part first and then check
                    # if from_names exists in the modules.
                    from_import_name = import_path[-1]
                    import_path = from_names

            importer = Importer(self._evaluator, tuple(import_path),
                                module, self._import.level)

            types = importer.follow()

            #if self._import.is_nested() and not self.nested_resolve:
            #    scopes = [NestedImportModule(module, self._import)]

            if from_import_name is not None:
                # First try to find the name as a definition inside the
                # imported module(s)...
                types = list(chain.from_iterable(
                    self._evaluator.find_types(t, unicode(from_import_name),
                                               is_goto=is_goto)
                    for t in types))

                if not types:
                    # ...and fall back to importing it as a sub-module.
                    path = import_path + [from_import_name]
                    importer = Importer(self._evaluator, tuple(path),
                                        module, self._import.level)
                    types = importer.follow()
                    # goto only accepts `Name`
                    if is_goto:
                        types = [s.name for s in types]
            else:
                # goto only accepts `Name`
                if is_goto:
                    types = [s.name for s in types]

            debug.dbg('after import: %s', types)
        finally:
            self._evaluator.recursion_detector.pop_stmt()
        return types
class NestedImportModule(tree.Module):
    """
    TODO while there's no use case for nested import module right now, we might
    be able to use them for static analysis checks later on.
    """
    def __init__(self, module, nested_import):
        self._module = module
        self._nested_import = nested_import

    def _get_nested_import_name(self):
        """
        Generates an Import statement, that can be used to fake nested imports.
        """
        i = self._nested_import
        # This is not an existing Import statement. Therefore, set position to
        # 0 (0 is not a valid line number).
        zero = (0, 0)
        names = [unicode(name) for name in i.namespace_names[1:]]
        name = helpers.FakeName(names, self._nested_import)
        new = tree.Import(i._sub_module, zero, zero, name)
        new.parent = self._module
        debug.dbg('Generated a nested import: %s', new)
        return helpers.FakeName(str(i.namespace_names[1]), new)

    def __getattr__(self, name):
        # Everything not defined here is delegated to the wrapped module.
        return getattr(self._module, name)

    def __repr__(self):
        return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
                                   self._nested_import)
def _add_error(evaluator, name, message=None):
    """Record an 'import-error' analysis issue for *name*, when possible."""
    # Plain strings carry no position information; only tree names (which
    # have a ``parent``) can be attached to an analysis error.
    if not hasattr(name, 'parent'):
        return
    analysis.add(evaluator, 'import-error', name, message)
def get_init_path(directory_path):
    """
    The __init__ file can be searched in a directory. If found return it, else
    None.
    """
    suffixes = [triple[0] for triple in imp.get_suffixes()]
    candidates = (os.path.join(directory_path, '__init__' + s) for s in suffixes)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
class Importer(object):
def __init__(self, evaluator, import_path, module, level=0):
"""
An implementation similar to ``__import__``. Use `follow`
to actually follow the imports.
*level* specifies whether to use absolute or relative imports. 0 (the
default) means only perform absolute imports. Positive values for level
indicate the number of parent directories to search relative to the
directory of the module calling ``__import__()`` (see PEP 328 for the
details).
:param import_path: List of namespaces (strings or Names).
"""
debug.speed('import %s' % (import_path,))
self._evaluator = evaluator
self.level = level
self.module = module
try:
self.file_path = module.py__file__()
except AttributeError:
# Can be None for certain compiled modules like 'builtins'.
self.file_path = None
if level:
base = module.py__package__().split('.')
if base == ['']:
base = []
if level > len(base):
path = module.py__file__()
import_path = list(import_path)
for i in range(level):
path = os.path.dirname(path)
dir_name = os.path.basename(path)
# This is not the proper way to do relative imports. However, since
# Jedi cannot be sure about the entry point, we just calculate an
# absolute path here.
if dir_name:
import_path.insert(0, dir_name)
else:
_add_error(self._evaluator, import_path[-1])
import_path = []
# TODO add import error.
debug.warning('Attempted relative import beyond top-level package.')
else:
# Here we basically rewrite the level to 0.
import_path = tuple(base) + import_path
self.import_path = import_path
@property
def str_import_path(self):
"""Returns the import path as pure strings instead of `Name`."""
return tuple(str(name) for name in self.import_path)
@memoize_default()
def sys_path_with_modifications(self):
in_path = []
sys_path_mod = list(sys_path.sys_path_with_modifications(self._evaluator, self.module))
if self.file_path is not None:
# If you edit e.g. gunicorn, there will be imports like this:
# `from gunicorn import something`. But gunicorn is not in the
# sys.path. Therefore look if gunicorn is a parent directory, #56.
if self.import_path: # TODO is this check really needed?
for path in sys_path.traverse_parents(self.file_path):
if os.path.basename(path) == self.str_import_path[0]:
in_path.append(os.path.dirname(path))
# Since we know nothing about the call location of the sys.path,
# it's a possibility that the current directory is the origin of
# the Python execution.
sys_path_mod.insert(0, os.path.dirname(self.file_path))
return in_path + sys_path_mod
@memoize_default(NO_DEFAULT)
def follow(self):
if not self.import_path:
return []
return self._do_import(self.import_path, self.sys_path_with_modifications())
def _do_import(self, import_path, sys_path):
"""
This method is very similar to importlib's `_gcd_import`.
"""
import_parts = [str(i) for i in import_path]
# Handle "magic" Flask extension imports:
# ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
# New style.
ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
modules = self._do_import(ipath, sys_path)
if modules:
return modules
else:
# Old style
return self._do_import(('flaskext',) + import_path[2:], sys_path)
module_name = '.'.join(import_parts)
try:
return [self._evaluator.modules[module_name]]
except KeyError:
pass
if len(import_path) > 1:
# This is a recursive way of importing that works great with
# the module cache.
bases = self._do_import(import_path[:-1], sys_path)
if not bases:
return []
# We can take the first element, because only the os special
# case yields multiple modules, which is not important for
# further imports.
base = bases[0]
# This is a huge exception, we follow a nested import
# ``os.path``, because it's a very important one in Python
# that is being achieved by messing with ``sys.modules`` in
# ``os``.
if [str(i) for i in import_path] == ['os', 'path']:
return self._evaluator.find_types(base, 'path')
try:
# It's possible that by giving it always the sys path (and not
# the __path__ attribute of the parent, we get wrong results
# and nested namespace packages don't work. But I'm not sure.
paths = base.py__path__(sys_path)
except AttributeError:
# The module is not a package.
_add_error(self._evaluator, import_path[-1])
return []
else:
debug.dbg('search_module %s in paths %s', module_name, paths)
for path in paths:
# At the moment we are only using one path. So this is
# not important to be correct.
try:
module_file, module_path, is_pkg = \
find_module(import_parts[-1], [path])
break
except ImportError:
module_path = None
if module_path is None:
_add_error(self._evaluator, import_path[-1])
return []
else:
try:
debug.dbg('search_module %s in %s', import_parts[-1], self.file_path)
# Override the sys.path. It works only good that way.
# Injecting the path directly into `find_module` did not work.
sys.path, temp = sys_path, sys.path
try:
module_file, module_path, is_pkg = \
find_module(import_parts[-1])
finally:
sys.path = temp
except ImportError:
# The module is not a package.
_add_error(self._evaluator, import_path[-1])
return []
source = None
if is_pkg:
# In this case, we don't have a file yet. Search for the
# __init__ file.
module_path = get_init_path(module_path)
elif module_file:
source = module_file.read()
module_file.close()
if module_file is None and not module_path.endswith('.py'):
module = compiled.load_module(module_path)
else:
module = _load_module(self._evaluator, module_path, source, sys_path)
self._evaluator.modules[module_name] = module
return [module]
def _generate_name(self, name):
return helpers.FakeName(name, parent=self.module)
def _get_module_names(self, search_path=None):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
names = []
# add builtin module names
if search_path is None:
names += [self._generate_name(name) for name in sys.builtin_module_names]
if search_path is None:
search_path = self.sys_path_with_modifications()
for module_loader, name, is_pkg in pkgutil.iter_modules(search_path):
names.append(self._generate_name(name))
return names
def completion_names(self, evaluator, only_modules=False):
"""
:param only_modules: Indicates wheter it's possible to import a
definition that is not defined in a module.
"""
from jedi.evaluate import finder
names = []
if self.import_path:
# flask
if self.str_import_path == ('flask', 'ext'):
# List Flask extensions like ``flask_foo``
for mod in self._get_module_names():
modname = str(mod)
if modname.startswith('flask_'):
extname = modname[len('flask_'):]
names.append(self._generate_name(extname))
# Now the old style: ``flaskext.foo``
for dir in self.sys_path_with_modifications():
flaskext = os.path.join(dir, 'flaskext')
if os.path.isdir(flaskext):
names += self._get_module_names([flaskext])
for scope in self.follow():
# Non-modules are not completable.
if not scope.type == 'file_input': # not a module
continue
# namespace packages
if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'):
paths = scope.py__path__(self.sys_path_with_modifications())
names += self._get_module_names(paths)
if only_modules:
# In the case of an import like `from x.` we don't need to
# add all the variables.
if ('os',) == self.str_import_path and not self.level:
# os.path is a hardcoded exception, because it's a
# ``sys.modules`` modification.
names.append(self._generate_name('path'))
continue
for names_dict in scope.names_dicts(search_global=False):
_names = list(chain.from_iterable(names_dict.values()))
if not _names:
continue
_names = finder.filter_definition_names(_names, scope)
names += _names
else:
# Empty import path=completion after import
if not self.level:
names += self._get_module_names()
if self.file_path is not None:
path = os.path.abspath(self.file_path)
for i in range(self.level - 1):
path = os.path.dirname(path)
names += self._get_module_names([path])
return names
def _load_module(evaluator, path=None, source=None, sys_path=None):
    """
    Load and wrap the module at *path* (or from *source*), reusing the
    parser cache when possible and delegating non-``.py`` files (and
    configured auto-import modules) to the compiled-module loader.
    """
    def load(source):
        dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
        if path is not None and path.endswith('.py') \
                and not dotted_path in settings.auto_import_modules:
            if source is None:
                with open(path, 'rb') as f:
                    source = f.read()
        else:
            # Extension/builtin module (or auto-import): no source parsing.
            return compiled.load_module(path)
        p = path
        p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p)
        cache.save_parser(path, p)
        return p.module

    # Reuse a previously parsed module for this path if the cache has one.
    cached = cache.load_parser(path)
    module = load(source) if cached is None else cached.module
    module = evaluator.wrap(module)
    return module
def add_module(evaluator, module_name, module):
    """
    Put *module* into the evaluator's module cache under *module_name*.

    Dotted names are skipped: a name containing dots would collide with the
    separator dots used for nested packages, so — similar to how Python
    itself falls back to ``__main__`` — such modules are simply not cached.
    """
    if '.' in module_name:
        return
    evaluator.modules[module_name] = module
def get_modules_containing_name(evaluator, mods, name):
    """
    Search a name in the directories of modules.

    Yields the given (non-compiled) modules first, then — if dynamic-param
    search is enabled — any sibling ``.py`` file whose source mentions
    *name*, parsed and wrapped as a module.
    """
    def check_python_file(path):
        # Prefer an already-parsed module from the parser cache; fall back
        # to reading the file from disk.
        try:
            return cache.parser_cache[path].parser.module
        except KeyError:
            try:
                return check_fs(path)
            except IOError:
                return None

    def check_fs(path):
        # Cheap textual scan first: only parse files that mention the name.
        with open(path, 'rb') as f:
            source = source_to_unicode(f.read())
            if name in source:
                module_name = os.path.basename(path)[:-3]  # Remove `.py`.
                module = _load_module(evaluator, path, source)
                add_module(evaluator, module_name, module)
                return module

    # skip non python modules
    mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject))
    mod_paths = set()
    for m in mods:
        mod_paths.add(m.path)
        yield m

    if settings.dynamic_params_for_other_modules:
        paths = set(settings.additional_dynamic_modules)
        for p in mod_paths:
            if p is not None:
                d = os.path.dirname(p)
                for entry in os.listdir(d):
                    file_path = os.path.join(d, entry)
                    # FIX: mod_paths holds *full* paths, so the original
                    # ``entry not in mod_paths`` (a basename test) was always
                    # true and re-scanned files already yielded above.
                    if entry.endswith('.py') and file_path not in mod_paths:
                        paths.add(file_path)

        for p in sorted(paths):
            # make testing easier, sort it - same results on every interpreter
            c = check_python_file(p)
            if c is not None and c not in mods and not isinstance(c, compiled.CompiledObject):
                yield c
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.UpdateRaftVoterResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.Readable;
import java.util.Collections;
import java.util.Map;
/**
 * Response for the UpdateRaftVoter RPC. Carries a single top-level error
 * code plus the standard throttle time.
 */
public class UpdateRaftVoterResponse extends AbstractResponse {
    private final UpdateRaftVoterResponseData data;

    public UpdateRaftVoterResponse(UpdateRaftVoterResponseData data) {
        super(ApiKeys.UPDATE_RAFT_VOTER);
        this.data = data;
    }

    @Override
    public UpdateRaftVoterResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        // not supported
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // This response carries at most one (top-level) error code.
        if (data.errorCode() != Errors.NONE.code()) {
            return Collections.singletonMap(Errors.forCode(data.errorCode()), 1);
        } else {
            return Collections.emptyMap();
        }
    }

    /** Parse a response of the given {@code version} from {@code readable}. */
    public static UpdateRaftVoterResponse parse(Readable readable, short version) {
        return new UpdateRaftVoterResponse(
            new UpdateRaftVoterResponseData(readable, version));
    }
}
# -*- coding: utf-8 -*-
from vilya.libs import gyt
import shutil
import tempfile
from os.path import join as opj
from tests.base import TestCase
TEST_REPO = '/tmp/test_gyt/repo/.git'
ENV_FOR_GIT = {
'GIT_AUTHOR_NAME': 'default_test',
'GIT_AUTHOR_EMAIL': 'default_test@douban.com',
'GIT_COMMITTER_NAME': 'default_test',
'GIT_COMMITTER_EMAIL': 'default_test@douban.com',
}
class TestGyt(TestCase):
def _repo(self, bare=False):
if bare:
rep = gyt.repo(self._gd, bare=True, init=True)
else:
rep = gyt.repo(self._gd, self._wt, bare=False, init=True)
return rep
def _add_file(self, rep, filename, content='', commit=True,
commit_msg=None):
f = open(opj(rep.work_tree, filename), 'w')
f.write(content)
f.close()
rep.call(['add', filename])
if commit:
if not commit_msg:
commit_msg = 'unit test commit msg for %s' % filename
rep.call(['commit', '-m', commit_msg], _env=ENV_FOR_GIT)
def setUp(self):
self._gd = tempfile.mkdtemp(prefix='gyt_gd_') # Git dir
self._wt = tempfile.mkdtemp(prefix='gyt_wt_') # Work tree
self._gd2 = tempfile.mkdtemp(prefix='gyt_gd2_') # Git dir
self._wt2 = tempfile.mkdtemp(prefix='gyt_wt2_') # Work tree
def tearDown(self):
shutil.rmtree(self._gd)
shutil.rmtree(self._wt)
shutil.rmtree(self._gd2)
shutil.rmtree(self._wt2)
def test_one_diff(self):
rep = self._repo()
fn1 = 'test1'
content1a = 'a\nb\nc\nd'
content1b = 'a\nb\ncxxx\nd'
self._add_file(rep, fn1, commit_msg='msg1a', content=content1a)
sha = rep.sha()
self._add_file(rep, fn1, commit_msg='msg1b', content=content1b)
d = rep.diff(sha)
assert len(d) == 1
d1 = d[0]
assert d1['filename'] == fn1
assert d1['amode'] == d1['bmode'] == '100644'
assert len(d1['patch']) == 1
assert d1['patch'][0][0] == '@@ -1,4 +1,4 @@'
chunk = d1['patch'][0][1]
assert chunk == [
('idem', u'a'),
('idem', u'b'),
('rem', u'c'),
('add', u'cxxx'),
('idem', u'd'),
('other', u' No newline at end of file')
]
repo = rep.repo
ref_cmt1 = repo.revparse_single(sha)
ref_cmt2 = repo.revparse_single('HEAD')
diff = ref_cmt1.tree.diff(ref_cmt2.tree)
patches, filenames = gyt.parse_raw_diff_patches(diff.patch, True)
assert patches[0][0][1] == [
('idem', u'a'),
('idem', u'b'),
('rem', u'c'),
('add', u'cxxx'),
('idem', u'd'),
('other', u' No newline at end of file')
] | unknown | codeparrot/codeparrot-clean | ||
import requests
import collections
import sys
import time
import json
Entry = collections.namedtuple("Entry", "name position rows")
ROW_TYPES = {}
def row_type_for_columns(name, column_names):
column_names = tuple(column_names)
row_type = ROW_TYPES.get((name, column_names))
if row_type is None:
row_type = collections.namedtuple(name, column_names)
ROW_TYPES[(name, column_names)] = row_type
return row_type
def parse_response(content):
    """Decode a replication JSON payload into a dict of Entry objects.

    *content* is the raw JSON body; each stream's rows are rebuilt as
    namedtuples matching that stream's "field_names".
    """
    result = {}
    for name, value in json.loads(content).items():
        row_type = row_type_for_columns(name, value["field_names"])
        rows = [row_type(*raw_row) for raw_row in value["rows"]]
        result[name] = Entry(name, value["position"], rows)
    return result
def replicate(server, streams):
    """Fetch replication updates from *server* and parse the response.

    *streams* maps stream name -> last-seen position and is sent as the
    query string.
    """
    url = server + "/_synapse/replication"
    # NOTE(review): verify=False disables TLS certificate checking --
    # presumably for self-signed internal servers; confirm before reuse.
    response = requests.get(url, verify=False, params=streams)
    return parse_response(response.content)
def main():
    """Tail a synapse server's replication streams, printing each row.

    Usage: script.py <server-base-url>
    Runs until the connection to the server is lost.
    """
    server = sys.argv[1]

    # Discover every stream and its current position; retry until the
    # server is reachable ("-1" asks for the full stream catalogue).
    streams = None
    while not streams:
        try:
            streams = {
                row.name: row.position
                for row in replicate(server, {"streams": "-1"})["streams"].rows
            }
        except requests.exceptions.ConnectionError:
            time.sleep(0.1)

    # Poll loop: print every new row and advance our stream positions.
    while True:
        try:
            results = replicate(server, streams)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed so Ctrl-C exits cleanly instead of
            # being reported as a lost connection.
            sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
            break

        for update in results.values():
            for row in update.rows:
                sys.stdout.write(repr(row) + "\n")
            streams[update.name] = update.position


if __name__ == '__main__':
    main()
import os
import tempfile
import textwrap
import pytest
def test_options_from_env_vars(script):
    """
    Test if ConfigOptionParser reads env vars (e.g. not using PyPI here)
    """
    script.environ['PIP_NO_INDEX'] = '1'
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    output = result.stdout
    assert "Ignoring indexes:" in output, str(result)
    expected = (
        "DistributionNotFound: No matching distribution found for INITools"
    )
    assert expected in output
def test_command_line_options_override_env_vars(script, virtualenv):
    """
    Test that command line options override environmental variables.
    """
    script.environ['PIP_INDEX_URL'] = 'https://b.pypi.python.org/simple/'
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    env_index_page = "Getting page https://b.pypi.python.org/simple/initools"
    assert env_index_page in result.stdout
    virtualenv.clear()
    result = script.pip(
        'install', '-vvv', '--index-url', 'https://download.zope.org/ppix',
        'INITools',
        expect_error=True,
    )
    # Once --index-url is given, the env-var index must not be consulted.
    assert "b.pypi.python.org" not in result.stdout
    assert "Getting page https://download.zope.org/ppix" in result.stdout
@pytest.mark.network
def test_env_vars_override_config_file(script, virtualenv):
    """
    Test that environmental variables override settings in config files.
    """
    handle, config_file = tempfile.mkstemp(suffix='-pip.cfg', prefix='test-')
    try:
        _test_env_vars_override_config_file(script, virtualenv, config_file)
    finally:
        # `os.close` is a workaround for a bug in subprocess
        # http://bugs.python.org/issue3210
        os.close(handle)
        os.remove(config_file)
def _test_env_vars_override_config_file(script, virtualenv, config_file):
    """Body of test_env_vars_override_config_file, split out so the caller
    can guarantee cleanup of the temporary config file."""
    # set this to make pip load it
    script.environ['PIP_CONFIG_FILE'] = config_file
    # It's important that we test this particular config value ('no-index')
    # because there is/was a bug which only shows up in cases in which
    # 'config-item' and 'config_item' hash to the same value modulo the size
    # of the config dictionary.
    (script.scratch_path / config_file).write(textwrap.dedent("""\
        [global]
        no-index = 1
        """))
    # With no-index set in the config file, the install must fail to
    # resolve INITools.
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    assert (
        "DistributionNotFound: No matching distribution found for INITools"
        in result.stdout
    )
    # The env var overrides the config file, re-enabling the index, so the
    # same install now succeeds.
    script.environ['PIP_NO_INDEX'] = '0'
    virtualenv.clear()
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    assert "Successfully installed INITools" in result.stdout
@pytest.mark.network
def test_command_line_append_flags(script, virtualenv, data):
    """
    Test command line flags that append to defaults set by environmental
    variables.
    """
    script.environ['PIP_FIND_LINKS'] = 'http://pypi.pinaxproject.com'
    result = script.pip(
        'install', '-vvv', 'INITools', '--trusted-host',
        'pypi.pinaxproject.com',
        expect_error=True,
    )
    analyzed_env_page = (
        "Analyzing links from page http://pypi.pinaxproject.com"
    )
    assert analyzed_env_page in result.stdout
    virtualenv.clear()
    result = script.pip(
        'install', '-vvv', '--find-links', data.find_links, 'INITools',
        '--trusted-host', 'pypi.pinaxproject.com',
        expect_error=True,
    )
    # Both the env-var find-links page and the command-line one are used.
    assert analyzed_env_page in result.stdout
    assert "Skipping link %s" % data.find_links in result.stdout
@pytest.mark.network
def test_command_line_appends_correctly(script, data):
    """
    Test multiple appending options set by environmental variables.
    """
    script.environ['PIP_FIND_LINKS'] = (
        'http://pypi.pinaxproject.com %s' % data.find_links
    )
    result = script.pip(
        'install', '-vvv', 'INITools', '--trusted-host',
        'pypi.pinaxproject.com',
        expect_error=True,
    )
    output = result.stdout
    # Both space-separated find-links values must have been consulted.
    assert (
        "Analyzing links from page http://pypi.pinaxproject.com" in output
    ), output
    assert "Skipping link %s" % data.find_links in output
def test_config_file_override_stack(script, virtualenv):
    """
    Test config files (global, overriding a global config with a
    local, overriding all with a command line flag).
    """
    handle, config_file = tempfile.mkstemp(suffix='-pip.cfg', prefix='test-')
    try:
        _test_config_file_override_stack(script, virtualenv, config_file)
    finally:
        # `os.close` is a workaround for a bug in subprocess
        # http://bugs.python.org/issue3210
        os.close(handle)
        os.remove(config_file)
def _test_config_file_override_stack(script, virtualenv, config_file):
    """Body of test_config_file_override_stack: a global config index, then
    an [install]-section override, then a command-line --index-url override."""
    # set this to make pip load it
    script.environ['PIP_CONFIG_FILE'] = config_file
    (script.scratch_path / config_file).write(textwrap.dedent("""\
        [global]
        index-url = https://download.zope.org/ppix
        """))
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    assert (
        "Getting page https://download.zope.org/ppix/initools" in result.stdout
    )
    virtualenv.clear()
    (script.scratch_path / config_file).write(textwrap.dedent("""\
        [global]
        index-url = https://download.zope.org/ppix
        [install]
        index-url = https://pypi.gocept.com/
        """))
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    assert "Getting page https://pypi.gocept.com/initools" in result.stdout
    result = script.pip(
        'install', '-vvv', '--index-url', 'https://pypi.python.org/simple',
        'INITools',
        expect_error=True,
    )
    # BUG FIX: the negative asserts previously searched for
    # "http://download.zope.org/ppix/INITools" and ".../INITools", but the
    # positive asserts above show pip logs https URLs with the lowercased
    # project name, so those checks were vacuously true. Match the URLs
    # that actually appear so the override is genuinely verified.
    assert (
        "Getting page https://download.zope.org/ppix/initools"
        not in result.stdout
    )
    assert "Getting page https://pypi.gocept.com/initools" not in result.stdout
    assert (
        "Getting page https://pypi.python.org/simple/initools" in result.stdout
    )
def test_options_from_venv_config(script, virtualenv):
    """
    Test if ConfigOptionParser reads a virtualenv-local config file
    """
    from pip.locations import config_basename
    ini = virtualenv.location / config_basename
    with open(ini, 'w') as f:
        f.write("[global]\nno-index = true")
    result = script.pip('install', '-vvv', 'INITools', expect_error=True)
    output = result.stdout
    assert "Ignoring indexes:" in output, str(result)
    expected = (
        "DistributionNotFound: No matching distribution found for INITools"
    )
    assert expected in output
def test_install_no_binary_via_config_disables_cached_wheels(script, data):
    """A ``no-binary = :all:`` config entry must force a source install,
    skipping wheel building and ignoring any cached wheel."""
    script.pip('install', 'wheel')
    # FIX: the NamedTemporaryFile was previously never closed (cleanup was
    # left to garbage collection); the `with` block guarantees it is closed
    # and removed even if an assertion fails. The file stays open (and on
    # disk) for the duration of the pip run that reads it.
    with tempfile.NamedTemporaryFile(mode='wt') as config_file:
        script.environ['PIP_CONFIG_FILE'] = config_file.name
        config_file.write(textwrap.dedent("""\
            [global]
            no-binary = :all:
            """))
        config_file.flush()
        res = script.pip(
            'install', '--no-index', '-f', data.find_links,
            'upper', expect_stderr=True)
    assert "Successfully installed upper-2.0" in str(res), str(res)
    # No wheel building for upper, which was blacklisted
    assert "Running setup.py bdist_wheel for upper" not in str(res), str(res)
    # Must have used source, not a cached wheel to install upper.
    assert "Running setup.py install for upper" in str(res), str(res)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.